├── .github └── workflows │ ├── ci.yml │ └── typos.toml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── cyclotomic-rings ├── Cargo.toml └── src │ ├── challenge_set.rs │ ├── challenge_set │ └── error.rs │ ├── lib.rs │ ├── rings.rs │ ├── rings │ ├── babybear.rs │ ├── frog.rs │ ├── goldilocks.rs │ ├── poseidon.rs │ ├── poseidon │ │ ├── babybear.rs │ │ ├── frog.rs │ │ ├── goldilocks.rs │ │ └── stark.rs │ └── stark.rs │ └── rotation.rs ├── deny.toml ├── docs-header.html ├── latticefold ├── Cargo.toml ├── benches │ ├── README.md │ ├── ajtai.rs │ ├── config.toml │ ├── decomposition.rs │ ├── e2e.rs │ ├── env.rs │ ├── folding.rs │ ├── linearization.rs │ └── utils.rs ├── build.rs ├── examples │ ├── README.md │ ├── babybear.rs │ ├── frog.rs │ ├── goldilocks.rs │ └── starkprime.rs └── src │ ├── arith.rs │ ├── arith │ ├── ccs.rs │ ├── error.rs │ ├── r1cs.rs │ └── utils.rs │ ├── commitment.rs │ ├── commitment │ ├── commitment_scheme.rs │ ├── homomorphic_commitment.rs │ └── operations.rs │ ├── decomposition_parameters.rs │ ├── lib.rs │ ├── nifs.rs │ ├── nifs │ ├── decomposition.rs │ ├── decomposition │ │ ├── structs.rs │ │ ├── tests │ │ │ └── mod.rs │ │ └── utils.rs │ ├── error.rs │ ├── folding.rs │ ├── folding │ │ ├── structs.rs │ │ ├── tests │ │ │ └── mod.rs │ │ └── utils.rs │ ├── linearization.rs │ ├── linearization │ │ ├── structs.rs │ │ ├── tests │ │ │ └── mod.rs │ │ └── utils.rs │ └── tests.rs │ ├── transcript.rs │ ├── transcript │ └── poseidon.rs │ ├── utils.rs │ └── utils │ ├── mle_helpers.rs │ ├── security_check.rs │ ├── sumcheck.rs │ └── sumcheck │ ├── prover.rs │ ├── utils.rs │ └── verifier.rs ├── notebooks └── bounds.sage ├── rust-toolchain └── rustfmt.toml /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI Check 2 | on: 3 | merge_group: 4 | pull_request: 5 | push: 6 | branches: 7 | - main 8 | env: 9 | # Use the same ssh-agent socket value across all 
jobs 10 | # Useful when a GH action is using SSH behind-the-scenes 11 | SSH_AUTH_SOCK: /tmp/ssh_agent.sock 12 | CARGO_TERM_COLOR: always 13 | # Disable incremental compilation. 14 | # 15 | # Incremental compilation is useful as part of an edit-build-test-edit cycle, 16 | # as it lets the compiler avoid recompiling code that hasn't changed. However, 17 | # on CI, we're not making small edits; we're almost always building the entire 18 | # project from scratch. Thus, incremental compilation on CI actually 19 | # introduces *additional* overhead to support making future builds 20 | # faster...but no future builds will ever occur in any given CI environment. 21 | # 22 | # See https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow 23 | # for details. 24 | CARGO_INCREMENTAL: 0 25 | # Allow more retries for network requests in cargo (downloading crates) and 26 | # rustup (installing toolchains). This should help to reduce flaky CI failures 27 | # from transient network timeouts or other issues. 28 | CARGO_NET_RETRY: 10 29 | RUSTUP_MAX_RETRIES: 10 30 | # Don't emit giant backtraces in the CI logs. 31 | RUST_BACKTRACE: short 32 | 33 | # Jobs launched for a PR event cancel the ongoing one for the same workflow + PR, 34 | # Only retries (of the same run) for a Push event cancel the prior one. 
35 | concurrency: 36 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 37 | cancel-in-progress: true 38 | 39 | 40 | jobs: 41 | test: 42 | name: Test 43 | runs-on: ubuntu-latest 44 | strategy: 45 | matrix: 46 | include: 47 | - feature: default 48 | steps: 49 | - uses: actions/checkout@v3 50 | - uses: actions-rs/toolchain@v1 51 | # use the more efficient nextest 52 | - uses: taiki-e/install-action@nextest 53 | - uses: Swatinem/rust-cache@v2 54 | - name: Build 55 | # This build will be reused by nextest, 56 | # and also checks (--all-targets) that benches don't bit-rot 57 | run: cargo build --release --all-targets --no-default-features 58 | - name: Test 59 | run: | 60 | cargo nextest run --release --workspace --no-default-features 61 | - name: Doctests # nextest does not support doc tests 62 | run: | 63 | cargo test --doc 64 | 65 | fmt: 66 | name: Rustfmt 67 | timeout-minutes: 30 68 | runs-on: ubuntu-latest 69 | steps: 70 | - uses: actions/checkout@v2 71 | - uses: actions-rs/toolchain@v1 72 | - uses: Swatinem/rust-cache@v2 73 | - run: rustup component add rustfmt 74 | - uses: actions-rs/cargo@v1 75 | with: 76 | command: fmt 77 | args: --all -- --check 78 | 79 | clippy: 80 | name: Clippy lint checks 81 | runs-on: ubuntu-latest 82 | steps: 83 | - name: Setup SSH passphrase 84 | env: 85 | SSH_PASSPHRASE: ${{secrets.CI_SSH_PASSPHRASE}} 86 | SSH_PRIVATE_KEY: ${{secrets.CI_KEY}} 87 | run: | 88 | ssh-agent -a $SSH_AUTH_SOCK > /dev/null 89 | echo 'echo $SSH_PASSPHRASE' > ~/.ssh_askpass && chmod +x ~/.ssh_askpass 90 | echo "$SSH_PRIVATE_KEY" | tr -d '\r' | DISPLAY=None SSH_ASKPASS=~/.ssh_askpass ssh-add - >/dev/null 91 | eval `ssh-agent -s` 92 | - uses: actions/checkout@v2 93 | - uses: actions-rs/toolchain@v1 94 | with: 95 | components: clippy 96 | - uses: Swatinem/rust-cache@v2 97 | - name: Run clippy 98 | uses: actions-rs/cargo@v1 99 | with: 100 | command: clippy 101 | args: --all-targets --all-features -- -D warnings 102 | 103 | audit: 104 | name: 
Cargo deny checks (licenses, vulnerabilities, unused dependencies) 105 | runs-on: ubuntu-latest 106 | steps: 107 | - name: Setup SSH passphrase 108 | env: 109 | SSH_PASSPHRASE: ${{secrets.CI_SSH_PASSPHRASE}} 110 | SSH_PRIVATE_KEY: ${{secrets.CI_KEY}} 111 | run: | 112 | ssh-agent -a $SSH_AUTH_SOCK > /dev/null 113 | echo 'echo $SSH_PASSPHRASE' > ~/.ssh_askpass && chmod +x ~/.ssh_askpass 114 | echo "$SSH_PRIVATE_KEY" | tr -d '\r' | DISPLAY=None SSH_ASKPASS=~/.ssh_askpass ssh-add - >/dev/null 115 | eval `ssh-agent -s` 116 | - uses: actions/checkout@v2 117 | - uses: actions-rs/toolchain@v1 118 | - uses: Swatinem/rust-cache@v2 119 | - name: Run cargo deny 120 | run: | 121 | cargo install cargo-deny 122 | cargo deny check 123 | 124 | typos: 125 | name: Spell Check with Typos 126 | runs-on: ubuntu-latest 127 | steps: 128 | - uses: actions/checkout@v4 129 | - name: Use typos with config file 130 | uses: crate-ci/typos@master 131 | with: 132 | config: .github/workflows/typos.toml -------------------------------------------------------------------------------- /.github/workflows/typos.toml: -------------------------------------------------------------------------------- 1 | [default] 2 | extend-ignore-re = [ 3 | "0x.*_i128", 4 | ] -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /.vscode 3 | *.sage.py -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ "latticefold", "cyclotomic-rings" ] 3 | resolver = "2" 4 | 5 | [workspace.package] 6 | edition = "2021" 7 | license = "Apache-2.0 OR MIT" 8 | 9 | [workspace.dependencies] 10 | ark-crypto-primitives = { version = "0.4.0", default-features = false, features = [ 11 | "sponge", 12 | ] } 13 | ark-ff = { version = "0.4.2", default-features = 
false } 14 | ark-serialize = { version = "0.4.2", features = ["derive"] } 15 | ark-std = { version = "0.4.0", default-features = false } 16 | stark-rings = { git = "https://github.com/NethermindEth/stark-rings.git", branch = "main", default-features = false } 17 | stark-rings-linalg = { git = "https://github.com/NethermindEth/stark-rings.git", branch = "main", default-features = false } 18 | stark-rings-poly = { git = "https://github.com/NethermindEth/stark-rings.git", branch = "main", default-features = false } 19 | num-bigint = { version = "0.4.5", default-features = false } 20 | rand = { version = "0.8.5", default-features = false } 21 | thiserror = { version = "2.0.3", default-features = false } 22 | cyclotomic-rings = { path = "cyclotomic-rings", default-features = false } 23 | [workspace.metadata.docs.rs] 24 | # To build locally, use 25 | # RUSTDOCFLAGS="--html-in-header docs-header.html" cargo doc --no-deps --document-private-items --open 26 | rustdoc-args = [ "--html-in-header", "docs-header.html" ] 27 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2024 Demerzel Solutions Ltd (A.K.A Nethermind) (nethermind.io) 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2024 Demerzel Solutions Ltd (A.K.A Nethermind) (nethermind.io) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LatticeFold 2 | 3 | A proof-of-concept implementation of the LatticeFold folding scheme engineered by [Nethermind](https://nethermind.io) based on the work 4 | [LatticeFold: A Lattice-based Folding Scheme and its Applications to Succinct Proof Systems](https://eprint.iacr.org/2024/257) by Dan Boneh and Binyi Chen. 5 | 6 | **DISCLAIMER:** This is a proof-of-concept prototype, and in particular has not received careful code review. This implementation is provided "as is" and NOT ready for production use. Use at your own risk. 7 | 8 | ## Benchmarks 9 | 10 | To run the benchmarks on your local machine, simply execute `cargo bench`. This will take around 48 hours. 11 | Use `cargo bench --bench` to measure relevant parts of the protocol as well as the ajtai commitment scheme, and comment the prime fields you don't want to measure. 12 | 13 | ## Building 14 | 15 | The [rust-toolchain](https://github.com/NethermindEth/latticefold/blob/main/rust-toolchain) file pins the version of the Rust toolchain, which the LatticeFold library builds with, to the specific version `nightly-2025-03-06`. 
16 | 17 | One can install the `nightly-2025-03-06` toolchain by invoking: 18 | ```bash 19 | rustup install nightly-2025-03-06 20 | ``` 21 | 22 | After that, use `cargo`, the standard Rust build tool, to build the library: 23 | 24 | ```bash 25 | git clone https://github.com/NethermindEth/latticefold.git 26 | cd latticefold 27 | cargo build --release 28 | ``` 29 | 30 | ## Usage 31 | Import the library: 32 | ```toml 33 | [dependencies] 34 | latticefold = { git = "https://github.com/NethermindEth/latticefold.git", package = "latticefold" } 35 | ``` 36 | 37 | Available packages: 38 | - `latticefold`: main crate, contains the non-interactive folding scheme implementation, together with the Ajtai commitment scheme, R1CS/CCS structures, Fiat-Shamir transcript machinery, etc. 39 | - `cyclotomic-rings`: contains the trait definition of a ring suitable to be used in the LatticeFold protocol, a few ready-to-use rings and short challenge set machinery. 40 | 41 | ## Performance report 42 | The performance report for this library can be found [here](https://nethermind.notion.site/Latticefold-and-lattice-based-operations-performance-report-153360fc38d080ac930cdeeffed69559). 43 | 44 | ## Examples 45 | 46 | Check [latticefold/examples/README.md](latticefold/examples/README.md) for examples. 47 | 48 | ## Frontends 49 | 50 | Currently, the only way to define a circuit to be folded is by specifying it as a [rank-1 constraint system (R1CS)](https://github.com/NethermindEth/latticefold/blob/main/latticefold/src/arith/r1cs.rs) or a [customizable constraint system (CCS)](https://github.com/NethermindEth/latticefold/blob/main/latticefold/src/arith.rs). 51 | 52 | ## License 53 | The crates in this repository are licensed under either of the following licenses, at your discretion. 
54 | 55 | * Apache License Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE)) 56 | * MIT license ([LICENSE-MIT](LICENSE-MIT)) 57 | 58 | Unless you explicitly state otherwise, any contribution submitted for inclusion in this library by you shall be dual licensed as above (as defined in the Apache v2 License), without any additional terms or conditions. 59 | 60 | ## Acknowledgments 61 | 62 | - This project is built on top of [our fork](https://github.com/NethermindEth/stark-rings) of [lattirust library](https://github.com/cknabs/lattirust) originally developed by [Christian Knabenhans](https://github.com/cknabs) and [Giacomo Fenzi](https://github.com/WizardOfMenlo). 63 | - We adapted [the sumcheck protocol from Jolt](https://github.com/a16z/jolt/blob/fa45507aaddb1815bafd54332e4b14173a7f8699/jolt-core/src/subprotocols/sumcheck.rs#L35) to the ring setting. 64 | - A lot of definitions are directly transferred from [sonobe](https://github.com/privacy-scaling-explorations/sonobe) library. 65 | - The implementation is supported by Ethereum Foundation [ZK Grant](https://blog.ethereum.org/2024/06/25/zk-grants-round-announce). 
66 | -------------------------------------------------------------------------------- /cyclotomic-rings/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cyclotomic-rings" 3 | version = "0.1.0" 4 | edition.workspace = true 5 | license.workspace = true 6 | 7 | [dependencies] 8 | ark-ff = { workspace = true } 9 | ark-std = { workspace = true } 10 | ark-crypto-primitives = { workspace = true } 11 | num-bigint = { workspace = true } 12 | rand = { workspace = true } 13 | stark-rings = { workspace = true } 14 | stark-rings-linalg = { workspace = true } 15 | stark-rings-poly = { workspace = true } 16 | thiserror = { workspace = true } 17 | 18 | [features] 19 | default = [ "std" ] 20 | std = [] 21 | getrandom = [ "ark-std/getrandom" ] 22 | -------------------------------------------------------------------------------- /cyclotomic-rings/src/challenge_set.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Short challenge set API. 3 | //! 4 | 5 | use error::ChallengeSetError; 6 | 7 | use crate::{ark_base::*, rings::SuitableRing}; 8 | 9 | pub mod error; 10 | 11 | /// A trait to specify short challenge set for use in the LatticeFold protocol. 12 | pub trait LatticefoldChallengeSet { 13 | /// Amount of bytes needed to obtain a single short challenge. 14 | const BYTES_NEEDED: usize; 15 | 16 | /// Given a slice of bytes `bs` returns the short challenge encode with these bytes 17 | /// in the coefficient form. Returns `TooFewBytes` error if there is not enough bytes 18 | /// to obtain a short challenge. 
19 | fn short_challenge_from_random_bytes( 20 | bs: &[u8], 21 | ) -> Result; 22 | } 23 | -------------------------------------------------------------------------------- /cyclotomic-rings/src/challenge_set/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | use crate::ark_base::*; 4 | 5 | /// Short challenge generation error. 6 | #[derive(Debug, Error)] 7 | pub enum ChallengeSetError { 8 | /// An error meaning there is not enough bytes to generate 9 | /// a short challenge. 10 | #[error("too few bytes: got {0}, expected {1}")] 11 | TooFewBytes(usize, usize), 12 | } 13 | -------------------------------------------------------------------------------- /cyclotomic-rings/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! A crate containing the trait definition of a ring suitable to be used in the LatticeFold protocol, 3 | //! a few ready-to-use rings and short challenge set machinery. 4 | //! 5 | 6 | #![cfg_attr(not(feature = "std"), no_std)] 7 | #![forbid(unsafe_code)] 8 | 9 | #[macro_use] 10 | extern crate ark_std; 11 | 12 | pub mod challenge_set; 13 | pub mod rings; 14 | pub mod rotation; 15 | 16 | #[doc(hidden)] 17 | mod ark_base { 18 | pub use ark_std::{ 19 | clone::Clone, 20 | convert::From, 21 | iter::Iterator, 22 | prelude::rust_2021::{derive, Debug}, 23 | result::Result::{self, Err, Ok}, 24 | vec::*, 25 | }; 26 | } 27 | -------------------------------------------------------------------------------- /cyclotomic-rings/src/rings.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Cyclotomic ring API for the LatticeFold protocol. 3 | //! 
4 | 5 | use ark_crypto_primitives::sponge::{poseidon::PoseidonConfig, Absorb}; 6 | use ark_ff::{Field, PrimeField}; 7 | use ark_std::ops::MulAssign; 8 | use stark_rings::{ 9 | balanced_decomposition::Decompose, 10 | cyclotomic_ring::{CRT, ICRT}, 11 | traits::MulUnchecked, 12 | Cyclotomic, OverField, PolyRing, 13 | }; 14 | 15 | mod babybear; 16 | mod frog; 17 | mod goldilocks; 18 | mod poseidon; 19 | mod stark; 20 | 21 | pub use babybear::*; 22 | pub use frog::*; 23 | pub use goldilocks::*; 24 | pub use stark::*; 25 | 26 | /// An umbrella trait of a ring suitable to be used in the LatticeFold protocol. 27 | /// 28 | /// The ring is assumed to be of the form $$\mathbb{Z}_p\[X\]/(f(X)),$$ for a polynomial 29 | /// $f(X) \in \mathbb{Z}_p\[X\],\: d=\mathrm{deg}\ f(X)$, (typically, this is a cyclotomic polynomial $\Phi_m(X)$) so it has 30 | /// two isomorphic forms: 31 | /// * The coefficient form, i.e. a ring element is represented as the unique polynomial $g$ of the 32 | /// degree $\mathrm{deg}\ g < d$. 33 | /// * The NTT form, i.e. a ring element is represented as its image along the Chinese-remainder isomorphism 34 | /// $$\mathbb{Z}_p\[X\]/(f(X))\cong \prod\limits\_{i=1}^t\mathbb{Z}_p\[X\]/(f\_i(X)),$$ 35 | /// where $f\_1(X),\ldots, f\_t(X)$ are irreducible polynomials in $ \mathbb{Z}_p\[X\]$ such that 36 | /// $$f(X) = f\_1(X)\cdot\ldots\cdot f\_t(X).$$ 37 | /// 38 | /// When $f(X)$ is a cyclotomic polynomial the factors $f\_1(X),\ldots, f\_t(X)$ have equal degrees, thus the fields in the RHS of 39 | /// the Chinese-remainder isomorphism are all isomorphic to the same extension of the field $\mathbb{Z}\_p$, implying the NTT form 40 | /// of the ring is a direct product of $t$ instances of $\mathbb{Z}\_{p^\tau}$ for $\tau=\frac{d}{t}$ with componentwise operations. 
41 | /// 42 | /// If `R: SuitableRing` then we assume that the type `R` represents the NTT form of the ring as the arithmetic operations 43 | /// in the NTT form are much faster and we intend to use the NTT form as much as possible only occasionally turning to the 44 | /// coefficient form (usually, when Ajtai security aspects are discussed). The associated type `CoefficientRepresentation` is the corresponding 45 | /// coefficient form representation of the ring. 46 | /// 47 | /// A type `R: SuitableRing` and its `R::CoefficientRepresentation` has to satisfy the following conditions: 48 | /// * `R` has to be an `OverField` to exhibit an algebra over a field `R::BaseRing` structure. 49 | /// * `R::CoefficientRepresentation` has to be an algebra over the prime field `R::BaseRing::BasePrimeField` of the field `R::BaseRing`. 50 | /// * `R::BaseRing::BasePrimeField` has to be absorbable by sponge hashes (`R::BaseRing::BasePrimeField: Absorb`). 51 | /// * `R` and `R::CoefficientRepresentation` should be convertible into each other. 52 | /// * `R::CoefficientRepresentation` is radix-$B$ decomposable and exhibits cyclotomic structure (`R::CoefficientRepresentation: Decompose + Cyclotomic`). 53 | /// 54 | /// In addition to the data above a suitable ring has to provide Poseidon hash parameters for its base prime field (i.e. $\mathbb{Z}\_p$). 55 | pub trait SuitableRing: 56 | OverField 57 | + ICRT 58 | + for<'a> MulAssign<&'a u128> 59 | + MulUnchecked 60 | where 61 | <::BaseRing as Field>::BasePrimeField: Absorb, 62 | { 63 | /// The coefficient form version of the ring. 64 | type CoefficientRepresentation: OverField::BaseRing as Field>::BasePrimeField> 65 | + Decompose 66 | + Cyclotomic 67 | + for<'a> MulAssign<&'a u128> 68 | + CRT; 69 | 70 | /// Poseidon sponge parameters for the base prime field. 71 | type PoseidonParams: GetPoseidonParams<<::BaseRing as Field>::BasePrimeField>; 72 | } 73 | 74 | /// A trait for types with an associated Poseidon sponge configuration. 
75 | pub trait GetPoseidonParams { 76 | /// Returns the associated Poseidon sponge configuration. 77 | fn get_poseidon_config() -> PoseidonConfig; 78 | } 79 | -------------------------------------------------------------------------------- /cyclotomic-rings/src/rings/babybear.rs: -------------------------------------------------------------------------------- 1 | use stark_rings::cyclotomic_ring::models::babybear::{Fq, RqNTT, RqPoly}; 2 | 3 | use super::SuitableRing; 4 | use crate::{ 5 | ark_base::*, 6 | challenge_set::{error, LatticefoldChallengeSet}, 7 | }; 8 | 9 | /// BabyBear ring in the NTT form. 10 | /// 11 | /// The base field of the NTT form is a degree-9 12 | /// extension of the BabyBear field. 13 | /// 14 | /// The NTT form has 8 components. 15 | pub type BabyBearRingNTT = RqNTT; 16 | 17 | /// BabyBear ring in the coefficient form. 18 | /// 19 | /// The cyclotomic polynomial is $X^72 - X^36 + 1$ of degree 72. 20 | pub type BabyBearRingPoly = RqPoly; 21 | 22 | impl SuitableRing for BabyBearRingNTT { 23 | type CoefficientRepresentation = RqPoly; 24 | type PoseidonParams = BabyBearPoseidonConfig; 25 | } 26 | 27 | pub struct BabyBearPoseidonConfig; 28 | 29 | #[derive(Clone)] 30 | pub struct BabyBearChallengeSet; 31 | 32 | const MAX_COEFF: i16 = 32; 33 | 34 | /// For Babybear prime the challenge set is the set of all 35 | /// ring elements whose coefficients are in the range [-32, 32[. 36 | impl LatticefoldChallengeSet for BabyBearChallengeSet { 37 | /// To generate an element in [-32, 32[ it is enough to use 6 bits. 38 | /// Thus to generate 24 coefficients in that range 18 bytes is enough. 
39 | const BYTES_NEEDED: usize = 18; 40 | 41 | fn short_challenge_from_random_bytes( 42 | bs: &[u8], 43 | ) -> Result { 44 | if bs.len() != Self::BYTES_NEEDED { 45 | return Err(error::ChallengeSetError::TooFewBytes( 46 | bs.len(), 47 | Self::BYTES_NEEDED, 48 | )); 49 | } 50 | 51 | let mut coeffs: Vec = Vec::with_capacity(24); 52 | 53 | for i in 0..6 { 54 | let x0: i16 = (bs[3 * i] & 0b0011_1111) as i16 - MAX_COEFF; 55 | let x1: i16 = (((bs[3 * i] & 0b1100_0000) >> 6) | ((bs[3 * i + 1] & 0b0000_1111) << 2)) 56 | as i16 57 | - MAX_COEFF; 58 | let x2: i16 = (((bs[3 * i + 1] & 0b1111_0000) >> 4) 59 | | ((bs[3 * i + 2] & 0b0000_0011) << 4)) as i16 60 | - MAX_COEFF; 61 | let x3: i16 = ((bs[3 * i + 2] & 0b1111_1100) >> 2) as i16 - MAX_COEFF; 62 | 63 | coeffs.extend_from_slice(&[Fq::from(x0), Fq::from(x1), Fq::from(x2), Fq::from(x3)]); 64 | } 65 | 66 | Ok(BabyBearRingPoly::from(coeffs)) 67 | } 68 | } 69 | 70 | #[cfg(test)] 71 | mod tests { 72 | use ark_ff::BigInt; 73 | use stark_rings::cyclotomic_ring::models::babybear::Fq; 74 | 75 | use super::*; 76 | 77 | #[test] 78 | fn test_small_challenge_from_random_bytes() { 79 | let challenge = BabyBearChallengeSet::short_challenge_from_random_bytes(&[ 80 | 0x7b, 0x4b, 0xe5, 0x8e, 0xe5, 0x11, 0xd2, 0xd0, 0x9c, 0x22, 0xba, 0x2e, 0xeb, 0xa8, 81 | 0xba, 0x35, 0xf2, 0x18, 82 | ]) 83 | .unwrap(); 84 | 85 | let res_coeffs: Vec = vec![ 86 | Fq::new(BigInt([27])), 87 | Fq::new(BigInt([13])), 88 | Fq::new(BigInt([2013265909])), 89 | Fq::new(BigInt([25])), 90 | Fq::new(BigInt([2013265903])), 91 | Fq::new(BigInt([2013265911])), 92 | Fq::new(BigInt([2013265919])), 93 | Fq::new(BigInt([2013265893])), 94 | Fq::new(BigInt([2013265907])), 95 | Fq::new(BigInt([2013265892])), 96 | Fq::new(BigInt([2013265902])), 97 | Fq::new(BigInt([7])), 98 | Fq::new(BigInt([2])), 99 | Fq::new(BigInt([8])), 100 | Fq::new(BigInt([11])), 101 | Fq::new(BigInt([2013265900])), 102 | Fq::new(BigInt([11])), 103 | Fq::new(BigInt([3])), 104 | Fq::new(BigInt([10])), 105 | 
Fq::new(BigInt([14])), 106 | Fq::new(BigInt([21])), 107 | Fq::new(BigInt([2013265897])), 108 | Fq::new(BigInt([2013265904])), 109 | Fq::new(BigInt([2013265895])), 110 | ]; 111 | let expected = BabyBearRingPoly::from(res_coeffs); 112 | 113 | assert_eq!(expected, challenge) 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /cyclotomic-rings/src/rings/frog.rs: -------------------------------------------------------------------------------- 1 | use stark_rings::cyclotomic_ring::models::frog_ring::{Fq, RqNTT, RqPoly}; 2 | 3 | use super::SuitableRing; 4 | use crate::{ 5 | ark_base::*, 6 | challenge_set::{error, LatticefoldChallengeSet}, 7 | }; 8 | 9 | /// Frog ring in the NTT form. 10 | /// 11 | /// The base field of the NTT form is a degree-4 12 | /// extension of the Frog field ($p=15912092521325583641$). 13 | /// 14 | /// The NTT norm has 4 components. 15 | pub type FrogRingNTT = RqNTT; 16 | 17 | /// Frog ring in the coefficient form. 18 | /// 19 | /// The cyclotomic polynomial is $X^16+1$ of degree 16. 20 | pub type FrogRingPoly = RqPoly; 21 | 22 | impl SuitableRing for FrogRingNTT { 23 | type CoefficientRepresentation = RqPoly; 24 | type PoseidonParams = FrogPoseidonConfig; 25 | } 26 | 27 | pub struct FrogPoseidonConfig; 28 | 29 | #[derive(Clone)] 30 | pub struct FrogChallengeSet; 31 | 32 | /// For Frog prime the challenge set is the set of all 33 | /// ring elements whose coefficients are in the range [-128, 128[. 
34 | impl LatticefoldChallengeSet for FrogChallengeSet { 35 | const BYTES_NEEDED: usize = 16; 36 | 37 | fn short_challenge_from_random_bytes( 38 | bs: &[u8], 39 | ) -> Result< 40 | ::CoefficientRepresentation, 41 | crate::challenge_set::error::ChallengeSetError, 42 | > { 43 | if bs.len() != Self::BYTES_NEEDED { 44 | return Err(error::ChallengeSetError::TooFewBytes( 45 | bs.len(), 46 | Self::BYTES_NEEDED, 47 | )); 48 | } 49 | 50 | Ok(FrogRingPoly::from( 51 | bs.iter() 52 | .map(|&x| Fq::from(x as i16 - 128)) 53 | .collect::>(), 54 | )) 55 | } 56 | } 57 | 58 | #[cfg(test)] 59 | mod tests { 60 | use ark_ff::BigInt; 61 | use stark_rings::cyclotomic_ring::models::frog_ring::Fq; 62 | 63 | use super::*; 64 | 65 | #[test] 66 | fn test_small_challenge_from_random_bytes() { 67 | let challenge = FrogChallengeSet::short_challenge_from_random_bytes(&[ 68 | 0x7b, 0x4b, 0xe5, 0x8e, 0xe5, 0x11, 0xd2, 0xd0, 0x9c, 0x22, 0xba, 0x2e, 0xeb, 0xa8, 69 | 0xba, 0x35, 70 | ]) 71 | .unwrap(); 72 | 73 | let res_coeffs: Vec = vec![ 74 | Fq::new(BigInt([15912092521325583636])), 75 | Fq::new(BigInt([15912092521325583588])), 76 | Fq::new(BigInt([101])), 77 | Fq::new(BigInt([14])), 78 | Fq::new(BigInt([101])), 79 | Fq::new(BigInt([15912092521325583530])), 80 | Fq::new(BigInt([82])), 81 | Fq::new(BigInt([80])), 82 | Fq::new(BigInt([28])), 83 | Fq::new(BigInt([15912092521325583547])), 84 | Fq::new(BigInt([58])), 85 | Fq::new(BigInt([15912092521325583559])), 86 | Fq::new(BigInt([107])), 87 | Fq::new(BigInt([40])), 88 | Fq::new(BigInt([58])), 89 | Fq::new(BigInt([15912092521325583566])), 90 | ]; 91 | 92 | let expected = FrogRingPoly::from(res_coeffs); 93 | 94 | assert_eq!(expected, challenge) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /cyclotomic-rings/src/rings/goldilocks.rs: -------------------------------------------------------------------------------- 1 | use stark_rings::cyclotomic_ring::models::goldilocks::{Fq, RqNTT, RqPoly}; 2 | 3 | use 
super::SuitableRing; 4 | use crate::{ 5 | ark_base::*, 6 | challenge_set::{error, LatticefoldChallengeSet}, 7 | }; 8 | 9 | /// Goldilocks ring in the NTT form. 10 | /// 11 | /// The base field of the NTT form is a degree-3 12 | /// extension of the Goldilocks field. 13 | /// 14 | /// The NTT form has 8 components. 15 | pub type GoldilocksRingNTT = RqNTT; 16 | 17 | /// BabyBear ring in the coefficient form. 18 | /// 19 | /// The cyclotomic polynomial is $X^24-X^12+1$ of degree 24. 20 | pub type GoldilocksRingPoly = RqPoly; 21 | 22 | impl SuitableRing for GoldilocksRingNTT { 23 | type CoefficientRepresentation = RqPoly; 24 | type PoseidonParams = GoldilocksPoseidonConfig; 25 | } 26 | 27 | pub struct GoldilocksPoseidonConfig; 28 | 29 | #[derive(Clone)] 30 | pub struct GoldilocksChallengeSet; 31 | 32 | const MAX_COEFF: i16 = 32; 33 | 34 | /// For Goldilocks prime the challenge set is the set of all 35 | /// ring elements whose coefficients are in the range [-32, 32[. 36 | impl LatticefoldChallengeSet for GoldilocksChallengeSet { 37 | /// To generate an element in [-32, 32[ it is enough to use 6 bits. 38 | /// Thus to generate 24 coefficients in that range 18 bytes is enough. 
39 | const BYTES_NEEDED: usize = 18; 40 | 41 | fn short_challenge_from_random_bytes( 42 | bs: &[u8], 43 | ) -> Result { 44 | if bs.len() != Self::BYTES_NEEDED { 45 | return Err(error::ChallengeSetError::TooFewBytes( 46 | bs.len(), 47 | Self::BYTES_NEEDED, 48 | )); 49 | } 50 | 51 | let mut coeffs: Vec = Vec::with_capacity(24); 52 | 53 | for i in 0..6 { 54 | let x0: i16 = (bs[3 * i] & 0b0011_1111) as i16 - MAX_COEFF; 55 | let x1: i16 = (((bs[3 * i] & 0b1100_0000) >> 6) | ((bs[3 * i + 1] & 0b0000_1111) << 2)) 56 | as i16 57 | - MAX_COEFF; 58 | let x2: i16 = (((bs[3 * i + 1] & 0b1111_0000) >> 4) 59 | | ((bs[3 * i + 2] & 0b0000_0011) << 4)) as i16 60 | - MAX_COEFF; 61 | let x3: i16 = ((bs[3 * i + 2] & 0b1111_1100) >> 2) as i16 - MAX_COEFF; 62 | 63 | coeffs.extend_from_slice(&[Fq::from(x0), Fq::from(x1), Fq::from(x2), Fq::from(x3)]); 64 | } 65 | 66 | Ok(GoldilocksRingPoly::from(coeffs)) 67 | } 68 | } 69 | 70 | #[cfg(test)] 71 | mod tests { 72 | use ark_ff::BigInt; 73 | use stark_rings::cyclotomic_ring::models::goldilocks::Fq; 74 | 75 | use super::*; 76 | 77 | #[test] 78 | fn test_small_challenge_from_random_bytes() { 79 | let challenge = GoldilocksChallengeSet::short_challenge_from_random_bytes(&[ 80 | 0x7b, 0x4b, 0xe5, 0x8e, 0xe5, 0x11, 0xd2, 0xd0, 0x9c, 0x22, 0xba, 0x2e, 0xeb, 0xa8, 81 | 0xba, 0x35, 0xf2, 0x18, 82 | ]) 83 | .unwrap(); 84 | 85 | let res_coeffs: Vec = vec![ 86 | Fq::new(BigInt([27])), 87 | Fq::new(BigInt([13])), 88 | Fq::new(BigInt([18446744069414584309])), 89 | Fq::new(BigInt([25])), 90 | Fq::new(BigInt([18446744069414584303])), 91 | Fq::new(BigInt([18446744069414584311])), 92 | Fq::new(BigInt([18446744069414584319])), 93 | Fq::new(BigInt([18446744069414584293])), 94 | Fq::new(BigInt([18446744069414584307])), 95 | Fq::new(BigInt([18446744069414584292])), 96 | Fq::new(BigInt([18446744069414584302])), 97 | Fq::new(BigInt([7])), 98 | Fq::new(BigInt([2])), 99 | Fq::new(BigInt([8])), 100 | Fq::new(BigInt([11])), 101 | Fq::new(BigInt([18446744069414584300])), 
102 | Fq::new(BigInt([11])), 103 | Fq::new(BigInt([3])), 104 | Fq::new(BigInt([10])), 105 | Fq::new(BigInt([14])), 106 | Fq::new(BigInt([21])), 107 | Fq::new(BigInt([18446744069414584297])), 108 | Fq::new(BigInt([18446744069414584304])), 109 | Fq::new(BigInt([18446744069414584295])), 110 | ]; 111 | 112 | let expected = GoldilocksRingPoly::from(res_coeffs); 113 | 114 | assert_eq!(expected, challenge) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /cyclotomic-rings/src/rings/poseidon.rs: -------------------------------------------------------------------------------- 1 | mod babybear; 2 | mod frog; 3 | mod goldilocks; 4 | mod stark; 5 | -------------------------------------------------------------------------------- /cyclotomic-rings/src/rings/stark.rs: -------------------------------------------------------------------------------- 1 | // PGold = 2^64 − 2^32 + 1 2 | use stark_rings::cyclotomic_ring::models::stark_prime::{Fq, RqNTT, RqPoly}; 3 | 4 | use super::SuitableRing; 5 | use crate::{ 6 | ark_base::*, 7 | challenge_set::{error, LatticefoldChallengeSet}, 8 | }; 9 | 10 | /// Starknet prime ring in the NTT form. 11 | /// 12 | /// The base field of the NTT form is the Starknet prime field. 13 | /// 14 | /// The NTT form has 16 components. 15 | pub type StarkRingNTT = RqNTT; 16 | 17 | /// Starknet prime ring in the coefficient form. 18 | /// 19 | /// The cyclotomic polynomial is $X^16 + 1$ of degree 16. 20 | pub type StarkRingPoly = RqPoly; 21 | 22 | impl SuitableRing for StarkRingNTT { 23 | type CoefficientRepresentation = StarkRingPoly; 24 | 25 | type PoseidonParams = StarkPoseidonConfig; 26 | } 27 | 28 | pub struct StarkPoseidonConfig; 29 | 30 | #[derive(Clone)] 31 | pub struct StarkChallengeSet; 32 | 33 | /// Small challenges are the ring elements with coefficients in range [0; 2^8[. 
34 | impl LatticefoldChallengeSet for StarkChallengeSet { 35 | const BYTES_NEEDED: usize = 16; 36 | 37 | fn short_challenge_from_random_bytes( 38 | bs: &[u8], 39 | ) -> Result<::CoefficientRepresentation, error::ChallengeSetError> 40 | { 41 | if bs.len() != Self::BYTES_NEEDED { 42 | return Err(error::ChallengeSetError::TooFewBytes( 43 | bs.len(), 44 | Self::BYTES_NEEDED, 45 | )); 46 | } 47 | 48 | Ok(StarkRingPoly::from( 49 | bs.iter().map(|&x| Fq::from(x)).collect::>(), 50 | )) 51 | } 52 | } 53 | 54 | #[cfg(test)] 55 | mod tests { 56 | use ark_ff::BigInt; 57 | 58 | use super::*; 59 | 60 | #[test] 61 | fn test_small_challenge_from_random_bytes() { 62 | let challenge = StarkChallengeSet::short_challenge_from_random_bytes(&[ 63 | 0x7b, 0x4b, 0xe5, 0x8e, 0xe5, 0x11, 0xd2, 0xd0, 0x9c, 0x22, 0xba, 0x2e, 0xeb, 0xa8, 64 | 0xba, 0x35, 65 | ]) 66 | .unwrap(); 67 | 68 | let res_coeffs: Vec = vec![ 69 | Fq::new(BigInt([123, 0, 0, 0])), 70 | Fq::new(BigInt([75, 0, 0, 0])), 71 | Fq::new(BigInt([229, 0, 0, 0])), 72 | Fq::new(BigInt([142, 0, 0, 0])), 73 | Fq::new(BigInt([229, 0, 0, 0])), 74 | Fq::new(BigInt([17, 0, 0, 0])), 75 | Fq::new(BigInt([210, 0, 0, 0])), 76 | Fq::new(BigInt([208, 0, 0, 0])), 77 | Fq::new(BigInt([156, 0, 0, 0])), 78 | Fq::new(BigInt([34, 0, 0, 0])), 79 | Fq::new(BigInt([186, 0, 0, 0])), 80 | Fq::new(BigInt([46, 0, 0, 0])), 81 | Fq::new(BigInt([235, 0, 0, 0])), 82 | Fq::new(BigInt([168, 0, 0, 0])), 83 | Fq::new(BigInt([186, 0, 0, 0])), 84 | Fq::new(BigInt([53, 0, 0, 0])), 85 | ]; 86 | 87 | let expected = StarkRingPoly::from(res_coeffs); 88 | 89 | assert_eq!(expected, challenge) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | # https://embarkstudios.github.io/cargo-deny/index.html 2 | 3 | [graph] 4 | targets = [ 5 | { triple = "x86_64-unknown-linux-gnu" }, 6 | { triple = "aarch64-unknown-linux-gnu" }, 7 | { triple 
= "x86_64-unknown-linux-musl" }, 8 | { triple = "aarch64-apple-darwin" }, 9 | { triple = "x86_64-apple-darwin" }, 10 | { triple = "x86_64-pc-windows-msvc" }, 11 | { triple = "wasm32-unknown-unknown" }, 12 | { triple = "wasm32-wasi" }, 13 | { triple = "aarch64-linux-android" }, 14 | { triple = "aarch64-apple-ios" }, 15 | ] 16 | all-features = true 17 | no-default-features = false 18 | 19 | [output] 20 | feature-depth = 1 21 | 22 | [advisories] 23 | db-path = "~/.cargo/advisory-db" 24 | db-urls = ["https://github.com/rustsec/advisory-db"] 25 | yanked = "deny" 26 | ignore = [ 27 | "RUSTSEC-2024-0388", # https://rustsec.org/advisories/RUSTSEC-2024-0388 28 | "RUSTSEC-2024-0436" # https://rustsec.org/advisories/RUSTSEC-2024-0436 29 | ] 30 | 31 | [licenses] 32 | allow = [ 33 | "MIT", 34 | "Apache-2.0", 35 | "BSD-3-Clause", 36 | "BSD-2-Clause", 37 | "Unicode-3.0", 38 | "Unicode-DFS-2016", 39 | "Zlib", 40 | ] 41 | confidence-threshold = 0.8 42 | 43 | [licenses.private] 44 | ignore = false 45 | registries = [] 46 | 47 | [bans] 48 | multiple-versions = "warn" 49 | wildcards = "allow" # TODO change to deny when publishing to crates.io (and remove all git and path deps) 50 | highlight = "all" 51 | 52 | [bans.workspace-dependencies] 53 | duplicates = 'deny' 54 | unused = 'deny' 55 | 56 | [sources] 57 | unknown-registry = "deny" 58 | unknown-git = "deny" 59 | # TODO remove this git dependency after the stark-rings status is clarified 60 | allow-git = [ 61 | "https://github.com/NethermindEth/stark-rings.git", 62 | ] 63 | 64 | [sources.allow-org] 65 | github = [] 66 | gitlab = [] 67 | bitbucket = [] 68 | -------------------------------------------------------------------------------- /docs-header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 16 | 17 | -------------------------------------------------------------------------------- /latticefold/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "latticefold" 3 | version = "0.1.0" 4 | edition.workspace = true 5 | license.workspace = true 6 | 7 | [dependencies] 8 | ark-crypto-primitives = { workspace = true } 9 | ark-ff = { workspace = true } 10 | ark-serialize = { workspace = true } 11 | ark-std = { workspace = true } 12 | cyclotomic-rings = { workspace = true } 13 | hashbrown = "0.15" 14 | stark-rings = { workspace = true } 15 | stark-rings-linalg = { workspace = true } 16 | stark-rings-poly = { workspace = true } 17 | num-traits = { version = "0.2.19", default-features = false } 18 | rand = { workspace = true } 19 | thiserror = { workspace = true } 20 | num-bigint = { workspace = true } 21 | rayon = { version = "1.10.0", optional = true } 22 | 23 | 24 | [build-dependencies] 25 | serde = { version = "1.0.216", features = ["derive"] } 26 | toml = "0.8.19" 27 | quote = "1.0.37" 28 | proc-macro2 = "1.0.92" 29 | 30 | [features] 31 | default = [ "std" ] 32 | std = [ "ark-std/std", "cyclotomic-rings/std", "rand/std" ] 33 | parallel = [ 34 | "dep:rayon", 35 | "std", 36 | "ark-std/parallel", 37 | "stark-rings/parallel", 38 | "stark-rings-linalg/parallel", 39 | "stark-rings-poly/parallel", 40 | ] 41 | getrandom = [ "ark-std/getrandom" ] 42 | 43 | # dev-only 44 | dhat-heap = [] 45 | 46 | [profile.release] 47 | debug = 1 48 | 49 | [profile.bench] 50 | lto = false 51 | codegen-units = 16 52 | opt-level = 3 53 | rpath = true 54 | 55 | [lints.clippy] 56 | single_match = "warn" 57 | single_match_else = "warn" 58 | needless_match = "warn" 59 | needless_late_init = "warn" 60 | redundant_pattern_matching = "warn" 61 | redundant_pattern = "warn" 62 | redundant_guards = "warn" 63 | collapsible_match = "warn" 64 | match_single_binding = "warn" 65 | match_same_arms = "warn" 66 | match_ref_pats = "warn" 67 | match_bool = "warn" 68 | needless_bool = "warn" 69 | implicit_clone = "warn" 70 | 71 | [dev-dependencies] 72 | 
criterion = "0.5.1" 73 | dhat = "0.3.2" 74 | humansize = "2.1.3" 75 | lazy_static = "1.5.0" 76 | 77 | [[bench]] 78 | name = "ajtai" 79 | harness = false 80 | 81 | [[bench]] 82 | name = "linearization" 83 | harness = false 84 | 85 | 86 | [[bench]] 87 | name = "decomposition" 88 | harness = false 89 | 90 | 91 | [[bench]] 92 | name = "folding" 93 | harness = false 94 | 95 | [[bench]] 96 | name = "e2e" 97 | harness = false 98 | -------------------------------------------------------------------------------- /latticefold/benches/README.md: -------------------------------------------------------------------------------- 1 | # Benchmarking with Environment Variables 2 | 3 | This benchmark suite allows users to selectively enable or disable certain benchmarks using environment variables. By leveraging environment variables, users can tailor the benchmark runs to specific configurations. 4 | 5 | --- 6 | 7 | ## Environment Variables 8 | 9 | The following environment variables can be set to **enable or filter benchmarks**: 10 | 11 | ### Boolean Flags (Enable/Disable Filtering) 12 | - `GOLDILOCKS` 13 | - `STARK` 14 | - `BABYBEAR` 15 | - `FROG` 16 | 17 | - `PROVER` 18 | - `VERIFIER` 19 | - `AJTAI` 20 | 21 | - `LINEARIZATION` 22 | - `DECOMPOSITION` 23 | - `FOLDING` 24 | - `E2E` 25 | 26 | > **Default Behavior**: 27 | If none of the flags for a group are set, all flags are enabled by default. 28 | 29 | ### Optional Variables (Numeric Values) 30 | The following variables control specific parameters for benchmarks. If not set, they will be ignored: 31 | - `DURATION` (default: `50.0` seconds as floating point number) – Duration for benchmarks 32 | - `WARMUP` (default: `1.0` seconds as floating point number) – Warmup time 33 | 34 | --- 35 | 36 | ## How It Works 37 | - If none of `LINEARIZATION`, `DECOMPOSITION`, `FOLDING`, and `E2E` is set, all benchmarks would be run. 38 | - If some of them is set, only set benchmarks would be run. 
39 | - Similarly, if none of `PROVER`, `VERIFIER` and `AJTAI` is set, all benchmarks would be run, otherwise, 40 | only set benchmarks would be run. 41 | - `GOLDILOCKS`, `STARK`, `BABYBEAR` and `FROG` are used similarly. 42 | 43 | --- 44 | 45 | ## Examples 46 | 47 | ### **Bash** 48 | 49 | 1. **Run only `LINEARIZATION` benchmarks:** 50 | ```bash 51 | LINEARIZATION=1 cargo bench 52 | ``` 53 | 54 | 2. **Run only `PROVER` `DECOMPOSITION` benchmarks:** 55 | ```bash 56 | PROVER=1 DECOMPOSITION=1 cargo bench 57 | ``` 58 | 59 | 3. **Run benchmarks matching specific parameters for GOLDILOCKS ring:** 60 | ```bash 61 | KAPPA=10 GOLDILOCKS=1 L=8 GOLDILOCKS=1 cargo bench 62 | ``` 63 | 64 | --- 65 | 66 | ### **PowerShell** 67 | 1. **Run only `LINEARIZATION` benchmarks:** 68 | ```powershell 69 | $env:LINEARIZATION=1; cargo bench 70 | ``` 71 | 72 | 2. **Run only `PROVER` `DECOMPOSITION` benchmarks:** 73 | ```powershell 74 | $env:PROVER=1; $env:DECOMPOSITION=1; cargo bench 75 | ``` 76 | 77 | 3. **Run benchmarks matching specific parameters for GOLDILOCKS ring:** 78 | ```powershell 79 | $env:KAPPA=10; $env:GOLDILOCKS=1; $env:L=8; $env:GOLDILOCKS=1; cargo bench 80 | ``` 81 | 82 | --- 83 | 84 | ## Numeric Parameters 85 | 86 | Numeric parameters can be used to filter benchmarks, if they are set they only benchmarks with matching parameter would be run 87 | 88 | - X_LEN 89 | - KAPPA 90 | - W 91 | - WIT_LEN 92 | - B 93 | - L 94 | - B_SMALL 95 | - K 96 | 97 | -------------------------------------------------------------------------------- /latticefold/benches/ajtai.rs: -------------------------------------------------------------------------------- 1 | use ark_std::{time::Duration, UniformRand}; 2 | use criterion::{ 3 | criterion_group, criterion_main, AxisScale, BatchSize::SmallInput, BenchmarkId, Criterion, 4 | PlotConfiguration, 5 | }; 6 | use cyclotomic_rings::rings::{BabyBearRingNTT, FrogRingNTT, GoldilocksRingNTT, StarkRingNTT}; 7 | use env::ENV; 8 | use 
latticefold::commitment::AjtaiCommitmentScheme; 9 | use stark_rings::cyclotomic_ring::{CRT, ICRT}; 10 | 11 | mod env; 12 | 13 | include!(concat!(env!("OUT_DIR"), "/generated_ajtai_benchmarks.rs")); 14 | 15 | fn ajtai_benchmarks(c: &mut Criterion) { 16 | bench_ajtai_goldilocks(c); 17 | bench_ajtai_starkprime(c); 18 | bench_ajtai_babybear(c); 19 | bench_ajtai_frog(c); 20 | } 21 | 22 | pub fn benchmarks_main(c: &mut Criterion) { 23 | ajtai_benchmarks(c); 24 | } 25 | 26 | criterion_group!( 27 | name=benches; 28 | config = Criterion::default().sample_size(10).measurement_time(Duration::from_secs_f32(ENV.duration)).warm_up_time(Duration::from_secs_f32(ENV.warmup)); 29 | targets = benchmarks_main 30 | ); 31 | criterion_main!(benches); 32 | -------------------------------------------------------------------------------- /latticefold/benches/decomposition.rs: -------------------------------------------------------------------------------- 1 | use ark_std::time::Duration; 2 | use criterion::{criterion_group, criterion_main, AxisScale, Criterion, PlotConfiguration}; 3 | use env::ENV; 4 | use latticefold::decomposition_parameters::DecompositionParams; 5 | 6 | mod env; 7 | mod utils; 8 | 9 | include!(concat!( 10 | env!("OUT_DIR"), 11 | "/generated_decomposition_benchmarks.rs" 12 | )); 13 | 14 | pub fn benchmarks_main(c: &mut Criterion) { 15 | bench_goldilocks_decomposition(c); 16 | bench_goldilocks_non_scalar_decomposition(c); 17 | bench_goldilocks_degree_three_non_scalar_decomposition(c); 18 | 19 | bench_stark_prime_decomposition(c); 20 | bench_stark_prime_non_scalar_decomposition(c); 21 | bench_stark_prime_degree_three_non_scalar_decomposition(c); 22 | 23 | bench_frog_decomposition(c); 24 | bench_frog_non_scalar_decomposition(c); 25 | bench_frog_degree_three_non_scalar_decomposition(c); 26 | 27 | bench_single_babybear_decomposition(c); 28 | bench_single_babybear_non_scalar_decomposition(c); 29 | bench_single_babybear_degree_three_non_scalar_decomposition(c); 30 | } 31 | 32 | 
criterion_group!( 33 | name=benches; 34 | config = Criterion::default().sample_size(10).measurement_time(Duration::from_secs_f32(ENV.duration)).warm_up_time(Duration::from_secs_f32(ENV.warmup)); 35 | targets = benchmarks_main); 36 | criterion_main!(benches); 37 | -------------------------------------------------------------------------------- /latticefold/benches/e2e.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use criterion::{criterion_group, criterion_main, AxisScale, Criterion, PlotConfiguration}; 4 | use env::ENV; 5 | use latticefold::decomposition_parameters::DecompositionParams; 6 | 7 | mod env; 8 | 9 | mod utils; 10 | 11 | include!(concat!(env!("OUT_DIR"), "/generated_e2e_benchmarks.rs")); 12 | 13 | pub fn benchmarks_main(c: &mut Criterion) { 14 | bench_goldilocks_e2e(c); 15 | bench_goldilocks_non_scalar_e2e(c); 16 | bench_goldilocks_degree_three_non_scalar_e2e(c); 17 | 18 | bench_stark_prime_e2e(c); 19 | bench_stark_prime_non_scalar_e2e(c); 20 | bench_stark_prime_degree_three_non_scalar_e2e(c); 21 | 22 | bench_frog_e2e(c); 23 | bench_frog_non_scalar_e2e(c); 24 | bench_frog_degree_three_non_scalar_e2e(c); 25 | 26 | bench_single_babybear_e2e(c); 27 | bench_single_babybear_non_scalar_e2e(c); 28 | bench_single_babybear_degree_three_non_scalar_e2e(c); 29 | } 30 | 31 | criterion_group!( 32 | name=benches; 33 | config = Criterion::default().sample_size(10).measurement_time(Duration::from_secs_f32(ENV.duration)).warm_up_time(Duration::from_secs_f32(ENV.warmup)); 34 | targets = benchmarks_main); 35 | criterion_main!(benches); 36 | -------------------------------------------------------------------------------- /latticefold/benches/env.rs: -------------------------------------------------------------------------------- 1 | use std::{env, str::FromStr}; 2 | 3 | use lazy_static::lazy_static; 4 | 5 | fn get_env_var(key: &str) -> Option { 6 | if let Ok(var) = env::var(key) { 7 | 
var.parse::().ok() 8 | } else { 9 | None 10 | } 11 | } 12 | 13 | #[allow(dead_code)] 14 | #[allow(non_snake_case)] 15 | pub struct Env { 16 | pub duration: f32, 17 | pub warmup: f32, 18 | pub GoldilocksRingNTT: bool, 19 | pub StarkRingNTT: bool, 20 | pub BabyBearRingNTT: bool, 21 | pub FrogRingNTT: bool, 22 | pub prover: bool, 23 | pub verifier: bool, 24 | pub ajtai: bool, 25 | pub linearization: bool, 26 | pub decomposition: bool, 27 | pub folding: bool, 28 | pub e2e: bool, 29 | pub x_len: Option, 30 | pub kappa: Option, 31 | pub w: Option, 32 | pub wit_len: Option, 33 | pub b: Option, 34 | pub l: Option, 35 | pub b_small: Option, 36 | pub k: Option, 37 | } 38 | 39 | lazy_static! { 40 | pub static ref ENV: Env = { 41 | let goldilocks = get_env_var::("GOLDILOCKS").is_some(); 42 | let stark = get_env_var::("STARK").is_some(); 43 | let babybear = get_env_var::("BABYBEAR").is_some(); 44 | let frog = get_env_var::("FROG").is_some(); 45 | 46 | let (goldilocks, stark, babybear, frog) = ( 47 | goldilocks || !stark && !babybear && !frog, 48 | stark || !goldilocks && !babybear && !frog, 49 | babybear || !goldilocks && !stark && !frog, 50 | frog || !goldilocks && !stark && !babybear, 51 | ); 52 | 53 | let prover = get_env_var::("PROVER").is_some(); 54 | let verifier = get_env_var::("VERIFIER").is_some(); 55 | let ajtai = get_env_var::("AJTAI").is_some(); 56 | let (prover, verifier, ajtai) = ( 57 | prover || !verifier && !ajtai, 58 | verifier || !prover && !ajtai, 59 | ajtai || !prover && !verifier, 60 | ); 61 | 62 | let linearization = get_env_var::("LINEARIZATION").is_some(); 63 | let decomposition = get_env_var::("DECOMPOSITION").is_some(); 64 | let folding = get_env_var::("FOLDING").is_some(); 65 | let e2e = get_env_var::("E2E").is_some(); 66 | 67 | let (linearization, decomposition, folding, e2e) = ( 68 | linearization || !decomposition && !folding && !e2e, 69 | decomposition || !linearization && !folding && !e2e, 70 | folding || !linearization && !decomposition && !e2e, 
71 | e2e || !linearization && !decomposition && !folding, 72 | ); 73 | 74 | Env { 75 | duration: get_env_var("DURATION").unwrap_or(50.0), 76 | warmup: get_env_var("WARMUP").unwrap_or(1.0), 77 | GoldilocksRingNTT: goldilocks, 78 | StarkRingNTT: stark, 79 | BabyBearRingNTT: babybear, 80 | FrogRingNTT: frog, 81 | prover, 82 | verifier, 83 | ajtai, 84 | linearization, 85 | decomposition, 86 | folding, 87 | e2e, 88 | x_len: get_env_var("X_LEN"), 89 | kappa: get_env_var("KAPPA"), 90 | w: get_env_var("W"), 91 | wit_len: get_env_var("WIT_LEN"), 92 | b: get_env_var("B"), 93 | l: get_env_var("L"), 94 | b_small: get_env_var("B_SMALL"), 95 | k: get_env_var("K"), 96 | } 97 | }; 98 | } 99 | -------------------------------------------------------------------------------- /latticefold/benches/folding.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use criterion::{criterion_group, criterion_main, AxisScale, Criterion, PlotConfiguration}; 4 | use env::ENV; 5 | use latticefold::decomposition_parameters::DecompositionParams; 6 | 7 | mod env; 8 | mod utils; 9 | 10 | include!(concat!(env!("OUT_DIR"), "/generated_folding_benchmarks.rs")); 11 | 12 | pub fn benchmarks_main(c: &mut Criterion) { 13 | bench_goldilocks_folding(c); 14 | bench_goldilocks_non_scalar_folding(c); 15 | bench_goldilocks_degree_three_non_scalar_folding(c); 16 | 17 | bench_stark_prime_folding(c); 18 | bench_stark_prime_non_scalar_folding(c); 19 | bench_stark_prime_degree_three_non_scalar_folding(c); 20 | 21 | bench_frog_folding(c); 22 | bench_frog_non_scalar_folding(c); 23 | bench_frog_degree_three_non_scalar_folding(c); 24 | 25 | bench_single_babybear_folding(c); 26 | bench_single_babybear_non_scalar_folding(c); 27 | bench_single_babybear_degree_three_non_scalar_folding(c); 28 | } 29 | 30 | criterion_group!( 31 | name=benches; 32 | config = 
Criterion::default().sample_size(10).measurement_time(Duration::from_secs_f32(ENV.duration)).warm_up_time(Duration::from_secs_f32(ENV.warmup)); 33 | targets = benchmarks_main); 34 | criterion_main!(benches); 35 | -------------------------------------------------------------------------------- /latticefold/benches/linearization.rs: -------------------------------------------------------------------------------- 1 | use ark_std::time::Duration; 2 | use criterion::{criterion_group, criterion_main, AxisScale, Criterion, PlotConfiguration}; 3 | use env::ENV; 4 | use latticefold::decomposition_parameters::DecompositionParams; 5 | 6 | mod env; 7 | mod utils; 8 | 9 | include!(concat!( 10 | env!("OUT_DIR"), 11 | "/generated_linearization_benchmarks.rs" 12 | )); 13 | 14 | pub fn benchmarks_main(c: &mut Criterion) { 15 | bench_goldilocks_linearization(c); 16 | bench_goldilocks_non_scalar_linearization(c); 17 | bench_goldilocks_degree_three_non_scalar_linearization(c); 18 | 19 | bench_stark_prime_linearization(c); 20 | bench_stark_prime_non_scalar_linearization(c); 21 | bench_stark_prime_degree_three_non_scalar_linearization(c); 22 | 23 | bench_frog_linearization(c); 24 | bench_frog_non_scalar_linearization(c); 25 | bench_frog_degree_three_non_scalar_linearization(c); 26 | 27 | bench_single_babybear_linearization(c); 28 | bench_single_babybear_non_scalar_linearization(c); 29 | bench_single_babybear_degree_three_non_scalar_linearization(c); 30 | } 31 | 32 | criterion_group!( 33 | name=benches; 34 | config = Criterion::default().sample_size(10).measurement_time(Duration::from_secs_f32(ENV.duration)).warm_up_time(Duration::from_secs_f32(ENV.warmup)); 35 | targets = benchmarks_main); 36 | criterion_main!(benches); 37 | -------------------------------------------------------------------------------- /latticefold/examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples README 2 | 3 | This file explains how to use the 
examples in this repository. Examples demonstrate functionality and can be customized using environment variables. Instructions are provided for Linux/MacOS (bash/zsh) and Windows (PowerShell). 4 | 5 | ## Implemented examples 6 | 7 | - goldilocks 8 | - babybear 9 | - frog 10 | - starkprime 11 | 12 | ## Customization with Environment Variables 13 | 14 | The examples in this repository support customization via environment variables. Most examples use the following parameters, except for `starkprime`, which has its own set of parameters detailed below, to tailor their behavior: 15 | 16 | - **`PARAM_B`**: Sets the value of `B` in `DecompositionParams`. 17 | - Default: `32768` (`1 << 15`) 18 | - **`PARAM_L`**: Sets the value of `L` in `DecompositionParams`. 19 | - Default: `5` 20 | - **`PARAM_B_SMALL`**: Sets the value of `B_SMALL` in `DecompositionParams`. 21 | - Default: `2` 22 | - **`PARAM_K`**: Sets the value of `K` in `DecompositionParams`. 23 | - Default: `15` 24 | - **`PARAM_C`**: Sets the value of `C`, controlling challenge set parameters. 25 | - Default: `4` 26 | - **`PARAM_WIT_LEN`**: Sets the witness length. 27 | - Default: `4` 28 | 29 | ### starkprime-specific Parameters 30 | 31 | - **`PARAM_B_STARK`**: Sets the value of `B` in `DecompositionParams`. 32 | - Default: `1073741824u128` 33 | - **`PARAM_L_STARK`**: Sets the value of `L` in `DecompositionParams`. 34 | - Default: `9` 35 | - **`PARAM_B_SMALL_STARK`**: Sets the value of `B_SMALL` in `DecompositionParams`. 36 | - Default: `2` 37 | - **`PARAM_K_STARK`**: Sets the value of `K` in `DecompositionParams`. 38 | - Default: `30` 39 | - **`PARAM_C_STARK`**: Sets the value of `C`, controlling challenge set parameters. 40 | - Default: `4` 41 | - **`PARAM_WIT_LEN_STARK`**: Sets the witness length. 42 | - Default: `4` 43 | 44 | These parameters influence the behavior and output of the examples. 45 | 46 | ## Setting Environment Variables 47 | 48 | ### Linux/MacOS (bash/zsh) 49 | 50 | 1. Open a terminal. 
51 | 52 | 2. Export the desired environment variables before running the examples. For example: 53 | 54 | ```bash 55 | export PARAM_B=65536 56 | export PARAM_L=6 57 | export PARAM_B_SMALL=3 58 | export PARAM_K=16 59 | export PARAM_C=5 60 | export PARAM_WIT_LEN=5 61 | 62 | cargo run --example <example_name> 63 | ``` 64 | 65 | 3. Replace `<example_name>` with the name of the example you want to run. 66 | 67 | ### Windows (PowerShell) 68 | 69 | 1. Open PowerShell. 70 | 71 | 2. Set the desired environment variables before running the examples. For example: 72 | 73 | ```powershell 74 | $env:PARAM_B=65536 75 | $env:PARAM_L=6 76 | $env:PARAM_B_SMALL=3 77 | $env:PARAM_K=16 78 | $env:PARAM_C=5 79 | $env:PARAM_WIT_LEN=5 80 | 81 | cargo run --example <example_name> 82 | ``` 83 | 84 | 3. Replace `<example_name>` with the name of the example you want to run. 85 | 86 | ## Example Output 87 | 88 | When you modify environment variables, the generated parameters are automatically updated in the example's output. This allows for testing different configurations and validating results under various conditions. 89 | 90 | ## Default Values 91 | 92 | If no environment variables are specified, the examples will run with the following defaults: 93 | 94 | - `PARAM_B`: `32768` 95 | - `PARAM_L`: `5` 96 | - `PARAM_B_SMALL`: `2` 97 | - `PARAM_K`: `15` 98 | - `PARAM_C`: `4` 99 | - `PARAM_WIT_LEN`: `4` 100 | 101 | ## Notes 102 | 103 | - Ensure you rebuild the examples after modifying environment variables to see the changes. 104 | 105 | ```bash 106 | cargo clean && cargo run --example <example_name> 107 | ``` 108 | 109 | - For detailed instructions on each example, refer to the example's source code or inline comments. 
110 | 111 | -------------------------------------------------------------------------------- /latticefold/examples/babybear.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::{fmt::Debug, time::Instant}; 4 | 5 | use ark_serialize::{CanonicalSerialize, Compress}; 6 | use ark_std::{vec::Vec, UniformRand}; 7 | use cyclotomic_rings::{ 8 | challenge_set::LatticefoldChallengeSet, 9 | rings::{BabyBearChallengeSet, BabyBearRingNTT, SuitableRing}, 10 | }; 11 | use latticefold::{ 12 | arith::{ 13 | ccs::get_test_dummy_degree_three_ccs_non_scalar, r1cs::get_test_dummy_z_split_ntt, Arith, 14 | Witness, CCCS, CCS, LCCCS, 15 | }, 16 | commitment::AjtaiCommitmentScheme, 17 | nifs::{ 18 | linearization::{LFLinearizationProver, LinearizationProver}, 19 | NIFSProver, NIFSVerifier, 20 | }, 21 | transcript::poseidon::PoseidonTranscript, 22 | }; 23 | 24 | include!(concat!(env!("OUT_DIR"), "/examples_generated.rs")); 25 | 26 | #[allow(dead_code)] 27 | pub fn wit_and_ccs_gen_degree_three_non_scalar< 28 | const X_LEN: usize, 29 | const C: usize, // rows 30 | const WIT_LEN: usize, 31 | const W: usize, // columns 32 | P: DecompositionParams, 33 | R: Clone + UniformRand + Debug + SuitableRing, 34 | >( 35 | r1cs_rows: usize, 36 | ) -> ( 37 | CCCS, 38 | Witness, 39 | CCS, 40 | AjtaiCommitmentScheme, 41 | ) { 42 | let mut rng = ark_std::test_rng(); 43 | 44 | let new_r1cs_rows = if P::L == 1 && (WIT_LEN > 0 && (WIT_LEN & (WIT_LEN - 1)) == 0) { 45 | r1cs_rows - 2 46 | } else { 47 | r1cs_rows // This makes a square matrix but is too much memory 48 | }; 49 | let (one, x_ccs, w_ccs) = get_test_dummy_z_split_ntt::(); 50 | 51 | let mut z = vec![one]; 52 | z.extend(&x_ccs); 53 | z.extend(&w_ccs); 54 | let ccs: CCS = 55 | get_test_dummy_degree_three_ccs_non_scalar::(&z, P::L, new_r1cs_rows); 56 | ccs.check_relation(&z).expect("R1CS invalid!"); 57 | 58 | let scheme: AjtaiCommitmentScheme = AjtaiCommitmentScheme::rand(&mut rng); 
59 | let wit: Witness = Witness::from_w_ccs::

(w_ccs); 60 | 61 | let cm_i: CCCS = CCCS { 62 | cm: wit.commit::(&scheme).unwrap(), 63 | x_ccs, 64 | }; 65 | 66 | (cm_i, wit, ccs, scheme) 67 | } 68 | 69 | #[allow(clippy::type_complexity)] 70 | fn setup_example_environment< 71 | const X_LEN: usize, 72 | const C: usize, 73 | RqNTT: SuitableRing, 74 | DP: DecompositionParams, 75 | const W: usize, 76 | const WIT_LEN: usize, 77 | CS: LatticefoldChallengeSet, 78 | >() -> ( 79 | LCCCS, 80 | Witness, 81 | CCCS, 82 | Witness, 83 | CCS, 84 | AjtaiCommitmentScheme, 85 | ) { 86 | let r1cs_rows = X_LEN + WIT_LEN + 1; 87 | 88 | let (cm_i, wit, ccs, scheme) = 89 | wit_and_ccs_gen_degree_three_non_scalar::(r1cs_rows); 90 | 91 | let rand_w_ccs: Vec = (0..WIT_LEN).map(|i| RqNTT::from(i as u64)).collect(); 92 | let wit_acc = Witness::from_w_ccs::(rand_w_ccs); 93 | 94 | let mut transcript = PoseidonTranscript::::default(); 95 | 96 | let (acc, _) = LFLinearizationProver::<_, PoseidonTranscript>::prove( 97 | &cm_i, 98 | &wit_acc, 99 | &mut transcript, 100 | &ccs, 101 | ) 102 | .expect("Failed to generate linearization proof"); 103 | 104 | (acc, wit_acc, cm_i, wit, ccs, scheme) 105 | } 106 | 107 | type RqNTT = BabyBearRingNTT; 108 | type CS = BabyBearChallengeSet; 109 | type T = PoseidonTranscript; 110 | 111 | fn main() { 112 | println!("Setting up example environment..."); 113 | 114 | println!("Decomposition parameters:"); 115 | println!("\tB: {}", BabyBearExampleDP::B); 116 | println!("\tL: {}", BabyBearExampleDP::L); 117 | println!("\tB_SMALL: {}", BabyBearExampleDP::B_SMALL); 118 | println!("\tK: {}", BabyBearExampleDP::K); 119 | 120 | let (acc, wit_acc, cm_i, wit_i, ccs, scheme) = 121 | setup_example_environment::(); 122 | 123 | let mut prover_transcript = PoseidonTranscript::::default(); 124 | let mut verifier_transcript = PoseidonTranscript::::default(); 125 | println!("Generating proof..."); 126 | let start = Instant::now(); 127 | 128 | let (_, _, proof) = NIFSProver::::prove( 129 | &acc, 130 | &wit_acc, 131 | &cm_i, 132 | 
&wit_i, 133 | &mut prover_transcript, 134 | &ccs, 135 | &scheme, 136 | ) 137 | .unwrap(); 138 | let duration = start.elapsed(); 139 | println!("Proof generated in {:?}", duration); 140 | 141 | let mut serialized_proof = Vec::new(); 142 | 143 | println!("Serializing proof (with compression)..."); 144 | proof 145 | .serialize_with_mode(&mut serialized_proof, Compress::Yes) 146 | .unwrap(); 147 | let compressed_size = serialized_proof.len(); 148 | println!( 149 | "Proof size (with compression) size: {}", 150 | humansize::format_size(compressed_size, humansize::BINARY) 151 | ); 152 | 153 | println!("Serializing proof (without compression)..."); 154 | proof 155 | .serialize_with_mode(&mut serialized_proof, Compress::No) 156 | .unwrap(); 157 | let uncompressed_size = serialized_proof.len(); 158 | println!( 159 | "Proof (without compression) size: {}", 160 | humansize::format_size(uncompressed_size, humansize::BINARY) 161 | ); 162 | 163 | println!("Verifying proof"); 164 | let start = Instant::now(); 165 | NIFSVerifier::::verify( 166 | &acc, 167 | &cm_i, 168 | &proof, 169 | &mut verifier_transcript, 170 | &ccs, 171 | ) 172 | .unwrap(); 173 | let duration = start.elapsed(); 174 | println!("Proof verified in {:?}", duration); 175 | } 176 | -------------------------------------------------------------------------------- /latticefold/examples/frog.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::{fmt::Debug, time::Instant}; 4 | 5 | use ark_serialize::{CanonicalSerialize, Compress}; 6 | use ark_std::{vec::Vec, UniformRand}; 7 | use cyclotomic_rings::{ 8 | challenge_set::LatticefoldChallengeSet, 9 | rings::{FrogChallengeSet, FrogRingNTT, SuitableRing}, 10 | }; 11 | use latticefold::{ 12 | arith::{ 13 | ccs::get_test_dummy_degree_three_ccs_non_scalar, r1cs::get_test_dummy_z_split_ntt, Arith, 14 | Witness, CCCS, CCS, LCCCS, 15 | }, 16 | commitment::AjtaiCommitmentScheme, 17 | nifs::{ 18 | 
linearization::{LFLinearizationProver, LinearizationProver}, 19 | NIFSProver, NIFSVerifier, 20 | }, 21 | transcript::poseidon::PoseidonTranscript, 22 | }; 23 | 24 | include!(concat!(env!("OUT_DIR"), "/examples_generated.rs")); 25 | 26 | #[allow(dead_code)] 27 | pub fn wit_and_ccs_gen_degree_three_non_scalar< 28 | const X_LEN: usize, 29 | const C: usize, // rows 30 | const WIT_LEN: usize, 31 | const W: usize, // columns 32 | P: DecompositionParams, 33 | R: Clone + UniformRand + Debug + SuitableRing, 34 | >( 35 | r1cs_rows: usize, 36 | ) -> ( 37 | CCCS, 38 | Witness, 39 | CCS, 40 | AjtaiCommitmentScheme, 41 | ) { 42 | let mut rng = ark_std::test_rng(); 43 | 44 | let new_r1cs_rows = if P::L == 1 && (WIT_LEN > 0 && (WIT_LEN & (WIT_LEN - 1)) == 0) { 45 | r1cs_rows - 2 46 | } else { 47 | r1cs_rows // This makes a square matrix but is too much memory 48 | }; 49 | let (one, x_ccs, w_ccs) = get_test_dummy_z_split_ntt::(); 50 | 51 | let mut z = vec![one]; 52 | z.extend(&x_ccs); 53 | z.extend(&w_ccs); 54 | let ccs: CCS = 55 | get_test_dummy_degree_three_ccs_non_scalar::(&z, P::L, new_r1cs_rows); 56 | ccs.check_relation(&z).expect("R1CS invalid!"); 57 | 58 | let scheme: AjtaiCommitmentScheme = AjtaiCommitmentScheme::rand(&mut rng); 59 | let wit: Witness = Witness::from_w_ccs::

(w_ccs); 60 | 61 | let cm_i: CCCS = CCCS { 62 | cm: wit.commit::(&scheme).unwrap(), 63 | x_ccs, 64 | }; 65 | 66 | (cm_i, wit, ccs, scheme) 67 | } 68 | 69 | #[allow(clippy::type_complexity)] 70 | fn setup_example_environment< 71 | const X_LEN: usize, 72 | const C: usize, 73 | RqNTT: SuitableRing, 74 | DP: DecompositionParams, 75 | const W: usize, 76 | const WIT_LEN: usize, 77 | CS: LatticefoldChallengeSet, 78 | >() -> ( 79 | LCCCS, 80 | Witness, 81 | CCCS, 82 | Witness, 83 | CCS, 84 | AjtaiCommitmentScheme, 85 | ) { 86 | let r1cs_rows = X_LEN + WIT_LEN + 1; 87 | 88 | let (cm_i, wit, ccs, scheme) = 89 | wit_and_ccs_gen_degree_three_non_scalar::(r1cs_rows); 90 | 91 | let rand_w_ccs: Vec = (0..WIT_LEN).map(|i| RqNTT::from(i as u64)).collect(); 92 | let wit_acc = Witness::from_w_ccs::(rand_w_ccs); 93 | 94 | let mut transcript = PoseidonTranscript::::default(); 95 | 96 | let (acc, _) = LFLinearizationProver::<_, PoseidonTranscript>::prove( 97 | &cm_i, 98 | &wit_acc, 99 | &mut transcript, 100 | &ccs, 101 | ) 102 | .expect("Failed to generate linearization proof"); 103 | 104 | (acc, wit_acc, cm_i, wit, ccs, scheme) 105 | } 106 | 107 | type RqNTT = FrogRingNTT; 108 | type CS = FrogChallengeSet; 109 | type T = PoseidonTranscript; 110 | 111 | fn main() { 112 | println!("Setting up example environment..."); 113 | 114 | println!("Decomposition parameters:"); 115 | println!("\tB: {}", FrogExampleDP::B); 116 | println!("\tL: {}", FrogExampleDP::L); 117 | println!("\tB_SMALL: {}", FrogExampleDP::B_SMALL); 118 | println!("\tK: {}", FrogExampleDP::K); 119 | 120 | let (acc, wit_acc, cm_i, wit_i, ccs, scheme) = 121 | setup_example_environment::(); 122 | 123 | let mut prover_transcript = PoseidonTranscript::::default(); 124 | let mut verifier_transcript = PoseidonTranscript::::default(); 125 | println!("Generating proof..."); 126 | let start = Instant::now(); 127 | 128 | let (_, _, proof) = NIFSProver::::prove( 129 | &acc, 130 | &wit_acc, 131 | &cm_i, 132 | &wit_i, 133 | &mut 
prover_transcript, 134 | &ccs, 135 | &scheme, 136 | ) 137 | .unwrap(); 138 | let duration = start.elapsed(); 139 | println!("Proof generated in {:?}", duration); 140 | 141 | let mut serialized_proof = Vec::new(); 142 | 143 | println!("Serializing proof (with compression)..."); 144 | proof 145 | .serialize_with_mode(&mut serialized_proof, Compress::Yes) 146 | .unwrap(); 147 | let compressed_size = serialized_proof.len(); 148 | println!( 149 | "Proof size (with compression) size: {}", 150 | humansize::format_size(compressed_size, humansize::BINARY) 151 | ); 152 | 153 | println!("Serializing proof (without compression)..."); 154 | proof 155 | .serialize_with_mode(&mut serialized_proof, Compress::No) 156 | .unwrap(); 157 | let uncompressed_size = serialized_proof.len(); 158 | println!( 159 | "Proof (without compression) size: {}", 160 | humansize::format_size(uncompressed_size, humansize::BINARY) 161 | ); 162 | 163 | println!("Verifying proof"); 164 | let start = Instant::now(); 165 | NIFSVerifier::::verify( 166 | &acc, 167 | &cm_i, 168 | &proof, 169 | &mut verifier_transcript, 170 | &ccs, 171 | ) 172 | .unwrap(); 173 | let duration = start.elapsed(); 174 | println!("Proof verified in {:?}", duration); 175 | } 176 | -------------------------------------------------------------------------------- /latticefold/examples/goldilocks.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::{fmt::Debug, time::Instant}; 4 | 5 | use ark_serialize::{CanonicalSerialize, Compress}; 6 | use ark_std::{vec::Vec, UniformRand}; 7 | use cyclotomic_rings::{ 8 | challenge_set::LatticefoldChallengeSet, 9 | rings::{GoldilocksChallengeSet, GoldilocksRingNTT, SuitableRing}, 10 | }; 11 | use latticefold::{ 12 | arith::{ 13 | ccs::get_test_dummy_degree_three_ccs_non_scalar, r1cs::get_test_dummy_z_split_ntt, Arith, 14 | Witness, CCCS, CCS, LCCCS, 15 | }, 16 | commitment::AjtaiCommitmentScheme, 17 | nifs::{ 18 | 
linearization::{LFLinearizationProver, LinearizationProver}, 19 | NIFSProver, NIFSVerifier, 20 | }, 21 | transcript::poseidon::PoseidonTranscript, 22 | }; 23 | 24 | include!(concat!(env!("OUT_DIR"), "/examples_generated.rs")); 25 | 26 | #[allow(dead_code)] 27 | pub fn wit_and_ccs_gen_degree_three_non_scalar< 28 | const X_LEN: usize, 29 | const C: usize, // rows 30 | const WIT_LEN: usize, 31 | const W: usize, // columns 32 | P: DecompositionParams, 33 | R: Clone + UniformRand + Debug + SuitableRing, 34 | >( 35 | r1cs_rows: usize, 36 | ) -> ( 37 | CCCS, 38 | Witness, 39 | CCS, 40 | AjtaiCommitmentScheme, 41 | ) { 42 | let mut rng = ark_std::test_rng(); 43 | 44 | let new_r1cs_rows = if P::L == 1 && (WIT_LEN > 0 && (WIT_LEN & (WIT_LEN - 1)) == 0) { 45 | r1cs_rows - 2 46 | } else { 47 | r1cs_rows // This makes a square matrix but is too much memory 48 | }; 49 | let (one, x_ccs, w_ccs) = get_test_dummy_z_split_ntt::(); 50 | 51 | let mut z = vec![one]; 52 | z.extend(&x_ccs); 53 | z.extend(&w_ccs); 54 | let ccs: CCS = 55 | get_test_dummy_degree_three_ccs_non_scalar::(&z, P::L, new_r1cs_rows); 56 | ccs.check_relation(&z).expect("R1CS invalid!"); 57 | 58 | let scheme: AjtaiCommitmentScheme = AjtaiCommitmentScheme::rand(&mut rng); 59 | let wit: Witness = Witness::from_w_ccs::

(w_ccs); 60 | 61 | let cm_i: CCCS = CCCS { 62 | cm: wit.commit::(&scheme).unwrap(), 63 | x_ccs, 64 | }; 65 | 66 | (cm_i, wit, ccs, scheme) 67 | } 68 | 69 | #[allow(clippy::type_complexity)] 70 | fn setup_example_environment< 71 | const X_LEN: usize, 72 | const C: usize, 73 | RqNTT: SuitableRing, 74 | DP: DecompositionParams, 75 | const W: usize, 76 | const WIT_LEN: usize, 77 | CS: LatticefoldChallengeSet, 78 | >() -> ( 79 | LCCCS, 80 | Witness, 81 | CCCS, 82 | Witness, 83 | CCS, 84 | AjtaiCommitmentScheme, 85 | ) { 86 | let r1cs_rows = X_LEN + WIT_LEN + 1; 87 | 88 | let (cm_i, wit, ccs, scheme) = 89 | wit_and_ccs_gen_degree_three_non_scalar::(r1cs_rows); 90 | 91 | let rand_w_ccs: Vec = (0..WIT_LEN).map(|i| RqNTT::from(i as u64)).collect(); 92 | let wit_acc = Witness::from_w_ccs::(rand_w_ccs); 93 | 94 | let mut transcript = PoseidonTranscript::::default(); 95 | 96 | let (acc, _) = LFLinearizationProver::<_, PoseidonTranscript>::prove( 97 | &cm_i, 98 | &wit_acc, 99 | &mut transcript, 100 | &ccs, 101 | ) 102 | .expect("Failed to generate linearization proof"); 103 | 104 | (acc, wit_acc, cm_i, wit, ccs, scheme) 105 | } 106 | 107 | type RqNTT = GoldilocksRingNTT; 108 | type CS = GoldilocksChallengeSet; 109 | type T = PoseidonTranscript; 110 | 111 | fn main() { 112 | println!("Setting up example environment..."); 113 | 114 | println!("Decomposition parameters:"); 115 | println!("\tB: {}", GoldilocksExampleDP::B); 116 | println!("\tL: {}", GoldilocksExampleDP::L); 117 | println!("\tB_SMALL: {}", GoldilocksExampleDP::B_SMALL); 118 | println!("\tK: {}", GoldilocksExampleDP::K); 119 | 120 | let (acc, wit_acc, cm_i, wit_i, ccs, scheme) = setup_example_environment::< 121 | X_LEN, 122 | C, 123 | RqNTT, 124 | GoldilocksExampleDP, 125 | W_GOLDILOCKS, 126 | WIT_LEN, 127 | CS, 128 | >(); 129 | 130 | let mut prover_transcript = PoseidonTranscript::::default(); 131 | let mut verifier_transcript = PoseidonTranscript::::default(); 132 | println!("Generating proof..."); 133 | let start 
= Instant::now(); 134 | 135 | let (_, _, proof) = NIFSProver::::prove( 136 | &acc, 137 | &wit_acc, 138 | &cm_i, 139 | &wit_i, 140 | &mut prover_transcript, 141 | &ccs, 142 | &scheme, 143 | ) 144 | .unwrap(); 145 | let duration = start.elapsed(); 146 | println!("Proof generated in {:?}", duration); 147 | 148 | let mut serialized_proof = Vec::new(); 149 | 150 | println!("Serializing proof (with compression)..."); 151 | proof 152 | .serialize_with_mode(&mut serialized_proof, Compress::Yes) 153 | .unwrap(); 154 | let compressed_size = serialized_proof.len(); 155 | println!( 156 | "Proof size (with compression) size: {}", 157 | humansize::format_size(compressed_size, humansize::BINARY) 158 | ); 159 | 160 | println!("Serializing proof (without compression)..."); 161 | proof 162 | .serialize_with_mode(&mut serialized_proof, Compress::No) 163 | .unwrap(); 164 | let uncompressed_size = serialized_proof.len(); 165 | println!( 166 | "Proof (without compression) size: {}", 167 | humansize::format_size(uncompressed_size, humansize::BINARY) 168 | ); 169 | 170 | println!("Verifying proof"); 171 | let start = Instant::now(); 172 | NIFSVerifier::::verify( 173 | &acc, 174 | &cm_i, 175 | &proof, 176 | &mut verifier_transcript, 177 | &ccs, 178 | ) 179 | .unwrap(); 180 | let duration = start.elapsed(); 181 | println!("Proof verified in {:?}", duration); 182 | } 183 | -------------------------------------------------------------------------------- /latticefold/examples/starkprime.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::{fmt::Debug, time::Instant}; 4 | 5 | use ark_serialize::{CanonicalSerialize, Compress}; 6 | use ark_std::{vec::Vec, UniformRand}; 7 | use cyclotomic_rings::{ 8 | challenge_set::LatticefoldChallengeSet, 9 | rings::{StarkChallengeSet, StarkRingNTT, SuitableRing}, 10 | }; 11 | use latticefold::{ 12 | arith::{ 13 | ccs::get_test_dummy_degree_three_ccs_non_scalar, 
r1cs::get_test_dummy_z_split_ntt, Arith, 14 | Witness, CCCS, CCS, LCCCS, 15 | }, 16 | commitment::AjtaiCommitmentScheme, 17 | nifs::{ 18 | linearization::{LFLinearizationProver, LinearizationProver}, 19 | NIFSProver, NIFSVerifier, 20 | }, 21 | transcript::poseidon::PoseidonTranscript, 22 | }; 23 | 24 | include!(concat!(env!("OUT_DIR"), "/examples_generated.rs")); 25 | 26 | #[allow(dead_code)] 27 | pub fn wit_and_ccs_gen_degree_three_non_scalar< 28 | const X_LEN: usize, 29 | const C: usize, // rows 30 | const WIT_LEN: usize, 31 | const W: usize, // columns 32 | P: DecompositionParams, 33 | R: Clone + UniformRand + Debug + SuitableRing, 34 | >( 35 | r1cs_rows: usize, 36 | ) -> ( 37 | CCCS, 38 | Witness, 39 | CCS, 40 | AjtaiCommitmentScheme, 41 | ) { 42 | let mut rng = ark_std::test_rng(); 43 | 44 | let new_r1cs_rows = if P::L == 1 && (WIT_LEN > 0 && (WIT_LEN & (WIT_LEN - 1)) == 0) { 45 | r1cs_rows - 2 46 | } else { 47 | r1cs_rows // This makes a square matrix but is too much memory 48 | }; 49 | let (one, x_ccs, w_ccs) = get_test_dummy_z_split_ntt::(); 50 | 51 | let mut z = vec![one]; 52 | z.extend(&x_ccs); 53 | z.extend(&w_ccs); 54 | let ccs: CCS = 55 | get_test_dummy_degree_three_ccs_non_scalar::(&z, P::L, new_r1cs_rows); 56 | ccs.check_relation(&z).expect("R1CS invalid!"); 57 | 58 | let scheme: AjtaiCommitmentScheme = AjtaiCommitmentScheme::rand(&mut rng); 59 | let wit: Witness = Witness::from_w_ccs::

(w_ccs); 60 | 61 | let cm_i: CCCS = CCCS { 62 | cm: wit.commit::(&scheme).unwrap(), 63 | x_ccs, 64 | }; 65 | 66 | (cm_i, wit, ccs, scheme) 67 | } 68 | 69 | #[allow(clippy::type_complexity)] 70 | fn setup_example_environment< 71 | const X_LEN: usize, 72 | const C: usize, 73 | RqNTT: SuitableRing, 74 | DP: DecompositionParams, 75 | const W: usize, 76 | const WIT_LEN: usize, 77 | CS: LatticefoldChallengeSet, 78 | >() -> ( 79 | LCCCS, 80 | Witness, 81 | CCCS, 82 | Witness, 83 | CCS, 84 | AjtaiCommitmentScheme, 85 | ) { 86 | let r1cs_rows = X_LEN + WIT_LEN + 1; 87 | 88 | let (cm_i, wit, ccs, scheme) = 89 | wit_and_ccs_gen_degree_three_non_scalar::(r1cs_rows); 90 | 91 | let rand_w_ccs: Vec = (0..WIT_LEN_STARK).map(|i| RqNTT::from(i as u64)).collect(); 92 | let wit_acc = Witness::from_w_ccs::(rand_w_ccs); 93 | 94 | let mut transcript = PoseidonTranscript::::default(); 95 | 96 | let (acc, _) = LFLinearizationProver::<_, PoseidonTranscript>::prove( 97 | &cm_i, 98 | &wit_acc, 99 | &mut transcript, 100 | &ccs, 101 | ) 102 | .expect("Failed to generate linearization proof"); 103 | 104 | (acc, wit_acc, cm_i, wit, ccs, scheme) 105 | } 106 | 107 | type RqNTT = StarkRingNTT; 108 | type CS = StarkChallengeSet; 109 | type T = PoseidonTranscript; 110 | 111 | fn main() { 112 | println!("Setting up example environment..."); 113 | 114 | println!("Decomposition parameters:"); 115 | println!("\tB: {}", StarkPrimeExampleDP::B); 116 | println!("\tL: {}", StarkPrimeExampleDP::L); 117 | println!("\tB_SMALL: {}", StarkPrimeExampleDP::B_SMALL); 118 | println!("\tK: {}", StarkPrimeExampleDP::K); 119 | 120 | let (acc, wit_acc, cm_i, wit_i, ccs, scheme) = setup_example_environment::< 121 | X_LEN_STARK, 122 | C_STARK, 123 | RqNTT, 124 | StarkPrimeExampleDP, 125 | W_STARK, 126 | WIT_LEN_STARK, 127 | CS, 128 | >(); 129 | 130 | let mut prover_transcript = PoseidonTranscript::::default(); 131 | let mut verifier_transcript = PoseidonTranscript::::default(); 132 | println!("Generating proof..."); 133 | 
let start = Instant::now(); 134 | 135 | let (_, _, proof) = NIFSProver::::prove( 136 | &acc, 137 | &wit_acc, 138 | &cm_i, 139 | &wit_i, 140 | &mut prover_transcript, 141 | &ccs, 142 | &scheme, 143 | ) 144 | .unwrap(); 145 | let duration = start.elapsed(); 146 | println!("Proof generated in {:?}", duration); 147 | 148 | let mut serialized_proof = Vec::new(); 149 | 150 | println!("Serializing proof (with compression)..."); 151 | proof 152 | .serialize_with_mode(&mut serialized_proof, Compress::Yes) 153 | .unwrap(); 154 | let compressed_size = serialized_proof.len(); 155 | println!( 156 | "Proof size (with compression) size: {}", 157 | humansize::format_size(compressed_size, humansize::BINARY) 158 | ); 159 | 160 | println!("Serializing proof (without compression)..."); 161 | proof 162 | .serialize_with_mode(&mut serialized_proof, Compress::No) 163 | .unwrap(); 164 | let uncompressed_size = serialized_proof.len(); 165 | println!( 166 | "Proof (without compression) size: {}", 167 | humansize::format_size(uncompressed_size, humansize::BINARY) 168 | ); 169 | 170 | println!("Verifying proof"); 171 | let start = Instant::now(); 172 | NIFSVerifier::::verify( 173 | &acc, 174 | &cm_i, 175 | &proof, 176 | &mut verifier_transcript, 177 | &ccs, 178 | ) 179 | .unwrap(); 180 | let duration = start.elapsed(); 181 | println!("Proof verified in {:?}", duration); 182 | } 183 | -------------------------------------------------------------------------------- /latticefold/src/arith/ccs.rs: -------------------------------------------------------------------------------- 1 | //! 
Provide test and benchmark utility for CCS 2 | 3 | use ark_std::{log2, vec::Vec}; 4 | use cyclotomic_rings::rings::SuitableRing; 5 | use stark_rings::Ring; 6 | use stark_rings_linalg::SparseMatrix; 7 | 8 | use super::{ 9 | r1cs::{create_dummy_identity_sparse_matrix, to_F_matrix, to_F_vec}, 10 | CCS, 11 | }; 12 | 13 | /// Given a witness, provides a satisfying degree three CCS of arbitrary size 14 | pub fn get_test_dummy_degree_three_ccs_non_scalar< 15 | R: Ring, 16 | const X_LEN: usize, 17 | const WIT_LEN: usize, 18 | const W: usize, 19 | >( 20 | witness: &[R], 21 | L: usize, 22 | n_rows: usize, 23 | ) -> CCS { 24 | let A = create_dummy_identity_sparse_matrix(n_rows, X_LEN + WIT_LEN + 1); 25 | let B = A.clone(); 26 | let C = A.clone(); 27 | let D = create_dummy_cubing_sparse_matrix(n_rows, X_LEN + WIT_LEN + 1, witness); 28 | 29 | let mut ccs = CCS { 30 | m: W, 31 | n: X_LEN + WIT_LEN + 1, 32 | l: 1, 33 | t: 4, 34 | q: 2, 35 | d: 3, 36 | s: log2(W) as usize, 37 | s_prime: (X_LEN + WIT_LEN + 1), 38 | M: vec![A, B, C, D], 39 | S: vec![vec![0, 1, 2], vec![3]], 40 | c: vec![R::one(), R::one().neg()], 41 | }; 42 | let len = usize::max((ccs.n - ccs.l - 1) * L, ccs.m).next_power_of_two(); 43 | ccs.pad_rows_to(len); 44 | ccs 45 | } 46 | 47 | pub(crate) fn get_test_degree_three_z(input: usize) -> Vec { 48 | // z = (x, 1, w) 49 | to_F_vec(vec![ 50 | input, // x 51 | 1, 52 | input * input * input, // x^3 53 | input * input * input + input, // x^3 + x 54 | input * input * input + input + 5, // x^3 +x + 5 55 | ]) 56 | } 57 | 58 | pub(crate) fn get_test_degree_three_z_non_scalar() -> Vec { 59 | let mut res = Vec::new(); 60 | for input in 0..R::dimension() { 61 | // z = (io, 1, w) 62 | res.push(to_F_vec::(vec![ 63 | input, // x 64 | 1, 65 | input * input * input, // x^3 66 | input * input * input + input, // x^3 + x 67 | input * input * input + input + 5, // x^3 +x + 5 68 | ])) 69 | } 70 | 71 | let mut ret: Vec = Vec::new(); 72 | for j in 0..res[0].len() { 73 | let mut vec = 
Vec::new(); 74 | for witness in &res { 75 | vec.push(witness[j]); 76 | } 77 | ret.push(R::from(vec)); 78 | } 79 | 80 | ret 81 | } 82 | 83 | #[allow(dead_code)] 84 | pub(crate) fn get_test_degree_three_z_split(input: usize) -> (R, Vec, Vec) { 85 | let z = get_test_degree_three_z(input); 86 | (z[1], vec![z[0]], z[2..].to_vec()) 87 | } 88 | 89 | #[allow(dead_code)] 90 | pub(crate) fn get_test_degree_three_z_non_scalar_split() -> (R, Vec, Vec) { 91 | let z = get_test_degree_three_z_non_scalar(); 92 | (z[1], vec![z[0]], z[2..].to_vec()) 93 | } 94 | 95 | #[allow(dead_code)] 96 | pub(crate) fn get_test_degree_three_ccs() -> CCS { 97 | // Degree 3 CCS for: x^3 + x + 5 = y 98 | let A = to_F_matrix::(vec![ 99 | vec![1, 0, 0, 0, 0], 100 | vec![1, 0, 1, 0, 0], 101 | vec![0, 5, 0, 1, 0], 102 | ]); 103 | let B = to_F_matrix::(vec![ 104 | vec![1, 0, 0, 0, 0], 105 | vec![0, 1, 0, 0, 0], 106 | vec![0, 1, 0, 0, 0], 107 | ]); 108 | 109 | let C = to_F_matrix::(vec![ 110 | vec![1, 0, 0, 0, 0], 111 | vec![0, 1, 0, 0, 0], 112 | vec![0, 1, 0, 0, 0], 113 | ]); 114 | let D = to_F_matrix::(vec![ 115 | vec![0, 0, 1, 0, 0], 116 | vec![0, 0, 0, 1, 0], 117 | vec![0, 0, 0, 0, 1], 118 | ]); 119 | 120 | CCS { 121 | m: 3, 122 | n: 5, 123 | l: 1, 124 | t: 4, 125 | q: 2, 126 | d: 3, 127 | s: log2(3) as usize, 128 | s_prime: log2(5) as usize, 129 | M: vec![A, B, C, D], 130 | S: vec![vec![0, 1, 2], vec![3]], 131 | c: vec![R::one(), R::one().neg()], 132 | } 133 | } 134 | 135 | #[cfg(test)] 136 | pub(crate) fn get_test_degree_three_ccs_padded(W: usize, L: usize) -> CCS { 137 | let mut ccs = get_test_degree_three_ccs(); 138 | 139 | ccs.m = W; 140 | ccs.s = log2(W) as usize; 141 | let len = usize::max((ccs.n - ccs.l - 1) * L, ccs.m).next_power_of_two(); 142 | ccs.pad_rows_to(len); 143 | ccs 144 | } 145 | 146 | // Takes a vector and returns a matrix that will square the vector 147 | pub(crate) fn create_dummy_cubing_sparse_matrix( 148 | rows: usize, 149 | columns: usize, 150 | witness: &[R], 151 | ) -> 
SparseMatrix { 152 | assert_eq!( 153 | rows, 154 | witness.len(), 155 | "Length of witness vector must be equal to ccs width" 156 | ); 157 | let mut matrix = SparseMatrix { 158 | n_rows: rows, 159 | n_cols: columns, 160 | coeffs: vec![vec![]; rows], 161 | }; 162 | for (i, row) in matrix.coeffs.iter_mut().enumerate() { 163 | row.push((witness[i] * witness[i], i)); 164 | } 165 | matrix 166 | } 167 | #[allow(clippy::upper_case_acronyms)] 168 | #[cfg(test)] 169 | mod tests { 170 | use cyclotomic_rings::rings::GoldilocksRingNTT; 171 | 172 | use super::{get_test_degree_three_ccs, get_test_dummy_degree_three_ccs_non_scalar}; 173 | use crate::arith::{ 174 | ccs::{get_test_degree_three_z, get_test_degree_three_z_non_scalar}, 175 | r1cs::get_test_dummy_z_split_ntt, 176 | Arith, CCS, 177 | }; 178 | type NTT = GoldilocksRingNTT; 179 | 180 | #[test] 181 | fn test_degree_three_ccs() { 182 | let input = 5; 183 | let ccs: CCS = get_test_degree_three_ccs(); 184 | let z = get_test_degree_three_z(input); 185 | assert!(ccs.check_relation(&z).is_ok()) 186 | } 187 | 188 | #[test] 189 | fn test_degree_three_ccs_non_scalar() { 190 | let ccs: CCS = get_test_degree_three_ccs(); 191 | let z = get_test_degree_three_z_non_scalar(); 192 | assert!(ccs.check_relation(&z).is_ok()) 193 | } 194 | #[test] 195 | fn test_degree_three_dummy_ccs_non_scalar() { 196 | let (one, x_ccs, w_ccs) = get_test_dummy_z_split_ntt::(); 197 | let mut z = vec![one]; 198 | z.extend(&x_ccs); 199 | z.extend(&w_ccs); 200 | let ccs = get_test_dummy_degree_three_ccs_non_scalar::(&z, 1, 2050); 201 | assert!(ccs.check_relation(&z).is_ok()) 202 | } 203 | } 204 | -------------------------------------------------------------------------------- /latticefold/src/arith/error.rs: -------------------------------------------------------------------------------- 1 | //! Provides error functionality for constraint systems. 
2 | use thiserror::Error; 3 | 4 | use crate::ark_base::*; 5 | 6 | /// Errors that can arise in constraint system calculations 7 | #[derive(Debug, Error)] 8 | pub enum CSError { 9 | /// The constraint system is not satisfied by the provided witness 10 | #[error("constraint system is not satisfied")] 11 | NotSatisfied, 12 | 13 | /// The constraint system is not of length $2^k$ for any $k \in \mathbb{N}$. 14 | /// 15 | /// More to the point, the witness length will not be a power of 2, 16 | /// so we cannot use it as a MLE. 17 | #[error("constraint system matrices rows length (m) not a power of 2: {0}")] 18 | MatricesRowsLengthNotPowerOf2(usize), 19 | 20 | /// This error occurs if the CCS instance is not correctly padded 21 | /// 22 | /// See [definition 4.3](https://eprint.iacr.org/2024/257.pdf#page=40) of LatticeFold paper. 23 | #[error("constraint system has invalid size bounds: m = {0}, n = {1}, L = {2}")] 24 | InvalidSizeBounds(usize, usize, usize), 25 | 26 | /// This error occurs when performing operations on vectors of differing lengths. 27 | #[error("vectors {0} and {1} have different lengths: {2} and {3}")] 28 | LengthsNotEqual(String, String, usize, usize), 29 | } 30 | -------------------------------------------------------------------------------- /latticefold/src/arith/r1cs.rs: -------------------------------------------------------------------------------- 1 | //! 
Defines behaviour of R1CS, a degree two constraint system 2 | 3 | use cyclotomic_rings::rings::SuitableRing; 4 | use stark_rings::Ring; 5 | use stark_rings_linalg::{sparse_matrix::dense_matrix_u64_to_sparse, SparseMatrix}; 6 | 7 | use super::{ 8 | error::CSError as Error, 9 | utils::{mat_vec_mul, vec_add, vec_scalar_mul}, 10 | }; 11 | use crate::{arith::hadamard, ark_base::*}; 12 | 13 | /// a representation of a R1CS instance 14 | #[derive(Debug, Clone, PartialEq)] 15 | pub struct R1CS { 16 | /// Length of public input 17 | pub l: usize, 18 | /// First constraint matrix 19 | pub A: SparseMatrix, 20 | /// Second constraint matrix 21 | pub B: SparseMatrix, 22 | /// Third constraint matrix 23 | pub C: SparseMatrix, 24 | } 25 | 26 | impl R1CS { 27 | /// check that a R1CS structure is satisfied by a z vector. Only for testing. 28 | #[cfg(test)] 29 | pub(crate) fn check_relation(&self, z: &[R]) -> Result<(), Error> { 30 | let Az = mat_vec_mul(&self.A, z)?; 31 | let Bz = mat_vec_mul(&self.B, z)?; 32 | 33 | let Cz = mat_vec_mul(&self.C, z)?; 34 | let AzBz = hadamard(&Az, &Bz)?; 35 | 36 | if AzBz != Cz { 37 | Err(Error::NotSatisfied) 38 | } else { 39 | Ok(()) 40 | } 41 | } 42 | /// Converts the R1CS instance into a RelaxedR1CS as described in 43 | /// [Nova](https://eprint.iacr.org/2021/370.pdf#page=14) 44 | pub fn relax(self) -> RelaxedR1CS { 45 | RelaxedR1CS:: { 46 | l: self.l, 47 | E: vec![R::zero(); self.A.nrows()], 48 | A: self.A, 49 | B: self.B, 50 | C: self.C, 51 | u: R::one(), 52 | } 53 | } 54 | } 55 | 56 | /// A RelaxedR1CS instance as described in 57 | /// [Nova](https://eprint.iacr.org/2021/370.pdf#page=14). 58 | /// 59 | /// A witness $z$ is satisfying if $(A \cdot z) \circ (B \cdot z) = u \cdot (C \cdot z) + E$. 
60 | #[derive(Debug, Clone, PartialEq)] 61 | pub struct RelaxedR1CS { 62 | /// Public input length 63 | pub l: usize, 64 | /// First constraint matrix 65 | pub A: SparseMatrix, 66 | /// Second constraint matrix 67 | pub B: SparseMatrix, 68 | /// Third constraint matrix 69 | pub C: SparseMatrix, 70 | /// Scalar coefficient of $(C \cdot z)$ 71 | pub u: R, 72 | /// The error matrix 73 | pub E: Vec, 74 | } 75 | 76 | impl RelaxedR1CS { 77 | /// check that a RelaxedR1CS structure is satisfied by a z vector. 78 | pub fn check_relation(&self, z: &[R]) -> Result<(), Error> { 79 | let Az = mat_vec_mul(&self.A, z)?; 80 | let Bz = mat_vec_mul(&self.B, z)?; 81 | let Cz = mat_vec_mul(&self.C, z)?; 82 | 83 | let uCz = vec_scalar_mul(&Cz, &self.u); 84 | let uCzE = vec_add(&uCz, &self.E)?; 85 | let AzBz = hadamard(&Az, &Bz)?; 86 | if AzBz != uCzE { 87 | Err(Error::NotSatisfied) 88 | } else { 89 | Ok(()) 90 | } 91 | } 92 | } 93 | 94 | /// Returns a matrix of ring elements given a matrix of unsigned ints 95 | pub fn to_F_matrix(M: Vec>) -> SparseMatrix { 96 | // dense_matrix_to_sparse(to_F_dense_matrix::(M)) 97 | let M_u64: Vec> = M 98 | .iter() 99 | .map(|m| m.iter().map(|r| *r as u64).collect()) 100 | .collect(); 101 | dense_matrix_u64_to_sparse(M_u64) 102 | } 103 | 104 | /// Returns a dense matrix of ring elements given a matrix of unsigned ints 105 | pub fn to_F_dense_matrix(M: Vec>) -> Vec> { 106 | M.iter() 107 | .map(|m| m.iter().map(|r| R::from(*r as u64)).collect()) 108 | .collect() 109 | } 110 | 111 | /// Returns a vector of ring elements given a vector of unsigned ints 112 | pub fn to_F_vec(z: Vec) -> Vec { 113 | z.iter().map(|c| R::from(*c as u64)).collect() 114 | } 115 | 116 | #[cfg(test)] 117 | pub(crate) fn get_test_r1cs() -> R1CS { 118 | // R1CS for: x^3 + x + 5 = y (example from article 119 | // https://www.vitalik.ca/general/2016/12/10/qap.html ) 120 | let A = to_F_matrix::(vec![ 121 | vec![1, 0, 0, 0, 0, 0], 122 | vec![0, 0, 0, 1, 0, 0], 123 | vec![1, 0, 0, 0, 1, 
0], 124 | vec![0, 5, 0, 0, 0, 1], 125 | ]); 126 | let B = to_F_matrix::(vec![ 127 | vec![1, 0, 0, 0, 0, 0], 128 | vec![1, 0, 0, 0, 0, 0], 129 | vec![0, 1, 0, 0, 0, 0], 130 | vec![0, 1, 0, 0, 0, 0], 131 | ]); 132 | let C = to_F_matrix::(vec![ 133 | vec![0, 0, 0, 1, 0, 0], 134 | vec![0, 0, 0, 0, 1, 0], 135 | vec![0, 0, 0, 0, 0, 1], 136 | vec![0, 0, 1, 0, 0, 0], 137 | ]); 138 | 139 | R1CS:: { l: 1, A, B, C } 140 | } 141 | 142 | /// Return a R1CS instance of arbitrary size, useful for benching. 143 | /// Only works when z vector consists of multiplicative identities. 144 | pub fn get_test_dummy_r1cs( 145 | rows: usize, 146 | ) -> R1CS { 147 | let R1CS_A = create_dummy_identity_sparse_matrix(rows, X_LEN + WIT_LEN + 1); 148 | let R1CS_B = R1CS_A.clone(); 149 | let R1CS_C = R1CS_A.clone(); 150 | 151 | R1CS:: { 152 | l: 1, 153 | A: R1CS_A, 154 | B: R1CS_B, 155 | C: R1CS_C, 156 | } 157 | } 158 | 159 | /// Return a R1CS instance of arbitrary size, useful for benching. 160 | /// Works for arbitrary z vector. 
161 | pub fn get_test_dummy_r1cs_non_scalar( 162 | rows: usize, 163 | witness: &[R], 164 | ) -> R1CS { 165 | let R1CS_A = create_dummy_identity_sparse_matrix(rows, X_LEN + WIT_LEN + 1); 166 | let R1CS_B = R1CS_A.clone(); 167 | let R1CS_C = create_dummy_squaring_sparse_matrix(rows, X_LEN + WIT_LEN + 1, witness); 168 | 169 | R1CS:: { 170 | l: 1, 171 | A: R1CS_A, 172 | B: R1CS_B, 173 | C: R1CS_C, 174 | } 175 | } 176 | 177 | pub(crate) fn create_dummy_identity_sparse_matrix( 178 | rows: usize, 179 | columns: usize, 180 | ) -> SparseMatrix { 181 | let mut matrix = SparseMatrix { 182 | n_rows: rows, 183 | n_cols: columns, 184 | coeffs: vec![vec![]; rows], 185 | }; 186 | for (i, row) in matrix.coeffs.iter_mut().enumerate() { 187 | row.push((R::one(), i)); 188 | } 189 | matrix 190 | } 191 | 192 | // Takes a vector and returns a matrix that will square the vector 193 | pub(crate) fn create_dummy_squaring_sparse_matrix( 194 | rows: usize, 195 | columns: usize, 196 | witness: &[R], 197 | ) -> SparseMatrix { 198 | assert_eq!( 199 | rows, 200 | witness.len(), 201 | "Length of witness vector must be equal to ccs width" 202 | ); 203 | let mut matrix = SparseMatrix { 204 | n_rows: rows, 205 | n_cols: columns, 206 | coeffs: vec![vec![]; rows], 207 | }; 208 | for (i, row) in matrix.coeffs.iter_mut().enumerate() { 209 | row.push((witness[i], i)); 210 | } 211 | matrix 212 | } 213 | 214 | pub(crate) fn get_test_z(input: usize) -> Vec { 215 | // z = (io, 1, w) 216 | to_F_vec(vec![ 217 | input, // io 218 | 1, 219 | input * input * input + input + 5, // x^3 + x + 5 220 | input * input, // x^2 221 | input * input * input, // x^2 * x 222 | input * input * input + input, // x^3 + x 223 | ]) 224 | } 225 | 226 | pub(crate) fn get_test_z_ntt() -> Vec { 227 | let mut res = Vec::new(); 228 | for input in 0..R::dimension() { 229 | // z = (io, 1, w) 230 | res.push(to_F_vec::(vec![ 231 | input, // io 232 | 1, 233 | input * input * input + input + 5, // x^3 + x + 5 234 | input * input, // x^2 235 | 
input * input * input, // x^2 * x 236 | input * input * input + input, // x^3 + x 237 | ])) 238 | } 239 | 240 | let mut ret: Vec = Vec::new(); 241 | for j in 0..res[0].len() { 242 | let mut vec = Vec::new(); 243 | for witness in &res { 244 | vec.push(witness[j]); 245 | } 246 | ret.push(R::from(vec)); 247 | } 248 | 249 | ret 250 | } 251 | 252 | /// Return scalar z vector for Vitalik's [R1CS example](https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649#81e4), 253 | /// split into statement, constant, and witness. 254 | pub fn get_test_z_split(input: usize) -> (R, Vec, Vec) { 255 | let z = get_test_z(input); 256 | (z[1], vec![z[0]], z[2..].to_vec()) 257 | } 258 | 259 | /// Return non-scalar z vector for Vitalik's [R1CS example](https://medium.com/@VitalikButerin/quadratic-arithmetic-programs-from-zero-to-hero-f6d558cea649#81e4), 260 | /// split into statement, constant, and witness. 261 | pub fn get_test_z_ntt_split() -> (R, Vec, Vec) { 262 | let z = get_test_z_ntt(); 263 | (z[1], vec![z[0]], z[2..].to_vec()) 264 | } 265 | 266 | /// Return z vector consisting only of multiplicative identities, 267 | /// split into statement, constant, and witness. 268 | pub fn get_test_dummy_z_split( 269 | ) -> (R, Vec, Vec) { 270 | ( 271 | R::one(), 272 | to_F_vec(vec![1; X_LEN]), 273 | to_F_vec(vec![1; WIT_LEN]), 274 | ) 275 | } 276 | 277 | /// Return z vector consisting of non scalar ring elements, 278 | /// split into statement, constant, and witness. 
279 | pub fn get_test_dummy_z_split_ntt( 280 | ) -> (R, Vec, Vec) { 281 | let statement_vec = (0..X_LEN).map(|_| R::one()).collect(); 282 | 283 | let witness_vec = (0..WIT_LEN) 284 | .map(|_| { 285 | R::from( 286 | (0..R::dimension()) 287 | .map(|i| R::BaseRing::from(i as u128)) 288 | .collect::>(), 289 | ) 290 | }) 291 | .collect(); 292 | 293 | (R::one(), statement_vec, witness_vec) 294 | } 295 | 296 | #[cfg(test)] 297 | mod tests { 298 | use cyclotomic_rings::rings::FrogRingNTT; 299 | 300 | use super::*; 301 | 302 | #[test] 303 | fn test_check_relation() { 304 | let r1cs = get_test_r1cs::(); 305 | let z = get_test_z(5); 306 | 307 | r1cs.check_relation(&z).unwrap(); 308 | r1cs.relax().check_relation(&z).unwrap(); 309 | } 310 | } 311 | -------------------------------------------------------------------------------- /latticefold/src/arith/utils.rs: -------------------------------------------------------------------------------- 1 | //! Provides operations used for working with constraint systems 2 | 3 | #[cfg(feature = "parallel")] 4 | use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; 5 | use stark_rings::Ring; 6 | use stark_rings_linalg::SparseMatrix; 7 | 8 | use super::error::CSError as Error; 9 | use crate::ark_base::*; 10 | 11 | // Computes the hadamard product of two ring 12 | #[allow(dead_code)] 13 | pub(crate) fn hadamard_vec(lhs: &[R], rhs: &[R]) -> Vec { 14 | lhs.iter().zip(rhs).map(|(lhs, rhs)| *lhs * rhs).collect() 15 | } 16 | 17 | // Multiplies Vector of rings by another ring 18 | #[allow(dead_code)] 19 | pub(crate) fn vec_value_mul(lhs: &[R], rhs: &R) -> Vec { 20 | lhs.iter().map(|lhs_i| *lhs_i * rhs).collect() 21 | } 22 | 23 | // Adds two ring vectors 24 | pub(crate) fn vec_add(a: &[R], b: &[R]) -> Result, Error> { 25 | if a.len() != b.len() { 26 | return Err(Error::LengthsNotEqual( 27 | "a".to_string(), 28 | "b".to_string(), 29 | a.len(), 30 | b.len(), 31 | )); 32 | } 33 | Ok(a.iter().zip(b.iter()).map(|(x, y)| *x + y).collect()) 34 | } 35 
| 36 | pub(crate) fn vec_scalar_mul(vec: &[R], c: &R) -> Vec { 37 | vec.iter().map(|a| *a * c).collect() 38 | } 39 | 40 | pub(crate) fn hadamard(a: &[R], b: &[R]) -> Result, Error> { 41 | if a.len() != b.len() { 42 | return Err(Error::LengthsNotEqual( 43 | "a".to_string(), 44 | "b".to_string(), 45 | a.len(), 46 | b.len(), 47 | )); 48 | } 49 | Ok(a.iter().zip(b).map(|(a, b)| *a * b).collect()) 50 | } 51 | 52 | pub(crate) fn mat_vec_mul(M: &SparseMatrix, z: &[R]) -> Result, Error> { 53 | if M.n_cols != z.len() { 54 | return Err(Error::LengthsNotEqual( 55 | "M".to_string(), 56 | "z".to_string(), 57 | M.n_cols, 58 | z.len(), 59 | )); 60 | } 61 | 62 | Ok(cfg_iter!(M.coeffs) 63 | .map(|row| row.iter().map(|(value, col_i)| *value * z[*col_i]).sum()) 64 | .collect()) 65 | } 66 | 67 | #[cfg(test)] 68 | mod tests { 69 | use ark_ff::Zero; 70 | use stark_rings::cyclotomic_ring::models::goldilocks::Fq; 71 | use stark_rings_linalg::sparse_matrix::dense_matrix_to_sparse; 72 | 73 | use super::*; 74 | 75 | #[test] 76 | fn test_hadamard_vec() { 77 | let a = [Fq::from(2u64), Fq::from(3u64), Fq::from(4u64)]; 78 | let b = [Fq::from(5u64), Fq::from(6u64), Fq::from(7u64)]; 79 | let result = hadamard_vec(&a, &b); 80 | let expected = vec![Fq::from(10u64), Fq::from(18u64), Fq::from(28u64)]; 81 | assert_eq!(result, expected); 82 | } 83 | 84 | // Add similar tests for other functions here... 
85 | 86 | #[test] 87 | fn test_vec_value_mul() { 88 | let a = [Fq::from(2u64), Fq::from(3u64), Fq::from(4u64)]; 89 | let scalar = Fq::from(2u64); 90 | let result = vec_value_mul(&a, &scalar); 91 | let expected = vec![Fq::from(4u64), Fq::from(6u64), Fq::from(8u64)]; 92 | assert_eq!(result, expected); 93 | } 94 | 95 | #[test] 96 | fn test_vec_add() { 97 | let a = [Fq::from(1u64), Fq::from(2u64), Fq::from(3u64)]; 98 | let b = [Fq::from(4u64), Fq::from(5u64), Fq::from(6u64)]; 99 | let result = vec_add(&a, &b); 100 | let expected = vec![Fq::from(5u64), Fq::from(7u64), Fq::from(9u64)]; 101 | assert_eq!(result.unwrap(), expected); 102 | 103 | // Test error case 104 | let a = [Fq::from(1u64), Fq::from(2u64)]; 105 | let b = [Fq::from(3u64), Fq::from(4u64), Fq::from(5u64)]; 106 | let result = vec_add(&a, &b); 107 | assert!(result.is_err()); 108 | } 109 | 110 | #[test] 111 | fn test_vec_scalar_mul() { 112 | let vec = [Fq::from(1u64), Fq::from(2u64), Fq::from(3u64)]; 113 | let c = Fq::from(3u64); 114 | let result = vec_scalar_mul(&vec, &c); 115 | let expected = vec![Fq::from(3u64), Fq::from(6u64), Fq::from(9u64)]; 116 | assert_eq!(result, expected); 117 | } 118 | 119 | #[test] 120 | fn test_hadamard() { 121 | let a = [Fq::from(2u64), Fq::from(3u64), Fq::from(4u64)]; 122 | let b = [Fq::from(5u64), Fq::from(6u64), Fq::from(7u64)]; 123 | let result = hadamard(&a, &b); 124 | let expected = vec![Fq::from(10u64), Fq::from(18u64), Fq::from(28u64)]; 125 | assert_eq!(result.unwrap(), expected); 126 | 127 | // Test error case 128 | let a = [Fq::from(2u64), Fq::from(3u64)]; 129 | let b = [Fq::from(5u64), Fq::from(6u64), Fq::from(7u64)]; 130 | let result = hadamard(&a, &b); 131 | assert!(result.is_err()); 132 | } 133 | 134 | #[test] 135 | fn test_mat_vec_mul() { 136 | // Construct a sparse matrix M 137 | let dense_matrix = vec![ 138 | vec![Fq::from(1u64), Fq::zero(), Fq::zero()], // Row 0 139 | vec![Fq::zero(), Fq::from(2u64), Fq::from(1u64)], // Row 1 140 | vec![Fq::zero(), Fq::zero(), 
Fq::from(3u64)], // Row 2 141 | ]; 142 | 143 | let M = dense_matrix_to_sparse(dense_matrix); 144 | 145 | let z = [Fq::from(1u64), Fq::from(1u64), Fq::from(1u64)]; 146 | let result = mat_vec_mul(&M, &z); 147 | let expected = vec![Fq::from(1u64), Fq::from(3u64), Fq::from(3u64)]; 148 | assert_eq!(result.unwrap(), expected); 149 | 150 | // Test error case 151 | let z = [Fq::from(1u64), Fq::from(1u64)]; // Wrong size vector 152 | let result = mat_vec_mul(&M, &z); 153 | assert!(result.is_err()); 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /latticefold/src/commitment.rs: -------------------------------------------------------------------------------- 1 | //! Provides utility for committing to witnesses. 2 | 3 | use thiserror::Error; 4 | 5 | mod commitment_scheme; 6 | mod homomorphic_commitment; 7 | #[macro_use] 8 | mod operations; 9 | pub use commitment_scheme::*; 10 | pub use homomorphic_commitment::*; 11 | 12 | /// Errors that can occur in commitment operations. 13 | #[derive(Debug, Error)] 14 | pub enum CommitmentError { 15 | /// The witness to be committed has the wrong length. 16 | #[error("Wrong length of the witness: {0}, expected: {1}")] 17 | WrongWitnessLength(usize, usize), 18 | /// The commitment to a witness has the wrong length. 19 | #[error("Wrong length of the commitment: {0}, expected: {1}")] 20 | WrongCommitmentLength(usize, usize), 21 | /// The Ajtai commitment matrix has the wrong dimensions. 22 | /// 23 | /// An Ajtai matrix should have size commitment_length x witness_length. 
24 | #[error("Ajtai matrix has dimensions: {0}x{1}, expected: {2}x{3}")] 25 | WrongAjtaiMatrixDimensions(usize, usize, usize, usize), 26 | } 27 | -------------------------------------------------------------------------------- /latticefold/src/commitment/commitment_scheme.rs: -------------------------------------------------------------------------------- 1 | use cyclotomic_rings::rings::SuitableRing; 2 | #[cfg(feature = "parallel")] 3 | use rayon::prelude::*; 4 | use stark_rings::{ 5 | balanced_decomposition::decompose_balanced_vec, 6 | cyclotomic_ring::{CRT, ICRT}, 7 | OverField, 8 | }; 9 | 10 | use super::homomorphic_commitment::Commitment; 11 | use crate::{ 12 | ark_base::*, commitment::CommitmentError, decomposition_parameters::DecompositionParams, 13 | }; 14 | 15 | /// A concrete instantiation of the Ajtai commitment scheme. 16 | /// Contains a random Ajtai matrix for the corresponding Ajtai parameters 17 | /// `C` is the length of commitment vectors or, equivalently, the number of rows of the Ajtai matrix. 18 | /// `W` is the length of witness vectors or, equivalently, the number of columns of the Ajtai matrix. 19 | /// `NTT` is a suitable cyclotomic ring. 
20 | #[derive(Clone, Debug)] 21 | pub struct AjtaiCommitmentScheme { 22 | matrix: Vec>, 23 | } 24 | 25 | impl TryFrom>> 26 | for AjtaiCommitmentScheme 27 | { 28 | type Error = CommitmentError; 29 | 30 | fn try_from(matrix: Vec>) -> Result { 31 | if matrix.len() != C || matrix[0].len() != W { 32 | return Err(CommitmentError::WrongAjtaiMatrixDimensions( 33 | matrix.len(), 34 | matrix[0].len(), 35 | C, 36 | W, 37 | )); 38 | } 39 | 40 | let mut ajtai_matrix: Vec> = Vec::with_capacity(C); 41 | 42 | for row in matrix.into_iter() { 43 | let len = row.len(); 44 | 45 | if len != W { 46 | return Err(CommitmentError::WrongAjtaiMatrixDimensions(C, len, C, W)); 47 | } 48 | ajtai_matrix.push(row) 49 | } 50 | 51 | Ok(Self { 52 | matrix: ajtai_matrix, 53 | }) 54 | } 55 | } 56 | 57 | impl AjtaiCommitmentScheme { 58 | /// Returns a random Ajtai commitment matrix 59 | pub fn rand(rng: &mut Rng) -> Self { 60 | Self { 61 | matrix: vec![vec![NTT::rand(rng); W]; C], 62 | } 63 | } 64 | } 65 | 66 | impl AjtaiCommitmentScheme { 67 | /// Commit to a witness in the NTT form. 68 | /// The most basic one just multiplies by the matrix. 69 | pub fn commit_ntt(&self, f: &[NTT]) -> Result, CommitmentError> { 70 | if f.len() != W { 71 | return Err(CommitmentError::WrongWitnessLength(f.len(), W)); 72 | } 73 | 74 | let commitment: Vec = cfg_iter!(self.matrix) 75 | .map(|row| { 76 | row.iter() 77 | .zip(f.iter()) 78 | .fold(NTT::zero(), |acc, (row_j, f_j)| acc + *row_j * f_j) 79 | }) 80 | .collect(); 81 | 82 | Ok(Commitment::from_vec_raw(commitment)) 83 | } 84 | 85 | /// Commit to a witness in the coefficient form. 86 | /// Performs NTT on each component of the witness and then does Ajtai commitment. 
87 | pub fn commit_coeff( 88 | &self, 89 | f: Vec, 90 | ) -> Result, CommitmentError> { 91 | if f.len() != W { 92 | return Err(CommitmentError::WrongWitnessLength(f.len(), W)); 93 | } 94 | 95 | self.commit_ntt(&CRT::elementwise_crt(f)) 96 | } 97 | 98 | /// Takes a coefficient form witness, decomposes it vertically in radix-B, 99 | /// i.e. computes a preimage G_B^{-1}(w), and Ajtai commits to the result. 100 | pub fn decompose_and_commit_coeff( 101 | &self, 102 | f: &[NTT::CoefficientRepresentation], 103 | ) -> Result, CommitmentError> { 104 | let f = decompose_balanced_vec(f, P::B, P::L) 105 | .into_iter() 106 | .flatten() 107 | .collect::>(); 108 | 109 | self.commit_coeff::

(f) 110 | } 111 | 112 | /// Takes an NTT form witness, transforms it into the coefficient form, 113 | /// decomposes it vertically in radix-B, i.e. 114 | /// computes a preimage G_B^{-1}(w), and Ajtai commits to the result. 115 | pub fn decompose_and_commit_ntt( 116 | &self, 117 | w: Vec, 118 | ) -> Result, CommitmentError> { 119 | let coeff: Vec = ICRT::elementwise_icrt(w); 120 | 121 | self.decompose_and_commit_coeff::

(&coeff) 122 | } 123 | } 124 | 125 | #[cfg(test)] 126 | mod tests { 127 | use cyclotomic_rings::rings::GoldilocksRingNTT; 128 | use stark_rings::OverField; 129 | 130 | use super::{AjtaiCommitmentScheme, CommitmentError}; 131 | use crate::ark_base::*; 132 | 133 | pub(crate) fn generate_ajtai( 134 | ) -> Result, CommitmentError> { 135 | let mut matrix = Vec::>::new(); 136 | 137 | for i in 0..C { 138 | let mut row = Vec::::new(); 139 | for j in 0..W { 140 | row.push(NTT::from((i * W + j) as u128)); 141 | } 142 | matrix.push(row) 143 | } 144 | 145 | AjtaiCommitmentScheme::try_from(matrix) 146 | } 147 | 148 | #[test] 149 | fn test_commit_ntt() -> Result<(), CommitmentError> { 150 | const WITNESS_SIZE: usize = 1 << 15; 151 | const OUTPUT_SIZE: usize = 9; 152 | 153 | let ajtai_data: AjtaiCommitmentScheme = 154 | generate_ajtai()?; 155 | let witness: Vec<_> = (0..(1 << 15)).map(|_| 2_u128.into()).collect(); 156 | 157 | let committed = ajtai_data.commit_ntt(&witness)?; 158 | 159 | for (i, &x) in committed.as_ref().iter().enumerate() { 160 | let expected: u128 = 161 | ((WITNESS_SIZE) * (2 * i * WITNESS_SIZE + (WITNESS_SIZE - 1))) as u128; 162 | assert_eq!(x, expected.into()); 163 | } 164 | 165 | Ok(()) 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /latticefold/src/commitment/homomorphic_commitment.rs: -------------------------------------------------------------------------------- 1 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 2 | use ark_std::{ 3 | ops::{Add, Mul, Sub}, 4 | Zero, 5 | }; 6 | use stark_rings::Ring; 7 | 8 | use crate::{ 9 | ark_base::*, commitment::CommitmentError, impl_additive_ops_from_ref, 10 | impl_multiplicative_ops_from_ref, impl_subtractive_ops_from_ref, 11 | }; 12 | 13 | /// The Ajtai commitment type. Meant to contain the output of the 14 | /// matrix-vector multiplication `A \cdot x`. 15 | /// Enforced to have the length `C`. 
16 | /// Since Ajtai commitment is bounded-additively homomorphic 17 | /// one can add commitments and multiply them by a scalar. 18 | #[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] 19 | pub struct Commitment { 20 | val: Vec, 21 | } 22 | 23 | impl Commitment { 24 | pub(super) fn from_vec_raw(vec: Vec) -> Self { 25 | Self { val: vec } 26 | } 27 | } 28 | 29 | impl From<[R; C]> for Commitment { 30 | fn from(val: [R; C]) -> Self { 31 | Self { val: val.into() } 32 | } 33 | } 34 | 35 | impl<'a, const C: usize, R: Ring> From<&'a [R]> for Commitment { 36 | fn from(slice: &'a [R]) -> Self { 37 | Self { val: slice.into() } 38 | } 39 | } 40 | 41 | impl TryFrom> for Commitment { 42 | type Error = CommitmentError; 43 | 44 | fn try_from(vec: Vec) -> Result { 45 | if vec.len() != C { 46 | return Err(CommitmentError::WrongCommitmentLength(vec.len(), C)); 47 | } 48 | 49 | Ok(Self { val: vec }) 50 | } 51 | } 52 | 53 | impl AsRef<[R]> for Commitment { 54 | fn as_ref(&self) -> &[R] { 55 | &self.val 56 | } 57 | } 58 | 59 | impl<'a, const C: usize, R: Ring> Add<&'a Commitment> for &Commitment { 60 | type Output = Commitment; 61 | 62 | fn add(self, rhs: &'a Commitment) -> Self::Output { 63 | let mut res_vec = vec![R::zero(); C]; 64 | 65 | res_vec 66 | .iter_mut() 67 | .zip(self.val.iter()) 68 | .zip(rhs.val.iter()) 69 | .for_each(|((res, &a), &b)| *res = a + b); 70 | 71 | Commitment::from_vec_raw(res_vec) 72 | } 73 | } 74 | 75 | impl<'a, const C: usize, R: Ring> Sub<&'a Commitment> for &Commitment { 76 | type Output = Commitment; 77 | 78 | fn sub(self, rhs: &'a Commitment) -> Self::Output { 79 | let mut res_vec = vec![R::zero(); C]; 80 | 81 | res_vec 82 | .iter_mut() 83 | .zip(self.val.iter()) 84 | .zip(rhs.val.iter()) 85 | .for_each(|((res, &a), &b)| *res = a - b); 86 | 87 | Commitment::from_vec_raw(res_vec) 88 | } 89 | } 90 | 91 | impl<'a, const C: usize, R: Ring> Mul<&'a R> for &Commitment { 92 | type Output = Commitment; 93 | 94 | fn mul(self, rhs: 
&'a R) -> Self::Output { 95 | let mut res_vec = vec![R::zero(); C]; 96 | 97 | res_vec 98 | .iter_mut() 99 | .zip(self.val.iter()) 100 | .for_each(|(res, &a)| *res = a * rhs); 101 | 102 | Commitment::from_vec_raw(res_vec) 103 | } 104 | } 105 | 106 | impl_additive_ops_from_ref!(Commitment, Ring, usize); 107 | impl_subtractive_ops_from_ref!(Commitment, Ring, usize); 108 | impl_multiplicative_ops_from_ref!(Commitment, Ring, usize); 109 | 110 | impl Zero for Commitment { 111 | fn zero() -> Self { 112 | Self::from([R::zero(); C]) 113 | } 114 | 115 | fn is_zero(&self) -> bool { 116 | self.val == [R::zero(); C] 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /latticefold/src/commitment/operations.rs: -------------------------------------------------------------------------------- 1 | //! Provide macros to expand the implementation of commitment operations 2 | 3 | /// Given the additive operation for two references of a type, 4 | /// implement the additive operations for non-references. 5 | #[macro_export] 6 | macro_rules! 
impl_additive_ops_from_ref {
    ($type: ident, $params: ident, $constant: ident) => {
        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> Add<$type<C, R>> for &'a $type<C, R> {
            type Output = $type<C, R>;

            fn add(self, rhs: $type<C, R>) -> Self::Output {
                self + &rhs
            }
        }
        // Consistency fix: this impl (and the by-value one below) hardcoded
        // `usize`/`Ring` instead of using the `$constant`/`$params`
        // metavariables like every other impl generated by this macro.
        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> Add<&'a $type<C, R>> for $type<C, R> {
            type Output = $type<C, R>;

            fn add(self, rhs: &'a $type<C, R>) -> Self::Output {
                &self + rhs
            }
        }
        #[allow(unused_qualifications)]
        impl<const C: $constant, R: $params> Add<$type<C, R>> for $type<C, R> {
            type Output = $type<C, R>;

            fn add(self, rhs: $type<C, R>) -> Self::Output {
                &self + &rhs
            }
        }

        #[allow(unused_qualifications)]
        impl<const C: $constant, R: $params> core::ops::AddAssign<Self> for $type<C, R> {
            fn add_assign(&mut self, other: Self) {
                *self = &*self + &other;
            }
        }

        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> core::ops::AddAssign<&'a mut Self>
            for $type<C, R>
        {
            fn add_assign(&mut self, other: &'a mut Self) {
                *self = &*self + &*other;
            }
        }

        #[allow(unused_qualifications)]
        impl<const C: $constant, R: $params> core::iter::Sum<Self> for $type<C, R> {
            fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
                iter.fold(Self::zero(), core::ops::Add::add)
            }
        }

        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> core::iter::Sum<&'a Self> for $type<C, R> {
            fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
                iter.fold(Self::zero(), core::ops::Add::add)
            }
        }
    };
}
impl_subtractive_ops_from_ref {
    ($type: ident, $params: ident, $constant: ident) => {
        // Consistency fix: the first three impls hardcoded `usize`/`Ring`
        // instead of using the `$constant`/`$params` metavariables like the
        // `SubAssign` impls in this same macro.
        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> Sub<$type<C, R>> for &'a $type<C, R> {
            type Output = $type<C, R>;

            fn sub(self, rhs: $type<C, R>) -> Self::Output {
                self - &rhs
            }
        }
        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> Sub<&'a $type<C, R>> for $type<C, R> {
            type Output = $type<C, R>;

            fn sub(self, rhs: &'a $type<C, R>) -> Self::Output {
                &self - rhs
            }
        }
        #[allow(unused_qualifications)]
        impl<const C: $constant, R: $params> Sub<$type<C, R>> for $type<C, R> {
            type Output = $type<C, R>;

            fn sub(self, rhs: $type<C, R>) -> Self::Output {
                &self - &rhs
            }
        }
        #[allow(unused_qualifications)]
        impl<const C: $constant, R: $params> core::ops::SubAssign<Self> for $type<C, R> {
            fn sub_assign(&mut self, other: Self) {
                *self = &*self - &other;
            }
        }

        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> core::ops::SubAssign<&'a mut Self>
            for $type<C, R>
        {
            fn sub_assign(&mut self, other: &'a mut Self) {
                *self = &*self - &*other;
            }
        }
    };
}
impl_multiplicative_ops_from_ref {
    ($type: ident, $params: ident, $constant: ident) => {
        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> Mul<R> for &'a $type<C, R> {
            type Output = $type<C, R>;

            fn mul(self, rhs: R) -> Self::Output {
                self * &rhs
            }
        }

        // Consistency fix: this impl (and the by-value one below) hardcoded
        // `usize`/`Ring` instead of using the `$constant`/`$params`
        // metavariables like the rest of the macro.
        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> Mul<&'a R> for $type<C, R> {
            type Output = $type<C, R>;

            fn mul(self, rhs: &'a R) -> Self::Output {
                &self * rhs
            }
        }

        #[allow(unused_qualifications)]
        impl<const C: $constant, R: $params> Mul<R> for $type<C, R> {
            type Output = $type<C, R>;

            fn mul(self, rhs: R) -> Self::Output {
                &self * &rhs
            }
        }

        #[allow(unused_qualifications)]
        impl<const C: $constant, R: $params> core::ops::MulAssign<R> for $type<C, R> {
            fn mul_assign(&mut self, other: R) {
                *self = &*self * &other;
            }
        }

        #[allow(unused_qualifications)]
        impl<'a, const C: $constant, R: $params> core::ops::MulAssign<&'a mut R> for $type<C, R> {
            fn mul_assign(&mut self, other: &'a mut R) {
                *self = &*self * &*other;
            }
        }
    };
}
17 | const B_SMALL: usize; 18 | /// K = log_b B. 19 | const K: usize; 20 | } 21 | 22 | impl From

for DecompositionParamData { 23 | fn from(_: P) -> Self { 24 | { 25 | Self { b: P::B, l: P::L } 26 | } 27 | } 28 | } 29 | 30 | // Nice representation of parameters for printing out in benchmarks. 31 | #[derive(Clone, Copy)] 32 | pub struct DecompositionParamData { 33 | // The MSIS bound. 34 | b: u128, 35 | // The ring modulus should be < B^L. 36 | l: usize, 37 | } 38 | 39 | impl Display for DecompositionParamData { 40 | fn fmt(&self, f: &mut ark_std::fmt::Formatter<'_>) -> ark_std::fmt::Result { 41 | write!(f, "B={}, l={}", self.b, self.l,) 42 | } 43 | } 44 | 45 | #[allow(non_camel_case_types)] 46 | #[cfg(test)] 47 | pub mod test_params { 48 | use super::DecompositionParams; 49 | 50 | #[derive(Clone)] 51 | pub struct DP; 52 | 53 | #[cfg(test)] 54 | impl DecompositionParams for DP { 55 | const B: u128 = 1024; 56 | const L: usize = 2; 57 | const B_SMALL: usize = 2; 58 | const K: usize = 10; 59 | } 60 | 61 | #[derive(Clone)] 62 | pub struct DPL1; 63 | 64 | #[cfg(test)] 65 | impl DecompositionParams for DPL1 { 66 | const B: u128 = 1024; 67 | const L: usize = 1; 68 | const B_SMALL: usize = 2; 69 | const K: usize = 10; 70 | } 71 | #[derive(Clone)] 72 | pub struct StarkDP; 73 | impl DecompositionParams for StarkDP { 74 | const B: u128 = 10485760000; 75 | const L: usize = 8; 76 | const B_SMALL: usize = 320; 77 | const K: usize = 4; 78 | } 79 | #[derive(Clone)] 80 | pub struct StarkFoldingDP; 81 | impl DecompositionParams for StarkFoldingDP { 82 | const B: u128 = 3010936384; 83 | const L: usize = 8; 84 | const B_SMALL: usize = 38; 85 | const K: usize = 6; 86 | } 87 | 88 | #[derive(Clone)] 89 | pub struct GoldilocksDP; 90 | impl DecompositionParams for GoldilocksDP { 91 | const B: u128 = 1 << 15; 92 | const L: usize = 5; 93 | const B_SMALL: usize = 2; 94 | const K: usize = 15; 95 | } 96 | 97 | #[derive(Clone)] 98 | pub struct BabyBearDP; 99 | impl DecompositionParams for BabyBearDP { 100 | const B: u128 = 1 << 8; 101 | const L: usize = 4; 102 | const B_SMALL: usize = 2; 103 
| const K: usize = 8; 104 | } 105 | #[derive(Clone)] 106 | pub struct FrogDP; 107 | 108 | impl DecompositionParams for FrogDP { 109 | const B: u128 = 1 << 8; 110 | const L: usize = 8; 111 | const B_SMALL: usize = 2; 112 | const K: usize = 10; 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /latticefold/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "std"), no_std)] 2 | #![allow(clippy::type_complexity)] 3 | #![allow(non_snake_case)] 4 | #![forbid(unsafe_code)] 5 | 6 | #[macro_use] 7 | extern crate ark_std; 8 | 9 | extern crate alloc; 10 | 11 | pub mod arith; 12 | pub mod commitment; 13 | pub mod decomposition_parameters; 14 | pub mod nifs; 15 | pub mod transcript; 16 | pub mod utils; 17 | 18 | #[doc(hidden)] 19 | mod ark_base { 20 | pub use ark_std::{ 21 | clone::Clone, 22 | convert::From, 23 | iter::Iterator, 24 | prelude::rust_2021::{derive, Debug}, 25 | result::Result::{self, Err, Ok}, 26 | string::{String, ToString}, 27 | vec::*, 28 | }; 29 | } 30 | -------------------------------------------------------------------------------- /latticefold/src/nifs.rs: -------------------------------------------------------------------------------- 1 | //! The NIFS module defines the behaviour of the [LatticeFold](https://eprint.iacr.org/2024/257.pdf) protocol 2 | //! 3 | //! 
NIFS = Non Interactive Folding Scheme 4 | 5 | use ark_ff::{Field, PrimeField}; 6 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 7 | use ark_std::{marker::PhantomData, vec::Vec}; 8 | use cyclotomic_rings::rings::SuitableRing; 9 | use stark_rings::OverField; 10 | 11 | use self::{decomposition::*, error::LatticefoldError, folding::*, linearization::*}; 12 | use crate::{ 13 | arith::{error::CSError, Witness, CCCS, CCS, LCCCS}, 14 | commitment::AjtaiCommitmentScheme, 15 | decomposition_parameters::DecompositionParams, 16 | transcript::{Transcript, TranscriptWithShortChallenges}, 17 | }; 18 | 19 | pub mod decomposition; 20 | pub mod error; 21 | pub mod folding; 22 | pub mod linearization; 23 | 24 | #[cfg(test)] 25 | mod tests; 26 | 27 | /// `C` is the length of Ajtai commitment vectors. 28 | /// `NTT` is a cyclotomic ring in the NTT form. 29 | #[derive(Clone, CanonicalSerialize, CanonicalDeserialize)] 30 | pub struct LFProof { 31 | pub linearization_proof: LinearizationProof, 32 | pub decomposition_proof_l: DecompositionProof, 33 | pub decomposition_proof_r: DecompositionProof, 34 | pub folding_proof: FoldingProof, 35 | } 36 | 37 | /// `C` is the length of commitment vectors or, equivalently, the number of rows of the Ajtai matrix. 38 | /// `W` is the length of witness vectors or, equivalently, the number of columns of the Ajtai matrix. 39 | /// `NTT` is a suitable cyclotomic ring. 40 | /// `P` is the decomposition parameters. 41 | /// `T` is the FS-transform transcript. 42 | pub struct NIFSProver { 43 | _r: PhantomData, 44 | _p: PhantomData

, 45 | _t: PhantomData, 46 | } 47 | 48 | impl< 49 | const C: usize, 50 | const W: usize, 51 | NTT: SuitableRing, 52 | P: DecompositionParams, 53 | T: TranscriptWithShortChallenges, 54 | > NIFSProver 55 | { 56 | pub fn prove( 57 | acc: &LCCCS, 58 | w_acc: &Witness, 59 | cm_i: &CCCS, 60 | w_i: &Witness, 61 | transcript: &mut impl TranscriptWithShortChallenges, 62 | ccs: &CCS, 63 | scheme: &AjtaiCommitmentScheme, 64 | ) -> Result<(LCCCS, Witness, LFProof), LatticefoldError> { 65 | sanity_check::(ccs)?; 66 | 67 | absorb_public_input::(acc, cm_i, transcript); 68 | 69 | let (linearized_cm_i, linearization_proof) = 70 | LFLinearizationProver::<_, T>::prove(cm_i, w_i, transcript, ccs)?; 71 | let (mz_mles_l, decomposed_lcccs_l, decomposed_wit_l, decomposition_proof_l) = 72 | LFDecompositionProver::<_, T>::prove::(acc, w_acc, transcript, ccs, scheme)?; 73 | let (mz_mles_r, decomposed_lcccs_r, decomposed_wit_r, decomposition_proof_r) = 74 | LFDecompositionProver::<_, T>::prove::( 75 | &linearized_cm_i, 76 | w_i, 77 | transcript, 78 | ccs, 79 | scheme, 80 | )?; 81 | 82 | let (mz_mles, lcccs, wit_s) = { 83 | let mut lcccs = decomposed_lcccs_l; 84 | let mut lcccs_r = decomposed_lcccs_r; 85 | lcccs.append(&mut lcccs_r); 86 | 87 | let mut wit_s = decomposed_wit_l; 88 | let mut wit_s_r = decomposed_wit_r; 89 | wit_s.append(&mut wit_s_r); 90 | 91 | let mut mz_mles = mz_mles_l; 92 | let mut mz_mles_r = mz_mles_r; 93 | mz_mles.append(&mut mz_mles_r); 94 | (mz_mles, lcccs, wit_s) 95 | }; 96 | 97 | let (folded_lcccs, wit, folding_proof) = 98 | LFFoldingProver::<_, T>::prove::(&lcccs, wit_s, transcript, ccs, &mz_mles)?; 99 | 100 | Ok(( 101 | folded_lcccs, 102 | wit, 103 | LFProof { 104 | linearization_proof, 105 | decomposition_proof_l, 106 | decomposition_proof_r, 107 | folding_proof, 108 | }, 109 | )) 110 | } 111 | } 112 | 113 | /// `C` is the length of commitment vectors or, equivalently, the number of rows of the Ajtai matrix. 
114 | /// `W` is the length of witness vectors or, equivalently, the number of columns of the Ajtai matrix. 115 | /// `NTT` is a suitable cyclotomic ring. 116 | /// `P` is the decomposition parameters. 117 | /// `T` is the FS-transform transcript. 118 | pub struct NIFSVerifier { 119 | _r: PhantomData, 120 | _p: PhantomData

, 121 | _t: PhantomData, 122 | } 123 | 124 | impl< 125 | const C: usize, 126 | NTT: SuitableRing, 127 | P: DecompositionParams, 128 | T: TranscriptWithShortChallenges, 129 | > NIFSVerifier 130 | { 131 | pub fn verify( 132 | acc: &LCCCS, 133 | cm_i: &CCCS, 134 | proof: &LFProof, 135 | transcript: &mut impl TranscriptWithShortChallenges, 136 | ccs: &CCS, 137 | ) -> Result, LatticefoldError> { 138 | sanity_check::(ccs)?; 139 | 140 | absorb_public_input::(acc, cm_i, transcript); 141 | 142 | let linearized_cm_i = LFLinearizationVerifier::<_, T>::verify( 143 | cm_i, 144 | &proof.linearization_proof, 145 | transcript, 146 | ccs, 147 | )?; 148 | let decomposed_acc = LFDecompositionVerifier::<_, T>::verify::( 149 | acc, 150 | &proof.decomposition_proof_l, 151 | transcript, 152 | ccs, 153 | )?; 154 | let decomposed_cm_i = LFDecompositionVerifier::<_, T>::verify::( 155 | &linearized_cm_i, 156 | &proof.decomposition_proof_r, 157 | transcript, 158 | ccs, 159 | )?; 160 | 161 | let lcccs_s = { 162 | let mut decomposed_acc = decomposed_acc; 163 | let mut decomposed_cm_i = decomposed_cm_i; 164 | 165 | decomposed_acc.append(&mut decomposed_cm_i); 166 | 167 | decomposed_acc 168 | }; 169 | 170 | Ok(LFFoldingVerifier::::verify::( 171 | &lcccs_s, 172 | &proof.folding_proof, 173 | transcript, 174 | ccs, 175 | )?) 
176 | } 177 | } 178 | 179 | fn sanity_check( 180 | ccs: &CCS, 181 | ) -> Result<(), LatticefoldError> { 182 | if ccs.m != usize::max((ccs.n - ccs.l - 1) * DP::L, ccs.m).next_power_of_two() { 183 | return Err(CSError::InvalidSizeBounds(ccs.m, ccs.n, DP::L).into()); 184 | } 185 | 186 | Ok(()) 187 | } 188 | 189 | fn absorb_public_input( 190 | acc: &LCCCS, 191 | cm_i: &CCCS, 192 | transcript: &mut impl Transcript, 193 | ) { 194 | transcript.absorb_field_element(&::from_base_prime_field( 195 | ::BasePrimeField::from_be_bytes_mod_order(b"acc"), 196 | )); 197 | 198 | transcript.absorb_slice(&acc.r); 199 | transcript.absorb_slice(&acc.v); 200 | transcript.absorb_slice(acc.cm.as_ref()); 201 | transcript.absorb_slice(&acc.u); 202 | transcript.absorb_slice(&acc.x_w); 203 | transcript.absorb(&acc.h); 204 | 205 | transcript.absorb_field_element(&::from_base_prime_field( 206 | ::BasePrimeField::from_be_bytes_mod_order(b"cm_i"), 207 | )); 208 | 209 | transcript.absorb_slice(cm_i.cm.as_ref()); 210 | transcript.absorb_slice(&cm_i.x_ccs); 211 | } 212 | -------------------------------------------------------------------------------- /latticefold/src/nifs/decomposition/structs.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_snake_case, clippy::upper_case_acronyms)] 2 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 3 | use ark_std::marker::PhantomData; 4 | use cyclotomic_rings::rings::SuitableRing; 5 | use stark_rings::{OverField, Ring}; 6 | use stark_rings_poly::mle::DenseMultilinearExtension; 7 | 8 | use crate::{ 9 | arith::{Witness, CCS, LCCCS}, 10 | ark_base::*, 11 | commitment::{AjtaiCommitmentScheme, Commitment}, 12 | decomposition_parameters::DecompositionParams, 13 | nifs::error::DecompositionError, 14 | transcript::Transcript, 15 | }; 16 | 17 | /// The proof structure of the decomposition subprotocol. 
18 | #[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] 19 | pub struct DecompositionProof { 20 | /// CCS-linearization evaluation claims w.r.t. decomposed witnesses. 21 | /// 22 | /// After a run of the decomposition subprotocol prover this field contains 23 | /// `P::K` vectors of length `ccs.t`. `u_s[i][j]` is such that 24 | /// $$ 25 | /// \text{u\\_s[i][j]} = \sum\_{\vec{\mathbf{b}} \in \\{0,1\\}^{\log n\_c}} 26 | /// \text{mle}[\text{ccs.M[j]}](\vec{\mathbf{x}}, \vec{\mathbf{b}}) \cdot \text{mle}\[\mathbf{z}\_{i}\](\vec{\mathbf{b}}) 27 | /// $$ 28 | /// where $\mathbf{z}_i$ is obtained by concatenating `x_s[i] || w_s[i]` (`w_s` are the decomposed witnesses). 29 | pub u_s: Vec>, 30 | /// Evaluation claims about rows of $\hat{f}$-matrices of decomposed witnesses. 31 | /// 32 | /// After a run of the decomposition subprotocol prover this field contains 33 | /// `P::K` vectors of length `NTT::CoefficientRepresentation::dimension() / NTT::dimension()`. `v_s[i][j]` is such that 34 | /// $$ 35 | /// \text{v\\_s[i][j]}= \text{mle}[\text{w\\_s[i].f\\_hat[j]}] (\mathbf{r}). 36 | /// $$ 37 | /// where $\mathbf{r}$ is the evaluation point from the LCCCS. 38 | pub v_s: Vec>, 39 | /// Decomposed public parts of the statement-witness vectors `z_s`. 40 | /// 41 | /// It is expensive to compute them 42 | /// on the verifier's side thus the prover computes them by itself and sends to the verifier. 43 | pub x_s: Vec>, 44 | /// Commitments to the decomposed witnesses. 
45 | pub y_s: Vec>, 46 | } 47 | 48 | pub trait DecompositionProver> { 49 | fn prove( 50 | cm_i: &LCCCS, 51 | wit: &Witness, 52 | transcript: &mut impl Transcript, 53 | ccs: &CCS, 54 | scheme: &AjtaiCommitmentScheme, 55 | ) -> Result< 56 | ( 57 | Vec>>, 58 | Vec>, 59 | Vec>, 60 | DecompositionProof, 61 | ), 62 | DecompositionError, 63 | >; 64 | } 65 | 66 | pub trait DecompositionVerifier> { 67 | fn verify( 68 | cm_i: &LCCCS, 69 | proof: &DecompositionProof, 70 | transcript: &mut impl Transcript, 71 | ccs: &CCS, 72 | ) -> Result>, DecompositionError>; 73 | } 74 | 75 | pub struct LFDecompositionProver { 76 | _ntt: PhantomData, 77 | _t: PhantomData, 78 | } 79 | 80 | pub struct LFDecompositionVerifier { 81 | _ntt: PhantomData, 82 | _t: PhantomData, 83 | } 84 | -------------------------------------------------------------------------------- /latticefold/src/nifs/decomposition/utils.rs: -------------------------------------------------------------------------------- 1 | use cyclotomic_rings::rings::SuitableRing; 2 | use stark_rings::{ 3 | balanced_decomposition::{decompose_balanced_vec, gadget_decompose, recompose}, 4 | cyclotomic_ring::{CRT, ICRT}, 5 | }; 6 | use stark_rings_linalg::ops::Transpose; 7 | 8 | use crate::{ark_base::*, decomposition_parameters::DecompositionParams}; 9 | 10 | /// Decompose a vector of arbitrary norm in its NTT form into DP::K vectors 11 | /// and applies the gadget-B matrix again. 
12 | pub(super) fn decompose_big_vec_into_k_vec_and_compose_back< 13 | NTT: SuitableRing, 14 | DP: DecompositionParams, 15 | >( 16 | x: Vec, 17 | ) -> Vec> { 18 | // Allow x to have length m 19 | let coeff_repr: Vec = ICRT::elementwise_icrt(x); 20 | 21 | // radix-B 22 | let decomposed_in_B: Vec = 23 | gadget_decompose(&coeff_repr, DP::B, DP::L); 24 | 25 | // We now have a m * l length vector 26 | // Each element from original vector is mapped to l-length chunk 27 | 28 | decompose_balanced_vec(&decomposed_in_B, DP::B_SMALL as u128, DP::K) 29 | // We have a k by (m*l) matrix 30 | .transpose() 31 | // We have a (m*l) by k matrix 32 | .into_iter() 33 | // We recompose to a m * k matrix 34 | // Where could recompose basis b horizontally to recreate the original vector 35 | .map(|vec| { 36 | vec.chunks(DP::L) 37 | .map(|chunk| recompose(chunk, DP::B).crt()) 38 | .collect() 39 | }) 40 | .collect() 41 | } 42 | 43 | /// Decompose a vector of norm B in its coefficient form into DP::K small vectors. 
44 | pub(super) fn decompose_B_vec_into_k_vec( 45 | x: &[NTT::CoefficientRepresentation], 46 | ) -> Vec> { 47 | decompose_balanced_vec(x, DP::B_SMALL as u128, DP::K).transpose() 48 | } 49 | 50 | #[cfg(test)] 51 | mod tests { 52 | use cyclotomic_rings::rings::SuitableRing; 53 | use rand::Rng; 54 | use stark_rings::{ 55 | balanced_decomposition::{decompose_balanced_vec, recompose}, 56 | cyclotomic_ring::{ 57 | models::goldilocks::{RqNTT, RqPoly}, 58 | CRT, 59 | }, 60 | PolyRing, 61 | }; 62 | 63 | use crate::{ 64 | ark_base::*, 65 | decomposition_parameters::{test_params::DP, DecompositionParams}, 66 | nifs::decomposition::utils::{ 67 | decompose_B_vec_into_k_vec, decompose_big_vec_into_k_vec_and_compose_back, 68 | }, 69 | }; 70 | 71 | fn draw_ring_below_bound(rng: &mut impl Rng) -> RqPoly 72 | where 73 | RqPoly: PolyRing + CRT, 74 | { 75 | let degree = ::dimension(); 76 | let mut coeffs = Vec::with_capacity(degree); 77 | for _ in 0..degree { 78 | let random_coeff = rng.gen_range(0..B); 79 | coeffs.push(::BaseRing::from(random_coeff)); 80 | } 81 | RqPoly::from(coeffs) 82 | } 83 | fn test_decompose_B_vec_into_k_vec() 84 | where 85 | RqNTT: SuitableRing, 86 | RqPoly: PolyRing + CRT, 87 | { 88 | // Create a test vector 89 | const N: usize = 32; 90 | let mut rng = ark_std::test_rng(); 91 | let test_vector: Vec = (0..N) 92 | .map(|_| draw_ring_below_bound::(&mut rng)) 93 | .collect(); 94 | 95 | // Call the function 96 | let decomposed = decompose_B_vec_into_k_vec::(&test_vector); 97 | 98 | // Check that we get K vectors back from the decomposition 99 | assert_eq!( 100 | decomposed.len(), 101 | DP::K, 102 | "Decomposition should output K={} vectors", 103 | DP::K 104 | ); 105 | 106 | // Check the length of each inner vector 107 | for vec in &decomposed { 108 | assert_eq!(vec.len(), N); 109 | } 110 | 111 | // Check that the decomposition is correct 112 | for i in 0..N { 113 | let decomp_i = decomposed.iter().map(|d_j| d_j[i]).collect::>(); 114 | assert_eq!( 115 | 
test_vector[i], 116 | recompose(&decomp_i, RqPoly::from(DP::B_SMALL as u128)) 117 | ); 118 | } 119 | } 120 | 121 | fn recompose_from_k_vec_to_big_vec( 122 | k_vecs: &[Vec], 123 | ) -> Vec { 124 | let decomposed_in_b: Vec> = k_vecs 125 | .iter() 126 | .map(|vec| { 127 | vec.iter() 128 | .flat_map(|&x| decompose_balanced_vec(&[x.icrt()], DP::B, DP::L)) 129 | .flatten() 130 | .collect() 131 | }) 132 | .collect(); 133 | 134 | // Transpose the decomposed vectors 135 | let mut transposed = vec![vec![]; decomposed_in_b[0].len()]; 136 | for row in &decomposed_in_b { 137 | for (j, &val) in row.iter().enumerate() { 138 | transposed[j].push(val); 139 | } 140 | } 141 | 142 | // Recompose first with B_SMALL, then with B 143 | transposed 144 | .iter() 145 | .map(|vec| { 146 | recompose( 147 | vec, 148 | NTT::CoefficientRepresentation::from(DP::B_SMALL as u128), 149 | ) 150 | }) 151 | .collect::>() 152 | .chunks(DP::L) 153 | .map(|chunk| recompose(chunk, NTT::CoefficientRepresentation::from(DP::B))) 154 | .collect() 155 | } 156 | 157 | fn test_decompose_big_vec_into_k_vec_and_compose_back() 158 | where 159 | RqNTT: SuitableRing, 160 | RqPoly: PolyRing + CRT, 161 | Vec: FromIterator<::CRTForm>, 162 | { 163 | // Create a test vector 164 | const N: usize = 32; 165 | let mut rng = ark_std::test_rng(); 166 | let test_vector: Vec = (0..N) 167 | .map(|_| draw_ring_below_bound::(&mut rng).crt()) 168 | .collect(); 169 | let decomposed_and_composed_back = 170 | decompose_big_vec_into_k_vec_and_compose_back::(test_vector.clone()); 171 | let restore_decomposed = 172 | recompose_from_k_vec_to_big_vec::(&decomposed_and_composed_back); 173 | 174 | // Check each entry matches 175 | for i in 0..N { 176 | assert_eq!( 177 | restore_decomposed[i], 178 | test_vector[i].icrt(), 179 | "Mismatch at index {}: decomposed_and_composed_back={}, test_vector={}", 180 | i, 181 | restore_decomposed[i], 182 | test_vector[i].icrt() 183 | ); 184 | } 185 | } 186 | 187 | #[test] 188 | fn 
test_decompose_B_vec_into_k_vec_gold() { 189 | test_decompose_B_vec_into_k_vec::(); 190 | } 191 | 192 | #[test] 193 | fn test_decompose_big_vec_into_k_vec_and_compose_back_gold() { 194 | test_decompose_big_vec_into_k_vec_and_compose_back::(); 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /latticefold/src/nifs/error.rs: -------------------------------------------------------------------------------- 1 | use stark_rings::Ring; 2 | use stark_rings_poly::polynomials::ArithErrors; 3 | use thiserror::Error; 4 | 5 | use crate::{ 6 | arith::error::CSError, 7 | ark_base::*, 8 | commitment::CommitmentError, 9 | utils::{mle_helpers::MleEvaluationError, sumcheck::SumCheckError}, 10 | }; 11 | 12 | #[derive(Debug, Error)] 13 | pub enum LatticefoldError { 14 | #[error("linearization failed: {0}")] 15 | LinearizationError(#[from] LinearizationError), 16 | #[error("decomposition failed: {0}")] 17 | DecompositionError(#[from] DecompositionError), 18 | #[error("folding failed: {0}")] 19 | FoldingError(#[from] FoldingError), 20 | #[error("constraint system related error: {0}")] 21 | ConstraintSystemError(#[from] CSError), 22 | } 23 | 24 | #[derive(Debug, Error)] 25 | pub enum LinearizationError { 26 | #[error("sum check failed at linearization step: {0}")] 27 | SumCheckError(#[from] SumCheckError), 28 | #[error("parameters error: {0}")] 29 | ParametersError(String), 30 | #[error("constraint system related error: {0}")] 31 | ConstraintSystemError(#[from] CSError), 32 | #[error("Arithmetic error: {0}")] 33 | ArithmeticError(#[from] ArithErrors), 34 | #[error("mle evaluation failed: {0}")] 35 | EvaluationError(#[from] MleEvaluationError), 36 | } 37 | 38 | #[derive(Debug, Error)] 39 | pub enum DecompositionError { 40 | #[error("input vectors have incorrect length")] 41 | IncorrectLength, 42 | #[error("ajtai commitment error: {0}")] 43 | CommitmentError(#[from] CommitmentError), 44 | #[error("constraint system related error: {0}")] 
45 | ConstraintSystemError(#[from] CSError), 46 | #[error("recomposing proof checked failed")] 47 | RecomposedError, 48 | #[error("mle evaluation failed: {0}")] 49 | EvaluationError(#[from] MleEvaluationError), 50 | } 51 | 52 | #[derive(Debug, Error)] 53 | pub enum FoldingError { 54 | #[error("input vectors have incorrect length")] 55 | IncorrectLength, 56 | #[error("sum check failed at folding step: {0}")] 57 | SumCheckError(#[from] SumCheckError), 58 | #[error("constraint system related error: {0}")] 59 | ConstraintSystemError(#[from] CSError), 60 | #[error("virtual polynomial error: {0}")] 61 | ArithError(#[from] ArithErrors), 62 | #[error("mle evaluation failed: {0}")] 63 | EvaluationError(#[from] MleEvaluationError), 64 | #[error("sumcheck challenge point were not generate correctly")] 65 | SumcheckChallengeError, 66 | } 67 | -------------------------------------------------------------------------------- /latticefold/src/nifs/folding/structs.rs: -------------------------------------------------------------------------------- 1 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 2 | use ark_std::marker::PhantomData; 3 | use cyclotomic_rings::rings::SuitableRing; 4 | use stark_rings::OverField; 5 | use stark_rings_poly::mle::DenseMultilinearExtension; 6 | 7 | use crate::{ 8 | arith::{Witness, CCS, LCCCS}, 9 | ark_base::Vec, 10 | decomposition_parameters::DecompositionParams, 11 | nifs::error::FoldingError, 12 | transcript::TranscriptWithShortChallenges, 13 | utils::sumcheck, 14 | }; 15 | 16 | /// Proof generated by the folding prover. 17 | #[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] 18 | pub struct FoldingProof { 19 | /// A list of non-interactive sumcheck prover messages. 20 | /// 21 | /// Sent in step 2 of folding subprotocol. 
22 | pub pointshift_sumcheck_proof: sumcheck::Proof, 23 | /// The evaluations of MLEs of the rows of $\hat{\mathbf{f}}$-matrices of the witness at the $\overrightarrow{r}_0$ challenge point, i.e. 24 | /// 25 | /// $$ 26 | /// \theta[i]= \text{mle}[\text{wit.f\\_hat[i]}] (\overrightarrow{r}_0). 27 | /// $$ 28 | /// 29 | /// Sent in the step 3 of folding subprotocol. 30 | pub theta_s: Vec>, 31 | /// The evaluations of MLE's of $\\{ M_j \mathbf{z}_i \mid j = 1, 2, \dots, t, i = 1, 2 \dots 2k \\}$ at evaluated at the $\overrightarrow{r}_0$ challenge point, i.e. 32 | /// 33 | /// $$ 34 | /// \eta[j][i] = \sum\_{\vec{\mathbf{b}} \in \\{0,1\\}^{\log n\_c}} 35 | /// \text{mle}[\text{ccs.M[j]}](\vec{\mathbf{x}}, \vec{\mathbf{b}}) \cdot \text{mle}\[\mathbf{z}_{i}\](\vec{\mathbf{b}}). 36 | /// $$ 37 | /// 38 | /// Sent in the step 3 of folding subprotocol. 39 | pub eta_s: Vec>, 40 | } 41 | 42 | /// Prover for the folding subprotocol 43 | pub trait FoldingProver> { 44 | /// Generates a folded witness and its linearized commitment, along with proof to the correctness of the folding. 45 | /// 46 | /// # Arguments 47 | /// 48 | /// * `cm_i_s` - A reference to `[LCCCS]`, representing decomposed linearized commitments to be folded together. 49 | /// * `w_s` - A vector of decomposed witnesses to be folded together. 50 | /// * `transcript` - A mutable reference to a sponge for generating NI challenges. 51 | /// * `ccs` - A reference to a Customizable Constraint System instance used in the protocol. 52 | /// # Returns 53 | /// 54 | /// On success, returns a tuple `(LCCCS, Witness, FoldingProof)` where: 55 | /// * `LCCCS` is a folded linearized version of the CCS witness commitment. 56 | /// * `Witness` is a folded CCS and Ajtai witness. 57 | /// * `LinearizationProof` is a proof that the linearization subprotocol was executed correctly. 
58 | /// 59 | /// # Errors 60 | /// 61 | /// Returns an error if asked to evaluate MLEs with incorrect number of variables 62 | /// 63 | fn prove( 64 | cm_i_s: &[LCCCS], 65 | w_s: Vec>, 66 | transcript: &mut impl TranscriptWithShortChallenges, 67 | ccs: &CCS, 68 | mz_mles: &[Vec>], 69 | ) -> Result<(LCCCS, Witness, FoldingProof), FoldingError>; 70 | } 71 | 72 | /// Verifier for folding subprotocol 73 | pub trait FoldingVerifier> { 74 | /// Verifies a proof for the folding subprotocol. 75 | /// 76 | /// # Arguments 77 | /// 78 | /// * `cm_i` - A reference to a vector of `CCCS`, which represents decomposed LCCS statements and commitments to witnesses. 79 | /// * `proof` - A reference to a `FoldingProof` containing the folding proof. 80 | /// * `transcript` - A mutable reference to a sponge for generating NI challenges. 81 | /// * `ccs` - A reference to a Customizable Constraint System instance used in the protocol. 82 | /// 83 | /// # Returns 84 | /// 85 | /// * `Ok(LCCCS)` - On success, returns the folded linearized version of the CCS witness commitment. 86 | /// * `Err(FoldingError)` - If verification fails, returns a `FoldingError`. 87 | /// 88 | fn verify( 89 | cm_i_s: &[LCCCS], 90 | proof: &FoldingProof, 91 | transcript: &mut impl TranscriptWithShortChallenges, 92 | ccs: &CCS, 93 | ) -> Result, FoldingError>; 94 | } 95 | 96 | /// The LatticeFold folding prover 97 | /// 98 | /// Implements the [`FoldingProver`] trait. 99 | pub struct LFFoldingProver { 100 | _ntt: PhantomData, 101 | _t: PhantomData, 102 | } 103 | 104 | /// The LatticeFold folding verifier 105 | /// 106 | /// Implements the [`FoldingVerifier`] trait. 
107 | pub struct LFFoldingVerifier { 108 | _ntt: PhantomData, 109 | _t: PhantomData, 110 | } 111 | -------------------------------------------------------------------------------- /latticefold/src/nifs/linearization.rs: -------------------------------------------------------------------------------- 1 | use cyclotomic_rings::rings::SuitableRing; 2 | use stark_rings::OverField; 3 | use stark_rings_poly::mle::DenseMultilinearExtension; 4 | 5 | pub use self::structs::*; 6 | use self::utils::{compute_u, prepare_lin_sumcheck_polynomial, sumcheck_polynomial_comb_fn}; 7 | use super::error::LinearizationError; 8 | use crate::{ 9 | arith::{Instance, Witness, CCCS, CCS, LCCCS}, 10 | ark_base::*, 11 | nifs::linearization::utils::SqueezeBeta, 12 | transcript::Transcript, 13 | utils::{ 14 | mle_helpers::{calculate_Mz_mles, evaluate_mles}, 15 | sumcheck::{utils::eq_eval, MLSumcheck, Proof, SumCheckError::SumCheckFailed}, 16 | }, 17 | }; 18 | 19 | mod structs; 20 | 21 | #[cfg(test)] 22 | mod tests; 23 | pub mod utils; 24 | 25 | /// Prover for the Linearization subprotocol 26 | pub trait LinearizationProver> { 27 | /// Generates a proof for the linearization subprotocol 28 | /// 29 | /// # Arguments 30 | /// 31 | /// * `cm_i` - A reference to a committed CCS statement to be linearized, i.e. a CCCS. 32 | /// * `wit` - A reference to a CCS witness for the statement cm_i. 33 | /// * `transcript` - A mutable reference to a sponge for generating NI challenges. 34 | /// * `ccs` - A reference to a Customizable Constraint System circuit representation. 35 | /// 36 | /// # Returns 37 | /// 38 | /// On success, returns a tuple `(LCCCS, LinearizationProof)` where: 39 | /// * `LCCCS` is a linearized version of the CCS witness commitment. 40 | /// * `LinearizationProof` is a proof that the linearization subprotocol was executed correctly. 
41 | /// 42 | /// # Errors 43 | /// 44 | /// Returns an error if asked to evaluate MLEs with incorrect number of variables 45 | /// 46 | fn prove( 47 | cm_i: &CCCS, 48 | wit: &Witness, 49 | transcript: &mut impl Transcript, 50 | ccs: &CCS, 51 | ) -> Result<(LCCCS, LinearizationProof), LinearizationError>; 52 | } 53 | 54 | /// Verifier for the Linearization subprotocol. 55 | pub trait LinearizationVerifier> { 56 | /// Verifies a proof for the linearization subprotocol. 57 | /// 58 | /// # Arguments 59 | /// 60 | /// * `cm_i` - A reference to a `CCCS`, which represents a CCS statement and a commitment to a witness. 61 | /// * `proof` - A reference to a `LinearizationProof` containing the linearization proof. 62 | /// * `transcript` - A mutable reference to a sponge for generating NI challenges. 63 | /// * `ccs` - A reference to a Customizable Constraint System instance used in the protocol. 64 | /// 65 | /// # Returns 66 | /// 67 | /// * `Ok(LCCCS)` - On success, returns a linearized version of the CCS witness commitment. 68 | /// * `Err(LinearizationError)` - If verification fails, returns a `LinearizationError`. 69 | /// 70 | fn verify( 71 | cm_i: &CCCS, 72 | proof: &LinearizationProof, 73 | transcript: &mut impl Transcript, 74 | ccs: &CCS, 75 | ) -> Result, LinearizationError>; 76 | } 77 | 78 | impl> LFLinearizationProver { 79 | /// Step 2 of Fig 5: Construct polynomial $g$ and generate $\beta$ challenges. 
80 | fn construct_polynomial_g( 81 | z_ccs: &[NTT], 82 | transcript: &mut impl Transcript, 83 | ccs: &CCS, 84 | ) -> Result< 85 | ( 86 | Vec>, 87 | usize, 88 | Vec>, 89 | ), 90 | LinearizationError, 91 | > { 92 | // Generate beta challenges from Step 1 93 | let beta_s = transcript.squeeze_beta_challenges(ccs.s); 94 | 95 | // Prepare MLEs 96 | let Mz_mles = calculate_Mz_mles::>(ccs, z_ccs)?; 97 | 98 | // Construct the sumcheck polynomial g 99 | let (g_mles, g_degree) = 100 | prepare_lin_sumcheck_polynomial(&ccs.c, &ccs.d, &Mz_mles, &ccs.S, &beta_s)?; 101 | 102 | Ok((g_mles, g_degree, Mz_mles)) 103 | } 104 | 105 | /// Step 2: Run linearization sum-check protocol. 106 | fn generate_sumcheck_proof( 107 | transcript: &mut impl Transcript, 108 | mles: Vec>, 109 | nvars: usize, 110 | degree: usize, 111 | comb_fn: impl Fn(&[NTT]) -> NTT + Sync + Send, 112 | ) -> Result<(Proof, Vec), LinearizationError> { 113 | let (sum_check_proof, prover_state) = 114 | MLSumcheck::prove_as_subprotocol(transcript, mles, nvars, degree, comb_fn); 115 | let point_r = prover_state 116 | .randomness 117 | .into_iter() 118 | .map(|x| x.into()) 119 | .collect::>(); 120 | 121 | Ok((sum_check_proof, point_r)) 122 | } 123 | 124 | /// Step 3: the mle evaluations that the prover sends to the verifier. 125 | /// I.e. f-hat rows mle evaluations and Mz mle evaluations. 
126 | fn compute_evaluation_vectors( 127 | wit: &Witness, 128 | point_r: &[NTT], 129 | Mz_mles: &[DenseMultilinearExtension], 130 | ) -> Result<(Vec, Vec, Vec), LinearizationError> { 131 | // Compute v 132 | 133 | let v: Vec = evaluate_mles::>(&wit.f_hat, point_r)?; 134 | 135 | // Compute u_j 136 | let u = compute_u(Mz_mles, point_r)?; 137 | 138 | Ok((point_r.to_vec(), v, u)) 139 | } 140 | } 141 | 142 | impl> LinearizationProver 143 | for LFLinearizationProver 144 | { 145 | fn prove( 146 | cm_i: &CCCS, 147 | wit: &Witness, 148 | transcript: &mut impl Transcript, 149 | ccs: &CCS, 150 | ) -> Result<(LCCCS, LinearizationProof), LinearizationError> { 151 | // Step 1: Generate beta challenges (done in construct_polynomial_g because they are not needed 152 | // elsewhere. 153 | 154 | // Step 2: Sum check protocol. 155 | // z_ccs vector, i.e. concatenation x || 1 || w. 156 | let z_ccs = cm_i.get_z_vector(&wit.w_ccs); 157 | let (g_mles, g_degree, Mz_mles) = Self::construct_polynomial_g(&z_ccs, transcript, ccs)?; 158 | 159 | let comb_fn = |vals: &[NTT]| -> NTT { sumcheck_polynomial_comb_fn(vals, ccs) }; 160 | 161 | // Run sumcheck protocol. 162 | let (sumcheck_proof, point_r) = 163 | Self::generate_sumcheck_proof(transcript, g_mles, ccs.s, g_degree, comb_fn)?; 164 | 165 | // Step 3: Compute v, u_vector. 166 | let (point_r, v, u) = Self::compute_evaluation_vectors(wit, &point_r, &Mz_mles)?; 167 | 168 | // Absorbing the prover's messages to the verifier. 
169 | transcript.absorb_slice(&v); 170 | transcript.absorb_slice(&u); 171 | 172 | // Step 5: Output linearization_proof and lcccs 173 | let linearization_proof = LinearizationProof { 174 | linearization_sumcheck: sumcheck_proof, 175 | v: v.clone(), 176 | u: u.clone(), 177 | }; 178 | 179 | let lcccs = LCCCS { 180 | r: point_r, 181 | v, 182 | cm: cm_i.cm.clone(), 183 | u, 184 | x_w: cm_i.x_ccs.clone(), 185 | h: NTT::one(), 186 | }; 187 | 188 | Ok((lcccs, linearization_proof)) 189 | } 190 | } 191 | 192 | impl> LFLinearizationVerifier { 193 | fn verify_sumcheck_proof( 194 | proof: &LinearizationProof, 195 | transcript: &mut impl Transcript, 196 | ccs: &CCS, 197 | ) -> Result<(Vec, NTT), LinearizationError> { 198 | // The polynomial has degree <= ccs.d + 1 and log_m (ccs.s) vars. 199 | let nvars = ccs.s; 200 | let degree = ccs.d + 1; 201 | 202 | let subclaim = MLSumcheck::verify_as_subprotocol( 203 | transcript, 204 | nvars, 205 | degree, 206 | NTT::zero(), 207 | &proof.linearization_sumcheck, 208 | )?; 209 | 210 | Ok(( 211 | subclaim.point.into_iter().map(|x| x.into()).collect(), 212 | subclaim.expected_evaluation, 213 | )) 214 | } 215 | 216 | /// Step 4: Verify 217 | /// $$ 218 | /// \mathbf{e} \cdot \left( \sum\_{i=1}^{n\_s} c_i \cdot \prod\_{j \in S\_i} \mathbf{u}\_j \right) \stackrel{?}{=} s. 
219 | /// $$ 220 | fn verify_evaluation_claim( 221 | beta_s: &[NTT], 222 | point_r: &[NTT], 223 | s: NTT, 224 | proof: &LinearizationProof, 225 | ccs: &CCS, 226 | ) -> Result<(), LinearizationError> { 227 | let e = eq_eval(point_r, beta_s)?; 228 | let should_equal_s = e * ccs // e * (\sum c_i * \Pi_{j \in S_i} u_j) 229 | .c 230 | .iter() 231 | .enumerate() 232 | .map(|(i, &c)| c * ccs.S[i].iter().map(|&j| proof.u[j]).product::()) // c_i * \Pi_{j \in S_i} u_j 233 | .sum::(); // \sum c_i * \Pi_{j \in S_i} u_j 234 | 235 | if should_equal_s != s { 236 | return Err(LinearizationError::SumCheckError(SumCheckFailed( 237 | should_equal_s, 238 | s, 239 | ))); 240 | } 241 | 242 | Ok(()) 243 | } 244 | 245 | fn prepare_verifier_output( 246 | cm_i: &CCCS, 247 | point_r: Vec, 248 | proof: &LinearizationProof, 249 | ) -> LCCCS { 250 | LCCCS { 251 | r: point_r, 252 | v: proof.v.clone(), 253 | cm: cm_i.cm.clone(), 254 | u: proof.u.clone(), 255 | x_w: cm_i.x_ccs.clone(), 256 | h: NTT::one(), 257 | } 258 | } 259 | } 260 | 261 | impl> LinearizationVerifier 262 | for LFLinearizationVerifier 263 | { 264 | fn verify( 265 | cm_i: &CCCS, 266 | proof: &LinearizationProof, 267 | transcript: &mut impl Transcript, 268 | ccs: &CCS, 269 | ) -> Result, LinearizationError> { 270 | // Step 1: Generate the beta challenges. 271 | let beta_s = transcript.squeeze_beta_challenges(ccs.s); 272 | 273 | //Step 2: The sumcheck. 274 | let (point_r, s) = Self::verify_sumcheck_proof(proof, transcript, ccs)?; 275 | 276 | Self::verify_evaluation_claim(&beta_s, &point_r, s, proof, ccs)?; 277 | 278 | // Absorbing the prover's mmessages to the verifier. 
279 | transcript.absorb_slice(&proof.v); 280 | transcript.absorb_slice(&proof.u); 281 | 282 | // Step 5: Output z_o 283 | Ok(Self::prepare_verifier_output(cm_i, point_r, proof)) 284 | } 285 | } 286 | -------------------------------------------------------------------------------- /latticefold/src/nifs/linearization/structs.rs: -------------------------------------------------------------------------------- 1 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 2 | use ark_std::marker::PhantomData; 3 | use stark_rings::OverField; 4 | 5 | use crate::{ark_base::*, utils::sumcheck}; 6 | 7 | /// Non-interactive proof generated by the linearization prover 8 | /// 9 | /// # Members 10 | /// 11 | /// * `linearization_sumcheck` - A list of non-interactive sumcheck prover messages. 12 | /// * `v` - The MLE of `wit.f_hat` evaluated at the sumcheck challenge point. 13 | /// * `u` - The MLEs of $\\{ M_j \mathbf{z} \mid j = 1, 2, \dots, t \\}$ evaluated at sumcheck challenge point. 14 | #[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] 15 | pub struct LinearizationProof { 16 | /// A list of non-interactive sumcheck prover messages. 17 | /// 18 | /// Sent in step 2 of linearization subprotocol. 19 | pub linearization_sumcheck: sumcheck::Proof, 20 | /// The evaluations of MLE's of the rows of $\hat{\mathbf{f}}$-matrices of the witness at the sumcheck challenge point, i.e. 21 | /// 22 | /// $$ 23 | /// \text{v[i]}= \text{mle}[\text{wit.f\\_hat[i]}] (\mathbf{r}). 24 | /// $$ 25 | /// 26 | /// Sent in the step 3 of linearization subprotocol. 27 | pub v: Vec, 28 | /// The evaluations of MLE's of $\\{ M_j \mathbf{z} \mid j = 1, 2, \dots, t \\}$ at evaluated at the sumcheck challenge point, i.e. 29 | /// 30 | /// $$ 31 | /// \text{u[j]} = \sum\_{\vec{\mathbf{b}} \in \\{0,1\\}^{\log n\_c}} 32 | /// \text{mle}[\text{ccs.M[j]}](\vec{\mathbf{x}}, \vec{\mathbf{b}}) \cdot \text{mle}\[\mathbf{z}\](\vec{\mathbf{b}}). 
33 | /// $$ 34 | /// 35 | /// Sent in the step 3 of linearization subprotocol. 36 | pub u: Vec, 37 | } 38 | 39 | /// The LatticeFold prover 40 | /// 41 | /// The implementation of the `LinearizationProver` trait is defined in the main linearization file. 42 | pub struct LFLinearizationProver { 43 | _ntt: PhantomData, 44 | _t: PhantomData, 45 | } 46 | 47 | /// The LatticeFold verifier 48 | /// 49 | /// The implementation of the `LinearizationVerifier` trait is defined in the main linearization file. 50 | pub struct LFLinearizationVerifier { 51 | _ntt: PhantomData, 52 | _t: PhantomData, 53 | } 54 | -------------------------------------------------------------------------------- /latticefold/src/nifs/linearization/utils.rs: -------------------------------------------------------------------------------- 1 | use ark_ff::{Field, PrimeField}; 2 | use cyclotomic_rings::rings::SuitableRing; 3 | use stark_rings::OverField; 4 | use stark_rings_poly::mle::DenseMultilinearExtension; 5 | 6 | use crate::{ 7 | ark_base::Vec, 8 | nifs::{error::LinearizationError, CCS}, 9 | transcript::Transcript, 10 | utils::{mle_helpers::evaluate_mles, sumcheck::utils::build_eq_x_r}, 11 | }; 12 | 13 | /// Computes the evaluation of the MLEs of $\\{ M_j \mathbf{z} \mid j = 1, 2, \dots, t \\}$ at the sumcheck challenge point. 14 | /// 15 | /// # Parameters 16 | /// 17 | /// * `Mz_mles` (`&[DenseMultilinearExtension]`): The MLEs of $\\{ M_j \mathbf{z} \mid j = 1, 2, \dots, t \\}$ 18 | /// * `r` (`&[NTT]`): The sumcheck challenge point 19 | /// 20 | /// # Errors 21 | /// 22 | /// This function may return a `LinearizationError` if there are inconsistencies in the dimensions of `Mz_mles` and `r`. 23 | /// 24 | pub fn compute_u( 25 | Mz_mles: &[DenseMultilinearExtension], 26 | r: &[NTT], 27 | ) -> Result, LinearizationError> { 28 | evaluate_mles::>(Mz_mles, r) 29 | } 30 | 31 | /// Prepare the main linearization polynomial. 
32 | /// 33 | /// $$ g(\vec{\mathbf{x}}) := eq(\vec{\beta}, \vec{\mathbf{x}}) \cdot 34 | /// \left( 35 | /// \sum\_{i=1}^{n\_s} c\_i \cdot 36 | /// \left[ 37 | /// \prod\_{j \in S\_i} 38 | /// \left( 39 | /// \sum\_{\vec{\mathbf{b}} \in \\{0,1\\}^{\log n\_c}} 40 | /// \text{mle}[M\_j](\vec{\mathbf{x}}, \vec{\mathbf{b}}) \cdot \text{mle}\[\mathbf{z}\_{ccs}\](\vec{b}) 41 | /// \right) 42 | /// \right] 43 | /// \right) $$ 44 | /// 45 | /// # Parameters: 46 | /// 47 | /// * `c` (`&[NTT]`): The second multiplicand of the polynomial is a linear combination of products of lists of MLEs, c is the coefficients of the lists 48 | /// 49 | /// * `M_mles` (`&[DenseMultilinearExtension]`): MLEs that the polynomial is constructed from 50 | /// 51 | /// * `S` (`&[Vec]`): ] indices for the MLE lists 52 | /// 53 | /// * `beta_s` (`&[NTT]`): Randomness 54 | /// 55 | /// # Returns: 56 | /// 57 | /// * The MLEs which form the polynomial 58 | /// * The max degree of the polynomial 59 | /// 60 | /// # Errors: 61 | /// * Will return an error if any of the MLEs are of the wrong size 62 | /// 63 | pub fn prepare_lin_sumcheck_polynomial( 64 | c: &[NTT], 65 | d: &usize, 66 | M_mles: &[DenseMultilinearExtension], 67 | S: &[Vec], 68 | beta_s: &[NTT], 69 | ) -> Result<(Vec>, usize), LinearizationError> { 70 | let len = 1 + c 71 | .iter() 72 | .enumerate() 73 | .filter(|(_, c)| !c.is_zero()) 74 | .map(|(i, _)| S[i].len()) 75 | .sum::(); 76 | 77 | let mut mles = Vec::with_capacity(len); 78 | 79 | for (i, _) in c.iter().enumerate().filter(|(_, c)| !c.is_zero()) { 80 | for &j in &S[i] { 81 | mles.push(M_mles[j].clone()); 82 | } 83 | } 84 | 85 | mles.push(build_eq_x_r(beta_s)?); 86 | 87 | Ok((mles, d + 1)) 88 | } 89 | 90 | pub(crate) fn sumcheck_polynomial_comb_fn(vals: &[NTT], ccs: &CCS) -> NTT { 91 | let mut result = NTT::zero(); 92 | 'outer: for (i, &c) in ccs.c.iter().enumerate() { 93 | if c.is_zero() { 94 | continue; 95 | } 96 | let mut term = c; 97 | for &j in &ccs.S[i] { 98 | if 
vals[j].is_zero() { 99 | continue 'outer; 100 | } 101 | term *= vals[j]; 102 | } 103 | result += term; 104 | } 105 | // eq() is the last term added 106 | result * vals[vals.len() - 1] 107 | } 108 | 109 | pub(crate) trait SqueezeBeta { 110 | fn squeeze_beta_challenges(&mut self, n: usize) -> Vec; 111 | } 112 | 113 | impl> SqueezeBeta for T { 114 | fn squeeze_beta_challenges(&mut self, n: usize) -> Vec { 115 | self.absorb_field_element(&::from_base_prime_field( 116 | ::BasePrimeField::from_be_bytes_mod_order(b"beta_s"), 117 | )); 118 | 119 | self.get_challenges(n) 120 | .into_iter() 121 | .map(|x| x.into()) 122 | .collect() 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /latticefold/src/nifs/tests.rs: -------------------------------------------------------------------------------- 1 | use ark_std::{test_rng, vec::Vec}; 2 | use cyclotomic_rings::{challenge_set::LatticefoldChallengeSet, rings::SuitableRing}; 3 | use rand::Rng; 4 | 5 | use crate::{ 6 | arith::{r1cs::get_test_z_split, tests::get_test_ccs, Witness, CCCS, CCS, LCCCS}, 7 | commitment::AjtaiCommitmentScheme, 8 | decomposition_parameters::DecompositionParams, 9 | nifs::{ 10 | linearization::{LFLinearizationProver, LinearizationProver}, 11 | NIFSProver, NIFSVerifier, 12 | }, 13 | transcript::{poseidon::PoseidonTranscript, TranscriptWithShortChallenges}, 14 | }; 15 | 16 | fn setup_test_environment< 17 | const C: usize, 18 | RqNTT: SuitableRing, 19 | DP: DecompositionParams, 20 | const W: usize, 21 | const WIT_LEN: usize, 22 | CS: LatticefoldChallengeSet, 23 | >() -> ( 24 | LCCCS, // acc 25 | Witness, // w_acc 26 | CCCS, // cm_i 27 | Witness, // w_i 28 | CCS, 29 | AjtaiCommitmentScheme, 30 | ) { 31 | let ccs = get_test_ccs::(W, DP::L); 32 | let mut rng = test_rng(); 33 | let (_, x_ccs, w_ccs) = get_test_z_split::(rng.gen_range(0..64)); 34 | let scheme = AjtaiCommitmentScheme::rand(&mut rng); 35 | 36 | let wit_i = Witness::from_w_ccs::(w_ccs); 37 | let cm_i 
= CCCS { 38 | cm: wit_i.commit::(&scheme).unwrap(), 39 | x_ccs: x_ccs.clone(), 40 | }; 41 | 42 | let rand_w_ccs: Vec = (0..WIT_LEN).map(|i| RqNTT::from(i as u64)).collect(); 43 | let wit_acc = Witness::from_w_ccs::(rand_w_ccs); 44 | 45 | let mut transcript = PoseidonTranscript::::default(); 46 | 47 | let (acc, _) = LFLinearizationProver::<_, PoseidonTranscript>::prove( 48 | &cm_i, 49 | &wit_acc, 50 | &mut transcript, 51 | &ccs, 52 | ) 53 | .unwrap(); 54 | (acc, wit_acc, cm_i, wit_i, ccs, scheme) 55 | } 56 | 57 | fn test_nifs_prove< 58 | const C: usize, 59 | const W: usize, 60 | const WIT_LEN: usize, 61 | RqNTT: SuitableRing, 62 | CS: LatticefoldChallengeSet, 63 | DP: DecompositionParams, 64 | T: TranscriptWithShortChallenges, 65 | >() { 66 | let (acc, w_acc, cm_i, w_i, ccs, scheme) = 67 | setup_test_environment::(); 68 | 69 | let mut transcript = PoseidonTranscript::::default(); 70 | 71 | let result = NIFSProver::::prove( 72 | &acc, 73 | &w_acc, 74 | &cm_i, 75 | &w_i, 76 | &mut transcript, 77 | &ccs, 78 | &scheme, 79 | ); 80 | 81 | assert!(result.is_ok()); 82 | } 83 | 84 | fn test_nifs_verify< 85 | const C: usize, 86 | const W: usize, 87 | const WIT_LEN: usize, 88 | RqNTT: SuitableRing, 89 | CS: LatticefoldChallengeSet, 90 | DP: DecompositionParams, 91 | T: TranscriptWithShortChallenges, 92 | >() { 93 | let (acc, w_acc, cm_i, w_i, ccs, scheme) = 94 | setup_test_environment::(); 95 | 96 | let mut prover_transcript = PoseidonTranscript::::default(); 97 | let mut verifier_transcript = PoseidonTranscript::::default(); 98 | 99 | let (_, _, proof) = NIFSProver::::prove( 100 | &acc, 101 | &w_acc, 102 | &cm_i, 103 | &w_i, 104 | &mut prover_transcript, 105 | &ccs, 106 | &scheme, 107 | ) 108 | .unwrap(); 109 | 110 | let result = NIFSVerifier::::verify( 111 | &acc, 112 | &cm_i, 113 | &proof, 114 | &mut verifier_transcript, 115 | &ccs, 116 | ); 117 | 118 | assert!(result.is_ok()); 119 | } 120 | 121 | mod e2e_tests { 122 | use super::*; 123 | mod stark { 124 | use 
cyclotomic_rings::rings::{StarkChallengeSet, StarkRingNTT}; 125 | 126 | use crate::{ 127 | decomposition_parameters::{test_params::StarkDP, DecompositionParams}, 128 | nifs::tests::{test_nifs_prove, test_nifs_verify}, 129 | transcript::poseidon::PoseidonTranscript, 130 | }; 131 | 132 | type RqNTT = StarkRingNTT; 133 | type CS = StarkChallengeSet; 134 | type DP = StarkDP; 135 | type T = PoseidonTranscript; 136 | 137 | const C: usize = 4; 138 | const WIT_LEN: usize = 4; 139 | const W: usize = WIT_LEN * DP::L; 140 | 141 | #[ignore] 142 | #[test] 143 | fn test_prove() { 144 | test_nifs_prove::(); 145 | } 146 | 147 | #[ignore] 148 | #[test] 149 | fn test_verify() { 150 | test_nifs_verify::(); 151 | } 152 | } 153 | 154 | mod goldilocks { 155 | use cyclotomic_rings::rings::{GoldilocksChallengeSet, GoldilocksRingNTT}; 156 | 157 | use super::*; 158 | use crate::decomposition_parameters::test_params::GoldilocksDP; 159 | 160 | type RqNTT = GoldilocksRingNTT; 161 | type CS = GoldilocksChallengeSet; 162 | type DP = GoldilocksDP; 163 | type T = PoseidonTranscript; 164 | 165 | const C: usize = 4; 166 | const WIT_LEN: usize = 4; 167 | const W: usize = WIT_LEN * DP::L; 168 | 169 | #[test] 170 | fn test_prove() { 171 | test_nifs_prove::(); 172 | } 173 | 174 | #[test] 175 | fn test_verify() { 176 | test_nifs_verify::(); 177 | } 178 | } 179 | 180 | mod babybear { 181 | use cyclotomic_rings::rings::{BabyBearChallengeSet, BabyBearRingNTT}; 182 | 183 | use super::*; 184 | use crate::decomposition_parameters::test_params::BabyBearDP; 185 | 186 | type RqNTT = BabyBearRingNTT; 187 | type CS = BabyBearChallengeSet; 188 | type DP = BabyBearDP; 189 | type T = PoseidonTranscript; 190 | 191 | const C: usize = 4; 192 | const WIT_LEN: usize = 4; 193 | const W: usize = WIT_LEN * DP::L; 194 | 195 | #[test] 196 | fn test_prove() { 197 | test_nifs_prove::(); 198 | } 199 | 200 | #[test] 201 | fn test_verify() { 202 | test_nifs_verify::(); 203 | } 204 | } 205 | } 206 | 
-------------------------------------------------------------------------------- /latticefold/src/transcript.rs: -------------------------------------------------------------------------------- 1 | //! Provides utility for generating non-interactive challenges 2 | //! 3 | //! Transcripts allow provers and verifiers to independently draw the same challenges. 4 | 5 | use ark_std::fmt::Debug; 6 | use cyclotomic_rings::{challenge_set::LatticefoldChallengeSet, rings::SuitableRing}; 7 | use stark_rings::OverField; 8 | 9 | use crate::ark_base::*; 10 | 11 | pub mod poseidon; 12 | 13 | pub trait Transcript { 14 | type TranscriptConfig: Debug; 15 | 16 | fn new(config: &Self::TranscriptConfig) -> Self; 17 | 18 | fn absorb(&mut self, v: &R); 19 | 20 | fn absorb_field_element(&mut self, v: &R::BaseRing) { 21 | self.absorb(&From::from(*v)) 22 | } 23 | 24 | fn absorb_slice(&mut self, v: &[R]) { 25 | for ring in v { 26 | self.absorb(ring); 27 | } 28 | } 29 | 30 | fn get_challenge(&mut self) -> R::BaseRing; 31 | 32 | fn get_challenges(&mut self, n: usize) -> Vec { 33 | let mut challenges = Vec::with_capacity(n); 34 | challenges.extend((0..n).map(|_| self.get_challenge())); 35 | challenges 36 | } 37 | } 38 | 39 | pub trait TranscriptWithShortChallenges: Transcript { 40 | type ChallengeSet: LatticefoldChallengeSet; 41 | 42 | fn get_short_challenge(&mut self) -> R::CoefficientRepresentation; 43 | 44 | fn get_small_challenges(&mut self, n: usize) -> Vec { 45 | let mut challenges = Vec::with_capacity(n); 46 | challenges.extend((0..n).map(|_| self.get_short_challenge())); 47 | challenges 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /latticefold/src/transcript/poseidon.rs: -------------------------------------------------------------------------------- 1 | use ark_crypto_primitives::sponge::{ 2 | poseidon::{PoseidonConfig, PoseidonSponge}, 3 | CryptographicSponge, 4 | }; 5 | use ark_ff::Field; 6 | use ark_std::marker::PhantomData; 7 | 
use cyclotomic_rings::{ 8 | challenge_set::LatticefoldChallengeSet, 9 | rings::{GetPoseidonParams, SuitableRing}, 10 | }; 11 | use stark_rings::OverField; 12 | 13 | use super::{Transcript, TranscriptWithShortChallenges}; 14 | use crate::ark_base::*; 15 | 16 | /// PoseidonTranscript implements the Transcript trait using the Poseidon hash 17 | #[derive(Clone)] 18 | pub struct PoseidonTranscript { 19 | _marker: PhantomData, 20 | sponge: PoseidonSponge<::BasePrimeField>, 21 | } 22 | 23 | impl> Default for PoseidonTranscript { 24 | fn default() -> Self { 25 | Self::new(&R::PoseidonParams::get_poseidon_config()) 26 | } 27 | } 28 | 29 | impl Transcript for PoseidonTranscript { 30 | type TranscriptConfig = PoseidonConfig<::BasePrimeField>; 31 | 32 | fn new(config: &Self::TranscriptConfig) -> Self { 33 | let sponge = PoseidonSponge::<::BasePrimeField>::new(config); 34 | Self { 35 | sponge, 36 | _marker: PhantomData, 37 | } 38 | } 39 | 40 | fn absorb(&mut self, v: &R) { 41 | self.sponge.absorb( 42 | &v.coeffs() 43 | .iter() 44 | .flat_map(|x| x.to_base_prime_field_elements()) 45 | .collect::>(), 46 | ); 47 | } 48 | 49 | fn get_challenge(&mut self) -> R::BaseRing { 50 | let extension_degree = R::BaseRing::extension_degree(); 51 | let c = self 52 | .sponge 53 | .squeeze_field_elements(extension_degree as usize); 54 | self.sponge.absorb(&c); 55 | ::from_base_prime_field_elems(&c) 56 | .expect("something went wrong: c does not contain extension_degree elements") 57 | } 58 | } 59 | 60 | impl> TranscriptWithShortChallenges 61 | for PoseidonTranscript 62 | { 63 | type ChallengeSet = CS; 64 | 65 | fn get_short_challenge(&mut self) -> R::CoefficientRepresentation { 66 | let random_bytes = self.sponge.squeeze_bytes(Self::ChallengeSet::BYTES_NEEDED); 67 | 68 | Self::ChallengeSet::short_challenge_from_random_bytes(&random_bytes) 69 | .expect("not enough bytes to get a small challenge") 70 | } 71 | } 72 | 73 | #[cfg(test)] 74 | mod tests { 75 | use ark_ff::BigInt; 76 | use 
cyclotomic_rings::rings::{GoldilocksChallengeSet, GoldilocksRingNTT, GoldilocksRingPoly}; 77 | use stark_rings::cyclotomic_ring::models::goldilocks::{Fq, Fq3}; 78 | 79 | use super::*; 80 | 81 | #[test] 82 | fn test_get_big_challenge() { 83 | let mut transcript = 84 | PoseidonTranscript::::default(); 85 | 86 | transcript 87 | .sponge 88 | .absorb(&Fq::from(BigInt::<1>::from(0xFFu32))); 89 | 90 | let expected: Fq3 = Fq3::new( 91 | Fq::new(BigInt([10462816198028961279])), 92 | Fq::new(BigInt([17217694161994925895])), 93 | Fq::new(BigInt([6163269596856181508])), 94 | ); 95 | 96 | assert_eq!(expected, transcript.get_challenge()) 97 | } 98 | 99 | #[test] 100 | fn test_get_small_challenge() { 101 | let mut transcript = 102 | PoseidonTranscript::::default(); 103 | 104 | transcript 105 | .sponge 106 | .absorb(&Fq::from(BigInt::<1>::from(0xFFu32))); 107 | 108 | let expected_coeffs: Vec = vec![ 109 | Fq::new(BigInt([31])), 110 | Fq::new(BigInt([18446744069414584312])), 111 | Fq::new(BigInt([18446744069414584291])), 112 | Fq::new(BigInt([14])), 113 | Fq::new(BigInt([18446744069414584306])), 114 | Fq::new(BigInt([18446744069414584312])), 115 | Fq::new(BigInt([30])), 116 | Fq::new(BigInt([18446744069414584313])), 117 | Fq::new(BigInt([19])), 118 | Fq::new(BigInt([18446744069414584317])), 119 | Fq::new(BigInt([20])), 120 | Fq::new(BigInt([18446744069414584306])), 121 | Fq::new(BigInt([18446744069414584295])), 122 | Fq::new(BigInt([4])), 123 | Fq::new(BigInt([18446744069414584320])), 124 | Fq::new(BigInt([7])), 125 | Fq::new(BigInt([18446744069414584298])), 126 | Fq::new(BigInt([18446744069414584295])), 127 | Fq::new(BigInt([18446744069414584304])), 128 | Fq::new(BigInt([18446744069414584290])), 129 | Fq::new(BigInt([3])), 130 | Fq::new(BigInt([18446744069414584304])), 131 | Fq::new(BigInt([25])), 132 | Fq::new(BigInt([18446744069414584304])), 133 | ]; 134 | 135 | let expected = GoldilocksRingPoly::from(expected_coeffs); 136 | 137 | assert_eq!(expected, 
transcript.get_short_challenge()) 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /latticefold/src/utils.rs: -------------------------------------------------------------------------------- 1 | //! Provides generic functionality useful to folding schemes. 2 | 3 | pub(crate) mod mle_helpers; 4 | #[cfg(feature = "std")] 5 | pub mod security_check; 6 | pub mod sumcheck; 7 | -------------------------------------------------------------------------------- /latticefold/src/utils/mle_helpers.rs: -------------------------------------------------------------------------------- 1 | //! 2 | //! Helper function used by all three subprotocols. 3 | //! 4 | 5 | use ark_std::{cfg_into_iter, cfg_iter, vec::Vec}; 6 | use cyclotomic_rings::rings::SuitableRing; 7 | #[cfg(feature = "parallel")] 8 | use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}; 9 | use stark_rings::Ring; 10 | use stark_rings_poly::mle::DenseMultilinearExtension; 11 | use thiserror::Error; 12 | 13 | use crate::arith::{error::CSError, utils::mat_vec_mul, CCS}; 14 | 15 | #[derive(Debug, Error)] 16 | pub enum MleEvaluationError { 17 | #[error("lengths of evaluation point and evaluations are not consistent: 1 << {0} != {1}")] 18 | IncorrectLength(usize, usize), 19 | } 20 | 21 | pub trait Evaluate { 22 | fn evaluate(self, point: &[R]) -> Result; 23 | } 24 | 25 | impl Evaluate for Vec { 26 | fn evaluate(self, point: &[R]) -> Result { 27 | let evals_len = self.len(); 28 | 29 | DenseMultilinearExtension::from_evaluations_vec(point.len(), self) 30 | .evaluate(point) 31 | .ok_or(MleEvaluationError::IncorrectLength(point.len(), evals_len)) 32 | } 33 | } 34 | 35 | impl Evaluate for &[R] { 36 | fn evaluate(self, point: &[R]) -> Result { 37 | let evals_len = self.len(); 38 | 39 | DenseMultilinearExtension::from_evaluations_slice(point.len(), self) 40 | .evaluate(point) 41 | .ok_or(MleEvaluationError::IncorrectLength(point.len(), evals_len)) 42 
| } 43 | } 44 | 45 | impl Evaluate for &DenseMultilinearExtension { 46 | fn evaluate(self, point: &[R]) -> Result { 47 | DenseMultilinearExtension::::evaluate(self, point) 48 | .ok_or(MleEvaluationError::IncorrectLength(point.len(), self.elen)) 49 | } 50 | } 51 | 52 | impl Evaluate for &Vec { 53 | fn evaluate(self, point: &[R]) -> Result { 54 | if self.len() != 1 << point.len() { 55 | return Err(MleEvaluationError::IncorrectLength(point.len(), self.len())); 56 | } 57 | 58 | DenseMultilinearExtension::from_evaluations_slice(point.len(), self) 59 | .evaluate(point) 60 | .ok_or(MleEvaluationError::IncorrectLength(point.len(), self.len())) 61 | } 62 | } 63 | 64 | #[cfg(not(feature = "parallel"))] 65 | pub fn evaluate_mles(mle_s: I, point: &[R]) -> Result, E> 66 | where 67 | R: Ring, 68 | V: Evaluate, 69 | I: IntoIterator, 70 | E: From, 71 | { 72 | cfg_into_iter!(mle_s) 73 | .map(|evals| evals.evaluate(point).map_err(From::from)) 74 | .collect() 75 | } 76 | 77 | #[cfg(feature = "parallel")] 78 | pub fn evaluate_mles(mle_s: I, point: &[R]) -> Result, E> 79 | where 80 | R: Ring, 81 | V: Evaluate, 82 | I: IntoParallelIterator, 83 | E: From + Send + Sync, 84 | { 85 | cfg_into_iter!(mle_s) 86 | .map(|evals| evals.evaluate(point).map_err(From::from)) 87 | .collect() 88 | } 89 | 90 | #[cfg(not(feature = "parallel"))] 91 | pub fn to_mles_err( 92 | n_vars: usize, 93 | mle_s: I, 94 | ) -> Result>, E> 95 | where 96 | I: IntoIterator, E1>>, 97 | R: Ring, 98 | E: From + From, 99 | { 100 | mle_s 101 | .into_iter() 102 | .map(|m| { 103 | let m = m?; 104 | if 1 << n_vars < m.len() { 105 | Err(MleEvaluationError::IncorrectLength(1 << n_vars, m.len()).into()) 106 | } else { 107 | Ok(DenseMultilinearExtension::from_evaluations_vec(n_vars, m)) 108 | } 109 | }) 110 | .collect::>() 111 | } 112 | 113 | #[cfg(feature = "parallel")] 114 | pub fn to_mles_err( 115 | n_vars: usize, 116 | mle_s: I, 117 | ) -> Result>, E> 118 | where 119 | I: IntoParallelIterator, E1>>, 120 | R: Ring, 121 | E: From 
+ Sync + Send + From, 122 | { 123 | mle_s 124 | .into_par_iter() 125 | .map(|m| { 126 | let m = m?; 127 | if 1 << n_vars < m.len() { 128 | Err(MleEvaluationError::IncorrectLength(1 << n_vars, m.len()).into()) 129 | } else { 130 | Ok(DenseMultilinearExtension::from_evaluations_vec(n_vars, m)) 131 | } 132 | }) 133 | .collect::>() 134 | } 135 | 136 | // Prepare MLE's of the form mle[M_i \cdot z_ccs](x), a.k.a. \sum mle[M_i](x, b) * mle[z_ccs](b). 137 | pub fn calculate_Mz_mles( 138 | ccs: &CCS, 139 | z_ccs: &[NTT], 140 | ) -> Result>, E> 141 | where 142 | NTT: SuitableRing, 143 | E: From + From + Sync + Send, 144 | { 145 | to_mles_err::<_, _, E, CSError>(ccs.s, cfg_iter!(ccs.M).map(|M| mat_vec_mul(M, z_ccs))) 146 | } 147 | -------------------------------------------------------------------------------- /latticefold/src/utils/security_check.rs: -------------------------------------------------------------------------------- 1 | use ark_std::f64; 2 | use num_bigint::BigUint; 3 | use num_traits::ToPrimitive; 4 | 5 | fn calculate_bound_l2(degree: usize, kappa: usize, ring_modulus_log2: f64) -> BigUint { 6 | // The current security parameter use log2(delta) 7 | let delta = 1.0045_f64; 8 | // Calculate B_{L_2} as 2^{2 \sqrt{\text{log2}(\delta) \times \text{degree} \times \kappa \times \frac{\text{modulus}}{2}}} 9 | let bound_l2 = 2f64.powf( 10 | 2.0 * (delta.ln() / 2f64.ln()).sqrt() 11 | * (degree as f64 * kappa as f64 * ring_modulus_log2).sqrt(), 12 | ); 13 | let bound_l2_ceil = bound_l2.ceil() as u64; // Ceil and convert to u64 14 | BigUint::from(bound_l2_ceil) // Convert to BigUint 15 | } 16 | 17 | pub fn check_ring_modulus_128_bits_security( 18 | ring_modulus: &BigUint, 19 | kappa: usize, 20 | degree: usize, 21 | num_cols: usize, 22 | b: u128, 23 | l: usize, 24 | already_under_bound: bool, 25 | ) -> bool { 26 | // Modulus bits and half 27 | let (ring_modulus_log2, ring_modulus_half) = (ring_modulus.bits() as f64, ring_modulus / 2u32); 28 | 29 | // Calculate the left 
side of the inequality 30 | let bound_l2_bigint = calculate_bound_l2(degree, kappa, ring_modulus_log2); 31 | let bound_l2_check = bound_l2_bigint < ring_modulus_half; 32 | // Calculate bound_inf B_inf as B_{L_2} / \sqrt{\text{degree} \times \text{num_cols}} 33 | let bound_inf = bound_l2_bigint.to_f64().unwrap() / ((degree as f64 * num_cols as f64).sqrt()); 34 | 35 | let b_check = b.to_f64().unwrap() < bound_inf; 36 | // Check if we need to decompose and b^l > stark_modulus/2 37 | let b_pow_l_check = if already_under_bound && l == 1 { 38 | true 39 | } else { 40 | BigUint::from(b).pow(l as u32) > ring_modulus_half 41 | }; 42 | 43 | // Return the result of the condition 44 | bound_l2_check && b_check && b_pow_l_check 45 | } 46 | -------------------------------------------------------------------------------- /latticefold/src/utils/sumcheck.rs: -------------------------------------------------------------------------------- 1 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 2 | use ark_std::{fmt::Display, marker::PhantomData}; 3 | use prover::{ProverMsg, ProverState}; 4 | use stark_rings::{OverField, Ring}; 5 | use stark_rings_poly::polynomials::{ArithErrors, DenseMultilinearExtension}; 6 | use thiserror::Error; 7 | 8 | use self::verifier::SubClaim; 9 | use crate::{ark_base::*, transcript::Transcript}; 10 | 11 | pub mod prover; 12 | pub mod utils; 13 | pub mod verifier; 14 | 15 | /// Interactive Proof for Multilinear Sumcheck 16 | pub struct IPForMLSumcheck { 17 | #[doc(hidden)] 18 | _marker: PhantomData<(R, T)>, 19 | } 20 | 21 | #[derive(Error, Debug)] 22 | pub enum SumCheckError { 23 | #[error("univariate polynomial evaluation error")] 24 | EvaluationError(ArithErrors), 25 | #[error("incorrect sumcheck sum. Expected `{0}`. 
Received `{1}`")] 26 | SumCheckFailed(R, R), 27 | #[error("max degree exceeded")] 28 | MaxDegreeExceeded, 29 | } 30 | 31 | impl From for SumCheckError { 32 | fn from(arith_error: ArithErrors) -> Self { 33 | Self::EvaluationError(arith_error) 34 | } 35 | } 36 | 37 | /// Sumcheck for products of multilinear polynomial 38 | pub struct MLSumcheck(#[doc(hidden)] PhantomData<(R, T)>); 39 | 40 | /// proof generated by prover 41 | #[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] 42 | pub struct Proof(Vec>); 43 | 44 | impl> MLSumcheck { 45 | /// extract sum from the proof 46 | pub fn extract_sum(proof: &Proof) -> R { 47 | proof.0[0].evaluations[0] + proof.0[0].evaluations[1] 48 | } 49 | 50 | /// This function does the same thing as `prove`, but it uses cryptographic sponge as the transcript/to generate the 51 | /// verifier challenges. Additionally, it returns the prover's state in addition to the proof. 52 | /// Both of these allow this sumcheck to be better used as a part of a larger protocol. 
53 | pub fn prove_as_subprotocol( 54 | transcript: &mut T, 55 | mles: Vec>, 56 | nvars: usize, 57 | degree: usize, 58 | comb_fn: impl Fn(&[R]) -> R + Sync + Send, 59 | ) -> (Proof, ProverState) { 60 | transcript.absorb(&R::from(nvars as u128)); 61 | transcript.absorb(&R::from(degree as u128)); 62 | let mut prover_state = IPForMLSumcheck::::prover_init(mles, nvars, degree); 63 | let mut verifier_msg = None; 64 | let mut prover_msgs = Vec::with_capacity(nvars); 65 | for _ in 0..nvars { 66 | let prover_msg = 67 | IPForMLSumcheck::::prove_round(&mut prover_state, &verifier_msg, &comb_fn); 68 | transcript.absorb_slice(&prover_msg.evaluations); 69 | prover_msgs.push(prover_msg); 70 | let next_verifier_msg = IPForMLSumcheck::::sample_round(transcript); 71 | transcript.absorb(&next_verifier_msg.randomness.into()); 72 | 73 | verifier_msg = Some(next_verifier_msg); 74 | } 75 | prover_state 76 | .randomness 77 | .push(verifier_msg.unwrap().randomness); 78 | 79 | (Proof(prover_msgs), prover_state) 80 | } 81 | 82 | /// This function does the same thing as `prove`, but it uses a cryptographic sponge as the transcript/to generate the 83 | /// verifier challenges. This allows this sumcheck to be used as a part of a larger protocol. 
84 | pub fn verify_as_subprotocol( 85 | transcript: &mut T, 86 | nvars: usize, 87 | degree: usize, 88 | claimed_sum: R, 89 | proof: &Proof, 90 | ) -> Result, SumCheckError> { 91 | transcript.absorb(&R::from(nvars as u128)); 92 | transcript.absorb(&R::from(degree as u128)); 93 | 94 | let mut verifier_state = IPForMLSumcheck::::verifier_init(nvars, degree); 95 | for i in 0..nvars { 96 | let prover_msg = proof.0.get(i).expect("proof is incomplete"); 97 | transcript.absorb_slice(&prover_msg.evaluations); 98 | let verifier_msg = 99 | IPForMLSumcheck::verify_round(prover_msg.clone(), &mut verifier_state, transcript); 100 | transcript.absorb(&verifier_msg.randomness.into()); 101 | } 102 | 103 | IPForMLSumcheck::::check_and_generate_subclaim(verifier_state, claimed_sum) 104 | } 105 | } 106 | 107 | #[cfg(test)] 108 | mod tests { 109 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}; 110 | use ark_std::io::Cursor; 111 | use cyclotomic_rings::{challenge_set::LatticefoldChallengeSet, rings::SuitableRing}; 112 | use rand::Rng; 113 | 114 | use crate::{ 115 | ark_base::*, 116 | transcript::poseidon::PoseidonTranscript, 117 | utils::sumcheck::{ 118 | utils::{rand_poly, rand_poly_comb_fn}, 119 | MLSumcheck, Proof, 120 | }, 121 | }; 122 | 123 | fn generate_sumcheck_proof( 124 | nvars: usize, 125 | mut rng: &mut (impl Rng + Sized), 126 | ) -> (usize, R, Proof) 127 | where 128 | R: SuitableRing, 129 | CS: LatticefoldChallengeSet, 130 | { 131 | let mut transcript = PoseidonTranscript::::default(); 132 | 133 | let ((poly_mles, poly_degree), products, sum) = 134 | rand_poly(nvars, (2, 5), 3, &mut rng).unwrap(); 135 | 136 | let comb_fn = |vals: &[R]| -> R { rand_poly_comb_fn(vals, &products) }; 137 | 138 | let (proof, _) = MLSumcheck::prove_as_subprotocol( 139 | &mut transcript, 140 | poly_mles, 141 | nvars, 142 | poly_degree, 143 | comb_fn, 144 | ); 145 | (poly_degree, sum, proof) 146 | } 147 | 148 | fn test_sumcheck() 149 | where 150 | R: SuitableRing, 
151 | CS: LatticefoldChallengeSet, 152 | { 153 | let mut rng = ark_std::test_rng(); 154 | let nvars = 5; 155 | 156 | for _ in 0..20 { 157 | let (poly_degree, sum, proof) = generate_sumcheck_proof::(nvars, &mut rng); 158 | 159 | let mut transcript: PoseidonTranscript = PoseidonTranscript::default(); 160 | let res = 161 | MLSumcheck::verify_as_subprotocol(&mut transcript, nvars, poly_degree, sum, &proof); 162 | assert!(res.is_ok()) 163 | } 164 | } 165 | 166 | fn test_sumcheck_proof_serialization() 167 | where 168 | R: SuitableRing, 169 | CS: LatticefoldChallengeSet, 170 | { 171 | let mut rng = ark_std::test_rng(); 172 | let nvars = 5; 173 | 174 | let proof = generate_sumcheck_proof::(nvars, &mut rng).2; 175 | 176 | let mut serialized = Vec::new(); 177 | proof 178 | .serialize_with_mode(&mut serialized, Compress::Yes) 179 | .expect("Failed to serialize proof"); 180 | 181 | let mut cursor = Cursor::new(&serialized); 182 | assert_eq!( 183 | proof, 184 | Proof::deserialize_with_mode(&mut cursor, Compress::Yes, Validate::Yes) 185 | .expect("Failed to deserialize proof") 186 | ); 187 | } 188 | 189 | fn test_failing_sumcheck() 190 | where 191 | R: SuitableRing, 192 | CS: LatticefoldChallengeSet, 193 | { 194 | let mut rng = ark_std::test_rng(); 195 | 196 | for _ in 0..20 { 197 | let mut transcript: PoseidonTranscript = PoseidonTranscript::default(); 198 | 199 | let nvars = 5; 200 | let ((poly_mles, poly_degree), products, _) = 201 | rand_poly(nvars, (2, 5), 3, &mut rng).unwrap(); 202 | 203 | let comb_fn = |vals: &[R]| -> R { rand_poly_comb_fn(vals, &products) }; 204 | 205 | let (proof, _) = MLSumcheck::prove_as_subprotocol( 206 | &mut transcript, 207 | poly_mles, 208 | nvars, 209 | poly_degree, 210 | comb_fn, 211 | ); 212 | 213 | let not_sum = R::zero(); 214 | 215 | let res = MLSumcheck::verify_as_subprotocol( 216 | &mut transcript, 217 | nvars, 218 | poly_degree, 219 | not_sum, 220 | &proof, 221 | ); 222 | assert!(res.is_err()); 223 | } 224 | } 225 | 226 | mod stark { 227 | 
use cyclotomic_rings::rings::StarkChallengeSet; 228 | use stark_rings::cyclotomic_ring::models::stark_prime::RqNTT; 229 | 230 | type CS = StarkChallengeSet; 231 | 232 | #[test] 233 | fn test_sumcheck() { 234 | super::test_sumcheck::(); 235 | } 236 | 237 | #[test] 238 | fn test_sumcheck_proof_serialization() { 239 | super::test_sumcheck_proof_serialization::(); 240 | } 241 | 242 | #[test] 243 | fn test_failing_sumcheck() { 244 | super::test_failing_sumcheck::(); 245 | } 246 | } 247 | 248 | mod frog { 249 | use cyclotomic_rings::rings::FrogChallengeSet; 250 | use stark_rings::cyclotomic_ring::models::frog_ring::RqNTT; 251 | 252 | type CS = FrogChallengeSet; 253 | 254 | #[test] 255 | fn test_sumcheck() { 256 | super::test_sumcheck::(); 257 | } 258 | 259 | #[test] 260 | fn test_sumcheck_proof_serialization() { 261 | super::test_sumcheck_proof_serialization::(); 262 | } 263 | 264 | #[test] 265 | fn test_failing_sumcheck() { 266 | super::test_failing_sumcheck::(); 267 | } 268 | } 269 | 270 | mod goldilocks { 271 | use cyclotomic_rings::rings::GoldilocksChallengeSet; 272 | use stark_rings::cyclotomic_ring::models::goldilocks::RqNTT; 273 | 274 | type CS = GoldilocksChallengeSet; 275 | 276 | #[test] 277 | fn test_sumcheck() { 278 | super::test_sumcheck::(); 279 | } 280 | 281 | #[test] 282 | fn test_sumcheck_proof_serialization() { 283 | super::test_sumcheck_proof_serialization::(); 284 | } 285 | 286 | #[test] 287 | fn test_failing_sumcheck() { 288 | super::test_failing_sumcheck::(); 289 | } 290 | } 291 | 292 | mod babybear { 293 | use cyclotomic_rings::rings::BabyBearChallengeSet; 294 | use stark_rings::cyclotomic_ring::models::babybear::RqNTT; 295 | 296 | type CS = BabyBearChallengeSet; 297 | 298 | #[test] 299 | fn test_sumcheck() { 300 | super::test_sumcheck::(); 301 | } 302 | 303 | #[test] 304 | fn test_sumcheck_proof_serialization() { 305 | super::test_sumcheck_proof_serialization::(); 306 | } 307 | 308 | #[test] 309 | fn test_failing_sumcheck() { 310 | 
super::test_failing_sumcheck::(); 311 | } 312 | } 313 | } 314 | -------------------------------------------------------------------------------- /latticefold/src/utils/sumcheck/prover.rs: -------------------------------------------------------------------------------- 1 | //! Prover 2 | 3 | use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; 4 | use ark_std::{cfg_into_iter, cfg_iter_mut, vec::Vec}; 5 | #[cfg(feature = "parallel")] 6 | use rayon::prelude::*; 7 | use stark_rings::{OverField, Ring}; 8 | use stark_rings_poly::{mle::MultilinearExtension, polynomials::DenseMultilinearExtension}; 9 | 10 | use super::{verifier::VerifierMsg, IPForMLSumcheck}; 11 | 12 | /// Prover Message 13 | #[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] 14 | pub struct ProverMsg { 15 | /// evaluations on P(0), P(1), P(2), ... 16 | pub(crate) evaluations: Vec, 17 | } 18 | 19 | /// Prover State 20 | pub struct ProverState { 21 | /// sampled randomness given by the verifier 22 | pub randomness: Vec, 23 | /// Stores a list of multilinear extensions 24 | pub mles: Vec>, 25 | /// Number of variables 26 | pub num_vars: usize, 27 | /// Max degree 28 | pub max_degree: usize, 29 | /// The current round number 30 | pub round: usize, 31 | } 32 | 33 | impl IPForMLSumcheck { 34 | /// initialize the prover to argue for the sum of polynomial over {0,1}^`num_vars` 35 | pub fn prover_init( 36 | mles: Vec>, 37 | nvars: usize, 38 | degree: usize, 39 | ) -> ProverState { 40 | if nvars == 0 { 41 | panic!("Attempt to prove a constant.") 42 | } 43 | 44 | ProverState { 45 | randomness: Vec::with_capacity(nvars), 46 | mles, 47 | num_vars: nvars, 48 | max_degree: degree, 49 | round: 0, 50 | } 51 | } 52 | 53 | /// receive message from verifier, generate prover message, and proceed to next round 54 | /// 55 | /// Adapted Jolt's sumcheck implementation 56 | pub fn prove_round( 57 | prover_state: &mut ProverState, 58 | v_msg: &Option>, 59 | comb_fn: impl Fn(&[R]) -> R + Sync + 
Send, 60 | ) -> ProverMsg { 61 | if let Some(msg) = v_msg { 62 | if prover_state.round == 0 { 63 | panic!("first round should be prover first."); 64 | } 65 | prover_state.randomness.push(msg.randomness); 66 | 67 | // fix argument 68 | let i = prover_state.round; 69 | let r = prover_state.randomness[i - 1]; 70 | cfg_iter_mut!(prover_state.mles).for_each(|multiplicand| { 71 | multiplicand.fix_variables(&[r.into()]); 72 | }); 73 | } else if prover_state.round > 0 { 74 | panic!("verifier message is empty"); 75 | } 76 | 77 | prover_state.round += 1; 78 | 79 | if prover_state.round > prover_state.num_vars { 80 | panic!("Prover is not active"); 81 | } 82 | 83 | let i = prover_state.round; 84 | let nv = prover_state.num_vars; 85 | let degree = prover_state.max_degree; 86 | 87 | let polys = &prover_state.mles; 88 | 89 | struct Scratch { 90 | evals: Vec, 91 | steps: Vec, 92 | vals0: Vec, 93 | vals1: Vec, 94 | vals: Vec, 95 | levals: Vec, 96 | } 97 | let scratch = || Scratch { 98 | evals: vec![R::zero(); degree + 1], 99 | steps: vec![R::zero(); polys.len()], 100 | vals0: vec![R::zero(); polys.len()], 101 | vals1: vec![R::zero(); polys.len()], 102 | vals: vec![R::zero(); polys.len()], 103 | levals: vec![R::zero(); degree + 1], 104 | }; 105 | 106 | #[cfg(not(feature = "parallel"))] 107 | let zeros = scratch(); 108 | #[cfg(feature = "parallel")] 109 | let zeros = scratch; 110 | 111 | let summer = cfg_into_iter!(0..1 << (nv - i)).fold(zeros, |mut s, b| { 112 | let index = b << 1; 113 | 114 | s.vals0 115 | .iter_mut() 116 | .zip(polys.iter()) 117 | .for_each(|(v0, poly)| *v0 = poly[index]); 118 | s.levals[0] = comb_fn(&s.vals0); 119 | 120 | s.vals1 121 | .iter_mut() 122 | .zip(polys.iter()) 123 | .for_each(|(v1, poly)| *v1 = poly[index + 1]); 124 | s.levals[1] = comb_fn(&s.vals1); 125 | 126 | for (i, (v1, v0)) in s.vals1.iter().zip(s.vals0.iter()).enumerate() { 127 | s.steps[i] = *v1 - v0; 128 | s.vals[i] = *v1; 129 | } 130 | 131 | for eval_point in s.levals.iter_mut().take(degree 
+ 1).skip(2) { 132 | for poly_i in 0..polys.len() { 133 | s.vals[poly_i] += s.steps[poly_i]; 134 | } 135 | *eval_point = comb_fn(&s.vals); 136 | } 137 | 138 | s.evals 139 | .iter_mut() 140 | .zip(s.levals.iter()) 141 | .for_each(|(e, l)| *e += l); 142 | s 143 | }); 144 | 145 | // Rayon's fold outputs an iter which still needs to be summed over 146 | #[cfg(feature = "parallel")] 147 | let evaluations = summer.map(|s| s.evals).reduce( 148 | || vec![R::zero(); degree + 1], 149 | |mut evaluations, levals| { 150 | evaluations 151 | .iter_mut() 152 | .zip(levals) 153 | .for_each(|(e, l)| *e += l); 154 | evaluations 155 | }, 156 | ); 157 | 158 | #[cfg(not(feature = "parallel"))] 159 | let evaluations = summer.evals; 160 | 161 | ProverMsg { evaluations } 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /latticefold/src/utils/sumcheck/utils.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023 Espresso Systems (espressosys.com) 2 | // This file is part of the HyperPlonk library. 3 | 4 | // Adapted for rings by Nethermind 5 | 6 | //! This module defines our main mathematical object `DensePolynomial`; and 7 | //! various functions associated with it. 
8 | 9 | use ark_std::{ 10 | cfg_iter_mut, end_timer, 11 | rand::{Rng, RngCore}, 12 | start_timer, 13 | string::ToString, 14 | vec::*, 15 | }; 16 | #[cfg(feature = "parallel")] 17 | use rayon::prelude::*; 18 | use stark_rings::Ring; 19 | use stark_rings_poly::{ 20 | mle::DenseMultilinearExtension, 21 | polynomials::{random_mle_list, ArithErrors, RefCounter}, 22 | }; 23 | 24 | pub fn rand_poly( 25 | nv: usize, 26 | num_multiplicands_range: (usize, usize), 27 | num_products: usize, 28 | rng: &mut impl RngCore, 29 | ) -> Result< 30 | ( 31 | (Vec>, usize), 32 | Vec<(R, Vec)>, 33 | R, 34 | ), 35 | ArithErrors, 36 | > { 37 | let mut sum = R::zero(); 38 | let mut mles = vec![]; 39 | let mut products = Vec::with_capacity(num_products); 40 | let mut degree = 0; 41 | let mut current_mle_index = 0; 42 | for _ in 0..num_products { 43 | let num_multiplicands = rng.gen_range(num_multiplicands_range.0..num_multiplicands_range.1); 44 | degree = num_multiplicands.max(degree); 45 | let (product, product_sum) = random_mle_list(nv, num_multiplicands, rng); 46 | let product = product 47 | .into_iter() 48 | .map(|p| RefCounter::into_inner(p).unwrap()) 49 | .collect::>(); 50 | 51 | let coefficient = R::rand(rng); 52 | mles.extend(product); 53 | sum += product_sum * coefficient; 54 | 55 | let indices: Vec = 56 | (current_mle_index..current_mle_index + num_multiplicands).collect(); 57 | products.push((coefficient, indices)); 58 | current_mle_index += num_multiplicands; 59 | } 60 | 61 | Ok(((mles, degree), products, sum)) 62 | } 63 | 64 | pub fn rand_poly_comb_fn(vals: &[R], products: &[(R, Vec)]) -> R { 65 | let mut result = R::zero(); 66 | for (coef, indices) in products { 67 | let mut term = *coef; 68 | for &i in indices { 69 | term *= vals[i]; 70 | } 71 | result += term; 72 | } 73 | 74 | result 75 | } 76 | 77 | /// Evaluate eq polynomial. 
78 | pub fn eq_eval(x: &[R], y: &[R]) -> Result { 79 | if x.len() != y.len() { 80 | return Err(ArithErrors::InvalidParameters( 81 | "x and y have different length".to_string(), 82 | )); 83 | } 84 | let start = start_timer!(|| "eq_eval"); 85 | let mut res = R::one(); 86 | for (&xi, &yi) in x.iter().zip(y.iter()) { 87 | let xi_yi = xi * yi; 88 | res *= xi_yi + xi_yi - xi - yi + R::one(); 89 | } 90 | end_timer!(start); 91 | Ok(res) 92 | } 93 | 94 | /// This function build the eq(x, r) polynomial for any given r. 95 | /// 96 | /// Evaluate 97 | /// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) 98 | /// over r, which is 99 | /// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) 100 | pub fn build_eq_x_r(r: &[R]) -> Result, ArithErrors> { 101 | let evals = build_eq_x_r_vec(r)?; 102 | let mle = DenseMultilinearExtension::from_evaluations_vec(r.len(), evals); 103 | 104 | Ok(mle) 105 | } 106 | /// This function build the eq(x, r) polynomial for any given r, and output the 107 | /// evaluation of eq(x, r) in its vector form. 108 | /// 109 | /// Evaluate 110 | /// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) 111 | /// over r, which is 112 | /// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) 113 | pub fn build_eq_x_r_vec(r: &[R]) -> Result, ArithErrors> { 114 | // we build eq(x,r) from its evaluations 115 | // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars 116 | // for example, with num_vars = 4, x is a binary vector of 4, then 117 | // 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3) 118 | // 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3) 119 | // 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3) 120 | // 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3) 121 | // .... 122 | // 1 1 1 1 -> r0 * r1 * r2 * r3 123 | // we will need 2^num_var evaluations 124 | 125 | let mut eval = Vec::new(); 126 | build_eq_x_r_helper(r, &mut eval)?; 127 | 128 | Ok(eval) 129 | } 130 | 131 | /// A helper function to build eq(x, r) recursively. 
132 | /// This function takes `r.len()` steps, and for each step it requires a maximum 133 | /// `r.len()-1` multiplications. 134 | fn build_eq_x_r_helper(r: &[R], buf: &mut Vec) -> Result<(), ArithErrors> { 135 | if r.is_empty() { 136 | return Err(ArithErrors::InvalidParameters("r length is 0".to_string())); 137 | } else if r.len() == 1 { 138 | // initializing the buffer with [1-r_0, r_0] 139 | buf.push(R::one() - r[0]); 140 | buf.push(r[0]); 141 | } else { 142 | build_eq_x_r_helper(&r[1..], buf)?; 143 | 144 | // suppose at the previous step we received [b_1, ..., b_k] 145 | // for the current step we will need 146 | // if x_0 = 0: (1-r0) * [b_1, ..., b_k] 147 | // if x_0 = 1: r0 * [b_1, ..., b_k] 148 | // let mut res = vec![]; 149 | // for &b_i in buf.iter() { 150 | // let tmp = r[0] * b_i; 151 | // res.push(b_i - tmp); 152 | // res.push(tmp); 153 | // } 154 | // *buf = res; 155 | 156 | let mut res = vec![R::zero(); buf.len() << 1]; 157 | cfg_iter_mut!(res).enumerate().for_each(|(i, val)| { 158 | let bi = buf[i >> 1]; 159 | let tmp = r[0] * bi; 160 | if (i & 1) == 0 { 161 | *val = bi - tmp; 162 | } else { 163 | *val = tmp; 164 | } 165 | }); 166 | *buf = res; 167 | } 168 | 169 | Ok(()) 170 | } 171 | 172 | /// Decompose an integer into a binary vector in little endian. 
173 | #[cfg(feature = "std")] 174 | pub fn bit_decompose(input: u64, num_var: usize) -> Vec { 175 | let mut res = Vec::with_capacity(num_var); 176 | let mut i = input; 177 | for _ in 0..num_var { 178 | res.push((i & 1) == 1); 179 | i >>= 1; 180 | } 181 | res 182 | } 183 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly-2025-03-06 2 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | group_imports = "StdExternalCrate" 2 | imports_granularity = "Crate" --------------------------------------------------------------------------------