├── .github ├── ISSUE_TEMPLATE │ └── eli15.md ├── dependabot.yml └── workflows │ ├── ci.yml │ ├── lints-beta.yml │ ├── lints-stable.yml │ └── trigger_proverbench_dispatch.yml ├── .gitignore ├── COPYING.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── book ├── .gitignore ├── Makefile ├── book.toml ├── edithtml.sh ├── macros.txt └── src │ ├── IDENTIFIERS.json │ ├── README.md │ ├── SUMMARY.md │ ├── background.md │ ├── background │ ├── curves.md │ ├── fields.md │ ├── groups.md │ ├── pc-ipa.md │ ├── plonkish.md │ ├── polynomials.md │ └── recursion.md │ ├── concepts.md │ ├── concepts │ ├── arithmetization.md │ ├── chips.md │ ├── gadgets.md │ └── proofs.md │ ├── design.md │ ├── design │ ├── gadgets.md │ ├── gadgets │ │ ├── decomposition.md │ │ ├── ecc.md │ │ ├── ecc │ │ │ ├── addition.md │ │ │ ├── fixed-base-scalar-mul.md │ │ │ ├── var-base-scalar-mul.md │ │ │ └── witnessing-points.md │ │ ├── sha256.md │ │ ├── sha256 │ │ │ ├── bit_reassignment.png │ │ │ ├── compression.png │ │ │ ├── low_sigma_0.png │ │ │ ├── low_sigma_1.png │ │ │ ├── table16.md │ │ │ ├── upp_sigma_0.png │ │ │ └── upp_sigma_1.png │ │ ├── sinsemilla.md │ │ └── sinsemilla │ │ │ └── merkle-crh.md │ ├── implementation.md │ ├── implementation │ │ ├── fields.md │ │ ├── proofs.md │ │ └── selector-combining.md │ ├── protocol.md │ ├── proving-system.md │ └── proving-system │ │ ├── circuit-commitments.md │ │ ├── comparison.md │ │ ├── inner-product.md │ │ ├── lookup.md │ │ ├── multipoint-opening.md │ │ ├── permutation-diagram.png │ │ ├── permutation-diagram.svg │ │ ├── permutation.md │ │ └── vanishing.md │ ├── user.md │ └── user │ ├── dev-tools.md │ ├── gadgets.md │ ├── lookup-tables.md │ ├── simple-example.md │ └── tips-and-tricks.md ├── halo2 ├── CHANGELOG.md ├── Cargo.toml ├── katex-header.html └── src │ └── lib.rs ├── halo2_gadgets ├── CHANGELOG.md ├── Cargo.toml ├── README.md ├── benches │ ├── poseidon.rs │ ├── primitives.rs │ └── sha256.rs ├── katex-header.html ├── proptest-regressions │ └── constants │ │ └── util.txt └── src │ ├── ecc.rs │ ├── ecc │ ├── chip.rs │ └── chip │ │ ├── add.rs │ │ ├── add_incomplete.rs │ │ ├── constants.rs │ │ ├── mul.rs │ │ ├── mul │ │ ├── complete.rs │ │ ├── incomplete.rs │ │ └── overflow.rs │ │ ├── mul_fixed.rs │ │ ├── mul_fixed │ │ ├── base_field_elem.rs │ │ ├── full_width.rs │ │ └── short.rs │ │ └── witness_point.rs │ ├── lib.rs │ ├── poseidon.rs │ ├── poseidon │ ├── pow5.rs │ ├── primitives.rs │ └── primitives │ │ ├── fp.rs │ │ ├── fq.rs │ │ ├── grain.rs │ │ ├── mds.rs │ │ ├── p128pow5t3.rs │ │ └── test_vectors.rs │ ├── sha256.rs │ ├── sha256 │ ├── table16.rs │ └── table16 │ │ ├── compression.rs │ │ ├── compression │ │ ├── compression_gates.rs │ │ ├── compression_util.rs │ │ ├── subregion_digest.rs │ │ ├── subregion_initial.rs │ │ └── subregion_main.rs │ │ ├── gates.rs │ │ ├── message_schedule.rs │ │ ├── message_schedule │ │ ├── schedule_gates.rs │ │ ├── schedule_util.rs │ │ ├── subregion1.rs │ │ ├── subregion2.rs │ │ └── subregion3.rs │ │ ├── spread_table.rs │ │ └── util.rs │ ├── sinsemilla.rs │ ├── sinsemilla │ ├── chip.rs │ ├── chip │ │ ├── generator_table.rs │ │ └── hash_to_point.rs │ ├── merkle.rs │ ├── merkle │ │ └── chip.rs │ ├── message.rs │ ├── primitives.rs │ └── primitives │ │ ├── addition.rs │ │ └── sinsemilla_s.rs │ ├── utilities.rs │ └── utilities │ ├── cond_swap.rs │ ├── decompose_running_sum.rs │ └── lookup_range_check.rs ├── halo2_proofs ├── CHANGELOG.md ├── Cargo.toml ├── README.md ├── benches │ ├── arithmetic.rs │ ├── commit_zk.rs │ ├── 
dev_lookup.rs │ ├── fft.rs │ ├── hashtocurve.rs │ ├── lookups.rs │ └── plonk.rs ├── examples │ ├── circuit-layout.rs │ ├── cost-model.rs │ ├── serialization.rs │ ├── shuffle.rs │ ├── shuffle_api.rs │ ├── simple-example.rs │ ├── two-chip.rs │ └── vector-mul.rs ├── katex-header.html ├── proptest-regressions │ └── plonk │ │ ├── assigned.txt │ │ └── circuit │ │ └── compress_selectors.txt ├── src │ ├── arithmetic.rs │ ├── circuit.rs │ ├── circuit │ │ ├── floor_planner.rs │ │ ├── floor_planner │ │ │ ├── single_pass.rs │ │ │ ├── v1.rs │ │ │ └── v1 │ │ │ │ └── strategy.rs │ │ ├── layouter.rs │ │ ├── table_layouter.rs │ │ └── value.rs │ ├── dev.rs │ ├── dev │ │ ├── cost.rs │ │ ├── failure.rs │ │ ├── failure │ │ │ └── emitter.rs │ │ ├── gates.rs │ │ ├── graph.rs │ │ ├── graph │ │ │ └── layout.rs │ │ ├── metadata.rs │ │ ├── tfp.rs │ │ └── util.rs │ ├── helpers.rs │ ├── lib.rs │ ├── multicore.rs │ ├── plonk.rs │ ├── plonk │ │ ├── assigned.rs │ │ ├── circuit.rs │ │ ├── circuit │ │ │ └── compress_selectors.rs │ │ ├── error.rs │ │ ├── evaluation.rs │ │ ├── keygen.rs │ │ ├── lookup.rs │ │ ├── lookup │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ ├── mv_lookup.rs │ │ ├── mv_lookup │ │ │ ├── exec_info.json │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ ├── permutation.rs │ │ ├── permutation │ │ │ ├── keygen.rs │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ ├── prover.rs │ │ ├── shuffle.rs │ │ ├── shuffle │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ ├── vanishing.rs │ │ ├── vanishing │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ ├── verifier.rs │ │ └── verifier │ │ │ └── batch.rs │ ├── poly.rs │ ├── poly │ │ ├── commitment.rs │ │ ├── domain.rs │ │ ├── evaluator.rs │ │ ├── ipa │ │ │ ├── commitment.rs │ │ │ ├── commitment │ │ │ │ ├── prover.rs │ │ │ │ └── verifier.rs │ │ │ ├── mod.rs │ │ │ ├── msm.rs │ │ │ ├── multiopen.rs │ │ │ ├── multiopen │ │ │ │ ├── prover.rs │ │ │ │ └── verifier.rs │ │ │ └── strategy.rs │ │ ├── kzg │ │ │ ├── commitment.rs │ │ │ ├── mod.rs │ │ │ ├── msm.rs │ │ │ ├── multiopen.rs │ │ │ ├── multiopen │ │ │ │ ├── gwc.rs │ │ │ │ ├── gwc │ │ │ │ │ ├── prover.rs │ │ │ │ │ └── verifier.rs │ │ │ │ ├── shplonk.rs │ │ │ │ └── shplonk │ │ │ │ │ ├── prover.rs │ │ │ │ │ └── verifier.rs │ │ │ └── strategy.rs │ │ ├── multiopen.rs │ │ ├── multiopen_test.rs │ │ ├── query.rs │ │ └── strategy.rs │ ├── transcript.rs │ └── transcript │ │ ├── blake2b.rs │ │ ├── mod.rs.bak │ │ └── poseidon.rs └── tests │ └── plonk_api.rs └── rust-toolchain /.github/ISSUE_TEMPLATE/eli15.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: ELI15 improvement request 3 | about: Let us know how the Halo 2 book could be improved! 4 | title: 'ELI15: ' 5 | labels: 'ELI15' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Which section of the Halo 2 book were you reading? 11 | 12 | ## What was unclear? 13 | 14 | ## What would help to make it clearer to you? 
15 | 16 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | timezone: Etc/UTC 8 | open-pull-requests-limit: 10 9 | reviewers: 10 | - str4d 11 | assignees: 12 | - str4d 13 | labels: 14 | - "A-CI" 15 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI checks 2 | 3 | on: 4 | merge_group: 5 | pull_request: 6 | push: 7 | branches: 8 | - main 9 | 10 | jobs: 11 | test: 12 | name: Test on ${{ matrix.os }} with ${{ matrix.feature_set }} features 13 | runs-on: ${{ matrix.os }} 14 | strategy: 15 | matrix: 16 | feature_set: [basic, all] 17 | os: [ubuntu-latest, windows-latest, macOS-latest] 18 | include: 19 | - feature_set: basic 20 | features: batch,dev-graph,gadget-traces 21 | - feature_set: all 22 | features: batch,dev-graph,gadget-traces,multicore,test-dev-graph,thread-safe-region,sanity-checks,circuit-params 23 | 24 | steps: 25 | - uses: actions/checkout@v3 26 | - uses: actions-rs/toolchain@v1 27 | with: 28 | override: false 29 | - name: Run tests 30 | uses: actions-rs/cargo@v1 31 | with: 32 | command: test 33 | args: --verbose --release --workspace --no-default-features --features "${{ matrix.features }}" 34 | 35 | build: 36 | name: Build target ${{ matrix.target }} 37 | runs-on: ubuntu-latest 38 | strategy: 39 | matrix: 40 | target: 41 | - wasm32-unknown-unknown 42 | - wasm32-wasi 43 | 44 | steps: 45 | - uses: actions/checkout@v3 46 | - uses: actions-rs/toolchain@v1 47 | with: 48 | override: false 49 | - name: Add target 50 | run: rustup target add ${{ matrix.target }} 51 | - name: cargo build 52 | uses: actions-rs/cargo@v1 53 | with: 54 | command: build 55 | args: --no-default-features --features batch,dev-graph,gadget-traces --target ${{ matrix.target }} 56 | 57 | bitrot: 58 | name: Bitrot check 59 | runs-on: ubuntu-latest 60 | 61 | steps: 62 | - uses: actions/checkout@v3 63 | - uses: actions-rs/toolchain@v1 64 | with: 65 | override: false 66 | # Build benchmarks to prevent bitrot 67 | - name: Build benchmarks 68 | uses: actions-rs/cargo@v1 69 | with: 70 | command: build 71 | args: --benches --examples --all-features 72 | 73 | doc-links: 74 | name: Intra-doc links 75 | runs-on: ubuntu-latest 76 | 77 | steps: 78 | - uses: actions/checkout@v3 79 | - uses: actions-rs/toolchain@v1 80 | with: 81 | override: false 82 | - name: cargo fetch 83 | uses: actions-rs/cargo@v1 84 | with: 85 | command: fetch 86 | 87 | # Ensure intra-documentation links all resolve correctly 88 | # Requires #![deny(intra_doc_link_resolution_failure)] in crates. 
89 | - name: Check intra-doc links 90 | uses: actions-rs/cargo@v1 91 | with: 92 | command: doc 93 | args: --all --document-private-items 94 | 95 | fmt: 96 | name: Rustfmt 97 | timeout-minutes: 30 98 | runs-on: ubuntu-latest 99 | steps: 100 | - uses: actions/checkout@v3 101 | - uses: actions-rs/toolchain@v1 102 | with: 103 | override: false 104 | - run: rustup component add rustfmt 105 | - uses: actions-rs/cargo@v1 106 | with: 107 | command: fmt 108 | args: --all -- --check 109 | 110 | -------------------------------------------------------------------------------- /.github/workflows/lints-beta.yml: -------------------------------------------------------------------------------- 1 | name: Beta lints 2 | 3 | # These lints are only informative, so we only run them directly on branches 4 | # and not trial-merges of PRs, to reduce noise. 5 | on: push 6 | 7 | jobs: 8 | clippy-beta: 9 | name: Clippy (beta) 10 | timeout-minutes: 30 11 | runs-on: ubuntu-latest 12 | continue-on-error: true 13 | 14 | steps: 15 | - uses: actions/checkout@v3 16 | - uses: actions-rs/toolchain@v1 17 | with: 18 | components: clippy 19 | override: false 20 | - name: Run Clippy (beta) 21 | uses: actions-rs/clippy-check@v1 22 | continue-on-error: true 23 | with: 24 | name: Clippy (beta) 25 | token: ${{ secrets.GITHUB_TOKEN }} 26 | args: --all-features --all-targets -- -W clippy::all 27 | -------------------------------------------------------------------------------- /.github/workflows/lints-stable.yml: -------------------------------------------------------------------------------- 1 | name: Stable lints 2 | 3 | # We only run these lints on trial-merges of PRs to reduce noise. 4 | on: pull_request 5 | 6 | jobs: 7 | clippy: 8 | name: Clippy (1.56.1) 9 | timeout-minutes: 30 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v3 14 | - uses: actions-rs/toolchain@v1 15 | with: 16 | components: clippy 17 | override: false 18 | - name: Run clippy 19 | uses: actions-rs/clippy-check@v1 20 | with: 21 | name: Clippy (1.56.1) 22 | token: ${{ secrets.GITHUB_TOKEN }} 23 | args: --all-features --all-targets -- -D warnings 24 | -------------------------------------------------------------------------------- /.github/workflows/trigger_proverbench_dispatch.yml: -------------------------------------------------------------------------------- 1 | name: Prover Bench on halo2 PR 2 | on: 3 | pull_request: 4 | types: [labeled , ready_for_review] 5 | jobs: 6 | Prover-benches-via-repo-dispatch-from-halo2-fork: 7 | if: ${{ github.event.label.name == 'benchmarks' }} 8 | runs-on: ubuntu-latest 9 | env: 10 | GH_USER: ${{ github.actor }} 11 | _TOKEN: ${{ secrets.BENCHMARKER }} 12 | REVISION: ${{ github.event.pull_request.head.sha }} 13 | REPO: ${{ github.event.repository.name }} 14 | PR_NUMBER: ${{ github.event.number }} 15 | steps: 16 | - name: Install curl 17 | run: | 18 | sudo apt-get update 19 | sudo apt-get install curl 20 | - name: Send repo api call 21 | run: | 22 | curl \ 23 | -X POST \ 24 | -H "Accept: application/vnd.github.v3+json" \ 25 | -u ZKEVMBOT:${{ env._TOKEN }} \ 26 | https://api.github.com/repos/appliedzkp/zkevm-circuits/actions/workflows/ProverBenchFromHalo2.yml/dispatches \ 27 | -d "{\"ref\":\"main\",\"inputs\":{\"halo2pr\":\"${{ env.PR_NUMBER }}\",\"revision\":\"${{ env.REVISION }}\",\"event-type\":\"halo2_wfdispatch\",\"ghuser\": \"${{ env.GH_USER }}\"}}" 28 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | .vscode 4 | **/*.html 5 | .DS_Store -------------------------------------------------------------------------------- /COPYING.md: -------------------------------------------------------------------------------- 1 | # License 2 | 3 | Licensed under either of 4 | 5 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 6 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 7 | 8 | at your option. 9 | 10 | # Contribution 11 | 12 | Unless you explicitly state otherwise, any contribution intentionally 13 | submitted for inclusion in the work by you, as defined in the Apache-2.0 14 | license, shall be dual licensed as above, without any additional terms or 15 | conditions. 16 | 17 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = [ 4 | "halo2", 5 | "halo2_gadgets", 6 | "halo2_proofs", 7 | ] 8 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020-2022 The Electric Coin Company 4 | Copyright (c) 2022 The Halo 2 developers 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in 14 | all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 | THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # halo2 [![Crates.io](https://img.shields.io/crates/v/halo2.svg)](https://crates.io/crates/halo2) # 2 | 3 | ## [Documentation](https://docs.rs/halo2) 4 | 5 | ## Minimum Supported Rust Version 6 | 7 | Requires Rust **1.65.0** or higher. 8 | 9 | Minimum supported Rust version can be changed in the future, but it will be done with a 10 | minor version bump. 11 | 12 | ## Controlling parallelism 13 | 14 | `halo2` currently uses [rayon](https://github.com/rayon-rs/rayon) for parallel computation. 15 | The `RAYON_NUM_THREADS` environment variable can be used to set the number of threads. 16 | 17 | You can disable `rayon` by disabling the `"multicore"` feature. 18 | Warning! Halo2 will lose access to parallelism if you disable the `"multicore"` feature. 
19 | This will significantly degrade performance. 20 | 21 | ## License 22 | 23 | Licensed under either of 24 | 25 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 26 | http://www.apache.org/licenses/LICENSE-2.0) 27 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 28 | 29 | at your option. 30 | 31 | ### Contribution 32 | 33 | Unless you explicitly state otherwise, any contribution intentionally 34 | submitted for inclusion in the work by you, as defined in the Apache-2.0 35 | license, shall be dual licensed as above, without any additional terms or 36 | conditions. 37 | -------------------------------------------------------------------------------- /book/.gitignore: -------------------------------------------------------------------------------- 1 | book 2 | -------------------------------------------------------------------------------- /book/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all 2 | all: 3 | find src -type f -a -name '*.md' |sed 's/[.]md$$/.html/g' |xargs $(MAKE) 4 | 5 | clean: 6 | find src -type f -a -name '*.html' -print0 |xargs -0 rm 7 | 8 | %.html: %.md 9 | pandoc --katex --from=markdown --to=html "$<" "--output=$@" 10 | ./edithtml.sh "$@" "$<" 11 | -------------------------------------------------------------------------------- /book/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = [ 3 | "Jack Grigg", 4 | "Sean Bowe", 5 | "Daira Hopwood", 6 | "Ying Tong Lai", 7 | ] 8 | language = "en" 9 | multilingual = false 10 | src = "src" 11 | title = "The halo2 Book" 12 | 13 | [preprocessor.katex] 14 | macros = "macros.txt" 15 | -------------------------------------------------------------------------------- /book/edithtml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cat - "$1" > "$1.prefix" < 5 | 6 | 7 | 8 | 9 | 10 | $2 11 | 17 | 18 | 19 | 21 | 22 | 23 | EOF 24 | cat "$1.prefix" - >"$1" < 26 | 27 | EOF 28 | rm -f "$1.prefix" 29 | -------------------------------------------------------------------------------- /book/macros.txt: -------------------------------------------------------------------------------- 1 | # Conventions 2 | 3 | \bconcat:{\mathop{\kern 0.1em||\kern 0.1em}} 4 | \Repr:{\star} 5 | 6 | # Conversions 7 | 8 | \ItoLEBSP:{\mathsf{I2LEBSP}_{#1}} 9 | 10 | # Fields and curves 11 | 12 | \BaseLength:{\ell^\mathsf{#1\vphantom{p}}_{\mathsf{base}}} 13 | 14 | # Commitments and hashes 15 | 16 | \SinsemillaHash:{\mathsf{SinsemillaHash}} 17 | \SinsemillaCommit:{\mathsf{SinsemillaCommit}} 18 | \SinsemillaShortCommit:{\mathsf{SinsemillaShortCommit}} 19 | 20 | # Circuit constraint helper methods 21 | 22 | \BoolCheck:{\texttt{bool\_check}({#1})} 23 | \Ternary:{\texttt{ternary}({{#1}, {#2}, {#3}})} 24 | \RangeCheck:{\texttt{range\_check}({#1, #2})} 25 | \ShortLookupRangeCheck:{\texttt{short\_lookup\_range\_check}({#1})} 26 | 27 | # Halo 2 proof 28 | 29 | \field:{\mathbb{F}} 30 | \group:{\mathbb{G}} 31 | \setup:{\textnormal{Setup}} 32 | \prover:{\mathcal{P}} 33 | \verifier:{\mathcal{V}} 34 | \sec:{\lambda} 35 | \negl:{\textnormal{negl}(\lambda)} 36 | \pp:{\mathsf{pp}} 37 | \ip:{\textnormal{IP}} 38 | \relation:{\mathcal{R}} 39 | \a:{\mathcal{A}} 40 | \sim:{\mathcal{S}} 41 | \tr:{\textnormal{tr}} 42 | \srs:{\textnormal{SRS}} 43 | \srwee:{\textnormal{sr-wee}} 44 | \real:{\textnormal{real}} 45 | \ideal:{\textnormal{ideal}} 46 | 
\weereal:{\textnormal{WEE-real}} 47 | \weeideal:{\textnormal{WEE-ideal}} 48 | \oracle:{\mathcal{O}} 49 | \ch:{\mathsf{Ch}} 50 | \badch:{\mathsf{BadCh}} 51 | \adv:{\mathsf{Adv}} 52 | \bottom:{\perp} 53 | \alg:{#1_\textnormal{alg}} 54 | \zero:{\mathcal{O}} 55 | \dlrel:{\textsf{dl-rel}} 56 | \game:{\mathsf{G}} 57 | \innerprod:{\langle{#1},{#2}\rangle} 58 | \dlgame:{\mathsf{G}^\dlrel_{\group,n}} 59 | \distinguisher:{\mathcal{D}} 60 | \extractor:{\mathcal{E}} 61 | \state:{\mathsf{st}_{#1}} 62 | \halo:{\textsf{Halo}} 63 | \lo:{\textnormal{lo}} 64 | \hi:{\textnormal{hi}} 65 | \protocol:{\halo} 66 | \extractwitness:{\textnormal{ExtractWitness}} 67 | \pfail:{p_\textnormal{fail}} 68 | \repr:\{\kern-0.1em {#1} \kern-0.1em\}^{#2} 69 | \rep:{\repr{#1}{}} 70 | \repv:{\repr{#1}{\mathbf{#2}}_{#3}} 71 | \dlreladv:{\mathcal{H}} 72 | \mr:{\mathcal{M}^{#1}_{#2}({#3})} 73 | \mv:{\mr{\mathbf{#1}}{#2}{#3}} 74 | \m:{\mr{#1}{}{#2}} 75 | \z:{\mathcal{Z}_{#1}({#2}, {#3})} 76 | \trprefix:{{#1}|_{#2}} 77 | -------------------------------------------------------------------------------- /book/src/IDENTIFIERS.json: -------------------------------------------------------------------------------- 1 | { 2 | "decompose-combined-lookup": "design/gadgets/decomposition.html#combined-lookup-expression", 3 | "decompose-short-lookup": "design/gadgets/decomposition.html#short-range-check", 4 | "decompose-short-range": "design/gadgets/decomposition.html#short-range-decomposition", 5 | "ecc-complete-addition": "design/gadgets/ecc/addition.html#complete-addition-constraints", 6 | "ecc-incomplete-addition": "design/gadgets/ecc/addition.html#incomplete-addition-constraints", 7 | "ecc-fixed-mul-base-canonicity": "design/gadgets/ecc/fixed-base-scalar-mul.html#base-field-element", 8 | "ecc-fixed-mul-coordinates": "design/gadgets/ecc/fixed-base-scalar-mul.html#constrain-coordinates", 9 | "ecc-fixed-mul-full-word": "design/gadgets/ecc/fixed-base-scalar-mul.html#full-width-scalar", 10 | "ecc-fixed-mul-load-base": "design/gadgets/ecc/fixed-base-scalar-mul.html#load-fixed-base", 11 | "ecc-fixed-mul-short-msb": "design/gadgets/ecc/fixed-base-scalar-mul.html#constrain-short-signed-msb", 12 | "ecc-fixed-mul-short-conditional-neg": "design/gadgets/ecc/fixed-base-scalar-mul.html#constrain-short-signed-conditional-neg", 13 | "ecc-var-mul-complete-gate": "design/gadgets/ecc/var-base-scalar-mul.html#complete-gate", 14 | "ecc-var-mul-incomplete-first-row": "design/gadgets/ecc/var-base-scalar-mul.html#incomplete-first-row-gate", 15 | "ecc-var-mul-incomplete-last-row": "design/gadgets/ecc/var-base-scalar-mul.html#incomplete-last-row-gate", 16 | "ecc-var-mul-incomplete-main-loop": "design/gadgets/ecc/var-base-scalar-mul.html#incomplete-main-loop-gate", 17 | "ecc-var-mul-lsb-gate": "design/gadgets/ecc/var-base-scalar-mul.html#lsb-gate", 18 | "ecc-var-mul-overflow": "design/gadgets/ecc/var-base-scalar-mul.html#overflow-check-constraints", 19 | "ecc-var-mul-witness-scalar": "design/gadgets/ecc/var-base-scalar-mul.html#witness-scalar", 20 | "ecc-witness-point": "design/gadgets/ecc/witnessing-points.html#points-including-the-identity", 21 | "ecc-witness-non-identity-point": "design/gadgets/ecc/witnessing-points.html#non-identity-points", 22 | "sinsemilla-constraints": "design/gadgets/sinsemilla.html#optimized-sinsemilla-gate", 23 | "sinsemilla-merkle-crh-bit-lengths": "design/gadgets/sinsemilla/merkle-crh.html#bit-length-constraints", 24 | "sinsemilla-merkle-crh-decomposition": "design/gadgets/sinsemilla/merkle-crh.html#decomposition-constraints" 25 | } 
-------------------------------------------------------------------------------- /book/src/README.md: -------------------------------------------------------------------------------- 1 | {{#include ../../README.md}} 2 | -------------------------------------------------------------------------------- /book/src/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # The halo2 Book 2 | 3 | [halo2](README.md) 4 | - [Concepts](concepts.md) 5 | - [Proof systems](concepts/proofs.md) 6 | - [PLONKish Arithmetization](concepts/arithmetization.md) 7 | - [Chips](concepts/chips.md) 8 | - [Gadgets](concepts/gadgets.md) 9 | - [User Documentation](user.md) 10 | - [Developer tools](user/dev-tools.md) 11 | - [A simple example](user/simple-example.md) 12 | - [Lookup tables](user/lookup-tables.md) 13 | - [Gadgets](user/gadgets.md) 14 | - [Tips and tricks](user/tips-and-tricks.md) 15 | - [Design](design.md) 16 | - [Proving system](design/proving-system.md) 17 | - [Lookup argument](design/proving-system/lookup.md) 18 | - [Permutation argument](design/proving-system/permutation.md) 19 | - [Circuit commitments](design/proving-system/circuit-commitments.md) 20 | - [Vanishing argument](design/proving-system/vanishing.md) 21 | - [Multipoint opening argument](design/proving-system/multipoint-opening.md) 22 | - [Inner product argument](design/proving-system/inner-product.md) 23 | - [Comparison to other work](design/proving-system/comparison.md) 24 | - [Protocol Description](design/protocol.md) 25 | - [Implementation](design/implementation.md) 26 | - [Proofs](design/implementation/proofs.md) 27 | - [Fields](design/implementation/fields.md) 28 | - [Selector combining](design/implementation/selector-combining.md) 29 | - [Gadgets](design/gadgets.md) 30 | - [Elliptic curve cryptography](design/gadgets/ecc.md) 31 | - [Witnessing points](design/gadgets/ecc/witnessing-points.md) 32 | - [Incomplete and complete addition](design/gadgets/ecc/addition.md) 33 | - [Fixed-base scalar multiplication](design/gadgets/ecc/fixed-base-scalar-mul.md) 34 | - [Variable-base scalar multiplication](design/gadgets/ecc/var-base-scalar-mul.md) 35 | - [Sinsemilla](design/gadgets/sinsemilla.md) 36 | - [MerkleCRH](design/gadgets/sinsemilla/merkle-crh.md) 37 | - [Decomposition](design/gadgets/decomposition.md) 38 | - [SHA-256](design/gadgets/sha256.md) 39 | - [16-bit table chip](design/gadgets/sha256/table16.md) 40 | - [Background Material](background.md) 41 | - [Fields](background/fields.md) 42 | - [Polynomials](background/polynomials.md) 43 | - [Cryptographic groups](background/groups.md) 44 | - [Elliptic curves](background/curves.md) 45 | - [Polynomial commitment using inner product argument](background/pc-ipa.md) 46 | - [Recursion](background/recursion.md) 47 | -------------------------------------------------------------------------------- /book/src/background.md: -------------------------------------------------------------------------------- 1 | # Background Material 2 | 3 | This section covers the background material required to understand the Halo 2 proving 4 | system. It is targeted at an ELI15 (Explain It Like I'm 15) level; if you think anything 5 | could do with additional explanation, [let us know]! 
6 | 7 | [let us know]: https://github.com/zcash/halo2/issues/new/choose 8 | -------------------------------------------------------------------------------- /book/src/background/groups.md: -------------------------------------------------------------------------------- 1 | # Cryptographic groups 2 | 3 | In the section [Inverses and groups](fields.md#inverses-and-groups) we introduced the 4 | concept of *groups*. A group has an identity and a group operation. In this section we 5 | will write groups additively, i.e. the identity is $\mathcal{O}$ and the group operation 6 | is $+$. 7 | 8 | Some groups can be used as *cryptographic groups*. At the risk of oversimplifying, this 9 | means that the problem of finding a discrete logarithm of a group element $P$ to a given 10 | base $G$, i.e. finding $x$ such that $P = [x] G$, is hard in general. 11 | 12 | ## Pedersen commitment 13 | The Pedersen commitment [[P99]] is a way to commit to a secret message in a verifiable 14 | way. It uses two random public generators $G, H \in \mathbb{G},$ where $\mathbb{G}$ is a 15 | cryptographic group of order $p$. A random secret $r$ is chosen in $\mathbb{Z}_q$, and the 16 | message to commit to $m$ is from any subset of $\mathbb{Z}_q$. The commitment is 17 | 18 | $$c = \text{Commit}(m,r)=[m]G + [r]H.$$ 19 | 20 | To open the commitment, the committer reveals $m$ and $r,$ thus allowing anyone to verify 21 | that $c$ is indeed a commitment to $m.$ 22 | 23 | [P99]: https://link.springer.com/content/pdf/10.1007%2F3-540-46766-1_9.pdf#page=3 24 | 25 | Notice that the Pedersen commitment scheme is homomorphic: 26 | 27 | $$ 28 | \begin{aligned} 29 | \text{Commit}(m,r) + \text{Commit}(m',r') &= [m]G + [r]H + [m']G + [r']H \\ 30 | &= [m + m']G + [r + r']H \\ 31 | &= \text{Commit}(m + m',r + r'). 32 | \end{aligned} 33 | $$ 34 | 35 | Assuming the discrete log assumption holds, Pedersen commitments are also perfectly hiding 36 | and computationally binding: 37 | 38 | * **hiding**: the adversary chooses messages $m_0, m_1.$ The committer commits to one of 39 | these messages $c = \text{Commit}(m_b,r), b \in \{0,1\}.$ Given $c,$ the probability of 40 | the adversary guessing the correct $b$ is no more than $\frac{1}{2}$. 41 | * **binding**: the adversary cannot pick two different messages $m_0 \neq m_1,$ and 42 | randomness $r_0, r_1,$ such that $\text{Commit}(m_0,r_0) = \text{Commit}(m_1,r_1).$ 43 | 44 | ### Vector Pedersen commitment 45 | We can use a variant of the Pedersen commitment scheme to commit to multiple messages at 46 | once, $\mathbf{m} = (m_0, \cdots, m_{n-1})$. This time, we'll have to sample a corresponding 47 | number of random public generators $\mathbf{G} = (G_0, \cdots, G_{n-1}),$ along with a 48 | single random generator $H$ as before (for use in hiding). Then, our commitment scheme is: 49 | 50 | $$ 51 | \begin{aligned} 52 | \text{Commit}(\mathbf{m}; r) &= \text{Commit}((m_0, \cdots, m_{n-1}); r) \\ 53 | &= [r]H + [m_0]G_0 + \cdots + [m_{n-1}]G_{n-1} \\ 54 | &= [r]H + \sum_{i= 0}^{n-1} [m_i]G_i. 55 | \end{aligned} 56 | $$ 57 | 58 | > TODO: is this positionally binding? 59 | 60 | ## Diffie–Hellman 61 | 62 | An example of a protocol that uses cryptographic groups is Diffie–Hellman key agreement 63 | [[DH1976]]. The Diffie–Hellman protocol is a method for two users, Alice and Bob, to 64 | generate a shared private key. It proceeds as follows: 65 | 66 | 1. 
Alice and Bob publicly agree on a large prime $p$ and a number $G,$ where 67 | $G$ is a primitive root $\pmod p.$ (That is, $G$ is a generator of the group 68 | $\mathbb{F}_p^\times.$) 69 | 2. Alice chooses a large random number $a$ as her private key. She computes her public key 70 | $A = [a]G \pmod p,$ and sends $A$ to Bob. 71 | 3. Similarly, Bob chooses a large random number $b$ as his private key. He computes his 72 | public key $B = [b]G \pmod p,$ and sends $B$ to Alice. 73 | 4. Now both Alice and Bob compute their shared key $K = [ab]G \pmod p,$ which Alice 74 | computes as 75 | $$K = [a]B \pmod p = [a]([b]G) \pmod p,$$ 76 | and Bob computes as 77 | $$K = [b]A \pmod p = [b]([a]G) \pmod p.$$ 78 | 79 | [DH1976]: https://ee.stanford.edu/~hellman/publications/24.pdf 80 | 81 | A potential eavesdropper would need to derive $K = [ab]G \pmod p$ knowing only 82 | $G, p, A = [a]G,$ and $B = [b]G$: in other words, they would need to either get the 83 | discrete logarithm $a$ from $A = [a]G$ or $b$ from $B = [b]G,$ which we assume to be 84 | computationally infeasible in $\mathbb{F}_p^\times.$ 85 | 86 | More generally, protocols that use similar ideas to Diffie–Hellman are used throughout 87 | cryptography. One way of instantiating a cryptographic group is as an 88 | [elliptic curve](curves.md). Before we go into detail on elliptic curves, we'll describe 89 | some algorithms that can be used for any group. 90 | 91 | ## Multiscalar multiplication 92 | 93 | ### TODO: Pippenger's algorithm 94 | Reference: https://jbootle.github.io/Misc/pippenger.pdf 95 | -------------------------------------------------------------------------------- /book/src/background/pc-ipa.md: -------------------------------------------------------------------------------- 1 | # Polynomial commitment using inner product argument 2 | We want to commit to some polynomial $p(X) \in \mathbb{F}_p[X]$, and be able to provably 3 | evaluate the committed polynomial at arbitrary points. The naive solution would be for the 4 | prover to simply send the polynomial's coefficients to the verifier: however, this 5 | requires $O(n)$ communication. Our polynomial commitment scheme gets the job done using 6 | $O(\log n)$ communication.
7 | 8 | ### `Setup` 9 | Given a parameter $d = 2^k,$ we generate the common reference string 10 | $\sigma = (\mathbb{G}, \mathbf{G}, H, \mathbb{F}_p)$ defining certain constants for this 11 | scheme: 12 | * $\mathbb{G}$ is a group of prime order $p;$ 13 | * $\mathbf{G} \in \mathbb{G}^d$ is a vector of $d$ random group elements; 14 | * $H \in \mathbb{G}$ is a random group element; and 15 | * $\mathbb{F}_p$ is the finite field of order $p.$ 16 | 17 | ### `Commit` 18 | The Pedersen vector commitment $\text{Commit}$ is defined as 19 | 20 | $$\text{Commit}(\sigma, p(X); r) = \langle\mathbf{a}, \mathbf{G}\rangle + [r]H,$$ 21 | 22 | for some polynomial $p(X) \in \mathbb{F}_p[X]$ and some blinding factor 23 | $r \in \mathbb{F}_p.$ Here, each element of the vector $\mathbf{a}_i \in \mathbb{F}_p$ is 24 | the coefficient for the $i$th degree term of $p(X),$ and $p(X)$ is of maximal degree 25 | $d - 1.$ 26 | 27 | ### `Open` (prover) and `OpenVerify` (verifier) 28 | The modified inner product argument is an argument of knowledge for the relation 29 | 30 | $$\boxed{\{((P, x, v); (\mathbf{a}, r)): P = \langle\mathbf{a}, \mathbf{G}\rangle + [r]H, v = \langle\mathbf{a}, \mathbf{b}\rangle\}},$$ 31 | 32 | where $\mathbf{b} = (1, x, x^2, \cdots, x^{d-1})$ is composed of increasing powers of the 33 | evaluation point $x.$ This allows a prover to demonstrate to a verifier that the 34 | polynomial contained “inside” the commitment $P$ evaluates to $v$ at $x,$ and moreover, 35 | that the committed polynomial has maximum degree $d − 1.$ 36 | 37 | The inner product argument proceeds in $k = \log_2 d$ rounds. For our purposes, it is 38 | sufficient to know about its final outputs, while merely providing intuition about the 39 | intermediate rounds. (Refer to Section 3 in the [Halo] paper for a full explanation.) 40 | 41 | [Halo]: https://eprint.iacr.org/2019/1021.pdf 42 | 43 | Before beginning the argument, the verifier selects a random group element $U$ and sends it 44 | to the prover. We initialize the argument at round $k,$ with the vectors 45 | $\mathbf{a}^{(k)} := \mathbf{a},$ $\mathbf{G}^{(k)} := \mathbf{G}$ and 46 | $\mathbf{b}^{(k)} := \mathbf{b}.$ In each round $j = k, k-1, \cdots, 1$: 47 | 48 | * the prover computes two values $L_j$ and $R_j$ by taking some inner product of 49 | $\mathbf{a}^{(j)}$ with $\mathbf{G}^{(j)}$ and $\mathbf{b}^{(j)}$. Note that are in some 50 | sense "cross-terms": the lower half of $\mathbf{a}$ is used with the higher half of 51 | $\mathbf{G}$ and $\mathbf{b}$, and vice versa: 52 | 53 | $$ 54 | \begin{aligned} 55 | L_j &= \langle\mathbf{a_{lo}^{(j)}}, \mathbf{G_{hi}^{(j)}}\rangle + [l_j]H + [\langle\mathbf{a_{lo}^{(j)}}, \mathbf{b_{hi}^{(j)}}\rangle] U\\ 56 | R_j &= \langle\mathbf{a_{hi}^{(j)}}, \mathbf{G_{lo}^{(j)}}\rangle + [r_j]H + [\langle\mathbf{a_{hi}^{(j)}}, \mathbf{b_{lo}^{(j)}}\rangle] U\\ 57 | \end{aligned} 58 | $$ 59 | 60 | * the verifier issues a random challenge $u_j$; 61 | * the prover uses $u_j$ to compress the lower and higher halves of $\mathbf{a}^{(j)}$, 62 | thus producing a new vector of half the original length 63 | $$\mathbf{a}^{(j-1)} = \mathbf{a_{hi}^{(j)}}\cdot u_j^{-1} + \mathbf{a_{lo}^{(j)}}\cdot u_j.$$ 64 | The vectors $\mathbf{G}^{(j)}$ and $\mathbf{b}^{(j)}$ are similarly compressed to give 65 | $\mathbf{G}^{(j-1)}$ and $\mathbf{b}^{(j-1)}$. 
66 | * $\mathbf{a}^{(j-1)}$, $\mathbf{G}^{(j-1)}$ and $\mathbf{b}^{(j-1)}$ are input to the 67 | next round $j - 1.$ 68 | 69 | Note that at the end of the last round $j = 1,$ we are left with $a := \mathbf{a}^{(0)}$, 70 | $G := \mathbf{G}^{(0)}$, $b := \mathbf{b}^{(0)},$ each of length 1. The intuition is that 71 | these final scalars, together with the challenges $\{u_j\}$ and "cross-terms" 72 | $\{L_j, R_j\}$ from each round, encode the compression in each round. Since the prover did 73 | not know the challenges $U, \{u_j\}$ in advance, they would have been unable to manipulate 74 | the round compressions. Thus, checking a constraint on these final terms should enforce 75 | that the compression had been performed correctly, and that the original $\mathbf{a}$ 76 | satisfied the relation before undergoing compression. 77 | 78 | Note that $G, b$ are simply rearrangements of the publicly known $\mathbf{G}, \mathbf{b},$ 79 | with the round challenges $\{u_j\}$ mixed in: this means the verifier can compute $G, b$ 80 | independently and verify that the prover had provided those same values. 81 | -------------------------------------------------------------------------------- /book/src/background/plonkish.md: -------------------------------------------------------------------------------- 1 | # [WIP] PLONKish arithmetization 2 | 3 | We call the field over which the circuit is defined $\mathbb{F} = \mathbb{F}_p$. 4 | 5 | Let $n = 2^k$, and assume that $\omega$ is a primitive root of unity of order $n$ in 6 | $\mathbb{F}^\times$, so that $\mathbb{F}^\times$ has a multiplicative subgroup 7 | $\mathcal{H} = \{1, \omega, \omega^2, \cdots, \omega^{n-1}\}$. This forms a Lagrange 8 | basis corresponding to the elements in the subgroup. 9 | 10 | ## Polynomial rules 11 | A polynomial rule defines a constraint that must hold between its specified columns at 12 | every row (i.e. at every element in the multiplicative subgroup). 13 | 14 | e.g. 15 | 16 | ```text 17 | a * sa + b * sb + a * b * sm + c * sc + PI = 0 18 | ``` 19 | 20 | ## Columns 21 | - **fixed columns**: fixed for all instances of a particular circuit. These include 22 | selector columns, which toggle parts of a polynomial rule "on" or "off" to form a 23 | "custom gate". They can also include any other fixed data. 24 | - **advice columns**: variable values assigned in each instance of the circuit. 25 | Corresponds to the prover's secret witness. 26 | - **public input**: like advice columns, but publicly known values. 27 | 28 | Each column is a vector of $n$ values, e.g. $\mathbf{a} = [a_0, a_1, \cdots, a_{n-1}]$. We 29 | can think of the vector as the evaluation form of the column polynomial 30 | $a(X), X \in \mathcal{H}.$ To recover the coefficient form, we can use 31 | [Lagrange interpolation](polynomials.md#lagrange-interpolation), such that 32 | $a(\omega^i) = a_i.$ 33 | 34 | ## Equality constraints 35 | - Define permutation between a set of columns, e.g. $\sigma(a, b, c)$ 36 | - Assert equalities between specific cells in these columns, e.g. $b_1 = c_0$ 37 | - Construct permuted columns which should evaluate to same value as original columns 38 | 39 | ## Permutation grand product 40 | $$Z(\omega^i) := \prod_{0 \leq j \leq i} \frac{C_k(\omega^j) + \beta\delta^k \omega^j + \gamma}{C_k(\omega^j) + \beta S_k(\omega^j) + \gamma},$$ 41 | where $i = 0, \cdots, n-1$ indexes over the size of the multiplicative subgroup, and 42 | $k = 0, \cdots, m-1$ indexes over the advice columns involved in the permutation. 
This is 43 | a running product, where each term includes the cumulative product of the terms before it. 44 | 45 | > TODO: what is $\delta$? keep columns linearly independent 46 | 47 | Check the constraints: 48 | 49 | 1. First term is equal to one 50 | $$\mathcal{L}_0(X) \cdot (1 - Z(X)) = 0$$ 51 | 52 | 2. Running product is well-constructed. For each row, we check that this holds: 53 | $$Z(\omega^i) \cdot{(C(\omega^i) + \beta S_k(\omega^i) + \gamma)} - Z(\omega^{i-1}) \cdot{(C(\omega^i) + \delta^k \beta \omega^i + \gamma)} = 0$$ 54 | Rearranging gives 55 | $$Z(\omega^i) = Z(\omega^{i-1}) \frac{C(\omega^i) + \beta\delta^k \omega^i + \gamma}{C(\omega^i) + \beta S_k(\omega^i) + \gamma},$$ 56 | which is how we defined the grand product polynomial in the first place. 57 | 58 | ### Lookup 59 | Reference: [Generic Lookups with PLONK (DRAFT)](/LTPc5f-3S0qNF6MtwD-Tdg?view) 60 | 61 | ### Vanishing argument 62 | We want to check that the expressions defined by the gate constraints, permutation 63 | constraints and lookup constraints evaluate to zero at all elements in the multiplicative 64 | subgroup. To do this, the prover collapses all the expressions into one polynomial 65 | $$H(X) = \sum_{i=0}^e y^i E_i(X),$$ 66 | where $e$ is the number of expressions and $y$ is a random challenge used to keep the 67 | constraints linearly independent. The prover then divides this by the vanishing polynomial 68 | (see section: [Vanishing polynomial](polynomials.md#vanishing-polynomial)) and commits to 69 | the resulting quotient 70 | 71 | $$\text{Commit}(Q(X)), \text{where } Q(X) = \frac{H(X)}{Z_H(X)}.$$ 72 | 73 | The verifier responds with a random evaluation point $x,$ to which the prover replies with 74 | the claimed evaluations $q = Q(x), \{e_i\}_{i=0}^e = \{E_i(x)\}_{i=0}^e.$ Now, all that 75 | remains for the verifier to check is that the evaluations satisfy 76 | 77 | $$q \stackrel{?}{=} \frac{\sum_{i=0}^e y^i e_i}{Z_H(x)}.$$ 78 | 79 | Notice that we have yet to check that the committed polynomials indeed evaluate to the 80 | claimed values at 81 | $x, q \stackrel{?}{=} Q(x), \{e_i\}_{i=0}^e \stackrel{?}{=} \{E_i(x)\}_{i=0}^e.$ 82 | This check is handled by the polynomial commitment scheme (described in the next section). 83 | -------------------------------------------------------------------------------- /book/src/background/recursion.md: -------------------------------------------------------------------------------- 1 | ## Recursion 2 | > Alternative terms: Induction; Accumulation scheme; Proof-carrying data 3 | 4 | However, the computation of $G$ requires a length-$2^k$ multiexponentiation 5 | $\langle \mathbf{G}, \mathbf{s}\rangle,$ where $\mathbf{s}$ is composed of the round 6 | challenges $u_1, \cdots, u_k$ arranged in a binary counting structure. This is the 7 | linear-time computation that we want to amortise across a batch of proof instances. 8 | Instead of computing $G,$ notice that we can express $G$ as a commitment to a polynomial 9 | 10 | $$G = \text{Commit}(\sigma, g(X, u_1, \cdots, u_k)),$$ 11 | 12 | where $g(X, u_1, \cdots, u_k) := \prod_{i=1}^k (u_i + u_i^{-1}X^{2^{i-1}})$ is a 13 | polynomial with degree $2^k - 1.$ 14 | 15 | | | | 16 | | -------- | -------- | 17 | | | Since $G$ is a commitment, it can be checked in an inner product argument. 
The verifier circuit witnesses $G$ and brings $G, u_1, \cdots, u_k$ out as public inputs to the proof $\pi.$ The next verifier instance checks $\pi$ using the inner product argument; this includes checking that $G = \text{Commit}(g(X, u_1, \cdots, u_k))$ evaluates at some random point to the expected value for the given challenges $u_1, \cdots, u_k.$ Recall from the [previous section](#Polynomial-commitment-using-inner-product-argument) that this check only requires $\log d$ work.

At the end of checking $\pi$ and $G,$ the circuit is left with a new $G',$ along with the $u_1', \cdots, u_k'$ challenges sampled for the check. To fully accept $\pi$ as valid, we should perform a linear-time computation of $G' = \langle\mathbf{G}, \mathbf{s}'\rangle$. Once again, we delay this computation by witnessing $G'$ and bringing $G', u_1', \cdots, u_k'$ out as public inputs to the proof $\pi'.$

This goes on from one proof instance to the next, until we are satisfied with the size of our batch of proofs. We finally perform a single linear-time computation, thus deciding the validity of the whole batch. | 18 | 19 | We recall from the section [Cycles of curves](curves.md#cycles-of-curves) that we can 20 | instantiate this protocol over a two-cycle, where a proof produced by one curve is 21 | efficiently verified in the circuit of the other curve. However, some of these verifier 22 | checks can actually be efficiently performed in the native circuit; these are "deferred" 23 | to the next native circuit (see diagram below) instead of being immediately passed over to 24 | the other curve. 25 | 26 | ![](https://i.imgur.com/l4HrYgE.png) 27 | -------------------------------------------------------------------------------- /book/src/concepts.md: -------------------------------------------------------------------------------- 1 | # Concepts 2 | 3 | First we'll describe the concepts behind zero-knowledge proof systems; the 4 | *arithmetization* (kind of circuit description) used by Halo 2; and the 5 | abstractions we use to build circuit implementations. 6 | -------------------------------------------------------------------------------- /book/src/concepts/arithmetization.md: -------------------------------------------------------------------------------- 1 | # PLONKish Arithmetization 2 | 3 | The arithmetization used by Halo 2 comes from [PLONK](https://eprint.iacr.org/2019/953), or 4 | more precisely its extension UltraPLONK that supports custom gates and lookup arguments. We'll 5 | call it [***PLONKish***](https://twitter.com/feministPLT/status/1413815927704014850). 6 | 7 | ***PLONKish circuits*** are defined in terms of a rectangular matrix of values. We refer to 8 | ***rows***, ***columns***, and ***cells*** of this matrix with the conventional meanings. 9 | 10 | A PLONKish circuit depends on a ***configuration***: 11 | 12 | * A finite field $\mathbb{F}$, where cell values (for a given statement and witness) will be 13 | elements of $\mathbb{F}$. 14 | * The number of columns in the matrix, and a specification of each column as being 15 | ***fixed***, ***advice***, or ***instance***. Fixed columns are fixed by the circuit; 16 | advice columns correspond to witness values; and instance columns are normally used for 17 | public inputs (technically, they can be used for any elements shared between the prover 18 | and verifier). 19 | 20 | * A subset of the columns that can participate in equality constraints. 21 | 22 | * A ***maximum constraint degree***. 23 | 24 | * A sequence of ***polynomial constraints***. These are multivariate polynomials over 25 | $\mathbb{F}$ that must evaluate to zero *for each row*. The variables in a polynomial 26 | constraint may refer to a cell in a given column of the current row, or a given column of 27 | another row relative to this one (with wrap-around, i.e. taken modulo $n$). The maximum 28 | degree of each polynomial is given by the maximum constraint degree. 29 | 30 | * A sequence of ***lookup arguments*** defined over tuples of ***input expressions*** 31 | (which are multivariate polynomials as above) and ***table columns***. 32 | 33 | A PLONKish circuit also defines: 34 | 35 | * The number of rows $n$ in the matrix. $n$ must correspond to the size of a multiplicative 36 | subgroup of $\mathbb{F}^\times$; typically a power of two. 37 | 38 | * A sequence of ***equality constraints***, which specify that two given cells must have equal 39 | values. 
40 | 41 | * The values of the fixed columns at each row. 42 | 43 | From a circuit description we can generate a ***proving key*** and a ***verification key***, 44 | which are needed for the operations of proving and verification for that circuit. 45 | 46 | > Note that we specify the ordering of columns, polynomial constraints, lookup arguments, and 47 | > equality constraints, even though these do not affect the meaning of the circuit. This makes 48 | > it easier to define the generation of proving and verification keys as a deterministic 49 | > process. 50 | 51 | Typically, a configuration will define polynomial constraints that are switched off and on by 52 | ***selectors*** defined in fixed columns. For example, a constraint $q_i \cdot p(...) = 0$ can 53 | be switched off for a particular row $i$ by setting $q_i = 0$. In this case we sometimes refer 54 | to a set of constraints controlled by a set of selector columns that are designed to be used 55 | together, as a ***gate***. Typically there will be a ***standard gate*** that supports generic 56 | operations like field multiplication and division, and possibly also ***custom gates*** that 57 | support more specialized operations. 58 | -------------------------------------------------------------------------------- /book/src/concepts/gadgets.md: -------------------------------------------------------------------------------- 1 | # Gadgets 2 | 3 | When implementing a circuit, we could use the features of the chips we've selected directly. 4 | Typically, though, we will use them via ***gadgets***. This indirection is useful because, 5 | for reasons of efficiency and limitations imposed by PLONKish circuits, the chip interfaces will 6 | often be dependent on low-level implementation details. The gadget interface can provide a more 7 | convenient and stable API that abstracts away from extraneous detail. 8 | 9 | For example, consider a hash function such as SHA-256. The interface of a chip supporting 10 | SHA-256 might be dependent on internals of the hash function design such as the separation 11 | between message schedule and compression function. The corresponding gadget interface can 12 | provide a more convenient and familiar `update`/`finalize` API, and can also handle parts 13 | of the hash function that do not need chip support, such as padding. This is similar to how 14 | [accelerated](https://software.intel.com/content/www/us/en/develop/articles/intel-sha-extensions.html) 15 | [instructions](https://developer.arm.com/documentation/ddi0514/g/introduction/about-the-cortex-a57-processor-cryptography-engine) 16 | for cryptographic primitives on CPUs are typically accessed via software libraries, rather 17 | than directly. 18 | 19 | Gadgets can also provide modular and reusable abstractions for circuit programming 20 | at a higher level, similar to their use in libraries such as 21 | [libsnark](https://github.com/christianlundkvist/libsnark-tutorial) and 22 | [bellman](https://electriccoin.co/blog/bellman-zksnarks-in-rust/). As well as abstracting 23 | *functions*, they can also abstract *types*, such as elliptic curve points or integers of 24 | specific sizes. 25 | 26 | -------------------------------------------------------------------------------- /book/src/concepts/proofs.md: -------------------------------------------------------------------------------- 1 | # Proof systems 2 | 3 | The aim of any ***proof system*** is to be able to prove interesting mathematical or 4 | cryptographic ***statements***. 
5 | 6 | Typically, in a given protocol we will want to prove families of statements that differ 7 | in their ***public inputs***. The prover will also need to show that they know some 8 | ***private inputs*** that make the statement hold. 9 | 10 | To do this we write down a ***relation***, $\mathcal{R}$, that specifies which 11 | combinations of public and private inputs are valid. 12 | 13 | > The terminology above is intended to be aligned with the 14 | > [ZKProof Community Reference](https://docs.zkproof.org/reference#latest-version). 15 | 16 | To be precise, we should distinguish between the relation $\mathcal{R}$, and its 17 | implementation to be used in a proof system. We call the latter a ***circuit***. 18 | 19 | The language that we use to express circuits for a particular proof system is called an 20 | ***arithmetization***. Usually, an arithmetization will define circuits in terms of 21 | polynomial constraints on variables over a field. 22 | 23 | > The _process_ of expressing a particular relation as a circuit is also sometimes called 24 | > "arithmetization", but we'll avoid that usage. 25 | 26 | To create a proof of a statement, the prover will need to know the private inputs, 27 | and also intermediate values, called ***advice*** values, that are used by the circuit. 28 | 29 | We assume that we can compute advice values efficiently from the private and public inputs. 30 | The particular advice values will depend on how we write the circuit, not only on the 31 | high-level statement. 32 | 33 | The private inputs and advice values are collectively called a ***witness***. 34 | 35 | > Some authors use "witness" as just a synonym for private inputs. But in our usage, 36 | > a witness includes advice, i.e. it includes all values that the prover supplies to 37 | > the circuit. 38 | 39 | For example, suppose that we want to prove knowledge of a preimage $x$ of a 40 | hash function $H$ for a digest $y$: 41 | 42 | * The private input would be the preimage $x$. 43 | 44 | * The public input would be the digest $y$. 45 | 46 | * The relation would be $\{(x, y) : H(x) = y\}$. 47 | 48 | * For a particular public input $Y$, the statement would be: $\{(x) : H(x) = Y\}$. 49 | 50 | * The advice would be all of the intermediate values in the circuit implementing the 51 | hash function. The witness would be $x$ and the advice. 52 | 53 | A ***Non-interactive Argument*** allows a ***prover*** to create a ***proof*** for a 54 | given statement and witness. The proof is data that can be used to convince a ***verifier*** 55 | that _there exists_ a witness for which the statement holds. The security property that 56 | such proofs cannot falsely convince a verifier is called ***soundness***. 57 | 58 | A ***Non-interactive Argument of Knowledge*** (***NARK***) further convinces the verifier 59 | that the prover _knew_ a witness for which the statement holds. This security property is 60 | called ***knowledge soundness***, and it implies soundness. 61 | 62 | In practice knowledge soundness is more useful for cryptographic protocols than soundness: 63 | if we are interested in whether Alice holds a secret key in some protocol, say, we need 64 | Alice to prove that _she knows_ the key, not just that it exists. 65 | 66 | Knowledge soundness is formalized by saying that an ***extractor***, which can observe 67 | precisely how the proof is generated, must be able to compute the witness. 68 | 69 | > This property is subtle given that proofs can be ***malleable***. 
That is, depending on the 70 | > proof system it may be possible to take an existing proof (or set of proofs) and, without 71 | > knowing the witness(es), modify it/them to produce a distinct proof of the same or a related 72 | > statement. Higher-level protocols that use malleable proof systems need to take this into 73 | > account. 74 | > 75 | > Even without malleability, proofs can also potentially be ***replayed***. For instance, 76 | > we would not want Alice in our example to be able to present a proof generated by someone 77 | > else, and have that be taken as a demonstration that she knew the key. 78 | 79 | If a proof yields no information about the witness (other than that a witness exists and was 80 | known to the prover), then we say that the proof system is ***zero knowledge***. 81 | 82 | If a proof system produces short proofs —i.e. of length polylogarithmic in the circuit 83 | size— then we say that it is ***succinct***. A succinct NARK is called a ***SNARK*** 84 | (***Succinct Non-Interactive Argument of Knowledge***). 85 | 86 | > By this definition, a SNARK need not have verification time polylogarithmic in the circuit 87 | > size. Some papers use the term ***efficient*** to describe a SNARK with that property, but 88 | > we'll avoid that term since it's ambiguous for SNARKs that support amortized or recursive 89 | > verification, which we'll get to later. 90 | 91 | A ***zk-SNARK*** is a zero-knowledge SNARK. 92 | -------------------------------------------------------------------------------- /book/src/design.md: -------------------------------------------------------------------------------- 1 | # Design 2 | 3 | ## Note on Language 4 | 5 | We use slightly different language than others to describe PLONK concepts. Here's the 6 | overview: 7 | 8 | 1. We like to think of PLONK-like arguments as tables, where each column corresponds to a 9 | "wire". We refer to entries in this table as "cells". 10 | 2. We like to call "selector polynomials" and so on "fixed columns" instead. We then refer 11 | specifically to a "selector constraint" when a cell in a fixed column is being used to 12 | control whether a particular constraint is enabled in that row. 13 | 3. We call the other polynomials "advice columns" usually, when they're populated by the 14 | prover. 15 | 4. We use the term "rule" to refer to a "gate" like 16 | $$A(X) \cdot q_A(X) + B(X) \cdot q_B(X) + A(X) \cdot B(X) \cdot q_M(X) + C(X) \cdot q_C(X) = 0.$$ 17 | - TODO: Check how consistent we are with this, and update the code and docs to match. 18 | -------------------------------------------------------------------------------- /book/src/design/gadgets.md: -------------------------------------------------------------------------------- 1 | # Gadgets 2 | 3 | In this section we document the gadgets and chip designs provided in the `halo2_gadgets` 4 | crate. 5 | 6 | > Neither these gadgets, nor their implementations, have been reviewed, and they should 7 | > not be used in production. 8 | -------------------------------------------------------------------------------- /book/src/design/gadgets/decomposition.md: -------------------------------------------------------------------------------- 1 | # Decomposition 2 | Given a field element $\alpha$, these gadgets decompose it into $W$ $K$-bit windows $$\alpha = k_0 + 2^{K} \cdot k_1 + 2^{2K} \cdot k_2 + \cdots + 2^{(W-1)K} \cdot k_{W-1}$$ where each $k_i$ a $K$-bit value. 
3 | 4 | This is done using a running sum $z_i, i \in [0..W).$ We initialize the running sum $z_0 = \alpha,$ and compute subsequent terms $z_{i+1} = \frac{z_i - k_i}{2^{K}}.$ This gives us: 5 | 6 | $$ 7 | \begin{aligned} 8 | z_0 &= \alpha \\ 9 | &= k_0 + 2^{K} \cdot k_1 + 2^{2K} \cdot k_2 + 2^{3K} \cdot k_3 + \cdots, \\ 10 | z_1 &= (z_0 - k_0) / 2^K \\ 11 | &= k_1 + 2^{K} \cdot k_2 + 2^{2K} \cdot k_3 + \cdots, \\ 12 | z_2 &= (z_1 - k_1) / 2^K \\ 13 | &= k_2 + 2^{K} \cdot k_3 + \cdots, \\ 14 | &\vdots \\ 15 | \downarrow &\text{ (in strict mode)} \\ 16 | z_W &= (z_{W-1} - k_{W-1}) / 2^K \\ 17 | &= 0 \text{ (because } z_{W-1} = k_{W-1} \text{)} 18 | \end{aligned} 19 | $$ 20 | 21 | ### Strict mode 22 | Strict mode constrains the running sum output $z_{W}$ to be zero, thus range-constraining the field element to be within $W \cdot K$ bits. 23 | 24 | In strict mode, we are also assured that $z_{W-1} = k_{W-1}$ gives us the last window in the decomposition. 25 | ## Lookup decomposition 26 | This gadget makes use of a $K$-bit lookup table to decompose a field element $\alpha$ into $K$-bit words. Each $K$-bit word $k_i = z_i - 2^K \cdot z_{i+1}$ is range-constrained by a lookup in the $K$-bit table. 27 | 28 | The region layout for the lookup decomposition uses a single advice column $z$, and two selectors $q_{lookup}$ and $q_{running}.$ 29 | $$ 30 | \begin{array}{|c|c|c|} 31 | \hline 32 | z & q_\mathit{lookup} & q_\mathit{running} \\\hline 33 | \hline 34 | z_0 & 1 & 1 \\\hline 35 | z_1 & 1 & 1 \\\hline 36 | \vdots & \vdots & \vdots \\\hline 37 | z_{n-1} & 1 & 1 \\\hline 38 | z_n & 0 & 0 \\\hline 39 | \end{array} 40 | $$ 41 | ### Short range check 42 | Using two $K$-bit lookups, we can range-constrain a field element $\alpha$ to be $n$ bits, where $n \leq K.$ To do this: 43 | 44 | 1. Constrain $0 \leq \alpha < 2^K$ to be within $K$ bits using a $K$-bit lookup. 45 | 2. Constrain $0 \leq \alpha \cdot 2^{K - n} < 2^K$ to be within $K$ bits using a $K$-bit lookup. 46 | 47 | The short variant of the lookup decomposition introduces a $q_{bitshift}$ selector. The same advice column $z$ has here been renamed to $\textsf{word}$ for clarity: 48 | $$ 49 | \begin{array}{|c|c|c|c|} 50 | \hline 51 | \textsf{word} & q_\mathit{lookup} & q_\mathit{running} & q_\mathit{bitshift} \\\hline 52 | \hline 53 | \alpha & 1 & 0 & 0 \\\hline 54 | \alpha' & 1 & 0 & 1 \\\hline 55 | 2^{K-n} & 0 & 0 & 0 \\\hline 56 | \end{array} 57 | $$ 58 | 59 | where $\alpha' = \alpha \cdot 2^{K - n}.$ Note that $2^{K-n}$ is assigned to a fixed column at keygen, and copied in at proving time. This is used in the gate enabled by the $q_\mathit{bitshift}$ selector to check that $\alpha$ was shifted correctly: 60 | $$ 61 | \begin{array}{|c|l|} 62 | \hline 63 | \text{Degree} & \text{Constraint} \\\hline 64 | 2 & q_\mathit{bitshift} \cdot ((\alpha \cdot 2^{K - n}) - \alpha') \\\hline 65 | \end{array} 66 | $$ 67 | 68 | ### Combined lookup expression 69 | Since the lookup decomposition and its short variant both make use of the same lookup table, we combine their lookup input expressions into a single one: 70 | 71 | $$q_\mathit{lookup} \cdot \left(q_\mathit{running} \cdot (z_i - 2^K \cdot z_{i+1}) + (1 - q_\mathit{running}) \cdot \textsf{word} \right)$$ 72 | 73 | where $z_i$ and $\textsf{word}$ are the same cell (but distinguished here for clarity of usage). 
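To make the running-sum relations above concrete, here is a minimal out-of-circuit sketch that computes the windows $k_i$ and running sums $z_i$ over plain integers. The helper name and the use of `u128` are illustrative only; the gadget itself operates on field elements and enforces these relations with the lookup and gate constraints described above.

```rust
/// Illustrative helper (not part of `halo2_gadgets`): compute the windows
/// `k_0, ..., k_{W-1}` and running sums `z_0, ..., z_W` for `alpha`.
fn running_sum_decompose(alpha: u128, k_bits: u32, num_windows: usize) -> (Vec<u128>, Vec<u128>) {
    let mask = (1u128 << k_bits) - 1;
    let mut z = vec![alpha]; // z_0 = alpha
    let mut windows = Vec::with_capacity(num_windows);
    for i in 0..num_windows {
        let k_i = z[i] & mask; // k_i is the low K bits of z_i
        windows.push(k_i);
        z.push((z[i] - k_i) >> k_bits); // z_{i+1} = (z_i - k_i) / 2^K
    }
    (windows, z)
}

fn main() {
    // The worked example from earlier in this section: alpha = 405, K = 3, W = 3.
    let (windows, z) = running_sum_decompose(405, 3, 3);
    assert_eq!(windows, vec![5, 2, 6]);
    // In strict mode, the circuit additionally constrains the final running sum z_W to be zero.
    assert_eq!(*z.last().unwrap(), 0);
}
```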
74 | 75 | ## Short range decomposition 76 | For a short range (for instance, $[0, \texttt{range})$ where $\texttt{range} \leq 8$), we can range-constrain each word using a degree-$\texttt{range}$ polynomial constraint instead of a lookup: $$\RangeCheck{word}{range} = \texttt{word} \cdot (1 - \texttt{word}) \cdots (\texttt{range} - 1 - \texttt{word}).$$ 77 | -------------------------------------------------------------------------------- /book/src/design/gadgets/ecc.md: -------------------------------------------------------------------------------- 1 | # Elliptic Curves 2 | 3 | ## `EccChip` 4 | 5 | `halo2_gadgets` provides a chip that implements `EccInstructions` using 10 advice columns. 6 | The chip is currently restricted to the Pallas curve, but will be extended to support the 7 | [Vesta curve](https://github.com/zcash/halo2/issues/578) in the near future. 8 | 9 | ### Chip assumptions 10 | 11 | A non-exhaustive list of assumptions made by `EccChip`: 12 | - $0$ is not an $x$-coordinate of a valid point on the curve. 13 | - Holds for Pallas because $5$ is not square in $\mathbb{F}_q$. 14 | - $0$ is not a $y$-coordinate of a valid point on the curve. 15 | - Holds for Pallas because $-5$ is not a cube in $\mathbb{F}_q$. 16 | 17 | ### Layout 18 | 19 | The following table shows how columns are used by the gates for various chip sub-areas: 20 | 21 | - $W$ - witnessing points. 22 | - $AI$ - incomplete point addition. 23 | - $AC$ - complete point addition. 24 | - $MF$ - Fixed-base scalar multiplication. 25 | - $MVI$ - variable-base scalar multiplication, incomplete rounds. 26 | - $MVC$ - variable-base scalar multiplication, complete rounds. 27 | - $MVO$ - variable-base scalar multiplication, overflow check. 28 | 29 | $$ 30 | \begin{array}{|c||c|c|c|c|c|c|c|c|c|c|} 31 | \hline 32 | \text{Sub-area} & a_0 & a_1 & a_2 & a_3 & a_4 & a_5 & a_6 & a_7 & a_8 & a_9 \\\hline 33 | \hline 34 | W & x & y \\\hline 35 | \hline 36 | AI & x_p & y_p & x_q & y_q \\\hline 37 | & & & x_r & y_r \\\hline 38 | \hline 39 | AC & x_p & y_p & x_q & y_q & \lambda & \alpha & \beta & \gamma & \delta & \\\hline 40 | & & & x_r & y_r \\\hline 41 | \hline 42 | MF & x_p & y_p & x_q & y_q & \text{window} & u \\\hline 43 | & & & x_r & y_r \\\hline 44 | \hline 45 | MVI & x_p & y_p & \lambda_2^{lo} & x_A^{hi} & \lambda_1^{hi} & \lambda_2^{hi} & z^{lo} & x_A^{lo} & \lambda_1^{lo} & z^{hi} \\\hline 46 | \hline 47 | MVC & x_p & y_p & x_q & y_q & \lambda & \alpha & \beta & \gamma & \delta & z^{complete} \\\hline 48 | & & & x_r & y_r \\\hline 49 | \end{array} 50 | $$ 51 | -------------------------------------------------------------------------------- /book/src/design/gadgets/ecc/witnessing-points.md: -------------------------------------------------------------------------------- 1 | # Witnessing points 2 | 3 | We represent elliptic curve points in the circuit in their affine representation $(x, y)$. 4 | The identity is represented as the pseudo-coordinate $(0, 0)$, which we 5 | [assume](../ecc.md#chip-assumptions) is not a valid point on the curve. 6 | 7 | ## Non-identity points 8 | 9 | To constrain a coordinate pair $(x, y)$ as representing a valid point on the curve, we 10 | directly check the curve equation. 
For Pallas and Vesta, this is: 11 | 12 | $$y^2 = x^3 + 5$$ 13 | 14 | $$ 15 | \begin{array}{|c|l|} 16 | \hline 17 | \text{Degree} & \text{Constraint} \\\hline 18 | 4 & q_\text{point}^\text{non-id} \cdot (y^2 - x^3 - 5) = 0 \\\hline 19 | \end{array} 20 | $$ 21 | 22 | ## Points including the identity 23 | 24 | To allow $(x, y)$ to represent either a valid point on the curve, or the pseudo-coordinate 25 | $(0, 0)$, we define a separate gate that enforces the curve equation check unless both $x$ 26 | and $y$ are zero. 27 | 28 | $$ 29 | \begin{array}{|c|l|} 30 | \hline 31 | \text{Degree} & \text{Constraint} \\\hline 32 | 5 & (q_\text{point} \cdot x) \cdot (y^2 - x^3 - 5) = 0 \\\hline 33 | 5 & (q_\text{point} \cdot y) \cdot (y^2 - x^3 - 5) = 0 \\\hline 34 | \end{array} 35 | $$ 36 | -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256.md: -------------------------------------------------------------------------------- 1 | # SHA-256 2 | 3 | ## Specification 4 | 5 | SHA-256 is specified in [NIST FIPS PUB 180-4](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf). 6 | 7 | Unlike the specification, we use $\boxplus$ for addition modulo $2^{32}$, and $+$ for 8 | field addition. $\oplus$ is used for XOR. 9 | 10 | ## Gadget interface 11 | 12 | SHA-256 maintains state in eight 32-bit variables. It processes input as 512-bit blocks, 13 | but internally splits these blocks into 32-bit chunks. We therefore designed the SHA-256 14 | gadget to consume input in 32-bit chunks. 15 | 16 | ## Chip instructions 17 | 18 | The SHA-256 gadget requires a chip with the following instructions: 19 | 20 | ```rust 21 | # extern crate halo2_proofs; 22 | # use halo2_proofs::plonk::Error; 23 | # use std::fmt; 24 | # 25 | # trait Chip: Sized {} 26 | # trait Layouter {} 27 | const BLOCK_SIZE: usize = 16; 28 | const DIGEST_SIZE: usize = 8; 29 | 30 | pub trait Sha256Instructions: Chip { 31 | /// Variable representing the SHA-256 internal state. 32 | type State: Clone + fmt::Debug; 33 | /// Variable representing a 32-bit word of the input block to the SHA-256 compression 34 | /// function. 35 | type BlockWord: Copy + fmt::Debug; 36 | 37 | /// Places the SHA-256 IV in the circuit, returning the initial state variable. 38 | fn initialization_vector(layouter: &mut impl Layouter) -> Result; 39 | 40 | /// Starting from the given initial state, processes a block of input and returns the 41 | /// final state. 42 | fn compress( 43 | layouter: &mut impl Layouter, 44 | initial_state: &Self::State, 45 | input: [Self::BlockWord; BLOCK_SIZE], 46 | ) -> Result; 47 | 48 | /// Converts the given state into a message digest. 49 | fn digest( 50 | layouter: &mut impl Layouter, 51 | state: &Self::State, 52 | ) -> Result<[Self::BlockWord; DIGEST_SIZE], Error>; 53 | } 54 | ``` 55 | 56 | TODO: Add instruction for computing padding. 57 | 58 | This set of instructions was chosen to strike a balance between the reusability of the 59 | instructions, and the scope for chips to internally optimise them. In particular, we 60 | considered splitting the compression function into its constituent parts (Ch, Maj etc), 61 | and providing a compression function gadget that implemented the round logic. However, 62 | this would prevent chips from using relative references between the various parts of a 63 | compression round. 
Having an instruction that implements all compression rounds is also 64 | similar to the Intel SHA extensions, which provide an instruction that performs multiple 65 | compression rounds. 66 | -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/bit_reassignment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/halo2/1492fd076f7b0b0173a12d304175171a2c75048f/book/src/design/gadgets/sha256/bit_reassignment.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/compression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/halo2/1492fd076f7b0b0173a12d304175171a2c75048f/book/src/design/gadgets/sha256/compression.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/low_sigma_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/halo2/1492fd076f7b0b0173a12d304175171a2c75048f/book/src/design/gadgets/sha256/low_sigma_0.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/low_sigma_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/halo2/1492fd076f7b0b0173a12d304175171a2c75048f/book/src/design/gadgets/sha256/low_sigma_1.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/upp_sigma_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/halo2/1492fd076f7b0b0173a12d304175171a2c75048f/book/src/design/gadgets/sha256/upp_sigma_0.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/upp_sigma_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/halo2/1492fd076f7b0b0173a12d304175171a2c75048f/book/src/design/gadgets/sha256/upp_sigma_1.png -------------------------------------------------------------------------------- /book/src/design/implementation.md: -------------------------------------------------------------------------------- 1 | # Implementation 2 | -------------------------------------------------------------------------------- /book/src/design/implementation/fields.md: -------------------------------------------------------------------------------- 1 | # Fields 2 | 3 | The [Pasta curves](https://electriccoin.co/blog/the-pasta-curves-for-halo-2-and-beyond/) 4 | that we use in `halo2` are designed to be highly 2-adic, meaning that a large $2^S$ 5 | [multiplicative subgroup](../../background/fields.md#multiplicative-subgroups) exists in 6 | each field. That is, we can write $p - 1 \equiv 2^S \cdot T$ with $T$ odd. For both Pallas 7 | and Vesta, $S = 32$; this helps to simplify the field implementations. 8 | 9 | ## Sarkar square-root algorithm (table-based variant) 10 | 11 | We use a technique from [Sarkar2020](https://eprint.iacr.org/2020/1407.pdf) to compute 12 | [square roots](../../background/fields.md#square-roots) in `halo2`. 
The intuition behind 13 | the algorithm is that we can split the task into computing square roots in each 14 | multiplicative subgroup. 15 | 16 | Suppose we want to find the square root of $u$ modulo one of the Pasta primes $p$, where 17 | $u$ is a non-zero square in $\mathbb{Z}_p^\times$. We define a $2^S$ 18 | [root of unity](../../background/fields.md#roots-of-unity) $g = z^T$ where $z$ is a 19 | non-square in $\mathbb{Z}_p^\times$, and precompute the following tables: 20 | 21 | $$ 22 | gtab = \begin{bmatrix} 23 | g^0 & g^1 & ... & g^{2^8 - 1} \\ 24 | (g^{2^8})^0 & (g^{2^8})^1 & ... & (g^{2^8})^{2^8 - 1} \\ 25 | (g^{2^{16}})^0 & (g^{2^{16}})^1 & ... & (g^{2^{16}})^{2^8 - 1} \\ 26 | (g^{2^{24}})^0 & (g^{2^{24}})^1 & ... & (g^{2^{24}})^{2^8 - 1} 27 | \end{bmatrix} 28 | $$ 29 | 30 | $$ 31 | invtab = \begin{bmatrix} 32 | (g^{-2^{24}})^0 & (g^{-2^{24}})^1 & ... & (g^{-2^{24}})^{2^8 - 1} 33 | \end{bmatrix} 34 | $$ 35 | 36 | Let $v = u^{(T-1)/2}$. We can then define $x = uv \cdot v = u^T$ as an element of the 37 | $2^S$ multiplicative subgroup. 38 | 39 | Let $x_3 = x, x_2 = x_3^{2^8}, x_1 = x_2^{2^8}, x_0 = x_1^{2^8}.$ 40 | 41 | ### i = 0, 1 42 | Using $invtab$, we lookup $t_0$ such that 43 | $$ 44 | x_0 = (g^{-2^{24}})^{t_0} \implies x_0 \cdot g^{t_0 \cdot 2^{24}} = 1. 45 | $$ 46 | 47 | Define $\alpha_1 = x_1 \cdot (g^{2^{16}})^{t_0}.$ 48 | 49 | ### i = 2 50 | Lookup $t_1$ s.t. 51 | $$ 52 | \begin{array}{ll} 53 | \alpha_1 = (g^{-2^{24}})^{t_1} &\implies x_1 \cdot (g^{2^{16}})^{t_0} = (g^{-2^{24}})^{t_1} \\ 54 | &\implies 55 | x_1 \cdot g^{(t_0 + 2^8 \cdot t_1) \cdot 2^{16}} = 1. 56 | \end{array} 57 | $$ 58 | 59 | Define $\alpha_2 = x_2 \cdot (g^{2^8})^{t_0 + 2^8 \cdot t_1}.$ 60 | 61 | ### i = 3 62 | Lookup $t_2$ s.t. 63 | 64 | $$ 65 | \begin{array}{ll} 66 | \alpha_2 = (g^{-2^{24}})^{t_2} &\implies x_2 \cdot (g^{2^8})^{t_0 + 2^8\cdot {t_1}} = (g^{-2^{24}})^{t_2} \\ 67 | &\implies x_2 \cdot g^{(t_0 + 2^8 \cdot t_1 + 2^{16} \cdot t_2) \cdot 2^8} = 1. 68 | \end{array} 69 | $$ 70 | 71 | Define $\alpha_3 = x_3 \cdot g^{t_0 + 2^8 \cdot t_1 + 2^{16} \cdot t_2}.$ 72 | 73 | ### Final result 74 | Lookup $t_3$ such that 75 | 76 | $$ 77 | \begin{array}{ll} 78 | \alpha_3 = (g^{-2^{24}})^{t_3} &\implies x_3 \cdot g^{t_0 + 2^8\cdot {t_1} + 2^{16} \cdot t_2} = (g^{-2^{24}})^{t_3} \\ 79 | &\implies x_3 \cdot g^{t_0 + 2^8 \cdot t_1 + 2^{16} \cdot t_2 + 2^{24} \cdot t_3} = 1. 80 | \end{array} 81 | $$ 82 | 83 | Let $t = t_0 + 2^8 \cdot t_1 + 2^{16} \cdot t_2 + 2^{24} \cdot t_3$. 84 | 85 | We can now write 86 | $$ 87 | \begin{array}{lclcl} 88 | x_3 \cdot g^{t} = 1 &\implies& x_3 &=& g^{-t} \\ 89 | &\implies& uv^2 &=& g^{-t} \\ 90 | &\implies& uv &=& v^{-1} \cdot g^{-t} \\ 91 | &\implies& uv \cdot g^{t / 2} &=& v^{-1} \cdot g^{-t / 2}. 92 | \end{array} 93 | $$ 94 | 95 | Squaring the RHS, we observe that $(v^{-1} g^{-t / 2})^2 = v^{-2}g^{-t} = u.$ Therefore, 96 | the square root of $u$ is $uv \cdot g^{t / 2}$; the first part we computed earlier, and 97 | the second part can be computed with three multiplications using lookups in $gtab$. 98 | -------------------------------------------------------------------------------- /book/src/design/implementation/proofs.md: -------------------------------------------------------------------------------- 1 | # Halo 2 proofs 2 | 3 | ## Proofs as opaque byte streams 4 | 5 | In proving system implementations like `bellman`, there is a concrete `Proof` struct that 6 | encapsulates the proof data, is returned by a prover, and can be passed to a verifier. 
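The calling pattern in `halo2_proofs` looks roughly like the following sketch. This is a sketch only: generics, parameter shapes, and error handling are elided, and the exact signatures vary across versions and commitment schemes. The point to notice is that the prover's output is simply the byte vector drained from the transcript, as the rest of this section explains.

```rust,ignore
// Sketch: proving produces bytes via a transcript, not a `Proof` struct.
let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]);
create_proof(
    &params,
    &pk,
    &[circuit],
    &[&[&instance[..]]],
    OsRng,
    &mut transcript,
)?;
let proof: Vec<u8> = transcript.finalize();
```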
7 | 8 | `halo2` does not contain any proof-like structures, for several reasons: 9 | 10 | - The Proof structures would contain vectors of (vectors of) curve points and scalars. 11 | This complicates serialization/deserialization of proofs because the lengths of these 12 | vectors depend on the configuration of the circuit. However, we didn't want to encode 13 | the lengths of vectors inside of proofs, because at runtime the circuit is fixed, and 14 | thus so are the proof sizes. 15 | - It's easy to accidentally put stuff into a Proof structure that isn't also placed in the 16 | transcript, which is a hazard when developing and implementing a proving system. 17 | - We needed to be able to create multiple PLONK proofs at the same time; these proofs 18 | share many different substructures when they are for the same circuit. 19 | 20 | Instead, `halo2` treats proof objects as opaque byte streams. Creation and consumption of 21 | these byte streams happens via the transcript: 22 | 23 | - The `TranscriptWrite` trait represents something that we can write proof components to 24 | (at proving time). 25 | - The `TranscriptRead` trait represents something that we can read proof components from 26 | (at verifying time). 27 | 28 | Crucially, implementations of `TranscriptWrite` are responsible for simultaneously writing 29 | to some `std::io::Write` buffer at the same time that they hash things into the transcript, 30 | and similarly for `TranscriptRead`/`std::io::Read`. 31 | 32 | As a bonus, treating proofs as opaque byte streams ensures that verification accounts for 33 | the cost of deserialization, which isn't negligible due to point compression. 34 | 35 | ## Proof encoding 36 | 37 | A Halo 2 proof, constructed over a curve $E(\mathbb{F}_p)$, is encoded as a stream of: 38 | 39 | - Points $P \in E(\mathbb{F}_p)$ (for commitments to polynomials), and 40 | - Scalars $s \in \mathbb{F}_q$ (for evaluations of polynomials, and blinding values). 41 | 42 | For the Pallas and Vesta curves, both points and scalars have 32-byte encodings, meaning 43 | that proofs are always a multiple of 32 bytes. 44 | 45 | The `halo2` crate supports proving multiple instances of a circuit simultaneously, in 46 | order to share common proof components and protocol logic. 47 | 48 | In the encoding description below, we will use the following circuit-specific constants: 49 | 50 | - $k$ - the size parameter of the circuit (which has $2^k$ rows). 51 | - $A$ - the number of advice columns. 52 | - $F$ - the number of fixed columns. 53 | - $I$ - the number of instance columns. 54 | - $L$ - the number of lookup arguments. 55 | - $P$ - the number of permutation arguments. 56 | - $\textsf{Col}_P$ - the number of columns involved in permutation argument $P$. 57 | - $D$ - the maximum degree for the quotient polynomial. 58 | - $Q_A$ - the number of advice column queries. 59 | - $Q_F$ - the number of fixed column queries. 60 | - $Q_I$ - the number of instance column queries. 61 | - $M$ - the number of instances of the circuit that are being proven simultaneously. 62 | 63 | As the proof encoding directly follows the transcript, we can break the encoding into 64 | sections matching the Halo 2 protocol: 65 | 66 | - PLONK commitments: 67 | - $A$ points (repeated $M$ times). 68 | - $2L$ points (repeated $M$ times). 69 | - $P$ points (repeated $M$ times). 70 | - $L$ points (repeated $M$ times). 71 | 72 | - Vanishing argument: 73 | - $D - 1$ points. 74 | - $Q_I$ scalars (repeated $M$ times). 75 | - $Q_A$ scalars (repeated $M$ times). 
76 | - $Q_F$ scalars. 77 | - $D - 1$ scalars. 78 | 79 | - PLONK evaluations: 80 | - $(2 + \textsf{Col}_P) \times P$ scalars (repeated $M$ times). 81 | - $5L$ scalars (repeated $M$ times). 82 | 83 | - Multiopening argument: 84 | - 1 point. 85 | - 1 scalar per set of points in the multiopening argument. 86 | 87 | - Polynomial commitment scheme: 88 | - $1 + 2k$ points. 89 | - $2$ scalars. 90 | -------------------------------------------------------------------------------- /book/src/design/proving-system.md: -------------------------------------------------------------------------------- 1 | # Proving system 2 | 3 | The Halo 2 proving system can be broken down into five stages: 4 | 5 | 1. Commit to polynomials encoding the main components of the circuit: 6 | - Cell assignments. 7 | - Permuted values and products for each lookup argument. 8 | - Equality constraint permutations. 9 | 2. Construct the vanishing argument to constrain all circuit relations to zero: 10 | - Standard and custom gates. 11 | - Lookup argument rules. 12 | - Equality constraint permutation rules. 13 | 3. Evaluate the above polynomials at all necessary points: 14 | - All relative rotations used by custom gates across all columns. 15 | - Vanishing argument pieces. 16 | 4. Construct the multipoint opening argument to check that all evaluations are consistent 17 | with their respective commitments. 18 | 5. Run the inner product argument to create a polynomial commitment opening proof for the 19 | multipoint opening argument polynomial. 20 | 21 | These stages are presented in turn across this section of the book. 22 | 23 | ## Example 24 | 25 | To aid our explanations, we will at times refer to the following example constraint 26 | system: 27 | 28 | - Four advice columns $a, b, c, d$. 29 | - One fixed column $f$. 30 | - Three custom gates: 31 | - $a \cdot b \cdot c_{-1} - d = 0$ 32 | - $f_{-1} \cdot c = 0$ 33 | - $f \cdot d \cdot a = 0$ 34 | 35 | ## tl;dr 36 | 37 | The table below provides a (probably too) succinct description of the Halo 2 protocol. 38 | This description will likely be replaced by the Halo 2 paper and security proof, but for 39 | now serves as a summary of the following sub-sections. 
40 | 41 | | Prover | | Verifier | 42 | | --------------------------------------------------------------------------- | ------- | ---------------------------------- | 43 | | | $\larr$ | $t(X) = (X^n - 1)$ | 44 | | | $\larr$ | $F = [F_0, F_1, \dots, F_{m - 1}]$ | 45 | | $\mathbf{A} = [A_0, A_1, \dots, A_{m - 1}]$ | $\rarr$ | | 46 | | | $\larr$ | $\theta$ | 47 | | $\mathbf{L} = [(A'_0, S'_0), \dots, (A'_{m - 1}, S'_{m - 1})]$ | $\rarr$ | | 48 | | | $\larr$ | $\beta, \gamma$ | 49 | | $\mathbf{Z_P} = [Z_{P,0}, Z_{P,1}, \ldots]$ | $\rarr$ | | 50 | | $\mathbf{Z_L} = [Z_{L,0}, Z_{L,1}, \ldots]$ | $\rarr$ | | 51 | | | $\larr$ | $y$ | 52 | | $h(X) = \frac{\text{gate}_0(X) + \dots + y^i \cdot \text{gate}_i(X)}{t(X)}$ | | | 53 | | $h(X) = h_0(X) + \dots + X^{n(d-1)} h_{d-1}(X)$ | | | 54 | | $\mathbf{H} = [H_0, H_1, \dots, H_{d-1}]$ | $\rarr$ | | 55 | | | $\larr$ | $x$ | 56 | | $evals = [A_0(x), \dots, H_{d - 1}(x)]$ | $\rarr$ | | 57 | | | | Checks $h(x)$ | 58 | | | $\larr$ | $x_1, x_2$ | 59 | | Constructs $h'(X)$ multipoint opening poly | | | 60 | | $U = \text{Commit}(h'(X))$ | $\rarr$ | | 61 | | | $\larr$ | $x_3$ | 62 | | $\mathbf{q}_\text{evals} = [Q_0(x_3), Q_1(x_3), \dots]$ | $\rarr$ | | 63 | | $u_\text{eval} = U(x_3)$ | $\rarr$ | | 64 | | | $\larr$ | $x_4$ | 65 | 66 | Then the prover and verifier: 67 | 68 | - Construct $\text{finalPoly}(X)$ as a linear combination of $\mathbf{Q}$ and $U$ using 69 | powers of $x_4$; 70 | - Construct $\text{finalPolyEval}$ as the equivalent linear combination of 71 | $\mathbf{q}_\text{evals}$ and $u_\text{eval}$; and 72 | - Perform $\text{InnerProduct}(\text{finalPoly}(X), x_3, \text{finalPolyEval}).$ 73 | 74 | > TODO: Write up protocol components that provide zero-knowledge. 75 | -------------------------------------------------------------------------------- /book/src/design/proving-system/comparison.md: -------------------------------------------------------------------------------- 1 | # Comparison to other work 2 | 3 | ## BCMS20 Appendix A.2 4 | 5 | Appendix A.2 of [BCMS20] describes a polynomial commitment scheme that is similar to the 6 | one described in [BGH19] (BCMS20 being a generalization of the original Halo paper). Halo 7 | 2 builds on both of these works, and thus itself uses a polynomial commitment scheme that 8 | is very similar to the one in BCMS20. 9 | 10 | [BGH19]: https://eprint.iacr.org/2019/1021 11 | [BCMS20]: https://eprint.iacr.org/2020/499 12 | 13 | The following table provides a mapping between the variable names in BCMS20, and the 14 | equivalent objects in Halo 2 (which builds on the nomenclature from the Halo paper): 15 | 16 | | BCMS20 | Halo 2 | 17 | | :------------: | :-----------------: | 18 | | $S$ | $H$ | 19 | | $H$ | $U$ | 20 | | $C$ | `msm` or $P$ | 21 | | $\alpha$ | $\iota$ | 22 | | $\xi_0$ | $z$ | 23 | | $\xi_i$ | `challenge_i` | 24 | | $H'$ | $[z] U$ | 25 | | $\bar{p}$ | `s_poly` | 26 | | $\bar{\omega}$ | `s_poly_blind` | 27 | | $\bar{C}$ | `s_poly_commitment` | 28 | | $h(X)$ | $g(X)$ | 29 | | $U$ | $G$ | 30 | | $\omega'$ | `blind` / $\xi$ | 31 | | $\mathbf{c}$ | $\mathbf{a}$ | 32 | | $c$ | $a = \mathbf{a}_0$ | 33 | | $v'$ | $ab$ | 34 | 35 | Halo 2's polynomial commitment scheme differs from Appendix A.2 of BCMS20 in two ways: 36 | 37 | 1. Step 8 of the $\text{Open}$ algorithm computes a "non-hiding" commitment $C'$ prior to 38 | the inner product argument, which opens to the same value as $C$ but is a commitment to 39 | a randomly-drawn polynomial. The remainder of the protocol involves no blinding. 
By 40 | contrast, in Halo 2 we blind every single commitment that we make (even for instance 41 | and fixed polynomials, though using a blinding factor of 1 for the fixed polynomials); 42 | this makes the protocol simpler to reason about. As a consequence of this, the verifier 43 | needs to handle the cumulative blinding factor at the end of the protocol, and so there 44 | is no need to derive an equivalent to $C'$ at the start of the protocol. 45 | 46 | - $C'$ is also an input to the random oracle for $\xi_0$; in Halo 2 we utilize a 47 | transcript that has already committed to the equivalent components of $C'$ prior to 48 | sampling $z$. 49 | 50 | 2. The $\text{PC}_\text{DL}.\text{SuccinctCheck}$ subroutine (Figure 2 of BCMS20) computes 51 | the initial group element $C_0$ by adding $[v] H' = [v \xi_0] H$, which requires two 52 | scalar multiplications. Instead, we subtract $[v] G_0$ from the original commitment $P$, 53 | so that we're effectively opening the polynomial at the point to the value zero. The 54 | computation $[v] G_0$ is more efficient in the context of recursion because $G_0$ is a 55 | fixed base (so we can use lookup tables). 56 | -------------------------------------------------------------------------------- /book/src/design/proving-system/inner-product.md: -------------------------------------------------------------------------------- 1 | # Inner product argument 2 | 3 | Halo 2 uses a polynomial commitment scheme for which we can create polynomial commitment 4 | opening proofs, based around the Inner Product Argument. 5 | 6 | > TODO: Explain Halo 2's variant of the IPA. 7 | > 8 | > It is very similar to $\text{PC}_\text{DL}.\text{Open}$ from Appendix A.2 of [BCMS20]. 9 | > See [this comparison](comparison.md#bcms20-appendix-a2) for details. 10 | > 11 | > [BCMS20]: https://eprint.iacr.org/2020/499 12 | -------------------------------------------------------------------------------- /book/src/design/proving-system/multipoint-opening.md: -------------------------------------------------------------------------------- 1 | # Multipoint opening argument 2 | 3 | Consider the commitments $A, B, C, D$ to polynomials $a(X), b(X), c(X), d(X)$. 4 | Let's say that $a$ and $b$ were queried at the point $x$, while $c$ and $d$ 5 | were queried at both points $x$ and $\omega x$. (Here, $\omega$ is the primitive 6 | root of unity in the multiplicative subgroup over which we constructed the 7 | polynomials). 8 | 9 | To open these commitments, we could create a polynomial $Q$ for each point that we queried 10 | at (corresponding to each relative rotation used in the circuit). But this would not be 11 | efficient in the circuit; for example, $c(X)$ would appear in multiple polynomials. 12 | 13 | Instead, we can group the commitments by the sets of points at which they were queried: 14 | $$ 15 | \begin{array}{cccc} 16 | &\{x\}& &\{x, \omega x\}& \\ 17 | &A& &C& \\ 18 | &B& &D& 19 | \end{array} 20 | $$ 21 | 22 | For each of these groups, we combine them into a polynomial set, and create a single $Q$ 23 | for that set, which we open at each rotation. 24 | 25 | ## Optimization steps 26 | 27 | The multipoint opening optimization takes as input: 28 | 29 | - A random $x$ sampled by the verifier, at which we evaluate $a(X), b(X), c(X), d(X)$. 30 | - Evaluations of each polynomial at each point of interest, provided by the prover: 31 | $a(x), b(x), c(x), d(x), c(\omega x), d(\omega x)$ 32 | 33 | These are the outputs of the [vanishing argument](vanishing.md#evaluating-the-polynomials). 
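Before the steps below, the prover and verifier both derive the same grouping of commitments by point set (the $\{x\}$ and $\{x, \omega x\}$ sets in the running example). The following sketch of that bookkeeping is purely illustrative: the helper name is hypothetical, points are stood in for by small integers, and polynomials by string names; `halo2_proofs` performs the equivalent grouping internally when constructing its intermediate query sets.

```rust
use std::collections::BTreeMap;

/// Hypothetical illustration: group queried polynomials by the set of points
/// at which they are opened. The real implementation works with field
/// elements and commitment references rather than integers and names.
fn group_by_point_set(queries: &[(&str, Vec<u64>)]) -> BTreeMap<Vec<u64>, Vec<String>> {
    let mut sets: BTreeMap<Vec<u64>, Vec<String>> = BTreeMap::new();
    for (name, points) in queries {
        sets.entry(points.clone()).or_default().push(name.to_string());
    }
    sets
}

fn main() {
    // a and b are queried at {x}; c and d are queried at {x, ωx}.
    let (x, omega_x) = (1, 2);
    let grouped = group_by_point_set(&[
        ("a", vec![x]),
        ("b", vec![x]),
        ("c", vec![x, omega_x]),
        ("d", vec![x, omega_x]),
    ]);
    assert_eq!(grouped[&vec![x]], vec!["a", "b"]);
    assert_eq!(grouped[&vec![x, omega_x]], vec!["c", "d"]);
}
```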
34 | 35 | The multipoint opening optimization proceeds as such: 36 | 37 | 1. Sample random $x_1$, to keep $a, b, c, d$ linearly independent. 38 | 2. Accumulate polynomials and their corresponding evaluations according 39 | to the point set at which they were queried: 40 | `q_polys`: 41 | $$ 42 | \begin{array}{rccl} 43 | q_1(X) &=& a(X) &+& x_1 b(X) \\ 44 | q_2(X) &=& c(X) &+& x_1 d(X) 45 | \end{array} 46 | $$ 47 | `q_eval_sets`: 48 | ```math 49 | [ 50 | [a(x) + x_1 b(x)], 51 | [ 52 | c(x) + x_1 d(x), 53 | c(\omega x) + x_1 d(\omega x) 54 | ] 55 | ] 56 | ``` 57 | NB: `q_eval_sets` is a vector of sets of evaluations, where the outer vector 58 | corresponds to the point sets, which in this example are $\{x\}$ and $\{x, \omega x\}$, 59 | and the inner vector corresponds to the points in each set. 60 | 3. Interpolate each set of values in `q_eval_sets`: 61 | `r_polys`: 62 | $$ 63 | \begin{array}{cccc} 64 | r_1(X) s.t.&&& \\ 65 | &r_1(x) &=& a(x) + x_1 b(x) \\ 66 | r_2(X) s.t.&&& \\ 67 | &r_2(x) &=& c(x) + x_1 d(x) \\ 68 | &r_2(\omega x) &=& c(\omega x) + x_1 d(\omega x) \\ 69 | \end{array} 70 | $$ 71 | 4. Construct `f_polys` which check the correctness of `q_polys`: 72 | `f_polys` 73 | $$ 74 | \begin{array}{rcl} 75 | f_1(X) &=& \frac{ q_1(X) - r_1(X)}{X - x} \\ 76 | f_2(X) &=& \frac{ q_2(X) - r_2(X)}{(X - x)(X - \omega x)} \\ 77 | \end{array} 78 | $$ 79 | 80 | If $q_1(x) = r_1(x)$, then $f_1(X)$ should be a polynomial. 81 | If $q_2(x) = r_2(x)$ and $q_2(\omega x) = r_2(\omega x)$ 82 | then $f_2(X)$ should be a polynomial. 83 | 5. Sample random $x_2$ to keep the `f_polys` linearly independent. 84 | 6. Construct $f(X) = f_1(X) + x_2 f_2(X)$. 85 | 7. Sample random $x_3$, at which we evaluate $f(X)$: 86 | $$ 87 | \begin{array}{rcccl} 88 | f(x_3) &=& f_1(x_3) &+& x_2 f_2(x_3) \\ 89 | &=& \frac{q_1(x_3) - r_1(x_3)}{x_3 - x} &+& x_2\frac{q_2(x_3) - r_2(x_3)}{(x_3 - x)(x_3 - \omega x)} 90 | \end{array} 91 | $$ 92 | 8. Sample random $x_4$ to keep $f(X)$ and `q_polys` linearly independent. 93 | 9. Construct `final_poly`, $$final\_poly(X) = f(X) + x_4 q_1(X) + x_4^2 q_2(X),$$ 94 | which is the polynomial we commit to in the inner product argument. 95 | -------------------------------------------------------------------------------- /book/src/design/proving-system/permutation-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/scroll-tech/halo2/1492fd076f7b0b0173a12d304175171a2c75048f/book/src/design/proving-system/permutation-diagram.png -------------------------------------------------------------------------------- /book/src/design/proving-system/vanishing.md: -------------------------------------------------------------------------------- 1 | # Vanishing argument 2 | 3 | Having committed to the circuit assignments, the prover now needs to demonstrate that the 4 | various circuit relations are satisfied: 5 | 6 | - The custom gates, represented by polynomials $\text{gate}_i(X)$. 7 | - The rules of the lookup arguments. 8 | - The rules of the equality constraint permutations. 9 | 10 | Each of these relations is represented as a polynomial of degree $d$ (the maximum degree 11 | of any of the relations) with respect to the circuit columns. Given that the degree of the 12 | assignment polynomials for each column is $n - 1$, the relation polynomials have degree 13 | $d(n - 1)$ with respect to $X$. 
14 | 15 | > In our [example](../proving-system.md#example), these would be the gate polynomials, of 16 | > degree $3n - 3$: 17 | > 18 | > - $\text{gate}_0(X) = a_0(X) \cdot a_1(X) \cdot a_2(X \omega^{-1}) - a_3(X)$ 19 | > - $\text{gate}_1(X) = f_0(X \omega^{-1}) \cdot a_2(X)$ 20 | > - $\text{gate}_2(X) = f_0(X) \cdot a_3(X) \cdot a_0(X)$ 21 | 22 | A relation is satisfied if its polynomial is equal to zero. One way to demonstrate this is 23 | to divide each polynomial relation by the vanishing polynomial $t(X) = (X^n - 1)$, which 24 | is the lowest-degree monomial that has roots at every $\omega^i$. If relation's polynomial 25 | is perfectly divisible by $t(X)$, it is equal to zero over the domain (as desired). 26 | 27 | This simple construction would require a polynomial commitment per relation. Instead, we 28 | commit to all of the circuit relations simultaneously: the verifier samples $y$, and then 29 | the prover constructs the quotient polynomial 30 | 31 | $$h(X) = \frac{\text{gate}_0(X) + y \cdot \text{gate}_1(X) + \dots + y^i \cdot \text{gate}_i(X) + \dots}{t(X)},$$ 32 | 33 | where the numerator is a random (the prover commits to the cell assignments before the 34 | verifier samples $y$) linear combination of the circuit relations. 35 | 36 | - If the numerator polynomial (in formal indeterminate $X$) is perfectly divisible by 37 | $t(X)$, then with high probability all relations are satisfied. 38 | - Conversely, if at least one relation is not satisfied, then with high probability 39 | $h(x) \cdot t(x)$ will not equal the evaluation of the numerator at $x$. In this case, 40 | the numerator polynomial would not be perfectly divisible by $t(X)$. 41 | 42 | ## Committing to $h(X)$ 43 | 44 | $h(X)$ has degree $d(n - 1) - n$ (because the divisor $t(X)$ has degree $n$). However, the 45 | polynomial commitment scheme we use for Halo 2 only supports committing to polynomials of 46 | degree $n - 1$ (which is the maximum degree that the rest of the protocol needs to commit 47 | to). Instead of increasing the cost of the polynomial commitment scheme, the prover split 48 | $h(X)$ into pieces of degree $n - 1$ 49 | 50 | $$h_0(X) + X^n h_1(X) + \dots + X^{n(d-1)} h_{d-1}(X),$$ 51 | 52 | and produces blinding commitments to each piece 53 | 54 | $$\mathbf{H} = [\text{Commit}(h_0(X)), \text{Commit}(h_1(X)), \dots, \text{Commit}(h_{d-1}(X))].$$ 55 | 56 | ## Evaluating the polynomials 57 | 58 | At this point, all properties of the circuit have been committed to. The verifier now 59 | wants to see if the prover committed to the correct $h(X)$ polynomial. The verifier 60 | samples $x$, and the prover produces the purported evaluations of the various polynomials 61 | at $x$, for all the relative offsets used in the circuit, as well as $h(X)$. 62 | 63 | > In our [example](../proving-system.md#example), this would be: 64 | > 65 | > - $a_0(x)$ 66 | > - $a_1(x)$ 67 | > - $a_2(x)$, $a_2(x \omega^{-1})$ 68 | > - $a_3(x)$ 69 | > - $f_0(x)$, $f_0(x \omega^{-1})$ 70 | > - $h_0(x)$, ..., $h_{d-1}(x)$ 71 | 72 | The verifier checks that these evaluations satisfy the form of $h(X)$: 73 | 74 | $$\frac{\text{gate}_0(x) + \dots + y^i \cdot \text{gate}_i(x) + \dots}{t(x)} = h_0(x) + \dots + x^{n(d-1)} h_{d-1}(x)$$ 75 | 76 | Now content that the evaluations collectively satisfy the gate constraints, the verifier 77 | needs to check that the evaluations themselves are consistent with the original 78 | [circuit commitments](circuit-commitments.md), as well as $\mathbf{H}$. 
To implement this 79 | efficiently, we use a [multipoint opening argument](multipoint-opening.md). 80 | -------------------------------------------------------------------------------- /book/src/user.md: -------------------------------------------------------------------------------- 1 | # User Documentation 2 | 3 | You're probably here because you want to write circuits? Excellent! 4 | 5 | This section will guide you through the process of creating circuits with halo2. 6 | -------------------------------------------------------------------------------- /book/src/user/dev-tools.md: -------------------------------------------------------------------------------- 1 | # Developer tools 2 | 3 | The `halo2` crate includes several utilities to help you design and implement your 4 | circuits. 5 | 6 | ## Mock prover 7 | 8 | `halo2_proofs::dev::MockProver` is a tool for debugging circuits, as well as cheaply verifying 9 | their correctness in unit tests. The private and public inputs to the circuit are 10 | constructed as would normally be done to create a proof, but `MockProver::run` instead 11 | creates an object that will test every constraint in the circuit directly. It returns 12 | granular error messages that indicate which specific constraint (if any) is not satisfied. 13 | 14 | ## Circuit visualizations 15 | 16 | The `dev-graph` feature flag exposes several helper methods for creating graphical 17 | representations of circuits. 18 | 19 | On Debian systems, you will need the following additional packages: 20 | ```plaintext 21 | sudo apt install cmake libexpat1-dev libfreetype6-dev 22 | ``` 23 | 24 | ### Circuit layout 25 | 26 | `halo2_proofs::dev::CircuitLayout` renders the circuit layout as a grid: 27 | 28 | ```rust,ignore,no_run 29 | {{#include ../../../halo2_proofs/examples/circuit-layout.rs:dev-graph}} 30 | ``` 31 | 32 | - Columns are laid out from left to right as instance, advice and fixed. The order of 33 | columns is otherwise without meaning. 34 | - Instance columns have a white background. 35 | - Advice columns have a red background. 36 | - Fixed columns have a blue background. 37 | - Regions are shown as labelled green boxes (overlaying the background colour). A region 38 | may appear as multiple boxes if some of its columns happen to not be adjacent. 39 | - Cells that have been assigned to by the circuit will be shaded in grey. If any cells are 40 | assigned to more than once (which is usually a mistake), they will be shaded darker than 41 | the surrounding cells. 42 | 43 | ### Circuit structure 44 | 45 | `halo2_proofs::dev::circuit_dot_graph` builds a [DOT graph string] representing the given 46 | circuit, which can then be rendered with a variety of [layout programs]. The graph is built 47 | from calls to `Layouter::namespace` both within the circuit, and inside the gadgets and 48 | chips that it uses. 49 | 50 | [DOT graph string]: https://graphviz.org/doc/info/lang.html 51 | [layout programs]: https://en.wikipedia.org/wiki/DOT_(graph_description_language)#Layout_programs 52 | 53 | ```rust,ignore,no_run 54 | fn main() { 55 | // Prepare the circuit you want to render. 56 | // You don't need to include any witness variables. 57 | let a = Fp::rand(); 58 | let instance = Fp::one() + Fp::one(); 59 | let lookup_table = vec![instance, a, a, Fp::zero()]; 60 | let circuit: MyCircuit = MyCircuit { 61 | a: None, 62 | lookup_table, 63 | }; 64 | 65 | // Generate the DOT graph string. 
66 | let dot_string = halo2_proofs::dev::circuit_dot_graph(&circuit); 67 | 68 | // Now you can either handle it in Rust, or just 69 | // print it out to use with command-line tools. 70 | print!("{}", dot_string); 71 | } 72 | ``` 73 | 74 | ## Cost estimator 75 | 76 | The `cost-model` binary takes high-level parameters for a circuit design, and estimates 77 | the verification cost, as well as resulting proof size. 78 | 79 | ```plaintext 80 | Usage: cargo run --example cost-model -- [OPTIONS] k 81 | 82 | Positional arguments: 83 | k 2^K bound on the number of rows. 84 | 85 | Optional arguments: 86 | -h, --help Print this message. 87 | -a, --advice R[,R..] An advice column with the given rotations. May be repeated. 88 | -i, --instance R[,R..] An instance column with the given rotations. May be repeated. 89 | -f, --fixed R[,R..] A fixed column with the given rotations. May be repeated. 90 | -g, --gate-degree D Maximum degree of the custom gates. 91 | -l, --lookup N,I,T A lookup over N columns with max input degree I and max table degree T. May be repeated. 92 | -p, --permutation N A permutation over N columns. May be repeated. 93 | ``` 94 | 95 | For example, to estimate the cost of a circuit with three advice columns and one fixed 96 | column (with various rotations), and a maximum gate degree of 4: 97 | 98 | ```plaintext 99 | > cargo run --example cost-model -- -a 0,1 -a 0 -a-0,-1,1 -f 0 -g 4 11 100 | Finished dev [unoptimized + debuginfo] target(s) in 0.03s 101 | Running `target/debug/examples/cost-model -a 0,1 -a 0 -a 0,-1,1 -f 0 -g 4 11` 102 | Circuit { 103 | k: 11, 104 | max_deg: 4, 105 | advice_columns: 3, 106 | lookups: 0, 107 | permutations: [], 108 | column_queries: 7, 109 | point_sets: 3, 110 | estimator: Estimator, 111 | } 112 | Proof size: 1440 bytes 113 | Verification: at least 81.689ms 114 | ``` 115 | -------------------------------------------------------------------------------- /book/src/user/gadgets.md: -------------------------------------------------------------------------------- 1 | # Gadgets 2 | -------------------------------------------------------------------------------- /book/src/user/lookup-tables.md: -------------------------------------------------------------------------------- 1 | # Lookup tables 2 | 3 | In normal programs, you can trade memory for CPU to improve performance, by pre-computing 4 | and storing lookup tables for some part of the computation. We can do the same thing in 5 | halo2 circuits! 6 | 7 | A lookup table can be thought of as enforcing a *relation* between variables, where the relation is expressed as a table. 8 | Assuming we have only one lookup argument in our constraint system, the total size of tables is constrained by the size of the circuit: 9 | each table entry costs one row, and it also costs one row to do each lookup. 10 | 11 | TODO 12 | -------------------------------------------------------------------------------- /book/src/user/simple-example.md: -------------------------------------------------------------------------------- 1 | # A simple example 2 | 3 | Let's start with a simple circuit, to introduce you to the common APIs and how they are 4 | used. The circuit will take a public input $c$, and will prove knowledge of two private 5 | inputs $a$ and $b$ such that 6 | 7 | $$a^2 \cdot b^2 = c.$$ 8 | 9 | ## Define instructions 10 | 11 | Firstly, we need to define the instructions that our circuit will rely on. 
Instructions 12 | are the boundary between high-level [gadgets](../concepts/gadgets.md) and the low-level 13 | circuit operations. Instructions may be as coarse or as granular as desired, but in 14 | practice you want to strike a balance between an instruction being large enough to 15 | effectively optimize its implementation, and small enough that it is meaningfully 16 | reusable. 17 | 18 | For our circuit, we will use three instructions: 19 | - Load a private number into the circuit. 20 | - Multiply two numbers. 21 | - Expose a number as a public input to the circuit. 22 | 23 | We also need a type for a variable representing a number. Instruction interfaces provide 24 | associated types for their inputs and outputs, to allow the implementations to represent 25 | these in a way that makes the most sense for their optimization goals. 26 | 27 | ```rust,ignore,no_run 28 | {{#include ../../../halo2_proofs/examples/simple-example.rs:instructions}} 29 | ``` 30 | 31 | ## Define a chip implementation 32 | 33 | For our circuit, we will build a [chip](../concepts/chips.md) that provides the above 34 | numeric instructions for a finite field. 35 | 36 | ```rust,ignore,no_run 37 | {{#include ../../../halo2_proofs/examples/simple-example.rs:chip}} 38 | ``` 39 | 40 | Every chip needs to implement the `Chip` trait. This defines the properties of the chip 41 | that a `Layouter` may rely on when synthesizing a circuit, as well as enabling any initial 42 | state that the chip requires to be loaded into the circuit. 43 | 44 | ```rust,ignore,no_run 45 | {{#include ../../../halo2_proofs/examples/simple-example.rs:chip-impl}} 46 | ``` 47 | 48 | ## Configure the chip 49 | 50 | The chip needs to be configured with the columns, permutations, and gates that will be 51 | required to implement all of the desired instructions. 52 | 53 | ```rust,ignore,no_run 54 | {{#include ../../../halo2_proofs/examples/simple-example.rs:chip-config}} 55 | ``` 56 | 57 | ## Implement chip traits 58 | 59 | ```rust,ignore,no_run 60 | {{#include ../../../halo2_proofs/examples/simple-example.rs:instructions-impl}} 61 | ``` 62 | 63 | ## Build the circuit 64 | 65 | Now that we have the instructions we need, and a chip that implements them, we can finally 66 | build our circuit! 67 | 68 | ```rust,ignore,no_run 69 | {{#include ../../../halo2_proofs/examples/simple-example.rs:circuit}} 70 | ``` 71 | 72 | ## Testing the circuit 73 | 74 | `halo2_proofs::dev::MockProver` can be used to test that the circuit is working correctly. The 75 | private and public inputs to the circuit are constructed as we will do to create a proof, 76 | but by passing them to `MockProver::run` we get an object that can test every constraint 77 | in the circuit, and tell us exactly what is failing (if anything). 78 | 79 | ```rust,ignore,no_run 80 | {{#include ../../../halo2_proofs/examples/simple-example.rs:test-circuit}} 81 | ``` 82 | 83 | ## Full example 84 | 85 | You can find the source code for this example 86 | [here](https://github.com/zcash/halo2/tree/main/halo2_proofs/examples/simple-example.rs). 87 | -------------------------------------------------------------------------------- /book/src/user/tips-and-tricks.md: -------------------------------------------------------------------------------- 1 | # Tips and tricks 2 | 3 | This section contains various ideas and snippets that you might find useful while writing 4 | halo2 circuits. 5 | 6 | ## Small range constraints 7 | 8 | A common constraint used in R1CS circuits is the boolean constraint: $b * (1 - b) = 0$. 
9 | This constraint can only be satisfied by $b = 0$ or $b = 1$. 10 | 11 | In halo2 circuits, you can similarly constrain a cell to have one of a small set of 12 | values. For example, to constrain $a$ to the range $[0..5]$, you would create a gate of 13 | the form: 14 | 15 | $$a \cdot (1 - a) \cdot (2 - a) \cdot (3 - a) \cdot (4 - a) = 0$$ 16 | 17 | while to constrain $c$ to be either 7 or 13, you would use: 18 | 19 | $$(7 - c) \cdot (13 - c) = 0$$ 20 | 21 | > The underlying principle here is that we create a polynomial constraint with roots at 22 | > each value in the set of possible values we want to allow. In R1CS circuits, the maximum 23 | > supported polynomial degree is 2 (due to all constraints being of the form $a * b = c$). 24 | > In halo2 circuits, you can use arbitrary-degree polynomials - with the proviso that 25 | > higher-degree constraints are more expensive to use. 26 | 27 | Note that the roots don't have to be constants; for example $(a - x) \cdot (a - y) \cdot (a - z) = 0$ will constrain $a$ to be equal to one of $\{ x, y, z \}$ where the latter can be arbitrary polynomials, as long as the whole expression stays within the maximum degree bound. 28 | 29 | ## Small set interpolation 30 | We can use Lagrange interpolation to create a polynomial constraint that maps 31 | $f(X) = Y$ for small sets of $X \in \{x_i\}, Y \in \{y_i\}$. 32 | 33 | For instance, say we want to map a 2-bit value to a "spread" version interleaved 34 | with zeros. We first precompute the evaluations at each point: 35 | 36 | $$ 37 | \begin{array}{rcl} 38 | 00 \rightarrow 0000 &\implies& 0 \rightarrow 0 \\ 39 | 01 \rightarrow 0001 &\implies& 1 \rightarrow 1 \\ 40 | 10 \rightarrow 0100 &\implies& 2 \rightarrow 4 \\ 41 | 11 \rightarrow 0101 &\implies& 3 \rightarrow 5 42 | \end{array} 43 | $$ 44 | 45 | Then, we construct the Lagrange basis polynomial for each point using the 46 | identity: 47 | $$\mathcal{l}_j(X) = \prod_{0 \leq m < k,\; m \neq j} \frac{x - x_m}{x_j - x_m},$$ 48 | where $k$ is the number of data points. ($k = 4$ in our example above.) 49 | 50 | Recall that the Lagrange basis polynomial $\mathcal{l}_j(X)$ evaluates to $1$ at 51 | $X = x_j$ and $0$ at all other $x_i, j \neq i.$ 52 | 53 | Continuing our example, we get four Lagrange basis polynomials: 54 | 55 | $$ 56 | \begin{array}{ccc} 57 | l_0(X) &=& \frac{(X - 3)(X - 2)(X - 1)}{(-3)(-2)(-1)} \\[1ex] 58 | l_1(X) &=& \frac{(X - 3)(X - 2)(X)}{(-2)(-1)(1)} \\[1ex] 59 | l_2(X) &=& \frac{(X - 3)(X - 1)(X)}{(-1)(1)(2)} \\[1ex] 60 | l_3(X) &=& \frac{(X - 2)(X - 1)(X)}{(1)(2)(3)} 61 | \end{array} 62 | $$ 63 | 64 | Our polynomial constraint is then 65 | 66 | $$ 67 | \begin{array}{cccccccccccl} 68 | &f(0) \cdot l_0(X) &+& f(1) \cdot l_1(X) &+& f(2) \cdot l_2(X) &+& f(3) \cdot l_3(X) &-& f(X) &=& 0 \\ 69 | \implies& 0 \cdot l_0(X) &+& 1 \cdot l_1(X) &+& 4 \cdot l_2(X) &+& 5 \cdot l_3(X) &-& f(X) &=& 0. \\ 70 | \end{array} 71 | $$ 72 | -------------------------------------------------------------------------------- /halo2/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to Rust's notion of 6 | [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [0.1.0-beta.2] - 2022-02-14 11 | ### Removed 12 | - Everything (moved to `halo2_proofs` crate). 
13 | 14 | ## [0.1.0-beta.1] - 2021-09-24 15 | Initial beta release! 16 | -------------------------------------------------------------------------------- /halo2/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "halo2" 3 | version = "0.1.0-beta.2" 4 | authors = [ 5 | "Jack Grigg ", 6 | ] 7 | edition = "2021" 8 | rust-version = "1.56.1" 9 | description = "[BETA] Fast zero-knowledge proof-carrying data implementation with no trusted setup" 10 | license = "MIT OR Apache-2.0" 11 | repository = "https://github.com/zcash/halo2" 12 | documentation = "https://docs.rs/halo2" 13 | readme = "../README.md" 14 | categories = ["cryptography"] 15 | keywords = ["halo", "proofs", "recursive", "zkp", "zkSNARKs"] 16 | 17 | [package.metadata.docs.rs] 18 | all-features = true 19 | rustdoc-args = ["--cfg", "docsrs", "--html-in-header", "katex-header.html"] 20 | 21 | [dependencies] 22 | halo2_proofs = { path = "../halo2_proofs", default-features = false } 23 | 24 | [lib] 25 | bench = false 26 | -------------------------------------------------------------------------------- /halo2/katex-header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /halo2/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # halo2 2 | 3 | #![cfg_attr(docsrs, feature(doc_cfg))] 4 | #![deny(rustdoc::broken_intra_doc_links)] 5 | #![deny(missing_debug_implementations)] 6 | #![deny(missing_docs)] 7 | #![deny(unsafe_code)] 8 | -------------------------------------------------------------------------------- /halo2_gadgets/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to Rust's notion of 6 | [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [0.2.0] - 2022-06-23 11 | ### Added 12 | - `halo2_gadgets::utilities::RangeConstrained>::bitrange_of` 13 | 14 | ### Changed 15 | All APIs that represented witnessed values as `Option` now represent them as 16 | `halo2_proofs::circuit::Value`. The core API changes are listed below. 17 | 18 | - Migrated to `halo2_proofs 0.2.0`. 
19 | - The following APIs now take `Value<_>` instead of `Option<_>`: 20 | - `halo2_gadgets::ecc`: 21 | - `EccInstructions::{witness_point, witness_point_non_id}` 22 | - `EccInstructions::{witness_scalar_var, witness_scalar_fixed}` 23 | - `ScalarVar::new` 24 | - `ScalarFixed::new` 25 | - `NonIdentityPoint::new` 26 | - `Point::new` 27 | - `halo2_gadgets::sinsemilla`: 28 | - `SinsemillaInstructions::witness_message_piece` 29 | - `MessagePiece::{from_field_elem, from_subpieces}` 30 | - `halo2_gadgets::sinsemilla::merkle`: 31 | - `MerklePath::construct` 32 | - `halo2_gadgets::utilities`: 33 | - `UtilitiesInstructions::load_private` 34 | - `RangeConstrained::witness_short` 35 | - `halo2_gadgets::utilities::cond_swap`: 36 | - `CondSwapInstructions::swap` 37 | - `halo2_gadgets::utilities::decompose_running_sum`: 38 | - `RunningSumConfig::witness_decompose` 39 | - `halo2_gadgets::utilities::lookup_range_check`: 40 | - `LookupRangeCheckConfig::{witness_check, witness_short_check}` 41 | - The following APIs now return `Value<_>` instead of `Option<_>`: 42 | - `halo2_gadgets::ecc::chip`: 43 | - `EccPoint::{point, is_identity}` 44 | - `NonIdentityEccPoint::point` 45 | - `halo2_gadgets::utilities`: 46 | - `FieldValue::value` 47 | - `Var::value` 48 | - `RangeConstrained::value` 49 | - `halo2_gadgets::sha256::BlockWord` is now a newtype wrapper around 50 | `Value` instead of `Option`. 51 | 52 | ### Removed 53 | - `halo2_gadgets::utilities::RangeConstrained>::bitrange_of` 54 | 55 | ## [0.1.0] - 2022-05-10 56 | ### Added 57 | - `halo2_gadgets::utilities`: 58 | - `FieldValue` trait. 59 | - `RangeConstrained` newtype wrapper. 60 | - `halo2_gadgets::ecc`: 61 | - `EccInstructions::witness_scalar_var` API to witness a full-width scalar 62 | used in variable-base scalar multiplication. 63 | - `EccInstructions::witness_scalar_fixed`, to witness a full-width scalar 64 | used in fixed-base scalar multiplication. 65 | - `EccInstructions::scalar_fixed_from_signed_short`, to construct a signed 66 | short scalar used in fixed-base scalar multiplication from its magnitude and 67 | sign. 68 | - `BaseFitsInScalarInstructions` trait that can be implemented for a curve 69 | whose base field fits into its scalar field. This provides a method 70 | `scalar_var_from_base` that converts a base field element that exists as 71 | a variable in the circuit, into a scalar to be used in variable-base 72 | scalar multiplication. 73 | - `ScalarFixed::new` 74 | - `ScalarFixedShort::new` 75 | - `ScalarVar::new` and `ScalarVar::from_base` gadget APIs. 76 | - `halo2_gadgets::ecc::chip`: 77 | - `ScalarVar` enum with `BaseFieldElem` and `FullWidth` variants. `FullWidth` 78 | is unimplemented for `halo2_gadgets v0.1.0`. 79 | - `halo2_gadgets::poseidon`: 80 | - `primitives` (moved from `halo2_gadgets::primitives::poseidon`) 81 | - `halo2_gadgets::sinsemilla`: 82 | - `primitives` (moved from `halo2_gadgets::primitives::sinsemilla`) 83 | - `MessagePiece::from_subpieces` 84 | 85 | ### Changed 86 | - `halo2_gadgets::ecc`: 87 | - `EccInstructions::ScalarVar` is now treated as a full-width scalar, instead 88 | of being restricted to a base field element. 89 | - `EccInstructions::mul` now takes a `Self::ScalarVar` as argument, instead 90 | of assuming that the scalar fits in a base field element `Self::Var`. 91 | - `EccInstructions::mul_fixed` now takes a `Self::ScalarFixed` as argument, 92 | instead of requiring that the chip always witness a new scalar. 
93 | - `EccInstructions::mul_fixed_short` now takes a `Self::ScalarFixedShort` as 94 | argument, instead of the magnitude and sign directly. 95 | - `FixedPoint::mul` now takes `ScalarFixed` instead of `Option`. 96 | - `FixedPointShort::mul` now takes `ScalarFixedShort` instead of 97 | `(EccChip::Var, EccChip::Var)`. 98 | - `halo2_gadgets::ecc::chip`: 99 | - `FixedPoint::u` now returns `Vec<[::Repr; H]>` 100 | instead of `Vec<[[u8; 32]; H]>`. 101 | - `ScalarKind` has been renamed to `FixedScalarKind`. 102 | - `halo2_gadgets::sinsemilla`: 103 | - `CommitDomain::{commit, short_commit}` now take the trapdoor `r` as an 104 | `ecc::ScalarFixed` instead of `Option`. 105 | - `merkle::MerklePath` can now be constructed with more or fewer than two 106 | `MerkleChip`s. 107 | 108 | ### Removed 109 | - `halo2_gadgets::primitives` (use `halo2_gadgets::poseidon::primitives` or 110 | `halo2_gadgets::sinsemilla::primitives` instead). 111 | 112 | ## [0.1.0-beta.3] - 2022-04-06 113 | ### Changed 114 | - Migrated to `halo2_proofs 0.1.0-beta.4`. 115 | 116 | ## [0.1.0-beta.2] - 2022-03-22 117 | ### Changed 118 | - Migrated to `halo2_proofs 0.1.0-beta.3`. 119 | 120 | ## [0.1.0-beta.1] - 2022-02-14 121 | Initial release! 122 | -------------------------------------------------------------------------------- /halo2_gadgets/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "halo2_gadgets" 3 | version = "1.1.0" 4 | authors = [ 5 | "Sean Bowe ", 6 | "Jack Grigg ", 7 | "Daira Hopwood ", 8 | "Ying Tong Lai ", 9 | "Kris Nuttycombe ", 10 | ] 11 | edition = "2021" 12 | rust-version = "1.66.0" 13 | description = "Reusable gadgets and chip implementations for Halo 2" 14 | license = "MIT OR Apache-2.0" 15 | repository = "https://github.com/zcash/halo2" 16 | readme = "README.md" 17 | categories = ["cryptography"] 18 | keywords = ["halo", "proofs", "zcash", "zkp", "zkSNARKs"] 19 | 20 | [package.metadata.docs.rs] 21 | all-features = true 22 | rustdoc-args = ["--cfg", "docsrs", "--html-in-header", "katex-header.html"] 23 | 24 | [dependencies] 25 | arrayvec = "0.7.0" 26 | bitvec = "1" 27 | ff = { version = "0.13", features = ["bits"] } 28 | group = "0.13" 29 | halo2_proofs = { path = "../halo2_proofs", default-features = false } 30 | lazy_static = "1" 31 | halo2curves = { version = "0.1.0" } 32 | proptest = { version = "1.0.0", optional = true } 33 | rand = "0.8" 34 | subtle = "2.3" 35 | uint = "0.9.2" 36 | 37 | # Developer tooling dependencies 38 | plotters = { version = "0.3.0", default-features = false, optional = true } 39 | 40 | [dev-dependencies] 41 | criterion = "0.3" 42 | proptest = "1.0.0" 43 | 44 | [target.'cfg(unix)'.dev-dependencies] 45 | pprof = { version = "0.8", features = ["criterion", "flamegraph"] } # MSRV 1.56 46 | 47 | [lib] 48 | bench = false 49 | 50 | [features] 51 | test-dev-graph = [ 52 | "halo2_proofs/dev-graph", 53 | "plotters", 54 | "plotters/bitmap_backend", 55 | "plotters/bitmap_encoder", 56 | "plotters/ttf", 57 | ] 58 | circuit-params = ["halo2_proofs/circuit-params"] 59 | test-dependencies = ["proptest"] 60 | unstable = [] 61 | 62 | [[bench]] 63 | name = "primitives" 64 | harness = false 65 | 66 | [[bench]] 67 | name = "poseidon" 68 | harness = false 69 | 70 | [[bench]] 71 | name = "sha256" 72 | harness = false 73 | required-features = ["unstable"] 74 | -------------------------------------------------------------------------------- /halo2_gadgets/README.md: 
-------------------------------------------------------------------------------- 1 | # halo2_gadgets [![Crates.io](https://img.shields.io/crates/v/halo2_gadgets.svg)](https://crates.io/crates/halo2_gadgets) # 2 | 3 | Requires Rust 1.56.1+. 4 | 5 | ## Documentation 6 | 7 | - [The Halo 2 Book](https://zcash.github.io/halo2/) 8 | - [Crate documentation](https://docs.rs/halo2_gadgets) 9 | 10 | ## License 11 | 12 | Licensed under either of 13 | 14 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 15 | http://www.apache.org/licenses/LICENSE-2.0) 16 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 17 | 18 | at your option. 19 | 20 | ### Contribution 21 | 22 | Unless you explicitly state otherwise, any contribution intentionally 23 | submitted for inclusion in the work by you, as defined in the Apache-2.0 24 | license, shall be dual licensed as above, without any additional terms or 25 | conditions. 26 | -------------------------------------------------------------------------------- /halo2_gadgets/benches/primitives.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 2 | use ff::Field; 3 | use halo2_gadgets::{ 4 | poseidon::primitives::{self as poseidon, ConstantLength, P128Pow5T3}, 5 | sinsemilla::primitives as sinsemilla, 6 | }; 7 | 8 | use halo2curves::pasta::pallas; 9 | #[cfg(unix)] 10 | use pprof::criterion::{Output, PProfProfiler}; 11 | use rand::{rngs::OsRng, Rng}; 12 | 13 | fn bench_primitives(c: &mut Criterion) { 14 | let mut rng = OsRng; 15 | 16 | { 17 | let mut group = c.benchmark_group("Poseidon"); 18 | 19 | let message = [pallas::Base::random(rng), pallas::Base::random(rng)]; 20 | 21 | group.bench_function("2-to-1", |b| { 22 | b.iter(|| { 23 | poseidon::Hash::<_, P128Pow5T3, ConstantLength<2>, 3, 2>::init().hash(message) 24 | }) 25 | }); 26 | } 27 | 28 | { 29 | let mut group = c.benchmark_group("Sinsemilla"); 30 | 31 | let hasher = sinsemilla::HashDomain::new("hasher"); 32 | let committer = sinsemilla::CommitDomain::new("committer"); 33 | let bits: Vec = (0..1086).map(|_| rng.gen()).collect(); 34 | let r = pallas::Scalar::random(rng); 35 | 36 | // Benchmark the input sizes we use in Orchard: 37 | // - 510 bits for Commit^ivk 38 | // - 520 bits for MerkleCRH 39 | // - 1086 bits for NoteCommit 40 | for size in [510, 520, 1086] { 41 | group.bench_function(BenchmarkId::new("hash-to-point", size), |b| { 42 | b.iter(|| hasher.hash_to_point(bits[..size].iter().cloned())) 43 | }); 44 | 45 | group.bench_function(BenchmarkId::new("hash", size), |b| { 46 | b.iter(|| hasher.hash(bits[..size].iter().cloned())) 47 | }); 48 | 49 | group.bench_function(BenchmarkId::new("commit", size), |b| { 50 | b.iter(|| committer.commit(bits[..size].iter().cloned(), &r)) 51 | }); 52 | 53 | group.bench_function(BenchmarkId::new("short-commit", size), |b| { 54 | b.iter(|| committer.commit(bits[..size].iter().cloned(), &r)) 55 | }); 56 | } 57 | } 58 | } 59 | 60 | #[cfg(unix)] 61 | criterion_group! 
{ 62 | name = benches; 63 | config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); 64 | targets = bench_primitives 65 | } 66 | #[cfg(not(unix))] 67 | criterion_group!(benches, bench_primitives); 68 | criterion_main!(benches); 69 | -------------------------------------------------------------------------------- /halo2_gadgets/katex-header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /halo2_gadgets/proptest-regressions/constants/util.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc 251d6e9f7ad2f5cd8679dec6b69aa9c879baae8742791b19669c136aef12deac # shrinks to scalar = 0x0000000000000000000000000000000000000000000000000000000000000000, window_num_bits = 6 8 | -------------------------------------------------------------------------------- /halo2_gadgets/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! This crate provides various common gadgets and chips for use with `halo2_proofs`. 2 | //! 3 | //! # Gadgets 4 | //! 5 | //! Gadgets are an abstraction for writing reusable and interoperable circuit logic. They 6 | //! do not create any circuit constraints or assignments themselves, instead interacting 7 | //! with the circuit through a defined "instruction set". A circuit developer uses gadgets 8 | //! by instantiating them with a particular choice of chip. 9 | //! 10 | //! # Chips 11 | //! 12 | //! Chips implement the low-level circuit constraints. The same instructions may be 13 | //! implemented by multiple chips, enabling different performance trade-offs to be made. 14 | //! Chips can be highly optimised by their developers, as long as they conform to the 15 | //! defined instructions. 16 | 17 | #![cfg_attr(docsrs, feature(doc_cfg))] 18 | // Temporary until we have more of the crate implemented. 19 | #![allow(dead_code)] 20 | // Catch documentation errors caused by code changes. 21 | #![deny(rustdoc::broken_intra_doc_links)] 22 | #![deny(missing_debug_implementations)] 23 | #![deny(missing_docs)] 24 | #![deny(unsafe_code)] 25 | 26 | pub mod ecc; 27 | pub mod poseidon; 28 | #[cfg(feature = "unstable")] 29 | #[cfg_attr(docsrs, doc(cfg(feature = "unstable")))] 30 | pub mod sha256; 31 | pub mod sinsemilla; 32 | pub mod utilities; 33 | -------------------------------------------------------------------------------- /halo2_gadgets/src/poseidon/primitives/mds.rs: -------------------------------------------------------------------------------- 1 | use ff::FromUniformBytes; 2 | 3 | use super::{grain::Grain, Mds}; 4 | 5 | pub(super) fn generate_mds + Ord, const T: usize>( 6 | grain: &mut Grain, 7 | mut select: usize, 8 | ) -> (Mds, Mds) { 9 | let (xs, ys, mds) = loop { 10 | // Generate two [F; T] arrays of unique field elements. 11 | let (xs, ys) = loop { 12 | let mut vals: Vec<_> = (0..2 * T) 13 | .map(|_| grain.next_field_element_without_rejection()) 14 | .collect(); 15 | 16 | // Check that we have unique field elements. 
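// (A sorted, deduplicated copy reveals collisions: if its length differs from the
// original, at least two of the 2*T sampled candidates coincide and the whole batch
// is resampled on the next iteration of this inner loop.)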
17 | let mut unique = vals.clone(); 18 | unique.sort_unstable(); 19 | unique.dedup(); 20 | if vals.len() == unique.len() { 21 | let rhs = vals.split_off(T); 22 | break (vals, rhs); 23 | } 24 | }; 25 | 26 | // We need to ensure that the MDS is secure. Instead of checking the MDS against 27 | // the relevant algorithms directly, we witness a fixed number of MDS matrices 28 | // that we need to sample from the given Grain state before obtaining a secure 29 | // matrix. This can be determined out-of-band via the reference implementation in 30 | // Sage. 31 | if select != 0 { 32 | select -= 1; 33 | continue; 34 | } 35 | 36 | // Generate a Cauchy matrix, with elements a_ij in the form: 37 | // a_ij = 1/(x_i + y_j); x_i + y_j != 0 38 | // 39 | // It would be much easier to use the alternate definition: 40 | // a_ij = 1/(x_i - y_j); x_i - y_j != 0 41 | // 42 | // These are clearly equivalent on `y <- -y`, but it is easier to work with the 43 | // negative formulation, because ensuring that xs ∪ ys is unique implies that 44 | // x_i - y_j != 0 by construction (whereas the positive case does not hold). It 45 | // also makes computation of the matrix inverse simpler below (the theorem used 46 | // was formulated for the negative definition). 47 | // 48 | // However, the Poseidon paper and reference impl use the positive formulation, 49 | // and we want to rely on the reference impl for MDS security, so we use the same 50 | // formulation. 51 | let mut mds = [[F::ZERO; T]; T]; 52 | #[allow(clippy::needless_range_loop)] 53 | for i in 0..T { 54 | for j in 0..T { 55 | let sum = xs[i] + ys[j]; 56 | // We leverage the secure MDS selection counter to also check this. 57 | assert!(!sum.is_zero_vartime()); 58 | mds[i][j] = sum.invert().unwrap(); 59 | } 60 | } 61 | 62 | break (xs, ys, mds); 63 | }; 64 | 65 | // Compute the inverse. All square Cauchy matrices have a non-zero determinant and 66 | // thus are invertible. The inverse for a Cauchy matrix of the form: 67 | // 68 | // a_ij = 1/(x_i - y_j); x_i - y_j != 0 69 | // 70 | // has elements b_ij given by: 71 | // 72 | // b_ij = (x_j - y_i) A_j(y_i) B_i(x_j) (Schechter 1959, Theorem 1) 73 | // 74 | // where A_i(x) and B_i(x) are the Lagrange polynomials for xs and ys respectively. 75 | // 76 | // We adapt this to the positive Cauchy formulation by negating ys. 77 | let mut mds_inv = [[F::ZERO; T]; T]; 78 | let l = |xs: &[F], j, x: F| { 79 | let x_j = xs[j]; 80 | xs.iter().enumerate().fold(F::ONE, |acc, (m, x_m)| { 81 | if m == j { 82 | acc 83 | } else { 84 | // We can invert freely; by construction, the elements of xs are distinct. 85 | let diff: F = x_j - *x_m; 86 | acc * (x - x_m) * diff.invert().unwrap() 87 | } 88 | }) 89 | }; 90 | let neg_ys: Vec<_> = ys.iter().map(|y| -*y).collect(); 91 | for i in 0..T { 92 | for j in 0..T { 93 | mds_inv[i][j] = (xs[j] - neg_ys[i]) * l(&xs, j, neg_ys[i]) * l(&neg_ys, i, xs[j]); 94 | } 95 | } 96 | 97 | (mds, mds_inv) 98 | } 99 | 100 | #[cfg(test)] 101 | mod tests { 102 | use halo2curves::pasta::Fp; 103 | 104 | use super::{generate_mds, Grain}; 105 | 106 | #[test] 107 | fn poseidon_mds() { 108 | const T: usize = 3; 109 | let mut grain = Grain::new(super::super::grain::SboxType::Pow, T as u16, 8, 56); 110 | let (mds, mds_inv) = generate_mds::(&mut grain, 0); 111 | 112 | // Verify that MDS * MDS^-1 = I. 
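// Entry (i, j) of the product below is sum_k mds[i][k] * mds_inv[k][j]; it must be
// one on the diagonal and zero everywhere else.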
113 | #[allow(clippy::needless_range_loop)] 114 | for i in 0..T { 115 | for j in 0..T { 116 | let expected = if i == j { Fp::one() } else { Fp::zero() }; 117 | assert_eq!( 118 | (0..T).fold(Fp::zero(), |acc, k| acc + (mds[i][k] * mds_inv[k][j])), 119 | expected 120 | ); 121 | } 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /halo2_gadgets/src/sha256/table16/compression/subregion_main.rs: -------------------------------------------------------------------------------- 1 | use super::{ 2 | super::{AssignedBits, RoundWord, RoundWordA, RoundWordE, StateWord, ROUND_CONSTANTS}, 3 | compression_util::*, 4 | CompressionConfig, Field, State, 5 | }; 6 | use halo2_proofs::{circuit::Region, plonk::Error}; 7 | 8 | impl CompressionConfig { 9 | #[allow(clippy::many_single_char_names)] 10 | pub fn assign_round( 11 | &self, 12 | region: &mut Region<'_, F>, 13 | round_idx: MainRoundIdx, 14 | state: State, 15 | schedule_word: &(AssignedBits, AssignedBits), 16 | ) -> Result, Error> { 17 | let a_7 = self.extras[3]; 18 | 19 | let (a, b, c, d, e, f, g, h) = match_state(state); 20 | 21 | // s_upper_sigma_1(E) 22 | let sigma_1 = self.assign_upper_sigma_1(region, round_idx, e.pieces.clone().unwrap())?; 23 | 24 | // Ch(E, F, G) 25 | let ch = self.assign_ch( 26 | region, 27 | round_idx, 28 | e.spread_halves.clone().unwrap(), 29 | f.spread_halves.clone(), 30 | )?; 31 | let ch_neg = self.assign_ch_neg( 32 | region, 33 | round_idx, 34 | e.spread_halves.clone().unwrap(), 35 | g.spread_halves.clone(), 36 | )?; 37 | 38 | // s_upper_sigma_0(A) 39 | let sigma_0 = self.assign_upper_sigma_0(region, round_idx, a.pieces.clone().unwrap())?; 40 | 41 | // Maj(A, B, C) 42 | let maj = self.assign_maj( 43 | region, 44 | round_idx, 45 | a.spread_halves.clone().unwrap(), 46 | b.spread_halves.clone(), 47 | c.spread_halves.clone(), 48 | )?; 49 | 50 | // H' = H + Ch(E, F, G) + s_upper_sigma_1(E) + K + W 51 | let h_prime = self.assign_h_prime( 52 | region, 53 | round_idx, 54 | h, 55 | ch, 56 | ch_neg, 57 | sigma_1, 58 | ROUND_CONSTANTS[round_idx.as_usize()], 59 | schedule_word, 60 | )?; 61 | 62 | // E_new = H' + D 63 | let e_new_dense = self.assign_e_new(region, round_idx, &d, &h_prime)?; 64 | let e_new_val = e_new_dense.value(); 65 | 66 | // A_new = H' + Maj(A, B, C) + sigma_0(A) 67 | let a_new_dense = self.assign_a_new(region, round_idx, maj, sigma_0, h_prime)?; 68 | let a_new_val = a_new_dense.value(); 69 | 70 | if round_idx < 63.into() { 71 | // Assign and copy A_new 72 | let a_new_row = get_decompose_a_row((round_idx + 1).into()); 73 | a_new_dense 74 | .0 75 | .copy_advice(|| "a_new_lo", region, a_7, a_new_row)?; 76 | a_new_dense 77 | .1 78 | .copy_advice(|| "a_new_hi", region, a_7, a_new_row + 1)?; 79 | 80 | // Assign and copy E_new 81 | let e_new_row = get_decompose_e_row((round_idx + 1).into()); 82 | e_new_dense 83 | .0 84 | .copy_advice(|| "e_new_lo", region, a_7, e_new_row)?; 85 | e_new_dense 86 | .1 87 | .copy_advice(|| "e_new_hi", region, a_7, e_new_row + 1)?; 88 | 89 | // Decompose A into (2, 11, 9, 10)-bit chunks 90 | let a_new = self.decompose_a(region, (round_idx + 1).into(), a_new_val)?; 91 | 92 | // Decompose E into (6, 5, 14, 7)-bit chunks 93 | let e_new = self.decompose_e(region, (round_idx + 1).into(), e_new_val)?; 94 | 95 | Ok(State::new( 96 | StateWord::A(a_new), 97 | StateWord::B(RoundWord::new(a.dense_halves, a.spread_halves.unwrap())), 98 | StateWord::C(b), 99 | StateWord::D(c.dense_halves), 100 | StateWord::E(e_new), 101 | 
StateWord::F(RoundWord::new(e.dense_halves, e.spread_halves.unwrap())), 102 | StateWord::G(f), 103 | StateWord::H(g.dense_halves), 104 | )) 105 | } else { 106 | Ok(State::new( 107 | StateWord::A(RoundWordA::new_dense(a_new_dense)), 108 | StateWord::B(RoundWord::new(a.dense_halves, a.spread_halves.unwrap())), 109 | StateWord::C(b), 110 | StateWord::D(c.dense_halves), 111 | StateWord::E(RoundWordE::new_dense(e_new_dense)), 112 | StateWord::F(RoundWord::new(e.dense_halves, e.spread_halves.unwrap())), 113 | StateWord::G(f), 114 | StateWord::H(g.dense_halves), 115 | )) 116 | } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /halo2_gadgets/src/sha256/table16/gates.rs: -------------------------------------------------------------------------------- 1 | use ff::PrimeField; 2 | use halo2_proofs::{arithmetic::Field, plonk::Expression}; 3 | 4 | pub struct Gate(pub Expression); 5 | 6 | impl Gate { 7 | fn ones() -> Expression { 8 | Expression::Constant(F::ONE) 9 | } 10 | 11 | // Helper gates 12 | fn lagrange_interpolate( 13 | var: Expression, 14 | points: Vec, 15 | evals: Vec, 16 | ) -> (F, Expression) { 17 | assert_eq!(points.len(), evals.len()); 18 | let deg = points.len(); 19 | 20 | fn factorial(n: u64) -> u64 { 21 | if n < 2 { 22 | 1 23 | } else { 24 | n * factorial(n - 1) 25 | } 26 | } 27 | 28 | // Scale the whole expression by factor to avoid divisions 29 | let factor = factorial((deg - 1) as u64); 30 | 31 | let numerator = |var: Expression, eval: u32, idx: u64| { 32 | let mut expr = Self::ones(); 33 | for i in 0..deg { 34 | let i = i as u64; 35 | if i != idx { 36 | expr = expr * (Self::ones() * (-F::ONE) * F::from(i) + var.clone()); 37 | } 38 | } 39 | expr * F::from(u64::from(eval)) 40 | }; 41 | let denominator = |idx: i32| { 42 | let mut denom: i32 = 1; 43 | for i in 0..deg { 44 | let i = i as i32; 45 | if i != idx { 46 | denom *= idx - i 47 | } 48 | } 49 | if denom < 0 { 50 | -F::ONE * F::from(factor / (-denom as u64)) 51 | } else { 52 | F::from(factor / (denom as u64)) 53 | } 54 | }; 55 | 56 | let mut expr = Self::ones() * F::ZERO; 57 | for ((idx, _), eval) in points.iter().enumerate().zip(evals.iter()) { 58 | expr = expr + numerator(var.clone(), *eval, idx as u64) * denominator(idx as i32) 59 | } 60 | 61 | (F::from(factor), expr) 62 | } 63 | 64 | pub fn range_check(value: Expression, lower_range: u64, upper_range: u64) -> Expression { 65 | let mut expr = Self::ones(); 66 | for i in lower_range..(upper_range + 1) { 67 | expr = expr * (Self::ones() * (-F::ONE) * F::from(i) + value.clone()) 68 | } 69 | expr 70 | } 71 | 72 | /// Spread and range check on 2-bit word 73 | pub fn two_bit_spread_and_range( 74 | dense: Expression, 75 | spread: Expression, 76 | ) -> impl Iterator)> { 77 | let two_bit_spread = |dense: Expression, spread: Expression| { 78 | let (factor, lagrange_poly) = Self::lagrange_interpolate( 79 | dense, 80 | vec![0b00, 0b01, 0b10, 0b11], 81 | vec![0b0000, 0b0001, 0b0100, 0b0101], 82 | ); 83 | 84 | lagrange_poly - spread * factor 85 | }; 86 | 87 | std::iter::empty() 88 | .chain(Some(( 89 | "two_bit_range_check", 90 | Self::range_check(dense.clone(), 0, (1 << 2) - 1), 91 | ))) 92 | .chain(Some(( 93 | "two_bit_spread_check", 94 | two_bit_spread(dense, spread), 95 | ))) 96 | } 97 | 98 | /// Spread and range check on 3-bit word 99 | pub fn three_bit_spread_and_range( 100 | dense: Expression, 101 | spread: Expression, 102 | ) -> impl Iterator)> { 103 | let three_bit_spread = |dense: Expression, spread: Expression| { 104 | let 
(factor, lagrange_poly) = Self::lagrange_interpolate( 105 | dense, 106 | vec![0b000, 0b001, 0b010, 0b011, 0b100, 0b101, 0b110, 0b111], 107 | vec![ 108 | 0b000000, 0b000001, 0b000100, 0b000101, 0b010000, 0b010001, 0b010100, 0b010101, 109 | ], 110 | ); 111 | 112 | lagrange_poly - spread * factor 113 | }; 114 | 115 | std::iter::empty() 116 | .chain(Some(( 117 | "three_bit_range_check", 118 | Self::range_check(dense.clone(), 0, (1 << 3) - 1), 119 | ))) 120 | .chain(Some(( 121 | "three_bit_spread_check", 122 | three_bit_spread(dense, spread), 123 | ))) 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /halo2_gadgets/src/sha256/table16/util.rs: -------------------------------------------------------------------------------- 1 | use halo2_proofs::circuit::Value; 2 | 3 | pub const MASK_EVEN_32: u32 = 0x55555555; 4 | 5 | /// The sequence of bits representing a u64 in little-endian order. 6 | /// 7 | /// # Panics 8 | /// 9 | /// Panics if the expected length of the sequence `NUM_BITS` exceeds 10 | /// 64. 11 | pub fn i2lebsp(int: u64) -> [bool; NUM_BITS] { 12 | /// Takes in an FnMut closure and returns a constant-length array with elements of 13 | /// type `Output`. 14 | fn gen_const_array( 15 | closure: impl FnMut(usize) -> Output, 16 | ) -> [Output; LEN] { 17 | gen_const_array_with_default(Default::default(), closure) 18 | } 19 | 20 | fn gen_const_array_with_default( 21 | default_value: Output, 22 | closure: impl FnMut(usize) -> Output, 23 | ) -> [Output; LEN] { 24 | let mut ret: [Output; LEN] = [default_value; LEN]; 25 | for (bit, val) in ret.iter_mut().zip((0..LEN).map(closure)) { 26 | *bit = val; 27 | } 28 | ret 29 | } 30 | 31 | assert!(NUM_BITS <= 64); 32 | gen_const_array(|mask: usize| (int & (1 << mask)) != 0) 33 | } 34 | 35 | /// Returns the integer representation of a little-endian bit-array. 36 | /// Panics if the number of bits exceeds 64. 37 | pub fn lebs2ip(bits: &[bool; K]) -> u64 { 38 | assert!(K <= 64); 39 | bits.iter() 40 | .enumerate() 41 | .fold(0u64, |acc, (i, b)| acc + if *b { 1 << i } else { 0 }) 42 | } 43 | 44 | /// Helper function that interleaves a little-endian bit-array with zeros 45 | /// in the odd indices. That is, it takes the array 46 | /// [b_0, b_1, ..., b_n] 47 | /// to 48 | /// [b_0, 0, b_1, 0, ..., b_n, 0]. 49 | /// Panics if bit-array is longer than 16 bits. 50 | pub fn spread_bits( 51 | bits: impl Into<[bool; DENSE]>, 52 | ) -> [bool; SPREAD] { 53 | assert_eq!(DENSE * 2, SPREAD); 54 | assert!(DENSE <= 16); 55 | 56 | let bits: [bool; DENSE] = bits.into(); 57 | let mut spread = [false; SPREAD]; 58 | 59 | for (idx, bit) in bits.iter().enumerate() { 60 | spread[idx * 2] = *bit; 61 | } 62 | 63 | spread 64 | } 65 | 66 | /// Negates the even bits in a spread bit-array. 
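///
/// Assumes a well-formed spread word: every odd-indexed (interleaved) bit must
/// already be zero, and the function panics otherwise.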
67 | pub fn negate_spread(arr: [bool; LEN]) -> [bool; LEN] { 68 | assert_eq!(LEN % 2, 0); 69 | 70 | let mut neg = arr; 71 | for even_idx in (0..LEN).step_by(2) { 72 | let odd_idx = even_idx + 1; 73 | assert!(!arr[odd_idx]); 74 | 75 | neg[even_idx] = !arr[even_idx]; 76 | } 77 | 78 | neg 79 | } 80 | 81 | /// Returns even bits in a bit-array 82 | pub fn even_bits(bits: [bool; LEN]) -> [bool; HALF] { 83 | assert_eq!(LEN % 2, 0); 84 | let mut even_bits = [false; HALF]; 85 | for idx in 0..HALF { 86 | even_bits[idx] = bits[idx * 2] 87 | } 88 | even_bits 89 | } 90 | 91 | /// Returns odd bits in a bit-array 92 | pub fn odd_bits(bits: [bool; LEN]) -> [bool; HALF] { 93 | assert_eq!(LEN % 2, 0); 94 | let mut odd_bits = [false; HALF]; 95 | for idx in 0..HALF { 96 | odd_bits[idx] = bits[idx * 2 + 1] 97 | } 98 | odd_bits 99 | } 100 | 101 | /// Given a vector of words as vec![(lo: u16, hi: u16)], returns their sum: u32, along 102 | /// with a carry bit. 103 | pub fn sum_with_carry(words: Vec<(Value, Value)>) -> (Value, Value) { 104 | let words_lo: Value> = words.iter().map(|(lo, _)| lo.map(|lo| lo as u64)).collect(); 105 | let words_hi: Value> = words.iter().map(|(_, hi)| hi.map(|hi| hi as u64)).collect(); 106 | 107 | let sum: Value = { 108 | let sum_lo: Value = words_lo.map(|vec| vec.iter().sum()); 109 | let sum_hi: Value = words_hi.map(|vec| vec.iter().sum()); 110 | sum_lo.zip(sum_hi).map(|(lo, hi)| lo + (1 << 16) * hi) 111 | }; 112 | 113 | let carry = sum.map(|sum| (sum >> 32)); 114 | let sum = sum.map(|sum| sum as u32); 115 | 116 | (sum, carry) 117 | } 118 | -------------------------------------------------------------------------------- /halo2_gadgets/src/sinsemilla/chip/generator_table.rs: -------------------------------------------------------------------------------- 1 | use halo2_proofs::{ 2 | circuit::{Layouter, Value}, 3 | plonk::{ConstraintSystem, Error, Expression, TableColumn}, 4 | poly::Rotation, 5 | }; 6 | 7 | use super::{CommitDomains, FixedPoints, HashDomains}; 8 | use crate::sinsemilla::primitives::{self as sinsemilla, SINSEMILLA_S}; 9 | use ff::PrimeField; 10 | use halo2curves::pasta::pallas; 11 | 12 | /// Table containing independent generators S[0..2^k] 13 | #[derive(Eq, PartialEq, Copy, Clone, Debug)] 14 | pub struct GeneratorTableConfig { 15 | pub table_idx: TableColumn, 16 | pub table_x: TableColumn, 17 | pub table_y: TableColumn, 18 | } 19 | 20 | impl GeneratorTableConfig { 21 | #[allow(clippy::too_many_arguments)] 22 | #[allow(non_snake_case)] 23 | /// Even though the lookup table can be used in other parts of the circuit, 24 | /// this specific configuration sets up Sinsemilla-specific constraints 25 | /// controlled by `q_sinsemilla`, and would likely not apply to other chips. 
26 | pub fn configure( 27 | meta: &mut ConstraintSystem, 28 | config: super::SinsemillaConfig, 29 | ) where 30 | Hash: HashDomains, 31 | F: FixedPoints, 32 | Commit: CommitDomains, 33 | { 34 | let (table_idx, table_x, table_y) = ( 35 | config.generator_table.table_idx, 36 | config.generator_table.table_x, 37 | config.generator_table.table_y, 38 | ); 39 | 40 | // https://p.z.cash/halo2-0.1:sinsemilla-constraints?partial 41 | meta.lookup("lookup", |meta| { 42 | let q_s1 = meta.query_selector(config.q_sinsemilla1); 43 | let q_s2 = meta.query_fixed(config.q_sinsemilla2, Rotation::cur()); 44 | let q_s3 = config.q_s3(meta); 45 | let q_run = q_s2 - q_s3; 46 | 47 | // m_{i+1} = z_{i} - 2^K * q_{run,i} * z_{i + 1} 48 | // Note that the message words m_i's are 1-indexed while the 49 | // running sum z_i's are 0-indexed. 50 | let word = { 51 | let z_cur = meta.query_advice(config.bits, Rotation::cur()); 52 | let z_next = meta.query_advice(config.bits, Rotation::next()); 53 | z_cur - (q_run * z_next * pallas::Base::from(1 << sinsemilla::K)) 54 | }; 55 | 56 | let x_p = meta.query_advice(config.double_and_add.x_p, Rotation::cur()); 57 | 58 | // y_{p,i} = (Y_{A,i} / 2) - lambda1 * (x_{A,i} - x_{P,i}) 59 | let y_p = { 60 | let lambda1 = meta.query_advice(config.double_and_add.lambda_1, Rotation::cur()); 61 | let x_a = meta.query_advice(config.double_and_add.x_a, Rotation::cur()); 62 | let Y_A = config.double_and_add.Y_A(meta, Rotation::cur()); 63 | 64 | (Y_A * pallas::Base::TWO_INV) - (lambda1 * (x_a - x_p.clone())) 65 | }; 66 | 67 | // Lookup expressions default to the first entry when `q_s1` 68 | // is not enabled. 69 | let (init_x, init_y) = SINSEMILLA_S[0]; 70 | let not_q_s1 = Expression::Constant(pallas::Base::one()) - q_s1.clone(); 71 | 72 | let m = q_s1.clone() * word; // The first table index is 0. 73 | let x_p = q_s1.clone() * x_p + not_q_s1.clone() * init_x; 74 | let y_p = q_s1 * y_p + not_q_s1 * init_y; 75 | 76 | vec![(m, table_idx), (x_p, table_x), (y_p, table_y)] 77 | }); 78 | } 79 | 80 | pub fn load(&self, layouter: &mut impl Layouter) -> Result<(), Error> { 81 | layouter.assign_table( 82 | || "generator_table", 83 | |mut table| { 84 | for (index, (x, y)) in SINSEMILLA_S.iter().enumerate() { 85 | table.assign_cell( 86 | || "table_idx", 87 | self.table_idx, 88 | index, 89 | || Value::known(pallas::Base::from(index as u64)), 90 | )?; 91 | table.assign_cell(|| "table_x", self.table_x, index, || Value::known(*x))?; 92 | table.assign_cell(|| "table_y", self.table_y, index, || Value::known(*y))?; 93 | } 94 | Ok(()) 95 | }, 96 | ) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /halo2_gadgets/src/sinsemilla/message.rs: -------------------------------------------------------------------------------- 1 | //! Gadget and chips for the Sinsemilla hash function. 2 | use ff::PrimeFieldBits; 3 | use halo2_proofs::{ 4 | arithmetic::Field, 5 | circuit::{AssignedCell, Cell, Value}, 6 | }; 7 | use std::fmt::Debug; 8 | 9 | /// A [`Message`] composed of several [`MessagePiece`]s. 10 | #[derive(Clone, Debug)] 11 | pub struct Message(Vec>); 12 | 13 | impl From>> 14 | for Message 15 | { 16 | fn from(pieces: Vec>) -> Self { 17 | // A message cannot contain more than `MAX_WORDS` words. 
18 | assert!(pieces.iter().map(|piece| piece.num_words()).sum::() < MAX_WORDS); 19 | Message(pieces) 20 | } 21 | } 22 | 23 | impl std::ops::Deref 24 | for Message 25 | { 26 | type Target = [MessagePiece]; 27 | 28 | fn deref(&self) -> &[MessagePiece] { 29 | &self.0 30 | } 31 | } 32 | 33 | /// A [`MessagePiece`] of some bitlength. 34 | /// 35 | /// The piece must fit within a base field element, which means its length 36 | /// cannot exceed the base field's `NUM_BITS`. 37 | #[derive(Clone, Debug)] 38 | pub struct MessagePiece { 39 | cell_value: AssignedCell, 40 | /// The number of K-bit words in this message piece. 41 | num_words: usize, 42 | } 43 | 44 | impl MessagePiece { 45 | pub fn new(cell_value: AssignedCell, num_words: usize) -> Self { 46 | assert!(num_words * K < F::NUM_BITS as usize); 47 | Self { 48 | cell_value, 49 | num_words, 50 | } 51 | } 52 | 53 | pub fn num_words(&self) -> usize { 54 | self.num_words 55 | } 56 | 57 | pub fn cell(&self) -> Cell { 58 | self.cell_value.cell() 59 | } 60 | 61 | pub fn field_elem(&self) -> Value { 62 | self.cell_value.value().cloned() 63 | } 64 | 65 | pub fn cell_value(&self) -> AssignedCell { 66 | self.cell_value.clone() 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /halo2_gadgets/src/sinsemilla/primitives/addition.rs: -------------------------------------------------------------------------------- 1 | use std::ops::Add; 2 | 3 | use group::{cofactor::CofactorCurveAffine, Group}; 4 | use halo2curves::pasta::pallas; 5 | use subtle::{ConstantTimeEq, CtOption}; 6 | 7 | /// P ∪ {⊥} 8 | /// 9 | /// Simulated incomplete addition built over complete addition. 10 | #[derive(Clone, Copy, Debug)] 11 | pub(super) struct IncompletePoint(CtOption); 12 | 13 | impl From for IncompletePoint { 14 | fn from(p: pallas::Point) -> Self { 15 | IncompletePoint(CtOption::new(p, 1.into())) 16 | } 17 | } 18 | 19 | impl From for CtOption { 20 | fn from(p: IncompletePoint) -> Self { 21 | p.0 22 | } 23 | } 24 | 25 | impl Add for IncompletePoint { 26 | type Output = IncompletePoint; 27 | 28 | #[allow(clippy::suspicious_arithmetic_impl)] 29 | fn add(self, rhs: Self) -> Self::Output { 30 | // ⊥ ⸭ ⊥ = ⊥ 31 | // ⊥ ⸭ P = ⊥ 32 | IncompletePoint(self.0.and_then(|p| { 33 | // P ⸭ ⊥ = ⊥ 34 | rhs.0.and_then(|q| { 35 | // 0 ⸭ 0 = ⊥ 36 | // 0 ⸭ P = ⊥ 37 | // P ⸭ 0 = ⊥ 38 | // (x, y) ⸭ (x', y') = ⊥ if x == x' 39 | // (x, y) ⸭ (x', y') = (x, y) + (x', y') if x != x' 40 | CtOption::new( 41 | p + q, 42 | !(p.is_identity() | q.is_identity() | p.ct_eq(&q) | p.ct_eq(&-q)), 43 | ) 44 | }) 45 | })) 46 | } 47 | } 48 | 49 | impl Add for IncompletePoint { 50 | type Output = IncompletePoint; 51 | 52 | /// Specialisation of incomplete addition for mixed addition. 53 | #[allow(clippy::suspicious_arithmetic_impl)] 54 | fn add(self, rhs: pallas::Affine) -> Self::Output { 55 | // ⊥ ⸭ ⊥ = ⊥ 56 | // ⊥ ⸭ P = ⊥ 57 | IncompletePoint(self.0.and_then(|p| { 58 | // P ⸭ ⊥ = ⊥ is satisfied by definition. 59 | let q = rhs.to_curve(); 60 | 61 | // 0 ⸭ 0 = ⊥ 62 | // 0 ⸭ P = ⊥ 63 | // P ⸭ 0 = ⊥ 64 | // (x, y) ⸭ (x', y') = ⊥ if x == x' 65 | // (x, y) ⸭ (x', y') = (x, y) + (x', y') if x != x' 66 | CtOption::new( 67 | // Use mixed addition for efficiency. 
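// (`rhs` is still affine here, so `p + rhs` uses the cheaper projective + affine
// "mixed" addition formulae; the projective copy `q` above is only needed for the
// exceptional-case checks in the flag below.)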
68 | p + rhs, 69 | !(p.is_identity() | q.is_identity() | p.ct_eq(&q) | p.ct_eq(&-q)), 70 | ) 71 | })) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /halo2_proofs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "halo2_proofs" 3 | version = "1.1.0" 4 | authors = [ 5 | "Sean Bowe ", 6 | "Ying Tong Lai ", 7 | "Daira Hopwood ", 8 | "Jack Grigg ", 9 | ] 10 | edition = "2021" 11 | rust-version = "1.66.0" 12 | description = """ 13 | Fast PLONK-based zero-knowledge proving system with no trusted setup 14 | """ 15 | license = "MIT OR Apache-2.0" 16 | repository = "https://github.com/zcash/halo2" 17 | documentation = "https://docs.rs/halo2_proofs" 18 | readme = "README.md" 19 | categories = ["cryptography"] 20 | keywords = ["halo", "proofs", "zkp", "zkSNARKs"] 21 | 22 | [package.metadata.docs.rs] 23 | all-features = true 24 | rustdoc-args = ["--cfg", "docsrs", "--html-in-header", "katex-header.html"] 25 | 26 | [[bench]] 27 | name = "arithmetic" 28 | harness = false 29 | 30 | [[bench]] 31 | name = "hashtocurve" 32 | harness = false 33 | 34 | [[bench]] 35 | name = "plonk" 36 | harness = false 37 | 38 | [[bench]] 39 | name = "dev_lookup" 40 | harness = false 41 | 42 | [[bench]] 43 | name = "lookups" 44 | harness = false 45 | 46 | [[bench]] 47 | name = "fft" 48 | harness = false 49 | 50 | [dependencies] 51 | rayon = "1.5.1" 52 | backtrace = { version = "0.3", optional = true } 53 | ff = "0.13" 54 | group = "0.13" 55 | halo2curves = { version = "0.1.0", features = [ "derive_serde" ] } 56 | rand_core = { version = "0.6", default-features = false } 57 | tracing = "0.1" 58 | blake2b_simd = "1" # MSRV 1.66.0 59 | sha3 = "0.9.1" 60 | subtle = "2.3" 61 | cfg-if = "0.1" 62 | poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" } 63 | num-integer = "0.1" 64 | num-bigint = { version = "0.4", features = ["rand"] } 65 | rand_chacha = "0.3" 66 | maybe-rayon = { version = "0.1.0", default-features = false } 67 | 68 | crossbeam = "0.8.0" 69 | # Developer tooling dependencies 70 | plotters = { version = "0.3.0", default-features = false, optional = true } 71 | tabbycat = { version = "0.1", features = ["attributes"], optional = true } 72 | lazy_static = { version = "1", optional = true } 73 | log = "0.4.17" 74 | 75 | # timer 76 | ark-std = { version = "0.3.0" } 77 | 78 | 79 | # Legacy circuit compatibility 80 | halo2_legacy_pdqsort = { version = "0.1.0", optional = true } 81 | 82 | [dev-dependencies] 83 | assert_matches = "1.5" 84 | criterion = "0.3" 85 | env_logger = "0.8.0" 86 | gumdrop = "0.8" 87 | proptest = "1" 88 | rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } 89 | 90 | [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dev-dependencies] 91 | getrandom = { version = "0.2", features = ["js"] } 92 | 93 | [features] 94 | default = ["batch", "gwc", "multicore", "parallel_syn", "logup_skip_inv"] 95 | multicore = ["maybe-rayon/threads"] 96 | dev-graph = ["plotters", "tabbycat"] 97 | test-dev-graph = [ 98 | "dev-graph", 99 | "plotters/bitmap_backend", 100 | "plotters/bitmap_encoder", 101 | "plotters/ttf", 102 | ] 103 | gadget-traces = ["backtrace"] 104 | thread-safe-region = [] 105 | sanity-checks = [] 106 | batch = ["rand_core/getrandom"] 107 | shplonk = [] 108 | gwc = [] 109 | parallel_syn = [] 110 | profile = ["ark-std/print-trace"] 111 | counter = ["lazy_static"] 112 | mock-batch-inv = [] 113 | circuit-params = [] 
114 | logup_skip_inv = [] 115 | 116 | # todo: we have both mutliphase prover and phase check. consider merge them 117 | phase-check = [] 118 | multiphase-mock-prover = [] 119 | 120 | [lib] 121 | bench = false 122 | 123 | [[example]] 124 | name = "circuit-layout" 125 | required-features = ["test-dev-graph"] 126 | -------------------------------------------------------------------------------- /halo2_proofs/README.md: -------------------------------------------------------------------------------- 1 | # halo2_proofs [![Crates.io](https://img.shields.io/crates/v/halo2_proofs.svg)](https://crates.io/crates/halo2_proofs) # 2 | 3 | ## [Documentation](https://docs.rs/halo2_proofs) 4 | 5 | ## Minimum Supported Rust Version 6 | 7 | Requires Rust **1.65.0** or higher. 8 | 9 | Minimum supported Rust version can be changed in the future, but it will be done with a 10 | minor version bump. 11 | 12 | ## Controlling parallelism 13 | 14 | `halo2_proofs` currently uses [rayon](https://github.com/rayon-rs/rayon) for parallel 15 | computation. The `RAYON_NUM_THREADS` environment variable can be used to set the number of 16 | threads. 17 | 18 | You can disable `rayon` by disabling the `"multicore"` feature. 19 | Warning! Halo2 will lose access to parallelism if you disable the `"multicore"` feature. 20 | This will significantly degrade performance. 21 | 22 | ## License 23 | 24 | Licensed under either of 25 | 26 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 27 | http://www.apache.org/licenses/LICENSE-2.0) 28 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 29 | 30 | at your option. 31 | 32 | ### Contribution 33 | 34 | Unless you explicitly state otherwise, any contribution intentionally 35 | submitted for inclusion in the work by you, as defined in the Apache-2.0 36 | license, shall be dual licensed as above, without any additional terms or 37 | conditions. 
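## Note: pinning the thread count from code

The "Controlling parallelism" section above covers the `RAYON_NUM_THREADS`
environment variable. As a rough sketch only (this is not part of the
`halo2_proofs` API), the same cap can usually be applied from inside a binary that
also depends on `rayon` directly, by building the global pool before any proving
work runs:

```rust
fn main() {
    // Must run before the first parallel operation: the global pool is built once,
    // and `build_global` returns an error if it has already been initialised.
    rayon::ThreadPoolBuilder::new()
        .num_threads(4)
        .build_global()
        .expect("global rayon thread pool was already initialised");

    // Key generation, proving and verification submitted after this point will use
    // at most four worker threads.
}
```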
38 | -------------------------------------------------------------------------------- /halo2_proofs/benches/arithmetic.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate criterion; 3 | 4 | use crate::arithmetic::small_multiexp; 5 | use crate::halo2curves::pasta::{EqAffine, Fp}; 6 | use group::ff::Field; 7 | use halo2_proofs::*; 8 | 9 | use halo2_proofs::poly::{commitment::ParamsProver, ipa::commitment::ParamsIPA}; 10 | 11 | use criterion::{black_box, Criterion}; 12 | use rand_core::OsRng; 13 | 14 | fn criterion_benchmark(c: &mut Criterion) { 15 | let rng = OsRng; 16 | 17 | // small multiexp 18 | { 19 | let params: ParamsIPA = ParamsIPA::new(5); 20 | let g = &mut params.get_g().to_vec(); 21 | let len = g.len() / 2; 22 | let (g_lo, g_hi) = g.split_at_mut(len); 23 | 24 | let coeff_1 = Fp::random(rng); 25 | let coeff_2 = Fp::random(rng); 26 | 27 | c.bench_function("double-and-add", |b| { 28 | b.iter(|| { 29 | for (g_lo, g_hi) in g_lo.iter().zip(g_hi.iter()) { 30 | small_multiexp(&[black_box(coeff_1), black_box(coeff_2)], &[*g_lo, *g_hi]); 31 | } 32 | }) 33 | }); 34 | } 35 | } 36 | 37 | criterion_group!(benches, criterion_benchmark); 38 | criterion_main!(benches); 39 | -------------------------------------------------------------------------------- /halo2_proofs/benches/commit_zk.rs: -------------------------------------------------------------------------------- 1 | extern crate criterion; 2 | 3 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 4 | use group::ff::Field; 5 | use halo2_proofs::arithmetic::parallelize; 6 | use halo2curves::pasta::pallas::Scalar; 7 | use rand_chacha::rand_core::RngCore; 8 | use rand_chacha::ChaCha20Rng; 9 | use rand_core::SeedableRng; 10 | use std::{collections::HashMap, iter}; 11 | 12 | #[cfg(feature = "multicore")] 13 | use maybe_rayon::current_num_threads; 14 | 15 | #[cfg(not(feature = "multicore"))] 16 | fn current_num_threads() -> usize { 17 | 1 18 | } 19 | 20 | fn rand_poly_serial(mut rng: ChaCha20Rng, domain: usize) -> Vec { 21 | // Sample a random polynomial of degree n - 1 22 | let mut random_poly = vec![Scalar::zero(); 1 << domain]; 23 | for coeff in random_poly.iter_mut() { 24 | *coeff = Scalar::random(&mut rng); 25 | } 26 | 27 | random_poly 28 | } 29 | 30 | fn rand_poly_par(mut rng: ChaCha20Rng, domain: usize) -> Vec { 31 | // Sample a random polynomial of degree n - 1 32 | let n = 1usize << domain; 33 | let mut random_poly = vec![Scalar::ZERO; n]; 34 | 35 | let num_threads = current_num_threads(); 36 | let chunk_size = n / num_threads; 37 | let thread_seeds = (0..) 
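// One seeded RNG per worker, keyed by the start offset of its chunk: the first
// n % num_threads chunks take chunk_size + 1 coefficients and the rest take
// chunk_size, so the offsets line up with how `parallelize` hands out chunks and
// the `thread_seeds[&offset]` lookup below always finds an entry.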
38 | .step_by(chunk_size + 1) 39 | .take(n % num_threads) 40 | .chain( 41 | (chunk_size != 0) 42 | .then(|| ((n % num_threads) * (chunk_size + 1)..).step_by(chunk_size)) 43 | .into_iter() 44 | .flatten(), 45 | ) 46 | .take(num_threads) 47 | .zip(iter::repeat_with(|| { 48 | let mut seed = [0u8; 32]; 49 | rng.fill_bytes(&mut seed); 50 | ChaCha20Rng::from_seed(seed) 51 | })) 52 | .collect::>(); 53 | 54 | parallelize(&mut random_poly, |chunk, offset| { 55 | let mut rng = thread_seeds[&offset].clone(); 56 | chunk.iter_mut().for_each(|v| *v = Scalar::random(&mut rng)); 57 | }); 58 | random_poly 59 | } 60 | 61 | fn bench_commit(c: &mut Criterion) { 62 | let mut group = c.benchmark_group("Blinder_poly"); 63 | let rand = ChaCha20Rng::from_seed([1u8; 32]); 64 | for i in [ 65 | 18usize, 19usize, 20usize, 21usize, 22usize, 23usize, 24usize, 25usize, 66 | ] 67 | .iter() 68 | { 69 | group.bench_with_input(BenchmarkId::new("serial", i), i, |b, i| { 70 | b.iter(|| rand_poly_serial(rand.clone(), *i)) 71 | }); 72 | group.bench_with_input(BenchmarkId::new("parallel", i), i, |b, i| { 73 | b.iter(|| rand_poly_par(rand.clone(), *i)) 74 | }); 75 | } 76 | group.finish(); 77 | } 78 | 79 | criterion_group!(benches, bench_commit); 80 | criterion_main!(benches); 81 | -------------------------------------------------------------------------------- /halo2_proofs/benches/dev_lookup.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate criterion; 3 | 4 | use ff::{Field, PrimeField}; 5 | use halo2_proofs::circuit::{Layouter, SimpleFloorPlanner, Value}; 6 | use halo2_proofs::dev::MockProver; 7 | use halo2_proofs::plonk::*; 8 | use halo2_proofs::poly::Rotation; 9 | use halo2curves::pasta::pallas; 10 | 11 | use std::marker::PhantomData; 12 | 13 | use criterion::{BenchmarkId, Criterion}; 14 | 15 | fn criterion_benchmark(c: &mut Criterion) { 16 | #[derive(Clone, Default)] 17 | struct MyCircuit { 18 | _marker: PhantomData, 19 | } 20 | 21 | #[derive(Clone)] 22 | struct MyConfig { 23 | selector: Selector, 24 | table: TableColumn, 25 | advice: Column, 26 | } 27 | 28 | impl Circuit for MyCircuit { 29 | type Config = MyConfig; 30 | type FloorPlanner = SimpleFloorPlanner; 31 | #[cfg(feature = "circuit-params")] 32 | type Params = (); 33 | 34 | fn without_witnesses(&self) -> Self { 35 | Self::default() 36 | } 37 | 38 | fn configure(meta: &mut ConstraintSystem) -> MyConfig { 39 | let config = MyConfig { 40 | selector: meta.complex_selector(), 41 | table: meta.lookup_table_column(), 42 | advice: meta.advice_column(), 43 | }; 44 | 45 | meta.lookup("lookup", |meta| { 46 | let selector = meta.query_selector(config.selector); 47 | let not_selector = Expression::Constant(F::ONE) - selector.clone(); 48 | let advice = meta.query_advice(config.advice, Rotation::cur()); 49 | vec![(selector * advice + not_selector, config.table)] 50 | }); 51 | 52 | config 53 | } 54 | 55 | fn synthesize( 56 | &self, 57 | config: MyConfig, 58 | mut layouter: impl Layouter, 59 | ) -> Result<(), Error> { 60 | layouter.assign_table( 61 | || "8-bit table", 62 | |mut table| { 63 | for row in 0u64..(1 << 8) { 64 | table.assign_cell( 65 | || format!("row {}", row), 66 | config.table, 67 | row as usize, 68 | || Value::known(F::from(row + 1)), 69 | )?; 70 | } 71 | 72 | Ok(()) 73 | }, 74 | )?; 75 | 76 | layouter.assign_region( 77 | || "assign values", 78 | |mut region| { 79 | for offset in 0u64..(1 << 10) { 80 | config.selector.enable(&mut region, offset as usize)?; 81 | region.assign_advice( 82 | || 
format!("offset {}", offset), 83 | config.advice, 84 | offset as usize, 85 | || Value::known(F::from((offset % 256) + 1)), 86 | )?; 87 | } 88 | 89 | Ok(()) 90 | }, 91 | ) 92 | } 93 | } 94 | 95 | fn prover(k: u32) { 96 | let circuit = MyCircuit:: { 97 | _marker: PhantomData, 98 | }; 99 | let prover = MockProver::run(k, &circuit, vec![]).unwrap(); 100 | assert_eq!(prover.verify(), Ok(())) 101 | } 102 | 103 | let k_range = 14..=18; 104 | 105 | let mut prover_group = c.benchmark_group("dev-lookup"); 106 | prover_group.sample_size(10); 107 | for k in k_range { 108 | prover_group.bench_with_input(BenchmarkId::from_parameter(k), &k, |b, &k| { 109 | b.iter(|| prover(k)); 110 | }); 111 | } 112 | prover_group.finish(); 113 | } 114 | 115 | criterion_group!(benches, criterion_benchmark); 116 | criterion_main!(benches); 117 | -------------------------------------------------------------------------------- /halo2_proofs/benches/fft.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate criterion; 3 | 4 | use crate::arithmetic::best_fft; 5 | use group::ff::Field; 6 | use halo2_proofs::*; 7 | use halo2curves::pasta::Fp; 8 | 9 | use criterion::{BenchmarkId, Criterion}; 10 | use rand_core::OsRng; 11 | 12 | fn criterion_benchmark(c: &mut Criterion) { 13 | let mut group = c.benchmark_group("fft"); 14 | for k in 3..19 { 15 | group.bench_function(BenchmarkId::new("k", k), |b| { 16 | let mut a = (0..(1 << k)).map(|_| Fp::random(OsRng)).collect::>(); 17 | let omega = Fp::random(OsRng); // would be weird if this mattered 18 | b.iter(|| { 19 | best_fft(&mut a, omega, k as u32); 20 | }); 21 | }); 22 | } 23 | } 24 | 25 | criterion_group!(benches, criterion_benchmark); 26 | criterion_main!(benches); 27 | -------------------------------------------------------------------------------- /halo2_proofs/benches/hashtocurve.rs: -------------------------------------------------------------------------------- 1 | //! Benchmarks for hashing to the Pasta curves. 2 | 3 | use criterion::{criterion_group, criterion_main, Criterion}; 4 | 5 | use halo2_proofs::arithmetic::CurveExt; 6 | use halo2curves::pasta::{pallas, vesta}; 7 | 8 | fn criterion_benchmark(c: &mut Criterion) { 9 | bench_hash_to_curve(c); 10 | } 11 | 12 | fn bench_hash_to_curve(c: &mut Criterion) { 13 | let mut group = c.benchmark_group("hash-to-curve"); 14 | 15 | let hash_pallas = pallas::Point::hash_to_curve("z.cash:test"); 16 | group.bench_function("Pallas", |b| b.iter(|| hash_pallas(b"benchmark"))); 17 | 18 | let hash_vesta = vesta::Point::hash_to_curve("z.cash:test"); 19 | group.bench_function("Vesta", |b| b.iter(|| hash_vesta(b"benchmark"))); 20 | } 21 | 22 | criterion_group!(benches, criterion_benchmark); 23 | criterion_main!(benches); 24 | -------------------------------------------------------------------------------- /halo2_proofs/katex-header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /halo2_proofs/proptest-regressions/plonk/assigned.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 
7 | cc 9ec8b547e21d3ed71ee4f99316edb8ff7d0c4d42751bb2479a2864a661860326 # shrinks to (values, operations) = ([Rational(0x0000000000000000000000000000000000000000000000000000000000000000, 0x0000000000000000000000000000000000000000000000000000000000000000), Trivial(0x0000000000000000000000000000000000000000000000000000000000000001)], [Add]) 8 | -------------------------------------------------------------------------------- /halo2_proofs/proptest-regressions/plonk/circuit/compress_selectors.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc 782948e336b9fcaaf993d40cd290eff20399d34766a93793fc3a4516274c1ea7 # shrinks to (selectors, max_degree) = ([SelectorDescription { selector: 0, activations: [false], max_degree: 0 }, SelectorDescription { selector: 1, activations: [false], max_degree: 0 }], 1) 8 | cc 656e5446792c4f5fe22fd10bcd2dbadc70e84ac1ddb1a7ec8f622f64a15ff260 # shrinks to (selectors, max_degree) = ([SelectorDescription { selector: 0, activations: [false], max_degree: 1 }, SelectorDescription { selector: 1, activations: [false], max_degree: 1 }, SelectorDescription { selector: 2, activations: [false], max_degree: 1 }], 2) 9 | cc b7b81ca8745931e4dd8b4f896f7bde78f85f4d88857c5fdf9dc4bbf0f172db5e # shrinks to (selectors, max_degree) = ([SelectorDescription { selector: 0, activations: [false], max_degree: 1 }, SelectorDescription { selector: 1, activations: [false], max_degree: 1 }, SelectorDescription { selector: 2, activations: [false], max_degree: 1 }], 2) 10 | -------------------------------------------------------------------------------- /halo2_proofs/src/circuit/floor_planner.rs: -------------------------------------------------------------------------------- 1 | //! Implementations of common circuit floor planners. 2 | 3 | pub(super) mod single_pass; 4 | 5 | mod v1; 6 | pub use v1::{V1Pass, V1}; 7 | -------------------------------------------------------------------------------- /halo2_proofs/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # halo2_proofs 2 | 3 | #![cfg_attr(docsrs, feature(doc_cfg))] 4 | // The actual lints we want to disable. 5 | #![allow(clippy::op_ref, clippy::many_single_char_names)] 6 | #![deny(rustdoc::broken_intra_doc_links)] 7 | #![deny(missing_debug_implementations)] 8 | #![feature(stmt_expr_attributes)] 9 | // #![deny(missing_docs)] 10 | // #![deny(unsafe_code)] 11 | #![allow(clippy::uninit_vec)] 12 | #![allow(clippy::too_many_arguments)] 13 | #![feature(associated_type_defaults)] 14 | 15 | #[cfg(feature = "counter")] 16 | extern crate lazy_static; 17 | 18 | #[cfg(feature = "counter")] 19 | use lazy_static::lazy_static; 20 | 21 | #[cfg(feature = "counter")] 22 | use std::sync::Mutex; 23 | 24 | #[cfg(feature = "counter")] 25 | use std::collections::BTreeMap; 26 | 27 | #[cfg(feature = "counter")] 28 | lazy_static! 
{ 29 | static ref FFT_COUNTER: Mutex> = Mutex::new(BTreeMap::new()); 30 | static ref MSM_COUNTER: Mutex> = Mutex::new(BTreeMap::new()); 31 | } 32 | 33 | pub mod arithmetic; 34 | pub mod circuit; 35 | pub use halo2curves; 36 | mod multicore; 37 | pub mod plonk; 38 | pub mod poly; 39 | pub mod transcript; 40 | 41 | pub mod dev; 42 | mod helpers; 43 | pub use helpers::SerdeFormat; 44 | -------------------------------------------------------------------------------- /halo2_proofs/src/multicore.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all( 2 | feature = "multicore", 3 | target_arch = "wasm32", 4 | not(target_feature = "atomics") 5 | ))] 6 | compile_error!( 7 | "The multicore feature flag is not supported on wasm32 architectures without atomics" 8 | ); 9 | 10 | pub use maybe_rayon::{ 11 | iter::{IntoParallelIterator, IntoParallelRefMutIterator, ParallelIterator}, 12 | join, scope, Scope, 13 | }; 14 | 15 | #[cfg(feature = "multicore")] 16 | pub use maybe_rayon::{ 17 | current_num_threads, 18 | iter::{IndexedParallelIterator, IntoParallelRefIterator}, 19 | slice::ParallelSliceMut, 20 | }; 21 | 22 | #[cfg(not(feature = "multicore"))] 23 | pub fn current_num_threads() -> usize { 24 | 1 25 | } 26 | 27 | pub trait TryFoldAndReduce { 28 | /// Implements `iter.try_fold().try_reduce()` for `rayon::iter::ParallelIterator`, 29 | /// falling back on `Iterator::try_fold` when the `multicore` feature flag is 30 | /// disabled. 31 | /// The `try_fold_and_reduce` function can only be called by a iter with 32 | /// `Result` item type because the `fold_op` must meet the trait 33 | /// bounds of both `try_fold` and `try_reduce` from rayon. 34 | fn try_fold_and_reduce( 35 | self, 36 | identity: impl Fn() -> T + Send + Sync, 37 | fold_op: impl Fn(T, Result) -> Result + Send + Sync, 38 | ) -> Result; 39 | } 40 | 41 | #[cfg(feature = "multicore")] 42 | impl TryFoldAndReduce for I 43 | where 44 | T: Send + Sync, 45 | E: Send + Sync, 46 | I: maybe_rayon::iter::ParallelIterator>, 47 | { 48 | fn try_fold_and_reduce( 49 | self, 50 | identity: impl Fn() -> T + Send + Sync, 51 | fold_op: impl Fn(T, Result) -> Result + Send + Sync, 52 | ) -> Result { 53 | self.try_fold(&identity, &fold_op) 54 | .try_reduce(&identity, |a, b| fold_op(a, Ok(b))) 55 | } 56 | } 57 | 58 | #[cfg(not(feature = "multicore"))] 59 | impl TryFoldAndReduce for I 60 | where 61 | I: std::iter::Iterator>, 62 | { 63 | fn try_fold_and_reduce( 64 | mut self, 65 | identity: impl Fn() -> T + Send + Sync, 66 | fold_op: impl Fn(T, Result) -> Result + Send + Sync, 67 | ) -> Result { 68 | self.try_fold(identity(), fold_op) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/lookup.rs: -------------------------------------------------------------------------------- 1 | use super::circuit::Expression; 2 | use ff::Field; 3 | use std::fmt::{self, Debug}; 4 | 5 | pub(crate) mod prover; 6 | pub(crate) mod verifier; 7 | 8 | #[derive(Clone, PartialEq, Eq)] 9 | pub struct Argument { 10 | pub(crate) name: String, 11 | pub(crate) input_expressions: Vec>, 12 | pub(crate) table_expressions: Vec>, 13 | } 14 | 15 | impl Debug for Argument { 16 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 17 | f.debug_struct("Argument") 18 | .field("input_expressions", &self.input_expressions) 19 | .field("table_expressions", &self.table_expressions) 20 | .finish() 21 | } 22 | } 23 | 24 | impl Argument { 25 | /// Constructs a new lookup argument. 
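///
/// Each row of the compressed `input` expressions is constrained to appear
/// somewhere among the compressed `table` expressions.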
26 | /// 27 | /// `table_map` is a sequence of `(input, table)` tuples. 28 | pub fn new>(name: S, table_map: Vec<(Expression, Expression)>) -> Self { 29 | let (input_expressions, table_expressions) = table_map.into_iter().unzip(); 30 | Argument { 31 | name: name.as_ref().to_string(), 32 | input_expressions, 33 | table_expressions, 34 | } 35 | } 36 | 37 | pub(crate) fn required_degree(&self) -> usize { 38 | assert_eq!(self.input_expressions.len(), self.table_expressions.len()); 39 | 40 | // The first value in the permutation poly should be one. 41 | // degree 2: 42 | // l_0(X) * (1 - z(X)) = 0 43 | // 44 | // The "last" value in the permutation poly should be a boolean, for 45 | // completeness and soundness. 46 | // degree 3: 47 | // l_last(X) * (z(X)^2 - z(X)) = 0 48 | // 49 | // Enable the permutation argument for only the rows involved. 50 | // degree (2 + input_degree + table_degree) or 4, whichever is larger: 51 | // (1 - (l_last(X) + l_blind(X))) * ( 52 | // z(\omega X) (a'(X) + \beta) (s'(X) + \gamma) 53 | // - z(X) (\theta^{m-1} a_0(X) + ... + a_{m-1}(X) + \beta) (\theta^{m-1} s_0(X) + ... + s_{m-1}(X) + \gamma) 54 | // ) = 0 55 | // 56 | // The first two values of a' and s' should be the same. 57 | // degree 2: 58 | // l_0(X) * (a'(X) - s'(X)) = 0 59 | // 60 | // Either the two values are the same, or the previous 61 | // value of a' is the same as the current value. 62 | // degree 3: 63 | // (1 - (l_last(X) + l_blind(X))) * (a′(X) − s′(X))⋅(a′(X) − a′(\omega^{-1} X)) = 0 64 | let mut input_degree = 1; 65 | for expr in self.input_expressions.iter() { 66 | input_degree = std::cmp::max(input_degree, expr.degree()); 67 | } 68 | let mut table_degree = 1; 69 | for expr in self.table_expressions.iter() { 70 | table_degree = std::cmp::max(table_degree, expr.degree()); 71 | } 72 | 73 | // In practice because input_degree and table_degree are initialized to 74 | // one, the latter half of this max() invocation is at least 4 always, 75 | // rendering this call pointless except to be explicit in case we change 76 | // the initialization of input_degree/table_degree in the future. 77 | std::cmp::max( 78 | // (1 - (l_last + l_blind)) z(\omega X) (a'(X) + \beta) (s'(X) + \gamma) 79 | 4, 80 | // (1 - (l_last + l_blind)) z(X) (\theta^{m-1} a_0(X) + ... + a_{m-1}(X) + \beta) (\theta^{m-1} s_0(X) + ... 
+ s_{m-1}(X) + \gamma) 81 | 2 + input_degree + table_degree, 82 | ) 83 | } 84 | 85 | /// Returns input of this argument 86 | pub fn input_expressions(&self) -> &Vec> { 87 | &self.input_expressions 88 | } 89 | 90 | /// Returns table of this argument 91 | pub fn table_expressions(&self) -> &Vec> { 92 | &self.table_expressions 93 | } 94 | 95 | /// Returns name of this argument 96 | pub fn name(&self) -> &str { 97 | &self.name 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/mv_lookup.rs: -------------------------------------------------------------------------------- 1 | use super::circuit::Expression; 2 | use ff::Field; 3 | use std::fmt::{self, Debug}; 4 | 5 | pub(crate) mod prover; 6 | pub(crate) mod verifier; 7 | 8 | /// Degree of lookup without inputs 9 | pub fn base_degree(table_degree: usize) -> usize { 10 | // let lhs_degree = table_degree + inputs_expressions_degree + 1 11 | // let degree = lhs_degree + 1 12 | std::cmp::max(3, table_degree + 2) 13 | } 14 | 15 | pub fn degree_with_input(base_degree: usize, input_expression_degree: usize) -> usize { 16 | base_degree + input_expression_degree 17 | } 18 | 19 | #[derive(Clone)] 20 | pub struct Argument { 21 | pub name: &'static str, 22 | pub(crate) table_expressions: Vec>, 23 | pub(crate) inputs_expressions: Vec>>, 24 | } 25 | 26 | impl Debug for Argument { 27 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 28 | f.debug_struct("Argument") 29 | .field("table_expressions", &self.table_expressions) 30 | .field("inputs_expressions", &self.inputs_expressions) 31 | .finish() 32 | } 33 | } 34 | 35 | impl Argument { 36 | /// Constructs a new lookup argument. 37 | pub fn new(name: &'static str, table: &[Expression], input: &[Vec>]) -> Self { 38 | Self { 39 | name, 40 | table_expressions: table.to_owned(), 41 | inputs_expressions: input.to_owned(), 42 | } 43 | } 44 | 45 | pub(crate) fn required_degree(&self) -> usize { 46 | assert!(self 47 | .inputs_expressions 48 | .iter() 49 | .all(|input| input.len() == self.table_expressions.len())); 50 | 51 | let expr_degree = |input_expressions: &Vec>| { 52 | let mut input_degree = 0; 53 | for expr in input_expressions.iter() { 54 | input_degree = std::cmp::max(input_degree, expr.degree()); 55 | } 56 | 57 | input_degree 58 | }; 59 | 60 | let inputs_expressions_degree: usize = 61 | self.inputs_expressions.iter().map(expr_degree).sum(); 62 | 63 | let table_degree = expr_degree(&self.table_expressions); 64 | 65 | /* 66 | φ_i(X) = f_i(X) + α 67 | τ(X) = t(X) + α 68 | LHS = τ(X) * Π(φ_i(X)) * (ϕ(gX) - ϕ(X)) 69 | = table_degree + sum(input_degree) + 1 70 | RHS = τ(X) * Π(φ_i(X)) * (∑ 1/(φ_i(X)) - m(X) / τ(X)))) 71 | 72 | deg(q(X)) = (1 - (q_last + q_blind)) * (LHS - RHS) 73 | = 1 + LHS 74 | */ 75 | 76 | let lhs_degree = table_degree + inputs_expressions_degree + 1; 77 | let degree = lhs_degree + 1; 78 | 79 | // 3 = phi + q_blind + table (where table is = 1) 80 | // + 1 for each of inputs expressions 81 | std::cmp::max(3 + self.inputs_expressions.len(), degree) 82 | } 83 | 84 | /// Returns input of this argument 85 | pub fn input_expressions(&self) -> &Vec>> { 86 | &self.inputs_expressions 87 | } 88 | 89 | /// Returns table of this argument 90 | pub fn table_expressions(&self) -> &Vec> { 91 | &self.table_expressions 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/mv_lookup/exec_info.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "unit": "ms", 3 | "non_batched": { 4 | "k": 14, 5 | "halo2": { 6 | "protocol": "halo2", 7 | "methods": { 8 | "commit_permuted": { 9 | "compress_expressions": 1, 10 | "permute_expressions": 4.5, 11 | "commit_permuted_input": 5, 12 | "commit_permuted_table": 5 13 | }, 14 | "grand_product": { 15 | "lookup_product_denom": 2, 16 | "lookup_product": 0.2, 17 | "grand_prod_evals": 0.5, 18 | "grand_prod_commit": 7.5 19 | }, 20 | "h_evaluation": { 21 | 22 | } 23 | } 24 | }, 25 | "mv": { 26 | "protocol": "mv", 27 | "methods": { 28 | "compute_multiplicity": { 29 | "compress_expressions": 1, 30 | "compute_multiplicities": 2, 31 | "commit_m": 1 32 | }, 33 | "grand_sum": { 34 | "inputs_log_derivatives": 2, 35 | "table_log_derivatives": 1.8, 36 | "log_derivatives_diff": 0.2, 37 | "grand_sum_evals": 0.2, 38 | "grand_sum_commit": 33 39 | }, 40 | "h_evaluation": { 41 | 42 | } 43 | } 44 | } 45 | } 46 | } -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/shuffle.rs: -------------------------------------------------------------------------------- 1 | use super::circuit::Expression; 2 | use ff::Field; 3 | use std::fmt::{self, Debug}; 4 | 5 | pub(crate) mod prover; 6 | pub(crate) mod verifier; 7 | 8 | #[derive(Clone, PartialEq, Eq)] 9 | pub struct Argument { 10 | pub(crate) name: String, 11 | pub(crate) input_expressions: Vec>, 12 | pub(crate) shuffle_expressions: Vec>, 13 | } 14 | 15 | impl Debug for Argument { 16 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 17 | f.debug_struct("Argument") 18 | .field("input_expressions", &self.input_expressions) 19 | .field("shuffle_expressions", &self.shuffle_expressions) 20 | .finish() 21 | } 22 | } 23 | 24 | impl Argument { 25 | /// Constructs a new shuffle argument. 26 | /// 27 | /// `shuffle` is a sequence of `(input, shuffle)` tuples. 
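///
/// The compressed input expressions are constrained to be a permutation of the
/// compressed shuffle expressions over the usable rows (every row is matched
/// exactly once, unlike a lookup, where repetitions and unused table rows are
/// allowed).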
28 | pub fn new>(name: S, shuffle: Vec<(Expression, Expression)>) -> Self { 29 | let (input_expressions, shuffle_expressions) = shuffle.into_iter().unzip(); 30 | Argument { 31 | name: name.as_ref().to_string(), 32 | input_expressions, 33 | shuffle_expressions, 34 | } 35 | } 36 | 37 | pub(crate) fn required_degree(&self) -> usize { 38 | assert_eq!(self.input_expressions.len(), self.shuffle_expressions.len()); 39 | 40 | let mut input_degree = 1; 41 | for expr in self.input_expressions.iter() { 42 | input_degree = std::cmp::max(input_degree, expr.degree()); 43 | } 44 | let mut shuffle_degree = 1; 45 | for expr in self.shuffle_expressions.iter() { 46 | shuffle_degree = std::cmp::max(shuffle_degree, expr.degree()); 47 | } 48 | 49 | // (1 - (l_last + l_blind)) (z(\omega X) (s(X) + \gamma) - z(X) (a(X) + \gamma)) 50 | std::cmp::max(2 + shuffle_degree, 2 + input_degree) 51 | } 52 | 53 | /// Returns input of this argument 54 | pub fn input_expressions(&self) -> &Vec> { 55 | &self.input_expressions 56 | } 57 | 58 | /// Returns table of this argument 59 | pub fn shuffle_expressions(&self) -> &Vec> { 60 | &self.shuffle_expressions 61 | } 62 | 63 | /// Returns name of this argument 64 | pub fn name(&self) -> &str { 65 | &self.name 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/shuffle/verifier.rs: -------------------------------------------------------------------------------- 1 | use std::iter; 2 | 3 | use super::super::{circuit::Expression, ChallengeGamma, ChallengeTheta, ChallengeX}; 4 | use super::Argument; 5 | use crate::{ 6 | arithmetic::CurveAffine, 7 | plonk::{Error, VerifyingKey}, 8 | poly::{commitment::MSM, Rotation, VerifierQuery}, 9 | transcript::{EncodedChallenge, TranscriptRead}, 10 | }; 11 | use ff::Field; 12 | 13 | pub struct Committed { 14 | product_commitment: C, 15 | } 16 | 17 | pub struct Evaluated { 18 | committed: Committed, 19 | product_eval: C::Scalar, 20 | product_next_eval: C::Scalar, 21 | } 22 | 23 | impl Argument { 24 | pub(in crate::plonk) fn read_product_commitment< 25 | C: CurveAffine, 26 | E: EncodedChallenge, 27 | T: TranscriptRead, 28 | >( 29 | &self, 30 | transcript: &mut T, 31 | ) -> Result, Error> { 32 | let product_commitment = transcript.read_point()?; 33 | 34 | Ok(Committed { product_commitment }) 35 | } 36 | } 37 | 38 | impl Committed { 39 | pub(crate) fn evaluate, T: TranscriptRead>( 40 | self, 41 | transcript: &mut T, 42 | ) -> Result, Error> { 43 | let product_eval = transcript.read_scalar()?; 44 | let product_next_eval = transcript.read_scalar()?; 45 | 46 | Ok(Evaluated { 47 | committed: self, 48 | product_eval, 49 | product_next_eval, 50 | }) 51 | } 52 | } 53 | 54 | impl Evaluated { 55 | #[allow(clippy::too_many_arguments)] 56 | pub(in crate::plonk) fn expressions<'a>( 57 | &'a self, 58 | l_0: C::Scalar, 59 | l_last: C::Scalar, 60 | l_blind: C::Scalar, 61 | argument: &'a Argument, 62 | theta: ChallengeTheta, 63 | gamma: ChallengeGamma, 64 | advice_evals: &[C::Scalar], 65 | fixed_evals: &[C::Scalar], 66 | instance_evals: &[C::Scalar], 67 | challenges: &[C::Scalar], 68 | ) -> impl Iterator + 'a { 69 | let active_rows = C::Scalar::ONE - (l_last + l_blind); 70 | 71 | let product_expression = || { 72 | // z(\omega X) (s(X) + \gamma) - z(X) (a(X) + \gamma) 73 | let compress_expressions = |expressions: &[Expression]| { 74 | expressions 75 | .iter() 76 | .map(|expression| { 77 | expression.evaluate( 78 | &|scalar| scalar, 79 | &|_| panic!("virtual selectors are removed during 
optimization"), 80 | &|query| fixed_evals[query.index.unwrap()], 81 | &|query| advice_evals[query.index.unwrap()], 82 | &|query| instance_evals[query.index.unwrap()], 83 | &|challenge| challenges[challenge.index()], 84 | &|a| -a, 85 | &|a, b| a + &b, 86 | &|a, b| a * &b, 87 | &|a, scalar| a * &scalar, 88 | ) 89 | }) 90 | .fold(C::Scalar::ZERO, |acc, eval| acc * &*theta + &eval) 91 | }; 92 | // z(\omega X) (s(X) + \gamma) 93 | let left = self.product_next_eval 94 | * &(compress_expressions(&argument.shuffle_expressions) + &*gamma); 95 | // z(X) (a(X) + \gamma) 96 | let right = 97 | self.product_eval * &(compress_expressions(&argument.input_expressions) + &*gamma); 98 | 99 | (left - &right) * &active_rows 100 | }; 101 | 102 | std::iter::empty() 103 | .chain( 104 | // l_0(X) * (1 - z'(X)) = 0 105 | Some(l_0 * &(C::Scalar::ONE - &self.product_eval)), 106 | ) 107 | .chain( 108 | // l_last(X) * (z(X)^2 - z(X)) = 0 109 | Some(l_last * &(self.product_eval.square() - &self.product_eval)), 110 | ) 111 | .chain( 112 | // (1 - (l_last(X) + l_blind(X))) * ( z(\omega X) (s(X) + \gamma) - z(X) (a(X) + \gamma)) 113 | Some(product_expression()), 114 | ) 115 | } 116 | 117 | pub(in crate::plonk) fn queries<'r, M: MSM + 'r>( 118 | &'r self, 119 | vk: &'r VerifyingKey, 120 | x: ChallengeX, 121 | ) -> impl Iterator> + Clone { 122 | let x_next = vk.domain.rotate_omega(*x, Rotation::next()); 123 | 124 | iter::empty() 125 | // Open shuffle product commitment at x 126 | .chain(Some(VerifierQuery::new_commitment( 127 | &self.committed.product_commitment, 128 | *x, 129 | self.product_eval, 130 | ))) 131 | // Open shuffle product commitment at \omega x 132 | .chain(Some(VerifierQuery::new_commitment( 133 | &self.committed.product_commitment, 134 | x_next, 135 | self.product_next_eval, 136 | ))) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/vanishing.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use crate::arithmetic::CurveAffine; 4 | 5 | mod prover; 6 | mod verifier; 7 | 8 | /// A vanishing argument. 
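///
/// The prover commits to a random (blinding) polynomial and to the pieces of the quotient
/// polynomial h(X); the verifier then checks that the folded gate expressions equal
/// h(x) * (x^n - 1) at the evaluation challenge.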
9 | pub(crate) struct Argument { 10 | _marker: PhantomData, 11 | } 12 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/vanishing/verifier.rs: -------------------------------------------------------------------------------- 1 | use std::iter; 2 | 3 | use ff::Field; 4 | 5 | use crate::{ 6 | arithmetic::CurveAffine, 7 | plonk::{Error, VerifyingKey}, 8 | poly::{ 9 | commitment::{Params, MSM}, 10 | VerifierQuery, 11 | }, 12 | transcript::{read_n_points, EncodedChallenge, TranscriptRead}, 13 | }; 14 | 15 | use super::super::{ChallengeX, ChallengeY}; 16 | use super::Argument; 17 | 18 | pub struct Committed { 19 | random_poly_commitment: C, 20 | } 21 | 22 | pub struct Constructed { 23 | h_commitments: Vec, 24 | random_poly_commitment: C, 25 | } 26 | 27 | pub struct PartiallyEvaluated { 28 | h_commitments: Vec, 29 | random_poly_commitment: C, 30 | random_eval: C::Scalar, 31 | } 32 | 33 | pub struct Evaluated> { 34 | h_commitment: M, 35 | random_poly_commitment: C, 36 | expected_h_eval: C::Scalar, 37 | random_eval: C::Scalar, 38 | } 39 | 40 | impl Argument { 41 | pub(in crate::plonk) fn read_commitments_before_y< 42 | E: EncodedChallenge, 43 | T: TranscriptRead, 44 | >( 45 | transcript: &mut T, 46 | ) -> Result, Error> { 47 | let random_poly_commitment = transcript.read_point()?; 48 | 49 | Ok(Committed { 50 | random_poly_commitment, 51 | }) 52 | } 53 | } 54 | 55 | impl Committed { 56 | pub(in crate::plonk) fn read_commitments_after_y< 57 | E: EncodedChallenge, 58 | T: TranscriptRead, 59 | >( 60 | self, 61 | vk: &VerifyingKey, 62 | transcript: &mut T, 63 | ) -> Result, Error> { 64 | // Obtain a commitment to h(X) in the form of multiple pieces of degree n - 1 65 | let h_commitments = read_n_points(transcript, vk.domain.get_quotient_poly_degree())?; 66 | 67 | Ok(Constructed { 68 | h_commitments, 69 | random_poly_commitment: self.random_poly_commitment, 70 | }) 71 | } 72 | } 73 | 74 | impl Constructed { 75 | pub(in crate::plonk) fn evaluate_after_x, T: TranscriptRead>( 76 | self, 77 | transcript: &mut T, 78 | ) -> Result, Error> { 79 | let random_eval = transcript.read_scalar()?; 80 | 81 | Ok(PartiallyEvaluated { 82 | h_commitments: self.h_commitments, 83 | random_poly_commitment: self.random_poly_commitment, 84 | random_eval, 85 | }) 86 | } 87 | } 88 | 89 | impl PartiallyEvaluated { 90 | pub(in crate::plonk) fn verify<'params, P: Params<'params, C>>( 91 | self, 92 | params: &'params P, 93 | expressions: impl Iterator, 94 | y: ChallengeY, 95 | xn: C::Scalar, 96 | ) -> Evaluated { 97 | let expected_h_eval = expressions.fold(C::Scalar::ZERO, |h_eval, v| h_eval * &*y + &v); 98 | let expected_h_eval = expected_h_eval * ((xn - C::Scalar::ONE).invert().unwrap()); 99 | 100 | let h_commitment = 101 | self.h_commitments 102 | .iter() 103 | .rev() 104 | .fold(params.empty_msm(), |mut acc, commitment| { 105 | acc.scale(xn); 106 | let commitment: C::CurveExt = (*commitment).into(); 107 | acc.append_term(C::Scalar::ONE, commitment); 108 | 109 | acc 110 | }); 111 | 112 | Evaluated { 113 | expected_h_eval, 114 | h_commitment, 115 | random_poly_commitment: self.random_poly_commitment, 116 | random_eval: self.random_eval, 117 | } 118 | } 119 | } 120 | 121 | impl> Evaluated { 122 | pub(in crate::plonk) fn queries( 123 | &self, 124 | x: ChallengeX, 125 | ) -> impl Iterator> + Clone { 126 | iter::empty() 127 | .chain(Some(VerifierQuery::new_msm( 128 | &self.h_commitment, 129 | *x, 130 | self.expected_h_eval, 131 | ))) 132 | 
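            // Open the random (blinding) polynomial commitment at x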
.chain(Some(VerifierQuery::new_commitment( 133 | &self.random_poly_commitment, 134 | *x, 135 | self.random_eval, 136 | ))) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/verifier/batch.rs: -------------------------------------------------------------------------------- 1 | use ff::FromUniformBytes; 2 | use group::ff::Field; 3 | use halo2curves::CurveAffine; 4 | use rand_core::OsRng; 5 | 6 | use super::{verify_proof, VerificationStrategy}; 7 | use crate::{ 8 | multicore::{IntoParallelIterator, TryFoldAndReduce}, 9 | plonk::{Error, VerifyingKey}, 10 | poly::{ 11 | commitment::{Params, MSM}, 12 | ipa::{ 13 | commitment::{IPACommitmentScheme, ParamsVerifierIPA}, 14 | msm::MSMIPA, 15 | multiopen::VerifierIPA, 16 | strategy::GuardIPA, 17 | }, 18 | }, 19 | transcript::{Blake2bRead, TranscriptReadBuffer}, 20 | }; 21 | 22 | #[cfg(feature = "multicore")] 23 | use crate::multicore::{IndexedParallelIterator, ParallelIterator}; 24 | 25 | /// A proof verification strategy that returns the proof's MSM. 26 | /// 27 | /// `BatchVerifier` handles the accumulation of the MSMs for the batched proofs. 28 | #[derive(Debug)] 29 | struct BatchStrategy<'params, C: CurveAffine> { 30 | msm: MSMIPA<'params, C>, 31 | } 32 | 33 | impl<'params, C: CurveAffine> 34 | VerificationStrategy<'params, IPACommitmentScheme, VerifierIPA<'params, C>> 35 | for BatchStrategy<'params, C> 36 | { 37 | type Output = MSMIPA<'params, C>; 38 | 39 | fn new(params: &'params ParamsVerifierIPA) -> Self { 40 | BatchStrategy { 41 | msm: MSMIPA::new(params), 42 | } 43 | } 44 | 45 | fn process( 46 | self, 47 | f: impl FnOnce(MSMIPA<'params, C>) -> Result, Error>, 48 | ) -> Result { 49 | let guard = f(self.msm)?; 50 | Ok(guard.use_challenges()) 51 | } 52 | 53 | fn finalize(self) -> bool { 54 | unreachable!() 55 | } 56 | } 57 | 58 | #[derive(Debug)] 59 | struct BatchItem { 60 | instances: Vec>>, 61 | proof: Vec, 62 | } 63 | 64 | /// A verifier that checks multiple proofs in a batch. **This requires the 65 | /// `batch` crate feature to be enabled.** 66 | #[derive(Debug, Default)] 67 | pub struct BatchVerifier { 68 | items: Vec>, 69 | } 70 | 71 | impl BatchVerifier 72 | where 73 | C::Scalar: FromUniformBytes<64>, 74 | { 75 | /// Constructs a new batch verifier. 76 | pub fn new() -> Self { 77 | Self { items: vec![] } 78 | } 79 | 80 | /// Adds a proof to the batch. 81 | pub fn add_proof(&mut self, instances: Vec>>, proof: Vec) { 82 | self.items.push(BatchItem { instances, proof }) 83 | } 84 | 85 | /// Finalizes the batch and checks its validity. 86 | /// 87 | /// Returns `false` if *some* proof was invalid. If the caller needs to identify 88 | /// specific failing proofs, it must re-process the proofs separately. 89 | /// 90 | /// This uses [`OsRng`] internally instead of taking an `R: RngCore` argument, because 91 | /// the internal parallelization requires access to a RNG that is guaranteed to not 92 | /// clone its internal state when shared between threads. 93 | pub fn finalize(self, params: &ParamsVerifierIPA, vk: &VerifyingKey) -> bool { 94 | fn accumulate_msm<'params, C: CurveAffine>( 95 | mut acc: MSMIPA<'params, C>, 96 | msm: MSMIPA<'params, C>, 97 | ) -> MSMIPA<'params, C> { 98 | // Scale the MSM by a random factor to ensure that if the existing MSM has 99 | // `is_zero() == false` then this argument won't be able to interfere with it 100 | // to make it true, with high probability. 
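            // Concretely: after `acc.scale(r)` for a fresh random `r`, the batched check is
            //     r * acc + msm == 0,
            // which (with overwhelming probability) only holds if both `acc` and `msm` are
            // zero individually.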
101 | acc.scale(C::Scalar::random(OsRng)); 102 | 103 | acc.add_msm(&msm); 104 | acc 105 | } 106 | 107 | let final_msm = self 108 | .items 109 | .into_par_iter() 110 | .enumerate() 111 | .map(|(i, item)| { 112 | let instances: Vec> = item 113 | .instances 114 | .iter() 115 | .map(|i| i.iter().map(|c| &c[..]).collect()) 116 | .collect(); 117 | let instances: Vec<_> = instances.iter().map(|i| &i[..]).collect(); 118 | 119 | let strategy = BatchStrategy::new(params); 120 | let mut transcript = Blake2bRead::init(&item.proof[..]); 121 | verify_proof(params, vk, strategy, &instances, &mut transcript).map_err(|e| { 122 | tracing::debug!("Batch item {} failed verification: {}", i, e); 123 | e 124 | }) 125 | }) 126 | .try_fold_and_reduce( 127 | || params.empty_msm(), 128 | |acc, res| res.map(|proof_msm| accumulate_msm(acc, proof_msm)), 129 | ); 130 | 131 | match final_msm { 132 | Ok(msm) => msm.check(), 133 | Err(_) => false, 134 | } 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/ipa/commitment/verifier.rs: -------------------------------------------------------------------------------- 1 | use group::ff::{BatchInvert, Field}; 2 | 3 | use super::ParamsIPA; 4 | use crate::{arithmetic::CurveAffine, poly::ipa::strategy::GuardIPA}; 5 | use crate::{ 6 | poly::{commitment::MSM, ipa::msm::MSMIPA, Error}, 7 | transcript::{EncodedChallenge, TranscriptRead}, 8 | }; 9 | 10 | /// Checks to see if the proof represented within `transcript` is valid, and a 11 | /// point `x` that the polynomial commitment `P` opens purportedly to the value 12 | /// `v`. The provided `msm` should evaluate to the commitment `P` being opened. 13 | pub fn verify_proof<'params, C: CurveAffine, E: EncodedChallenge, T: TranscriptRead>( 14 | params: &'params ParamsIPA, 15 | mut msm: MSMIPA<'params, C>, 16 | transcript: &mut T, 17 | x: C::Scalar, 18 | v: C::Scalar, 19 | ) -> Result, Error> { 20 | let k = params.k as usize; 21 | 22 | // P' = P - [v] G_0 + [ξ] S 23 | msm.add_constant_term(-v); // add [-v] G_0 24 | let s_poly_commitment = transcript.read_point().map_err(|_| Error::OpeningError)?; 25 | let xi = *transcript.squeeze_challenge_scalar::<()>(); 26 | msm.append_term(xi, s_poly_commitment.into()); 27 | 28 | let z = *transcript.squeeze_challenge_scalar::<()>(); 29 | 30 | let mut rounds = vec![]; 31 | for _ in 0..k { 32 | // Read L and R from the proof and write them to the transcript 33 | let l = transcript.read_point().map_err(|_| Error::OpeningError)?; 34 | let r = transcript.read_point().map_err(|_| Error::OpeningError)?; 35 | 36 | let u_j_packed = transcript.squeeze_challenge(); 37 | let u_j = *u_j_packed.as_challenge_scalar::<()>(); 38 | 39 | rounds.push((l, r, u_j, /* to be inverted */ u_j, u_j_packed)); 40 | } 41 | 42 | rounds 43 | .iter_mut() 44 | .map(|&mut (_, _, _, ref mut u_j, _)| u_j) 45 | .batch_invert(); 46 | 47 | // This is the left-hand side of the verifier equation. 
48 | // P' + \sum([u_j^{-1}] L_j) + \sum([u_j] R_j) 49 | let mut u = Vec::with_capacity(k); 50 | let mut u_packed: Vec = Vec::with_capacity(k); 51 | for (l, r, u_j, u_j_inv, u_j_packed) in rounds { 52 | msm.append_term(u_j_inv, l.into()); 53 | msm.append_term(u_j, r.into()); 54 | 55 | u.push(u_j); 56 | u_packed.push(u_j_packed.get_scalar()); 57 | } 58 | 59 | // Our goal is to check that the left hand side of the verifier 60 | // equation 61 | // P' + \sum([u_j^{-1}] L_j) + \sum([u_j] R_j) 62 | // equals (given b = \mathbf{b}_0, and the prover's values c, f), 63 | // the right-hand side 64 | // = [c] (G'_0 + [b * z] U) + [f] W 65 | // Subtracting the right-hand side from both sides we get 66 | // P' + \sum([u_j^{-1}] L_j) + \sum([u_j] R_j) 67 | // + [-c] G'_0 + [-cbz] U + [-f] W 68 | // = 0 69 | // 70 | // Note that the guard returned from this function does not include 71 | // the [-c]G'_0 term. 72 | 73 | let c = transcript.read_scalar().map_err(|_| Error::SamplingError)?; 74 | let neg_c = -c; 75 | let f = transcript.read_scalar().map_err(|_| Error::SamplingError)?; 76 | let b = compute_b(x, &u); 77 | 78 | msm.add_to_u_scalar(neg_c * &b * &z); 79 | msm.add_to_w_scalar(-f); 80 | 81 | let guard = GuardIPA { 82 | msm, 83 | neg_c, 84 | u, 85 | u_packed, 86 | }; 87 | 88 | Ok(guard) 89 | } 90 | 91 | /// Computes $\prod\limits_{i=0}^{k-1} (1 + u_{k - 1 - i} x^{2^i})$. 92 | fn compute_b(x: F, u: &[F]) -> F { 93 | let mut tmp = F::ONE; 94 | let mut cur = x; 95 | for u_j in u.iter().rev() { 96 | tmp *= F::ONE + &(*u_j * &cur); 97 | cur *= cur; 98 | } 99 | tmp 100 | } 101 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/ipa/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod commitment; 2 | /// Multiscalar multiplication engines 3 | pub mod msm; 4 | /// IPA multi-open scheme 5 | pub mod multiopen; 6 | /// Strategies used with KZG scheme 7 | pub mod strategy; 8 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/ipa/multiopen/prover.rs: -------------------------------------------------------------------------------- 1 | use super::{construct_intermediate_sets, ChallengeX1, ChallengeX2, ChallengeX3, ChallengeX4}; 2 | use crate::arithmetic::{eval_polynomial, kate_division, CurveAffine}; 3 | use crate::poly::commitment::ParamsProver; 4 | use crate::poly::commitment::{Blind, Prover}; 5 | use crate::poly::ipa::commitment::{self, IPACommitmentScheme, ParamsIPA}; 6 | use crate::poly::query::ProverQuery; 7 | use crate::poly::{Coeff, Polynomial}; 8 | use crate::transcript::{EncodedChallenge, TranscriptWrite}; 9 | 10 | use ff::Field; 11 | use group::Curve; 12 | use rand_core::RngCore; 13 | use std::io; 14 | use std::marker::PhantomData; 15 | 16 | /// IPA multi-open prover 17 | #[derive(Debug)] 18 | pub struct ProverIPA<'params, C: CurveAffine> { 19 | pub(crate) params: &'params ParamsIPA, 20 | } 21 | 22 | impl<'params, C: CurveAffine> Prover<'params, IPACommitmentScheme> for ProverIPA<'params, C> { 23 | const QUERY_INSTANCE: bool = true; 24 | 25 | fn new(params: &'params ParamsIPA) -> Self { 26 | Self { params } 27 | } 28 | 29 | /// Create a multi-opening proof 30 | fn create_proof<'com, Z: EncodedChallenge, T: TranscriptWrite, R, I>( 31 | &self, 32 | mut rng: R, 33 | transcript: &mut T, 34 | queries: I, 35 | ) -> io::Result<()> 36 | where 37 | I: IntoIterator> + Clone, 38 | R: RngCore, 39 | { 40 | let x_1: ChallengeX1<_> = 
transcript.squeeze_challenge_scalar(); 41 | let x_2: ChallengeX2<_> = transcript.squeeze_challenge_scalar(); 42 | 43 | let (poly_map, point_sets) = construct_intermediate_sets(queries); 44 | 45 | // Collapse openings at same point sets together into single openings using 46 | // x_1 challenge. 47 | let mut q_polys: Vec>> = vec![None; point_sets.len()]; 48 | let mut q_blinds = vec![Blind(C::Scalar::ZERO); point_sets.len()]; 49 | 50 | { 51 | let mut accumulate = |set_idx: usize, 52 | new_poly: &Polynomial, 53 | blind: Blind| { 54 | if let Some(poly) = &q_polys[set_idx] { 55 | q_polys[set_idx] = Some(poly.clone() * *x_1 + new_poly); 56 | } else { 57 | q_polys[set_idx] = Some(new_poly.clone()); 58 | } 59 | q_blinds[set_idx] *= *x_1; 60 | q_blinds[set_idx] += blind; 61 | }; 62 | 63 | for commitment_data in poly_map.into_iter() { 64 | accumulate( 65 | commitment_data.set_index, // set_idx, 66 | commitment_data.commitment.poly, // poly, 67 | commitment_data.commitment.blind, // blind, 68 | ); 69 | } 70 | } 71 | 72 | let q_prime_poly = point_sets 73 | .iter() 74 | .zip(q_polys.iter()) 75 | .fold(None, |q_prime_poly, (points, poly)| { 76 | let mut poly = points 77 | .iter() 78 | .fold(poly.clone().unwrap().values, |poly, point| { 79 | kate_division(&poly, *point) 80 | }); 81 | poly.resize(self.params.n as usize, C::Scalar::ZERO); 82 | let poly = Polynomial { 83 | values: poly, 84 | _marker: PhantomData, 85 | }; 86 | 87 | if q_prime_poly.is_none() { 88 | Some(poly) 89 | } else { 90 | q_prime_poly.map(|q_prime_poly| q_prime_poly * *x_2 + &poly) 91 | } 92 | }) 93 | .unwrap(); 94 | 95 | let q_prime_blind = Blind(C::Scalar::random(&mut rng)); 96 | let q_prime_commitment = self.params.commit(&q_prime_poly, q_prime_blind).to_affine(); 97 | 98 | transcript.write_point(q_prime_commitment)?; 99 | 100 | let x_3: ChallengeX3<_> = transcript.squeeze_challenge_scalar(); 101 | 102 | // Prover sends u_i for all i, which correspond to the evaluation 103 | // of each Q polynomial commitment at x_3. 
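        // (`eval_polynomial` evaluates the coefficient-form polynomial at x_3; conceptually
        // Horner's rule, `coeffs.iter().rev().fold(F::ZERO, |acc, c| acc * x_3 + c)`.)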
104 | for q_i_poly in &q_polys { 105 | transcript.write_scalar(eval_polynomial(q_i_poly.as_ref().unwrap(), *x_3))?; 106 | } 107 | 108 | let x_4: ChallengeX4<_> = transcript.squeeze_challenge_scalar(); 109 | 110 | let (p_poly, p_poly_blind) = q_polys.into_iter().zip(q_blinds).fold( 111 | (q_prime_poly, q_prime_blind), 112 | |(q_prime_poly, q_prime_blind), (poly, blind)| { 113 | ( 114 | q_prime_poly * *x_4 + &poly.unwrap(), 115 | Blind((q_prime_blind.0 * &(*x_4)) + &blind.0), 116 | ) 117 | }, 118 | ); 119 | 120 | commitment::create_proof(self.params, rng, transcript, &p_poly, p_poly_blind, *x_3) 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/kzg/mod.rs: -------------------------------------------------------------------------------- 1 | /// KZG commitment scheme 2 | pub mod commitment; 3 | /// Multiscalar multiplication engines 4 | pub mod msm; 5 | /// KZG multi-open scheme 6 | pub mod multiopen; 7 | /// Strategies used with KZG scheme 8 | pub mod strategy; 9 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/kzg/multiopen.rs: -------------------------------------------------------------------------------- 1 | mod gwc; 2 | mod shplonk; 3 | 4 | pub use gwc::*; 5 | pub use shplonk::*; 6 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/kzg/multiopen/gwc.rs: -------------------------------------------------------------------------------- 1 | mod prover; 2 | mod verifier; 3 | 4 | pub use prover::ProverGWC; 5 | pub use verifier::VerifierGWC; 6 | 7 | use crate::{poly::query::Query, transcript::ChallengeScalar}; 8 | use ff::Field; 9 | use std::marker::PhantomData; 10 | 11 | #[derive(Clone, Copy, Debug)] 12 | struct U {} 13 | type ChallengeU = ChallengeScalar; 14 | 15 | #[derive(Clone, Copy, Debug)] 16 | struct V {} 17 | type ChallengeV = ChallengeScalar; 18 | 19 | struct CommitmentData> { 20 | queries: Vec, 21 | point: F, 22 | _marker: PhantomData, 23 | } 24 | 25 | fn construct_intermediate_sets>(queries: I) -> Vec> 26 | where 27 | I: IntoIterator + Clone, 28 | { 29 | let mut point_query_map: Vec<(F, Vec)> = Vec::new(); 30 | for query in queries { 31 | if let Some(pos) = point_query_map 32 | .iter() 33 | .position(|(point, _)| *point == query.get_point()) 34 | { 35 | let (_, queries) = &mut point_query_map[pos]; 36 | queries.push(query); 37 | } else { 38 | point_query_map.push((query.get_point(), vec![query])); 39 | } 40 | } 41 | 42 | point_query_map 43 | .into_iter() 44 | .map(|(point, queries)| CommitmentData { 45 | queries, 46 | point, 47 | _marker: PhantomData, 48 | }) 49 | .collect() 50 | } 51 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/kzg/multiopen/gwc/prover.rs: -------------------------------------------------------------------------------- 1 | use super::{construct_intermediate_sets, ChallengeV, Query}; 2 | use crate::arithmetic::{kate_division, powers}; 3 | use crate::helpers::SerdeCurveAffine; 4 | use crate::poly::commitment::ParamsProver; 5 | use crate::poly::commitment::Prover; 6 | use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; 7 | use crate::poly::query::ProverQuery; 8 | use crate::poly::{commitment::Blind, Polynomial}; 9 | use crate::transcript::{EncodedChallenge, TranscriptWrite}; 10 | 11 | use ff::PrimeField; 12 | use group::Curve; 13 | use halo2curves::pairing::Engine; 14 | use rand_core::RngCore; 15 | use 
std::fmt::Debug; 16 | use std::io; 17 | use std::marker::PhantomData; 18 | 19 | /// Concrete KZG prover with GWC variant 20 | #[derive(Debug)] 21 | pub struct ProverGWC<'params, E: Engine> { 22 | params: &'params ParamsKZG, 23 | } 24 | 25 | /// Create a multi-opening proof 26 | impl<'params, E: Engine + Debug> Prover<'params, KZGCommitmentScheme> for ProverGWC<'params, E> 27 | where 28 | E::Scalar: PrimeField, 29 | E::G1Affine: SerdeCurveAffine, 30 | E::G2Affine: SerdeCurveAffine, 31 | { 32 | const QUERY_INSTANCE: bool = true; 33 | 34 | fn new(params: &'params ParamsKZG) -> Self { 35 | Self { params } 36 | } 37 | 38 | /// Create a multi-opening proof 39 | fn create_proof< 40 | 'com, 41 | Ch: EncodedChallenge, 42 | T: TranscriptWrite, 43 | R, 44 | I, 45 | >( 46 | &self, 47 | _: R, 48 | transcript: &mut T, 49 | queries: I, 50 | ) -> io::Result<()> 51 | where 52 | I: IntoIterator> + Clone, 53 | R: RngCore, 54 | { 55 | let v: ChallengeV<_> = transcript.squeeze_challenge_scalar(); 56 | let commitment_data = construct_intermediate_sets(queries); 57 | 58 | for commitment_at_a_point in commitment_data.iter() { 59 | let z = commitment_at_a_point.point; 60 | let (poly_batch, eval_batch) = commitment_at_a_point 61 | .queries 62 | .iter() 63 | .zip(powers(*v)) 64 | .map(|(query, power_of_v)| { 65 | assert_eq!(query.get_point(), z); 66 | 67 | let poly = query.get_commitment().poly; 68 | let eval = query.get_eval(); 69 | 70 | (poly.clone() * power_of_v, eval * power_of_v) 71 | }) 72 | .reduce(|(poly_acc, eval_acc), (poly, eval)| (poly_acc + &poly, eval_acc + eval)) 73 | .unwrap(); 74 | 75 | let poly_batch = &poly_batch - eval_batch; 76 | let witness_poly = Polynomial { 77 | values: kate_division(&poly_batch.values, z), 78 | _marker: PhantomData, 79 | }; 80 | let w = self 81 | .params 82 | .commit(&witness_poly, Blind::default()) 83 | .to_affine(); 84 | 85 | transcript.write_point(w)?; 86 | } 87 | Ok(()) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/kzg/multiopen/gwc/verifier.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use super::{construct_intermediate_sets, ChallengeU, ChallengeV}; 4 | use crate::arithmetic::powers; 5 | use crate::helpers::SerdeCurveAffine; 6 | use crate::poly::commitment::Verifier; 7 | use crate::poly::commitment::MSM; 8 | use crate::poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG}; 9 | use crate::poly::kzg::msm::{DualMSM, MSMKZG}; 10 | use crate::poly::kzg::strategy::GuardKZG; 11 | use crate::poly::query::Query; 12 | use crate::poly::query::{CommitmentReference, VerifierQuery}; 13 | use crate::poly::Error; 14 | use crate::transcript::{EncodedChallenge, TranscriptRead}; 15 | 16 | use ff::{Field, PrimeField}; 17 | use halo2curves::pairing::{Engine, MultiMillerLoop}; 18 | 19 | #[derive(Debug)] 20 | /// Concrete KZG verifier with GWC variant 21 | pub struct VerifierGWC<'params, E: Engine> { 22 | params: &'params ParamsKZG, 23 | } 24 | 25 | impl<'params, E> Verifier<'params, KZGCommitmentScheme> for VerifierGWC<'params, E> 26 | where 27 | E: MultiMillerLoop + Debug, 28 | E::Scalar: PrimeField, 29 | E::G1Affine: SerdeCurveAffine, 30 | E::G2Affine: SerdeCurveAffine, 31 | { 32 | type Guard = GuardKZG<'params, E>; 33 | type MSMAccumulator = DualMSM<'params, E>; 34 | 35 | const QUERY_INSTANCE: bool = true; 36 | 37 | fn new(params: &'params ParamsKZG) -> Self { 38 | Self { params } 39 | } 40 | 41 | fn verify_proof< 42 | 'com, 43 | Ch: 
EncodedChallenge, 44 | T: TranscriptRead, 45 | I, 46 | >( 47 | &self, 48 | transcript: &mut T, 49 | queries: I, 50 | mut msm_accumulator: DualMSM<'params, E>, 51 | ) -> Result 52 | where 53 | I: IntoIterator>> + Clone, 54 | { 55 | let v: ChallengeV<_> = transcript.squeeze_challenge_scalar(); 56 | 57 | let commitment_data = construct_intermediate_sets(queries); 58 | 59 | let w: Vec = (0..commitment_data.len()) 60 | .map(|_| transcript.read_point().map_err(|_| Error::SamplingError)) 61 | .collect::, Error>>()?; 62 | 63 | let u: ChallengeU<_> = transcript.squeeze_challenge_scalar(); 64 | 65 | let mut commitment_multi = MSMKZG::::new(); 66 | let mut eval_multi = E::Scalar::ZERO; 67 | 68 | let mut witness = MSMKZG::::new(); 69 | let mut witness_with_aux = MSMKZG::::new(); 70 | 71 | for ((commitment_at_a_point, wi), power_of_u) in 72 | commitment_data.iter().zip(w).zip(powers(*u)) 73 | { 74 | assert!(!commitment_at_a_point.queries.is_empty()); 75 | let z = commitment_at_a_point.point; 76 | 77 | let (mut commitment_batch, eval_batch) = commitment_at_a_point 78 | .queries 79 | .iter() 80 | .zip(powers(*v)) 81 | .map(|(query, power_of_v)| { 82 | assert_eq!(query.get_point(), z); 83 | 84 | let commitment = match query.get_commitment() { 85 | CommitmentReference::Commitment(c) => { 86 | let mut msm = MSMKZG::::new(); 87 | msm.append_term(power_of_v, (*c).into()); 88 | msm 89 | } 90 | CommitmentReference::MSM(msm) => { 91 | let mut msm = msm.clone(); 92 | msm.scale(power_of_v); 93 | msm 94 | } 95 | }; 96 | let eval = power_of_v * query.get_eval(); 97 | 98 | (commitment, eval) 99 | }) 100 | .reduce(|(mut commitment_acc, eval_acc), (commitment, eval)| { 101 | commitment_acc.add_msm(&commitment); 102 | (commitment_acc, eval_acc + eval) 103 | }) 104 | .unwrap(); 105 | 106 | commitment_batch.scale(power_of_u); 107 | commitment_multi.add_msm(&commitment_batch); 108 | eval_multi += power_of_u * eval_batch; 109 | 110 | witness_with_aux.append_term(power_of_u * z, wi.into()); 111 | witness.append_term(power_of_u, wi.into()); 112 | } 113 | 114 | msm_accumulator.left.add_msm(&witness); 115 | 116 | msm_accumulator.right.add_msm(&witness_with_aux); 117 | msm_accumulator.right.add_msm(&commitment_multi); 118 | let g0: E::G1 = self.params.g[0].into(); 119 | msm_accumulator.right.append_term(eval_multi, -g0); 120 | 121 | Ok(Self::Guard::new(msm_accumulator)) 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/kzg/strategy.rs: -------------------------------------------------------------------------------- 1 | use super::{ 2 | commitment::{KZGCommitmentScheme, ParamsKZG}, 3 | msm::DualMSM, 4 | }; 5 | use crate::{ 6 | helpers::SerdeCurveAffine, 7 | plonk::Error, 8 | poly::{ 9 | commitment::Verifier, 10 | strategy::{Guard, VerificationStrategy}, 11 | }, 12 | }; 13 | use ff::{Field, PrimeField}; 14 | use halo2curves::pairing::{Engine, MultiMillerLoop}; 15 | use rand_core::OsRng; 16 | use std::fmt::Debug; 17 | 18 | /// Wrapper for linear verification accumulator 19 | #[derive(Debug, Clone)] 20 | pub struct GuardKZG<'params, E: MultiMillerLoop + Debug> { 21 | pub(crate) msm_accumulator: DualMSM<'params, E>, 22 | } 23 | 24 | /// Define accumulator type as `DualMSM` 25 | impl<'params, E> Guard> for GuardKZG<'params, E> 26 | where 27 | E::Scalar: PrimeField, 28 | E: MultiMillerLoop + Debug, 29 | E::G1Affine: SerdeCurveAffine, 30 | E::G2Affine: SerdeCurveAffine, 31 | { 32 | type MSMAccumulator = DualMSM<'params, E>; 33 | } 34 | 35 | /// KZG specific 
operations 36 | impl<'params, E: MultiMillerLoop + Debug> GuardKZG<'params, E> { 37 | pub(crate) fn new(msm_accumulator: DualMSM<'params, E>) -> Self { 38 | Self { msm_accumulator } 39 | } 40 | } 41 | 42 | /// A verifier that checks multiple proofs in a batch 43 | #[derive(Clone, Debug)] 44 | pub struct AccumulatorStrategy<'params, E: Engine> { 45 | pub(crate) msm_accumulator: DualMSM<'params, E>, 46 | } 47 | 48 | impl<'params, E: MultiMillerLoop + Debug> AccumulatorStrategy<'params, E> { 49 | /// Constructs an empty batch verifier 50 | pub fn new(params: &'params ParamsKZG) -> Self { 51 | AccumulatorStrategy { 52 | msm_accumulator: DualMSM::new(params), 53 | } 54 | } 55 | 56 | /// Constructs and initialized new batch verifier 57 | pub fn with(msm_accumulator: DualMSM<'params, E>) -> Self { 58 | AccumulatorStrategy { msm_accumulator } 59 | } 60 | } 61 | 62 | /// A verifier that checks a single proof 63 | #[derive(Clone, Debug)] 64 | pub struct SingleStrategy<'params, E: Engine> { 65 | pub(crate) msm: DualMSM<'params, E>, 66 | } 67 | 68 | impl<'params, E: MultiMillerLoop + Debug> SingleStrategy<'params, E> { 69 | /// Constructs an empty batch verifier 70 | pub fn new(params: &'params ParamsKZG) -> Self { 71 | SingleStrategy { 72 | msm: DualMSM::new(params), 73 | } 74 | } 75 | } 76 | 77 | impl< 78 | 'params, 79 | E: MultiMillerLoop + Debug, 80 | V: Verifier< 81 | 'params, 82 | KZGCommitmentScheme, 83 | MSMAccumulator = DualMSM<'params, E>, 84 | Guard = GuardKZG<'params, E>, 85 | >, 86 | > VerificationStrategy<'params, KZGCommitmentScheme, V> for AccumulatorStrategy<'params, E> 87 | where 88 | E::Scalar: PrimeField, 89 | E::G1Affine: SerdeCurveAffine, 90 | E::G2Affine: SerdeCurveAffine, 91 | { 92 | type Output = Self; 93 | 94 | fn new(params: &'params ParamsKZG) -> Self { 95 | AccumulatorStrategy::new(params) 96 | } 97 | 98 | fn process( 99 | mut self, 100 | f: impl FnOnce(V::MSMAccumulator) -> Result, 101 | ) -> Result { 102 | self.msm_accumulator.scale(E::Scalar::random(OsRng)); 103 | 104 | // Guard is updated with new msm contributions 105 | let guard = f(self.msm_accumulator)?; 106 | Ok(Self { 107 | msm_accumulator: guard.msm_accumulator, 108 | }) 109 | } 110 | 111 | fn finalize(self) -> bool { 112 | self.msm_accumulator.check() 113 | } 114 | } 115 | 116 | impl< 117 | 'params, 118 | E: MultiMillerLoop + Debug, 119 | V: Verifier< 120 | 'params, 121 | KZGCommitmentScheme, 122 | MSMAccumulator = DualMSM<'params, E>, 123 | Guard = GuardKZG<'params, E>, 124 | >, 125 | > VerificationStrategy<'params, KZGCommitmentScheme, V> for SingleStrategy<'params, E> 126 | where 127 | E::Scalar: PrimeField, 128 | E::G1Affine: SerdeCurveAffine, 129 | E::G2Affine: SerdeCurveAffine, 130 | { 131 | type Output = (); 132 | 133 | fn new(params: &'params ParamsKZG) -> Self { 134 | Self::new(params) 135 | } 136 | 137 | fn process( 138 | self, 139 | f: impl FnOnce(V::MSMAccumulator) -> Result, 140 | ) -> Result { 141 | // Guard is updated with new msm contributions 142 | let guard = f(self.msm)?; 143 | let msm = guard.msm_accumulator; 144 | if msm.check() { 145 | Ok(()) 146 | } else { 147 | Err(Error::ConstraintSystemFailure) 148 | } 149 | } 150 | 151 | fn finalize(self) -> bool { 152 | unreachable!(); 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/query.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use super::commitment::{Blind, MSM}; 4 | use crate::{ 5 | 
arithmetic::eval_polynomial, 6 | poly::{Coeff, Polynomial}, 7 | }; 8 | use halo2curves::CurveAffine; 9 | 10 | pub trait Query: Sized + Clone + Send + Sync { 11 | type Commitment: PartialEq + Copy + Send + Sync; 12 | type Eval: Clone + Default + Debug; 13 | 14 | fn get_point(&self) -> F; 15 | fn get_eval(&self) -> Self::Eval; 16 | fn get_commitment(&self) -> Self::Commitment; 17 | } 18 | 19 | /// A polynomial query at a point 20 | #[derive(Debug, Clone)] 21 | pub struct ProverQuery<'com, C: CurveAffine> { 22 | /// point at which polynomial is queried 23 | pub(crate) point: C::Scalar, 24 | /// coefficients of polynomial 25 | pub(crate) poly: &'com Polynomial, 26 | /// blinding factor of polynomial 27 | pub(crate) blind: Blind, 28 | } 29 | 30 | #[doc(hidden)] 31 | #[derive(Copy, Clone)] 32 | pub struct PolynomialPointer<'com, C: CurveAffine> { 33 | pub(crate) poly: &'com Polynomial, 34 | pub(crate) blind: Blind, 35 | } 36 | 37 | impl<'com, C: CurveAffine> PartialEq for PolynomialPointer<'com, C> { 38 | fn eq(&self, other: &Self) -> bool { 39 | std::ptr::eq(self.poly, other.poly) 40 | } 41 | } 42 | 43 | impl<'com, C: CurveAffine> Query for ProverQuery<'com, C> { 44 | type Commitment = PolynomialPointer<'com, C>; 45 | type Eval = C::Scalar; 46 | 47 | fn get_point(&self) -> C::Scalar { 48 | self.point 49 | } 50 | fn get_eval(&self) -> Self::Eval { 51 | eval_polynomial(&self.poly[..], self.get_point()) 52 | } 53 | fn get_commitment(&self) -> Self::Commitment { 54 | PolynomialPointer { 55 | poly: self.poly, 56 | blind: self.blind, 57 | } 58 | } 59 | } 60 | 61 | impl<'com, C: CurveAffine, M: MSM> VerifierQuery<'com, C, M> { 62 | /// Create a new verifier query based on a commitment 63 | pub fn new_commitment(commitment: &'com C, point: C::Scalar, eval: C::Scalar) -> Self { 64 | VerifierQuery { 65 | point, 66 | eval, 67 | commitment: CommitmentReference::Commitment(commitment), 68 | } 69 | } 70 | 71 | /// Create a new verifier query based on a linear combination of commitments 72 | pub fn new_msm(msm: &'com M, point: C::Scalar, eval: C::Scalar) -> VerifierQuery<'com, C, M> { 73 | VerifierQuery { 74 | point, 75 | eval, 76 | commitment: CommitmentReference::MSM(msm), 77 | } 78 | } 79 | } 80 | 81 | /// A polynomial query at a point 82 | #[derive(Debug)] 83 | pub struct VerifierQuery<'com, C: CurveAffine, M: MSM> { 84 | /// point at which polynomial is queried 85 | pub(crate) point: C::Scalar, 86 | /// commitment to polynomial 87 | pub(crate) commitment: CommitmentReference<'com, C, M>, 88 | /// evaluation of polynomial at query point 89 | pub(crate) eval: C::Scalar, 90 | } 91 | 92 | impl<'com, C: CurveAffine, M: MSM> Clone for VerifierQuery<'com, C, M> { 93 | fn clone(&self) -> Self { 94 | Self { 95 | point: self.point, 96 | commitment: self.commitment, 97 | eval: self.eval, 98 | } 99 | } 100 | } 101 | 102 | #[allow(clippy::upper_case_acronyms)] 103 | #[derive(Clone, Debug)] 104 | pub enum CommitmentReference<'r, C: CurveAffine, M: MSM> { 105 | Commitment(&'r C), 106 | MSM(&'r M), 107 | } 108 | 109 | impl<'r, C: CurveAffine, M: MSM> Copy for CommitmentReference<'r, C, M> {} 110 | 111 | impl<'r, C: CurveAffine, M: MSM> PartialEq for CommitmentReference<'r, C, M> { 112 | #![allow(clippy::vtable_address_comparisons)] 113 | fn eq(&self, other: &Self) -> bool { 114 | match (self, other) { 115 | (&CommitmentReference::Commitment(a), &CommitmentReference::Commitment(b)) => { 116 | std::ptr::eq(a, b) 117 | } 118 | (&CommitmentReference::MSM(a), &CommitmentReference::MSM(b)) => std::ptr::eq(a, b), 119 | _ => 
false, 120 | } 121 | } 122 | } 123 | 124 | impl<'com, C: CurveAffine, M: MSM> Query for VerifierQuery<'com, C, M> { 125 | type Eval = C::Scalar; 126 | type Commitment = CommitmentReference<'com, C, M>; 127 | 128 | fn get_point(&self) -> C::Scalar { 129 | self.point 130 | } 131 | fn get_eval(&self) -> C::Scalar { 132 | self.eval 133 | } 134 | fn get_commitment(&self) -> Self::Commitment { 135 | self.commitment 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/strategy.rs: -------------------------------------------------------------------------------- 1 | use super::commitment::{CommitmentScheme, Verifier}; 2 | use crate::plonk::Error; 3 | 4 | /// Guards is unfinished verification result. Implement this to construct various 5 | /// verification strategies such as aggregation and recursion. 6 | pub trait Guard { 7 | /// Multi scalar engine which is not evaluated yet. 8 | type MSMAccumulator; 9 | } 10 | 11 | /// Trait representing a strategy for verifying Halo 2 proofs. 12 | pub trait VerificationStrategy<'params, Scheme: CommitmentScheme, V: Verifier<'params, Scheme>> { 13 | /// The output type of this verification strategy after processing a proof. 14 | type Output; 15 | 16 | /// Creates new verification strategy instance 17 | fn new(params: &'params Scheme::ParamsVerifier) -> Self; 18 | 19 | /// Obtains an MSM from the verifier strategy and yields back the strategy's 20 | /// output. 21 | fn process( 22 | self, 23 | f: impl FnOnce(V::MSMAccumulator) -> Result, 24 | ) -> Result; 25 | 26 | /// Finalizes the batch and checks its validity. 27 | /// 28 | /// Returns `false` if *some* proof was invalid. If the caller needs to identify 29 | /// specific failing proofs, it must re-process the proofs separately. 30 | fn finalize(self) -> bool; 31 | } 32 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly-2023-12-03 2 | --------------------------------------------------------------------------------