├── .gitignore ├── COPYING ├── Cargo.lock ├── Cargo.toml ├── Contributor_Agreement ├── LICENSE-BOSL ├── README.md ├── book ├── .gitignore ├── Makefile ├── book.toml ├── edithtml.sh ├── macros.txt └── src │ ├── README.md │ ├── SUMMARY.md │ ├── background.md │ ├── background │ ├── curves.md │ ├── fields.md │ ├── groups.md │ ├── pc-ipa.md │ ├── plonkish.md │ ├── polynomials.md │ └── recursion.md │ ├── concepts.md │ ├── concepts │ ├── arithmetization.md │ ├── chips.md │ ├── gadgets.md │ └── proofs.md │ ├── design.md │ ├── design │ ├── gadgets.md │ ├── gadgets │ │ ├── decomposition.md │ │ ├── ecc.md │ │ ├── ecc │ │ │ ├── addition.md │ │ │ ├── fixed-base-scalar-mul.md │ │ │ └── var-base-scalar-mul.md │ │ ├── sha256.md │ │ ├── sha256 │ │ │ ├── bit_reassignment.png │ │ │ ├── compression.png │ │ │ ├── low_sigma_0.png │ │ │ ├── low_sigma_1.png │ │ │ ├── table16.md │ │ │ ├── upp_sigma_0.png │ │ │ └── upp_sigma_1.png │ │ ├── sinsemilla.md │ │ └── sinsemilla │ │ │ └── merkle-crh.md │ ├── implementation.md │ ├── implementation │ │ ├── fields.md │ │ └── proofs.md │ ├── protocol.md │ ├── proving-system.md │ └── proving-system │ │ ├── circuit-commitments.md │ │ ├── comparison.md │ │ ├── inner-product.md │ │ ├── lookup.md │ │ ├── multipoint-opening.md │ │ ├── permutation-diagram.png │ │ ├── permutation-diagram.svg │ │ ├── permutation.md │ │ └── vanishing.md │ ├── user.md │ └── user │ ├── dev-tools.md │ ├── gadgets.md │ ├── lookup-tables.md │ ├── simple-example.md │ └── tips-and-tricks.md ├── halo2 ├── CHANGELOG.md ├── Cargo.toml └── src │ └── lib.rs ├── halo2_proofs ├── CHANGELOG.md ├── Cargo.toml ├── README.md ├── benches │ ├── arithmetic.rs │ └── plonk.rs ├── build.rs ├── examples │ ├── circuit-layout.rs │ ├── cost-model.rs │ ├── lookup_api.rs │ ├── lookup_api_set.rs │ ├── range-check.rs │ ├── shuffle.rs │ ├── shuffle_api.rs │ ├── shuffle_api_group.rs │ ├── simple-example-2.rs │ ├── simple-example-3.rs │ ├── simple-example.rs │ └── two-chip.rs ├── proptest-regressions │ └── plonk │ │ ├── assigned.txt │ │ └── circuit │ │ └── compress_selectors.txt ├── src │ ├── arithmetic.rs │ ├── circuit.rs │ ├── circuit │ │ ├── floor_planner.rs │ │ ├── floor_planner │ │ │ ├── flat.rs │ │ │ ├── flat │ │ │ │ └── region.rs │ │ │ ├── single_pass.rs │ │ │ ├── v1.rs │ │ │ └── v1 │ │ │ │ └── strategy.rs │ │ └── layouter.rs │ ├── dev.rs │ ├── dev │ │ ├── cost.rs │ │ ├── gates.rs │ │ ├── graph.rs │ │ ├── graph │ │ │ └── layout.rs │ │ ├── metadata.rs │ │ └── util.rs │ ├── helpers.rs │ ├── lib.rs │ ├── multicore.rs │ ├── parallel.rs │ ├── plonk.rs │ ├── plonk │ │ ├── assigned.rs │ │ ├── circuit.rs │ │ ├── circuit │ │ │ └── compress_selectors.rs │ │ ├── error.rs │ │ ├── evaluation.rs │ │ ├── evaluation_gpu.rs │ │ ├── keygen.rs │ │ ├── logup.rs │ │ ├── logup │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ ├── lookup.rs │ │ ├── lookup │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ ├── permutation.rs │ │ ├── permutation │ │ │ ├── keygen.rs │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ ├── prover.rs │ │ ├── range_check.rs │ │ ├── shuffle.rs │ │ ├── shuffle │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ ├── vanishing.rs │ │ ├── vanishing │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ └── verifier.rs │ ├── poly.rs │ ├── poly │ │ ├── commitment.rs │ │ ├── domain.rs │ │ ├── msm.rs │ │ ├── multiopen.rs │ │ └── multiopen │ │ │ ├── gwc.rs │ │ │ ├── gwc │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ │ │ ├── shplonk.rs │ │ │ └── shplonk │ │ │ ├── prover.rs │ │ │ └── verifier.rs │ └── transcript.rs └── tests │ ├── lookup_any.rs │ └── plonk_api.rs ├── 
katex-header.html └── rust-toolchain /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | .vscode 4 | **/*.html 5 | .idea 6 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | Copyright 2020-2021 The Electric Coin Company 2 | 3 | This package ("Original Work") is licensed under the terms of the Bootstrap Open 4 | Source License, version 1.0, or at your option, any later version ("BOSL"). See 5 | the file ./LICENSE-BOSL for the terms of the Bootstrap Open Source Licence, 6 | version 1.0. 7 | 8 | Only if this Original Work is included as part of the distribution of one of the 9 | following projects ("the Project"): 10 | 11 | - The Zcash projects published by the Electric Coin Company, 12 | - The Zebra project published by the Zcash Foundation, 13 | 14 | then License is granted to use this package under the BOSL as modified by the 15 | following clarification and special exception. This exception applies only to 16 | the Original Work when linked or combined with the Project and not to the 17 | Original Work when linked, combined, or included in or with any other software 18 | or project or on a standalone basis. 19 | 20 | Under the terms of the BOSL, linking or combining this Original Work with 21 | the Project creates a Derivative Work based upon the Original Work and the 22 | terms of the BOSL thus apply to both the Original Work and that Derivative 23 | Work. As a special exception to the BOSL, and to allow this Original Work to 24 | be linked and combined with the Project without having to apply the BOSL to 25 | the other portions of the Project, you are granted permission to link or 26 | combine this Original Work with the Project and to copy and distribute the 27 | resulting work ("Resulting Work") under the open source license applicable 28 | to the Project ("Project License"), provided that any portions of this 29 | Original Work included in the Resulting Work remain subject to the BOSL. For 30 | clarity, you may continue to treat all other portions of the Project under 31 | the Project License, provided that you comply with the BOSL with respect to 32 | the Original Work. If you modify this Original Work, your version of the 33 | Original Work must remain under the BOSL. You may also extend this exception 34 | to your version, but you are not obligated to do so. If you do not wish to 35 | do so, delete this exception statement from your version. 36 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "halo2", 4 | "halo2_proofs", 5 | ] 6 | -------------------------------------------------------------------------------- /Contributor_Agreement: -------------------------------------------------------------------------------- 1 | ECC Contributor Agreement 2 | 3 | This Contributor Agreement ("Agreement") applies to your contribution of any 4 | software, documentation, or other materials to any project ("project") 5 | established or managed by ZeroCoin Electric Coin Company LLC (dba Electric Coin 6 | Company) ("ECC", "we", "us" or "our") and establishes the intellectual property 7 | rights you grant to ECC in your contribution. 8 | 9 | Please read this Agreement carefully. 
If you agree to this Agreement, please 10 | complete and sign the Agreement below. We suggest you also keep a copy of this 11 | Agreement for your records. 12 | 13 | 1. Please provide your information below. If you are making a contribution 14 | individually, the term "you" refers to the individual identified below. If you 15 | are making a contribution on behalf of a company or other entity, "you" will 16 | also mean the company or entity you identify below. 17 | 18 | Your name: 19 | 20 | Your GitHub handle: 21 | 22 | Company/entity name (if applicable): 23 | 24 | Your mailing address: 25 | 26 | Your telephone: 27 | 28 | Your preferred email: 29 | 30 | The term "contribution" means any software, documentation, materials, or other 31 | work of authorship, including any modifications or additions to any existing 32 | software, documentation, materials, or other work, that you submit to ECC in 33 | any form, including electronic, verbal, or written. 34 | 35 | 2. You grant to ECC, and recipients of any contributions distributed by ECC, 36 | the following rights and licenses: 37 | 38 | a. A perpetual, worldwide, non-exclusive, no-charge, royalty-free, fully 39 | sublicensable, irrevocable right and license to reproduce, prepare 40 | derivative works of, display, perform, sublicense, and distribute and make 41 | available any contributions and such derivative works. 42 | 43 | b. A perpetual, worldwide, non-exclusive, no-charge, royalty-free, fully 44 | sublicensable, irrevocable[ (except as stated in this section)] right and 45 | license to make, have made, use, offer to sell, sell, import, transfer and 46 | otherwise practice any contributions, in whole or in part, alone or in 47 | combination with or included in any other contributions or any other 48 | software, documentation, materials, or works. If any entity institutes 49 | patent litigation against you or any other entity (including a cross-claim 50 | or counterclaim in a lawsuit) alleging that one of your contributions, or 51 | any work to which you have contributed, constitutes direct or contributory 52 | patent infringement, then any patent licenses granted to that entity under 53 | this Agreement for that contribution or work shall terminate as of the date 54 | such litigation is filed. 55 | 56 | You also agree that you will not assert any moral rights in any contribution 57 | against us, our licensees or transferees. The rights in this Section are 58 | effective on the date a contribution is first submitted to us, even if the 59 | submission took place before the date you agree to this Agreement. Except as 60 | stated in this Section, as between you and us, you keep all right, title, and 61 | interest in your contribution. 62 | 63 | 3. You represent and warrant to ECC that: (a) each of your contributions is 64 | your original creation; (b) you are legally entitled to make your contributions 65 | and grant the above rights and licenses to ECC; and (c) you have (or will 66 | obtain) any permissions, whether from your employer and any other entity or 67 | individual, necessary to make your contributions and grant the above rights and 68 | licenses to ECC. You will provide ECC with complete details of any third-party 69 | license or other restrictions (including intellectual property restrictions) of 70 | which you are aware and which are associated with any part of your 71 | contributions. 
You agree to notify ECC of any facts or circumstances of which 72 | you become aware that would make these representations inaccurate in any 73 | respect. 74 | 75 | 4. This Agreement is governed by the laws of the State of Colorado (USA) and 76 | applicable U.S. Federal law. Any choice of law rules will not apply. All issues 77 | or disputes involving this Agreement will be brought in a court in Denver, 78 | Colorado (USA), or any court having jurisdiction over those courts, and each 79 | party irrevocably submits to the jurisdiction and venue of those courts. 80 | 81 | You hereby accept and agree to this Agreement for all of your present and 82 | future contributions. 83 | 84 | Name: 85 | 86 | Signature: 87 | 88 | Date: 89 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # halo2 [![Crates.io](https://img.shields.io/crates/v/halo2.svg)](https://crates.io/crates/halo2) # 2 | 3 | **IMPORTANT**: This library is in beta, and should not be used in production software. 4 | 5 | ## [Documentation](https://docs.rs/halo2) 6 | 7 | ## Minimum Supported Rust Version 8 | 9 | Requires Rust **1.51** or higher. 10 | 11 | Minimum supported Rust version can be changed in the future, but it will be done with a 12 | minor version bump. 13 | 14 | ## Controlling parallelism 15 | 16 | `halo2` currently uses [rayon](https://github.com/rayon-rs/rayon) for parallel computation. 17 | The `RAYON_NUM_THREADS` environment variable can be used to set the number of threads. 18 | 19 | ## License 20 | 21 | Copyright 2020-2021 The Electric Coin Company. 22 | 23 | You may use this package under the Bootstrap Open Source Licence, version 1.0, 24 | or at your option, any later version. See the file [`COPYING`](COPYING) for 25 | more details, and [`LICENSE-BOSL`](LICENSE-BOSL) for the terms of the Bootstrap 26 | Open Source Licence, version 1.0. 27 | 28 | The purpose of the BOSL is to allow commercial improvements to the package 29 | while ensuring that all improvements are open source. See 30 | [here](https://electriccoin.co/blog/introducing-tgppl-a-radically-new-type-of-open-source-license/) 31 | for why the BOSL exists. 
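Referring back to the "Controlling parallelism" section above: as a hedged sketch (not a documented `halo2` API, just standard `rayon` behaviour), the thread count can also be pinned from Rust itself, provided the variable is set before the first parallel call initialises rayon's global pool.

```rust
fn main() {
    // rayon reads RAYON_NUM_THREADS when its global thread pool is first
    // initialised, so set it before any proving or verification work starts.
    // (Illustrative only; exporting the variable in the shell works equally well.)
    std::env::set_var("RAYON_NUM_THREADS", "8");

    // ... build parameters, keys, and proofs as usual ...
}
```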
32 | -------------------------------------------------------------------------------- /book/.gitignore: -------------------------------------------------------------------------------- 1 | book 2 | -------------------------------------------------------------------------------- /book/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all 2 | all: 3 | find src -type f -a -name '*.md' |sed 's/[.]md$$/.html/g' |xargs $(MAKE) 4 | 5 | clean: 6 | find src -type f -a -name '*.html' -print0 |xargs -0 rm 7 | 8 | %.html: %.md 9 | pandoc --katex --from=markdown --to=html "$<" "--output=$@" 10 | ./edithtml.sh "$@" "$<" 11 | -------------------------------------------------------------------------------- /book/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = [ 3 | "Jack Grigg", 4 | "Sean Bowe", 5 | "Daira Hopwood", 6 | "Ying Tong Lai", 7 | ] 8 | language = "en" 9 | multilingual = false 10 | src = "src" 11 | title = "The halo2 Book" 12 | 13 | [preprocessor.katex] 14 | macros = "macros.txt" 15 | -------------------------------------------------------------------------------- /book/edithtml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | cat - "$1" > "$1.prefix" < 5 | 6 | 7 | 8 | 9 | 10 | $2 11 | 17 | 18 | 19 | 21 | 22 | 23 | EOF 24 | cat "$1.prefix" - >"$1" < 26 | 27 | EOF 28 | rm -f "$1.prefix" 29 | -------------------------------------------------------------------------------- /book/macros.txt: -------------------------------------------------------------------------------- 1 | # Conventions 2 | 3 | \bconcat:{\mathop{\kern 0.1em||\kern 0.1em}} 4 | \Repr:{\star} 5 | 6 | # Conversions 7 | 8 | \ItoLEBSP:{\mathsf{I2LEBSP}_{#1}} 9 | 10 | # Fields and curves 11 | 12 | \BaseLength:{\ell^\mathsf{#1\vphantom{p}}_{\mathsf{base}}} 13 | 14 | # Commitments and hashes 15 | 16 | \SinsemillaHash:{\mathsf{SinsemillaHash}} 17 | \SinsemillaCommit:{\mathsf{SinsemillaCommit}} 18 | \SinsemillaShortCommit:{\mathsf{SinsemillaShortCommit}} 19 | 20 | # Circuit constraint helper methods 21 | 22 | \BoolCheck:{\texttt{bool\_check}({#1})} 23 | \RangeCheck:{\texttt{range\_check}({#1, #2})} 24 | \ShortLookupRangeCheck:{\texttt{short\_lookup\_range\_check}({#1})} 25 | 26 | # Halo 2 proof 27 | 28 | \field:{\mathbb{F}} 29 | \group:{\mathbb{G}} 30 | \setup:{\textnormal{Setup}} 31 | \prover:{\mathcal{P}} 32 | \verifier:{\mathcal{V}} 33 | \sec:{\lambda} 34 | \negl:{\textnormal{negl}(\lambda)} 35 | \pp:{\mathsf{pp}} 36 | \ip:{\textnormal{IP}} 37 | \relation:{\mathcal{R}} 38 | \a:{\mathcal{A}} 39 | \sim:{\mathcal{S}} 40 | \tr:{\textnormal{tr}} 41 | \srs:{\textnormal{SRS}} 42 | \srwee:{\textnormal{sr-wee}} 43 | \real:{\textnormal{real}} 44 | \ideal:{\textnormal{ideal}} 45 | \weereal:{\textnormal{WEE-real}} 46 | \weeideal:{\textnormal{WEE-ideal}} 47 | \oracle:{\mathcal{O}} 48 | \ch:{\mathsf{Ch}} 49 | \badch:{\mathsf{BadCh}} 50 | \adv:{\mathsf{Adv}} 51 | \bottom:{\perp} 52 | \alg:{#1_\textnormal{alg}} 53 | \zero:{\mathcal{O}} 54 | \dlrel:{\mathsf{dl-rel}} 55 | \game:{\mathsf{G}} 56 | \innerprod:{\langle{#1},{#2}\rangle} 57 | \dlgame:{\mathsf{G}^\dlrel_{\group,n}} 58 | \distinguisher:{\mathcal{D}} 59 | \extractor:{\mathcal{E}} 60 | \state:{\mathsf{st}_{#1}} 61 | \halo:{\textsf{Halo}} 62 | \lo:{\textnormal{lo}} 63 | \hi:{\textnormal{hi}} 64 | \protocol:{\halo} -------------------------------------------------------------------------------- /book/src/README.md: 
-------------------------------------------------------------------------------- 1 | {{#include ../../README.md}} 2 | -------------------------------------------------------------------------------- /book/src/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # The halo2 Book 2 | 3 | [halo2](README.md) 4 | - [Concepts](concepts.md) 5 | - [Proof systems](concepts/proofs.md) 6 | - [PLONKish Arithmetization](concepts/arithmetization.md) 7 | - [Chips](concepts/chips.md) 8 | - [Gadgets](concepts/gadgets.md) 9 | - [User Documentation](user.md) 10 | - [Developer tools](user/dev-tools.md) 11 | - [A simple example](user/simple-example.md) 12 | - [Lookup tables](user/lookup-tables.md) 13 | - [Gadgets](user/gadgets.md) 14 | - [Tips and tricks](user/tips-and-tricks.md) 15 | - [Design](design.md) 16 | - [Proving system](design/proving-system.md) 17 | - [Lookup argument](design/proving-system/lookup.md) 18 | - [Permutation argument](design/proving-system/permutation.md) 19 | - [Circuit commitments](design/proving-system/circuit-commitments.md) 20 | - [Vanishing argument](design/proving-system/vanishing.md) 21 | - [Multipoint opening argument](design/proving-system/multipoint-opening.md) 22 | - [Inner product argument](design/proving-system/inner-product.md) 23 | - [Comparison to other work](design/proving-system/comparison.md) 24 | - [Protocol Description](design/protocol.md) 25 | - [Implementation](design/implementation.md) 26 | - [Proofs](design/implementation/proofs.md) 27 | - [Fields](design/implementation/fields.md) 28 | - [Gadgets](design/gadgets.md) 29 | - [Elliptic curve cryptography](design/gadgets/ecc.md) 30 | - [Incomplete and complete addition](design/gadgets/ecc/addition.md) 31 | - [Fixed-base scalar multiplication](design/gadgets/ecc/fixed-base-scalar-mul.md) 32 | - [Variable-base scalar multiplication](design/gadgets/ecc/var-base-scalar-mul.md) 33 | - [Sinsemilla](design/gadgets/sinsemilla.md) 34 | - [MerkleCRH](design/gadgets/sinsemilla/merkle-crh.md) 35 | - [Decomposition](design/gadgets/decomposition.md) 36 | - [SHA-256](design/gadgets/sha256.md) 37 | - [16-bit table chip](design/gadgets/sha256/table16.md) 38 | - [Background Material](background.md) 39 | - [Fields](background/fields.md) 40 | - [Polynomials](background/polynomials.md) 41 | - [Cryptographic groups](background/groups.md) 42 | - [Elliptic curves](background/curves.md) 43 | - [Polynomial commitment using inner product argument](background/pc-ipa.md) 44 | - [Recursion](background/recursion.md) 45 | -------------------------------------------------------------------------------- /book/src/background.md: -------------------------------------------------------------------------------- 1 | # Background Material 2 | 3 | This section covers the background material required to understand the Halo 2 proving 4 | system. It is targeted at an ELI15 (Explain It Like I'm 15) level; if you think anything 5 | could do with additional explanation, [let us know]! 6 | 7 | [let us know]: https://github.com/zcash/halo2/issues/new/choose 8 | -------------------------------------------------------------------------------- /book/src/background/groups.md: -------------------------------------------------------------------------------- 1 | # Cryptographic groups 2 | 3 | In the section [Inverses and groups](fields.md#inverses-and-groups) we introduced the 4 | concept of *groups*. A group has an identity and a group operation. In this section we 5 | will write groups additively, i.e. 
the identity is $\mathcal{O}$ and the group operation 6 | is $+$. 7 | 8 | Some groups can be used as *cryptographic groups*. At the risk of oversimplifying, this 9 | means that the problem of finding a discrete logarithm of a group element $P$ to a given 10 | base $G$, i.e. finding $x$ such that $P = [x] G$, is hard in general. 11 | 12 | ## Pedersen commitment 13 | The Pedersen commitment [[P99]] is a way to commit to a secret message in a verifiable 14 | way. It uses two random public generators $G, H \in \mathbb{G},$ where $\mathbb{G}$ is a 15 | cryptographic group of order $p$. A random secret $r$ is chosen in $\mathbb{Z}_q$, and the 16 | message to commit to $m$ is from any subset of $\mathbb{Z}_q$. The commitment is 17 | 18 | $$c = \text{Commit}(m,r)=[m]G + [r]H.$$ 19 | 20 | To open the commitment, the committer reveals $m$ and $r,$ thus allowing anyone to verify 21 | that $c$ is indeed a commitment to $m.$ 22 | 23 | [P99]: https://link.springer.com/content/pdf/10.1007%2F3-540-46766-1_9.pdf#page=3 24 | 25 | Notice that the Pedersen commitment scheme is homomorphic: 26 | 27 | $$ 28 | \begin{aligned} 29 | \text{Commit}(m,r) + \text{Commit}(m',r') &= [m]G + [r]H + [m']G + [r']H \\ 30 | &= [m + m']G + [r + r']H \\ 31 | &= \text{Commit}(m + m',r + r'). 32 | \end{aligned} 33 | $$ 34 | 35 | Assuming the discrete log assumption holds, Pedersen commitments are also perfectly hiding 36 | and computationally binding: 37 | 38 | * **hiding**: the adversary chooses messages $m_0, m_1.$ The committer commits to one of 39 | these messages $c = \text{Commit}(m_b;r), b \in \{0,1\}.$ Given $c,$ the probability of 40 | the adversary guessing the correct $b$ is no more than $\frac{1}{2}$. 41 | * **binding**: the adversary cannot pick two different messages $m_0 \neq m_1,$ and 42 | randomness $r_0, r_1,$ such that $\text{Commit}(m_0,r_0) = \text{Commit}(m_1,r_1).$ 43 | 44 | ### Vector Pedersen commitment 45 | We can use a variant of the Pedersen commitment scheme to commit to multiple messages at 46 | once, $\mathbf{m} = (m_1, \cdots, m_n)$. This time, we'll have to sample a corresponding 47 | number of random public generators $\mathbf{G} = (G_0, \cdots, G_{n-1}),$ along with a 48 | single random generator $H$ as before (for use in hiding). Then, our commitment scheme is: 49 | 50 | $$ 51 | \begin{aligned} 52 | \text{Commit}(\mathbf{m}; r) &= \text{Commit}((m_0, \cdots, m_{n-1}); r) \\ 53 | &= [r]H + [m_0]G_0 + \cdots + [m_{n-1}]G_{n-1} \\ 54 | &= [r]H + \sum_{i= 0}^{n-1} [m_i]G_i. 55 | \end{aligned} 56 | $$ 57 | 58 | > TODO: is this positionally binding? 59 | 60 | ## Diffie--Hellman 61 | 62 | An example of a protocol that uses cryptographic groups is Diffie--Hellman key agreement 63 | [[DH1976]]. The Diffie--Hellman protocol is a method for two users, Alice and Bob, to 64 | generate a shared private key. It proceeds as follows: 65 | 66 | 1. Alice and Bob publicly agree on two prime numbers, $p$ and $G,$ where $p$ is large and 67 | $G$ is a primitive root $\pmod p.$ (Note that $g$ is a generator of the group 68 | $\mathbb{F}_p^\times.$) 69 | 2. Alice chooses a large random number $a$ as her private key. She computes her public key 70 | $A = [a]G \pmod p,$ and sends $A$ to Bob. 71 | 3. Similarly, Bob chooses a large random number $b$ as his private key. He computes his 72 | public key $B = [b]G \pmod p,$ and sends $B$ to Alice. 73 | 4. 
Now both Alice and Bob compute their shared key $K = [ab]G \pmod p,$ which Alice 74 | computes as 75 | $$K = [a]B \pmod p = [a]([b]G) \pmod p,$$ 76 | and Bob computes as 77 | $$K = [b]A \pmod p = [b]([a]G) \pmod p.$$ 78 | 79 | [DH1976]: https://ee.stanford.edu/~hellman/publications/24.pdf 80 | 81 | A potential eavesdropper would need to derive $K = [ab]g \pmod p$ knowing only 82 | $g, p, A = [a]G,$ and $B = [b]G$: in other words, they would need to either get the 83 | discrete logarithm $a$ from $A = [a]G$ or $b$ from $B = [b]G,$ which we assume to be 84 | computationally infeasible in $\mathbb{F}_p^\times.$ 85 | 86 | More generally, protocols that use similar ideas to Diffie--Hellman are used throughout 87 | cryptography. One way of instantiating a cryptographic group is as an 88 | [elliptic curve](curves.md). Before we go into detail on elliptic curves, we'll describe 89 | some algorithms that can be used for any group. 90 | 91 | ## Multiscalar multiplication 92 | 93 | ### TODO: Pippenger's algorithm 94 | Reference: https://jbootle.github.io/Misc/pippenger.pdf 95 | -------------------------------------------------------------------------------- /book/src/background/pc-ipa.md: -------------------------------------------------------------------------------- 1 | # Polynomial commitment using inner product argument 2 | We want to commit to some polynomial $p(X) \in \mathbb{F}_p[X]$, and be able to provably 3 | evaluate the committed polynomial at arbitrary points. The naive solution would be for the 4 | prover to simply send the polynomial's coefficients to the verifier: however, this 5 | requires $O(n)$ communication. Our polynomial commitment scheme gets the job done using 6 | $O(\log n)$ communication. 7 | 8 | ### `Setup` 9 | Given a parameter $d = 2^k,$ we generate the common reference string 10 | $\sigma = (\mathbb{G}, \mathbf{G}, H, \mathbb{F}_p)$ defining certain constants for this 11 | scheme: 12 | * $\mathbb{G}$ is a group of prime order $p;$ 13 | * $\mathbf{G} \in \mathbb{G}^d$ is a vector of $d$ random group elements; 14 | * $H \in \mathbb{G}$ is a random group element; and 15 | * $\mathbb{F}_p$ is the finite field of order $p.$ 16 | 17 | ### `Commit` 18 | The Pedersen vector commitment $\text{Commit}$ is defined as 19 | 20 | $$\text{Commit}(\sigma, p(X); r) = \langle\mathbf{a}, \mathbf{G}\rangle + [r]H,$$ 21 | 22 | for some polynomial $p(X) \in \mathbb{F}_p[X]$ and some blinding factor 23 | $r \in \mathbb{F}_p.$ Here, each element of the vector $\mathbf{a}_i \in \mathbb{F}_p$ is 24 | the coefficient for the $i$th degree term of $p(X),$ and $p(X)$ is of maximal degree 25 | $d - 1.$ 26 | 27 | ### `Open` (prover) and `OpenVerify` (verifier) 28 | The modified inner product argument is an argument of knowledge for the relation 29 | 30 | $$\boxed{\{((P, x, v); (\mathbf{a}, r)): P = \langle\mathbf{a}, \mathbf{G}\rangle + [r]H, v = \langle\mathbf{a}, \mathbf{b}\rangle\}},$$ 31 | 32 | where $\mathbf{b} = (1, x, x^2, \cdots, x^{d-1})$ is composed of increasing powers of the 33 | evaluation point $x.$ This allows a prover to demonstrate to a verifier that the 34 | polynomial contained “inside” the commitment $P$ evaluates to $v$ at $x,$ and moreover, 35 | that the committed polynomial has maximum degree $d − 1.$ 36 | 37 | The inner product argument proceeds in $k = \log_2 d$ rounds. For our purposes, it is 38 | sufficient to know about its final outputs, while merely providing intuition about the 39 | intermediate rounds. (Refer to Section 3 in the [Halo] paper for a full explanation.) 
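As a deliberately naive illustration of the `Commit` step above, here is a hedged sketch written against the generic `group`/`ff` traits rather than the actual `halo2_proofs` API (the function names are invented, the constructor spelling `zero()`/`one()` assumes the crate versions of this era, and the real prover uses an optimised multiexponentiation rather than a fold). It also shows why the claimed value $v = \langle\mathbf{a}, \mathbf{b}\rangle$ with $\mathbf{b} = (1, x, \cdots, x^{d-1})$ is exactly the evaluation $p(x)$.

```rust
use ff::Field;
use group::Group;

/// Illustrative only: Pedersen vector commitment to the coefficient vector `a`
/// of p(X), with blinding factor `r`: Commit(σ, p(X); r) = <a, G> + [r]H.
fn commit<G: Group>(a: &[G::Scalar], gens: &[G], h: G, r: G::Scalar) -> G {
    assert_eq!(a.len(), gens.len());
    a.iter()
        .zip(gens.iter())
        .fold(h * r, |acc, (a_i, g_i)| acc + *g_i * *a_i)
}

/// <a, b> with b = (1, x, x^2, ...) is just the evaluation p(x).
fn eval_via_inner_product<F: Field>(a: &[F], x: F) -> F {
    let mut acc = F::zero();
    let mut b_i = F::one();
    for a_i in a {
        acc = acc + *a_i * b_i;
        b_i = b_i * x;
    }
    acc
}
```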
40 | 41 | [Halo]: https://eprint.iacr.org/2019/1021.pdf 42 | 43 | Before beginning the argument, the verifier selects a random group element $U$ and sends it 44 | to the prover. We initialize the argument at round $k,$ with the vectors 45 | $\mathbf{a}^{(k)} := \mathbf{a},$ $\mathbf{G}^{(k)} := \mathbf{G}$ and 46 | $\mathbf{b}^{(k)} := \mathbf{b}.$ In each round $j = k, k-1, \cdots, 1$: 47 | 48 | * the prover computes two values $L_j$ and $R_j$ by taking some inner product of 49 | $\mathbf{a}^{(j)}$ with $\mathbf{G}^{(j)}$ and $\mathbf{b}^{(j)}$. Note that are in some 50 | sense "cross-terms": the lower half of $\mathbf{a}$ is used with the higher half of 51 | $\mathbf{G}$ and $\mathbf{b}$, and vice versa: 52 | 53 | $$ 54 | \begin{aligned} 55 | L_j &= \langle\mathbf{a_{lo}^{(j)}}, \mathbf{G_{hi}^{(j)}}\rangle + [l_j]H + [\langle\mathbf{a_{lo}^{(j)}}, \mathbf{b_{hi}^{(j)}}\rangle] U\\ 56 | R_j &= \langle\mathbf{a_{hi}^{(j)}}, \mathbf{G_{lo}^{(j)}}\rangle + [l_j]H + [\langle\mathbf{a_{hi}^{(j)}}, \mathbf{b_{lo}^{(j)}}\rangle] U\\ 57 | \end{aligned} 58 | $$ 59 | 60 | * the verifier issues a random challenge $u_j$; 61 | * the prover uses $u_j$ to compress the lower and higher halves of $\mathbf{a}^{(j)}$, 62 | thus producing a new vector of half the original length 63 | $$\mathbf{a}^{(j-1)} = \mathbf{a_{hi}^{(j)}}\cdot u_j^{-1} + \mathbf{a_{lo}^{(j)}}\cdot u_j.$$ 64 | The vectors $\mathbf{G}^{(j)}$ and $\mathbf{b}^{(j)}$ are similarly compressed to give 65 | $\mathbf{G}^{(j-1)}$ and $\mathbf{b}^{(j-1)}$. 66 | * $\mathbf{a}^{(j-1)}$, $\mathbf{G}^{(j-1)}$ and $\mathbf{b}^{(j-1)}$ are input to the 67 | next round $j - 1.$ 68 | 69 | Note that at the end of the last round $j = 1,$ we are left with $a := \mathbf{a}^{(0)}$, 70 | $G := \mathbf{G}^{(0)}$, $b := \mathbf{b}^{(0)},$ each of length 1. The intuition is that 71 | these final scalars, together with the challenges $\{u_j\}$ and "cross-terms" 72 | $\{L_j, R_j\}$ from each round, encode the compression in each round. Since the prover did 73 | not know the challenges $U, \{u_j\}$ in advance, they would have been unable to manipulate 74 | the round compressions. Thus, checking a constraint on these final terms should enforce 75 | that the compression had been performed correctly, and that the original $\mathbf{a}$ 76 | satisfied the relation before undergoing compression. 77 | 78 | Note that $G, b$ are simply rearrangements of the publicly known $\mathbf{G}, \mathbf{b},$ 79 | with the round challenges $\{u_j\}$ mixed in: this means the verifier can compute $G, b$ 80 | independently and verify that the prover had provided those same values. 81 | -------------------------------------------------------------------------------- /book/src/background/plonkish.md: -------------------------------------------------------------------------------- 1 | # [WIP] PLONKish arithmetization 2 | 3 | We call the field over which the circuit is defined $\mathbb{F} = \mathbb{F}_p$. 4 | 5 | Let $n = 2^k$, and assume that $\omega$ is a primitive root of unity of order $n$ in 6 | $\mathbb{F}^\times$, so that $\mathbb{F}^\times$ has a multiplicative subgroup 7 | $\mathcal{H} = \{1, \omega, \omega^2, \cdots, \omega^{n-1}\}$. This forms a Lagrange 8 | basis corresponding to the elements in the subgroup. 9 | 10 | ## Polynomial rules 11 | A polynomial rule defines a constraint that must hold between its specified columns at 12 | every row (i.e. at every element in the multiplicative subgroup). 13 | 14 | e.g. 
15 | 16 | ```text 17 | a * sa + b * sb + a * b * sm + c * sc + PI = 0 18 | ``` 19 | 20 | ## Columns 21 | - **fixed columns**: fixed for all instances of a particular circuit. These include 22 | selector columns, which toggle parts of a polynomial rule "on" or "off" to form a 23 | "custom gate". They can also include any other fixed data. 24 | - **advice columns**: variable values assigned in each instance of the circuit. 25 | Corresponds to the prover's secret witness. 26 | - **public input**: like advice columns, but publicly known values. 27 | 28 | Each column is a vector of $n$ values, e.g. $\mathbf{a} = [a_0, a_1, \cdots, a_{n-1}]$. We 29 | can think of the vector as the evaluation form of the column polynomial 30 | $a(X), X \in \mathcal{H}.$ To recover the coefficient form, we can use 31 | [Lagrange interpolation](polynomials.md#lagrange-interpolation), such that 32 | $a(\omega^i) = a_i.$ 33 | 34 | ## Equality constraints 35 | - Define permutation between a set of columns, e.g. $\sigma(a, b, c)$ 36 | - Assert equalities between specific cells in these columns, e.g. $b_1 = c_0$ 37 | - Construct permuted columns which should evaluate to same value as original columns 38 | 39 | ## Permutation grand product 40 | $$Z(\omega^i) := \prod_{0 \leq j \leq i} \frac{C_k(\omega^j) + \beta\delta^k \omega^j + \gamma}{C_k(\omega^j) + \beta S_k(\omega^j) + \gamma},$$ 41 | where $i = 0, \cdots, n-1$ indexes over the size of the multiplicative subgroup, and 42 | $k = 0, \cdots, m-1$ indexes over the advice columns involved in the permutation. This is 43 | a running product, where each term includes the cumulative product of the terms before it. 44 | 45 | > TODO: what is $\delta$? keep columns linearly independent 46 | 47 | Check the constraints: 48 | 49 | 1. First term is equal to one 50 | $$\mathcal{L}_0(X) \cdot (1 - Z(X)) = 0$$ 51 | 52 | 2. Running product is well-constructed. For each row, we check that this holds: 53 | $$Z(\omega^i) \cdot{(C(\omega^i) + \beta S_k(\omega^i) + \gamma)} - Z(\omega^{i-1}) \cdot{(C(\omega^i) + \delta^k \beta \omega^i + \gamma)} = 0$$ 54 | Rearranging gives 55 | $$Z(\omega^i) = Z(\omega^{i-1}) \frac{C(\omega^i) + \beta\delta^k \omega^i + \gamma}{C(\omega^i) + \beta S_k(\omega^i) + \gamma},$$ 56 | which is how we defined the grand product polynomial in the first place. 57 | 58 | ### Lookup 59 | Reference: [Generic Lookups with PLONK (DRAFT)](/LTPc5f-3S0qNF6MtwD-Tdg?view) 60 | 61 | ### Vanishing argument 62 | We want to check that the expressions defined by the gate constraints, permutation 63 | constraints and lookup constraints evaluate to zero at all elements in the multiplicative 64 | subgroup. To do this, the prover collapses all the expressions into one polynomial 65 | $$H(X) = \sum_{i=0}^e y^i E_i(X),$$ 66 | where $e$ is the number of expressions and $y$ is a random challenge used to keep the 67 | constraints linearly independent. 
The prover then divides this by the vanishing polynomial 68 | (see section: [Vanishing polynomial](polynomials.md#vanishing-polynomial)) and commits to 69 | the resulting quotient 70 | 71 | $$\text{Commit}(Q(X)), \text{where } Q(X) = \frac{H(X)}{Z_H(X)}.$$ 72 | 73 | The verifier responds with a random evaluation point $x,$ to which the prover replies with 74 | the claimed evaluations $q = Q(x), \{e_i\}_{i=0}^e = \{E_i(x)\}_{i=0}^e.$ Now, all that 75 | remains for the verifier to check is that the evaluations satisfy 76 | 77 | $$q \stackrel{?}{=} \frac{\sum_{i=0}^e y^i e_i}{Z_H(x)}.$$ 78 | 79 | Notice that we have yet to check that the committed polynomials indeed evaluate to the 80 | claimed values at 81 | $x, q \stackrel{?}{=} Q(x), \{e_i\}_{i=0}^e \stackrel{?}{=} \{E_i(x)\}_{i=0}^e.$ 82 | This check is handled by the polynomial commitment scheme (described in the next section). 83 | -------------------------------------------------------------------------------- /book/src/background/recursion.md: -------------------------------------------------------------------------------- 1 | ## Recursion 2 | > Alternative terms: Induction; Accumulation scheme; Proof-carrying data 3 | 4 | However, the computation of $G$ requires a length-$2^k$ multiexponentiation 5 | $\langle \mathbf{G}, \mathbf{s}\rangle,$ where $\mathbf{s}$ is composed of the round 6 | challenges $u_1, \cdots, u_k$ arranged in a binary counting structure. This is the 7 | linear-time computation that we want to amortise across a batch of proof instances. 8 | Instead of computing $G,$ notice that we can express $G$ as a commitment to a polynomial 9 | 10 | $$G = \text{Commit}(\sigma, g(X, u_1, \cdots, u_k)),$$ 11 | 12 | where $g(X, u_1, \cdots, u_k) := \prod_{i=1}^k (u_i + u_i^{-1}X^{2^{i-1}})$ is a 13 | polynomial with degree $2^k - 1.$ 14 | 15 | | | | 16 | | -------- | -------- | 17 | | | Since $G$ is a commitment, it can be checked in an inner product argument. The verifier circuit witnesses $G$ and brings $G, u_1, \cdots, u_k$ out as public inputs to the proof $\pi.$ The next verifier instance checks $\pi$ using the inner product argument; this includes checking that $G = \text{Commit}(g(X, u_1, \cdots, u_k))$ evaluates at some random point to the expected value for the given challenges $u_1, \cdots, u_k.$ Recall from the [previous section](#Polynomial-commitment-using-inner-product-argument) that this check only requires $\log d$ work.

At the end of checking $\pi$ and $G,$ the circuit is left with a new $G',$ along with the $u_1', \cdots, u_k'$ challenges sampled for the check. To fully accept $\pi$ as valid, we should perform a linear-time computation of $G' = \langle\mathbf{G}, \mathbf{s}'\rangle$. Once again, we delay this computation by witnessing $G'$ and bringing $G', u_1', \cdots, u_k'$ out as public inputs to the next proof.

This goes on from one proof instance to the next, until we are satisfied with the size of our batch of proofs. We finally perform a single linear-time computation, thus deciding the validity of the whole batch. | 18 | 19 | We recall from the section [Cycles of curves](curves.md#cycles-of-curves) that we can 20 | instantiate this protocol over a two-cycle, where a proof produced by one curve is 21 | efficiently verified in the circuit of the other curve. However, some of these verifier 22 | checks can actually be efficiently performed in the native circuit; these are "deferred" 23 | to the next native circuit (see diagram below) instead of being immediately passed over to 24 | the other curve. 25 | 26 | ![](https://i.imgur.com/l4HrYgE.png) 27 | -------------------------------------------------------------------------------- /book/src/concepts.md: -------------------------------------------------------------------------------- 1 | # Concepts 2 | 3 | First we'll describe the concepts behind zero-knowledge proof systems; the 4 | *arithmetization* (kind of circuit description) used by Halo 2; and the 5 | abstractions we use to build circuit implementations. 6 | -------------------------------------------------------------------------------- /book/src/concepts/arithmetization.md: -------------------------------------------------------------------------------- 1 | # PLONKish Arithmetization 2 | 3 | The arithmetization used by Halo 2 comes from [PLONK](https://eprint.iacr.org/2019/953), or 4 | more precisely its extension UltraPLONK that supports custom gates and lookup arguments. We'll 5 | call it [***PLONKish***](https://twitter.com/feministPLT/status/1413815927704014850). 6 | 7 | ***PLONKish circuits*** are defined in terms of a rectangular matrix of values. We refer to 8 | ***rows***, ***columns***, and ***cells*** of this matrix with the conventional meanings. 9 | 10 | A PLONKish circuit depends on a ***configuration***: 11 | 12 | * A finite field $\mathbb{F}$, where cell values (for a given statement and witness) will be 13 | elements of $\mathbb{F}$. 14 | * The number of columns in the matrix, and a specification of each column as being 15 | ***fixed***, ***advice***, or ***instance***. Fixed columns are fixed by the circuit; 16 | advice columns correspond to witness values; and instance columns are normally used for 17 | public inputs (technically, they can be used for any elements shared between the prover 18 | and verifier). 19 | 20 | * A subset of the columns that can participate in equality constraints. 21 | 22 | * A ***polynomial degree bound***. 23 | 24 | * A sequence of ***polynomial constraints***. These are multivariate polynomials over 25 | $\mathbb{F}$ that must evaluate to zero *for each row*. The variables in a polynomial 26 | constraint may refer to a cell in a given column of the current row, or a given column of 27 | another row relative to this one (with wrap-around, i.e. taken modulo $n$). The maximum 28 | degree of each polynomial is given by the polynomial degree bound. 29 | 30 | * A sequence of ***lookup arguments*** defined over tuples of ***input expressions*** 31 | (which are multivariate polynomials as above) and ***table columns***. 32 | 33 | A PLONKish circuit also defines: 34 | 35 | * The number of rows $n$ in the matrix. $n$ must correspond to the size of a multiplicative 36 | subgroup of $\mathbb{F}^\times$; typically a power of two. 37 | 38 | * A sequence of ***equality constraints***, which specify that two given cells must have equal 39 | values. 
40 | 41 | * The values of the fixed columns at each row. 42 | 43 | From a circuit description we can generate a ***proving key*** and a ***verification key***, 44 | which are needed for the operations of proving and verification for that circuit. 45 | 46 | > Note that we specify the ordering of columns, polynomial constraints, lookup arguments, and 47 | > equality constraints, even though these do not affect the meaning of the circuit. This makes 48 | > it easier to define the generation of proving and verification keys as a deterministic 49 | > process. 50 | 51 | Typically, a configuration will define polynomial constraints that are switched off and on by 52 | ***selectors*** defined in fixed columns. For example, a constraint $q_i \cdot p(...) = 0$ can 53 | be switched off for a particular row $i$ by setting $q_i = 0$. In this case we sometimes refer 54 | to a set of constraints controlled by a set of selector columns that are designed to be used 55 | together, as a ***gate***. Typically there will be a ***standard gate*** that supports generic 56 | operations like field multiplication and division, and possibly also ***custom gates*** that 57 | support more specialized operations. 58 | -------------------------------------------------------------------------------- /book/src/concepts/chips.md: -------------------------------------------------------------------------------- 1 | # Chips 2 | 3 | The previous section gives a fairly low-level description of a circuit. When implementing circuits we will 4 | typically use a higher-level API which aims for the desirable characteristics of auditability, 5 | efficiency, modularity, and expressiveness. 6 | 7 | Some of the terminology and concepts used in this API are taken from an analogy with 8 | integrated circuit design and layout. [As for integrated circuits](https://opencores.org/), 9 | the above desirable characteristics are easier to obtain by composing ***chips*** that provide 10 | efficient pre-built implementations of particular functionality. 11 | 12 | For example, we might have chips that implement particular cryptographic primitives such as a 13 | hash function or cipher, or algorithms like scalar multiplication or pairings. 14 | 15 | In PLONKish circuits, it is possible to build up arbitrary logic just from standard gates that do 16 | field multiplication and addition. However, very significant efficiency gains can be obtained by 17 | using custom gates. 18 | 19 | Using our API, we define chips that "know" how to use particular sets of custom gates. This 20 | creates an abstraction layer that isolates the implementation of a high-level circuit from the 21 | complexity of using custom gates directly. 22 | 23 | > Even if we sometimes need to "wear two hats", by implementing both a high-level circuit and 24 | > the chips that it uses, the intention is that this separation will result in code that is 25 | > easier to understand, audit, and maintain/reuse. This is partly because some potential 26 | > implementation errors are ruled out by construction. 27 | 28 | Gates in PLONKish circuits refer to cells by ***relative references***, i.e. to the cell in a given 29 | column, and the row at a given offset relative to the one in which the gate's selector is set. We 30 | call this an ***offset reference*** when the offset is nonzero (i.e. offset references are a subset 31 | of relative references). 32 | 33 | Relative references contrast with ***absolute references*** used in equality constraints, 34 | which can point to any cell. 
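To make the distinction concrete, here is a hedged configuration sketch in the style of the `halo2_proofs` API (the "running double" gate and the name `configure_example` are invented for illustration): the custom gate refers to cells with relative references via `Rotation`, while `enable_equality` marks a column whose cells may additionally be tied together by absolute references, i.e. equality constraints.

```rust
use halo2_proofs::{
    arithmetic::FieldExt,
    plonk::{Advice, Column, ConstraintSystem},
    poly::Rotation,
};

/// Illustrative chip configuration (not from the crate): constrain
/// a[i + 1] = 2 * a[i] wherever the selector is enabled.
fn configure_example<F: FieldExt>(meta: &mut ConstraintSystem<F>) {
    let a: Column<Advice> = meta.advice_column();
    let s = meta.selector();

    // Cells of `a` may participate in equality (absolute) constraints.
    meta.enable_equality(a);

    meta.create_gate("running double", |meta| {
        let s = meta.query_selector(s);
        // Relative references: the same column at the current row and at an
        // offset of +1 (an "offset reference").
        let cur = meta.query_advice(a, Rotation::cur());
        let next = meta.query_advice(a, Rotation::next());
        vec![s * (next - (cur.clone() + cur))]
    });
}
```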
35 | 36 | The motivation for offset references is to reduce the number of columns needed in the 37 | configuration, which reduces proof size. If we did not have offset references then we would 38 | need a column to hold each value referred to by a custom gate, and we would need to use 39 | equality constraints to copy values from other cells of the circuit into that column. With 40 | offset references, we not only need fewer columns; we also do not need equality constraints to 41 | be supported for all of those columns, which improves efficiency. 42 | 43 | In R1CS (another arithmetization which may be more familiar to some readers, but don't worry 44 | if it isn't), a circuit consists of a "sea of gates" with no semantically significant ordering. 45 | Because of offset references, the order of rows in a PLONKish circuit, on the other hand, *is* 46 | significant. We're going to make some simplifying assumptions and define some abstractions to 47 | tame the resulting complexity: the aim will be that, [at the gadget level](gadgets.md) where 48 | we do most of our circuit construction, we will not have to deal with relative references or 49 | with gate layout explicitly. 50 | 51 | We will partition a circuit into ***regions***, where each region contains a disjoint subset 52 | of cells, and relative references only ever point *within* a region. Part of the responsibility 53 | of a chip implementation is to ensure that gates that make offset references are laid out in 54 | the correct positions in a region. 55 | 56 | Given the set of regions and their ***shapes***, we will use a separate ***floor planner*** 57 | to decide where (i.e. at what starting row) each region is placed. There is a default floor 58 | planner that implements a very general algorithm, but you can write your own floor planner if 59 | you need to. 60 | 61 | Floor planning will in general leave gaps in the matrix, because the gates in a given row did 62 | not use all available columns. These are filled in —as far as possible— by gates that do 63 | not require offset references, which allows them to be placed on any row. 64 | 65 | Chips can also define lookup tables. If more than one table is defined for the same lookup 66 | argument, we can use a ***tag column*** to specify which table is used on each row. It is also 67 | possible to perform a lookup in the union of several tables (limited by the polynomial degree 68 | bound). 69 | 70 | ## Composing chips 71 | In order to combine functionality from several chips, we compose them in a tree. The top-level 72 | chip defines a set of fixed, advice, and instance columns, and then specifies how they 73 | should be distributed between lower-level chips. 74 | 75 | In the simplest case, each lower-level chips will use columns disjoint from the other chips. 76 | However, it is allowed to share a column between chips. It is important to optimize the number 77 | of advice columns in particular, because that affects proof size. 78 | 79 | The result (possibly after optimization) is a PLONKish configuration. Our circuit implementation 80 | will be parameterized on a chip, and can use any features of the supported lower-level chips via 81 | the top-level chip. 82 | 83 | Our hope is that less expert users will normally be able to find an existing chip that 84 | supports the operations they need, or only have to make minor modifications to an existing 85 | chip. 
Expert users will have full control to do the kind of 86 | [circuit optimizations](https://zips.z.cash/protocol/canopy.pdf#circuitdesign) 87 | [that ECC is famous for](https://electriccoin.co/blog/cultivating-sapling-faster-zksnarks/) 🙂. 88 | -------------------------------------------------------------------------------- /book/src/concepts/gadgets.md: -------------------------------------------------------------------------------- 1 | # Gadgets 2 | 3 | When implementing a circuit, we could use the features of the chips we've selected directly. 4 | Typically, though, we will use them via ***gadgets***. This indirection is useful because, 5 | for reasons of efficiency and limitations imposed by PLONKish circuits, the chip interfaces will 6 | often be dependent on low-level implementation details. The gadget interface can provide a more 7 | convenient and stable API that abstracts away from extraneous detail. 8 | 9 | For example, consider a hash function such as SHA-256. The interface of a chip supporting 10 | SHA-256 might be dependent on internals of the hash function design such as the separation 11 | between message schedule and compression function. The corresponding gadget interface can 12 | provide a more convenient and familiar `update`/`finalize` API, and can also handle parts 13 | of the hash function that do not need chip support, such as padding. This is similar to how 14 | [accelerated](https://software.intel.com/content/www/us/en/develop/articles/intel-sha-extensions.html) 15 | [instructions](https://developer.arm.com/documentation/ddi0514/g/introduction/about-the-cortex-a57-processor-cryptography-engine) 16 | for cryptographic primitives on CPUs are typically accessed via software libraries, rather 17 | than directly. 18 | 19 | Gadgets can also provide modular and reusable abstractions for circuit programming 20 | at a higher level, similar to their use in libraries such as 21 | [libsnark](https://github.com/christianlundkvist/libsnark-tutorial) and 22 | [bellman](https://electriccoin.co/blog/bellman-zksnarks-in-rust/). As well as abstracting 23 | *functions*, they can also abstract *types*, such as elliptic curve points or integers of 24 | specific sizes. 25 | 26 | -------------------------------------------------------------------------------- /book/src/concepts/proofs.md: -------------------------------------------------------------------------------- 1 | # Proof systems 2 | 3 | The aim of any ***proof system*** is to be able to prove interesting mathematical or 4 | cryptographic ***statements***. 5 | 6 | Typically, in a given protocol we will want to prove families of statements that differ 7 | in their ***public inputs***. The prover will also need to show that they know some 8 | ***private inputs*** that make the statement hold. 9 | 10 | To do this we write down a ***relation***, $\mathcal{R}$, that specifies which 11 | combinations of public and private inputs are valid. 12 | 13 | > The terminology above is intended to be aligned with the 14 | > [ZKProof Community Reference](https://docs.zkproof.org/reference#latest-version). 15 | 16 | To be precise, we should distinguish between the relation $\mathcal{R}$, and its 17 | implementation to be used in a proof system. We call the latter a ***circuit***. 18 | 19 | The language that we use to express circuits for a particular proof system is called an 20 | ***arithmetization***. Usually, an arithmetization will define circuits in terms of 21 | polynomial constraints on variables over a field. 
22 | 23 | > The _process_ of expressing a particular relation as a circuit is also sometimes called 24 | > "arithmetization", but we'll avoid that usage. 25 | 26 | To create a proof of a statement, the prover will need to know the private inputs, 27 | and also intermediate values, called ***advice*** values, that are used by the circuit. 28 | 29 | We assume that we can compute advice values efficiently from the private and public inputs. 30 | The particular advice values will depend on how we write the circuit, not only on the 31 | high-level statement. 32 | 33 | The private inputs and advice values are collectively called a ***witness***. 34 | 35 | > Some authors use "witness" as just a synonym for private inputs. But in our usage, 36 | > a witness includes advice, i.e. it includes all values that the prover supplies to 37 | > the circuit. 38 | 39 | For example, suppose that we want to prove knowledge of a preimage $x$ of a 40 | hash function $H$ for a digest $y$: 41 | 42 | * The private input would be the preimage $x$. 43 | 44 | * The public input would be the digest $y$. 45 | 46 | * The relation would be $\{(x, y) : H(x) = y\}$. 47 | 48 | * For a particular public input $Y$, the statement would be: $\{(x) : H(x) = Y\}$. 49 | 50 | * The advice would be all of the intermediate values in the circuit implementing the 51 | hash function. The witness would be $x$ and the advice. 52 | 53 | A ***Non-interactive Argument*** allows a ***prover*** to create a ***proof*** for a 54 | given statement and witness. The proof is data that can be used to convince a ***verifier*** 55 | that _there exists_ a witness for which the statement holds. The security property that 56 | such proofs cannot falsely convince a verifier is called ***soundness***. 57 | 58 | A ***Non-interactive Argument of Knowledge*** (***NARK***) further convinces the verifier 59 | that the prover _knew_ a witness for which the statement holds. This security property is 60 | called ***knowledge soundness***, and it implies soundness. 61 | 62 | In practice knowledge soundness is more useful for cryptographic protocols than soundness: 63 | if we are interested in whether Alice holds a secret key in some protocol, say, we need 64 | Alice to prove that _she knows_ the key, not just that it exists. 65 | 66 | Knowledge soundness is formalized by saying that an ***extractor***, which can observe 67 | precisely how the proof is generated, must be able to compute the witness. 68 | 69 | > This property is subtle given that proofs can be ***malleable***. That is, depending on the 70 | > proof system it may be possible to take an existing proof (or set of proofs) and, without 71 | > knowing the witness(es), modify it/them to produce a distinct proof of the same or a related 72 | > statement. Higher-level protocols that use malleable proof systems need to take this into 73 | > account. 74 | > 75 | > Even without malleability, proofs can also potentially be ***replayed***. For instance, 76 | > we would not want Alice in our example to be able to present a proof generated by someone 77 | > else, and have that be taken as a demonstration that she knew the key. 78 | 79 | If a proof yields no information about the witness (other than that a witness exists and was 80 | known to the prover), then we say that the proof system is ***zero knowledge***. 81 | 82 | If a proof system produces short proofs —i.e. of length polylogarithmic in the circuit 83 | size— then we say that it is ***succinct***. 
A succinct NARK is called a ***SNARK*** 84 | (***Succinct Non-Interactive Argument of Knowledge***). 85 | 86 | > By this definition, a SNARK need not have verification time polylogarithmic in the circuit 87 | > size. Some papers use the term ***efficient*** to describe a SNARK with that property, but 88 | > we'll avoid that term since it's ambiguous for SNARKs that support amortized or recursive 89 | > verification, which we'll get to later. 90 | 91 | A ***zk-SNARK*** is a zero-knowledge SNARK. 92 | -------------------------------------------------------------------------------- /book/src/design.md: -------------------------------------------------------------------------------- 1 | # Design 2 | 3 | ## Note on Language 4 | 5 | We use slightly different language than others to describe PLONK concepts. Here's the 6 | overview: 7 | 8 | 1. We like to think of PLONK-like arguments as tables, where each column corresponds to a 9 | "wire". We refer to entries in this table as "cells". 10 | 2. We like to call "selector polynomials" and so on "fixed columns" instead. We then refer 11 | specifically to a "selector constraint" when a cell in a fixed column is being used to 12 | control whether a particular constraint is enabled in that row. 13 | 3. We call the other polynomials "advice columns" usually, when they're populated by the 14 | prover. 15 | 4. We use the term "rule" to refer to a "gate" like 16 | $$A(X) \cdot q_A(X) + B(X) \cdot q_B(X) + A(X) \cdot B(X) \cdot q_M(X) + C(X) \cdot q_C(X) = 0.$$ 17 | - TODO: Check how consistent we are with this, and update the code and docs to match. 18 | -------------------------------------------------------------------------------- /book/src/design/gadgets.md: -------------------------------------------------------------------------------- 1 | # Gadgets 2 | 3 | In this section we document the gadgets and chip designs provided in the `halo2_gadgets` 4 | crate. 5 | 6 | > Neither these gadgets, nor their implementations, have been reviewed, and they should 7 | > not be used in production. 8 | -------------------------------------------------------------------------------- /book/src/design/gadgets/decomposition.md: -------------------------------------------------------------------------------- 1 | # Decomposition 2 | Given a field element $\alpha$, these gadgets decompose it into $W$ $K$-bit windows $$\alpha = k_0 + 2^{K} \cdot k_1 + 2^{2K} \cdot k_2 + \cdots + 2^{(W-1)K} \cdot k_{W-1}$$ where each $k_i$ a $K$-bit value. 3 | 4 | This is done using a running sum $z_i, i \in [0..W).$ We initialize the running sum $z_0 = \alpha,$ and compute subsequent terms $z_{i+1} = \frac{z_i - k_i}{2^{K}}.$ This gives us: 5 | 6 | $$ 7 | \begin{aligned} 8 | z_0 &= \alpha \\ 9 | &= k_0 + 2^{K} \cdot k_1 + 2^{2K} \cdot k_2 + 2^{3K} \cdot k_3 + \cdots, \\ 10 | z_1 &= (z_0 - k_0) / 2^K \\ 11 | &= k_1 + 2^{K} \cdot k_2 + 2^{2K} \cdot k_3 + \cdots, \\ 12 | z_2 &= (z_1 - k_1) / 2^K \\ 13 | &= k_2 + 2^{K} \cdot k_3 + \cdots, \\ 14 | &\vdots \\ 15 | \downarrow &\text{ (in strict mode)} \\ 16 | z_W &= (z_{W-1} - k_{W-1}) / 2^K \\ 17 | &= 0 \text{ (because } z_{W-1} = k_{W-1} \text{)} 18 | \end{aligned} 19 | $$ 20 | 21 | ### Strict mode 22 | Strict mode constrains the running sum output $z_{W}$ to be zero, thus range-constraining the field element to be within $W \cdot K$ bits. 23 | 24 | In strict mode, we are also assured that $z_{W-1} = k_{W-1}$ gives us the last window in the decomposition. 
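As a sanity check on the recurrence above, the sketch below shows how a prover might compute the windows $k_i$ and running sums $z_i$ out of circuit. It is a hedged illustration only: it works over plain unsigned integers rather than field elements, and the function name is invented.

```rust
/// Running-sum decomposition into `num_windows` K-bit windows (illustrative):
/// z_0 = alpha, k_i = z_i mod 2^K, z_{i+1} = (z_i - k_i) / 2^K.
fn running_sum_decompose(alpha: u128, k: u32, num_windows: usize) -> (Vec<u128>, Vec<u128>) {
    let mut z = vec![alpha];
    let mut windows = Vec::with_capacity(num_windows);
    for i in 0..num_windows {
        let k_i = z[i] & ((1u128 << k) - 1); // the low K bits of z_i
        windows.push(k_i);
        z.push((z[i] - k_i) >> k); // exact division by 2^K
    }
    (windows, z)
}

// In strict mode the final running sum must be zero, i.e. alpha fits in
// num_windows * K bits, e.g.:
// let (_windows, z) = running_sum_decompose(0xDEAD_BEEF, 3, 11);
// assert_eq!(*z.last().unwrap(), 0);
```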
25 | ## Lookup decomposition 26 | This gadget makes use of a $K$-bit lookup table to decompose a field element $\alpha$ into $K$-bit words. Each $K$-bit word $k_i = z_i - 2^K \cdot z_{i+1}$ is range-constrained by a lookup in the $K$-bit table. 27 | 28 | The region layout for the lookup decomposition uses a single advice column $z$, and two selectors $q_{lookup}$ and $q_{running}.$ 29 | $$ 30 | \begin{array}{|c|c|c|} 31 | \hline 32 | z & q_\mathit{lookup} & q_\mathit{running} \\\hline 33 | \hline 34 | z_0 & 1 & 1 \\\hline 35 | z_1 & 1 & 1 \\\hline 36 | \vdots & \vdots & \vdots \\\hline 37 | z_{n-1} & 1 & 1 \\\hline 38 | z_n & 0 & 0 \\\hline 39 | \end{array} 40 | $$ 41 | ### Short range check 42 | Using two $K$-bit lookups, we can range-constrain a field element $\alpha$ to be $n$ bits, where $n \leq K.$ To do this: 43 | 44 | 1. Constrain $0 \leq \alpha < 2^K$ to be within $K$ bits using a $K$-bit lookup. 45 | 2. Constrain $0 \leq \alpha \cdot 2^{K - n} < 2^K$ to be within $K$ bits using a $K$-bit lookup. 46 | 47 | The short variant of the lookup decomposition introduces a $q_{bitshift}$ selector. The same advice column $z$ has here been renamed to $\textsf{word}$ for clarity: 48 | $$ 49 | \begin{array}{|c|c|c|c|} 50 | \hline 51 | \textsf{word} & q_\mathit{lookup} & q_\mathit{running} & q_\mathit{bitshift} \\\hline 52 | \hline 53 | \alpha & 1 & 0 & 0 \\\hline 54 | \alpha' & 1 & 0 & 1 \\\hline 55 | 2^{K-n} & 0 & 0 & 0 \\\hline 56 | \end{array} 57 | $$ 58 | 59 | where $\alpha' = \alpha \cdot 2^{K - n}.$ Note that $2^{K-n}$ is assigned to a fixed column at keygen, and copied in at proving time. This is used in the gate enabled by the $q_\mathit{bitshift}$ selector to check that $\alpha$ was shifted correctly: 60 | $$ 61 | \begin{array}{|c|l|} 62 | \hline 63 | \text{Degree} & \text{Constraint} \\\hline 64 | 2 & q_\mathit{bitshift} \cdot (\alpha' - (\alpha \cdot 2^{K - n})) \\\hline 65 | \end{array} 66 | $$ 67 | 68 | ### Combined lookup expression 69 | Since the lookup decomposition and its short variant both make use of the same lookup table, we combine their lookup input expressions into a single one: 70 | 71 | $$q_\mathit{lookup} \cdot \left(q_\mathit{running} \cdot (z_i - 2^K \cdot z_{i+1}) + (1 - q_\mathit{running}) \cdot \textsf{word} \right)$$ 72 | 73 | where $z_i$ and $\textsf{word}$ are the same cell (but distinguished here for clarity of usage). 74 | 75 | ## Short range decomposition 76 | For a short range (for instance, $[0, \texttt{range})$ where $\texttt{range} \leq 8$), we can range-constrain each word using a degree-$\texttt{range}$ polynomial constraint instead of a lookup: $$\RangeCheck{word}{range} = \texttt{word} \cdot (1 - \texttt{word}) \cdots (\texttt{range} - 1 - \texttt{word}).$$ 77 | -------------------------------------------------------------------------------- /book/src/design/gadgets/ecc.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelphinusLab/halo2-gpu-specific/55d1969571375005206615deb6ff0395a7ca1607/book/src/design/gadgets/ecc.md -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256.md: -------------------------------------------------------------------------------- 1 | # SHA-256 2 | 3 | ## Specification 4 | 5 | SHA-256 is specified in [NIST FIPS PUB 180-4](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf). 6 | 7 | Unlike the specification, we use $\boxplus$ for addition modulo $2^{32}$, and $+$ for 8 | field addition. 
$\oplus$ is used for XOR. 9 | 10 | ## Gadget interface 11 | 12 | SHA-256 maintains state in eight 32-bit variables. It processes input as 512-bit blocks, 13 | but internally splits these blocks into 32-bit chunks. We therefore designed the SHA-256 14 | gadget to consume input in 32-bit chunks. 15 | 16 | ## Chip instructions 17 | 18 | The SHA-256 gadget requires a chip with the following instructions: 19 | 20 | ```rust 21 | # extern crate halo2_proofs; 22 | # use halo2_proofs::plonk::Error; 23 | # use std::fmt; 24 | # 25 | # trait Chip: Sized {} 26 | # trait Layouter {} 27 | const BLOCK_SIZE: usize = 16; 28 | const DIGEST_SIZE: usize = 8; 29 | 30 | pub trait Sha256Instructions: Chip { 31 | /// Variable representing the SHA-256 internal state. 32 | type State: Clone + fmt::Debug; 33 | /// Variable representing a 32-bit word of the input block to the SHA-256 compression 34 | /// function. 35 | type BlockWord: Copy + fmt::Debug; 36 | 37 | /// Places the SHA-256 IV in the circuit, returning the initial state variable. 38 | fn initialization_vector(layouter: &mut impl Layouter) -> Result; 39 | 40 | /// Starting from the given initial state, processes a block of input and returns the 41 | /// final state. 42 | fn compress( 43 | layouter: &mut impl Layouter, 44 | initial_state: &Self::State, 45 | input: [Self::BlockWord; BLOCK_SIZE], 46 | ) -> Result; 47 | 48 | /// Converts the given state into a message digest. 49 | fn digest( 50 | layouter: &mut impl Layouter, 51 | state: &Self::State, 52 | ) -> Result<[Self::BlockWord; DIGEST_SIZE], Error>; 53 | } 54 | ``` 55 | 56 | TODO: Add instruction for computing padding. 57 | 58 | This set of instructions was chosen to strike a balance between the reusability of the 59 | instructions, and the scope for chips to internally optimise them. In particular, we 60 | considered splitting the compression function into its constituent parts (Ch, Maj etc), 61 | and providing a compression function gadget that implemented the round logic. However, 62 | this would prevent chips from using relative references between the various parts of a 63 | compression round. Having an instruction that implements all compression rounds is also 64 | similar to the Intel SHA extensions, which provide an instruction that performs multiple 65 | compression rounds. 
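Since the gadget consumes its input in 32-bit chunks, a caller has to turn a byte message into blocks of `BLOCK_SIZE = 16` big-endian 32-bit words, including the padding mentioned in the TODO above. The following is a standalone plain-Rust sketch of that preparation step; it is not part of the gadget or chip API, and the helper name is ours.

```rust
/// Plain-Rust sketch (not circuit code): pad a byte message as in FIPS 180-4 and
/// split it into 512-bit blocks of sixteen big-endian 32-bit words, i.e. the
/// shape of input that `compress` consumes one block at a time.
fn to_blocks(msg: &[u8]) -> Vec<[u32; 16]> {
    let mut bytes = msg.to_vec();
    let bit_len = (msg.len() as u64) * 8;
    bytes.push(0x80);
    while bytes.len() % 64 != 56 {
        bytes.push(0);
    }
    bytes.extend_from_slice(&bit_len.to_be_bytes());

    bytes
        .chunks(64)
        .map(|block| {
            let mut words = [0u32; 16];
            for (i, w) in block.chunks(4).enumerate() {
                words[i] = u32::from_be_bytes([w[0], w[1], w[2], w[3]]);
            }
            words
        })
        .collect()
}

fn main() {
    let blocks = to_blocks(b"abc");
    assert_eq!(blocks.len(), 1);
    // "abc" followed by the 0x80 padding byte, big-endian.
    assert_eq!(blocks[0][0], 0x6162_6380);
    // The final word encodes the message length in bits (24).
    assert_eq!(blocks[0][15], 24);
}
```

In the gadget, words like these would then be wrapped in the chip's `BlockWord` type and fed to `compress` one block at a time.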
66 | -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/bit_reassignment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelphinusLab/halo2-gpu-specific/55d1969571375005206615deb6ff0395a7ca1607/book/src/design/gadgets/sha256/bit_reassignment.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/compression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelphinusLab/halo2-gpu-specific/55d1969571375005206615deb6ff0395a7ca1607/book/src/design/gadgets/sha256/compression.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/low_sigma_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelphinusLab/halo2-gpu-specific/55d1969571375005206615deb6ff0395a7ca1607/book/src/design/gadgets/sha256/low_sigma_0.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/low_sigma_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelphinusLab/halo2-gpu-specific/55d1969571375005206615deb6ff0395a7ca1607/book/src/design/gadgets/sha256/low_sigma_1.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/upp_sigma_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelphinusLab/halo2-gpu-specific/55d1969571375005206615deb6ff0395a7ca1607/book/src/design/gadgets/sha256/upp_sigma_0.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sha256/upp_sigma_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelphinusLab/halo2-gpu-specific/55d1969571375005206615deb6ff0395a7ca1607/book/src/design/gadgets/sha256/upp_sigma_1.png -------------------------------------------------------------------------------- /book/src/design/gadgets/sinsemilla/merkle-crh.md: -------------------------------------------------------------------------------- 1 | # MerkleCRH 2 | 3 | ## Message decomposition 4 | $\mathsf{SinsemillaHash}$ is used in the [$\mathsf{MerkleCRH^{Orchard}}$ hash function](https://zips.z.cash/protocol/protocol.pdf#orchardmerklecrh). The input to $\mathsf{SinsemillaHash}$ is: 5 | 6 | $${l\star} \,||\, {\textsf{left}\star} \,||\, {\textsf{right}\star},$$ 7 | 8 | where: 9 | - ${l\star} = \textsf{I2LEBSP}_{10}(l) = \textsf{I2LEBSP}_{10}(\textsf{MerkleDepth}^\textsf{Orchard} - 1 - \textsf{layer})$, 10 | - ${\textsf{left}\star} = \textsf{I2LEBSP}_{\ell_{\textsf{Merkle}}^{\textsf{Orchard}}}(\textsf{left})$, 11 | - ${\textsf{right}\star} = \textsf{I2LEBSP}_{\ell_{\textsf{Merkle}}^{\textsf{Orchard}}}(\textsf{right})$, 12 | 13 | with $\ell_{\textsf{Merkle}}^{\textsf{Orchard}} = 255.$ $\textsf{left}$ and $\textsf{right}$ are allowed to be non-canonical $255$-bit encodings. 
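For intuition, the input layout above can be modelled with plain bit vectors. The circuit never materialises the message this way, and works with $255$-bit field-element encodings rather than the `u64` stand-ins used here; the helper below is ours, modelling $\textsf{I2LEBSP}_k$ as a $k$-bit little-endian decomposition and taking $\textsf{MerkleDepth}^\textsf{Orchard} = 32$.

```rust
/// Toy model of I2LEBSP_k: the little-endian bit decomposition of `value`,
/// padded or truncated to exactly `k` bits (`value` is a u64 stand-in).
fn i2lebsp(value: u64, k: usize) -> Vec<bool> {
    (0..k).map(|i| i < 64 && (value >> i) & 1 == 1).collect()
}

fn main() {
    let layer = 31u64; // hashing two leaves
    let l = 32 - 1 - layer; // MerkleDepth^Orchard - 1 - layer = 0

    // Toy stand-ins: `left` and `right` are really 255-bit field-element encodings.
    let left = i2lebsp(0xdead_beef, 255);
    let right = i2lebsp(0xcafe_f00d, 255);

    // The SinsemillaHash input is l* || left* || right*.
    let mut message = i2lebsp(l, 10);
    message.extend_from_slice(&left);
    message.extend_from_slice(&right);
    assert_eq!(message.len(), 10 + 255 + 255);
}
```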
14 | 15 | We break these inputs into the following `MessagePiece`s: 16 | 17 | $$ 18 | \begin{aligned} 19 | a \text{ (250 bits)} &= a_0 \,||\, a_1 \\ 20 | &= {l\star} \,||\, (\text{bits } 0..=239 \text{ of } \textsf{ left }) \\ 21 | b \text{ (20 bits)} &= b_0 \,||\, b_1 \,||\, b_2 \\ 22 | &= (\text{bits } 240..=249 \text{ of } \textsf{left}) \,||\, (\text{bits } 250..=254 \text{ of } \textsf{left}) \,||\, (\text{bits } 0..=4 \text{ of } \textsf{right}) \\ 23 | c \text{ (250 bits)} &= \text{bits } 5..=254 \text{ of } \textsf{right} 24 | \end{aligned} 25 | $$ 26 | 27 | $a,b,c$ are constrained by the $\textsf{SinsemillaHash}$ to be $250$ bits, $20$ bits, and $250$ bits respectively. 28 | 29 | In a custom gate, we check this message decomposition by enforcing the following constraints: 30 | 31 | 1. $a_0 = l$ 32 |
33 | $z_{1,a}$, the index-1 running sum output of $\textsf{SinsemillaHash}(a)$, is copied into the gate. $z_{1,a}$ has been constrained by the $\textsf{SinsemillaHash}$ to be $240$ bits. We recover the subpieces $a_0, a_1$ using $a, z_{1,a}$: 34 | $$ 35 | \begin{aligned} 36 | z_{1,a} &= \frac{a - a_0}{2^{10}}\\ 37 | &= a_1 \\ 38 | \implies a_0 &= a - z_{1,a} \cdot 2^{10}. 39 | \end{aligned} 40 | $$ 41 | $l + 1$ is loaded into a fixed column at each layer of the hash. It is used both as a gate selector, and to fix the value of $l$. We check that $$a_0 = (l + 1) - 1.$$ 42 | > Note: The reason for using $l + 1$ instead of $l$ is that $l = 0$ when $\textsf{layer} = 31$ (hashing two leaves). We cannot have a zero-valued selector, since a constraint gated by a zero-valued selector is never checked. 43 | 44 | 2. $b_1 + 2^5 \cdot b_2 = z_{1,b}$ 45 |
46 | $z_{1,b}$, the index-1 running sum output of $\textsf{SinsemillaHash}(b)$, is copied into the gate. $z_{1,b}$ has been constrained by the $\textsf{SinsemillaHash}$ to be $10$ bits. We witness the subpieces $b_1, b_2$ outside this gate, and constrain them each to be $5$ bits. Inside the gate, we check that $$b_1 + 2^5 \cdot b_2 = z_{1,b}.$$ 47 | We also recover the subpiece $b_0$ using $(b, z_{1,b})$: 48 | $$ 49 | \begin{aligned} 50 | z_{1,b} &= \frac{b - b_{0..=10}}{2^{10}}\\ 51 | \implies b_0 &= b - (z_{1,b} \cdot 2^{10}). 52 | \end{aligned} 53 | $$ 54 | 55 | We have now derived or witnessed every subpiece, and range-constrained every subpiece: 56 | - $a_0$ ($10$ bits), derived as $a_0 = a - 2^{10} \cdot z_{1,a}$; 57 | - $a_1$ ($240$ bits), equal to $z_{1,a}$; 58 | - $b_0$ ($10$ bits), derived as $b_0 = b - 2^{10} \cdot z_{1,b}$; 59 | - $b_1$ ($5$ bits) is witnessed and constrained outside the gate; 60 | - $b_2$ ($5$ bits) is witnessed and constrained outside the gate; 61 | - $b_1 + 2^5 \cdot b_2$ is constrained to equal $z_{1, b}$, 62 | and we use them to reconstruct the original field element inputs: 63 | 64 | 3. $\mathsf{left} = a_1 + 2^{240} \cdot b_0 + 2^{254} \cdot b_1$ 65 | 66 | 4. $\mathsf{right} = b_2 + 2^5 \cdot c$ 67 | 68 | ## Circuit components 69 | The Orchard circuit spans $10$ advice columns while the $\textsf{Sinsemilla}$ chip only uses $5$ advice columns. We distribute the path hashing evenly across two $\textsf{Sinsemilla}$ chips to make better use of the available circuit area. Since the output from the previous layer hash is copied into the next layer hash, we maintain continuity even when moving from one chip to the other. 70 | -------------------------------------------------------------------------------- /book/src/design/implementation.md: -------------------------------------------------------------------------------- 1 | # Implementation 2 | -------------------------------------------------------------------------------- /book/src/design/implementation/fields.md: -------------------------------------------------------------------------------- 1 | # Fields 2 | 3 | The [Pasta curves](https://electriccoin.co/blog/the-pasta-curves-for-halo-2-and-beyond/) 4 | that we use in `halo2` are designed to be highly 2-adic, meaning that a large $2^S$ 5 | [multiplicative subgroup](../../background/fields.md#multiplicative-subgroups) exists in 6 | each field. That is, we can write $p - 1 \equiv 2^S \cdot T$ with $T$ odd. For both Pallas 7 | and Vesta, $S = 32$; this helps to simplify the field implementations. 8 | 9 | ## Sarkar square-root algorithm (table-based variant) 10 | 11 | We use a technique from [Sarkar2020](https://eprint.iacr.org/2020/1407.pdf) to compute 12 | [square roots](../../background/fields.md#square-roots) in `halo2`. The intuition behind 13 | the algorithm is that we can split the task into computing square roots in each 14 | multiplicative subgroup. 15 | 16 | Suppose we want to find the square root of $u$ modulo one of the Pasta primes $p$, where 17 | $u$ is a non-zero square in $\mathbb{Z}_p^\times$. We define a $2^S$ 18 | [root of unity](../../background/fields.md#roots-of-unity) $g = z^T$ where $z$ is a 19 | non-square in $\mathbb{Z}_p^\times$, and precompute the following tables: 20 | 21 | $$ 22 | gtab = \begin{bmatrix} 23 | g^0 & g^1 & ... & g^{2^8 - 1} \\ 24 | (g^{2^8})^0 & (g^{2^8})^1 & ... & (g^{2^8})^{2^8 - 1} \\ 25 | (g^{2^{16}})^0 & (g^{2^{16}})^1 & ... & (g^{2^{16}})^{2^8 - 1} \\ 26 | (g^{2^{24}})^0 & (g^{2^{24}})^1 & ... 
& (g^{2^{24}})^{2^8 - 1} 27 | \end{bmatrix} 28 | $$ 29 | 30 | $$ 31 | invtab = \begin{bmatrix} 32 | (g^{-2^{24}})^0 & (g^{-2^{24}})^1 & ... & (g^{-2^{24}})^{2^8 - 1} 33 | \end{bmatrix} 34 | $$ 35 | 36 | Let $v = u^{(T-1)/2}$. We can then define $x = uv \cdot v = u^T$ as an element of the 37 | $2^S$ multiplicative subgroup. 38 | 39 | Let $x_3 = x, x_2 = x_3^{2^8}, x_1 = x_2^{2^8}, x_0 = x_1^{2^8}.$ 40 | 41 | ### i = 0, 1 42 | Using $invtab$, we lookup $t_0$ such that 43 | $$ 44 | x_0 = (g^{-2^{24}})^{t_0} \implies x_0 \cdot g^{t_0 \cdot 2^{24}} = 1. 45 | $$ 46 | 47 | Define $\alpha_1 = x_1 \cdot (g^{2^{16}})^{t_0}.$ 48 | 49 | ### i = 2 50 | Lookup $t_1$ s.t. 51 | $$ 52 | \begin{array}{ll} 53 | \alpha_1 = (g^{-2^{24}})^{t_1} &\implies x_1 \cdot (g^{2^{16}})^{t_0} = (g^{-2^{24}})^{t_1} \\ 54 | &\implies 55 | x_1 \cdot g^{(t_0 + 2^8 \cdot t_1) \cdot 2^{16}} = 1. 56 | \end{array} 57 | $$ 58 | 59 | Define $\alpha_2 = x_2 \cdot (g^{2^8})^{t_0 + 2^8 \cdot t_1}.$ 60 | 61 | ### i = 3 62 | Lookup $t_2$ s.t. 63 | 64 | $$ 65 | \begin{array}{ll} 66 | \alpha_2 = (g^{-2^{24}})^{t_2} &\implies x_2 \cdot (g^{2^8})^{t_0 + 2^8\cdot {t_1}} = (g^{-2^{24}})^{t_2} \\ 67 | &\implies x_2 \cdot g^{(t_0 + 2^8 \cdot t_1 + 2^{16} \cdot t_2) \cdot 2^8} = 1. 68 | \end{array} 69 | $$ 70 | 71 | Define $\alpha_3 = x_3 \cdot g^{t_0 + 2^8 \cdot t_1 + 2^{16} \cdot t_2}.$ 72 | 73 | ### Final result 74 | Lookup $t_3$ such that 75 | 76 | $$ 77 | \begin{array}{ll} 78 | \alpha_3 = (g^{-2^{24}})^{t_3} &\implies x_3 \cdot g^{t_0 + 2^8\cdot {t_1} + 2^{16} \cdot t_2} = (g^{-2^{24}})^{t_3} \\ 79 | &\implies x_3 \cdot g^{t_0 + 2^8 \cdot t_1 + 2^{16} \cdot t_2 + 2^{24} \cdot t_3} = 1. 80 | \end{array} 81 | $$ 82 | 83 | Let $t = t_0 + 2^8 \cdot t_1 + 2^{16} \cdot t_2 + 2^{24} \cdot t_3$. 84 | 85 | We can now write 86 | $$ 87 | \begin{array}{lclcl} 88 | x_3 \cdot g^{t} = 1 &\implies& x_3 &=& g^{-t} \\ 89 | &\implies& uv^2 &=& g^{-t} \\ 90 | &\implies& uv &=& v^{-1} \cdot g^{-t} \\ 91 | &\implies& uv \cdot g^{t / 2} &=& v^{-1} \cdot g^{-t / 2}. 92 | \end{array} 93 | $$ 94 | 95 | Squaring the RHS, we observe that $(v^{-1} g^{-t / 2})^2 = v^{-2}g^{-t} = u.$ Therefore, 96 | the square root of $u$ is $uv \cdot g^{t / 2}$; the first part we computed earlier, and 97 | the second part can be computed with three multiplications using lookups in $gtab$. 98 | -------------------------------------------------------------------------------- /book/src/design/implementation/proofs.md: -------------------------------------------------------------------------------- 1 | # Halo 2 proofs 2 | 3 | ## Proofs as opaque byte streams 4 | 5 | In proving system implementations like `bellman`, there is a concrete `Proof` struct that 6 | encapsulates the proof data, is returned by a prover, and can be passed to a verifier. 7 | 8 | `halo2` does not contain any proof-like structures, for several reasons: 9 | 10 | - The Proof structures would contain vectors of (vectors of) curve points and scalars. 11 | This complicates serialization/deserialization of proofs because the lengths of these 12 | vectors depend on the configuration of the circuit. However, we didn't want to encode 13 | the lengths of vectors inside of proofs, because at runtime the circuit is fixed, and 14 | thus so are the proof sizes. 15 | - It's easy to accidentally put stuff into a Proof structure that isn't also placed in the 16 | transcript, which is a hazard when developing and implementing a proving system. 
17 | - We needed to be able to create multiple PLONK proofs at the same time; these proofs 18 | share many different substructures when they are for the same circuit. 19 | 20 | Instead, `halo2` treats proof objects as opaque byte streams. Creation and consumption of 21 | these byte streams happens via the transcript: 22 | 23 | - The `TranscriptWrite` trait represents something that we can write proof components to 24 | (at proving time). 25 | - The `TranscriptRead` trait represents something that we can read proof components from 26 | (at verifying time). 27 | 28 | Crucially, implementations of `TranscriptWrite` are responsible for simultaneously writing 29 | to some `std::io::Write` buffer at the same time that they hash things into the transcript, 30 | and similarly for `TranscriptRead`/`std::io::Read`. 31 | 32 | As a bonus, treating proofs as opaque byte streams ensures that verification accounts for 33 | the cost of deserialization, which isn't negligible due to point compression. 34 | 35 | ## Proof encoding 36 | 37 | A Halo 2 proof, constructed over a curve $E(\mathbb{F}_p)$, is encoded as a stream of: 38 | 39 | - Points $P \in E(\mathbb{F}_p)$) (for commitments to polynomials), and 40 | - Scalars $s \in \mathbb{F}_q$) (for evaluations of polynomials, and blinding values). 41 | 42 | For the Pallas and Vesta curves, both points and scalars have 32-byte encodings, meaning 43 | that proofs are always a multiple of 32 bytes. 44 | 45 | The `halo2` crate supports proving multiple instances of a circuit simultaneously, in 46 | order to share common proof components and protocol logic. 47 | 48 | In the encoding description below, we will use the following circuit-specific constants: 49 | 50 | - $k$ - the size parameter of the circuit (which has $2^k$ rows). 51 | - $A$ - the number of advice columns. 52 | - $F$ - the number of fixed columns. 53 | - $I$ - the number of instance columns. 54 | - $L$ - the number of lookup arguments. 55 | - $P$ - the number of permutation arguments. 56 | - $\textsf{Col}_P$ - the number of columns involved in permutation argument $P$. 57 | - $D$ - the maximum degree for the quotient polynomial. 58 | - $Q_A$ - the number of advice column queries. 59 | - $Q_F$ - the number of fixed column queries. 60 | - $Q_I$ - the number of instance column queries. 61 | - $M$ - the number of instances of the circuit that are being proven simultaneously. 62 | 63 | As the proof encoding directly follows the transcript, we can break the encoding into 64 | sections matching the Halo 2 protocol: 65 | 66 | - PLONK commitments: 67 | - $A$ points (repeated $M$ times). 68 | - $2L$ points (repeated $M$ times). 69 | - $P$ points (repeated $M$ times). 70 | - $L$ points (repeated $M$ times). 71 | 72 | - Vanishing argument: 73 | - $D - 1$ points. 74 | - $Q_I$ scalars (repeated $M$ times). 75 | - $Q_A$ scalars (repeated $M$ times). 76 | - $Q_F$ scalars. 77 | - $D - 1$ scalars. 78 | 79 | - PLONK evaluations: 80 | - $(2 + \textsf{Col}_P) \times P$ scalars (repeated $M$ times). 81 | - $5L$ scalars (repeated $M$ times). 82 | 83 | - Multiopening argument: 84 | - 1 point. 85 | - 1 scalar per set of points in the multiopening argument. 86 | 87 | - Polynomial commitment scheme: 88 | - $1 + 2k$ points. 89 | - $2$ scalars. 
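To make the encoding above concrete, here is a standalone sketch that simply tallies the point and scalar counts listed in this section and converts them into a byte size, assuming the 32-byte Pallas/Vesta encodings. It is bookkeeping for illustration only, not code from this crate: in particular it takes the number of multipoint-opening point sets as an extra input, and assumes every permutation argument spans the same number of columns.

```rust
/// Circuit-specific constants, mirroring the list above.
struct Counts {
    k: usize,          // circuit size parameter (2^k rows)
    a: usize,          // advice columns
    l: usize,          // lookup arguments
    p: usize,          // permutation arguments
    col_p: usize,      // columns per permutation argument (assumed uniform)
    d: usize,          // maximum degree for the quotient polynomial
    q_a: usize,        // advice column queries
    q_f: usize,        // fixed column queries
    q_i: usize,        // instance column queries
    m: usize,          // circuit instances proven simultaneously
    point_sets: usize, // point sets in the multipoint opening argument
}

/// Tally the transcript sections in the order listed above; 32 bytes per
/// point or scalar on Pallas/Vesta.
fn proof_size(c: &Counts) -> usize {
    // PLONK commitments.
    let mut points = c.m * (c.a + 2 * c.l + c.p + c.l);
    let mut scalars = 0;
    // Vanishing argument.
    points += c.d - 1;
    scalars += c.m * (c.q_i + c.q_a) + c.q_f + (c.d - 1);
    // PLONK evaluations.
    scalars += c.m * ((2 + c.col_p) * c.p + 5 * c.l);
    // Multipoint opening argument.
    points += 1;
    scalars += c.point_sets;
    // Polynomial commitment scheme.
    points += 1 + 2 * c.k;
    scalars += 2;
    32 * (points + scalars)
}

fn main() {
    // Illustrative numbers only: a k = 11 circuit with three advice columns
    // (six queries in total), one fixed column, no lookups or permutations,
    // quotient degree 4 and three point sets.
    let c = Counts {
        k: 11, a: 3, l: 0, p: 0, col_p: 0, d: 4,
        q_a: 6, q_f: 1, q_i: 0, m: 1, point_sets: 3,
    };
    // Matches the 1440-byte figure reported by the cost-model example in the
    // user documentation for a similar circuit shape.
    assert_eq!(proof_size(&c), 1440);
}
```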
90 | -------------------------------------------------------------------------------- /book/src/design/proving-system.md: -------------------------------------------------------------------------------- 1 | # Proving system 2 | 3 | The Halo 2 proving system can be broken down into five stages: 4 | 5 | 1. Commit to polynomials encoding the main components of the circuit: 6 | - Cell assignments. 7 | - Permuted values and products for each lookup argument. 8 | - Equality constraint permutations. 9 | 2. Construct the vanishing argument to constrain all circuit relations to zero: 10 | - Standard and custom gates. 11 | - Lookup argument rules. 12 | - Equality constraint permutation rules. 13 | 3. Evaluate the above polynomials at all necessary points: 14 | - All relative rotations used by custom gates across all columns. 15 | - Vanishing argument pieces. 16 | 4. Construct the multipoint opening argument to check that all evaluations are consistent 17 | with their respective commitments. 18 | 5. Run the inner product argument to create a polynomial commitment opening proof for the 19 | multipoint opening argument polynomial. 20 | 21 | These stages are presented in turn across this section of the book. 22 | 23 | ## Example 24 | 25 | To aid our explanations, we will at times refer to the following example constraint 26 | system: 27 | 28 | - Four advice columns $a, b, c, d$. 29 | - One fixed column $f$. 30 | - Three custom gates: 31 | - $a \cdot b \cdot c_{-1} - d = 0$ 32 | - $f_{-1} \cdot c = 0$ 33 | - $f \cdot d \cdot a = 0$ 34 | 35 | ## tl;dr 36 | 37 | The table below provides a (probably too) succinct description of the Halo 2 protocol. 38 | This description will likely be replaced by the Halo 2 paper and security proof, but for 39 | now serves as a summary of the following sub-sections. 40 | 41 | | Prover | | Verifier | 42 | | --------------------------------------------------------------------------- | ------- | ---------------------------------- | 43 | | | $\larr$ | $t(X) = (X^n - 1)$ | 44 | | | $\larr$ | $F = [F_0, F_1, \dots, F_{m - 1}]$ | 45 | | $\mathbf{A} = [A_0, A_1, \dots, A_{m - 1}]$ | $\rarr$ | | 46 | | | $\larr$ | $\theta$ | 47 | | $\mathbf{L} = [(A'_0, S'_0), \dots, (A'_{m - 1}, S'_{m - 1})]$ | $\rarr$ | | 48 | | | $\larr$ | $\beta, \gamma$ | 49 | | $\mathbf{Z_P} = [Z_{P,0}, Z_{P,1}, \ldots]$ | $\rarr$ | | 50 | | $\mathbf{Z_L} = [Z_{L,0}, Z_{L,1}, \ldots]$ | $\rarr$ | | 51 | | | $\larr$ | $y$ | 52 | | $h(X) = \frac{\text{gate}_0(X) + \dots + y^i \cdot \text{gate}_i(X)}{t(X)}$ | | | 53 | | $h(X) = h_0(X) + \dots + X^{n(d-1)} h_{d-1}(X)$ | | | 54 | | $\mathbf{H} = [H_0, H_1, \dots, H_{d-1}]$ | $\rarr$ | | 55 | | | $\larr$ | $x$ | 56 | | $evals = [A_0(x), \dots, H_{d - 1}(x)]$ | $\rarr$ | | 57 | | | | Checks $h(x)$ | 58 | | | $\larr$ | $x_1, x_2$ | 59 | | Constructs $h'(X)$ multipoint opening poly | | | 60 | | $U = \text{Commit}(h'(X))$ | $\rarr$ | | 61 | | | $\larr$ | $x_3$ | 62 | | $\mathbf{q}_\text{evals} = [Q_0(x_3), Q_1(x_3), \dots]$ | $\rarr$ | | 63 | | $u_\text{eval} = U(x_3)$ | $\rarr$ | | 64 | | | $\larr$ | $x_4$ | 65 | 66 | Then the prover and verifier: 67 | 68 | - Construct $\text{finalPoly}(X)$ as a linear combination of $\mathbf{Q}$ and $U$ using 69 | powers of $x_4$; 70 | - Construct $\text{finalPolyEval}$ as the equivalent linear combination of 71 | $\mathbf{q}_\text{evals}$ and $u_\text{eval}$; and 72 | - Perform $\text{InnerProduct}(\text{finalPoly}(X), x_3, \text{finalPolyEval}).$ 73 | 74 | > TODO: Write up protocol components that provide zero-knowledge. 
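To connect this back to the example constraint system above: the rotations $c_{-1}$ and $f_{-1}$ simply refer to the neighbouring row of the assignment table (evaluation at $X\omega^{-1}$). The following standalone sketch checks the three example gates row by row over a toy four-row table of plain integers; the particular values are made up, and this is not halo2 API code.

```rust
fn main() {
    const N: usize = 4; // a toy table with 2^k = 4 rows
    // Advice columns a, b, c, d and fixed column f, one value per row.
    let a = [0i64, 2, 1, 4];
    let b = [7i64, 1, 3, 2];
    let c = [3i64, 0, 2, 5];
    let d = [0i64, 6, 0, 16];
    let f = [1i64, 0, 0, 0];

    // A rotation of -1 refers to the previous row, wrapping around the table,
    // which mirrors evaluating the column polynomial at X * omega^{-1}.
    let prev = |i: usize| (i + N - 1) % N;

    for i in 0..N {
        let gate0 = a[i] * b[i] * c[prev(i)] - d[i];
        let gate1 = f[prev(i)] * c[i];
        let gate2 = f[i] * d[i] * a[i];
        assert_eq!((gate0, gate1, gate2), (0, 0, 0), "row {} violates a gate", i);
    }
    println!("all {} rows satisfy the three example gates", N);
}
```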
75 | -------------------------------------------------------------------------------- /book/src/design/proving-system/circuit-commitments.md: -------------------------------------------------------------------------------- 1 | # Circuit commitments 2 | 3 | ## Committing to the circuit assignments 4 | 5 | At the start of proof creation, the prover has a table of cell assignments that it claims 6 | satisfy the constraint system. The table has $n = 2^k$ rows, and is broken into advice, 7 | instance, and fixed columns. We define $F_{i,j}$ as the assignment in the $j$th row of 8 | the $i$th fixed column. Without loss of generality, we'll similarly define $A_{i,j}$ to 9 | represent the advice and instance assignments. 10 | 11 | > We separate fixed columns here because they are provided by the verifier, whereas the 12 | > advice and instance columns are provided by the prover. In practice, the commitments to 13 | > instance and fixed columns are computed by both the prover and verifier, and only the 14 | > advice commitments are stored in the proof. 15 | 16 | To commit to these assignments, we construct Lagrange polynomials of degree $n - 1$ for 17 | each column, over an evaluation domain of size $n$ (where $\omega$ is the $n$th primitive 18 | root of unity): 19 | 20 | - $a_i(X)$ interpolates such that $a_i(\omega^j) = A_{i,j}$. 21 | - $f_i(X)$ interpolates such that $f_i(\omega^j) = F_{i,j}$. 22 | 23 | We then create a blinding commitment to the polynomial for each column: 24 | 25 | $$\mathbf{A} = [\text{Commit}(a_0(X)), \dots, \text{Commit}(a_i(X))]$$ 26 | $$\mathbf{F} = [\text{Commit}(f_0(X)), \dots, \text{Commit}(f_i(X))]$$ 27 | 28 | $\mathbf{F}$ is constructed as part of key generation, using a blinding factor of $1$. 29 | $\mathbf{A}$ is constructed by the prover and sent to the verifier. 30 | 31 | ## Committing to the lookup permutations 32 | 33 | The verifier starts by sampling $\theta$, which is used to keep individual columns within 34 | lookups independent. Then, the prover commits to the permutations for each lookup as 35 | follows: 36 | 37 | - Given a lookup with input column polynomials $[A_0(X), \dots, A_{m-1}(X)]$ and table 38 | column polynomials $[S_0(X), \dots, S_{m-1}(X)]$, the prover constructs two compressed 39 | polynomials 40 | 41 | $$A_\text{compressed}(X) = \theta^{m-1} A_0(X) + \theta^{m-2} A_1(X) + \dots + \theta A_{m-2}(X) + A_{m-1}(X)$$ 42 | $$S_\text{compressed}(X) = \theta^{m-1} S_0(X) + \theta^{m-2} S_1(X) + \dots + \theta S_{m-2}(X) + S_{m-1}(X)$$ 43 | 44 | - The prover then permutes $A_\text{compressed}(X)$ and $S_\text{compressed}(X)$ according 45 | to the [rules of the lookup argument](lookup.md), obtaining $A'(X)$ and $S'(X)$. 46 | 47 | The prover creates blinding commitments for all of the lookups 48 | 49 | $$\mathbf{L} = \left[ (\text{Commit}(A'(X))), \text{Commit}(S'(X))), \dots \right]$$ 50 | 51 | and sends them to the verifier. 52 | 53 | After the verifier receives $\mathbf{A}$, $\mathbf{F}$, and $\mathbf{L}$, it samples 54 | challenges $\beta$ and $\gamma$ that will be used in the permutation argument and the 55 | remainder of the lookup argument below. (These challenges can be reused because the 56 | arguments are independent.) 57 | 58 | ## Committing to the equality constraint permutation 59 | 60 | Let $c$ be the number of columns that are enabled for equality constraints. 
61 | 62 | Let $m$ be the maximum number of columns that can accommodated by a 63 | [column set](permutation.md#spanning-a-large-number-of-columns) without exceeding 64 | the PLONK configuration's polynomial degree bound. 65 | 66 | Let $u$ be the number of “usable” rows as defined in the 67 | [Permutation argument](permutation.md#zero-knowledge-adjustment) section. 68 | 69 | Let $b = \mathsf{ceiling}(c/m).$ 70 | 71 | The prover constructs a vector $\mathbf{P}$ of length $bu$ such that for each 72 | column set $0 \leq a < b$ and each row $0 \leq j < u,$ 73 | 74 | $$ 75 | \mathbf{P}_{au + j} = \prod\limits_{i=am}^{\min(c, (a+1)m)-1} \frac{v_i(\omega^j) + \beta \cdot \delta^i \cdot \omega^j + \gamma}{v_i(\omega^j) + \beta \cdot s_i(\omega^j) + \gamma}. 76 | $$ 77 | 78 | The prover then computes a running product of $\mathbf{P}$, starting at $1$, 79 | and a vector of polynomials $Z_{P,0..b-1}$ that each have a Lagrange basis 80 | representation corresponding to a $u$-sized slice of this running product, as 81 | described in the [Permutation argument](permutation.md#argument-specification) 82 | section. 83 | 84 | The prover creates blinding commitments to each $Z_{P,a}$ polynomial: 85 | 86 | $$\mathbf{Z_P} = \left[\text{Commit}(Z_{P,0}(X)), \dots, \text{Commit}(Z_{P,b-1}(X))\right]$$ 87 | 88 | and sends them to the verifier. 89 | 90 | ## Committing to the lookup permutation product columns 91 | 92 | In addition to committing to the individual permuted lookups, for each lookup, 93 | the prover needs to commit to the permutation product column: 94 | 95 | - The prover constructs a vector $P$: 96 | 97 | $$ 98 | P_j = \frac{(A_\text{compressed}(\omega^j) + \beta)(S_\text{compressed}(\omega^j) + \gamma)}{(A'(\omega^j) + \beta)(S'(\omega^j) + \gamma)} 99 | $$ 100 | 101 | - The prover constructs a polynomial $Z_L$ which has a Lagrange basis representation 102 | corresponding to a running product of $P$, starting at $Z_L(1) = 1$. 103 | 104 | $\beta$ and $\gamma$ are used to combine the permutation arguments for $A'(X)$ and $S'(X)$ 105 | while keeping them independent. The important thing here is that the verifier samples 106 | $\beta$ and $\gamma$ after the prover has created $\mathbf{A}$, $\mathbf{F}$, and 107 | $\mathbf{L}$ (and thus committed to all the cell values used in lookup columns, as well 108 | as $A'(X)$ and $S'(X)$ for each lookup). 109 | 110 | As before, the prover creates blinding commitments to each $Z_L$ polynomial: 111 | 112 | $$\mathbf{Z_L} = \left[\text{Commit}(Z_L(X)), \dots \right]$$ 113 | 114 | and sends them to the verifier. 115 | -------------------------------------------------------------------------------- /book/src/design/proving-system/comparison.md: -------------------------------------------------------------------------------- 1 | # Comparison to other work 2 | 3 | ## BCMS20 Appendix A.2 4 | 5 | Appendix A.2 of [BCMS20] describes a polynomial commitment scheme that is similar to the 6 | one described in [BGH19] (BCMS20 being a generalization of the original Halo paper). Halo 7 | 2 builds on both of these works, and thus itself uses a polynomial commitment scheme that 8 | is very similar to the one in BCMS20. 
9 | 10 | [BGH19]: https://eprint.iacr.org/2019/1021 11 | [BCMS20]: https://eprint.iacr.org/2020/499 12 | 13 | The following table provides a mapping between the variable names in BCMS20, and the 14 | equivalent objects in Halo 2 (which builds on the nomenclature from the Halo paper): 15 | 16 | | BCMS20 | Halo 2 | 17 | | :------------: | :-----------------: | 18 | | $S$ | $H$ | 19 | | $H$ | $U$ | 20 | | $C$ | `msm` or $P$ | 21 | | $\alpha$ | $\iota$ | 22 | | $\xi_0$ | $z$ | 23 | | $\xi_i$ | `challenge_i` | 24 | | $H'$ | $[z] U$ | 25 | | $\bar{p}$ | `s_poly` | 26 | | $\bar{\omega}$ | `s_poly_blind` | 27 | | $\bar{C}$ | `s_poly_commitment` | 28 | | $h(X)$ | $g(X)$ | 29 | | $\omega'$ | `blind` / $\xi$ | 30 | | $\mathbf{c}$ | $\mathbf{a}$ | 31 | | $c$ | $a = \mathbf{a}_0$ | 32 | | $v'$ | $ab$ | 33 | 34 | Halo 2's polynomial commitment scheme differs from Appendix A.2 of BCMS20 in two ways: 35 | 36 | 1. Step 8 of the $\text{Open}$ algorithm computes a "non-hiding" commitment $C'$ prior to 37 | the inner product argument, which opens to the same value as $C$ but is a commitment to 38 | a randomly-drawn polynomial. The remainder of the protocol involves no blinding. By 39 | contrast, in Halo 2 we blind every single commitment that we make (even for instance 40 | and fixed polynomials, though using a blinding factor of 1 for the fixed polynomials); 41 | this makes the protocol simpler to reason about. As a consequence of this, the verifier 42 | needs to handle the cumulative blinding factor at the end of the protocol, and so there 43 | is no need to derive an equivalent to $C'$ at the start of the protocol. 44 | 45 | - $C'$ is also an input to the random oracle for $\xi_0$; in Halo 2 we utilize a 46 | transcript that has already committed to the equivalent components of $C'$ prior to 47 | sampling $z$. 48 | 49 | 2. The $\text{PC}_\text{DL}.\text{SuccinctCheck}$ subroutine (Figure 2 of BCMS20) computes 50 | the initial group element $C_0$ by adding $[v] H' = [v \epsilon] H$, which requires two 51 | scalar multiplications. Instead, we subtract $[v] G_0$ from the original commitment $P$, 52 | so that we're effectively opening the polynomial at the point to the value zero. The 53 | computation $[v] G_0$ is more efficient in the context of recursion because $G_0$ is a 54 | fixed base (so we can use lookup tables). 55 | -------------------------------------------------------------------------------- /book/src/design/proving-system/inner-product.md: -------------------------------------------------------------------------------- 1 | # Inner product argument 2 | 3 | Halo 2 uses a polynomial commitment scheme for which we can create polynomial commitment 4 | opening proofs, based around the Inner Product Argument. 5 | 6 | > TODO: Explain Halo 2's variant of the IPA. 7 | > 8 | > It is very similar to $\text{PC}_\text{DL}.\text{Open}$ from Appendix A.2 of [BCMS20]. 9 | > See [this comparison](comparison.md#bcms20-appendix-a2) for details. 10 | > 11 | > [BCMS20]: https://eprint.iacr.org/2020/499 12 | -------------------------------------------------------------------------------- /book/src/design/proving-system/multipoint-opening.md: -------------------------------------------------------------------------------- 1 | # Multipoint opening argument 2 | 3 | Consider the commitments $A, B, C, D$ to polynomials $a(X), b(X), c(X), d(X)$. 4 | Let's say that $a$ and $b$ were queried at the point $x$, while $c$ and $d$ 5 | were queried at both points $x$ and $\omega x$. 
(Here, $\omega$ is the primitive 6 | root of unity in the multiplicative subgroup over which we constructed the 7 | polynomials). 8 | 9 | To open these commitments, we could create a polynomial $Q$ for each point that we queried 10 | at (corresponding to each relative rotation used in the circuit). But this would not be 11 | efficient in the circuit; for example, $c(X)$ would appear in multiple polynomials. 12 | 13 | Instead, we can group the commitments by the sets of points at which they were queried: 14 | $$ 15 | \begin{array}{cccc} 16 | &\{x\}& &\{x, \omega x\}& \\ 17 | &A& &C& \\ 18 | &B& &D& 19 | \end{array} 20 | $$ 21 | 22 | For each of these groups, we combine them into a polynomial set, and create a single $Q$ 23 | for that set, which we open at each rotation. 24 | 25 | ## Optimization steps 26 | 27 | The multipoint opening optimization takes as input: 28 | 29 | - A random $x$ sampled by the verifier, at which we evaluate $a(X), b(X), c(X), d(X)$. 30 | - Evaluations of each polynomial at each point of interest, provided by the prover: 31 | $a(x), b(x), c(x), d(x), c(\omega x), d(\omega x)$ 32 | 33 | These are the outputs of the [vanishing argument](vanishing.md#evaluating-the-polynomials). 34 | 35 | The multipoint opening optimization proceeds as such: 36 | 37 | 1. Sample random $x_1$, to keep $a, b, c, d$ linearly independent. 38 | 2. Accumulate polynomials and their corresponding evaluations according 39 | to the point set at which they were queried: 40 | `q_polys`: 41 | $$ 42 | \begin{array}{rccl} 43 | q_1(X) &=& a(X) &+& x_1 b(X) \\ 44 | q_2(X) &=& c(X) &+& x_1 d(X) 45 | \end{array} 46 | $$ 47 | `q_eval_sets`: 48 | ```math 49 | [ 50 | [a(x) + x_1 b(x)], 51 | [ 52 | c(x) + x_1 d(x), 53 | c(\omega x) + x_1 d(\omega x) 54 | ] 55 | ] 56 | ``` 57 | NB: `q_eval_sets` is a vector of sets of evaluations, where the outer vector 58 | goes over the point sets, and the inner vector goes over the points in each set. 59 | 3. Interpolate each set of values in `q_eval_sets`: 60 | `r_polys`: 61 | $$ 62 | \begin{array}{cccc} 63 | r_1(X) s.t.&&& \\ 64 | &r_1(x) &=& a(x) + x_1 b(x) \\ 65 | r_2(X) s.t.&&& \\ 66 | &r_2(x) &=& c(x) + x_1 d(x) \\ 67 | &r_2(\omega x) &=& c(\omega x) + x_1 d(\omega x) \\ 68 | \end{array} 69 | $$ 70 | 4. Construct `f_polys` which check the correctness of `q_polys`: 71 | `f_polys` 72 | $$ 73 | \begin{array}{rcl} 74 | f_1(X) &=& \frac{ q_1(X) - r_1(X)}{X - x} \\ 75 | f_2(X) &=& \frac{ q_2(X) - r_2(X)}{(X - x)(X - \omega x)} \\ 76 | \end{array} 77 | $$ 78 | 79 | If $q_1(x) = r_1(x)$, then $f_1(X)$ should be a polynomial. 80 | If $q_2(x) = r_2(x)$ and $q_2(\omega x) = r_2(\omega x)$ 81 | then $f_2(X)$ should be a polynomial. 82 | 5. Sample random $x_2$ to keep the `f_polys` linearly independent. 83 | 6. Construct $f(X) = f_1(X) + x_2 f_2(X)$. 84 | 7. Sample random $x_3$, at which we evaluate $f(X)$: 85 | $$ 86 | \begin{array}{rcccl} 87 | f(x_3) &=& f_1(x_3) &+& x_2 f_2(x_3) \\ 88 | &=& \frac{q_1(x_3) - r_1(x_3)}{x_3 - x} &+& x_2\frac{q_2(x_3) - r_2(x_3)}{(x_3 - x)(x_3 - \omega x)} 89 | \end{array} 90 | $$ 91 | 8. Sample random $x_4$ to keep $f(X)$ and `q_polys` linearly independent. 92 | 9. Construct `final_poly`, $$final\_poly(X) = f(X) + x_4 q_1(X) + x_4^2 q_2(X),$$ 93 | which is the polynomial we commit to in the inner product argument. 
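The steps above are mostly bookkeeping over polynomials, so a small numeric example can help. The sketch below re-does steps 2 through 7 over a toy prime field with made-up polynomials and challenges, and checks that the `f_polys` really are exact quotients (zero remainder) and that the closed form in step 7 agrees with evaluating them directly. The prime, the helper names, and all values are ours, for illustration only; the real argument runs over the proof system's scalar field.

```rust
const P: u64 = 10007; // a small prime, stand-in for the scalar field

fn add(a: u64, b: u64) -> u64 { (a + b) % P }
fn sub(a: u64, b: u64) -> u64 { (a + P - b) % P }
fn mul(a: u64, b: u64) -> u64 { a * b % P }
fn inv(a: u64) -> u64 {
    // Fermat inversion: a^(P - 2) mod P.
    let (mut base, mut exp, mut acc) = (a, P - 2, 1u64);
    while exp > 0 {
        if exp & 1 == 1 { acc = mul(acc, base); }
        base = mul(base, base);
        exp >>= 1;
    }
    acc
}

/// Evaluate a polynomial given as coefficients, lowest degree first.
fn eval(poly: &[u64], x: u64) -> u64 {
    poly.iter().rev().fold(0, |acc, &c| add(mul(acc, x), c))
}

/// Divide `poly` by the monic factor (X - root), returning (quotient, remainder).
fn div_linear(poly: &[u64], root: u64) -> (Vec<u64>, u64) {
    let mut quot = vec![0u64; poly.len().saturating_sub(1)];
    let mut carry = 0u64;
    for (i, &coeff) in poly.iter().enumerate().rev() {
        let v = add(coeff, mul(carry, root));
        if i == 0 {
            return (quot, v);
        }
        quot[i - 1] = v;
        carry = v;
    }
    (quot, carry)
}

fn main() {
    // Toy a(X), b(X), c(X), d(X) and query points x, omega*x.
    let (a, b) = (vec![1u64, 2, 3], vec![5u64, 0, 7]);
    let (c, d) = (vec![4u64, 4, 1], vec![9u64, 6, 2]);
    let (x, omega_x) = (13u64, 29u64);
    let (x1, x2, x3) = (3u64, 8u64, 101u64); // "verifier challenges"

    // Step 2: q_1 = a + x1*b and q_2 = c + x1*d.
    let lin = |p: &[u64], q: &[u64]| -> Vec<u64> {
        p.iter().zip(q).map(|(&pi, &qi)| add(pi, mul(x1, qi))).collect()
    };
    let (q1, q2) = (lin(&a, &b), lin(&c, &d));

    // Step 3: r_1 is the constant polynomial through (x, q_1(x)); r_2 is the
    // line through (x, q_2(x)) and (omega*x, q_2(omega*x)).
    let r1 = vec![eval(&q1, x)];
    let (y0, y1) = (eval(&q2, x), eval(&q2, omega_x));
    let slope = mul(sub(y1, y0), inv(sub(omega_x, x)));
    let r2 = vec![sub(y0, mul(slope, x)), slope];

    // Step 4: the f_polys must be exact quotients, i.e. zero remainders.
    let diff = |p: &[u64], r: &[u64]| -> Vec<u64> {
        p.iter().enumerate().map(|(i, &pi)| sub(pi, *r.get(i).unwrap_or(&0))).collect()
    };
    let (f1, rem1) = div_linear(&diff(&q1, &r1), x);
    let (tmp, rem2a) = div_linear(&diff(&q2, &r2), x);
    let (f2, rem2b) = div_linear(&tmp, omega_x);
    assert_eq!((rem1, rem2a, rem2b), (0, 0, 0));

    // Steps 6-7: evaluating f = f_1 + x2*f_2 at x3 matches the closed form.
    let lhs = add(eval(&f1, x3), mul(x2, eval(&f2, x3)));
    let rhs = add(
        mul(sub(eval(&q1, x3), eval(&r1, x3)), inv(sub(x3, x))),
        mul(x2, mul(
            sub(eval(&q2, x3), eval(&r2, x3)),
            inv(mul(sub(x3, x), sub(x3, omega_x))),
        )),
    );
    assert_eq!(lhs, rhs);
    println!("multipoint-opening identities hold over the toy field");
}
```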
94 | -------------------------------------------------------------------------------- /book/src/design/proving-system/permutation-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DelphinusLab/halo2-gpu-specific/55d1969571375005206615deb6ff0395a7ca1607/book/src/design/proving-system/permutation-diagram.png -------------------------------------------------------------------------------- /book/src/design/proving-system/vanishing.md: -------------------------------------------------------------------------------- 1 | # Vanishing argument 2 | 3 | Having committed to the circuit assignments, the prover now needs to demonstrate that the 4 | various circuit relations are satisfied: 5 | 6 | - The custom gates, represented by polynomials $\text{gate}_i(X)$. 7 | - The rules of the lookup arguments. 8 | - The rules of the equality constraint permutations. 9 | 10 | Each of these relations is represented as a polynomial of degree $d$ (the maximum degree 11 | of any of the relations) with respect to the circuit columns. Given that the degree of the 12 | assignment polynomials for each column is $n - 1$, the relation polynomials have degree 13 | $d(n - 1)$ with respect to $X$. 14 | 15 | > In our [example](../proving-system.md#example), these would be the gate polynomials, of 16 | > degree $3n - 3$: 17 | > 18 | > - $\text{gate}_0(X) = a_0(X) \cdot a_1(X) \cdot a_2(X \omega^{-1}) - a_3(X)$ 19 | > - $\text{gate}_1(X) = f_0(X \omega^{-1}) \cdot a_2(X)$ 20 | > - $\text{gate}_2(X) = f_0(X) \cdot a_3(X) \cdot a_0(X)$ 21 | 22 | A relation is satisfied if its polynomial is equal to zero. One way to demonstrate this is 23 | to divide each polynomial relation by the vanishing polynomial $t(X) = (X^n - 1)$, which 24 | is the lowest-degree monomial that has roots at every $\omega^i$. If relation's polynomial 25 | is perfectly divisible by $t(X)$, it is equal to zero over the domain (as desired). 26 | 27 | This simple construction would require a polynomial commitment per relation. Instead, we 28 | commit to all of the circuit relations simultaneously: the verifier samples $y$, and then 29 | the prover constructs the quotient polynomial 30 | 31 | $$h(X) = \frac{\text{gate}_0(X) + y \cdot \text{gate}_1(X) + \dots + y^i \cdot \text{gate}_i(X) + \dots}{t(X)},$$ 32 | 33 | where the numerator is a random (the prover commits to the cell assignments before the 34 | verifier samples $y$) linear combination of the circuit relations. 35 | 36 | - If the numerator polynomial (in formal indeterminate $X$) is perfectly divisible by 37 | $t(X)$, then with high probability all relations are satisfied. 38 | - Conversely, if at least one relation is not satisfied, then with high probability 39 | $h(x) \cdot t(x)$ will not equal the evaluation of the numerator at $x$. In this case, 40 | the numerator polynomial would not be perfectly divisible by $t(X)$. 41 | 42 | ## Committing to $h(X)$ 43 | 44 | $h(X)$ has degree $(d - 1)n - d$ (because the divisor $t(X)$ has degree $n$). However, the 45 | polynomial commitment scheme we use for Halo 2 only supports committing to polynomials of 46 | degree $n - 1$ (which is the maximum degree that the rest of the protocol needs to commit 47 | to). 
Instead of increasing the cost of the polynomial commitment scheme, the prover split 48 | $h(X)$ into pieces of degree $n - 1$ 49 | 50 | $$h_0(X) + X^n h_1(X) + \dots + X^{n(d-1)} h_{d-1}(X),$$ 51 | 52 | and produces blinding commitments to each piece 53 | 54 | $$\mathbf{H} = [\text{Commit}(h_0(X)), \text{Commit}(h_1(X)), \dots, \text{Commit}(h_{d-1}(X))].$$ 55 | 56 | ## Evaluating the polynomials 57 | 58 | At this point, all properties of the circuit have been committed to. The verifier now 59 | wants to see if the prover committed to the correct $h(X)$ polynomial. The verifier 60 | samples $x$, and the prover produces the purported evaluations of the various polynomials 61 | at $x$, for all the relative offsets used in the circuit, as well as $h(X)$. 62 | 63 | > In our [example](../proving-system.md#example), this would be: 64 | > 65 | > - $a_0(x)$ 66 | > - $a_1(x)$ 67 | > - $a_2(x)$, $a_2(x \omega^{-1})$ 68 | > - $a_3(x)$ 69 | > - $f_0(x)$, $f_0(x \omega^{-1})$ 70 | > - $h_0(x)$, ..., $h_{d-1}(x)$ 71 | 72 | The verifier checks that these evaluations satisfy the form of $h(X)$: 73 | 74 | $$\frac{\text{gate}_0(x) + \dots + y^i \cdot \text{gate}_i(x) + \dots}{t(x)} = h_0(x) + \dots + x^{n(d-1)} h_{d-1}(x)$$ 75 | 76 | Now content that the evaluations collectively satisfy the gate constraints, the verifier 77 | needs to check that the evaluations themselves are consistent with the original 78 | [circuit commitments](circuit-commitments.md), as well as $\mathbf{H}$. To implement this 79 | efficiently, we use a [multipoint opening argument](multipoint-opening.md). 80 | -------------------------------------------------------------------------------- /book/src/user.md: -------------------------------------------------------------------------------- 1 | # User Documentation 2 | 3 | You're probably here because you want to write circuits? Excellent! 4 | 5 | This section will guide you through the process of creating circuits with halo2. 6 | -------------------------------------------------------------------------------- /book/src/user/dev-tools.md: -------------------------------------------------------------------------------- 1 | # Developer tools 2 | 3 | The `halo2` crate includes several utilities to help you design and implement your 4 | circuits. 5 | 6 | ## Mock prover 7 | 8 | `halo2_proofs::dev::MockProver` is a tool for debugging circuits, as well as cheaply verifying 9 | their correctness in unit tests. The private and public inputs to the circuit are 10 | constructed as would normally be done to create a proof, but `MockProver::run` instead 11 | creates an object that will test every constraint in the circuit directly. It returns 12 | granular error messages that indicate which specific constraint (if any) is not satisfied. 13 | 14 | ## Circuit visualizations 15 | 16 | The `dev-graph` feature flag exposes several helper methods for creating graphical 17 | representations of circuits. 18 | 19 | On Debian systems, you will need the following additional packages: 20 | ```plaintext 21 | sudo apt install cmake libexpat1-dev libfreetype6-dev 22 | ``` 23 | 24 | ### Circuit layout 25 | 26 | `halo2_proofs::dev::CircuitLayout` renders the circuit layout as a grid: 27 | 28 | ```rust,ignore,no_run 29 | {{#include ../../../halo2_proofs/examples/circuit-layout.rs:dev-graph}} 30 | ``` 31 | 32 | - Columns are laid out from left to right as instance, advice and fixed. The order of 33 | columns is otherwise without meaning. 34 | - Instance columns have a white background. 
35 | - Advice columns have a red background. 36 | - Fixed columns have a blue background. 37 | - Regions are shown as labelled green boxes (overlaying the background colour). A region 38 | may appear as multiple boxes if some of its columns happen to not be adjacent. 39 | - Cells that have been assigned to by the circuit will be shaded in grey. If any cells are 40 | assigned to more than once (which is usually a mistake), they will be shaded darker than 41 | the surrounding cells. 42 | 43 | ### Circuit structure 44 | 45 | `halo2_proofs::dev::circuit_dot_graph` builds a [DOT graph string] representing the given 46 | circuit, which can then be rendered with a variety of [layout programs]. The graph is built 47 | from calls to `Layouter::namespace` both within the circuit, and inside the gadgets and 48 | chips that it uses. 49 | 50 | [DOT graph string]: https://graphviz.org/doc/info/lang.html 51 | [layout programs]: https://en.wikipedia.org/wiki/DOT_(graph_description_language)#Layout_programs 52 | 53 | ```rust,ignore,no_run 54 | fn main() { 55 | // Prepare the circuit you want to render. 56 | // You don't need to include any witness variables. 57 | let a = Fp::rand(); 58 | let instance = Fp::one() + Fp::one(); 59 | let lookup_table = vec![instance, a, a, Fp::zero()]; 60 | let circuit: MyCircuit = MyCircuit { 61 | a: None, 62 | lookup_table, 63 | }; 64 | 65 | // Generate the DOT graph string. 66 | let dot_string = halo2_proofs::dev::circuit_dot_graph(&circuit); 67 | 68 | // Now you can either handle it in Rust, or just 69 | // print it out to use with command-line tools. 70 | print!("{}", dot_string); 71 | } 72 | ``` 73 | 74 | ## Cost estimator 75 | 76 | The `cost-model` binary takes high-level parameters for a circuit design, and estimates 77 | the verification cost, as well as resulting proof size. 78 | 79 | ```plaintext 80 | Usage: cargo run --example cost-model -- [OPTIONS] k 81 | 82 | Positional arguments: 83 | k 2^K bound on the number of rows. 84 | 85 | Optional arguments: 86 | -h, --help Print this message. 87 | -a, --advice R[,R..] An advice column with the given rotations. May be repeated. 88 | -i, --instance R[,R..] An instance column with the given rotations. May be repeated. 89 | -f, --fixed R[,R..] A fixed column with the given rotations. May be repeated. 90 | -g, --gate-degree D Maximum degree of the custom gates. 91 | -l, --lookup N,I,T A lookup over N columns with max input degree I and max table degree T. May be repeated. 92 | -p, --permutation N A permutation over N columns. May be repeated. 
93 | ``` 94 | 95 | For example, to estimate the cost of a circuit with three advice columns and one fixed 96 | column (with various rotations), and a maximum gate degree of 4: 97 | 98 | ```plaintext 99 | > cargo run --example cost-model -- -a 0,1 -a 0 -a-0,-1,1 -f 0 -g 4 11 100 | Finished dev [unoptimized + debuginfo] target(s) in 0.03s 101 | Running `target/debug/examples/cost-model -a 0,1 -a 0 -a 0,-1,1 -f 0 -g 4 11` 102 | Circuit { 103 | k: 11, 104 | max_deg: 4, 105 | advice_columns: 3, 106 | lookups: 0, 107 | permutations: [], 108 | column_queries: 7, 109 | point_sets: 3, 110 | estimator: Estimator, 111 | } 112 | Proof size: 1440 bytes 113 | Verification: at least 81.689ms 114 | ``` 115 | -------------------------------------------------------------------------------- /book/src/user/gadgets.md: -------------------------------------------------------------------------------- 1 | # Gadgets 2 | -------------------------------------------------------------------------------- /book/src/user/lookup-tables.md: -------------------------------------------------------------------------------- 1 | # Lookup tables 2 | 3 | In normal programs, you can trade memory for CPU to improve performance, by pre-computing 4 | and storing lookup tables for some part of the computation. We can do the same thing in 5 | halo2 circuits! 6 | 7 | A lookup table can be thought of as enforcing a *relation* between variables, where the relation is expressed as a table. 8 | Assuming we have only one lookup argument in our constraint system, the total size of tables is constrained by the size of the circuit: 9 | each table entry costs one row, and it also costs one row to do each lookup. 10 | 11 | TODO 12 | -------------------------------------------------------------------------------- /book/src/user/simple-example.md: -------------------------------------------------------------------------------- 1 | # A simple example 2 | 3 | Let's start with a simple circuit, to introduce you to the common APIs and how they are 4 | used. The circuit will take a public input $c$, and will prove knowledge of two private 5 | inputs $a$ and $b$ such that 6 | 7 | $$a^2 \cdot b^2 = c.$$ 8 | 9 | ## Define instructions 10 | 11 | Firstly, we need to define the instructions that our circuit will rely on. Instructions 12 | are the boundary between high-level [gadgets](../concepts/gadgets.md) and the low-level 13 | circuit operations. Instructions may be as coarse or as granular as desired, but in 14 | practice you want to strike a balance between an instruction being large enough to 15 | effectively optimize its implementation, and small enough that it is meaningfully 16 | reusable. 17 | 18 | For our circuit, we will use three instructions: 19 | - Load a private number into the circuit. 20 | - Multiply two numbers. 21 | - Expose a number as a public input to the circuit. 22 | 23 | We also need a type for a variable representing a number. Instruction interfaces provide 24 | associated types for their inputs and outputs, to allow the implementations to represent 25 | these in a way that makes the most sense for their optimization goals. 26 | 27 | ```rust,ignore,no_run 28 | {{#include ../../../halo2_proofs/examples/simple-example.rs:instructions}} 29 | ``` 30 | 31 | ## Define a chip implementation 32 | 33 | For our circuit, we will build a [chip](../concepts/chips.md) that provides the above 34 | numeric instructions for a finite field. 
35 | 36 | ```rust,ignore,no_run 37 | {{#include ../../../halo2_proofs/examples/simple-example.rs:chip}} 38 | ``` 39 | 40 | Every chip needs to implement the `Chip` trait. This defines the properties of the chip 41 | that a `Layouter` may rely on when synthesizing a circuit, as well as enabling any initial 42 | state that the chip requires to be loaded into the circuit. 43 | 44 | ```rust,ignore,no_run 45 | {{#include ../../../halo2_proofs/examples/simple-example.rs:chip-impl}} 46 | ``` 47 | 48 | ## Configure the chip 49 | 50 | The chip needs to be configured with the columns, permutations, and gates that will be 51 | required to implement all of the desired instructions. 52 | 53 | ```rust,ignore,no_run 54 | {{#include ../../../halo2_proofs/examples/simple-example.rs:chip-config}} 55 | ``` 56 | 57 | ## Implement chip traits 58 | 59 | ```rust,ignore,no_run 60 | {{#include ../../../halo2_proofs/examples/simple-example.rs:instructions-impl}} 61 | ``` 62 | 63 | ## Build the circuit 64 | 65 | Now that we have the instructions we need, and a chip that implements them, we can finally 66 | build our circuit! 67 | 68 | ```rust,ignore,no_run 69 | {{#include ../../../halo2_proofs/examples/simple-example.rs:circuit}} 70 | ``` 71 | 72 | ## Testing the circuit 73 | 74 | `halo2_proofs::dev::MockProver` can be used to test that the circuit is working correctly. The 75 | private and public inputs to the circuit are constructed as we will do to create a proof, 76 | but by passing them to `MockProver::run` we get an object that can test every constraint 77 | in the circuit, and tell us exactly what is failing (if anything). 78 | 79 | ```rust,ignore,no_run 80 | {{#include ../../../halo2_proofs/examples/simple-example.rs:test-circuit}} 81 | ``` 82 | 83 | ## Full example 84 | 85 | You can find the source code for this example 86 | [here](https://github.com/zcash/halo2/tree/main/halo2_proofs/examples/simple-example.rs). 87 | -------------------------------------------------------------------------------- /book/src/user/tips-and-tricks.md: -------------------------------------------------------------------------------- 1 | # Tips and tricks 2 | 3 | This section contains various ideas and snippets that you might find useful while writing 4 | halo2 circuits. 5 | 6 | ## Small range constraints 7 | 8 | A common constraint used in R1CS circuits is the boolean constraint: $b * (1 - b) = 0$. 9 | This constraint can only be satisfied by $b = 0$ or $b = 1$. 10 | 11 | In halo2 circuits, you can similarly constrain a cell to have one of a small set of 12 | values. For example, to constrain $a$ to the range $[0..5]$, you would create a gate of 13 | the form: 14 | 15 | $$a \cdot (1 - a) \cdot (2 - a) \cdot (3 - a) \cdot (4 - a) = 0$$ 16 | 17 | while to constraint $c$ to be either 7 or 13, you would use: 18 | 19 | $$(7 - c) \cdot (13 - c) = 0$$ 20 | 21 | > The underlying principle here is that we create a polynomial constraint with roots at 22 | > each value in the set of possible values we want to allow. In R1CS circuits, the maximum 23 | > supported polynomial degree is 2 (due to all constraints being of the form $a * b = c$). 24 | > In halo2 circuits, you can use arbitrary-degree polynomials - with the proviso that 25 | > higher-degree constraints are more expensive to use. 
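As a quick standalone check of the root-product idea (plain integers here, but the same reasoning holds in the field, since a nonzero polynomial of degree $d$ has at most $d$ roots):

```rust
fn main() {
    // a * (1 - a) * (2 - a) * (3 - a) * (4 - a) vanishes exactly on [0..5).
    let range_check = |a: i64| a * (1 - a) * (2 - a) * (3 - a) * (4 - a);
    let satisfied: Vec<i64> = (-5..10).filter(|&a| range_check(a) == 0).collect();
    assert_eq!(satisfied, vec![0, 1, 2, 3, 4]);

    // (7 - c) * (13 - c) vanishes exactly on {7, 13}.
    let seven_or_thirteen = |c: i64| (7 - c) * (13 - c);
    let satisfied: Vec<i64> = (0..20).filter(|&c| seven_or_thirteen(c) == 0).collect();
    assert_eq!(satisfied, vec![7, 13]);
}
```

In a circuit these expressions are of course gated by a selector and constrained to equal zero, rather than evaluated like this.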
26 | 27 | Note that the roots don't have to be constants; for example $(a - x) \cdot (a - y) \cdot (a - z) = 0$ will constrain $a$ to be equal to one of $\{ x, y, z \}$ where the latter can be arbitrary polynomials, as long as the whole expression stays within the maximum degree bound. 28 | 29 | ## Small set interpolation 30 | We can use Lagrange interpolation to create a polynomial constraint that maps 31 | $f(X) = Y$ for small sets of $X \in \{x_i\}, Y \in \{y_i\}$. 32 | 33 | For instance, say we want to map a 2-bit value to a "spread" version interleaved 34 | with zeros. We first precompute the evaluations at each point: 35 | 36 | $$ 37 | \begin{array}{rcl} 38 | 00 \rightarrow 0000 &\implies& 0 \rightarrow 0 \\ 39 | 01 \rightarrow 0001 &\implies& 1 \rightarrow 1 \\ 40 | 10 \rightarrow 0100 &\implies& 2 \rightarrow 4 \\ 41 | 11 \rightarrow 0101 &\implies& 3 \rightarrow 5 42 | \end{array} 43 | $$ 44 | 45 | Then, we construct the Lagrange basis polynomial for each point using the 46 | identity: 47 | $$\mathcal{l}_j(X) = \prod_{0 \leq m < k,\; m \neq j} \frac{x - x_m}{x_j - x_m},$$ 48 | where $k$ is the number of data points. ($k = 4$ in our example above.) 49 | 50 | Recall that the Lagrange basis polynomial $\mathcal{l}_j(X)$ evaluates to $1$ at 51 | $X = x_j$ and $0$ at all other $x_i, j \neq i.$ 52 | 53 | Continuing our example, we get four Lagrange basis polynomials: 54 | 55 | $$ 56 | \begin{array}{ccc} 57 | l_0(X) &=& \frac{(X - 3)(X - 2)(X - 1)}{(-3)(-2)(-1)} \\[1ex] 58 | l_1(X) &=& \frac{(X - 3)(X - 2)(X)}{(-2)(-1)(1)} \\[1ex] 59 | l_2(X) &=& \frac{(X - 3)(X - 1)(X)}{(-1)(1)(2)} \\[1ex] 60 | l_3(X) &=& \frac{(X - 2)(X - 1)(X)}{(1)(2)(3)} 61 | \end{array} 62 | $$ 63 | 64 | Our polynomial constraint is then 65 | 66 | $$ 67 | \begin{array}{cccccccccccl} 68 | &f(0) \cdot l_0(X) &+& f(1) \cdot l_1(X) &+& f(2) \cdot l_2(X) &+& f(3) \cdot l_3(X) &-& f(X) &=& 0 \\ 69 | \implies& 0 \cdot l_0(X) &+& 1 \cdot l_1(X) &+& 4 \cdot l_2(X) &+& 5 \cdot l_3(X) &-& f(X) &=& 0. \\ 70 | \end{array} 71 | $$ 72 | -------------------------------------------------------------------------------- /halo2/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to Rust's notion of 6 | [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | ### Removed 10 | - Everything (moved to `halo2_proofs` crate). 11 | 12 | ## [0.1.0-beta.1] - 2021-09-24 13 | Initial beta release! 
14 | -------------------------------------------------------------------------------- /halo2/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "halo2" 3 | version = "0.1.0-beta.1" 4 | authors = [ 5 | "Sean Bowe ", 6 | "Ying Tong Lai ", 7 | "Daira Hopwood ", 8 | "Jack Grigg ", 9 | ] 10 | edition = "2018" 11 | license-file = "../COPYING" 12 | repository = "https://github.com/zcash/halo2" 13 | documentation = "https://docs.rs/halo2" 14 | readme = "../README.md" 15 | 16 | [package.metadata.docs.rs] 17 | all-features = true 18 | rustdoc-args = ["--cfg", "docsrs", "--html-in-header", "../katex-header.html"] 19 | 20 | [dependencies] 21 | halo2_proofs = { version = "0.1.0-beta.1", path = "../halo2_proofs" } 22 | -------------------------------------------------------------------------------- /halo2/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # halo2 2 | 3 | #![cfg_attr(docsrs, feature(doc_cfg))] 4 | #![deny(rustdoc::broken_intra_doc_links)] 5 | #![deny(missing_debug_implementations)] 6 | #![deny(missing_docs)] 7 | #![deny(unsafe_code)] 8 | -------------------------------------------------------------------------------- /halo2_proofs/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to Rust's notion of 6 | [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | (relative to `halo2 0.1.0-beta.1`) 10 | 11 | ### Added 12 | - `halo2_proofs::plonk`: 13 | - `VerificationStrategy` 14 | - `SingleVerifier`, an implementation of `VerificationStrategy` for verifying 15 | proofs individually. 16 | - `BatchVerifier`, an implementation of `VerificationStrategy` for verifying 17 | multiple proofs in a batch. 18 | - `halo2_proofs::dev::FailureLocation` (used in `VerifyFailure::Lookup`) 19 | 20 | ### Changed 21 | - `halo2_proofs::plonk::verify_proof` now takes a `VerificationStrategy` instead 22 | of an `MSM` directly. 23 | - `halo2_proofs` now depends on `rand_core` instead of `rand`. 24 | - `halo2_proofs::plonk::create_proof` now take an argument `R: rand_core::RngCore`. 25 | - `halo2_proofs::plonk::Error` has been overhauled: 26 | - `Error` now implements `std::fmt::Display` and `std::error::Error`. 27 | - `Error` no longer implements `PartialEq`. Tests can check for specific error 28 | cases with `assert!(matches!(..))`, or the `assert_matches` crate. 29 | - `Error::IncompatibleParams` is now `Error::InvalidInstances`. 30 | - `Error::NotEnoughRowsAvailable` now stores the current value of `k`. 31 | - `Error::OpeningError` is now `Error::Opening`. 32 | - `Error::SynthesisError` is now `Error::Synthesis`. 33 | - `Error::TranscriptError` is now `Error::Transcript`, and stores the 34 | underlying `io::Error`. 35 | - `halo2_proofs::dev::CircuitLayout::render` now takes `k` as a `u32`, matching 36 | the regular parameter APIs. 37 | - `halo2_proofs::dev::VerifyFailure` has been overhauled: 38 | - `VerifyFailure::Cell` has been renamed to `VerifyFailure::CellNotAssigned`. 39 | - `VerifyFailure::ConstraintNotSatisfied` now has a `cell_values` field, 40 | storing the values of the cells used in the unsatisfied constraint. 
41 | - The `row` fields of `VerifyFailure::{ConstraintNotSatisfied, Lookup}` have 42 | been replaced by `location` fields, which can now indicate whether the 43 | location falls within an assigned region. 44 | - `halo2_proofs::plonk::ConstraintSystem::enable_equality` and 45 | `halo2_proofs::plonk::ConstraintSystem::query_any` now take `Into>` 46 | instead of `Column` as a parameter to avoid excesive `.into()` usage. 47 | 48 | ### Removed 49 | - `halo2_proofs::arithmetic::BatchInvert` (use `ff::BatchInvert` instead). 50 | - `impl Default for halo2_proofs::poly::Rotation` (use `Rotation::cur()` instead). 51 | - `halo2_proofs::poly`: 52 | - `EvaluationDomain::{add_extended, sub_extended, mul_extended}` 53 | - `Polynomial::one_minus` 54 | - `impl Neg, Sub for Polynomial` 55 | - `impl Mul for Polynomial<_, ExtendedLagrangeCoeff>` 56 | -------------------------------------------------------------------------------- /halo2_proofs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "halo2_proofs" 3 | version = "0.1.0-beta.1" 4 | authors = [ 5 | "Sean Bowe ", 6 | "Ying Tong Lai ", 7 | "Daira Hopwood ", 8 | "Jack Grigg ", 9 | ] 10 | edition = "2018" 11 | description = """ 12 | [BETA] Fast proof-carrying data implementation with no trusted setup 13 | """ 14 | license-file = "../COPYING" 15 | repository = "https://github.com/zcash/halo2" 16 | documentation = "https://docs.rs/halo2_proofs" 17 | readme = "README.md" 18 | 19 | [package.metadata.docs.rs] 20 | all-features = true 21 | rustdoc-args = ["--cfg", "docsrs", "--html-in-header", "../katex-header.html"] 22 | 23 | [[bench]] 24 | name = "arithmetic" 25 | harness = false 26 | 27 | [[bench]] 28 | name = "plonk" 29 | harness = false 30 | 31 | [dependencies] 32 | core_affinity = "0.5.10" 33 | ark-std = { version = "0.4.0" } 34 | backtrace = { version = "0.3", optional = true } 35 | ec-gpu-gen = { git = "https://github.com/lanbones/ec-gpu", branch = "halo2-opt-v2", optional = true } 36 | pairing = { git = "https://github.com/lanbones/pairing", package = "pairing_bn256"} 37 | rayon = "1.5.1" 38 | ff = "0.12" 39 | group = "0.12" 40 | rand = "0.8" 41 | rand_core = { version = "0.6", default-features = false } 42 | blake2b_simd = "1" 43 | subtle = "2.3" 44 | cfg-if = "0.1" 45 | num-integer = "0.1" 46 | num-bigint = { version = "0.4", features = ["rand"] } 47 | num-traits = "0.2.15" 48 | num-derive = "0.3" 49 | num = "0.4" 50 | cuda-driver-sys = "0.3.0" 51 | lazy_static = "1.4.0" 52 | memmap = "0.7" 53 | log = "0.4.17" 54 | 55 | # Developer tooling dependencies 56 | plotters = { version = "0.3.0", optional = true } 57 | tabbycat = { version = "0.1", features = ["attributes"], optional = true } 58 | 59 | [target.'cfg(target_arch = "wasm32")'.dependencies] 60 | # plotters depends on web-sys, which eventually depends on bumpalo 3. This dependency is 61 | # required because our MSRV is 1.51, but bumpalo 3.9 increased its MSRV to 1.54. We can 62 | # remove this once our MSRV is 1.54+ (and should do so, because currently this makes it a 63 | # required dependency even if the dev-graph feature flag is not enabled). 
64 | bumpalo = ">=3,<3.9.0" 65 | 66 | [build-dependencies] 67 | ec-gpu-gen = { git = "https://github.com/lanbones/ec-gpu", branch = "halo2-opt-v2", optional = true } 68 | pairing = { git = "https://github.com/lanbones/pairing", package = "pairing_bn256"} 69 | 70 | [dev-dependencies] 71 | assert_matches = "1.5" 72 | criterion = "0.3" 73 | gumdrop = "0.8" 74 | proptest = "1" 75 | rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } 76 | 77 | [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dev-dependencies] 78 | getrandom = { version = "0.2", features = ["js"] } 79 | 80 | [features] 81 | default = [] 82 | dev-graph = ["plotters", "tabbycat"] 83 | gadget-traces = ["backtrace"] 84 | sanity-checks = [] 85 | profile = ["ark-std/print-trace"] 86 | cuda = ["ec-gpu-gen/cuda", "pairing/gpu"] 87 | 88 | [lib] 89 | bench = false 90 | 91 | [[example]] 92 | name = "circuit-layout" 93 | required-features = ["dev-graph"] 94 | -------------------------------------------------------------------------------- /halo2_proofs/README.md: -------------------------------------------------------------------------------- 1 | # halo2_proofs [![Crates.io](https://img.shields.io/crates/v/halo2_proofs.svg)](https://crates.io/crates/halo2_proofs) # 2 | 3 | **IMPORTANT**: This library is in beta, and should not be used in production software. 4 | 5 | ## [Documentation](https://docs.rs/halo2_proofs) 6 | 7 | ## Minimum Supported Rust Version 8 | 9 | Requires Rust **1.51** or higher. 10 | 11 | Minimum supported Rust version can be changed in the future, but it will be done with a 12 | minor version bump. 13 | 14 | ## Controlling parallelism 15 | 16 | `halo2_proofs` currently uses [rayon](https://github.com/rayon-rs/rayon) for parallel 17 | computation. The `RAYON_NUM_THREADS` environment variable can be used to set the number of 18 | threads. 19 | 20 | ## License 21 | 22 | Copyright 2020-2021 The Electric Coin Company. 23 | 24 | You may use this package under the Bootstrap Open Source Licence, version 1.0, 25 | or at your option, any later version. See the file [`COPYING`](COPYING) for 26 | more details, and [`LICENSE-BOSL`](LICENSE-BOSL) for the terms of the Bootstrap 27 | Open Source Licence, version 1.0. 28 | 29 | The purpose of the BOSL is to allow commercial improvements to the package 30 | while ensuring that all improvements are open source. See 31 | [here](https://electriccoin.co/blog/introducing-tgppl-a-radically-new-type-of-open-source-license/) 32 | for why the BOSL exists. 
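A practical note on the "Controlling parallelism" section of this README: `RAYON_NUM_THREADS` is read when rayon's global thread pool is first initialized, so it must be set before the first parallel call (for example, `RAYON_NUM_THREADS=8 cargo bench`). The sketch below is an editorial illustration of pinning the pool size from inside a test or benchmark binary; it assumes `rayon` is available as a direct dependency and is not part of this repository.

```rust
// Editorial sketch: pin rayon's pool size before any parallel work runs.
fn main() {
    // Must happen before the first rayon call; the global pool is lazy and
    // reads RAYON_NUM_THREADS once, at the moment it is created.
    std::env::set_var("RAYON_NUM_THREADS", "4");

    // The first use of the pool (here via current_num_threads) initializes it.
    assert_eq!(rayon::current_num_threads(), 4);

    // ...build the circuit, then call create_proof / verify_proof as usual...
}
```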
33 | -------------------------------------------------------------------------------- /halo2_proofs/benches/arithmetic.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate criterion; 3 | 4 | use group::ff::Field; 5 | use halo2_proofs::arithmetic::small_multiexp; 6 | use halo2_proofs::poly::commitment::Params; 7 | 8 | use criterion::{black_box, Criterion}; 9 | use rand_core::OsRng; 10 | 11 | use pairing::bn256::{Bn256, Fr as Fp, G1Affine}; 12 | 13 | fn criterion_benchmark(c: &mut Criterion) { 14 | let rng = OsRng; 15 | 16 | // small multiexp 17 | { 18 | let params: Params = Params::::unsafe_setup::(5); 19 | 20 | let g = &mut params.get_g(); 21 | let len = g.len() / 2; 22 | let (g_lo, g_hi) = g.split_at_mut(len); 23 | 24 | let coeff_1 = Fp::random(rng); 25 | let coeff_2 = Fp::random(rng); 26 | 27 | c.bench_function("double-and-add", |b| { 28 | b.iter(|| { 29 | for (g_lo, g_hi) in g_lo.iter().zip(g_hi.iter()) { 30 | small_multiexp(&[black_box(coeff_1), black_box(coeff_2)], &[*g_lo, *g_hi]); 31 | } 32 | }) 33 | }); 34 | } 35 | } 36 | 37 | criterion_group!(benches, criterion_benchmark); 38 | criterion_main!(benches); 39 | -------------------------------------------------------------------------------- /halo2_proofs/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | #[cfg(feature = "cuda")] 3 | { 4 | use ec_gpu_gen::SourceBuilder; 5 | use pairing::bn256::{Fq, Fr, G1Affine}; 6 | let source_builder = SourceBuilder::new() 7 | .add_fft::() 8 | .add_multiexp::(); 9 | ec_gpu_gen::generate(&source_builder); 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /halo2_proofs/examples/lookup_api.rs: -------------------------------------------------------------------------------- 1 | use halo2_proofs::arithmetic::FieldExt; 2 | use halo2_proofs::circuit::{floor_planner::V1, Chip, Layouter, Region}; 3 | use halo2_proofs::dev::MockProver; 4 | use halo2_proofs::plonk::*; 5 | use pairing::bn256::Fr as Fp; 6 | use pairing::bn256::{Bn256, G1Affine}; 7 | 8 | use std::marker::PhantomData; 9 | 10 | use halo2_proofs::poly::{ 11 | commitment::{Params, ParamsVerifier}, 12 | Rotation, 13 | }; 14 | use halo2_proofs::transcript::{Blake2bRead, Blake2bWrite, Challenge255}; 15 | use rand_core::OsRng; 16 | 17 | #[derive(Clone, Debug)] 18 | struct SimpleChip { 19 | config: SimpleConfig, 20 | _marker: PhantomData, 21 | } 22 | 23 | #[derive(Clone, Debug)] 24 | struct SimpleConfig { 25 | input_0: Column, 26 | input_1: Column, 27 | input_2: Column, 28 | s_0: Column, 29 | s_1: Column, 30 | table: TableColumn, 31 | } 32 | 33 | impl Chip for SimpleChip { 34 | type Config = SimpleConfig; 35 | type Loaded = (); 36 | 37 | fn config(&self) -> &Self::Config { 38 | &self.config 39 | } 40 | fn loaded(&self) -> &Self::Loaded { 41 | &() 42 | } 43 | } 44 | 45 | impl SimpleChip { 46 | fn construct(config: SimpleConfig) -> Self { 47 | Self { 48 | config, 49 | _marker: PhantomData, 50 | } 51 | } 52 | 53 | fn configure( 54 | meta: &mut ConstraintSystem, 55 | input_0: Column, 56 | input_1: Column, 57 | input_2: Column, 58 | s_0: Column, 59 | s_1: Column, 60 | table: TableColumn, 61 | ) -> SimpleConfig { 62 | meta.create_gate("", |meta| { 63 | let input_0 = meta.query_advice(input_0, Rotation::cur()); 64 | let input_1 = meta.query_advice(input_1, Rotation::cur()); 65 | let s0 = meta.query_fixed(s_0, Rotation::cur()); 66 | vec![s0 * (input_0 * F::from(1) - input_1)] 67 | }); 68 | 
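        // (Editorial note, added for illustration.) The gate above forces
        // `input_0 == input_1` on every row where the fixed selector `s_0` is
        // non-zero. The lookup arguments that follow constrain the (optionally
        // scaled) advice values to lie in the shared `table` column, while the
        // `lookup_any` argument expresses a lookup whose table side is built
        // from ordinary advice/fixed expressions rather than a `TableColumn`.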
69 | meta.lookup("table1", |meta| { 70 | let input_0 = meta.query_advice(input_0, Rotation::cur()); 71 | [(input_0, table)].to_vec() 72 | }); 73 | meta.lookup("table2", |meta| { 74 | let input_1 = meta.query_advice(input_1, Rotation::cur()); 75 | [(input_1 * F::from(2), table)].to_vec() 76 | }); 77 | meta.lookup("table3", |meta| { 78 | let input_2 = meta.query_advice(input_2, Rotation::cur()); 79 | [(input_2, table)].to_vec() 80 | }); 81 | 82 | meta.lookup_any("any", |meta| { 83 | let input_0 = meta.query_advice(input_0, Rotation::cur()); 84 | let input_1 = meta.query_advice(input_1, Rotation::cur()); 85 | let input_2 = meta.query_advice(input_2, Rotation::cur()); 86 | let s0 = meta.query_fixed(s_0, Rotation::cur()); 87 | let s1 = meta.query_fixed(s_1, Rotation::cur()); 88 | 89 | [ 90 | (s0.clone() * input_0.clone(), s0 * input_1), 91 | (s1.clone() * input_0, s1 * input_2), 92 | ] 93 | .to_vec() 94 | }); 95 | 96 | SimpleConfig { 97 | input_0, 98 | input_1, 99 | input_2, 100 | s_0, 101 | s_1, 102 | table, 103 | } 104 | } 105 | } 106 | 107 | #[derive(Default, Clone, Debug)] 108 | struct MyCircuit { 109 | _marker: PhantomData, 110 | } 111 | 112 | impl Circuit for MyCircuit { 113 | type Config = SimpleConfig; 114 | type FloorPlanner = V1; 115 | 116 | fn without_witnesses(&self) -> Self { 117 | Self::default() 118 | } 119 | 120 | fn configure(meta: &mut ConstraintSystem) -> Self::Config { 121 | let [input_0, input_1, input_2] = [(); 3].map(|_| meta.advice_column()); 122 | let [s_0, s_1] = [(); 2].map(|_| meta.fixed_column()); 123 | let [table] = [(); 1].map(|_| meta.lookup_table_column()); 124 | SimpleChip::configure(meta, input_0, input_1, input_2, s_0, s_1, table) 125 | } 126 | 127 | fn synthesize(&self, config: Self::Config, layouter: impl Layouter) -> Result<(), Error> { 128 | let ch = SimpleChip::::construct(config.clone()); 129 | 130 | layouter.assign_region( 131 | || "inputs", 132 | |region: &Region<'_, F>| { 133 | region.assign_advice(|| "", ch.config.input_0, 0, || Ok(F::from(1 as u64)))?; 134 | region.assign_advice(|| "", ch.config.input_1, 0, || Ok(F::from(1 as u64)))?; 135 | region.assign_fixed(|| "", ch.config.s_0, 0, || Ok(F::from(1)))?; 136 | 137 | region.assign_advice(|| "", ch.config.input_0, 1, || Ok(F::from(3 as u64)))?; 138 | region.assign_advice(|| "", ch.config.input_2, 1, || Ok(F::from(3 as u64)))?; 139 | region.assign_fixed(|| "", ch.config.s_1, 1, || Ok(F::from(1)))?; 140 | 141 | Ok(()) 142 | }, 143 | )?; 144 | layouter.assign_table( 145 | || "common range table", 146 | |table| { 147 | for i in 0..9 { 148 | table.assign_cell( 149 | || "range tag", 150 | ch.config.table, 151 | i, 152 | || Ok(F::from(i as u64)), 153 | )?; 154 | } 155 | 156 | Ok(()) 157 | }, 158 | ) 159 | } 160 | } 161 | 162 | fn test_prover(k: u32, circuit: MyCircuit) { 163 | let public_inputs_size = 0; 164 | // Initialize the polynomial commitment parameters 165 | let params: Params = Params::::unsafe_setup::(k); 166 | let params_verifier: ParamsVerifier = params.verifier(public_inputs_size).unwrap(); 167 | 168 | let vk = keygen_vk(¶ms, &circuit).unwrap(); 169 | let pk = keygen_pk(¶ms, vk, &circuit).unwrap(); 170 | 171 | let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); 172 | 173 | create_proof(¶ms, &pk, &[circuit], &[&[]], OsRng, &mut transcript) 174 | .expect("proof generation should not fail"); 175 | 176 | let proof = transcript.finalize(); 177 | 178 | let strategy = SingleVerifier::new(¶ms_verifier); 179 | let mut transcript = Blake2bRead::<_, _, 
Challenge255<_>>::init(&proof[..]); 180 | 181 | assert!(verify_proof( 182 | ¶ms_verifier, 183 | pk.get_vk(), 184 | strategy, 185 | &[&[]], 186 | &mut transcript, 187 | ) 188 | .is_ok()); 189 | } 190 | 191 | fn main() { 192 | // The number of rows in our circuit cannot exceed 2^k 193 | let k = 10; 194 | 195 | let circuit = MyCircuit:: { 196 | _marker: PhantomData, 197 | }; 198 | 199 | let prover = MockProver::run(k, &circuit, vec![]).unwrap(); 200 | assert_eq!(prover.verify(), Ok(())); 201 | 202 | test_prover(k, circuit); 203 | } 204 | -------------------------------------------------------------------------------- /halo2_proofs/examples/lookup_api_set.rs: -------------------------------------------------------------------------------- 1 | use halo2_proofs::arithmetic::FieldExt; 2 | use halo2_proofs::circuit::{floor_planner::V1, Chip, Layouter, Region}; 3 | use halo2_proofs::dev::MockProver; 4 | use halo2_proofs::plonk::*; 5 | use pairing::bn256::Fr as Fp; 6 | use pairing::bn256::{Bn256, G1Affine}; 7 | 8 | use std::marker::PhantomData; 9 | 10 | use halo2_proofs::poly::{ 11 | commitment::{Params, ParamsVerifier}, 12 | Rotation, 13 | }; 14 | use halo2_proofs::transcript::{Blake2bRead, Blake2bWrite, Challenge255}; 15 | use rand_core::OsRng; 16 | 17 | #[derive(Clone, Debug)] 18 | struct SimpleChip { 19 | config: SimpleConfig, 20 | _marker: PhantomData, 21 | } 22 | 23 | #[derive(Clone, Debug)] 24 | struct SimpleConfig { 25 | inputs: [Column; 6], 26 | s_0: Column, 27 | s_1: Column, 28 | table: TableColumn, 29 | } 30 | 31 | impl Chip for SimpleChip { 32 | type Config = SimpleConfig; 33 | type Loaded = (); 34 | 35 | fn config(&self) -> &Self::Config { 36 | &self.config 37 | } 38 | fn loaded(&self) -> &Self::Loaded { 39 | &() 40 | } 41 | } 42 | 43 | impl SimpleChip { 44 | fn construct(config: SimpleConfig) -> Self { 45 | Self { 46 | config, 47 | _marker: PhantomData, 48 | } 49 | } 50 | 51 | fn configure( 52 | meta: &mut ConstraintSystem, 53 | inputs: [Column; 6], 54 | s_0: Column, 55 | s_1: Column, 56 | table: TableColumn, 57 | ) -> SimpleConfig { 58 | meta.create_gate("", |meta| { 59 | let input_0 = meta.query_advice(inputs[0], Rotation::cur()); 60 | let input_1 = meta.query_advice(inputs[1], Rotation::cur()); 61 | let s0 = meta.query_fixed(s_0, Rotation::cur()); 62 | vec![s0 * (input_0 * F::from(1) - input_1)] 63 | }); 64 | 65 | //set 0 66 | meta.lookup("table0", |meta| { 67 | let input_0 = meta.query_advice(inputs[0], Rotation::cur()); 68 | [(input_0, table)].to_vec() 69 | }); 70 | 71 | //set 1 72 | meta.lookup("table1", |meta| { 73 | let input_1 = meta.query_advice(inputs[1], Rotation::cur()); 74 | [(input_1 * F::from(2), table)].to_vec() 75 | }); 76 | meta.lookup("table2", |meta| { 77 | let input_2 = meta.query_advice(inputs[2], Rotation::cur()); 78 | [(input_2, table)].to_vec() 79 | }); 80 | 81 | //set 2 82 | meta.lookup("table3", |meta| { 83 | let input_3 = meta.query_advice(inputs[3], Rotation::cur()); 84 | [(input_3 * F::from(10), table)].to_vec() 85 | }); 86 | meta.lookup("table4", |meta| { 87 | let input_4 = meta.query_advice(inputs[4], Rotation::cur()); 88 | [(input_4, table)].to_vec() 89 | }); 90 | 91 | //set 3 92 | meta.lookup("table5", |meta| { 93 | let input_5 = meta.query_advice(inputs[5], Rotation::cur()); 94 | [(input_5, table)].to_vec() 95 | }); 96 | 97 | SimpleConfig { 98 | inputs, 99 | s_0, 100 | s_1, 101 | table, 102 | } 103 | } 104 | } 105 | 106 | #[derive(Default, Clone, Debug)] 107 | struct MyCircuit { 108 | _marker: PhantomData, 109 | } 110 | 111 | impl Circuit for 
MyCircuit { 112 | type Config = SimpleConfig; 113 | type FloorPlanner = V1; 114 | 115 | fn without_witnesses(&self) -> Self { 116 | Self::default() 117 | } 118 | 119 | fn configure(meta: &mut ConstraintSystem) -> Self::Config { 120 | let inputs = [(); 6].map(|_| meta.advice_column()); 121 | let [s_0, s_1] = [(); 2].map(|_| meta.fixed_column()); 122 | let [table] = [(); 1].map(|_| meta.lookup_table_column()); 123 | SimpleChip::configure(meta, inputs, s_0, s_1, table) 124 | } 125 | 126 | fn synthesize(&self, config: Self::Config, layouter: impl Layouter) -> Result<(), Error> { 127 | let ch = SimpleChip::::construct(config.clone()); 128 | 129 | layouter.assign_region( 130 | || "inputs", 131 | |region: &Region<'_, F>| { 132 | for i in 0..6 { 133 | region.assign_advice( 134 | || "", 135 | ch.config.inputs[i], 136 | 0, 137 | || Ok(F::from(1 as u64)), 138 | )?; 139 | } 140 | region.assign_fixed(|| "", ch.config.s_0, 0, || Ok(F::from(1)))?; 141 | 142 | for i in 0..6 { 143 | region.assign_advice( 144 | || "", 145 | ch.config.inputs[i], 146 | 1, 147 | || Ok(F::from(3 as u64)), 148 | )?; 149 | } 150 | region.assign_fixed(|| "", ch.config.s_1, 1, || Ok(F::from(1)))?; 151 | 152 | Ok(()) 153 | }, 154 | )?; 155 | layouter.assign_table( 156 | || "common range table", 157 | |table| { 158 | for i in 0..100 { 159 | table.assign_cell( 160 | || "range tag", 161 | ch.config.table, 162 | i, 163 | || Ok(F::from(i as u64)), 164 | )?; 165 | } 166 | 167 | Ok(()) 168 | }, 169 | ) 170 | } 171 | } 172 | 173 | fn test_prover(k: u32, circuit: MyCircuit) { 174 | let public_inputs_size = 0; 175 | // Initialize the polynomial commitment parameters 176 | let params: Params = Params::::unsafe_setup::(k); 177 | let params_verifier: ParamsVerifier = params.verifier(public_inputs_size).unwrap(); 178 | 179 | let vk = keygen_vk(¶ms, &circuit).unwrap(); 180 | let pk = keygen_pk(¶ms, vk, &circuit).unwrap(); 181 | 182 | let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); 183 | 184 | create_proof(¶ms, &pk, &[circuit], &[&[]], OsRng, &mut transcript) 185 | .expect("proof generation should not fail"); 186 | 187 | let proof = transcript.finalize(); 188 | 189 | let strategy = SingleVerifier::new(¶ms_verifier); 190 | let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]); 191 | 192 | assert!(verify_proof( 193 | ¶ms_verifier, 194 | pk.get_vk(), 195 | strategy, 196 | &[&[]], 197 | &mut transcript, 198 | ) 199 | .is_ok()); 200 | } 201 | 202 | fn main() { 203 | // The number of rows in our circuit cannot exceed 2^k 204 | let k = 10; 205 | 206 | let circuit = MyCircuit:: { 207 | _marker: PhantomData, 208 | }; 209 | 210 | let prover = MockProver::run(k, &circuit, vec![]).unwrap(); 211 | assert_eq!(prover.verify(), Ok(())); 212 | 213 | test_prover(k, circuit); 214 | } 215 | -------------------------------------------------------------------------------- /halo2_proofs/examples/range-check.rs: -------------------------------------------------------------------------------- 1 | use halo2_proofs::{ 2 | arithmetic::FieldExt, 3 | circuit::{floor_planner::V1, Layouter}, 4 | dev::MockProver, 5 | plonk::*, 6 | poly::commitment::{Params, ParamsVerifier}, 7 | transcript::{Blake2bRead, Blake2bWrite, Challenge255}, 8 | }; 9 | use pairing::bn256::{Bn256, Fr as Fp, G1Affine}; 10 | use rand::Rng; 11 | use rand_core::OsRng; 12 | 13 | use std::marker::PhantomData; 14 | 15 | const K: usize = 18; 16 | 17 | #[derive(Clone)] 18 | struct RangeCheckConfig { 19 | l_0: Column, 20 | l_active: Column, 21 | l_last_active: Column, 22 
| adv: Column, 23 | l_last_offset: usize, 24 | } 25 | 26 | struct TestCircuit { 27 | _mark: PhantomData, 28 | } 29 | 30 | impl Circuit for TestCircuit { 31 | type Config = RangeCheckConfig; 32 | type FloorPlanner = V1; 33 | 34 | fn without_witnesses(&self) -> Self { 35 | Self { _mark: PhantomData } 36 | } 37 | 38 | fn configure(meta: &mut ConstraintSystem) -> RangeCheckConfig { 39 | let l_0 = meta.fixed_column(); 40 | let l_active = meta.fixed_column(); 41 | let l_last_active = meta.fixed_column(); 42 | 43 | let adv = meta.advice_column_range( 44 | l_0, 45 | l_active, 46 | l_last_active, 47 | (0, F::from(0)), 48 | (u16::MAX as u32, F::from(u16::MAX as u64)), 49 | (2, F::from(2)), 50 | ); 51 | 52 | let l_last_offset = (1 << K) - (meta.blinding_factors() + 1); 53 | 54 | RangeCheckConfig { 55 | l_0, 56 | l_active, 57 | l_last_active, 58 | l_last_offset, 59 | adv, 60 | } 61 | } 62 | 63 | fn synthesize( 64 | &self, 65 | config: RangeCheckConfig, 66 | layouter: impl Layouter, 67 | ) -> Result<(), Error> { 68 | layouter.assign_region( 69 | || "region", 70 | |region| { 71 | region.assign_fixed(|| "l_0", config.l_0, 0, || Ok(F::one()))?; 72 | region.assign_fixed( 73 | || "l_last_active", 74 | config.l_last_active, 75 | config.l_last_offset - 1, 76 | || Ok(F::one()), 77 | )?; 78 | for offset in 0..config.l_last_offset { 79 | region.assign_fixed(|| "l_active", config.l_active, offset, || Ok(F::one()))?; 80 | } 81 | 82 | let mut rng = OsRng; 83 | 84 | for offset in 0..u64::from(u16::MAX) { 85 | let value = rng.gen_range(0..=u16::MAX); 86 | region.assign_advice( 87 | || "advice", 88 | config.adv, 89 | offset as usize, 90 | || Ok(F::from(value as u64)), 91 | )?; 92 | } 93 | 94 | Ok(()) 95 | }, 96 | )?; 97 | 98 | Ok(()) 99 | } 100 | } 101 | 102 | fn main() { 103 | let k = 18; 104 | let public_inputs_size = 0; 105 | 106 | let circuit: TestCircuit = TestCircuit { _mark: PhantomData }; 107 | 108 | let prover = MockProver::run(k, &circuit, vec![]).unwrap(); 109 | assert!(prover.verify().is_ok()); 110 | 111 | // Initialize the polynomial commitment parameters 112 | let params: Params = Params::::unsafe_setup::(k); 113 | let params_verifier: ParamsVerifier = params.verifier(public_inputs_size).unwrap(); 114 | 115 | // Initialize the proving key 116 | let vk = keygen_vk(¶ms, &circuit).expect("keygen_vk should not fail"); 117 | let pk = keygen_pk(¶ms, vk, &circuit).expect("keygen_pk should not fail"); 118 | 119 | // Create a proof 120 | let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); 121 | 122 | create_proof(¶ms, &pk, &[circuit], &[&[]], OsRng, &mut transcript) 123 | .expect("proof generation should not fail"); 124 | 125 | let proof = transcript.finalize(); 126 | 127 | let strategy = SingleVerifier::new(¶ms_verifier); 128 | let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]); 129 | 130 | verify_proof( 131 | ¶ms_verifier, 132 | pk.get_vk(), 133 | strategy, 134 | &[&[]], 135 | &mut transcript, 136 | ) 137 | .unwrap(); 138 | } 139 | -------------------------------------------------------------------------------- /halo2_proofs/examples/shuffle_api.rs: -------------------------------------------------------------------------------- 1 | use halo2_proofs::arithmetic::FieldExt; 2 | use halo2_proofs::circuit::{floor_planner::V1, Chip, Layouter, Region}; 3 | use halo2_proofs::dev::MockProver; 4 | use halo2_proofs::plonk::*; 5 | use pairing::bn256::Fr as Fp; 6 | use pairing::bn256::{Bn256, G1Affine}; 7 | 8 | use std::marker::PhantomData; 9 | 10 | use halo2_proofs::poly::{ 
11 | commitment::{Params, ParamsVerifier}, 12 | Rotation, 13 | }; 14 | use halo2_proofs::transcript::{Blake2bRead, Blake2bWrite, Challenge255}; 15 | use rand_core::OsRng; 16 | 17 | #[derive(Clone, Debug)] 18 | struct ShuffleChip { 19 | config: ShuffleConfig, 20 | _marker: PhantomData, 21 | } 22 | 23 | #[derive(Clone, Debug)] 24 | struct ShuffleConfig { 25 | input_0: Column, 26 | input_1: Column, 27 | shuffle_0: Column, 28 | shuffle_1: Column, 29 | s_input: Column, 30 | s_shuffle: Column, 31 | } 32 | 33 | impl Chip for ShuffleChip { 34 | type Config = ShuffleConfig; 35 | type Loaded = (); 36 | 37 | fn config(&self) -> &Self::Config { 38 | &self.config 39 | } 40 | fn loaded(&self) -> &Self::Loaded { 41 | &() 42 | } 43 | } 44 | 45 | impl ShuffleChip { 46 | fn construct(config: ShuffleConfig) -> Self { 47 | Self { 48 | config, 49 | _marker: PhantomData, 50 | } 51 | } 52 | 53 | fn configure( 54 | meta: &mut ConstraintSystem, 55 | input_0: Column, 56 | input_1: Column, 57 | shuffle_0: Column, 58 | shuffle_1: Column, 59 | s_input: Column, 60 | s_shuffle: Column, 61 | ) -> ShuffleConfig { 62 | //need at least one gate or GPU will panic 63 | meta.create_gate("", |meta| { 64 | let input_0 = meta.query_advice(input_0, Rotation::cur()); 65 | let input_1 = meta.query_advice(input_1, Rotation::cur()); 66 | let s_input = meta.query_fixed(s_input, Rotation::cur()); 67 | vec![s_input * (input_0 * F::from(10) - input_1)] 68 | }); 69 | 70 | meta.shuffle("shuffle", |meta| { 71 | let input_0 = meta.query_advice(input_0, Rotation::cur()); 72 | let shuffle_0 = meta.query_advice(shuffle_0, Rotation::cur()); 73 | let input_1 = meta.query_advice(input_1, Rotation::cur()); 74 | let shuffle_1 = meta.query_advice(shuffle_1, Rotation::cur()); 75 | let s_input = meta.query_fixed(s_input, Rotation::cur()); 76 | let s_shuffle = meta.query_fixed(s_shuffle, Rotation::cur()); 77 | 78 | [ 79 | (s_input.clone() * input_0, s_shuffle.clone() * shuffle_0), 80 | (s_input * input_1, s_shuffle * shuffle_1), 81 | ] 82 | .to_vec() 83 | }); 84 | 85 | ShuffleConfig { 86 | input_0, 87 | input_1, 88 | shuffle_0, 89 | shuffle_1, 90 | s_input, 91 | s_shuffle, 92 | } 93 | } 94 | } 95 | 96 | #[derive(Default, Clone, Debug)] 97 | struct MyCircuit { 98 | input0: Vec, 99 | input1: Vec, 100 | shuffle0: Vec, 101 | shuffle1: Vec, 102 | } 103 | 104 | impl MyCircuit { 105 | fn construct() -> Self { 106 | Self { 107 | input0: [1, 2, 4, 1].map(|x| F::from(x as u64)).to_vec(), 108 | shuffle0: [4, 1, 1, 2].map(|x| F::from(x as u64)).to_vec(), 109 | input1: [10, 20, 40, 10].map(|x| F::from(x as u64)).to_vec(), 110 | shuffle1: [40, 10, 10, 20].map(|x| F::from(x as u64)).to_vec(), 111 | } 112 | } 113 | } 114 | 115 | impl Circuit for MyCircuit { 116 | type Config = ShuffleConfig; 117 | type FloorPlanner = V1; 118 | 119 | fn without_witnesses(&self) -> Self { 120 | Self::default() 121 | } 122 | 123 | fn configure(meta: &mut ConstraintSystem) -> Self::Config { 124 | let [input_0, input_1, shuffle_0, shuffle_1] = [(); 4].map(|_| meta.advice_column()); 125 | let [s_input, s_shuffle] = [(); 2].map(|_| meta.fixed_column()); 126 | 127 | ShuffleChip::configure( 128 | meta, input_0, input_1, shuffle_0, shuffle_1, s_input, s_shuffle, 129 | ) 130 | } 131 | 132 | fn synthesize( 133 | &self, 134 | config: Self::Config, 135 | mut layouter: impl Layouter, 136 | ) -> Result<(), Error> { 137 | let ch = ShuffleChip::::construct(config.clone()); 138 | 139 | layouter.assign_region( 140 | || "inputs", 141 | |mut region: &Region<'_, F>| { 142 | for (i, (input0, input1)) in 
self.input0.iter().zip(self.input1.iter()).enumerate() 143 | { 144 | region.assign_advice(|| "", ch.config.input_0, i, || Ok(*input0))?; 145 | region.assign_advice(|| "", ch.config.input_1, i, || Ok(*input1))?; 146 | 147 | region.assign_fixed(|| "", ch.config.s_input, i, || Ok(F::from(1)))?; 148 | } 149 | Ok(()) 150 | }, 151 | )?; 152 | layouter.assign_region( 153 | || "shuffles", 154 | |mut region: &Region<'_, F>| { 155 | for (i, (shuffle0, shuffle1)) in 156 | self.shuffle0.iter().zip(self.shuffle1.iter()).enumerate() 157 | { 158 | region.assign_advice(|| "", ch.config.shuffle_0, i, || Ok(*shuffle0))?; 159 | region.assign_advice(|| "", ch.config.shuffle_1, i, || Ok(*shuffle1))?; 160 | 161 | region.assign_fixed(|| "", ch.config.s_shuffle, i, || Ok(F::from(1)))?; 162 | } 163 | Ok(()) 164 | }, 165 | ) 166 | } 167 | } 168 | 169 | fn test_prover(k: u32, circuit: MyCircuit) { 170 | let public_inputs_size = 0; 171 | // Initialize the polynomial commitment parameters 172 | let params: Params = Params::::unsafe_setup::(k); 173 | let params_verifier: ParamsVerifier = params.verifier(public_inputs_size).unwrap(); 174 | 175 | let vk = keygen_vk(¶ms, &circuit).unwrap(); 176 | let pk = keygen_pk(¶ms, vk, &circuit).unwrap(); 177 | 178 | let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); 179 | 180 | create_proof(¶ms, &pk, &[circuit], &[&[]], OsRng, &mut transcript) 181 | .expect("proof generation should not fail"); 182 | 183 | let proof = transcript.finalize(); 184 | 185 | let strategy = SingleVerifier::new(¶ms_verifier); 186 | let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]); 187 | 188 | assert!(verify_proof( 189 | ¶ms_verifier, 190 | pk.get_vk(), 191 | strategy, 192 | &[&[]], 193 | &mut transcript, 194 | ) 195 | .is_ok()); 196 | } 197 | 198 | fn main() { 199 | // The number of rows in our circuit cannot exceed 2^k 200 | let k = 10; 201 | 202 | let circuit = MyCircuit::::construct(); 203 | 204 | let prover = MockProver::run(k, &circuit, vec![]).unwrap(); 205 | assert_eq!(prover.verify(), Ok(())); 206 | 207 | test_prover(k, circuit); 208 | } 209 | -------------------------------------------------------------------------------- /halo2_proofs/proptest-regressions/plonk/assigned.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 7 | cc 9ec8b547e21d3ed71ee4f99316edb8ff7d0c4d42751bb2479a2864a661860326 # shrinks to (values, operations) = ([Rational(0x0000000000000000000000000000000000000000000000000000000000000000, 0x0000000000000000000000000000000000000000000000000000000000000000), Trivial(0x0000000000000000000000000000000000000000000000000000000000000001)], [Add]) 8 | -------------------------------------------------------------------------------- /halo2_proofs/proptest-regressions/plonk/circuit/compress_selectors.txt: -------------------------------------------------------------------------------- 1 | # Seeds for failure cases proptest has generated in the past. It is 2 | # automatically read and these particular cases re-run before any 3 | # novel cases are generated. 4 | # 5 | # It is recommended to check this file in to source control so that 6 | # everyone who runs the test benefits from these saved cases. 
7 | cc 782948e336b9fcaaf993d40cd290eff20399d34766a93793fc3a4516274c1ea7 # shrinks to (selectors, max_degree) = ([SelectorDescription { selector: 0, activations: [false], max_degree: 0 }, SelectorDescription { selector: 1, activations: [false], max_degree: 0 }], 1) 8 | cc 656e5446792c4f5fe22fd10bcd2dbadc70e84ac1ddb1a7ec8f622f64a15ff260 # shrinks to (selectors, max_degree) = ([SelectorDescription { selector: 0, activations: [false], max_degree: 1 }, SelectorDescription { selector: 1, activations: [false], max_degree: 1 }, SelectorDescription { selector: 2, activations: [false], max_degree: 1 }], 2) 9 | cc b7b81ca8745931e4dd8b4f896f7bde78f85f4d88857c5fdf9dc4bbf0f172db5e # shrinks to (selectors, max_degree) = ([SelectorDescription { selector: 0, activations: [false], max_degree: 1 }, SelectorDescription { selector: 1, activations: [false], max_degree: 1 }, SelectorDescription { selector: 2, activations: [false], max_degree: 1 }], 2) 10 | -------------------------------------------------------------------------------- /halo2_proofs/src/circuit/floor_planner.rs: -------------------------------------------------------------------------------- 1 | //! Implementations of common circuit floor planners. 2 | 3 | pub(super) mod single_pass; 4 | 5 | mod flat; 6 | mod v1; 7 | pub use flat::FlatFloorPlanner; 8 | pub use v1::{V1Pass, V1}; 9 | -------------------------------------------------------------------------------- /halo2_proofs/src/circuit/floor_planner/flat/region.rs: -------------------------------------------------------------------------------- 1 | use ff::Field; 2 | use std::{ 3 | cmp, 4 | sync::{Arc, Mutex}, 5 | }; 6 | 7 | use crate::{ 8 | circuit::{layouter::RegionColumn, RegionIndex}, 9 | parallel::Parallel, 10 | }; 11 | use crate::{ 12 | circuit::{layouter::RegionLayouter, Cell}, 13 | plonk::{Advice, Any, Assigned, Column, Error, Fixed, Instance, Selector, TableColumn}, 14 | }; 15 | use std::collections::HashSet; 16 | 17 | /// The shape of a region. For a region at a certain index, we track 18 | /// the set of columns it uses as well as the number of rows it uses. 19 | #[derive(Clone, Debug)] 20 | pub struct RegionSetup { 21 | pub(super) region_index: RegionIndex, 22 | pub(super) columns: HashSet, 23 | pub(super) row_count: usize, 24 | pub(super) constants: Vec<(Assigned, Cell)>, 25 | } 26 | 27 | impl RegionSetup { 28 | /// Create a new `RegionShape` for a region at `region_index`. 29 | pub(crate) fn new(region_index: RegionIndex) -> Self { 30 | RegionSetup { 31 | region_index, 32 | columns: HashSet::default(), 33 | row_count: 0, 34 | constants: vec![], 35 | } 36 | } 37 | } 38 | 39 | impl RegionLayouter for Parallel> { 40 | fn enable_selector<'v>( 41 | &'v self, 42 | _: &'v (dyn Fn() -> String + 'v), 43 | selector: &Selector, 44 | offset: usize, 45 | ) -> Result<(), Error> { 46 | // Track the selector's fixed column as part of the region's shape. 
47 | let mut region = self.lock().unwrap(); 48 | region.columns.insert((*selector).into()); 49 | region.row_count = cmp::max(region.row_count, offset + 1); 50 | Ok(()) 51 | } 52 | 53 | fn assign_advice<'v>( 54 | &'v self, 55 | _: &'v (dyn Fn() -> String + 'v), 56 | column: Column, 57 | offset: usize, 58 | _to: &'v mut (dyn FnMut() -> Result, Error> + 'v), 59 | ) -> Result { 60 | let mut region = self.lock().unwrap(); 61 | region.columns.insert(Column::::from(column).into()); 62 | region.row_count = cmp::max(region.row_count, offset + 1); 63 | 64 | Ok(Cell { 65 | region_index: region.region_index, 66 | row_offset: offset, 67 | column: column.into(), 68 | }) 69 | } 70 | 71 | fn assign_advice_from_constant<'v>( 72 | &'v self, 73 | annotation: &'v (dyn Fn() -> String + 'v), 74 | column: Column, 75 | offset: usize, 76 | constant: Assigned, 77 | ) -> Result { 78 | // The rest is identical to witnessing an advice cell. 79 | let advice = self.assign_advice(annotation, column, offset, &mut || Ok(constant))?; 80 | self.constrain_constant(advice, constant)?; 81 | Ok(advice) 82 | } 83 | 84 | fn assign_advice_from_instance<'v>( 85 | &self, 86 | _: &'v (dyn Fn() -> String + 'v), 87 | _: Column, 88 | _: usize, 89 | advice: Column, 90 | offset: usize, 91 | ) -> Result<(Cell, Option), Error> { 92 | let mut region = self.lock().unwrap(); 93 | region.columns.insert(Column::::from(advice).into()); 94 | region.row_count = cmp::max(region.row_count, offset + 1); 95 | 96 | Ok(( 97 | Cell { 98 | region_index: region.region_index, 99 | row_offset: offset, 100 | column: advice.into(), 101 | }, 102 | None, 103 | )) 104 | } 105 | 106 | fn assign_fixed<'v>( 107 | &'v self, 108 | _: &'v (dyn Fn() -> String + 'v), 109 | column: Column, 110 | offset: usize, 111 | _to: &'v mut (dyn FnMut() -> Result, Error> + 'v), 112 | ) -> Result { 113 | let mut region = self.lock().unwrap(); 114 | region.columns.insert(Column::::from(column).into()); 115 | region.row_count = cmp::max(region.row_count, offset + 1); 116 | 117 | Ok(Cell { 118 | region_index: region.region_index, 119 | row_offset: offset, 120 | column: column.into(), 121 | }) 122 | } 123 | 124 | fn constrain_constant(&self, cell: Cell, constant: Assigned) -> Result<(), Error> { 125 | // Global constants don't affect the region shape. 126 | let mut region = self.lock().unwrap(); 127 | region.constants.push((constant, cell)); 128 | Ok(()) 129 | } 130 | 131 | fn constrain_equal(&self, _left: Cell, _right: Cell) -> Result<(), Error> { 132 | // Equality constraints don't affect the region shape. 133 | Ok(()) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /halo2_proofs/src/circuit/floor_planner/single_pass.rs: -------------------------------------------------------------------------------- 1 | use std::cmp; 2 | use std::collections::HashMap; 3 | use std::fmt; 4 | use std::marker::PhantomData; 5 | 6 | use ff::Field; 7 | 8 | use crate::{ 9 | circuit::{ 10 | layouter::{RegionColumn, RegionLayouter, RegionShape, TableLayouter}, 11 | Cell, Layouter, Region, RegionIndex, RegionStart, Table, 12 | }, 13 | parallel::Parallel, 14 | plonk::{ 15 | Advice, Any, Assigned, Assignment, Circuit, Column, Error, Fixed, FloorPlanner, Instance, 16 | Selector, TableColumn, 17 | }, 18 | }; 19 | 20 | /// The default value to fill a table column with. 21 | /// 22 | /// - The outer `Option` tracks whether the value in row 0 of the table column has been 23 | /// assigned yet. 
This will always be `Some` once a valid table has been completely 24 | /// assigned. 25 | /// - The inner `Option` tracks whether the underlying `Assignment` is evaluating 26 | /// witnesses or not. 27 | type DefaultTableValue = Option>>; 28 | 29 | pub(crate) struct SimpleTableLayouter<'r, 'a, F: Field, CS: Assignment + 'a> { 30 | cs: &'a CS, 31 | used_columns: &'r [TableColumn], 32 | // maps from a fixed column to a pair (default value, vector saying which rows are assigned) 33 | pub(crate) default_and_assigned: 34 | Parallel, Vec)>>, 35 | } 36 | 37 | impl<'r, 'a, F: Field, CS: Assignment + 'a> fmt::Debug for SimpleTableLayouter<'r, 'a, F, CS> { 38 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 39 | f.debug_struct("SimpleTableLayouter") 40 | .field("used_columns", &self.used_columns) 41 | .field("default_and_assigned", &self.default_and_assigned) 42 | .finish() 43 | } 44 | } 45 | 46 | impl<'r, 'a, F: Field, CS: Assignment + 'a> SimpleTableLayouter<'r, 'a, F, CS> { 47 | pub(crate) fn new(cs: &'a CS, used_columns: &'r [TableColumn]) -> Self { 48 | SimpleTableLayouter { 49 | cs, 50 | used_columns, 51 | default_and_assigned: Parallel::new(HashMap::default()), 52 | } 53 | } 54 | } 55 | 56 | impl<'r, 'a, F: Field, CS: Assignment + 'a> TableLayouter 57 | for SimpleTableLayouter<'r, 'a, F, CS> 58 | { 59 | fn assign_cell<'v>( 60 | &'v self, 61 | annotation: &'v (dyn Fn() -> String + 'v), 62 | column: TableColumn, 63 | offset: usize, 64 | to: &'v mut (dyn FnMut() -> Result, Error> + 'v), 65 | ) -> Result<(), Error> { 66 | if self.used_columns.contains(&column) { 67 | return Err(Error::Synthesis); // TODO better error 68 | } 69 | 70 | let mut default_and_assigned = self.default_and_assigned.lock().unwrap(); 71 | let entry = default_and_assigned.entry(column).or_default(); 72 | 73 | let mut value = None; 74 | self.cs.assign_fixed( 75 | annotation, 76 | column.inner(), 77 | offset, // tables are always assigned starting at row 0 78 | || { 79 | let res = to(); 80 | value = res.as_ref().ok().cloned(); 81 | res 82 | }, 83 | )?; 84 | 85 | match (entry.0.is_none(), offset) { 86 | // Use the value at offset 0 as the default value for this table column. 87 | (true, 0) => entry.0 = Some(value), 88 | // Since there is already an existing default value for this table column, 89 | // the caller should not be attempting to assign another value at offset 0. 
90 | (false, 0) => return Err(Error::Synthesis), // TODO better error 91 | _ => (), 92 | } 93 | if entry.1.len() <= offset { 94 | entry.1.resize(offset + 1, false); 95 | } 96 | entry.1[offset] = true; 97 | 98 | Ok(()) 99 | } 100 | } 101 | 102 | /* 103 | #[cfg(test)] 104 | mod tests { 105 | use pairing::bn256::Fr as Scalar; 106 | 107 | use super::SimpleFloorPlanner; 108 | use crate::{ 109 | dev::MockProver, 110 | plonk::{Advice, Circuit, Column, Error}, 111 | }; 112 | 113 | #[test] 114 | fn not_enough_columns_for_constants() { 115 | struct MyCircuit {} 116 | 117 | impl Circuit for MyCircuit { 118 | type Config = Column; 119 | type FloorPlanner = SimpleFloorPlanner; 120 | 121 | fn without_witnesses(&self) -> Self { 122 | MyCircuit {} 123 | } 124 | 125 | fn configure(meta: &mut crate::plonk::ConstraintSystem) -> Self::Config { 126 | meta.advice_column() 127 | } 128 | 129 | fn synthesize( 130 | &self, 131 | config: Self::Config, 132 | mut layouter: impl crate::circuit::Layouter, 133 | ) -> Result<(), crate::plonk::Error> { 134 | layouter.assign_region( 135 | || "assign constant", 136 | |mut region| { 137 | region.assign_advice_from_constant(|| "one", config, 0, Scalar::one()) 138 | }, 139 | )?; 140 | 141 | Ok(()) 142 | } 143 | } 144 | 145 | let circuit = MyCircuit {}; 146 | assert!(matches!( 147 | MockProver::run(3, &circuit, vec![]).unwrap_err(), 148 | Error::NotEnoughColumnsForConstants, 149 | )); 150 | } 151 | } 152 | */ 153 | -------------------------------------------------------------------------------- /halo2_proofs/src/dev/graph.rs: -------------------------------------------------------------------------------- 1 | use ff::Field; 2 | use tabbycat::{AttrList, Edge, GraphBuilder, GraphType, Identity, StmtList}; 3 | 4 | use crate::plonk::{ 5 | Advice, Any, Assigned, Assignment, Circuit, Column, ConstraintSystem, Error, Fixed, 6 | FloorPlanner, Instance, Selector, 7 | }; 8 | 9 | pub mod layout; 10 | 11 | /// Builds a dot graph string representing the given circuit. 12 | /// 13 | /// The graph is built from calls to [`Layouter::namespace`] both within the circuit, and 14 | /// inside the gadgets and chips that it uses. 15 | /// 16 | /// [`Layouter::namespace`]: crate::circuit::Layouter#method.namespace 17 | pub fn circuit_dot_graph>( 18 | circuit: &ConcreteCircuit, 19 | ) -> String { 20 | // Collect the graph details. 21 | let mut cs = ConstraintSystem::default(); 22 | let (config, cs) = cs.circuit_configure::(); 23 | let mut graph = Graph::default(); 24 | ConcreteCircuit::FloorPlanner::synthesize(&mut graph, circuit, config, cs.constants).unwrap(); 25 | 26 | // Construct the node labels. We need to store these, because tabbycat operates on 27 | // string references, and we need those references to live long enough. 28 | let node_labels: Vec<_> = graph 29 | .nodes 30 | .into_iter() 31 | .map(|(name, gadget_name)| { 32 | if let Some(gadget_name) = gadget_name { 33 | format!("[{}] {}", gadget_name, name) 34 | } else { 35 | name 36 | } 37 | }) 38 | .collect(); 39 | 40 | // Construct the dot graph statements. 41 | let mut stmts = StmtList::new(); 42 | for (id, label) in node_labels.iter().enumerate() { 43 | stmts = stmts.add_node( 44 | id.into(), 45 | None, 46 | Some(AttrList::new().add_pair(tabbycat::attributes::label(label))), 47 | ); 48 | } 49 | for (parent, child) in graph.edges { 50 | stmts = 51 | stmts.add_edge(Edge::head_node(parent.into(), None).arrow_to_node(child.into(), None)) 52 | } 53 | 54 | // Build the graph! 
55 | GraphBuilder::default() 56 | .graph_type(GraphType::DiGraph) 57 | .strict(false) 58 | .id(Identity::id("circuit").unwrap()) 59 | .stmts(stmts) 60 | .build() 61 | .unwrap() 62 | .to_string() 63 | } 64 | 65 | #[derive(Default)] 66 | struct Graph { 67 | /// Graph nodes in the namespace, structured as `(name, gadget_name)`. 68 | nodes: Vec<(String, Option)>, 69 | 70 | /// Directed edges in the graph, as pairs of indices into `nodes`. 71 | edges: Vec<(usize, usize)>, 72 | 73 | /// The current namespace, as indices into `nodes`. 74 | current_namespace: Vec, 75 | } 76 | 77 | impl Assignment for Graph { 78 | fn enter_region(&mut self, _: N) 79 | where 80 | NR: Into, 81 | N: FnOnce() -> NR, 82 | { 83 | // Do nothing; we don't care about regions in this context. 84 | } 85 | 86 | fn exit_region(&mut self) { 87 | // Do nothing; we don't care about regions in this context. 88 | } 89 | 90 | fn enable_selector(&mut self, _: A, _: &Selector, _: usize) -> Result<(), Error> 91 | where 92 | A: FnOnce() -> AR, 93 | AR: Into, 94 | { 95 | // Do nothing; we don't care about cells in this context. 96 | Ok(()) 97 | } 98 | 99 | fn query_instance(&self, _: Column, _: usize) -> Result, Error> { 100 | Ok(None) 101 | } 102 | 103 | fn assign_advice( 104 | &mut self, 105 | _: A, 106 | _: Column, 107 | _: usize, 108 | _: V, 109 | ) -> Result<(), Error> 110 | where 111 | V: FnOnce() -> Result, 112 | VR: Into>, 113 | A: FnOnce() -> AR, 114 | AR: Into, 115 | { 116 | // Do nothing; we don't care about cells in this context. 117 | Ok(()) 118 | } 119 | 120 | fn assign_fixed( 121 | &mut self, 122 | _: A, 123 | _: Column, 124 | _: usize, 125 | _: V, 126 | ) -> Result<(), Error> 127 | where 128 | V: FnOnce() -> Result, 129 | VR: Into>, 130 | A: FnOnce() -> AR, 131 | AR: Into, 132 | { 133 | // Do nothing; we don't care about cells in this context. 134 | Ok(()) 135 | } 136 | 137 | fn copy( 138 | &mut self, 139 | _: Column, 140 | _: usize, 141 | _: Column, 142 | _: usize, 143 | ) -> Result<(), crate::plonk::Error> { 144 | // Do nothing; we don't care about permutations in this context. 145 | Ok(()) 146 | } 147 | 148 | fn fill_from_row( 149 | &mut self, 150 | _: Column, 151 | _: usize, 152 | _: Option>, 153 | ) -> Result<(), Error> { 154 | Ok(()) 155 | } 156 | 157 | fn push_namespace(&mut self, name_fn: N) 158 | where 159 | NR: Into, 160 | N: FnOnce() -> NR, 161 | { 162 | // Store the new node. 163 | let new_node = self.nodes.len(); 164 | self.nodes.push((name_fn().into(), None)); 165 | 166 | // Create an edge from the parent, if any. 167 | if let Some(parent) = self.current_namespace.last() { 168 | self.edges.push((*parent, new_node)); 169 | } 170 | 171 | // Push the new namespace. 172 | self.current_namespace.push(new_node); 173 | } 174 | 175 | fn pop_namespace(&mut self, gadget_name: Option) { 176 | // Store the gadget name that was extracted, if any. 177 | let node = self 178 | .current_namespace 179 | .last() 180 | .expect("pop_namespace should never be called on the root"); 181 | self.nodes[*node].1 = gadget_name; 182 | 183 | // Pop the namespace. 184 | self.current_namespace.pop(); 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /halo2_proofs/src/dev/metadata.rs: -------------------------------------------------------------------------------- 1 | //! Metadata about circuits. 2 | 3 | use crate::plonk::{self, Any}; 4 | use std::fmt; 5 | 6 | /// Metadata about a column within a circuit. 
7 | #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] 8 | pub struct Column { 9 | /// The type of the column. 10 | column_type: Any, 11 | /// The index of the column. 12 | index: usize, 13 | } 14 | 15 | impl fmt::Display for Column { 16 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 17 | write!(f, "Column('{:?}', {})", self.column_type, self.index) 18 | } 19 | } 20 | 21 | impl From<(Any, usize)> for Column { 22 | fn from((column_type, index): (Any, usize)) -> Self { 23 | Column { column_type, index } 24 | } 25 | } 26 | 27 | impl From> for Column { 28 | fn from(column: plonk::Column) -> Self { 29 | Column { 30 | column_type: *column.column_type(), 31 | index: column.index(), 32 | } 33 | } 34 | } 35 | 36 | /// A "virtual cell" is a PLONK cell that has been queried at a particular relative offset 37 | /// within a custom gate. 38 | #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] 39 | pub struct VirtualCell { 40 | name: &'static str, 41 | column: Column, 42 | rotation: i32, 43 | } 44 | 45 | impl From<(Column, i32)> for VirtualCell { 46 | fn from((column, rotation): (Column, i32)) -> Self { 47 | VirtualCell { 48 | name: "", 49 | column, 50 | rotation, 51 | } 52 | } 53 | } 54 | 55 | impl From<(&'static str, Column, i32)> for VirtualCell { 56 | fn from((name, column, rotation): (&'static str, Column, i32)) -> Self { 57 | VirtualCell { 58 | name, 59 | column, 60 | rotation, 61 | } 62 | } 63 | } 64 | 65 | impl From for VirtualCell { 66 | fn from(c: plonk::VirtualCell) -> Self { 67 | VirtualCell { 68 | name: "", 69 | column: c.column.into(), 70 | rotation: c.rotation.0, 71 | } 72 | } 73 | } 74 | 75 | impl fmt::Display for VirtualCell { 76 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 77 | write!(f, "{}@{}", self.column, self.rotation)?; 78 | if !self.name.is_empty() { 79 | write!(f, "({})", self.name)?; 80 | } 81 | Ok(()) 82 | } 83 | } 84 | 85 | /// Metadata about a configured gate within a circuit. 86 | #[derive(Debug, PartialEq)] 87 | pub struct Gate { 88 | /// The index of the active gate. These indices are assigned in the order in which 89 | /// `ConstraintSystem::create_gate` is called during `Circuit::configure`. 90 | index: usize, 91 | /// The name of the active gate. These are specified by the gate creator (such as 92 | /// a chip implementation), and is not enforced to be unique. 93 | name: &'static str, 94 | } 95 | 96 | impl fmt::Display for Gate { 97 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 98 | write!(f, "Gate {} ('{}')", self.index, self.name) 99 | } 100 | } 101 | 102 | impl From<(usize, &'static str)> for Gate { 103 | fn from((index, name): (usize, &'static str)) -> Self { 104 | Gate { index, name } 105 | } 106 | } 107 | 108 | /// Metadata about a configured constraint within a circuit. 109 | #[derive(Debug, PartialEq)] 110 | pub struct Constraint { 111 | /// The gate containing the constraint. 112 | gate: Gate, 113 | /// The index of the polynomial constraint within the gate. These indices correspond 114 | /// to the order in which the constraints are returned from the closure passed to 115 | /// `ConstraintSystem::create_gate` during `Circuit::configure`. 116 | index: usize, 117 | /// The name of the constraint. This is specified by the gate creator (such as a chip 118 | /// implementation), and is not enforced to be unique. 
119 | name: &'static str, 120 | } 121 | 122 | impl fmt::Display for Constraint { 123 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 124 | write!( 125 | f, 126 | "Constraint {}{} in gate {} ('{}')", 127 | self.index, 128 | if self.name.is_empty() { 129 | String::new() 130 | } else { 131 | format!(" ('{}')", self.name) 132 | }, 133 | self.gate.index, 134 | self.gate.name, 135 | ) 136 | } 137 | } 138 | 139 | impl From<(Gate, usize, &'static str)> for Constraint { 140 | fn from((gate, index, name): (Gate, usize, &'static str)) -> Self { 141 | Constraint { gate, index, name } 142 | } 143 | } 144 | 145 | /// Metadata about an assigned region within a circuit. 146 | #[derive(Debug, PartialEq)] 147 | pub struct Region { 148 | /// The index of the region. These indices are assigned in the order in which 149 | /// `Layouter::assign_region` is called during `Circuit::synthesize`. 150 | index: usize, 151 | /// The name of the region. This is specified by the region creator (such as a chip 152 | /// implementation), and is not enforced to be unique. 153 | name: String, 154 | } 155 | 156 | impl fmt::Display for Region { 157 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 158 | write!(f, "Region {} ('{}')", self.index, self.name) 159 | } 160 | } 161 | 162 | impl From<(usize, String)> for Region { 163 | fn from((index, name): (usize, String)) -> Self { 164 | Region { index, name } 165 | } 166 | } 167 | 168 | impl From<(usize, &str)> for Region { 169 | fn from((index, name): (usize, &str)) -> Self { 170 | Region { 171 | index, 172 | name: name.to_owned(), 173 | } 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /halo2_proofs/src/dev/util.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | use group::ff::Field; 4 | use pairing::arithmetic::FieldExt; 5 | 6 | use super::{metadata, Value}; 7 | use crate::{ 8 | plonk::{Any, Expression, Gate, VirtualCell}, 9 | poly::Rotation, 10 | }; 11 | 12 | pub(super) fn format_value(v: F) -> String { 13 | if v.is_zero_vartime() { 14 | "0".into() 15 | } else if v == F::one() { 16 | "1".into() 17 | } else if v == -F::one() { 18 | "-1".into() 19 | } else { 20 | // Format value as hex. 21 | let s = format!("{:?}", v); 22 | // Remove leading zeroes. 23 | let s = s.strip_prefix("0x").unwrap(); 24 | let s = s.trim_start_matches('0'); 25 | format!("0x{}", s) 26 | } 27 | } 28 | 29 | fn cell_value<'a, F: FieldExt>( 30 | virtual_cells: &'a [VirtualCell], 31 | column_type: Any, 32 | load: impl Fn(usize, usize, Rotation) -> Value + 'a, 33 | ) -> impl Fn(usize, usize, Rotation) -> BTreeMap + 'a { 34 | move |query_index, column_index, rotation| { 35 | virtual_cells 36 | .iter() 37 | .find(|c| { 38 | c.column.column_type() == &column_type 39 | && c.column.index() == column_index 40 | && c.rotation == rotation 41 | }) 42 | // None indicates a selector, which we don't bother showing. 
43 | .map(|cell| { 44 | ( 45 | cell.clone().into(), 46 | match load(query_index, column_index, rotation) { 47 | Value::Real(v) => format_value(v), 48 | Value::Poison => unreachable!(), 49 | }, 50 | ) 51 | }) 52 | .into_iter() 53 | .collect() 54 | } 55 | } 56 | 57 | pub(super) fn cell_values<'a, F: FieldExt>( 58 | gate: &Gate, 59 | poly: &Expression, 60 | load_fixed: impl Fn(usize, usize, Rotation) -> Value + 'a, 61 | load_advice: impl Fn(usize, usize, Rotation) -> Value + 'a, 62 | load_instance: impl Fn(usize, usize, Rotation) -> Value + 'a, 63 | ) -> Vec<(metadata::VirtualCell, String)> { 64 | let virtual_cells = gate.queried_cells(); 65 | let cell_values = poly.evaluate( 66 | &|_| BTreeMap::default(), 67 | &|_| panic!("virtual selectors are removed during optimization"), 68 | &cell_value(virtual_cells, Any::Fixed, load_fixed), 69 | &cell_value(virtual_cells, Any::Advice, load_advice), 70 | &cell_value(virtual_cells, Any::Instance, load_instance), 71 | &|a| a, 72 | &|mut a, mut b| { 73 | a.append(&mut b); 74 | a 75 | }, 76 | &|a, b| { 77 | let mut a = a(); 78 | let mut b = b(); 79 | a.append(&mut b); 80 | a 81 | }, 82 | &|a, _| a, 83 | ); 84 | cell_values.into_iter().collect() 85 | } 86 | -------------------------------------------------------------------------------- /halo2_proofs/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # halo2_proofs 2 | #![feature(local_key_cell_methods)] 3 | #![cfg_attr(docsrs, feature(doc_cfg))] 4 | // Build without warnings on stable 1.51 and later. 5 | #![allow(unknown_lints)] 6 | // Disable old lint warnings until our MSRV is at least 1.51. 7 | #![allow(renamed_and_removed_lints)] 8 | // Use the old lint name to build without warnings until our MSRV is at least 1.51. 9 | #![allow(clippy::unknown_clippy_lints)] 10 | // The actual lints we want to disable. 11 | #![allow( 12 | clippy::op_ref, 13 | clippy::assign_op_pattern, 14 | clippy::too_many_arguments, 15 | clippy::suspicious_arithmetic_impl, 16 | clippy::many_single_char_names, 17 | clippy::same_item_push, 18 | clippy::upper_case_acronyms 19 | )] 20 | #![deny(broken_intra_doc_links)] 21 | #![deny(missing_debug_implementations)] 22 | // Remove this once we update pasta_curves 23 | #![allow(unused_imports)] 24 | 25 | pub(crate) mod parallel; 26 | 27 | pub mod arithmetic; 28 | pub mod circuit; 29 | pub use pairing; 30 | mod multicore; 31 | pub mod plonk; 32 | pub mod poly; 33 | pub mod transcript; 34 | 35 | pub mod dev; 36 | pub mod helpers; 37 | 38 | #[macro_use] 39 | extern crate lazy_static; 40 | -------------------------------------------------------------------------------- /halo2_proofs/src/multicore.rs: -------------------------------------------------------------------------------- 1 | //! An interface for dealing with the kinds of parallel computations involved in 2 | //! `halo2`. It's currently just a (very!) thin wrapper around [`rayon`] but may 3 | //! be extended in the future to allow for various parallelism strategies. 
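//!
//! (Editorial illustration, not part of the original module docs.) Because this
//! module is a plain re-export of `rayon::{current_num_threads, scope, Scope}`,
//! the usual scoped-parallelism pattern applies. A self-contained sketch using
//! `rayon` directly:
//!
//! ```rust,ignore
//! fn main() {
//!     let mut data = vec![1u64; 1024];
//!     rayon::scope(|s| {
//!         for chunk in data.chunks_mut(256) {
//!             // Each chunk is doubled on the shared rayon thread pool.
//!             s.spawn(move |_| chunk.iter_mut().for_each(|v| *v *= 2));
//!         }
//!     });
//!     assert!(data.iter().all(|&v| v == 2));
//! }
//! ```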
4 | 5 | pub use rayon::{current_num_threads, scope, Scope}; 6 | -------------------------------------------------------------------------------- /halo2_proofs/src/parallel.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fmt::Debug, 3 | sync::{Arc, Mutex, MutexGuard, PoisonError}, 4 | }; 5 | 6 | #[derive(Debug)] 7 | pub struct Parallel(Arc>); 8 | 9 | // derive failed 10 | impl Clone for Parallel { 11 | fn clone(&self) -> Self { 12 | Self(self.0.clone()) 13 | } 14 | } 15 | 16 | impl Parallel { 17 | pub(crate) fn new(v: T) -> Self { 18 | Parallel(Arc::new(Mutex::new(v))) 19 | } 20 | 21 | pub(crate) fn into_inner(self) -> T { 22 | Arc::try_unwrap(self.0).unwrap().into_inner().unwrap() 23 | } 24 | 25 | pub(crate) fn lock(&self) -> Result, PoisonError>> { 26 | self.0.lock() 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/error.rs: -------------------------------------------------------------------------------- 1 | use std::cmp; 2 | use std::error; 3 | use std::fmt; 4 | use std::io; 5 | 6 | use super::{Any, Column}; 7 | 8 | /// This is an error that could occur during proving or circuit synthesis. 9 | // TODO: these errors need to be cleaned up 10 | #[derive(Debug)] 11 | pub enum Error { 12 | /// This is an error that can occur during synthesis of the circuit, for 13 | /// example, when the witness is not present. 14 | Synthesis, 15 | /// The provided instances do not match the circuit parameters. 16 | InvalidInstances, 17 | /// The constraint system is not satisfied. 18 | ConstraintSystemFailure, 19 | /// Out of bounds index passed to a backend 20 | BoundsFailure, 21 | /// Opening error 22 | Opening, 23 | /// Transcript error 24 | Transcript(io::Error), 25 | /// `k` is too small for the given circuit. 26 | NotEnoughRowsAvailable { 27 | /// The current value of `k` being used. 28 | current_k: u32, 29 | }, 30 | /// Instance provided exceeds number of available rows 31 | InstanceTooLarge, 32 | /// Circuit synthesis requires global constants, but circuit configuration did not 33 | /// call [`ConstraintSystem::enable_constant`] on fixed columns with sufficient space. 34 | /// 35 | /// [`ConstraintSystem::enable_constant`]: crate::plonk::ConstraintSystem::enable_constant 36 | NotEnoughColumnsForConstants, 37 | /// The instance sets up a copy constraint involving a column that has not been 38 | /// included in the permutation. 39 | ColumnNotInPermutation(Column), 40 | /// Not enough rows for extra range values. 41 | NotEnoughRowsForRangeCheck, 42 | } 43 | 44 | impl From for Error { 45 | fn from(error: io::Error) -> Self { 46 | // The only place we can get io::Error from is the transcript. 47 | Error::Transcript(error) 48 | } 49 | } 50 | 51 | impl Error { 52 | /// Constructs an `Error::NotEnoughRowsAvailable`. 
53 |     pub(crate) fn not_enough_rows_available(current_k: u32) -> Self {
54 |         Error::NotEnoughRowsAvailable { current_k }
55 |     }
56 | }
57 | 
58 | impl fmt::Display for Error {
59 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
60 |         match self {
61 |             Error::Synthesis => write!(f, "General synthesis error"),
62 |             Error::InvalidInstances => write!(f, "Provided instances do not match the circuit"),
63 |             Error::ConstraintSystemFailure => write!(f, "The constraint system is not satisfied"),
64 |             Error::BoundsFailure => write!(f, "An out-of-bounds index was passed to the backend"),
65 |             Error::Opening => write!(f, "Multi-opening proof was invalid"),
66 |             Error::Transcript(e) => write!(f, "Transcript error: {}", e),
67 |             Error::NotEnoughRowsAvailable { current_k } => write!(
68 |                 f,
69 |                 "k = {} is too small for the given circuit. Try using a larger value of k",
70 |                 current_k,
71 |             ),
72 |             Error::InstanceTooLarge => write!(f, "Instance vectors are larger than the circuit"),
73 |             Error::NotEnoughColumnsForConstants => {
74 |                 write!(
75 |                     f,
76 |                     "Too few fixed columns are enabled for global constants usage"
77 |                 )
78 |             }
79 |             Error::ColumnNotInPermutation(column) => write!(
80 |                 f,
81 |                 "Column {:?} must be included in the permutation. Help: try applying `meta.enable_equality` on the column",
82 |                 column
83 |             ),
84 |             Error::NotEnoughRowsForRangeCheck => write!(
85 |                 f,
86 |                 "Not enough rows for auxiliary range values. Try using a \
87 |                  larger value of k"
88 |             )
89 |         }
90 |     }
91 | }
92 | 
93 | impl error::Error for Error {
94 |     fn source(&self) -> Option<&(dyn error::Error + 'static)> {
95 |         match self {
96 |             Error::Transcript(e) => Some(e),
97 |             _ => None,
98 |         }
99 |     }
100 | }
101 | 
--------------------------------------------------------------------------------
/halo2_proofs/src/plonk/lookup.rs:
--------------------------------------------------------------------------------
1 | #![allow(dead_code)]
2 | use super::circuit::Expression;
3 | use ff::Field;
4 | 
5 | pub(crate) mod prover;
6 | pub(crate) mod verifier;
7 | 
8 | #[derive(Clone, Debug)]
9 | pub struct Argument<F: Field> {
10 |     pub name: &'static str,
11 |     pub input_expressions: Vec<Expression<F>>,
12 |     pub table_expressions: Vec<Expression<F>>,
13 | }
14 | 
15 | impl<F: Field> Argument<F> {
16 |     /// Constructs a new lookup argument.
17 |     ///
18 |     /// `table_map` is a sequence of `(input, table)` tuples.
19 |     pub fn new(name: &'static str, table_map: Vec<(Expression<F>, Expression<F>)>) -> Self {
20 |         let (input_expressions, table_expressions) = table_map.into_iter().unzip();
21 |         Argument {
22 |             name,
23 |             input_expressions,
24 |             table_expressions,
25 |         }
26 |     }
27 | 
28 |     pub(crate) fn required_degree(&self) -> usize {
29 |         assert_eq!(self.input_expressions.len(), self.table_expressions.len());
30 | 
31 |         // The first value in the permutation poly should be one.
32 |         // degree 2:
33 |         // l_0(X) * (1 - z(X)) = 0
34 |         //
35 |         // The "last" value in the permutation poly should be a boolean, for
36 |         // completeness and soundness.
37 |         // degree 3:
38 |         // l_last(X) * (z(X)^2 - z(X)) = 0
39 |         //
40 |         // Enable the permutation argument for only the rows involved.
41 |         // degree (2 + input_degree + table_degree) or 4, whichever is larger:
42 |         // (1 - (l_last(X) + l_blind(X))) * (
43 |         //   z(\omega X) (a'(X) + \beta) (s'(X) + \gamma)
44 |         //   - z(X) (\theta^{m-1} a_0(X) + ... + a_{m-1}(X) + \beta) (\theta^{m-1} s_0(X) + ... + s_{m-1}(X) + \gamma)
45 |         // ) = 0
46 |         //
47 |         // The first two values of a' and s' should be the same.
48 | // degree 2: 49 | // l_0(X) * (a'(X) - s'(X)) = 0 50 | // 51 | // Either the two values are the same, or the previous 52 | // value of a' is the same as the current value. 53 | // degree 3: 54 | // (1 - (l_last(X) + l_blind(X))) * (a′(X) − s′(X))⋅(a′(X) − a′(\omega^{-1} X)) = 0 55 | let mut input_degree = 1; 56 | for expr in self.input_expressions.iter() { 57 | input_degree = std::cmp::max(input_degree, expr.degree()); 58 | } 59 | let mut table_degree = 1; 60 | for expr in self.table_expressions.iter() { 61 | table_degree = std::cmp::max(table_degree, expr.degree()); 62 | } 63 | 64 | // In practice because input_degree and table_degree are initialized to 65 | // one, the latter half of this max() invocation is at least 4 always, 66 | // rendering this call pointless except to be explicit in case we change 67 | // the initialization of input_degree/table_degree in the future. 68 | std::cmp::max( 69 | // (1 - (l_last + l_blind)) z(\omega X) (a'(X) + \beta) (s'(X) + \gamma) 70 | 4, 71 | // (1 - (l_last + l_blind)) z(X) (\theta^{m-1} a_0(X) + ... + a_{m-1}(X) + \beta) (\theta^{m-1} s_0(X) + ... + s_{m-1}(X) + \gamma) 72 | 2 + input_degree + table_degree, 73 | ) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/permutation.rs: -------------------------------------------------------------------------------- 1 | use super::circuit::{Any, Column}; 2 | use crate::{ 3 | arithmetic::CurveAffine, 4 | helpers::CurveRead, 5 | poly::{Coeff, ExtendedLagrangeCoeff, LagrangeCoeff, Polynomial}, 6 | }; 7 | 8 | pub(crate) mod keygen; 9 | pub(crate) mod prover; 10 | pub(crate) mod verifier; 11 | 12 | use std::io; 13 | 14 | /// A permutation argument. 15 | #[derive(Debug, Clone)] 16 | pub struct Argument { 17 | /// A sequence of columns involved in the argument. 18 | pub columns: Vec>, 19 | } 20 | 21 | impl Argument { 22 | pub(crate) fn new() -> Self { 23 | Argument { columns: vec![] } 24 | } 25 | 26 | /// Returns the minimum circuit degree required by the permutation argument. 27 | /// The argument may use larger degree gates depending on the actual 28 | /// circuit's degree and how many columns are involved in the permutation. 29 | pub(crate) fn required_degree(&self) -> usize { 30 | // degree 2: 31 | // l_0(X) * (1 - z(X)) = 0 32 | // 33 | // We will fit as many polynomials p_i(X) as possible 34 | // into the required degree of the circuit, so the 35 | // following will not affect the required degree of 36 | // this middleware. 37 | // 38 | // (1 - (l_last(X) + l_blind(X))) * ( 39 | // z(\omega X) \prod (p(X) + \beta s_i(X) + \gamma) 40 | // - z(X) \prod (p(X) + \delta^i \beta X + \gamma) 41 | // ) 42 | // 43 | // On the first sets of columns, except the first 44 | // set, we will do 45 | // 46 | // l_0(X) * (z(X) - z'(\omega^(last) X)) = 0 47 | // 48 | // where z'(X) is the permutation for the previous set 49 | // of columns. 50 | // 51 | // On the final set of columns, we will do 52 | // 53 | // degree 3: 54 | // l_last(X) * (z'(X)^2 - z'(X)) = 0 55 | // 56 | // which will allow the last value to be zero to 57 | // ensure the argument is perfectly complete. 58 | 59 | // There are constraints of degree 3 regardless of the 60 | // number of columns involved. 
61 | 3 62 | } 63 | 64 | pub(crate) fn add_column(&mut self, column: Column) { 65 | if !self.columns.contains(&column) { 66 | self.columns.push(column); 67 | } 68 | } 69 | 70 | pub(crate) fn get_columns(&self) -> Vec> { 71 | self.columns.clone() 72 | } 73 | } 74 | 75 | /// The verifying key for a single permutation argument. 76 | #[derive(Debug, Clone)] 77 | pub struct VerifyingKey { 78 | pub commitments: Vec, 79 | } 80 | 81 | impl VerifyingKey { 82 | pub fn write(&self, writer: &mut W) -> io::Result<()> { 83 | for commitment in &self.commitments { 84 | writer.write_all(commitment.to_bytes().as_ref())?; 85 | } 86 | 87 | Ok(()) 88 | } 89 | 90 | pub fn read(reader: &mut R, argument: &Argument) -> io::Result { 91 | let commitments = (0..argument.columns.len()) 92 | .map(|_| C::read(reader)) 93 | .collect::, _>>()?; 94 | Ok(VerifyingKey { commitments }) 95 | } 96 | } 97 | 98 | /// The proving key for a single permutation argument. 99 | #[derive(Debug)] 100 | pub struct ProvingKey { 101 | pub permutations: Vec>, 102 | pub polys: Vec>, 103 | 104 | #[cfg(not(feature = "cuda"))] 105 | pub(super) cosets: Vec>, 106 | } 107 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/range_check.rs: -------------------------------------------------------------------------------- 1 | use ff::Field; 2 | use num::FromPrimitive; 3 | use num_derive::FromPrimitive; 4 | use pairing::arithmetic::FieldExt; 5 | 6 | use super::Advice; 7 | use super::Column; 8 | 9 | #[derive(Clone, Debug)] 10 | pub struct RangeCheckRel { 11 | pub origin: Column, 12 | pub sort: Column, 13 | pub min: (u32, F), 14 | pub max: (u32, F), 15 | pub step: (u32, F), 16 | } 17 | 18 | impl RangeCheckRel { 19 | pub fn new( 20 | origin: Column, 21 | sort: Column, 22 | min: (u32, F), 23 | max: (u32, F), 24 | step: (u32, F), 25 | ) -> Self { 26 | assert_ne!(step.0, 0); 27 | assert!(min.0 <= max.0); 28 | 29 | RangeCheckRel { 30 | origin, 31 | sort, 32 | min, 33 | max, 34 | step, 35 | } 36 | } 37 | } 38 | 39 | pub(crate) struct RangeCheckRelAssigner { 40 | current: u32, 41 | maximal: u32, 42 | step: u32, 43 | } 44 | 45 | impl Iterator for RangeCheckRelAssigner { 46 | type Item = u32; 47 | 48 | fn next(&mut self) -> Option { 49 | let value = self.current; 50 | 51 | if value < self.maximal { 52 | self.current = u32::min(value + self.step, self.maximal); 53 | 54 | Some(value) 55 | } else if self.current == self.maximal { 56 | self.current += self.step; 57 | 58 | Some(value) 59 | } else { 60 | None 61 | } 62 | } 63 | } 64 | 65 | impl From<&RangeCheckRel> for RangeCheckRelAssigner { 66 | fn from(value: &RangeCheckRel) -> Self { 67 | RangeCheckRelAssigner { 68 | current: value.min.0, 69 | maximal: value.max.0, 70 | step: value.step.0, 71 | } 72 | } 73 | } 74 | 75 | #[derive(Clone, Debug)] 76 | pub struct Argument(pub Vec>); 77 | 78 | impl Argument { 79 | pub(crate) fn new() -> Self { 80 | Self(vec![]) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/shuffle.rs: -------------------------------------------------------------------------------- 1 | use super::circuit::Expression; 2 | use ff::Field; 3 | use std::collections::BTreeMap; 4 | pub(crate) mod prover; 5 | pub(crate) mod verifier; 6 | 7 | // group of shuffle ArgumentUnit 8 | #[derive(Clone, Debug)] 9 | pub struct Argument(pub Vec>); 10 | 11 | impl Argument { 12 | //get the degree sum to group's all elements 13 | pub(crate) fn degree_sum(&self) -> usize { 14 | 
        self.0.iter().map(|arg| arg.degree()).sum::<usize>()
15 |     }
16 | }
17 | 
18 | #[derive(Clone, Debug)]
19 | pub struct ArgumentUnit<F: Field> {
20 |     pub name: &'static str,
21 |     pub input_expressions: Vec<Expression<F>>,
22 |     pub shuffle_expressions: Vec<Expression<F>>,
23 | }
24 | 
25 | impl<F: Field> ArgumentUnit<F> {
26 |     /// Constructs a new shuffle lookup argument.
27 |     /// `shuffle_map` is a sequence of `(input, shuffle)` tuples.
28 |     pub fn new(name: &'static str, shuffle_map: Vec<(Expression<F>, Expression<F>)>) -> Self {
29 |         let (input_expressions, shuffle_expressions) = shuffle_map.into_iter().unzip();
30 |         ArgumentUnit {
31 |             name,
32 |             input_expressions,
33 |             shuffle_expressions,
34 |         }
35 |     }
36 | 
37 |     // Returns the maximum degree over the input and shuffle expressions.
38 |     pub(crate) fn degree(&self) -> usize {
39 |         assert_eq!(self.input_expressions.len(), self.shuffle_expressions.len());
40 |         let mut input_degree = 1;
41 |         for expr in self.input_expressions.iter() {
42 |             input_degree = std::cmp::max(input_degree, expr.degree());
43 |         }
44 |         let mut shuffle_degree = 1;
45 |         for expr in self.shuffle_expressions.iter() {
46 |             shuffle_degree = std::cmp::max(shuffle_degree, expr.degree());
47 |         }
48 |         std::cmp::max(shuffle_degree, input_degree)
49 |     }
50 | 
51 |     // Returns the circuit degree required by this shuffle gate.
52 |     pub(crate) fn required_degree(&self) -> usize {
53 |         assert_eq!(self.input_expressions.len(), self.shuffle_expressions.len());
54 |         // degree (2 + input_degree) or (2 + shuffle_degree), whichever is larger:
55 |         // (1 - (l_last + l_blind)) (z(\omega X) (s1(X) + \beta) - z(X) (a(X) + \beta))
56 |         let mut input_degree = 1;
57 |         for expr in self.input_expressions.iter() {
58 |             input_degree = std::cmp::max(input_degree, expr.degree());
59 |         }
60 |         let mut shuffle_degree = 1;
61 |         for expr in self.shuffle_expressions.iter() {
62 |             shuffle_degree = std::cmp::max(shuffle_degree, expr.degree());
63 |         }
64 |         std::cmp::max(2 + shuffle_degree, 2 + input_degree)
65 |     }
66 | }
67 | 
68 | // Pack shuffle arguments of small degree into one group, subject to max_degree, to reduce the number of final shuffle polynomials. Each group enforces:
69 | // (1 - (l_last + l_blind)) (z(\omega X)*(s1(X) + \beta)*(s2(X) + \beta^2).. - z(X)*(a1(X) + \beta)*(a2(X) + \beta^2)..)
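// Editor's note — an illustrative (hypothetical) trace of the greedy packing below:
// with global_degree = 9, the per-group budget is max_degree = 9 - 2 = 7, so argument
// degrees [3, 3, 2, 5] are packed as [3, 3] (sum 6) and [2, 5] (sum 7), i.e. two grouped
// shuffle polynomials instead of four.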
70 | pub(crate) fn chunk( 71 | tracer: &[ArgumentUnit], 72 | global_degree: usize, 73 | ) -> Vec> { 74 | assert!(tracer.len() > 0, "shuffle tracer is 0"); 75 | assert!(global_degree > 2, "Invalid degree"); 76 | //(1 - (l_last + l_blind)) * z(\omega X) has 2 degree 77 | let max_degree = global_degree - 2; 78 | let mut groups = vec![Argument(vec![tracer[0].clone()])]; 79 | for arg in tracer.iter().skip(1) { 80 | let new_deg = arg.degree(); 81 | let mut hit = false; 82 | for group in groups.iter_mut() { 83 | if group.degree_sum() + new_deg <= max_degree { 84 | group.0.push(arg.clone()); 85 | hit = true; 86 | break; 87 | } 88 | } 89 | //not hit, create new group 90 | if !hit { 91 | groups.push(Argument(vec![arg.clone()])); 92 | } 93 | } 94 | assert_eq!( 95 | groups.iter().map(|group| group.0.len()).sum::(), 96 | tracer.len() 97 | ); 98 | assert_eq!( 99 | groups.iter().all(|group| group.degree_sum() <= max_degree), 100 | true 101 | ); 102 | groups 103 | } 104 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/shuffle/verifier.rs: -------------------------------------------------------------------------------- 1 | use std::iter; 2 | 3 | use super::super::{ 4 | circuit::Expression, ChallengeBeta, ChallengeGamma, ChallengeTheta, ChallengeX, 5 | }; 6 | use super::Argument; 7 | use crate::{ 8 | arithmetic::{BaseExt, CurveAffine, FieldExt}, 9 | plonk::{Error, VerifyingKey}, 10 | poly::{multiopen::VerifierQuery, Rotation}, 11 | transcript::{EncodedChallenge, TranscriptRead}, 12 | }; 13 | use ff::Field; 14 | 15 | #[derive(Debug)] 16 | pub struct Committed { 17 | pub product_commitment: C, 18 | } 19 | 20 | #[derive(Debug)] 21 | pub struct Evaluated { 22 | pub committed: Committed, 23 | pub product_eval: C::Scalar, 24 | pub product_next_eval: C::Scalar, 25 | } 26 | 27 | impl Argument { 28 | pub fn read_product_commitment< 29 | C: CurveAffine, 30 | E: EncodedChallenge, 31 | T: TranscriptRead, 32 | >( 33 | &self, 34 | transcript: &mut T, 35 | ) -> Result, Error> { 36 | let product_commitment = transcript.read_point()?; 37 | 38 | Ok(Committed { product_commitment }) 39 | } 40 | } 41 | 42 | impl Committed { 43 | pub fn evaluate, T: TranscriptRead>( 44 | self, 45 | transcript: &mut T, 46 | ) -> Result, Error> { 47 | let product_eval = transcript.read_scalar()?; 48 | let product_next_eval = transcript.read_scalar()?; 49 | 50 | Ok(Evaluated { 51 | committed: self, 52 | product_eval, 53 | product_next_eval, 54 | }) 55 | } 56 | } 57 | 58 | impl Evaluated { 59 | pub(in crate::plonk) fn expressions<'a>( 60 | &'a self, 61 | l_0: C::Scalar, 62 | l_last: C::Scalar, 63 | l_blind: C::Scalar, 64 | argument: &'a Argument, 65 | theta: ChallengeTheta, 66 | beta: ChallengeBeta, 67 | advice_evals: &[C::Scalar], 68 | fixed_evals: &[C::Scalar], 69 | instance_evals: &[C::Scalar], 70 | ) -> impl Iterator + 'a { 71 | let active_rows = C::Scalar::one() - (l_last + l_blind); 72 | let product_expression = || { 73 | // (\theta^{m-1} s_0(X) + ... 
+ s_{m-1}(X)) 74 | let compress_expressions = |expressions: &[Expression]| { 75 | expressions 76 | .iter() 77 | .map(|expression| { 78 | expression.evaluate( 79 | &|scalar| scalar, 80 | &|_| panic!("virtual selectors are removed during optimization"), 81 | &|index, _, _| fixed_evals[index], 82 | &|index, _, _| advice_evals[index], 83 | &|index, _, _| instance_evals[index], 84 | &|a| -a, 85 | &|a, b| a + &b, 86 | &|a, b| a() * &b(), 87 | &|a, scalar| a * &scalar, 88 | ) 89 | }) 90 | .fold(C::Scalar::zero(), |acc, eval| acc * &*theta + &eval) 91 | }; 92 | let challenges: Vec = (0..argument.0.len()) 93 | .map(|i| beta.pow_vartime([1 + i as u64, 0, 0, 0])) 94 | .collect(); 95 | let (product_shuffle, product_input) = argument 96 | .0 97 | .iter() 98 | .zip(challenges.iter()) 99 | .map(|(argument, lcx)| { 100 | ( 101 | compress_expressions(&argument.shuffle_expressions) + lcx, 102 | compress_expressions(&argument.input_expressions) + lcx, 103 | ) 104 | }) 105 | .fold((C::Scalar::one(), C::Scalar::one()), |acc, v| { 106 | (acc.0 * v.0, acc.1 * v.1) 107 | }); 108 | let left = self.product_next_eval * &product_shuffle; 109 | let right = self.product_eval * &product_input; 110 | (left - &right) * &active_rows 111 | }; 112 | 113 | std::iter::empty() 114 | .chain( 115 | // l_0(X) * (1 - z'(X)) = 0 116 | Some(l_0 * &(C::Scalar::one() - &self.product_eval)), 117 | ) 118 | .chain( 119 | // l_last(X) * (z(X)^2 - z(X)) = 0 120 | Some(l_last * &(self.product_eval.square() - &self.product_eval)), 121 | ) 122 | .chain( 123 | // (1 - (l_last(X) + l_blind(X))) * 124 | //( z(\omega X) (s1(X)+\beta)(s2(X)+\beta^2) - z(X) (a1(X)+\beta)(a2(X)+\beta^2)) 125 | Some(product_expression()), 126 | ) 127 | } 128 | 129 | pub(in crate::plonk) fn queries<'r>( 130 | &'r self, 131 | vk: &'r VerifyingKey, 132 | x: ChallengeX, 133 | ) -> impl Iterator> + Clone { 134 | let x_next = vk.domain.rotate_omega(*x, Rotation::next()); 135 | 136 | iter::empty() 137 | // Open lookup product commitment at x 138 | .chain(Some(VerifierQuery::new_commitment( 139 | &self.committed.product_commitment, 140 | *x, 141 | Rotation::cur(), 142 | self.product_eval, 143 | ))) 144 | // Open lookup product commitment at \omega x 145 | .chain(Some(VerifierQuery::new_commitment( 146 | &self.committed.product_commitment, 147 | x_next, 148 | Rotation::next(), 149 | self.product_next_eval, 150 | ))) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/vanishing.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use crate::arithmetic::CurveAffine; 4 | 5 | mod prover; 6 | mod verifier; 7 | 8 | /// A vanishing argument. 
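// Editor's note (illustrative, not part of the original source): this argument commits to
// the quotient
//     h(X) = (\sum_i y^i e_i(X)) / (X^n - 1),
// establishing that every gate expression e_i(X) vanishes over the n-row evaluation domain.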
9 | pub(crate) struct Argument { 10 | _marker: PhantomData, 11 | } 12 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/vanishing/prover.rs: -------------------------------------------------------------------------------- 1 | use std::iter; 2 | 3 | use ff::Field; 4 | use group::Curve; 5 | use rand::rngs::ThreadRng; 6 | use rand::thread_rng; 7 | use rand_core::RngCore; 8 | use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; 9 | use rayon::slice::ParallelSlice; 10 | 11 | use super::Argument; 12 | use crate::poly::Rotation; 13 | use crate::{ 14 | arithmetic::{eval_polynomial, CurveAffine, FieldExt}, 15 | plonk::{ChallengeX, ChallengeY, Error}, 16 | poly::{ 17 | self, 18 | commitment::{Blind, Params}, 19 | multiopen::ProverQuery, 20 | Coeff, EvaluationDomain, ExtendedLagrangeCoeff, Polynomial, 21 | }, 22 | transcript::{EncodedChallenge, TranscriptWrite}, 23 | }; 24 | use rayon::iter::IntoParallelRefMutIterator; 25 | 26 | pub(in crate::plonk) struct Committed { 27 | random_poly: Polynomial, 28 | } 29 | 30 | pub(in crate::plonk) struct Constructed { 31 | h_pieces: Vec>, 32 | committed: Committed, 33 | } 34 | 35 | pub(in crate::plonk) struct Evaluated { 36 | h_poly: Polynomial, 37 | committed: Committed, 38 | } 39 | 40 | impl Argument { 41 | pub(in crate::plonk) fn commit, R: RngCore, T: TranscriptWrite>( 42 | params: &Params, 43 | domain: &EvaluationDomain, 44 | mut rng: R, 45 | transcript: &mut T, 46 | ) -> Result, Error> { 47 | // Sample a random polynomial of degree n - 1 48 | let mut random_poly = domain.empty_coeff(); 49 | 50 | let random = vec![0; domain.k() as usize] 51 | .iter() 52 | .map(|_| C::ScalarExt::random(&mut rng)) 53 | .collect::>(); 54 | 55 | random_poly.par_iter_mut().for_each(|coeff| { 56 | let mut rng = thread_rng(); 57 | *coeff = (C::ScalarExt::random(&mut rng) 58 | + random[rng.next_u64() as usize % domain.k() as usize]) 59 | * (C::ScalarExt::random(&mut rng) 60 | + random[rng.next_u64() as usize % domain.k() as usize]) 61 | }); 62 | 63 | // Commit 64 | let c = params.commit(&random_poly).to_affine(); 65 | transcript.write_point(c)?; 66 | 67 | Ok(Committed { random_poly }) 68 | } 69 | } 70 | 71 | impl Committed { 72 | pub(in crate::plonk) fn construct, T: TranscriptWrite>( 73 | self, 74 | params: &Params, 75 | domain: &EvaluationDomain, 76 | h_poly: Polynomial, 77 | transcript: &mut T, 78 | ) -> Result, Error> { 79 | // Divide by t(X) = X^{params.n} - 1. 
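// Editor's note (illustrative): after the division the prover works with the split form
//     h(X) = h_0(X) + X^n h_1(X) + X^{2n} h_2(X) + ...
// where each piece h_i(X) has degree below n; the pieces are committed to individually below.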
80 | let h_poly = domain.divide_by_vanishing_poly(h_poly); 81 | 82 | // Obtain final h(X) polynomial 83 | let h_poly = domain.extended_to_coeff(h_poly); 84 | 85 | // Split h(X) up into pieces 86 | let h_pieces = h_poly 87 | .par_chunks_exact(params.n as usize) 88 | .map(|v| domain.coeff_from_vec(v.to_vec())) 89 | .collect::>(); 90 | drop(h_poly); 91 | 92 | // Compute commitments to each h(X) piece 93 | let h_commitments_projective: Vec<_> = h_pieces 94 | .iter() 95 | .map(|h_piece| params.commit(h_piece)) 96 | .collect(); 97 | let mut h_commitments = vec![C::identity(); h_commitments_projective.len()]; 98 | C::Curve::batch_normalize(&h_commitments_projective, &mut h_commitments); 99 | 100 | // Hash each h(X) piece 101 | for c in h_commitments.iter() { 102 | transcript.write_point(*c)?; 103 | } 104 | 105 | Ok(Constructed { 106 | h_pieces, 107 | committed: self, 108 | }) 109 | } 110 | } 111 | 112 | impl Constructed { 113 | pub(in crate::plonk) fn evaluate, T: TranscriptWrite>( 114 | self, 115 | x: ChallengeX, 116 | xn: C::Scalar, 117 | domain: &EvaluationDomain, 118 | transcript: &mut T, 119 | ) -> Result, Error> { 120 | let h_poly = self 121 | .h_pieces 122 | .iter() 123 | .rev() 124 | .fold(domain.empty_coeff(), |acc, eval| acc * xn + eval); 125 | 126 | let random_eval = eval_polynomial(&self.committed.random_poly, *x); 127 | transcript.write_scalar(random_eval)?; 128 | 129 | Ok(Evaluated { 130 | h_poly, 131 | committed: self.committed, 132 | }) 133 | } 134 | } 135 | 136 | impl Evaluated { 137 | pub(in crate::plonk) fn open( 138 | &self, 139 | x: ChallengeX, 140 | ) -> impl Iterator> + Clone { 141 | iter::empty() 142 | .chain(Some(ProverQuery { 143 | point: *x, 144 | rotation: Rotation::cur(), 145 | poly: &self.h_poly, 146 | })) 147 | .chain(Some(ProverQuery { 148 | point: *x, 149 | rotation: Rotation::cur(), 150 | poly: &self.committed.random_poly, 151 | })) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /halo2_proofs/src/plonk/vanishing/verifier.rs: -------------------------------------------------------------------------------- 1 | use std::iter; 2 | 3 | use ff::Field; 4 | 5 | use crate::poly::Rotation; 6 | use crate::{ 7 | arithmetic::CurveAffine, 8 | plonk::{Error, VerifyingKey}, 9 | poly::{commitment::Params, multiopen::VerifierQuery, MSM}, 10 | transcript::{read_n_points, EncodedChallenge, TranscriptRead}, 11 | }; 12 | 13 | use super::super::{ChallengeX, ChallengeY}; 14 | use super::Argument; 15 | 16 | pub struct Committed { 17 | random_poly_commitment: C, 18 | } 19 | 20 | pub struct Constructed { 21 | h_commitments: Vec, 22 | random_poly_commitment: C, 23 | } 24 | 25 | pub struct PartiallyEvaluated { 26 | h_commitments: Vec, 27 | random_poly_commitment: C, 28 | random_eval: C::Scalar, 29 | } 30 | 31 | pub struct Evaluated { 32 | h_commitment: MSM, 33 | random_poly_commitment: C, 34 | expected_h_eval: C::Scalar, 35 | random_eval: C::Scalar, 36 | } 37 | 38 | impl Argument { 39 | pub(in crate::plonk) fn read_commitments_before_y< 40 | E: EncodedChallenge, 41 | T: TranscriptRead, 42 | >( 43 | transcript: &mut T, 44 | ) -> Result, Error> { 45 | let random_poly_commitment = transcript.read_point()?; 46 | 47 | Ok(Committed { 48 | random_poly_commitment, 49 | }) 50 | } 51 | } 52 | 53 | impl Committed { 54 | pub(in crate::plonk) fn read_commitments_after_y< 55 | E: EncodedChallenge, 56 | T: TranscriptRead, 57 | >( 58 | self, 59 | vk: &VerifyingKey, 60 | transcript: &mut T, 61 | ) -> Result, Error> { 62 | // Obtain a commitment to h(X) 
in the form of multiple pieces of degree n - 1 63 | let h_commitments = read_n_points(transcript, vk.domain.get_quotient_poly_degree())?; 64 | 65 | Ok(Constructed { 66 | h_commitments, 67 | random_poly_commitment: self.random_poly_commitment, 68 | }) 69 | } 70 | } 71 | 72 | impl Constructed { 73 | pub(in crate::plonk) fn evaluate_after_x, T: TranscriptRead>( 74 | self, 75 | transcript: &mut T, 76 | ) -> Result, Error> { 77 | let random_eval = transcript.read_scalar()?; 78 | 79 | Ok(PartiallyEvaluated { 80 | h_commitments: self.h_commitments, 81 | random_poly_commitment: self.random_poly_commitment, 82 | random_eval, 83 | }) 84 | } 85 | } 86 | 87 | impl PartiallyEvaluated { 88 | pub(in crate::plonk) fn verify( 89 | self, 90 | expressions: impl Iterator, 91 | y: ChallengeY, 92 | xn: C::Scalar, 93 | ) -> Evaluated { 94 | let expected_h_eval = expressions.fold(C::Scalar::zero(), |h_eval, v| h_eval * &*y + &v); 95 | let expected_h_eval = expected_h_eval * ((xn - C::Scalar::one()).invert().unwrap()); 96 | 97 | let h_commitment = 98 | self.h_commitments 99 | .iter() 100 | .rev() 101 | .fold(MSM::new(), |mut acc, commitment| { 102 | acc.scale(xn); 103 | acc.append_term(C::Scalar::one(), *commitment); 104 | acc 105 | }); 106 | 107 | Evaluated { 108 | expected_h_eval, 109 | h_commitment, 110 | random_poly_commitment: self.random_poly_commitment, 111 | random_eval: self.random_eval, 112 | } 113 | } 114 | } 115 | 116 | impl<'params, C: CurveAffine> Evaluated { 117 | pub(in crate::plonk) fn queries<'r>( 118 | &'r self, 119 | x: ChallengeX, 120 | ) -> impl Iterator> + Clone 121 | where 122 | 'params: 'r, 123 | { 124 | iter::empty() 125 | .chain(Some(VerifierQuery::new_msm( 126 | &self.h_commitment, 127 | *x, 128 | Rotation::cur(), 129 | self.expected_h_eval, 130 | ))) 131 | .chain(Some(VerifierQuery::new_commitment( 132 | &self.random_poly_commitment, 133 | *x, 134 | Rotation::cur(), 135 | self.random_eval, 136 | ))) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/msm.rs: -------------------------------------------------------------------------------- 1 | use crate::arithmetic::{best_multiexp, parallelize, CurveAffine, Engine}; 2 | use group::Curve; 3 | 4 | /// A multiscalar multiplication in the polynomial commitment scheme 5 | #[derive(Debug, Clone)] 6 | pub struct MSM { 7 | scalars: Vec, 8 | bases: Vec, 9 | } 10 | 11 | impl Default for MSM { 12 | fn default() -> Self { 13 | Self::new() 14 | } 15 | } 16 | 17 | impl<'a, C: CurveAffine> MSM { 18 | /// Create a new, empty MSM using the provided parameters. 
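// Editor's note — a hypothetical usage sketch (not part of the original source), using
// only the methods defined in this impl:
//
//     let mut msm: MSM<C> = MSM::new();
//     msm.append_term(scalar_a, point_a);
//     msm.append_term(scalar_b, point_b);
//     msm.scale(factor);             // multiplies every accumulated scalar by `factor`
//     let result: C = msm.eval();    // best_multiexp over all (scalar, base) pairs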
19 | pub fn new() -> Self { 20 | MSM { 21 | scalars: vec![], 22 | bases: vec![], 23 | } 24 | } 25 | 26 | /// Add another multiexp into this one 27 | pub fn add_msm(&mut self, other: &Self) { 28 | self.scalars.extend(other.scalars.iter()); 29 | self.bases.extend(other.bases.iter()); 30 | } 31 | 32 | /// Add arbitrary term (the scalar and the point) 33 | pub fn append_term(&mut self, scalar: C::Scalar, point: C) { 34 | self.scalars.push(scalar); 35 | self.bases.push(point); 36 | } 37 | 38 | /// Scale all scalars in the MSM by some scaling factor 39 | pub fn scale(&mut self, factor: C::Scalar) { 40 | if !self.scalars.is_empty() { 41 | parallelize(&mut self.scalars, |scalars, _| { 42 | for other_scalar in scalars { 43 | *other_scalar *= &factor; 44 | } 45 | }) 46 | } 47 | } 48 | 49 | /// Prepares all scalars in the MSM to linear combination 50 | pub fn combine_with_base(&mut self, base: C::Scalar) { 51 | use ff::Field; 52 | let mut acc = C::Scalar::one(); 53 | if !self.scalars.is_empty() { 54 | for scalar in self.scalars.iter_mut().rev() { 55 | *scalar *= &acc; 56 | acc *= base; 57 | } 58 | } 59 | } 60 | 61 | /// Perform multiexp and check that it results in zero 62 | pub fn eval(&self) -> C { 63 | best_multiexp(&self.scalars, &self.bases).into() 64 | } 65 | 66 | /// Check if eval is equal to identity 67 | pub fn check(self) -> bool { 68 | bool::from(self.eval().is_identity()) 69 | } 70 | } 71 | 72 | /// A guard returned by the verifier 73 | #[derive(Debug, Default)] 74 | pub struct PairMSM { 75 | left: MSM, 76 | right: MSM, 77 | } 78 | 79 | impl PairMSM { 80 | /// Create a new, with prepared two channel MSM 81 | pub fn with(left: MSM, right: MSM) -> Self { 82 | Self { left, right } 83 | } 84 | 85 | /// Perform multiexp on both channels 86 | pub fn eval(&self) -> (C, C) { 87 | (self.left.eval(), self.right.eval()) 88 | } 89 | 90 | /// Scale all scalars in the MSM by some scaling factor 91 | pub fn scale(&mut self, e: C::Scalar) { 92 | self.left.scale(e); 93 | self.right.scale(e); 94 | } 95 | 96 | /// Add another multiexp into this one 97 | pub fn add_msm(&mut self, other: Self) { 98 | self.left.add_msm(&other.left); 99 | self.right.add_msm(&other.right); 100 | } 101 | } 102 | 103 | #[derive(Debug, Clone)] 104 | pub struct ProjectiveMSM { 105 | scalars: Vec, 106 | bases: Vec, 107 | } 108 | 109 | impl<'a, E: Engine> ProjectiveMSM { 110 | /// Create a new, empty MSM using the provided parameters. 
111 | pub fn new() -> Self { 112 | ProjectiveMSM { 113 | scalars: vec![], 114 | bases: vec![], 115 | } 116 | } 117 | 118 | /// Add arbitrary term (the scalar and the point) 119 | pub fn append_term(&mut self, scalar: E::Scalar, point: E::G1) { 120 | self.scalars.push(scalar); 121 | self.bases.push(point); 122 | } 123 | 124 | /// Scale all scalars in the MSM by some scaling factor 125 | pub fn scale(&mut self, factor: E::Scalar) { 126 | if !self.scalars.is_empty() { 127 | parallelize(&mut self.scalars, |scalars, _| { 128 | for other_scalar in scalars { 129 | *other_scalar *= &factor; 130 | } 131 | }) 132 | } 133 | } 134 | 135 | /// Prepares all scalars in the MSM to linear combination 136 | pub fn combine_with_base(&mut self, base: E::Scalar) { 137 | use ff::Field; 138 | let mut acc = E::Scalar::one(); 139 | if !self.scalars.is_empty() { 140 | for scalar in self.scalars.iter_mut().rev() { 141 | *scalar *= &acc; 142 | acc *= base; 143 | } 144 | } 145 | } 146 | } 147 | 148 | /// A projective point collector 149 | #[derive(Debug, Clone)] 150 | pub struct PreMSM { 151 | projectives_msms: Vec>, 152 | } 153 | 154 | impl<'a, E: Engine> PreMSM { 155 | pub fn new() -> Self { 156 | PreMSM { 157 | projectives_msms: vec![], 158 | } 159 | } 160 | 161 | pub fn normalize(self) -> MSM { 162 | use group::prime::PrimeCurveAffine; 163 | 164 | let bases: Vec = self 165 | .projectives_msms 166 | .iter() 167 | .map(|msm| msm.bases.clone()) 168 | .collect::>>() 169 | .into_iter() 170 | .flatten() 171 | .collect(); 172 | 173 | let scalars: Vec = self 174 | .projectives_msms 175 | .iter() 176 | .map(|msm| msm.scalars.clone()) 177 | .collect::>>() 178 | .into_iter() 179 | .flatten() 180 | .collect(); 181 | 182 | let mut affine_bases = vec![E::G1Affine::identity(); bases.len()]; 183 | E::G1::batch_normalize(&bases[..], &mut affine_bases); 184 | MSM { 185 | scalars, 186 | bases: affine_bases, 187 | } 188 | } 189 | 190 | pub fn add_msm(&mut self, other: ProjectiveMSM) { 191 | self.projectives_msms.push(other); 192 | } 193 | 194 | /// Prepares all scalars in the MSM to linear combination 195 | pub fn combine_with_base(&mut self, base: E::Scalar) { 196 | use ff::Field; 197 | let mut acc = E::Scalar::one(); 198 | if !self.projectives_msms.is_empty() { 199 | for msm in self.projectives_msms.iter_mut().rev() { 200 | msm.scale(acc); 201 | acc *= base; 202 | } 203 | } 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/multiopen/gwc.rs: -------------------------------------------------------------------------------- 1 | mod prover; 2 | mod verifier; 3 | 4 | use super::Query; 5 | use crate::{ 6 | arithmetic::{eval_polynomial, CurveAffine, FieldExt}, 7 | poly::{ 8 | commitment::{Params, ParamsVerifier}, 9 | msm::MSM, 10 | Coeff, Polynomial, 11 | }, 12 | transcript::ChallengeScalar, 13 | }; 14 | 15 | use std::{ 16 | collections::{BTreeMap, BTreeSet}, 17 | marker::PhantomData, 18 | }; 19 | 20 | use crate::poly::Rotation; 21 | pub use prover::create_proof; 22 | pub use verifier::verify_proof; 23 | 24 | #[derive(Clone, Copy, Debug)] 25 | struct U {} 26 | type ChallengeU = ChallengeScalar; 27 | 28 | #[derive(Clone, Copy, Debug)] 29 | struct V {} 30 | type ChallengeV = ChallengeScalar; 31 | 32 | struct CommitmentData> { 33 | queries: Vec, 34 | point: F, 35 | _marker: PhantomData, 36 | } 37 | 38 | fn construct_intermediate_sets>(queries: I) -> Vec> 39 | where 40 | I: IntoIterator, 41 | { 42 | let mut point_query_map: BTreeMap> = BTreeMap::new(); 43 | for query 
in queries { 44 | if let Some(queries) = point_query_map.get_mut(&query.get_rotation()) { 45 | queries.push(query); 46 | } else { 47 | point_query_map.insert(query.get_rotation(), vec![query]); 48 | } 49 | } 50 | 51 | point_query_map 52 | .into_iter() 53 | .map(|(_, queries)| { 54 | let point = queries[0].get_point(); 55 | CommitmentData { 56 | queries, 57 | point, 58 | _marker: PhantomData, 59 | } 60 | }) 61 | .collect() 62 | } 63 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/multiopen/gwc/verifier.rs: -------------------------------------------------------------------------------- 1 | use super::{construct_intermediate_sets, ChallengeU, ChallengeV}; 2 | use crate::arithmetic::{eval_polynomial, lagrange_interpolate, CurveAffine, FieldExt}; 3 | use crate::poly::Rotation; 4 | use crate::poly::{ 5 | commitment::{Params, ParamsVerifier}, 6 | multiopen::{CommitmentReference, Query, VerifierQuery}, 7 | Error, PairMSM, MSM, 8 | }; 9 | use crate::transcript::{EncodedChallenge, TranscriptRead}; 10 | 11 | use ff::Field; 12 | use group::Group; 13 | use pairing::arithmetic::{MillerLoopResult, MultiMillerLoop}; 14 | use subtle::Choice; 15 | 16 | /// Verify a multi-opening proof 17 | pub fn verify_proof< 18 | 'r, 19 | 'params: 'r, 20 | I, 21 | C: MultiMillerLoop, 22 | E: EncodedChallenge, 23 | T: TranscriptRead, 24 | >( 25 | params: &'params ParamsVerifier, 26 | transcript: &mut T, 27 | queries: I, 28 | ) -> Result, Error> 29 | where 30 | I: IntoIterator> + Clone, 31 | { 32 | let v: ChallengeV<_> = transcript.squeeze_challenge_scalar(); 33 | let u: ChallengeU<_> = transcript.squeeze_challenge_scalar(); 34 | 35 | let commitment_data = construct_intermediate_sets(queries); 36 | 37 | let mut commitment_multi = params.empty_msm(); 38 | let mut eval_multi = C::Scalar::zero(); 39 | 40 | let mut witness = params.empty_msm(); 41 | let mut witness_with_aux = params.empty_msm(); 42 | 43 | for commitment_at_a_point in commitment_data.iter() { 44 | assert!(!commitment_at_a_point.queries.is_empty()); 45 | let z = commitment_at_a_point.point; 46 | 47 | let wi = transcript.read_point().map_err(|_| Error::SamplingError)?; 48 | 49 | witness_with_aux.scale(*u); 50 | witness_with_aux.append_term(z, wi); 51 | witness.scale(*u); 52 | witness.append_term(C::Scalar::one(), wi); 53 | commitment_multi.scale(*u); 54 | eval_multi = eval_multi * *u; 55 | 56 | let mut commitment_batch = params.empty_msm(); 57 | let mut eval_batch = C::Scalar::zero(); 58 | 59 | for query in commitment_at_a_point.queries.iter() { 60 | assert_eq!(query.get_point(), z); 61 | 62 | let commitment = query.get_commitment(); 63 | let eval = query.get_eval(); 64 | 65 | commitment_batch.scale(*v); 66 | match commitment { 67 | CommitmentReference::Commitment(c) => { 68 | commitment_batch.append_term(C::Scalar::one(), *c); 69 | } 70 | CommitmentReference::MSM(msm) => { 71 | commitment_batch.add_msm(msm); 72 | } 73 | } 74 | 75 | eval_batch = eval_batch * *v + eval; 76 | } 77 | 78 | commitment_multi.add_msm(&commitment_batch); 79 | eval_multi += eval_batch; 80 | } 81 | 82 | let mut left = params.empty_msm(); 83 | left.add_msm(&witness); 84 | 85 | let mut right = params.empty_msm(); 86 | right.add_msm(&witness_with_aux); 87 | right.add_msm(&commitment_multi); 88 | right.append_term(eval_multi, -params.g1); 89 | 90 | Ok(PairMSM::with(left, right)) 91 | } 92 | -------------------------------------------------------------------------------- /halo2_proofs/src/poly/multiopen/shplonk/verifier.rs: 
-------------------------------------------------------------------------------- 1 | use super::{construct_intermediate_sets, ChallengeU, ChallengeV, ChallengeY}; 2 | use crate::arithmetic::{ 3 | eval_polynomial, evaluate_vanishing_polynomial, lagrange_interpolate, CurveAffine, Engine, 4 | FieldExt, MillerLoopResult, MultiMillerLoop, 5 | }; 6 | use crate::poly::{ 7 | commitment::{Params, ParamsVerifier}, 8 | msm::{PairMSM, PreMSM, ProjectiveMSM, MSM}, 9 | multiopen::{CommitmentReference, Query, VerifierQuery}, 10 | Rotation, {Coeff, Error, Polynomial}, 11 | }; 12 | use crate::transcript::{EncodedChallenge, TranscriptRead}; 13 | 14 | use ff::Field; 15 | use group::prime::PrimeCurveAffine; 16 | use group::{Curve, Group}; 17 | use rand::rngs::OsRng; 18 | use std::marker::PhantomData; 19 | use std::ops::MulAssign; 20 | use subtle::Choice; 21 | 22 | /// Verify a multi-opening proof 23 | pub fn verify_proof< 24 | 'r, 25 | 'params: 'r, 26 | I, 27 | C: MultiMillerLoop, 28 | E: EncodedChallenge, 29 | T: TranscriptRead, 30 | >( 31 | params: &'params ParamsVerifier, 32 | transcript: &mut T, 33 | queries: I, 34 | ) -> Result, Error> 35 | where 36 | I: IntoIterator> + Clone, 37 | { 38 | let intermediate_sets = construct_intermediate_sets(queries); 39 | let (rotation_sets, super_point_set) = ( 40 | intermediate_sets.rotation_sets, 41 | intermediate_sets.super_point_set, 42 | ); 43 | 44 | let y: ChallengeY<_> = transcript.squeeze_challenge_scalar(); 45 | let v: ChallengeV<_> = transcript.squeeze_challenge_scalar(); 46 | 47 | let h1 = transcript.read_point().map_err(|_| Error::SamplingError)?; 48 | let u: ChallengeU<_> = transcript.squeeze_challenge_scalar(); 49 | let h2 = transcript.read_point().map_err(|_| Error::SamplingError)?; 50 | 51 | let (mut z_0_diff_inverse, mut z_0) = (C::Scalar::zero(), C::Scalar::zero()); 52 | let (mut outer_msm, mut r_outer_acc) = (PreMSM::::new(), C::Scalar::zero()); 53 | for (i, rotation_set) in rotation_sets.iter().enumerate() { 54 | let diffs: Vec = super_point_set 55 | .iter() 56 | .filter(|point| !rotation_set.points.contains(point)) 57 | .copied() 58 | .collect(); 59 | let mut z_diff_i = evaluate_vanishing_polynomial(&diffs[..], *u); 60 | 61 | // normalize coefficients by the coefficient of the first commitment 62 | if i == 0 { 63 | z_0 = evaluate_vanishing_polynomial(&rotation_set.points[..], *u); 64 | z_0_diff_inverse = z_diff_i.invert().unwrap(); 65 | z_diff_i = C::Scalar::one(); 66 | } else { 67 | z_diff_i.mul_assign(z_0_diff_inverse); 68 | } 69 | 70 | let (mut inner_msm, mut r_inner_acc) = (ProjectiveMSM::new(), C::Scalar::zero()); 71 | for commitment_data in rotation_set.commitments.iter() { 72 | // calculate low degree equivalent 73 | let r_x = lagrange_interpolate(&rotation_set.points[..], &commitment_data.evals()[..]); 74 | let r_eval = eval_polynomial(&r_x[..], *u); 75 | r_inner_acc = (*y * r_inner_acc) + r_eval; 76 | 77 | let inner_contrib = match commitment_data.get() { 78 | CommitmentReference::Commitment(c) => c.to_curve(), 79 | // TODO: we should support one more nested degree to append 80 | // folded commitments to the inner_msm 81 | CommitmentReference::MSM(msm) => msm.eval().to_curve(), 82 | }; 83 | inner_msm.append_term(C::Scalar::one(), inner_contrib); 84 | } 85 | r_outer_acc = (*v * r_outer_acc) + (r_inner_acc * z_diff_i); 86 | 87 | inner_msm.combine_with_base(*y); 88 | inner_msm.scale(z_diff_i); 89 | outer_msm.add_msm(inner_msm); 90 | } 91 | outer_msm.combine_with_base(*v); 92 | let mut outer_msm = outer_msm.normalize(); 93 | 
outer_msm.append_term(-r_outer_acc, params.g1); 94 | outer_msm.append_term(-z_0, h1); 95 | outer_msm.append_term(*u, h2); 96 | 97 | let mut left = params.empty_msm(); 98 | left.append_term(C::Scalar::one(), h2); 99 | 100 | let mut right = params.empty_msm(); 101 | right.add_msm(&outer_msm); 102 | 103 | Ok(PairMSM::with(left, right)) 104 | } 105 | -------------------------------------------------------------------------------- /katex-header.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly-2023-06-01 --------------------------------------------------------------------------------