├── .github
└── workflows
│ ├── build.yml
│ ├── clippy.yml
│ ├── mainpod-circuit-info-publish.yml
│ ├── mdbook-check.yml
│ ├── mdbook-publish.yml
│ ├── rustfmt.yml
│ ├── scripts
│ ├── mainpod-circuit-info-table.sh
│ └── mainpod-circuit-info.sh
│ ├── tests.yml
│ ├── typos.toml
│ └── typos.yml
├── .gitignore
├── Cargo.toml
├── README.md
├── book
├── .gitignore
├── book.toml
└── src
│ ├── SUMMARY.md
│ ├── anchoredkeys.md
│ ├── architecture.md
│ ├── backendtypes.md
│ ├── custom.md
│ ├── custom2.md
│ ├── customexample.md
│ ├── customhash.md
│ ├── custompred.md
│ ├── deductions.md
│ ├── examples.md
│ ├── front_and_back.md
│ ├── img
│ ├── SignedPod.png
│ ├── frontend-backend.png
│ ├── introductionpod-2-steps-ecdsa-example.png
│ ├── introductionpod-2-steps.png
│ ├── introductionpod-mainpod.png
│ ├── merkletree-example-1-a.png
│ ├── merkletree-example-1-b.png
│ ├── merkletree-example-2-a.png
│ └── merkletree-example-2-b.png
│ ├── introduction.md
│ ├── introductionpods.md
│ ├── mainpod.md
│ ├── merklestatements.md
│ ├── merkletree.md
│ ├── operations.md
│ ├── signature.md
│ ├── simpleexample.md
│ ├── statements.md
│ └── values.md
├── build.rs
├── examples
├── main_pod_points.rs
└── signed_dict.rs
├── rust-analyzer.toml
├── rust-toolchain.toml
├── rustfmt.toml
└── src
├── backends
├── mod.rs
└── plonky2
│ ├── basetypes.rs
│ ├── circuits
│ ├── common.rs
│ ├── hash.rs
│ ├── mainpod.rs
│ ├── metrics.rs
│ ├── mod.rs
│ ├── mux_table.rs
│ └── utils.rs
│ ├── emptypod.rs
│ ├── error.rs
│ ├── mainpod
│ ├── mod.rs
│ ├── operation.rs
│ └── statement.rs
│ ├── mock
│ ├── emptypod.rs
│ ├── mainpod.rs
│ └── mod.rs
│ ├── mod.rs
│ ├── primitives
│ ├── ec
│ │ ├── bits.rs
│ │ ├── curve.rs
│ │ ├── field.rs
│ │ ├── gates
│ │ │ ├── curve.rs
│ │ │ ├── field.rs
│ │ │ └── mod.rs
│ │ ├── mod.rs
│ │ └── schnorr.rs
│ ├── merkletree
│ │ ├── circuit.rs
│ │ ├── error.rs
│ │ └── mod.rs
│ ├── mod.rs
│ └── signature
│ │ ├── circuit.rs
│ │ └── mod.rs
│ ├── recursion
│ ├── circuit.rs
│ └── mod.rs
│ ├── serialization.rs
│ └── signer.rs
├── bin
└── mainpod_circuit_info.rs
├── cache
├── disk.rs
├── mem.rs
└── mod.rs
├── examples
├── custom.rs
└── mod.rs
├── frontend
├── custom.rs
├── error.rs
├── mod.rs
├── operation.rs
├── pod_request.rs
└── serialization.rs
├── lang
├── error.rs
├── grammar.pest
├── mod.rs
├── parser.rs
├── pretty_print.rs
└── processor.rs
├── lib.rs
└── middleware
├── basetypes.rs
├── containers.rs
├── custom.rs
├── error.rs
├── mod.rs
├── operation.rs
├── pod_deserialization.rs
├── serialization.rs
└── statement.rs
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Rust Build with features
2 |
3 | on:
4 | pull_request:
5 | branches: [ main ]
6 | types: [ready_for_review, opened, synchronize, reopened]
7 | push:
8 | branches: [ main ]
9 |
10 | jobs:
11 | test:
12 | if: github.event.pull_request.draft == false
13 | name: Rust tests
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v4
17 | - name: Set up Rust
18 | uses: actions-rust-lang/setup-rust-toolchain@v1
19 | - name: Build default
20 | run: cargo build
21 | - name: Build non-zk # check without the zk feature enabled
22 | run: cargo build --no-default-features --features backend_plonky2,mem_cache
23 | - name: Build metrics
24 | run: cargo build --features metrics
25 | - name: Build time
26 | run: cargo build --features time
27 | - name: Build disk_cache
28 | run: cargo build --no-default-features --features backend_plonky2,zk,disk_cache
29 |
30 |
--------------------------------------------------------------------------------
/.github/workflows/clippy.yml:
--------------------------------------------------------------------------------
1 | name: Clippy Check
2 |
3 | on:
4 | pull_request:
5 | branches: [ main ]
6 | types: [ready_for_review, opened, synchronize, reopened]
7 | push:
8 | branches: [ main ]
9 |
10 | jobs:
11 | clippy:
12 | if: github.event.pull_request.draft == false
13 | name: Clippy lints
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v4
17 | - uses: actions-rust-lang/setup-rust-toolchain@v1
18 | with:
19 | components: clippy
20 | - name: Check lints with clippy
21 | run: cargo clippy --tests --examples
22 |
--------------------------------------------------------------------------------
/.github/workflows/mainpod-circuit-info-publish.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Publish MainPod circuit info
3 |
4 | on:
5 | push:
6 | branches: ["main"]
7 |
8 | # Allows to run this workflow manually from the Actions tab
9 | workflow_dispatch:
10 |
11 | concurrency:
12 | group: wiki
13 | cancel-in-progress: true
14 |
15 | permissions:
16 | contents: write
17 |
18 | jobs:
19 | wiki:
20 | name: Update Wiki with new MainPod circuit info
21 | runs-on: ubuntu-latest
22 | steps:
23 | - name: Checkout Code
24 | uses: actions/checkout@v4
25 | with:
26 | repository: ${{github.repository}}
27 | path: ${{github.repository}}
28 |
29 | - name: Checkout Wiki
30 | uses: actions/checkout@v4
31 | with:
32 | repository: ${{github.repository}}.wiki
33 | path: ${{github.repository}}.wiki
34 |
35 | - name: Set up Rust
36 | uses: actions-rust-lang/setup-rust-toolchain@v1
37 |
38 | - name: Push to wiki
39 | run: |
40 | set -e
41 |
42 | cd $GITHUB_WORKSPACE/${{github.repository}}
43 | table_entry=$(.github/workflows/scripts/mainpod-circuit-info-table.sh)
44 | params=$(.github/workflows/scripts/mainpod-circuit-info.sh params)
45 | data=$(.github/workflows/scripts/mainpod-circuit-info.sh circuit-info)
46 | params_hash=$(echo "${data}" | jq --raw-output .params_hash)
47 |
48 | cd $GITHUB_WORKSPACE/${{github.repository}}.wiki
49 | git config --local user.email "action@github.com"
50 | git config --local user.name "GitHub Action"
51 | mkdir -p params
52 | echo "$params" > params/${params_hash}.json
53 | echo "$table_entry" >> MainPod-circuit-info.md
54 | git add .
55 | git diff-index --quiet HEAD || { git commit -m "action: update MainPod-circuit-info" && git push; }
56 |
--------------------------------------------------------------------------------
/.github/workflows/mdbook-check.yml:
--------------------------------------------------------------------------------
1 | name: Check mdbook compilation
2 |
3 | on:
4 | pull_request:
5 | branches: [ main ]
6 | types: [ready_for_review, opened, synchronize, reopened]
7 | push:
8 | branches:
9 | - main
10 |
11 | jobs:
12 | compile:
13 | if: github.event.pull_request.draft == false
14 | runs-on: ubuntu-latest
15 | env:
16 | MDBOOK_VERSION: 0.4.40
17 | MDBOOKKATEX_VERSION: 0.7.0
18 | steps:
19 | - uses: actions/checkout@v4
20 | - name: Install mdBook
21 | run: |
22 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
23 | rustup update
24 | rustup toolchain install nightly-x86_64-unknown-linux-gnu
25 | cargo install --version ${MDBOOK_VERSION} mdbook
26 | cargo install --version ${MDBOOKKATEX_VERSION} mdbook-katex
27 | - name: Build with mdBook
28 | run: |
29 | cd book
30 | mdbook build
31 | - name: Check build result
32 | run: |
33 | if [ -d "book/book" ]; then
34 | echo "mdBook compilation success"
35 | else
36 | echo "mdBook compilation fail"
37 | exit 1
38 | fi
39 |
--------------------------------------------------------------------------------
/.github/workflows/mdbook-publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish mdbook
2 | on:
3 | push:
4 | branches: ["main"]
5 |
6 | # Allows to run this workflow manually from the Actions tab
7 | workflow_dispatch:
8 |
9 | # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
10 | permissions:
11 | contents: read
12 | pages: write
13 | id-token: write
14 |
15 | # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
16 | # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
17 | concurrency:
18 | group: "pages"
19 | cancel-in-progress: false
20 |
21 | jobs:
22 | build:
23 | runs-on: ubuntu-latest
24 | env:
25 | MDBOOK_VERSION: 0.4.40
26 | MDBOOKKATEX_VERSION: 0.7.0
27 | steps:
28 | - uses: actions/checkout@v4
29 | - name: Install mdBook
30 | run: |
31 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
32 | rustup update
33 | rustup toolchain install nightly-x86_64-unknown-linux-gnu
34 | cargo install --version ${MDBOOK_VERSION} mdbook
35 | cargo install --version ${MDBOOKKATEX_VERSION} mdbook-katex
36 | - name: Setup Pages
37 | id: pages
38 | uses: actions/configure-pages@v4
39 | - name: Build with mdBook
40 | run: |
41 | cd book
42 | mdbook build
43 | - name: Upload artifact
44 | uses: actions/upload-pages-artifact@v3
45 | with:
46 | path: ./book/book
47 |
48 | deploy:
49 | environment:
50 | name: github-pages
51 | url: ${{ steps.deployment.outputs.page_url }}
52 | runs-on: ubuntu-latest
53 | needs: build
54 | steps:
55 | - name: Deploy to GitHub Pages
56 | id: deployment
57 | uses: actions/deploy-pages@v4
58 |
--------------------------------------------------------------------------------
/.github/workflows/rustfmt.yml:
--------------------------------------------------------------------------------
1 | name: Rustfmt Check
2 |
3 | on:
4 | pull_request:
5 | branches: [ main ]
6 | types: [ready_for_review, opened, synchronize, reopened]
7 | push:
8 | branches: [ main ]
9 |
10 | jobs:
11 | rustfmt:
12 | if: github.event.pull_request.draft == false
13 | name: Rust formatting
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v4
17 | - uses: actions-rust-lang/setup-rust-toolchain@v1
18 | with:
19 | components: rustfmt
20 | - name: Check formatting
21 | uses: actions-rust-lang/rustfmt@v1
22 |
--------------------------------------------------------------------------------
/.github/workflows/scripts/mainpod-circuit-info-table.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | # Generate the markdown table entry of the MainPod circuit information
6 |
7 | scripts_dir=$(dirname "$0") # directory containing this script, so the sibling script resolves regardless of CWD
8 | data=$("${scripts_dir}"/mainpod-circuit-info.sh circuit-info)
9 | date=$(date --utc --iso-8601=minutes)
10 | commit=$(git rev-parse HEAD)
11 | params_hash=$(echo "$data" | jq --raw-output .params_hash)
12 | verifier_hash=$(echo "$data" | jq --raw-output .verifier_hash)
13 | common_hash=$(echo "$data" | jq --raw-output .common_hash)
14 | echo "| $date | [\`${commit}\`](https://github.com/0xPARC/pod2/commit/${commit}) | [\`${params_hash}\`](https://raw.githubusercontent.com/wiki/0xPARC/pod2/params/${params_hash}.json) | \`${verifier_hash}\` | \`${common_hash}\` |"
15 |
--------------------------------------------------------------------------------
/.github/workflows/scripts/mainpod-circuit-info.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | cargo run --release --no-default-features --features=zk,backend_plonky2,disk_cache --bin mainpod_circuit_info -- "$1"
6 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | name: Rust Tests
2 |
3 | on:
4 | pull_request:
5 | branches: [ main ]
6 | types: [ready_for_review, opened, synchronize, reopened]
7 | push:
8 | branches: [ main ]
9 |
10 | jobs:
11 | test:
12 | if: github.event.pull_request.draft == false
13 | name: Rust tests
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v4
17 | - name: Set up Rust
18 | uses: actions-rust-lang/setup-rust-toolchain@v1
19 | - name: Run tests
20 | run: cargo test --release
21 |
--------------------------------------------------------------------------------
/.github/workflows/typos.toml:
--------------------------------------------------------------------------------
1 | [default.extend-words]
2 | groth = "groth" # to avoid it detecting it as 'growth'
3 | BA = "BA"
4 | Ded = "Ded" # "ANDed", it thought "Ded" should be "Dead"
5 | OT = "OT"
6 | aks = "aks" # anchored keys
7 | nin = "nin" # not in
8 | kow = "kow" # key or wildcard
9 | KOW = "KOW" # Key Or Wildcard
10 | datas = "datas" # plural (for 'verifier_datas', a vector of 'verifier_data')
11 | typ = "typ" # from 'type', which can not be used as variable name since it is a keyword
12 |
--------------------------------------------------------------------------------
/.github/workflows/typos.yml:
--------------------------------------------------------------------------------
1 | name: typos
2 | on:
3 | pull_request:
4 | branches: [ main ]
5 | types: [ready_for_review, opened, synchronize, reopened]
6 | push:
7 | branches:
8 | - main
9 |
10 | jobs:
11 | typos:
12 | if: github.event.pull_request.draft == false
13 | name: Spell Check with Typos
14 | runs-on: ubuntu-latest
15 | steps:
16 | - uses: actions/checkout@v4
17 | - name: Use typos with config file
18 | uses: crate-ci/typos@master
19 | with:
20 | config: .github/workflows/typos.toml
21 |
22 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | Cargo.lock
3 | .DS_Store
4 | aardnotes.md
5 | notes
6 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "pod2"
3 | version = "0.1.0"
4 | edition = "2021"
5 | build = "build.rs"
6 |
7 | [lib]
8 | name = "pod2"
9 | path = "src/lib.rs"
10 |
11 | [dependencies]
12 | hex = "0.4.3"
13 | itertools = "0.14.0"
14 | strum = "0.26"
15 | strum_macros = "0.26"
16 | anyhow = "1.0.56"
17 | dyn-clone = "1.0.18"
18 | log = "0.4"
19 | env_logger = "0.11"
20 | lazy_static = "1.5.0"
21 | thiserror = { version = "2.0.12" }
22 | # enabled by features:
23 | plonky2 = { git = "https://github.com/0xPARC/plonky2.git", rev = "109d517d09c210ae4c2cee381d3e3fbc04aa3812", optional = true }
24 | plonky2_u32 = { git = "https://github.com/ax0/plonky2-u32", rev = "cb8e2d9681eb06d069157edbb5ec1d05038611c4" }
25 | serde = "1.0.219"
26 | serde_json = "1.0.140"
27 | base64 = "0.22.1"
28 | bs58 = "0.5.1"
29 | schemars = "0.8.22"
30 | num = { version = "0.4.3", features = ["num-bigint"] }
31 | num-bigint = { version = "0.4.6", features = ["rand"] }
32 | # num-bigint 0.4 requires rand 0.8
33 | rand = "0.8.5"
34 | hashbrown = { version = "0.14.3", default-features = false, features = ["serde"] }
35 | pest = "2.8.0"
36 | pest_derive = "2.8.0"
37 | directories = { version = "6.0.0", optional = true }
38 | minicbor-serde = { version = "0.5.0", features = ["std"], optional = true }
39 | serde_bytes = "0.11"
40 | serde_arrays = "0.2.0"
41 | sha2 = { version = "0.10.9" }
42 | rand_chacha = "0.3.1"
43 |
44 | # Uncomment for debugging with https://github.com/ed255/plonky2/ at branch `feat/debug`. The repo directory needs to be checked out next to the pod2 repo directory.
45 | # [patch."https://github.com/0xPARC/plonky2"]
46 | # plonky2 = { path = "../plonky2/plonky2" }
47 |
48 | [dev-dependencies]
49 | pretty_assertions = "1.4.1"
50 | # Used only for testing JSON Schema generation and validation.
51 | jsonschema = "0.30.0"
52 |
53 | [build-dependencies]
54 | vergen-gitcl = { version = "1.0.0", features = ["build"] }
55 |
56 | [features]
57 | default = ["backend_plonky2", "zk", "mem_cache"]
58 | backend_plonky2 = ["plonky2"]
59 | zk = []
60 | metrics = []
61 | time = []
62 | examples = []
63 | disk_cache = ["directories", "minicbor-serde"]
64 | mem_cache = []
65 |
66 | # Uncomment in order to enable debug information in the release builds. This allows getting panic backtraces with a performance similar to regular release.
67 | # [profile.release]
68 | # debug = true
69 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # POD2
2 |
3 | ## Usage
4 | - Run tests: `cargo test --release`
5 | - Rustfmt: `cargo fmt`
6 | - Check [typos](https://github.com/crate-ci/typos): `typos -c .github/workflows/typos.toml`
7 |
8 | ## Book
9 | The `book` contains the specification of POD2. A rendered version of the site can be found at: https://0xparc.github.io/pod2/
10 |
11 | To run it locally:
12 | - Requirements
13 | - [mdbook](https://github.com/rust-lang/mdBook): `cargo install mdbook`
14 | - [mdbook-katex](https://github.com/lzanini/mdbook-katex): `cargo install mdbook-katex`
15 | - Go to the book directory: `cd book`
16 | - Run the mdbook: `mdbook serve`
17 |
--------------------------------------------------------------------------------
/book/.gitignore:
--------------------------------------------------------------------------------
1 | book
2 |
--------------------------------------------------------------------------------
/book/book.toml:
--------------------------------------------------------------------------------
1 | [book]
2 | authors = ["0xPARC"]
3 | language = "en"
4 | multilingual = false
5 | src = "src"
6 | title = "POD2-docs"
7 |
8 | [output.html]
9 | default-theme = "light"
10 |
11 | [preprocessor.katex]
12 | after = ["links"]
13 |
--------------------------------------------------------------------------------
/book/src/SUMMARY.md:
--------------------------------------------------------------------------------
1 | # Summary
2 |
3 | - [Introduction](./introduction.md)
4 |
5 | # Specification
6 | - [Front and back end](./front_and_back.md)
7 | - [The frontend structure of a POD]()
8 | - [Frontend POD value types](./values.md)
9 | - [Anchored keys](./anchoredkeys.md)
10 | - [The backend structure of a POD]()
11 | - [Backend types](./backendtypes.md)
12 | - [MerkleTree](./merkletree.md)
13 | - [Signature](./signature.md)
14 | - [Deductions](./deductions.md)
15 | - [Statements](./statements.md)
16 | - [Operations](./operations.md)
17 | - [Simple example](./simpleexample.md)
18 | - [Custom statements and custom operations](./custom.md)
19 | - [Defining custom predicates](./custompred.md)
20 | - [Custom statement example](./customexample.md)
21 | - [How to hash a custom statement](./customhash.md)
22 | - [MainPOD](./mainpod.md)
23 | - [Introduction PODs](./introductionpods.md)
24 | - [Examples](./examples.md)
25 |
26 | # Architecture
27 | - [Architecture](./architecture.md)
28 |
--------------------------------------------------------------------------------
/book/src/anchoredkeys.md:
--------------------------------------------------------------------------------
1 | # Anchored keys
2 | Rather than dealing with just keys, we introduce the notion of an *anchored key*, which is a pair consisting of a dictionary specifier and a key, i.e.
3 |
4 | ```
5 | type AnchoredKey = (Dict, Key)
6 | type Key = String
7 | ```
8 |
9 | Statements can use anchored keys or literal values as arguments. Since our
10 | system uses constructive logic, if a statement that uses an anchored key in
11 | some of its arguments is proved, it means that a valid Merkle proof of the
12 | value behind it exists and was used at some point to construct a `Contains`
13 | statement that introduced that anchored key.
14 |
15 | In PODLang, anchored key indexing can use subscript syntax `foo["bar"]` which
16 | allows any string key, or dot syntax `foo.bar` if the key is a valid identifier.
17 |
18 | For example:
19 | ```
20 | 0: None
21 | 1: Contains(foo, "bar", 42) <- ContainsFromEntries 0 0 0 mt_proof
22 | 2: Lt(foo["bar"], 100) <- LtFromEntries 1 0
23 | 3: NotEqual(foo.bar, 100) <- LtToNotEqual 2
24 | ```
25 |
--------------------------------------------------------------------------------
/book/src/architecture.md:
--------------------------------------------------------------------------------
1 | # Architecture
2 |
3 | This document explains the architecture of the current implementation.
4 |
5 | The main logic of the POD2 implementation is divided into three modules:
6 | - frontend
7 | - compiles user-friendly pod declarations into intermediate representations to be consumed by the backend
8 | - internally connects to the backend to get pods built (proved).
9 | - presents pods to the user
10 | - middleware
11 | - defines the intermediate representation of Statements, Operations and interfaces of PODs
12 | - Statements and Operations are strongly typed here
13 | - Both frontend and backend use types defined in the middleware
14 | - Does not import types from frontend nor backend
15 | - backend
16 | - takes a middleware POD request representation, signs/proves it and returns a generic POD object
17 |
18 | If this was the Rust language compiler:
19 | - frontend: takes a Rust code and compiles it to LLVM-IR
20 | - middleware: defines LLVM-IR instructions and blocks
21 | - backend: Takes LLVM-IR instructions and emits assembly code for a particular CPU
22 |
23 | The following diagram shows visually how the components interact with each other:
24 |
25 | 
26 |
27 | In this organization, the middleware could be defined at arbitrary points:
28 | - closer to the user would be more high level
29 | - closer to the target would be more low level
30 |
31 | All these positions are OK. We just need to choose one, and we can try to choose a point that simplifies the implementation.
32 |
33 | For example in the middleware we could define `Value = 4 x Goldilock` (making it slightly low level); or `Value = BigUint` and letting the backend choose the maximum representable value, the field encoding, etc. (making it slightly higher level).
34 |
35 | In the current iteration we choose `Value = 4 x Goldilock`, but we can revisit it in a future iteration (eg. if we want to support plonky3) by either moving the middleware to a higher level, or by keeping it the same and replacing the `Value` definition.
36 |
37 | The diagram above includes an arrow that would show the typical flow followed by a user making a POD. This is a simplified description of the process.
38 | 1. The user interacts with the frontend API and passes a list of Operations. The frontend takes those operations and generates the corresponding Statements. The list of Operations and Statements are transformed into middleware types. This process can be seen as a compilation step. The frontend sends this middleware data as a request to the Backend.
39 | 2. The backend receives a request to build a POD from a list of Statements and Operations. It takes that bundle of data and lays it out in the appropriate format to be proved by a circuit, padding unused slots, etc. Then it calls a proof system API to generate a proof.
40 | 3. The target (proof system) generates a proof from some circuit description and witness data and gives it back to the backend.
41 | 4. The backend receives the proof and encapsulates it in an object that adheres to the Pod trait and passes it to the frontend
42 | 5. The frontend receives a "blackbox" Pod object and wraps it in a presentation layer in order to show it to the user.
43 |
--------------------------------------------------------------------------------
/book/src/backendtypes.md:
--------------------------------------------------------------------------------
1 | # Backend types
2 |
3 | On the backend, there is only a single type: `Value`.
4 |
5 | A `Value` is simply a tuple of field elements. With the plonky2 backend, a `Value` is a tuple of 4 field elements. In general, the backend will expose a constant `VALUE_SIZE`, and a `Value` will be a tuple of `VALUE_SIZE` field elements.
6 |
7 | ## Integers and booleans
8 |
9 | The backend encoding stores integers in such a way that arithmetic operations (addition, multiplication, comparison) are inexpensive to verify in-circuit.
10 |
11 | In the case of the Plonky2 backend, an integer $x$ is decomposed as
12 | $$x = x_0 + x_1 \cdot 2^{32}$$
13 | with $0 \leq x_0, x_1 < 2^{32}$ and represented as
14 | $$\texttt{map}\ \iota\ [x_0, x_1, 0, 0],$$
15 | where $\iota:\mathbb{N}\cup\{0\}\rightarrow\texttt{GoldilocksField}$ is the canonical projection.
16 |
17 | On the backend, a boolean is stored as an integer, either 0 or 1; so logical operations on booleans are also inexpensive.
18 |
19 | ## Strings
20 |
21 | The backend encoding stores strings as hashes, using a hash function that might not be zk-friendly. For this reason, string operations (substrings, accessing individual characters) are hard to verify in-circuit. The POD2 system does not provide methods for manipulating strings.
22 |
23 | In other words: As POD2 sees it, two strings are either equal or not equal. There are no other relationships between strings.
24 |
25 | In the case of the Plonky2 backend, a string is converted to a sequence of bytes with the byte `0x01` appended as padding, then the bytes are split into 7-byte chunks starting from the left, these chunks then being interpreted as integers in little-endian form, each of which is naturally an element of `GoldilocksField`, whence the resulting sequence may be hashed via the Poseidon hash function. Symbolically, given a string $s$, its hash is defined by
26 |
27 | $$\texttt{poseidon}(\texttt{map}\ (\iota\circ\jmath_\texttt{le-bytes->int})\ \texttt{chunks}_7(\jmath_\texttt{string->bytes}(s)\ \texttt{++}\ [\texttt{0x01}])),$$
28 |
29 | where `poseidon` is the Poseidon instance used by Plonky2, $\iota$ is as above, $\texttt{chunks}_{n}:[\texttt{u8}]\rightarrow [[\texttt{u8}]]$ is defined such that[^aux]
30 |
31 | $$\texttt{chunks}_n(v) = \textup{if}\ v = [\ ]\ \textup{then}\ [\ ]\ \textup{else}\ [\texttt{take}_n v]\ \texttt{++}\ \texttt{chunks}_n(\texttt{drop}_n v),$$
32 |
33 | the mapping $\jmath_\texttt{le-bytes->int}: [\texttt{u8}] \rightarrow \mathbb{N}\cup\{0\}$ is given by
34 |
35 | $$[b_0,\dots,b_{N-1}]\mapsto \sum_{i=0}^{N-1} b_i \cdot 2^{8i},$$
36 |
37 | and $\jmath_\texttt{string->bytes}$ is the canonical mapping of a string to its UTF-8 representation.
38 |
39 | ## Compound types
40 |
41 | The three front-end compound types (`Dictionary`, `Array`, `Set`) are all represented as Merkle roots on the backend. The details of the representation are explained on a separate [Merkle tree](./merkletree.md) page.
--------------------------------------------------------------------------------
/book/src/custom.md:
--------------------------------------------------------------------------------
1 | # Custom statements and custom operations
2 |
3 | Users of the POD system can introduce _custom predicates_ (previously called _custom statements_) to express complex logical relations not available in the built-in predicates. Every custom predicate is defined as the conjunction (AND) or disjunction (OR) of a small number of other statements.
4 |
5 | When a custom predicate is introduced in a MainPod, it becomes available for use in that POD and all PODs that inherit[^inherit] from it.
6 |
7 | On the frontend, a custom predicate is defined as a collection of conjunctions and disjunctions of statements. The definition can be recursive: the definition of a predicate can involve the predicate itself, or the definitions of several predicates can depend on each other.
8 |
9 | At the backend level, every definition of a predicate is either a conjunction or a disjunction of statements. To convert a frontend custom predicate to the backend, the middleware may need to introduce _sub-predicates_.
10 |
11 | On the backend, custom predicates are defined in _groups_. A group can contain one or more custom predicates and their associated sub-predicates. Recursive definition is only possible within a group: the definition of a predicate in a group can only depend on previously existing predicates, itself, and other predicates in the same group.
12 |
13 | ## Arguments of custom predicates
14 |
15 | The definition of a custom predicate might also be called an _operation_ or _deduction rule_. It includes two (or, potentially, say, five) statement templates as conditions. The arguments to the statement templates are decomposed as (origin, key) pairs: if statements are allowed to have arity at most 4, then the statement templates in a deduction rule will have at most 8 arguments (4 origins and 4 keys). The same holds for the output statement.
16 |
17 | Each argument (origin or key) to an statement template is either a wildcard or a literal. In the backend, the wildcard arguments will be identified as ?1, ?2, ?3, ....
18 |
19 | ## Examples
20 |
21 | See [examples](./customexample.md)
22 |
23 | ## Hashing and predicate IDs
24 |
25 | Each custom predicate is defined as part of a _group_ of predicates. The definitions of all statements in the group are laid out consecutively (see [examples](./customexample.md)) and hashed. For more details, see the pages on [hashing custom statements](./customhash.md) and [custom predicates](./custompred.md).
26 |
27 | ## How to prove an application of an operation
28 |
29 | The POD proof format is inspired by "two-column proofs" (for an example, see [Wikipedia](https://en.wikipedia.org/wiki/Mathematical_proof)). A POD contains a "tabular proof", in which each row includes a "statement" and an "operation". The "operation" is the "reason" that justifies the statement: it is everything the circuit needs as a witness to verify the statement.
30 |
31 | For a custom statement, the "reason" includes the following witnesses and verifications:
32 | - the definition of the statement, serialized (see [examples](./customexample.md))
33 | - if the statement is part of a group, the definition of the full group, serialized
34 | - verify that the hash of the definition is the statement ID
35 | - the definition will have some number of "wildcards" (?1, ?2, ...) as arguments to statement templates; a value for each wildcard must be provided as a witness (each will be either an origin ID or key)
36 | - the circuit must substitute the claimed values for the wildcards, and the resulting statements (true statements with origins and keys) will appear as witnesses
37 | - the circuit must verify that all the input statement templates (with origins and keys) appear in the previous statements (in higher rows of the table)
38 | - the circuit also substitutes the claimed values for the wildcards in the output statement, and verifies that it matches the claimed output statement
39 |
40 |
41 |
42 | [^inherit]: What to call this? One POD "inherits" from another?
43 |
44 |
--------------------------------------------------------------------------------
/book/src/custom2.md:
--------------------------------------------------------------------------------
1 | # Custom operations (or: how to define a custom predicate): VERSION 2
2 |
3 | # DO NOT USE THIS DOC
4 | # SAVING IN THE GITHUB JUST SO WE HAVE A COPY
5 | # WE ARE NOT USING THIS SPEC
6 | # DO NOT USE
7 |
8 | ## The local variable requirement
9 |
10 | This spec differs from the main spec in that there are no anchored keys. However, there are still types `Origin` and `Key`.
11 |
12 | An `Origin` refers to an input POD; in-circuit, the `Origin` is the pod ID of the pod.
13 |
14 | A `Key` refers to a value within a POD.
15 |
16 | With the exception of the special statement `ValueFromPodKey`, a key always refers to a value within the POD `_SELF`. In other words, a statement (except for `ValueFromPodKey`) cannot refer to a value from a previous POD, only to a value in the "namespace" of the current POD.
17 |
18 | Roughly speaking, the statement
19 | ```
20 | ValueFromPodKey(local_key, origin_id, key)
21 | ```
22 | means that the value of `local_key` on the current POD (_self) is the same as the value of `key` on the POD `origin_id` -- in other words, it is basically the same as
23 | ```
24 | Equals(AnchoredKey(_SELF, local_key), AnchoredKey(origin_id, key)).
25 | ```
26 |
27 | I say "basically the same" because, in this spec, it is possible to refer to both keys and origin IDs by reference.
28 |
29 | ## Referencing
30 |
31 | Recall that in the front-end, a `Key` is a string that functions as an identifier (like a variable name in other languages), and a `Value` is the value of that variable -- an `Integer`, `String`, or compound value.
32 |
33 | In the back-end, a `Key` is four field elements (computed as a hash of the front-end key); and a `Value` is again four field elements. Again, each `Key` has a unique `Value`.
34 |
35 | A `Reference` statement allows a key to be reinterpreted as a value; it is analogous to a pointer in C.
36 |
37 | The statement
38 | ```
39 | Reference(reference_key, key)
40 | ```
41 | means that `reference_key` is a key, whose associated value is the same as the key `key`.
42 |
43 | ## ValueFromPodKey, precisely this time
44 |
45 | ```
46 | ValueFromPodKey(local_key: KeyOrLiteral::String, origin_id: KeyOrLiteral::OriginID, key: KeyOrLiteral::String).
47 | ```
48 |
49 | means that the _values_ of `local_key` and `key` are _keys_, the _value_ of `origin_id` is an _origin ID_, and the value assigned to the key `local_key` on the present POD is the same as the value assigned to the key `key` on the pod `origin_id`.
50 |
51 | An example with literals:
52 | ```
53 | ValueFromPodKey("local_ssn", 0x4030, "ssn")
54 | ```
55 | means that the pod `0x4030` has a key called `ssn`, the local pod has a key `local_ssn`, and they have the same value.
56 |
57 | An example with keys, that expresses the same semantic meaning:
58 | ```
59 | ValueOf(local_varname, "local_ssn")
60 | ValueOf(remote_varname, "ssn")
61 | ValueOf(gov_id_root, 0x4030)
62 | ValueFromPodKey(local_varname, gov_id_root, remote_varname)
63 | ```
64 |
65 | ## Summary of additional statements in this spec
66 |
67 | ```
68 | ValueFromPodKey(local_key: KeyOrLiteral::String, origin_id: KeyOrLiteral::OriginID, key: KeyOrLiteral::String).
69 | ```
70 |
71 |
72 | In addition to the built-in statements in the [main spec](./statements.md):
73 |
74 | There is one additional front-end type: `OriginID`. As the name suggests, it contains the "origin ID" of a POD.
75 |
76 | There are two additional built-in statements:
77 | ```
78 | Reference(reference_key: Key::String, key: Key)
79 |
80 | ValueFromPodKey(local_key: KeyOrLiteral::String, origin_id: KeyOrLiteral::OriginID, key: KeyOrLiteral::String).
81 | ```
82 |
83 | ```
84 | Reference(reference_key, key)
85 | ```
86 | means that the *value* of `reference_key` is the *key name* of `key`.
87 |
88 | ```
89 | ValueFromPodKey(local_key, origin_id, key)
90 | ```
91 | means that the key `local_key` in the local scope has the same value as the key `key` in the scope of the pod `origin_id`.
92 |
93 | ## How to work with the local variable requirement
94 |
95 | To make a statement about an inherited value (a value introduced in an ancestor POD), the value must be copied to a local value:
96 |
97 | The statements below assert that "name" on pod1 and "friend" on pod2 are assigned the same value.
98 | ```
99 | ValueFromPodKey(name_from_pod1, pod1, "name")
100 | ValueFromPodKey(friend_from_pod2, pod2, "friend")
101 | Equal(name_from_pod1, friend_from_pod2)
102 | ```
103 |
104 | ## How to inherit local variables from a previous POD
105 |
106 | In this design, an additional complication arises when you
107 | carry a value from one POD to another,
108 | and you want to keep track of the origin POD on which it originated.
109 |
110 | To allow this operation, we introduce an additional deduction rule
111 | ```
112 | InheritValueFromPodKey,
113 | ```
114 | which works as follows.
115 |
116 | Suppose "self" is the current POD and "parent_id" is the POD id of one of the input PODs to "self".
117 |
118 | Suppose "parent" has, among its public statements, the statement
119 | ```
120 | ValueFromPodKey(parent_name, origin, original_name)
121 | ```
122 | and "self" has the statement (public or private)
123 | ```
124 | ValueFromPodKey(self_name, parent_id, parent_name).
125 | ```
126 |
127 | Then ```InheritValueFromPodKey``` allows you to generate the following statement on "self":
128 | ```
129 | ValueFromPodKey(self_name, origin, original_name).
130 | ```
131 |
--------------------------------------------------------------------------------
/book/src/customexample.md:
--------------------------------------------------------------------------------
1 |
2 | # Ethdos custom predicate, using binary AND and OR: example of a recursive group
3 |
4 | ```
5 | eth_dos_distance(src_or, src_key, dst_or, dst_key, distance_or, distance_key) = OR(
6 | eth_dos_distance_ind_0(src_or, src_key, dst_or, dst_key, distance_or, distance_key),
7 | eth_dos_distance_base(src_or, src_key, dst_or, dst_key, distance_or, distance_key)
8 | )
9 |
10 | eth_dos_distance_base(src_or, src_key, dst_or, dst_key, distance_or, distance_key) = AND(
11 | Equal(src_or[src_key], dst_or[dst_key]),
12 | ValueOf(distance_or[distance_key], 0)
13 | )
14 |
15 | eth_dos_distance_ind_0(src_or, src_key, dst_or, dst_key, distance_or, distance_key, private: intermed_or, intermed_key, shorter_distance_or, shorter_distance_key, one_or, one_key) = AND(
16 | eth_dos_distance(src_or, src_key, intermed_or, intermed_key, shorter_distance_or, shorter_distance_key)
17 |
18 | // distance == shorter_distance + 1
19 | ValueOf(one_or[one_key], 1)
20 | SumOf(distance_or[distance_key], shorter_distance_or[shorter_distance_key], one_or[one_key])
21 |
22 | // intermed is a friend of dst
23 | eth_friend(intermed_or, intermed_key, dst_or, dst_key)
24 | )
25 | ```
26 |
27 | This group includes three statements.
28 |
29 | When the definition is serialized for hashing, the statements are renamed to SELF.1, SELF.2, SELF.3.
30 |
31 | With this renaming and the wildcards, the first of the three definitions becomes:
32 | ```
33 | SELF.1(?1, ?2, ?3, ?4, ?5, ?6) = OR(
34 | SELF.2(?1, ?2, ?3, ?4, ?5, ?6)
35 | SELF.3(?1, ?2, ?3, ?4, ?5, ?6)
36 | )
37 | ```
38 | and similarly for the other two definitions.
39 |
40 | The above definition is serialized in-circuit and hashed with a zk-friendly hash to generate the "group hash", a unique cryptographic identifier for the group.
41 |
42 | Then the individual statements in the group are identified as:
43 | ```
44 | eth_dos_distance = groupHASH.1
45 | eth_dos_distance_base = groupHASH.2
46 | eth_dos_distance_ind = groupHASH.3
47 | ```
48 |
--------------------------------------------------------------------------------
/book/src/customhash.md:
--------------------------------------------------------------------------------
1 | # How to hash a custom predicate
2 |
3 | Every predicate, native or custom, is identified on the backend by a predicate ID.
4 |
5 | The native predicates are numbered with small integers, sequentially. The ID of a custom predicate is a hash of its definition; this guarantees that two different predicates cannot have the same ID (aside from the minuscule probability of a hash collision).
6 |
7 | This document explains in some detail how the definition of a custom predicate is serialized and hashed.
8 |
9 | Custom predicates are defined in _groups_ (also known as _batches_); see an [example](./customexample.md). The definition of a custom predicate in a group involves other predicates, which may include:
10 | - native predicates
11 | - previously-defined custom predicates
12 | - other predicates in the same group.
13 |
14 | Predicate hashing is recursive: in order to hash a group of custom predicates, we need to know IDs for all the previously-defined custom predicates it depends on.
15 |
16 | The definition of the whole group of custom predicates is serialized (as explained below), and that serialization is hashed (using a zk-friendly hash -- in the case of the plonky2 backend, Poseidon) to give a _group ID_. Each predicate in the group is then referenced by
17 | ```
18 | predicate_ID = (group_ID, idx)
19 | ```
20 | (here `idx` is simply the index of the predicate in the group).
21 |
--------------------------------------------------------------------------------
/book/src/custompred.md:
--------------------------------------------------------------------------------
1 | # Defining custom predicates
2 |
3 | (Note: At the moment, we consider a "custom operation" to be exactly the same thing as the "definition of a custom predicate.")
4 |
5 | A custom operation [^operation] is a rule that allows one to deduce a custom statement from one or more existing statements according to a logical rule, described below.
6 |
7 | > Note: Unlike built-in operations, it is not possible to perform arbitrary calculations inside a custom operation.
8 |
9 | The syntax of a custom operation is best explained with an example.
10 |
11 | Original example with anchored keys, origins, and keys.
12 | | Args | Condition | Output |
13 | |------------|-----------------------------------------|----|
14 | | signed_dict: Dict, signer: PublicKey, good_boy_issuers: AnchoredKey::MerkleRoot, receiver: AnchoredKey | SignedBy(signed_dict, signer), Contains(good_boy_issuers, signer), Equals(signed_dict["friend"], receiver) | GoodBoy(receiver, good_boy_issuers) |
15 |
16 | Compiled example with only origins and keys.
17 | | Args | Condition | Output |
18 | |------------|-----------------------------------------|----|
19 | | signed_dict: Dict, signer: PublicKey, good_boy_issuers_origin: Origin, good_boy_issuers_key: Key::MerkleRoot, receiver_origin: Origin, receiver_key: Key | SignedBy(signed_dict, signer), Contains(good_boy_issuers_origin[good_boy_issuers_key], signer), Equals(signed_dict["friend"], receiver_origin[receiver_key]) | GoodBoy(receiver_origin[receiver_key], good_boy_issuers_origin[good_boy_issuers_key]) |
20 |
21 | A custom operation accepts as input a number of statements (the `Condition`);
22 | each statement has a number of arguments, which may be constants or anchored keys; and an [anchored key](./anchoredkeys.md) in turn can optionally be decomposed as a pair of a Dict and a Key.
23 |
24 | In the "original example" above, the anchored keys `good_boy_issuers` and `receiver` are not broken down, but `signed_dict["friend"]` is. The purpose of breaking it down, in this case, is to use an entry of a dictionary that has been signed.
25 |
26 | In the "compiled example", all the anchored keys have been broken down into dictionaries and keys.
27 |
28 | In general, in the front-end language, the "arguments" to an operation define a list of identifiers with types. Every statement in the "condition" must have valid arguments of the correct types: either constants, or identifiers defined in the "arguments".
29 |
30 | In order to apply the operation, the user who wants to create a POD must give acceptable values for all the arguments. The POD prover will substitute those values for all the statements in the "Condition" and check that all substituted statements previously appear in the POD. If this check passes, the output statement is then a valid statement.
31 |
32 | ## What applying the operation looks like on the back end
33 |
34 | On the back end the "compiled example" deduction rule is converted to a sort of "template":
35 |
36 | | Args | Condition | Output |
37 | |------------|-----------------------------------------|----|
38 | | ?1 (signed_dict), ?2 (signer), ?3 (good_boy_issuers_origin), ?4 (good_boy_issuers_key), ?5 (receiver_origin), ?6 (receiver_key) | SignedBy(?1, ?2), Contains(?3[?4], ?2), Equals(?1["friend"], ?5[?6]) | GoodBoy(?5[?6], ?3[?4]) |
39 |
40 | If you want to apply this deduction rule to prove a `GoodBoy` statement,
41 | you have to provide the following witnesses in-circuit.
42 |
43 | - Copy of the deduction rule
44 | - Values for ?1, ?2, ?3, ?4, ?5, ?6.
45 | - Copy of the three statements in the deduction rule with ?1, ?2, ?3, ?4, ?5, ?6 filled in
46 | - Indices of the three statements `SignedBy`, `Contains`, `Equals` in the list of previous statements.
47 |
48 | And the circuit will verify:
49 | - ?1, ?2, ?3, ?4, ?5, ?6 were correctly substituted into the statements
50 | - The three statements `SignedBy`, `Contains`, `Equals` do indeed appear at the claimed indices.
51 |
52 | [^operation]: In previous versions of these docs, "operations" were called "deduction rules".
53 |
--------------------------------------------------------------------------------
/book/src/deductions.md:
--------------------------------------------------------------------------------
1 | # Deductions
2 |
--------------------------------------------------------------------------------
/book/src/examples.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | Examples of POD2 use cases
4 |
5 | ## EthDos
6 | *Check also the [custom statement example](./customexample.md) section.*
7 |
8 | Original in prolog https://gist.github.com/ludns/f84b379ec8c53c97b7f95630e16cc39c#file-eth_dos-pl
9 |
10 | An EthDos Pod exposes a single custom statement with two custom deduction
11 | rules, the inductive case and the base case.
12 |
13 | ```
14 | // src, dst: PubKey, attestation_pod: Pod
15 | eth_dos_friend(src, dst, private: attestation_pod) = AND(
16 | ValueOf(attestation_pod[KEY_TYPE], SIGNATURE)
17 | Equal(attestation_pod[KEY_SIGNER], src)
18 | Equal(attestation_pod["attestation"], dst)
19 | )
20 |
21 | // src, intermed, dst: PubKey, distance, shorter_distance: Int
22 | eth_dos_distance(src, dst, distance, private: shorter_distance, intermed) = OR(
23 | AND(
24 | eth_dos_distance(src, intermed, shorter_distance)
25 | SumOf(distance, shorter_distance, 1)
26 | eth_dos_friend(intermed, dst)
27 | )
28 | AND(
29 | Equal(src, dst)
30 | Equal(distance, 0)
31 | )
32 | )
33 | ```
34 |
35 | ## ZuKYC (classic)
36 |
37 | Original using GPC https://github.com/proofcarryingdata/zukyc
38 |
39 | Authority public keys:
40 | - `ZOO_GOV`: PubKey, issues IDs
41 | - `ZOO_DEEL`: PubKey, issues bank statements
42 | Authority lists:
43 | - `SANCTION_LIST`: Set, Merkle Tree Root of set of sanctioned public keys
44 | - values: `["G2345678", "G1987654", "G1657678"]`
45 | Date values:
46 | - `NOW_MINUS_18Y`: Int, 18 years ago
47 | - `NOW_MINUS_1Y`: Int, 1 year ago
48 | - `NOW_MINUS_7D`: Int, 7 days ago
49 |
50 | A ZuKYC Pod exposes a single custom statement with one custom deduction rule.
51 |
52 | ```
53 | // receiver: PubKey, gov_id, paystub, sk_pok: Pod, nullifier, sk: Raw
54 | loan_check(receiver, private: gov_id, paystub, nullifier, sk, sk_pok) = AND(
55 | Equal(gov_id["pk"], receiver)
56 | // Not in the sanction list
57 | SetNotContains(SANCTION_LIST, receiver)
58 | // Valid government-issued ID
59 | Equal(gov_id[KEY_SIGNER], ZOO_GOV)
60 | Equal(gov_id[KEY_TYPE], SIGNATURE)
61 | // At least 18 years old
62 | Lt(gov_id["date_of_birth"], NOW_MINUS_18Y) # date_of_birth is more than 18y old
63 | Equal(paystub[KEY_SIGNER], ZOO_DEEL)
64 | Equal(paystub[KEY_TYPE], SIGNATURE)
65 | Equal(paystub["ssn"], gov_id["ssn"])
66 | // At least one year of consistent employment with your current employer
67 | Lt(paystub["start_date"], NOW_MINUS_1Y) # start_date is more than 1y old
68 | Gt(paystub["issue_date"], NOW_MINUS_7D) # issue_date is less than 7d old
69 | // Annual salary is at least $20,000
70 | Gt(paystub["annual_salary"], 20000)
71 | // Private key knowledge
72 | Equal(sk_pok[KEY_SIGNER], receiver)
73 | Equal(sk_pok[KEY_TYPE], SIGNATURE)
74 | Equal(sk_pok["auth"], "ZUKYC_V1_AUTH")
75 | HashOf(receiver, 0, sk)
76 | // Nullifier
77 | HashOf(nullifier, "ZUKYC_V1_NULLIFIER", sk)
78 | )
79 | ```
80 |
81 | ## ZuKYC (simplified for P1)
82 |
83 | This simplified version uses fewer statements but requires a very similar set of
84 | features.
85 |
86 | Authority lists:
87 | - `SANCTION_LIST`: Set, Merkle Tree Root of set of sanctioned public keys
88 | - values: `["G2345678", "G1987654", "G1657678"]`
89 | Date values:
90 | - `NOW_MINUS_18Y`: Int, 18 years ago
91 | - `NOW_MINUS_1Y`: Int, 1 year ago
92 |
93 | A ZuKYC Pod exposes a single custom statement with one custom deduction rule.
94 |
95 | ```
96 | // receiver: String, gov_pk, paystub_pk: PubKey, gov_id, paystub: Pod
97 | loan_check(receiver, gov_pk, paystub_pk, private: gov_id, paystub) = AND(
98 | Equal(gov_id["id_number"], receiver)
99 | // Not in the sanction list
100 | SetNotContains(SANCTION_LIST, gov_id["id_number"])
101 | // Valid government-issued ID
102 | ValueOf(gov_id[KEY_SIGNER], gov_pk)
103 | Equal(gov_id[KEY_TYPE], SIGNATURE)
104 | // At least 18 years old
105 | Lt(gov_id["date_of_birth"], NOW_MINUS_18Y) # date_of_birth is more than 18y old
106 | ValueOf(paystub[KEY_SIGNER], paystub_pk)
107 | Equal(paystub[KEY_TYPE], SIGNATURE)
108 | Equal(paystub["ssn"], gov_id["ssn"])
109 | // At least one year of consistent employment with your current employer
110 | Lt(paystub["start_date"], NOW_MINUS_1Y) # start_date is more than 1y old
111 | )
112 | ```
113 |
114 | ## GreatBoy
115 |
116 | A Good Boy Pod exposes one custom statement with one custom deduction rule.
117 |
118 | ```
119 | // user: PubKey, good_boy_issuers: Set, pod: Pod, age: Int
120 | is_good_boy(user, good_boy_issuers, private: pod, age) = AND(
121 | Equal(pod[KEY_TYPE], SIGNATURE)
122 | SetContains(good_boy_issuers, pod[KEY_SIGNER])
123 | // A good boy issuer says this user is a good boy
124 | Equal(pod["user"], user)
125 | Equal(pod["age"], age)
126 | )
127 | ```
128 |
129 | A Friend Pod exposes one custom statement with one custom deduction rule.
130 |
131 | ```
132 | // good_boy, friend: PubKey, good_boy_issuers: Set, friend_pod: Pod
133 | is_friend(good_boy, friend, good_boy_issuers, friend_pod) = AND(
134 | Equal(friend_pod[KEY_TYPE], SIGNATURE)
135 | // The issuer is a good boy
136 | is_good_boy(good_boy, good_boy_issuers)
137 | // A good boy says this is their friend
138 | Equal(friend_pod[KEY_SIGNER], good_boy)
139 | Equal(friend_pod["friend"], friend)
140 | )
141 | ```
142 |
143 | A Great Boy Pod exposes (in addition to the above) one new custom statement
144 | with one custom deduction rule.
145 |
146 | ```
147 | // great_boy: PubKey, good_boy_issuers: Set, friend_pod_0, friend_pod_1: Pod
148 | is_great_boy(great_boy, good_boy_issuers, private: friend_pod_0, friend_pod_1) = AND
149 | // Two good boys consider this user their friend
150 | is_friend(friend_pod_0[KEY_SIGNER], great_boy)
151 | is_friend(friend_pod_1[KEY_SIGNER], great_boy)
152 | // good boy 0 != good boy 1
153 | NotEqual(friend_pod_0[KEY_SIGNER], friend_pod_1[KEY_SIGNER])
154 | ```
155 |
156 | ## Attested GreatBoy
157 |
158 | An Attested Great Boy Pod is like a Great Boy Pod, but the names of the signers are revealed.
159 |
160 | ```
161 | // great_boy: PubKey, friend0, friend1: String, good_boy_issuers: Set, friend_pod_0, friend_pod_1: Pod
162 | is_great_boy(great_boy, friend0, friend1, good_boy_issuers, private: friend_pod_0, friend_pod_1) = AND
163 | // Two good boys consider this user their friend
164 | is_friend(friend_pod_0[KEY_SIGNER], great_boy)
165 | is_friend(friend_pod_1[KEY_SIGNER], great_boy)
166 | // good boy 0 != good boy 1
167 | NotEqual(friend_pod_0[KEY_SIGNER], friend_pod_1[KEY_SIGNER])
168 | // publicize signer names
169 | ValueOf(friend_pod_0["name"], friend0)
170 | ValueOf(friend_pod_1["name"], friend1)
171 | ```
172 |
173 | To produce a Great Boy Pod, you need two Friend Pods, `friend_pod0` and `friend_pod1`, each of which reveals its `signer`.
174 |
175 | ## Tracking PodIDs: Posts and comments
176 |
177 | The goal of this example is to model a social network, where posts and comments are pods.
178 |
179 | A Post is a signature pod with the following fields:
180 | ```
181 | content: String
182 | poster: String
183 | signer: PubKey
184 | timestamp: Int
185 | ```
186 |
187 | A Comment is a signature pod with the following fields:
188 | ```
189 | content: String
190 | referenced_post: PodID
191 | signer: PubKey
192 | timestamp: Int
193 | ```
194 |
195 | A post is popular if it has at least two comments from different signers.
196 |
197 | ```
198 | // post, comment1, comment2: Pod
199 | statement is_popular(post, private: comment1, comment2) = AND(
200 | IsEqual(comment1["referenced_post"], post)
201 | IsEqual(comment2["referenced_post"], post)
202 | NotEqual(comment1[KEY_SIGNER], comment2[KEY_SIGNER])
203 | )
204 | ```
205 |
206 | ## Multiple people over 18
207 |
208 | Suppose I want to prove that two different people are over 18, and a third person is under 18, using the custom predicates `over_18` and `under_18`.
209 | ```
210 | // age: Int
211 | over_18(age) = AND(
212 | GtEq(age, 18)
213 | )
214 | ```
215 |
216 | ```
217 | // age: Int
218 | under_18(age) = AND(
219 | Lt(age, 18)
220 | )
221 | ```
222 |
223 | With wildcards:
224 | ```
225 | over_18(?1) = AND(
226 | GtEq(?1, 18)
227 | )
228 | ```
229 |
230 | Maybe I have two input pods `gov_id1` and `gov_id2`, and I want to prove that these pods refer to two different people, both of whom are over 18; and a third pod `gov_id3` refers to someone under 18. So in my public output statements, I want to have:
231 | ```
232 | NotEqual(gov_id1["name"], gov_id2["name"])
233 | over_18(gov_id1["age"])
234 | over_18(gov_id2["age"])
235 | under_18(gov_id3["age"]).
236 | ```
237 |
238 | I would prove this with the following sequence of deductions:
239 | | Statement | Reason |
240 | | --- | --- |
241 | | over_18(gov_id1["age"]) | over_18, ?1 = gov_id1["age"] |
242 | | over_18(gov_id2["age"]) | over_18, ?1 = gov_id2["age"] |
243 | | under_18(gov_id3["age"]) | under_18, ?1 = gov_id3["age"] |
244 | | NotEqual(gov_id1["name"], gov_id2["name"]) | (not equal from entries) |
245 |
246 |
--------------------------------------------------------------------------------
/book/src/front_and_back.md:
--------------------------------------------------------------------------------
1 | # Frontend and backend
2 |
3 | The POD2 system consists of a frontend and a backend, connected by a middleware. This page outlines some design principles for deciding which components go where.
4 |
5 | ```
6 | user -- frontend -- middleware -- backend -- ZK circuit
7 | ```
8 |
9 | The frontend is what we want the user to see; the backend is what we want the circuit to see.
10 |
11 | ## Circuit and proving system
12 |
13 | The first implementation of POD2 uses Plonky2 as its proving system. In principle, a future implementation could use some other proving system. The frontend and middleware should not be aware of what proving system is in use: anything specific to the proving system belongs to the backend.
14 |
15 | ## User-facing types versus in-circuit types
16 |
17 | The frontend type system exposes human-readable types to POD developers: strings, ints, bools, and so forth. On the backend, all types are built out of field elements. The middleware should handle the conversion.
18 |
--------------------------------------------------------------------------------
/book/src/img/SignedPod.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xPARC/pod2/c382bf487c3901b5d9ff2df1e96850d2a169a2f7/book/src/img/SignedPod.png
--------------------------------------------------------------------------------
/book/src/img/frontend-backend.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xPARC/pod2/c382bf487c3901b5d9ff2df1e96850d2a169a2f7/book/src/img/frontend-backend.png
--------------------------------------------------------------------------------
/book/src/img/introductionpod-2-steps-ecdsa-example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xPARC/pod2/c382bf487c3901b5d9ff2df1e96850d2a169a2f7/book/src/img/introductionpod-2-steps-ecdsa-example.png
--------------------------------------------------------------------------------
/book/src/img/introductionpod-2-steps.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xPARC/pod2/c382bf487c3901b5d9ff2df1e96850d2a169a2f7/book/src/img/introductionpod-2-steps.png
--------------------------------------------------------------------------------
/book/src/img/introductionpod-mainpod.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xPARC/pod2/c382bf487c3901b5d9ff2df1e96850d2a169a2f7/book/src/img/introductionpod-mainpod.png
--------------------------------------------------------------------------------
/book/src/img/merkletree-example-1-a.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xPARC/pod2/c382bf487c3901b5d9ff2df1e96850d2a169a2f7/book/src/img/merkletree-example-1-a.png
--------------------------------------------------------------------------------
/book/src/img/merkletree-example-1-b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xPARC/pod2/c382bf487c3901b5d9ff2df1e96850d2a169a2f7/book/src/img/merkletree-example-1-b.png
--------------------------------------------------------------------------------
/book/src/img/merkletree-example-2-a.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xPARC/pod2/c382bf487c3901b5d9ff2df1e96850d2a169a2f7/book/src/img/merkletree-example-2-a.png
--------------------------------------------------------------------------------
/book/src/img/merkletree-example-2-b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/0xPARC/pod2/c382bf487c3901b5d9ff2df1e96850d2a169a2f7/book/src/img/merkletree-example-2-b.png
--------------------------------------------------------------------------------
/book/src/introduction.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
--------------------------------------------------------------------------------
/book/src/introductionpods.md:
--------------------------------------------------------------------------------
1 | # Introduction PODs
2 |
3 | Introduction PODs are a kind of POD used to introduce any cryptographic data
4 | structure into the POD system. This allows the POD system to reason about
5 | cryptographic data and cryptographic primitives that are not part of the POD
6 | system itself.
7 |
8 | Examples of Introduction PODs are the `Ed25519Pod` and the `EcdsaPod`, both
9 | of which allow incorporating their respective signature schemes, which
10 | are not supported in the core of the POD system.
11 | This allows the POD system to incorporate data validated by those
12 | Introduction PODs, and to use it recursively in future proofs, combining it
13 | with any logical reasoning together with other PODs.
14 |
15 | For example, we could have a scenario where we use 3 different Introduction
16 | PODs (Ed25519Pod, RSAPod, EcdsaPod), to prove that a user controls an Ethereum
17 | wallet *AND* a GitHub account. To do so, the user would produce a new `MainPod`
18 | which proves that the user has signed a certain message with the given ECDSA key
19 | (Ethereum wallet) *AND* with either the given Ed25519 key *OR* the RSA key (from
20 | a GitHub account).
21 |
22 |
23 |
24 | ### Interface
25 | The interface of an `IntroductionPod` is just the one of the
26 | [Pod](https://github.com/0xPARC/pod2/blob/511efa8d4477a0d936bd898a484e3b41454b1991/src/middleware/mod.rs#L901)
27 | trait, and by definition the `IntroductionPod` is expected to only output
28 | Introduction statements (or None statements for padding).
29 |
30 | An Introduction Statement is a Statement that uses an Introduction predicate
31 | which embeds the verifying key of the circuit that generates it. This way the
32 | statements generated by an `IntroductionPod` are self-describing.
33 |
34 | This means that as long as we fit into the `Pod` interface, the
35 | `IntroductionPod` will fit into the recursive verification chain of the
36 | `MainPods`.
37 |
38 | ### Different configurations
39 | There are some cases, where the operations needed for the `IntroductionPod`
40 | require a different circuit configuration than the standard recursive circuit
41 | configuration that is used for the `MainPods`.
42 |
43 | In those cases, we do a previous recursive step, where we verify the logic of
44 | the intended `IntroductionPod`, and then the proof of this circuit is the one
45 | verified in the real `IntroductionPod`.
46 |
47 | This is done in order to be able to *converge* the different circuit
48 | configuration shape (and thus different `common_data`) into a new proof that can
49 | be verified with the standard configuration (used for the MainPods).
50 |
51 |
52 |
53 | Notice that this is not a requirement for implementing a new `IntroductionPod`,
54 | and if the IntroductionPod logic can be directly verified with the standard
55 | recursion configuration, we don't need this previous recursive step.
56 |
57 | As examples of the two-recursive steps approach, we have the `EcdsaPod` and the
58 | `Ed25519Pod`. Both of them require different circuit configurations that would
59 | not match the one used for the standard recursion with `MainPods`. Therefore we
60 | first generate a circuit proof of correct signature verification, and then this
61 | proof is the one actually verified in the `EcdsaPod`/`Ed25519Pod` respectively,
62 | not the original signature itself.
63 |
64 |
65 |
66 | > Examples of `IntroductionPods` can be found at the git repository
67 | > [github.com/0xPARC/introduction-pods](https://github.com/0xPARC/introduction-pods/).
68 |
--------------------------------------------------------------------------------
/book/src/mainpod.md:
--------------------------------------------------------------------------------
1 | # MainPOD
2 |
--------------------------------------------------------------------------------
/book/src/merklestatements.md:
--------------------------------------------------------------------------------
1 | # Copied from statements.md
2 |
3 | ```
4 | Branches(parent: AnchoredKey::MerkleTree, left: AnchoredKey::MerkleTree, right: AnchoredKey::MerkleTree)
5 |
6 | Leaf(node: AnchoredKey::MerkleTree, key: AnchoredKey, value: AnchoredKey)
7 |
8 | IsNullTree(node: AnchoredKey::MerkleTree)
9 |
10 | GoesLeft(key: AnchoredKey, depth: Value::Integer)
11 |
12 | GoesRight(key: AnchoredKey, depth: Value::Integer)
13 |
14 | Contains(root: AnchoredKey::MerkleTree, key: AnchoredKey, value: AnchoredKey)
15 |
16 | MerkleSubtree(root: AnchoredKey::MerkleTree, node: AnchoredKey::MerkleTree)
17 |
18 | MerkleCorrectPath(root: AnchoredKey::MerkleTree, node: AnchoredKey::MerkleTree, key: AnchoredKey, depth: Value::Integer)
19 |
20 | Contains(root: AnchoredKey::MerkleTree, key: AnchoredKey, value: AnchoredKey)
21 |
22 | NotContains(root: AnchoredKey::MerkleTree, key: AnchoredKey)
23 |
24 | ContainsHashedKey(root: AnchoredKey::DictOrSet, key: AnchoredKey)
25 |
26 | NotContainsHashedKey(root: AnchoredKey::DictOrSet, key: AnchoredKey)
27 |
28 | ContainsValue(root: AnchoredKey::Array, value: AnchoredKey)
29 | ```
30 |
31 | # Statements involving compound types and Merkle trees
32 |
33 | The front end has three compound types
34 | - `Dictionary`
35 | - `Array`
36 | - `Set`
37 |
38 | all of which are represented as `MerkleTree` on the back end.
39 |
40 | The frontend compound types and their implementation as Merkle trees is explained under [POD value types](./values.md#dictionary-array-set). The backend structure of a MerkleTree is explained on [the Merkle tree page](./merkletree.md).
41 |
42 | The POD2 interface provides statements for working with Merkle trees and compound types at all layers of the stack:
43 | - Primitive statements for Merkle trees
44 | - General derived statements for Merkle trees
45 | - Specialized `ContainsHashedKey`, `NotContainsHashedKey`, and `ContainsValue` statements for the three front-end types.
46 |
47 | ## Primitive statements for Merkle trees
48 |
49 | ```
50 | Branches(parent: AnchoredKey::MerkleTree, left: AnchoredKey::MerkleTree, right: AnchoredKey::MerkleTree)
51 |
52 | Leaf(node: AnchoredKey::MerkleTree, key: AnchoredKey, value: AnchoredKey)
53 |
54 | IsNullTree(node: AnchoredKey::MerkleTree)
55 |
56 | GoesLeft(key: AnchoredKey, depth: Value::Integer)
57 |
58 | GoesRight(key: AnchoredKey, depth: Value::Integer)
59 | ```
60 |
These statements expose the inner workings of a Merkle tree. Their implementations depend on the implementation details of POD2's sparse Merkle trees. In-circuit, verifying these statements requires low-level computation: either a hash or a binary decomposition.
62 |
63 | Every Merkle root either:
64 | - is a special type of Merkle tree called a "null tree", which has no elements,
65 | - is a special type of Merkle tree called a "leaf", which just has a single element, or
66 | - has two branches, left and right -- each of which is itself a Merkle tree. Such a tree is called a "non-leaf" Merkle tree.
67 |
68 | ### `Branches`
69 |
70 | ```
71 | Branches(parent, left, right)
72 | ```
73 | means that ```parent``` is a non-leaf Merkle node, and ```left``` and ```right``` are its branches.
74 |
75 | A `Branches` statement is proved by computing a hash, as specified on [the Merkle tree page](./merkletree.md).
76 |
77 | ### `Leaf`
78 |
79 | ```
80 | Leaf(node, key, value)
81 | ```
82 | means that ```node``` is a leaf Merkle node, whose single item is the key-value pair ```(key, value)```.
83 |
84 | A `Leaf` statement is proved by computing a hash, as specified on [the Merkle tree page](./merkletree.md).
85 |
86 | ### `IsNullTree`
87 |
88 | ```
89 | IsNullTree(node)
90 | ```
91 | means that ```node``` is a null Merkle tree.
92 |
93 | An `IsNullTree` statement is proved by comparing the value of `node` to `hash(0)`.
94 |
95 | ### `GoesLeft` and `GoesRight`
96 |
97 | ```
98 | GoesLeft(key, depth)
99 | ```
100 | means that if ```key``` is contained in a sparse Merkle tree, then at depth ```depth```, it must be in the left branch.
101 |
102 | ```GoesRight``` is similar.
103 |
104 | A `GoesLeft` or `GoesRight` statement is proved by computing a binary decomposition of `key` and extracting the bit at index `depth`, as specified on [the Merkle tree page](./merkletree.md).
105 |
106 | ## General derived statements for Merkle trees
107 |
108 | ```
109 | MerkleSubtree(root: AnchoredKey::MerkleTree, node: AnchoredKey::MerkleTree)
110 |
111 | MerkleCorrectPath(root: AnchoredKey::MerkleTree, node: AnchoredKey::MerkleTree, key: AnchoredKey, depth: Value::Integer)
112 |
113 | Contains(root: AnchoredKey::MerkleTree, key: AnchoredKey, value: AnchoredKey)
114 |
115 | NotContains(root: AnchoredKey::MerkleTree, key: AnchoredKey)
116 | ```
117 |
118 | ### `MerkleSubtree`
119 |
120 | ```
121 | MerkleSubtree(root, node)
122 | ```
means that there is a valid Merkle path from `root` to `node`.
124 |
125 | A `MerkleSubtree` statement is proved as follows:
126 | ```
127 | MerkleSubtree(root, root)
128 | ```
129 | is automatically true.
130 |
131 | Otherwise, `MerkleSubtree(root, node)` can be deduced from either
132 | ```
133 | MerkleSubtree(root, parent)
134 | Branches(parent, node, other)
135 | ```
136 | or
137 | ```
138 | MerkleSubtree(root, parent)
139 | Branches(parent, other, node).
140 | ```
141 |
142 | ### `MerkleCorrectPath`
143 |
144 | ```
145 | MerkleCorrectPath(root, node, key, depth)
146 | ```
147 | means that there is a valid Merkle path of length `depth` from `root` to `node`, and if `key` appears as a key in the Merkle tree with root `root`, then `key` must be in the subtree under `node`.
148 |
149 | A `MerkleCorrectPath` statement is proved as follows:
150 | ```
151 | MerkleCorrectPath(root, root, key, 0)
152 | ```
153 | is automatically true.
154 |
155 | Otherwise, `MerkleCorrectPath(root, node, key, depth)` can be deduced from either:
156 | ```
157 | MerkleCorrectPath(root, parent, key, depth-1)
158 | Branches(parent, node, other)
159 | GoesLeft(key, depth-1)
160 | ```
161 | or
162 | ```
163 | MerkleCorrectPath(root, parent, key, depth-1)
164 | Branches(parent, other, node)
165 | GoesRight(key, depth-1).
166 | ```
167 |
168 | ### `Contains`
169 |
170 | ```
171 | Contains(root, key, value)
172 | ```
173 | means that the key-value pair ```(key, value)``` is contained in the Merkle tree with Merkle root ```root```.
174 |
175 | A `Contains` statement can be deduced from the following two statements.
176 | ```
177 | MerkleSubtree(root, node)
178 | Leaf(node, key, value)
179 | ```
180 |
181 | ### `NotContains`
182 |
183 | ```
184 | NotContains(root, key)
185 | ```
186 | means that the key ```key``` is not contained in the sparse Merkle tree with Merkle root ```root```.
187 |
188 | The statement `NotContains(root, key)` can be deduced from either
189 | ```
190 | MerkleCorrectPath(root, node, key, depth)
191 | Leaf(node, otherkey, value)
192 | NotEqual(otherkey, key)
193 | ```
194 | or
195 | ```
196 | MerkleCorrectPath(root, node, key, depth)
197 | IsNullTree(node).
198 | ```
199 |
200 | ## Specialized statements for front-end compound types
201 |
202 | ```
203 | ContainsHashedKey(root: AnchoredKey::DictOrSet, key: AnchoredKey)
204 |
205 | NotContainsHashedKey(root: AnchoredKey::DictOrSet, key: AnchoredKey)
206 |
207 | ContainsValue(root: AnchoredKey::Array, value: AnchoredKey)
208 | ```
209 |
210 | When a dictionary or set is converted to a Merkle tree, its key is hashed -- see the [POD2 values page](./values.md#dictionary-array-set).
211 |
212 | ```ContainsHashedKey(root, key)``` is deduced from
213 | ```
214 | Contains(root, keyhash, value)
215 | keyhash = hash(key).
216 | ```
217 |
218 | ```NotContainsHashedKey(root, key)``` is deduced from
219 | ```
220 | NotContains(root, keyhash)
221 | keyhash = hash(key)
222 | ```
223 |
224 | ```ContainsValue(root, value)``` is deduced from
225 | ```
226 | Contains(root, idx, value).
227 | ```
228 |
--------------------------------------------------------------------------------
/book/src/signature.md:
--------------------------------------------------------------------------------
1 | # Signature
2 |
3 |
The current signature scheme is proof-based signatures using Plonky2 proofs, following [https://eprint.iacr.org/2024/1553](https://eprint.iacr.org/2024/1553) and [https://jdodinh.io/assets/files/m-thesis.pdf](https://jdodinh.io/assets/files/m-thesis.pdf). This comes from [Polygon Miden's RPO STARK-based](https://github.com/0xPolygonMiden/crypto/blob/d2a67396053fded90ec72690404c8c7728b98e4e/src/dsa/rpo_stark/signature/mod.rs#L129) signatures.
5 |
In future iterations we may replace it by other signature schemes (either an elliptic-curve-based scheme on a Goldilocks-prime-friendly curve, or a lattice-based scheme).
7 |
8 |
9 |
10 | ### generate_params()
11 | $pp$: plonky2 circuit prover params
12 | $vp$: plonky2 circuit verifier params
13 | return $(pp, vp)$
14 |
15 | ### keygen()
16 | secret key: $sk \xleftarrow{R} \mathbb{F}^4$
17 | public key: $pk := H(sk)$ [^1]
18 | return $(sk, pk)$
19 |
20 | ### sign(pp, sk, m)
21 | $pk := H(sk)$
22 | $s := H(pk, m)$
23 | $\pi = plonky2.Prove(pp, sk, pk, m, s)$
24 | return $(sig:=\pi)$
25 |
26 | ### verify(vp, sig, pk, m)
27 | $\pi = sig$
28 | $s := H(pk, m)$
29 | return $plonky2.Verify(vp, \pi, pk, m, s)$
30 |
31 |
32 | ### Plonky2 circuit
33 | private inputs: $(sk)$
34 | public inputs: $(pk, m, s)$
35 | $pk \stackrel{!}{=} H(sk)$
36 | $s \stackrel{!}{=} H(pk, m)$
37 |
38 |
39 |
40 |
41 | [^1]: The [2024/1553 paper](https://eprint.iacr.org/2024/1553) uses $pk:=H(sk||0^4)$ to have as input (to the hash) 8 field elements, to be able to reuse the same instance of the RPO hash as the one they use later in the signature (where it hashes 8 field elements).
42 |
--------------------------------------------------------------------------------
/book/src/simpleexample.md:
--------------------------------------------------------------------------------
1 | # Simple example
2 |
3 |
4 | ## Circuit structure, two-column proof
5 |
6 | A "proof" is a table that looks like
7 | | STATEMENT | REASON |
8 | | --- | --- |
9 | | STATEMENT1 | REASON1 |
10 | | STATEMENT2 | REASON2 |
11 | ...
12 |
13 | In other words:
14 |
15 | A "proof" is an ordered list of 100 proof-rows.
16 |
17 | Each "row" is a pair (statement, reason).
18 |
19 | The statement is the statement.
20 |
21 | The reason is everything the circuit needs to verify that the statement is true.
22 |
23 | Example:
24 |
25 | ```
26 | STATEMENT1 = Equals(olddict["name"], otherdict["field"])
27 |
28 | STATEMENT2 = Equals(otherdict["field"], newdict["result"])
29 |
30 | STATEMENT3 = Equals(olddict["name"], newdict["result"])
31 | ```
32 |
33 | The reasons in human-readable simplified format:
34 |
35 | ```
36 | REASON1 -- "came from previous pod"
37 |
38 | REASON2 -- "came from previous pod"
39 |
40 | REASON3 -- "use transitive property on STATEMENT1 and STATEMENT2"
41 | ```
42 |
43 | ## What does the reason look like in circuit?
44 |
45 | It won't be so simple. I'll just explain what REASON3 has to look like.
46 |
47 | First, the operation (deduction rule).
48 |
49 | ## A simple example of a deduction rule
50 |
51 | Here is the transitive property of equality, in human-readable form.
52 | ```
53 | if
54 | Equals(a, b) and Equals(b, c)
55 | then
56 | Equals(a, c)
57 | ```
58 |
59 | First, we need to decompose all the anchored keys as (dict, key) pairs. This is the frontend description of the deduction rule.
60 | ```
61 | IF
62 | Equals(a_or[a_key], b_or[b_key])
63 | AND
64 | Equals(b_or[b_key], c_or[c_key])
65 | THEN
66 | Equals(a_or[a_key], c_or[c_key])
67 | ```
68 |
69 | In-circuit, all these identifiers are replaced with wildcards, which come in numerical order (because they will be used as array indices). So the backend representation is:
70 | ```
71 | IF
72 | Equals( ?1[?2], ?3[?4] ) and Equals ( ?3[?4], ?5[?6] )
73 | THEN
74 | Equals( ?1[?2], ?5[?6] )
75 | ```
76 |
77 |
78 | ## What does REASON3 need to look like in-circuit?
79 |
80 | - Repeat deduction rule
81 | ```
82 | IF
83 | Equals( ?1[?2], ?3[?4] ) and Equals ( ?3[?4], ?5[?6] )
84 | THEN
85 | Equals( ?1[?2], ?5[?6] )
86 | ```
87 | - Say what the wildcards are
88 | ```
89 | ?1 -- olddict
90 | ?2 -- "name"
91 | ?3 -- otherdict
92 | ...
93 | ```
94 | - Substitute the wildcards into the deduction rule
95 | ```
96 | IF
97 | Equals( olddict["name"], ... ) ...
98 | Equals( otherdict["value"])
99 | THEN
100 | Equals( olddict["name"] newdict[...] )
101 | ...
102 | ```
103 | - Say where to find the previous statements (indices in the list), and check that they are above this one.
104 | ```
105 | Statement1
106 | Statement2
107 | ```
108 | - Check that the input statements match. Check that the output statement matches.
109 |
110 |
111 |
112 | ## Decomposing anchored keys
113 |
114 | Sometimes a deduction rule requires different anchored keys to come from the same dictionary. Here's an example from Ethdos.
115 |
116 | The wildcard system handles this very naturally, since the dict of the anchored key can use its own wildcard.
117 |
118 | ```
119 | eth_friend(src_or, src_key, dst_or, dst_key) = and<
120 | // the attestation dict is signed by (src_or, src_key)
121 | SignedBy(attestation_dict, src_or[src_key])
122 |
123 | // that same attestation pod has an "attestation"
124 | Equal(attestation_dict["attestation"], dst_or[dst_key])
125 | >
126 | ```
127 |
128 | In terms of anchored keys, it would be a little more complicated. five anchored keys show up in this deduction rule:
129 | ```
130 | AK1 = src
131 | AK2 = dst
132 | AK3 = attestation_dict["attestation"]
133 | ```
134 |
135 | and we need to force AK3, AK4, AK5 to come from the same origin.
136 |
137 | WILDCARD matching takes care of it.
138 |
139 | ```
140 | eth_friend(?1, ?2, ?3, ?4) = and<
141 | // the attestation dict is signed by (src_or, src_key)
142 | SignedBy(?5, ?1[?2])
143 |
144 | // that same attestation pod has an "attestation"
145 | Equal(?5["attestation"], ?3[?4])
146 | >
147 | ```
148 |
--------------------------------------------------------------------------------
/book/src/statements.md:
--------------------------------------------------------------------------------
1 | # Statements
2 |
3 | A _statement_ is any sort of claim about the values of entries: for example, that two values are equal, or that one entry is contained in another.
4 |
5 | Statements come in two types: _built-in_ and _custom_. There is a short list of built-in statements (see below). [^builtin]
6 | In addition, users can freely define custom statements.
7 |
8 | From the user (front-end) perspective, a statement represents a claim about the values of some number of entries -- the statement can only be proved if the claim is true. On the front end, a statement is identified by its _name_ (`ValueOf`, `Equal`, etc.).
9 |
10 | From the circuit (back-end) perspective, a statement can be proved either:
11 | - by direct in-circuit verification, or
12 | - by an operation (aka deduction rule).
13 | On the back end, a statement is identified by a unique numerical _identifier_.
14 |
15 | ## Built-in statements
16 |
17 | The POD system has several builtin statements. These statements are associated to a reserved set of statement IDs.
18 |
19 | ### Backend statements
20 |
21 | A statement is a code (or, in the frontend, string identifier) followed by 0 or more arguments. These arguments may consist of up to three anchored keys and up to one POD value.
22 |
23 | The following table summarises the natively-supported statements, where we write `value_of(ak)` for 'the value anchored key `ak` maps to', which is of type `PODValue`, and `key_of(ak)` for the key part of `ak`:
24 |
25 | | Code | Identifier | Args | Meaning |
26 | |------|---------------|---------------------|-------------------------------------------------------------------|
27 | | 0 | `None` | | no statement, always true (useful for padding) |
28 | | 1 | `False` | | always false (useful for padding disjunctions) |
29 | | 2 | `Equal` | `ak1`, `ak2` | `value_of(ak1) = value_of(ak2)` |
30 | | 3 | `NotEqual` | `ak1`, `ak2` | `value_of(ak1) != value_of(ak2)` |
31 | | 4 | `LtEq` | `ak1`, `ak2` | `value_of(ak1) <= value_of(ak2)` |
32 | | 5 | `Lt` | `ak1`, `ak2` | `value_of(ak1) < value_of(ak2)` |
33 | | 6 | `Contains` | `ak1`, `ak2`, `ak3` | `(value_of(ak2), value_of(ak3)) ∈ value_of(ak1)` (Merkle inclusion) |
34 | | 7 | `NotContains` | `ak1`, `ak2` | `(value_of(ak2), _) ∉ value_of(ak1)` (Merkle exclusion) |
35 | | 8 | `SumOf` | `ak1`, `ak2`, `ak3` | `value_of(ak1) = value_of(ak2) + value_of(ak3)` |
36 | | 9 | `ProductOf` | `ak1`, `ak2`, `ak3` | `value_of(ak1) = value_of(ak2) * value_of(ak3)` |
37 | | 10 | `MaxOf` | `ak1`, `ak2`, `ak3` | `value_of(ak1) = max(value_of(ak2), value_of(ak3))` |
38 | | 11 | `HashOf` | `ak1`, `ak2`, `ak3` | `value_of(ak1) = hash(value_of(ak2), value_of(ak3))` |
39 | | 12 | `PublicKeyOf` | `ak1`, `ak2` | `value_of(ak1) = derive_public_key(value_of(ak2))` |
40 | | 13 | `SignedBy` | `ak1`, `ak2` | `value_of(ak1)` is signed by `value_of(ak2)` |
41 | | 14 | `ContainerInsert` | `ak1`, `ak2`, `ak3`, `ak4` | `(value_of(ak3), _) ∉ value_of(ak2) ∧ value_of(ak1) = value_of(ak2) ∪ {(value_of(ak3), value_of(ak4))}` (Merkle insert) |
42 | | 15 | `ContainerUpdate` | `ak1`, `ak2`, `ak3`, `ak4` | `(value_of(ak3), v) ∈ value_of(ak2) ∧ value_of(ak1) = (value_of(ak2) - {(value_of(ak3), v)}) ∪ {(value_of(ak3), value_of(ak4))}` (Merkle update) |
43 | | 16 | `ContainerDelete` | `ak1`, `ak2`, `ak3` | `(value_of(ak3), v) ∈ value_of(ak2) ∧ value_of(ak1) = value_of(ak2) - {(value_of(ak3), v)}` (Merkle delete) |
44 |
45 | ### Frontend statements
46 |
47 | The frontend also exposes the following syntactic sugar predicates. These predicates are not supported by the backend. The frontend compiler is responsible for translating these predicates into the predicates above.
48 |
49 | | Code | Identifier | Args and desugaring |
50 | |------|---------------|---------------------|
51 | | 1000 | DictContains | `DictContains(root, key, val) -> Contains(root, key, val)` |
52 | | 1001 | DictNotContains | `DictNotContains(root, key) -> NotContains(root, key)` |
53 | | 1002 | SetContains | `SetContains(root, val) -> Contains(root, val, val)` |
| 1003 | SetNotContains | `SetNotContains(root, val) -> NotContains(root, val)` |
55 | | 1004 | ArrayContains | `ArrayContains(root, idx, val) -> Contains(root, idx, val)` |
56 | | 1005 | GtEq | `GtEq(a, b) -> LtEq(b, a)`|
57 | | 1006 | Gt | `Gt(a, b) -> Lt(b, a)` |
58 | | 1009 | DictInsert | `DictInsert(new_root, old_root, key, val) -> ContainerInsert(new_root, old_root, key, val)` |
59 | | 1010 | DictUpdate | `DictUpdate(new_root, old_root, key, val) -> ContainerUpdate(new_root, old_root, key, val)` |
60 | | 1011 | DictDelete | `DictDelete(new_root, old_root, key) -> ContainerDelete(new_root, old_root, key)` |
61 | | 1012 | SetInsert | `SetInsert(new_root, old_root, val) -> ContainerInsert(new_root, old_root, val, val)` |
62 | | 1013 | SetDelete | `SetDelete(new_root, old_root, val) -> ContainerDelete(new_root, old_root, val)` |
63 | | 1014 | ArrayUpdate | `ArrayUpdate(new_root, old_root, idx, val) -> ContainerUpdate(new_root, old_root, idx, val)` |
64 |
65 |
66 | ### Built-in statements for entries of any type
67 |
68 | A ```DictContains``` statement asserts that an entry has a certain value.
69 | ```
70 | DictContains(A, "name", "Arthur")
71 | ```
This implies that the entry `A["name"]` exists with the value `"Arthur"`.
73 |
74 | An ```Equal``` statement asserts that two entries have the same value. (Technical note: The circuit only proves equality of field elements; no type checking is performed. For strings or Merkle roots, collision-resistance of the hash gives a cryptographic guarantee of equality. However, note both Arrays and Sets are implemented as dictionaries in the backend; the backend cannot type-check, so it is possible to prove an equality between an Array or Set and a Dictionary.)
75 | ```
76 | Equal(A["name"], B["name"])
77 | ```
78 |
A ```NotEqual``` statement asserts that two entries have different values.
80 | ```
81 | NotEqual (for arbitrary types)
82 | ```
83 |
84 | ##### Built-in Statements for Numerical Types
A ```Gt(x, y)``` statement asserts that ```x``` is an entry of type ```Integer```, ```y``` is an entry or constant of type ```Integer```, and ```x > y```.
86 | ```
87 | Gt (for numerical types only)
88 | Gt(A["price"], 100)
89 | Gt(A["price"], B["balance"])
90 | ```
91 |
The statements ```Lt```, ```GtEq```, ```LtEq``` are defined analogously.
93 |
94 | ```SumOf(x, y, z)``` asserts that ```x```, ```y```, ```z``` are entries of type ```Integer```, and ```x = y + z```
95 |
96 | ```ProductOf``` and ```MaxOf``` are defined analogously.
97 |
98 | The two items below may be added in the future:
99 | ```
100 | poseidon_hash_of(A["hash"], B["preimage"]) // perhaps a hash_of predicate can be parametrized by an enum representing the hash scheme; rather than having a bunch of specific things like SHA256_hash_of and poseidon_hash_of etc.
101 | ```
102 |
103 | ```
104 | ecdsa_priv_to_pub_of(A["pubkey"], B["privkey"])
105 | ```
106 |
107 |
108 |
109 | [^builtin]: TODO List of built-in statements is not yet complete.
110 |
--------------------------------------------------------------------------------
/book/src/values.md:
--------------------------------------------------------------------------------
1 | # POD value types
2 | From the frontend perspective, POD values may be one of the following[^type] types: four atomic types
3 | - `Integer`
4 | - `Bool`
5 | - `String`
6 | - `Raw`
7 |
8 | and three compound types
9 | - `Dictionary`
10 | - `Array`
11 | - `Set`.
12 |
13 | From the backend perspective, however, these types will all be encoded as a fixed number of field elements, the number being chosen so as to accommodate the `Integer` type as well as hashes to represent the `String` and compound types with the appropriate level of security.
14 |
15 | In the case of the Plonky2 backend with 100 bits of security, all of these types are represented as 4 field elements, the output of the Poseidon hash function used there being
16 |
17 | $$\texttt{HashOut}\simeq\texttt{[GoldilocksField; 4]}.$$
18 |
19 |
20 | ## `Integer`
21 | In the frontend, this type is none other than `u64`[^i64]. In the backend, it will be appropriately embedded into the codomain of the canonical hash function.
22 |
23 | ## `Bool`
24 | In the frontend, this is a simple bool. In the backend, it will have the same encoding as an `Integer` `0` (for `false`) or `1` (for `true`).
25 |
26 | ## `String`
27 | In the frontend, this type corresponds to the usual `String`. In the backend, the string will be mapped to a sequence of field elements and hashed with the hash function employed there, thus being represented by its hash.
28 |
29 | ## `Raw`
30 | "Raw" is short for "raw value". A `Raw` exposes a [backend `Value`](./backendtypes.md) on the frontend.
31 |
32 | With the plonky2 backend, a `Raw` is a tuple of 4 elements of the Goldilocks field.
33 |
34 | ## Dictionary, array, set
35 |
36 | The array, set and dictionary types are similar types. While all of them use [a merkletree](./merkletree.md) under the hood, each of them uses it in a specific way:
37 | - **dictionary**: the user original keys and values are hashed to be used in the leaf.
38 | - `leaf.key=hash(original_key)`
39 | - `leaf.value=hash(original_value)`
40 | - **array**: the elements are placed at the value field of each leaf, and the key field is just the array index (integer)
41 | - `leaf.key=i`
42 | - `leaf.value=original_value`
43 | - **set**: both the key and the value are set to the hash of the value.
44 | - `leaf.key=hash(original_value)`
45 | - `leaf.value=hash(original_value)`
46 |
In the three types, the merkletree under the hood allows one to prove inclusion & non-inclusion of a particular entry of the {dictionary/array/set} element.
48 |
49 | A concrete implementation of dictionary, array, set can be found at [pod2/src/middleware/containers.rs](https://github.com/0xPARC/pod2/blob/main/src/middleware/containers.rs).
50 |
51 |
52 |
53 | ---
54 |
55 |
56 | [^type]: TODO In POD 1, there is the `cryptographic` type, which has the same type of the output of the hash function employed there. It is useful for representing arbitrary hashes. Do we want to expand our type list to include a similar type, which would correspond to the `HashOut` type in the case of Plonky2? This would not have a uniform representation in the frontend if we continue to be backend agnostic unless we fix the number of bits to e.g. 256, in which case we would actually need one more field element in the case of Plonky2.
57 | [^i64]: TODO Replace this with `i64` once operational details have been worked out.
58 | [^aux]: Definitions of `drop` and `take` may be found [here](https://hackage.haskell.org/package/haskell98-2.0.0.3/docs/Prelude.html#v:drop) and [here](https://hackage.haskell.org/package/haskell98-2.0.0.3/docs/Prelude.html#v:take).
59 |
60 |
--------------------------------------------------------------------------------
/build.rs:
--------------------------------------------------------------------------------
// With only the in-memory cache enabled there is no build-time metadata to emit.
#[cfg(feature = "mem_cache")]
fn main() {}
3 |
// With the disk cache enabled, emit git metadata (branch, SHA, commit date, ...)
// as `cargo:rustc-env=VERGEN_GIT_*` vars so the build can be keyed by revision.
//
// Returns an error if git metadata cannot be collected or emitted.
#[cfg(feature = "disk_cache")]
fn main() -> Result<(), Box<dyn std::error::Error>> {
    use vergen_gitcl::{Emitter, GitclBuilder};
    // Example of injected vars:
    // cargo:rustc-env=VERGEN_GIT_BRANCH=master
    // cargo:rustc-env=VERGEN_GIT_COMMIT_AUTHOR_EMAIL=emitter@vergen.com
    // cargo:rustc-env=VERGEN_GIT_COMMIT_AUTHOR_NAME=Jason Ozias
    // cargo:rustc-env=VERGEN_GIT_COMMIT_COUNT=44
    // cargo:rustc-env=VERGEN_GIT_COMMIT_DATE=2024-01-30
    // cargo:rustc-env=VERGEN_GIT_COMMIT_MESSAGE=depsup
    // cargo:rustc-env=VERGEN_GIT_COMMIT_TIMESTAMP=2024-01-30T21:43:43.000000000Z
    // cargo:rustc-env=VERGEN_GIT_DESCRIBE=0.1.0-beta.1-15-g728e25c
    // cargo:rustc-env=VERGEN_GIT_SHA=728e25ca5bb7edbbc505f12b28c66b2b27883cf1
    let gitcl = GitclBuilder::all_git()?;
    Emitter::default().add_instructions(&gitcl)?.emit()?;

    Ok(())
}
22 |
--------------------------------------------------------------------------------
/examples/main_pod_points.rs:
--------------------------------------------------------------------------------
1 | #![allow(clippy::uninlined_format_args)] // TODO: Remove this in another PR
2 | //! Example of building main pods that verify signed pods and other main pods using custom
3 | //! predicates
4 | //!
5 | //! The example follows a scenario where a game issues signed pods to players with the points
6 | //! accumulated after finishing each game level. Then we build a custom predicate to prove that
7 | //! the sum of points from level 1 and 2 for a player is over 9000.
8 | //!
9 | //! Run in real mode: `cargo run --release --example main_pod_points`
10 | //! Run in mock mode: `cargo run --release --example main_pod_points -- --mock`
11 | use std::env;
12 |
13 | use pod2::{
14 | backends::plonky2::{
15 | basetypes::DEFAULT_VD_SET, mainpod::Prover, mock::mainpod::MockProver,
16 | primitives::ec::schnorr::SecretKey, signer::Signer,
17 | },
18 | frontend::{MainPodBuilder, Operation, SignedDictBuilder},
19 | lang::parse,
20 | middleware::{MainPodProver, Params, VDSet},
21 | };
22 |
23 | fn main() -> Result<(), Box> {
24 | env_logger::init();
25 | let args: Vec = env::args().collect();
26 | let mock = args.get(1).is_some_and(|arg1| arg1 == "--mock");
27 | if mock {
28 | println!("Using MockMainPod")
29 | } else {
30 | println!("Using MainPod")
31 | }
32 |
33 | let params = Params::default();
34 |
35 | let mock_prover = MockProver {};
36 | let real_prover = Prover {};
37 | let (vd_set, prover): (_, &dyn MainPodProver) = if mock {
38 | (&VDSet::new(8, &[])?, &mock_prover)
39 | } else {
40 | println!("Prebuilding circuits to calculate vd_set...");
41 | let vd_set = &*DEFAULT_VD_SET;
42 | println!("vd_set calculation complete");
43 | (vd_set, &real_prover)
44 | };
45 |
46 | // Create a schnorr key pair to sign pods
47 | let game_sk = SecretKey::new_rand();
48 | let game_pk = game_sk.public_key();
49 |
50 | let game_signer = Signer(game_sk);
51 |
52 | // Build 2 signed pods where the game assigns points to a player that has completed a level.
53 | let mut builder = SignedDictBuilder::new(¶ms);
54 | builder.insert("player", "Alice");
55 | builder.insert("level", 1);
56 | builder.insert("points", 3512);
57 | let pod_points_lvl_1 = builder.sign(&game_signer)?;
58 | pod_points_lvl_1.verify()?;
59 | println!("# pod_points_lvl_1:\n{}", pod_points_lvl_1);
60 |
61 | let mut builder = SignedDictBuilder::new(¶ms);
62 | builder.insert("player", "Alice");
63 | builder.insert("level", 2);
64 | builder.insert("points", 5771);
65 | let pod_points_lvl_2 = builder.sign(&game_signer)?;
66 | pod_points_lvl_2.verify()?;
67 | println!("# pod_points_lvl_2:\n{}", pod_points_lvl_2);
68 |
69 | // Build a MainPod to prove >9000 points from sum of level 1 and 2
70 |
71 | // Declare the custom predicate
72 | let input = format!(
73 | r#"
74 | points(player, level, points, private: points_dict) = AND(
75 | SignedBy(points_dict, PublicKey({game_pk}))
76 | Contains(points_dict, "player", player)
77 | Contains(points_dict, "level", level)
78 | Contains(points_dict, "points", points)
79 | )
80 |
81 | over_9000(player, private: points_lvl_1, points_lvl_2, points_total) = AND(
82 | points(player, 1, points_lvl_1)
83 | points(player, 2, points_lvl_2)
84 | SumOf(points_total, points_lvl_1, points_lvl_2)
85 | Gt(points_total, 9000)
86 | )
87 | "#,
88 | game_pk = game_pk,
89 | );
90 | println!("# custom predicate batch:{}", input);
91 | let batch = parse(&input, ¶ms, &[])?.custom_batch;
92 | let points_pred = batch.predicate_ref_by_name("points").unwrap();
93 | let over_9000_pred = batch.predicate_ref_by_name("over_9000").unwrap();
94 |
95 | // Build a pod to prove the statement `points("Alice", 1, 3512)`
96 | let mut builder = MainPodBuilder::new(¶ms, vd_set);
97 | let st_signed_by = builder.priv_op(Operation::dict_signed_by(&pod_points_lvl_1))?;
98 | let st_player = builder.priv_op(Operation::dict_contains(
99 | pod_points_lvl_1.dict.clone(),
100 | "player",
101 | "Alice",
102 | ))?;
103 | let st_level = builder.priv_op(Operation::dict_contains(
104 | pod_points_lvl_1.dict.clone(),
105 | "level",
106 | 1,
107 | ))?;
108 | let st_points = builder.priv_op(Operation::dict_contains(
109 | pod_points_lvl_1.dict.clone(),
110 | "points",
111 | 3512,
112 | ))?;
113 | let st_points_lvl_1 = builder.pub_op(Operation::custom(
114 | points_pred.clone(),
115 | [st_signed_by, st_player, st_level, st_points],
116 | ))?;
117 |
118 | let pod_alice_lvl_1_points = builder.prove(prover).unwrap();
119 | println!("# pod_alice_lvl_1_points\n:{}", pod_alice_lvl_1_points);
120 | pod_alice_lvl_1_points.pod.verify().unwrap();
121 |
122 | // Build a pod to prove the statement `points("Alice", 2, 5771)`
123 | let mut builder = MainPodBuilder::new(¶ms, vd_set);
124 | let st_signed_by = builder.priv_op(Operation::dict_signed_by(&pod_points_lvl_2))?;
125 | let st_player = builder.priv_op(Operation::dict_contains(
126 | pod_points_lvl_2.dict.clone(),
127 | "player",
128 | "Alice",
129 | ))?;
130 | let st_level = builder.priv_op(Operation::dict_contains(
131 | pod_points_lvl_2.dict.clone(),
132 | "level",
133 | 2,
134 | ))?;
135 | let st_points = builder.priv_op(Operation::dict_contains(
136 | pod_points_lvl_2.dict.clone(),
137 | "points",
138 | 5771,
139 | ))?;
140 |
141 | let st_points_lvl_2 = builder.pub_op(Operation::custom(
142 | points_pred,
143 | [st_signed_by, st_player, st_level, st_points],
144 | ))?;
145 | let pod_alice_lvl_2_points = builder.prove(prover).unwrap();
146 | println!("# pod_alice_lvl_2_points\n:{}", pod_alice_lvl_2_points);
147 | pod_alice_lvl_2_points.pod.verify().unwrap();
148 |
149 | // Build a pod to prove the statement `over_9000("Alice")`
150 | let mut builder = MainPodBuilder::new(¶ms, vd_set);
151 | builder.add_pod(pod_alice_lvl_1_points);
152 | builder.add_pod(pod_alice_lvl_2_points);
153 | let st_points_total = builder.priv_op(Operation::sum_of(3512 + 5771, 3512, 5771))?;
154 | let st_gt_9000 = builder.priv_op(Operation::gt(3512 + 5771, 9000))?;
155 | let _st_over_9000 = builder.pub_op(Operation::custom(
156 | over_9000_pred,
157 | [
158 | st_points_lvl_1,
159 | st_points_lvl_2,
160 | st_points_total,
161 | st_gt_9000,
162 | ],
163 | ));
164 | let pod_alice_over_9000 = builder.prove(prover).unwrap();
165 | println!("# pod_alice_over_9000\n:{}", pod_alice_over_9000);
166 | pod_alice_over_9000.pod.verify().unwrap();
167 |
168 | Ok(())
169 | }
170 |
--------------------------------------------------------------------------------
/examples/signed_dict.rs:
--------------------------------------------------------------------------------
1 | #![allow(clippy::uninlined_format_args)] // TODO: Remove this in another PR
2 | //! Simple example of building a signed dict and verifying it
3 | //!
4 | //! Run: `cargo run --release --example signed_dict`
5 | use std::collections::HashSet;
6 |
7 | use pod2::{
8 | backends::plonky2::{primitives::ec::schnorr::SecretKey, signer::Signer},
9 | frontend::SignedDictBuilder,
10 | middleware::{containers::Set, Params, Value},
11 | };
12 |
13 | fn main() -> Result<(), Box> {
14 | let params = Params::default();
15 |
16 | // Create a schnorr key pair to sign the dict
17 | let sk = SecretKey::new_rand();
18 | let pk = sk.public_key();
19 | println!("Public key: {}\n", pk);
20 |
21 | let signer = Signer(sk);
22 |
23 | // Build the signed dict
24 | let mut builder = SignedDictBuilder::new(¶ms);
25 | // The values can be String, i64, bool, Array, Set, Dictionary, ...
26 | builder.insert("name", "Alice");
27 | builder.insert("lucky_number", 42);
28 | builder.insert("human", true);
29 | let friends_set: HashSet = ["Bob", "Charlie", "Dave"]
30 | .into_iter()
31 | .map(Value::from)
32 | .collect();
33 | builder.insert(
34 | "friends",
35 | Set::new(params.max_merkle_proofs_containers, friends_set)?,
36 | );
37 |
38 | // Sign the dict and verify it
39 | let signed_dict = builder.sign(&signer)?;
40 | signed_dict.verify()?;
41 |
42 | println!("{}", signed_dict);
43 |
44 | Ok(())
45 | }
46 |
--------------------------------------------------------------------------------
/rust-analyzer.toml:
--------------------------------------------------------------------------------
1 | imports.prefix = "crate"
2 |
--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
1 | [toolchain]
2 | channel = "nightly-2025-07-02"
3 | components = ["clippy", "rustfmt"]
4 |
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | imports_granularity = "Crate"
2 | reorder_imports = true # default
3 | group_imports = "StdExternalCrate"
4 |
--------------------------------------------------------------------------------
/src/backends/mod.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "backend_plonky2")]
2 | pub mod plonky2;
3 |
--------------------------------------------------------------------------------
/src/backends/plonky2/basetypes.rs:
--------------------------------------------------------------------------------
1 | //! This file exposes the basetypes to be used in the middleware when the `backend_plonky2` feature
2 | //! is enabled.
3 | //! See src/middleware/basetypes.rs for more details.
4 |
5 | /// F is the native field we use everywhere. Currently it's Goldilocks from plonky2
6 | pub use plonky2::field::goldilocks_field::GoldilocksField as F;
7 | use plonky2::{
8 | field::extension::quadratic::QuadraticExtension,
9 | hash::{hash_types, poseidon::PoseidonHash},
10 | plonk::{circuit_builder, circuit_data, config::GenericConfig, proof},
11 | };
12 | use schemars::JsonSchema;
13 | use serde::{Deserialize, Deserializer, Serialize};
14 |
15 | /// D defines the extension degree of the field used in the Plonky2 proofs (quadratic extension).
16 | pub const D: usize = 2;
17 |
18 | /// FE is the degree D field extension used in Plonky2 proofs.
19 | pub type FE = QuadraticExtension;
20 |
21 | /// C is the Plonky2 config used in POD2 to work with Plonky2 recursion.
22 | #[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Serialize)]
23 | pub struct C;
24 | impl GenericConfig for C {
25 | type F = F;
26 | type FE = FE;
27 | type Hasher = PoseidonHash;
28 | type InnerHasher = PoseidonHash;
29 | }
30 |
31 | pub type CircuitData = circuit_data::CircuitData;
32 | pub type CommonCircuitData = circuit_data::CommonCircuitData;
33 | pub type ProverOnlyCircuitData = circuit_data::ProverOnlyCircuitData;
34 | pub type VerifierOnlyCircuitData = circuit_data::VerifierOnlyCircuitData;
35 | pub type VerifierCircuitData = circuit_data::VerifierCircuitData;
36 | pub type CircuitBuilder = circuit_builder::CircuitBuilder;
37 | pub type Proof = proof::Proof;
38 | pub type ProofWithPublicInputs = proof::ProofWithPublicInputs;
39 | pub type HashOut = hash_types::HashOut;
40 | use std::{collections::HashMap, sync::LazyLock};
41 |
42 | pub use crate::backends::plonky2::{
43 | primitives::ec::{
44 | curve::Point as PublicKey,
45 | schnorr::{SecretKey, Signature},
46 | },
47 | recursion::circuit::hash_verifier_data,
48 | };
49 | use crate::{
50 | backends::plonky2::{
51 | mainpod::cache_get_rec_main_pod_verifier_circuit_data,
52 | primitives::merkletree::MerkleClaimAndProof,
53 | },
54 | middleware::{containers::Array, Hash, Params, RawValue, Result, Value},
55 | };
56 |
57 | pub static DEFAULT_VD_LIST: LazyLock> = LazyLock::new(|| {
58 | let params = Params::default();
59 | // NOTE: We only include the recursive MainPod with default parameters here. We don't need to
60 | // include the verifying key of the EmptyPod because it's an Introduction pod and its verifying
61 | // key appears in its statement in a self-describing way.
62 | vec![cache_get_rec_main_pod_verifier_circuit_data(¶ms)
63 | .verifier_only
64 | .clone()]
65 | });
66 |
67 | pub static DEFAULT_VD_SET: LazyLock = LazyLock::new(|| {
68 | let params = Params::default();
69 | let vds = &*DEFAULT_VD_LIST;
70 | VDSet::new(params.max_depth_mt_vds, vds).unwrap()
71 | });
72 |
73 | /// VDSet is the set of the allowed verifier_data hashes. When proving a
74 | /// MainPod, the circuit will enforce that all the used verifier_datas for
75 | /// verifying the recursive proofs of previous PODs appears in the VDSet.
76 | /// The VDSet struct that allows to get the specific merkle proofs for the given
77 | /// verifier_data.
78 | #[derive(Clone, Debug, Serialize, JsonSchema)]
79 | pub struct VDSet {
80 | #[serde(skip)]
81 | #[schemars(skip)]
82 | root: Hash,
83 | // (verifier_data's hash, merkleproof)
84 | #[serde(skip)]
85 | #[schemars(skip)]
86 | proofs_map: HashMap,
87 | tree_depth: usize,
88 | vds_hashes: Vec,
89 | }
90 |
impl PartialEq for VDSet {
    // Equality deliberately ignores `proofs_map`: it is fully derived from
    // `tree_depth` and `vds_hashes`, so comparing root + inputs is sufficient.
    fn eq(&self, other: &Self) -> bool {
        self.root == other.root
            && self.tree_depth == other.tree_depth
            && self.vds_hashes == other.vds_hashes
    }
}
impl Eq for VDSet {}
99 |
100 | impl VDSet {
101 | fn new_from_vds_hashes(tree_depth: usize, mut vds_hashes: Vec) -> Result {
102 | // before using the hash values, sort them, so that each set of
103 | // verifier_datas gets the same VDSet root
104 | vds_hashes.sort();
105 |
106 | let array = Array::new(
107 | tree_depth,
108 | vds_hashes.iter().map(|vd| Value::from(*vd)).collect(),
109 | )?;
110 |
111 | let root = array.commitment();
112 | let mut proofs_map = HashMap::::new();
113 |
114 | for (i, vd) in vds_hashes.iter().enumerate() {
115 | let (value, proof) = array.prove(i)?;
116 | let p = MerkleClaimAndProof {
117 | root,
118 | key: RawValue::from(i as i64),
119 | value: value.raw(),
120 | proof,
121 | };
122 | proofs_map.insert(*vd, p);
123 | }
124 | Ok(Self {
125 | root,
126 | proofs_map,
127 | tree_depth,
128 | vds_hashes,
129 | })
130 | }
131 | /// builds the verifier_datas tree, and returns the root and the proofs
132 | pub fn new(tree_depth: usize, vds: &[VerifierOnlyCircuitData]) -> Result {
133 | // compute the verifier_data's hashes
134 | let vds_hashes: Vec = vds
135 | .iter()
136 | .map(crate::backends::plonky2::recursion::circuit::hash_verifier_data)
137 | .collect::>();
138 |
139 | let vds_hashes: Vec = vds_hashes
140 | .into_iter()
141 | .map(|h| Hash(h.elements))
142 | .collect::>();
143 |
144 | Self::new_from_vds_hashes(tree_depth, vds_hashes)
145 | }
146 | pub fn root(&self) -> Hash {
147 | self.root
148 | }
149 | /// returns the vector of merkle proofs corresponding to the given verifier_datas
150 | pub fn get_vds_proof(&self, vd: &VerifierOnlyCircuitData) -> Result {
151 | let verifier_data_hash =
152 | crate::backends::plonky2::recursion::circuit::hash_verifier_data(vd);
153 | Ok(self
154 | .proofs_map
155 | .get(&Hash(verifier_data_hash.elements))
156 | .ok_or(crate::middleware::Error::custom(
157 | "verifier_data not found in VDSet".to_string(),
158 | ))?
159 | .clone())
160 | }
161 | /// Returns true if the `verifier_data_hash` is in the set
162 | pub fn contains(&self, verifier_data_hash: HashOut) -> bool {
163 | self.proofs_map
164 | .contains_key(&Hash(verifier_data_hash.elements))
165 | }
166 | }
167 |
168 | impl<'de> Deserialize<'de> for VDSet {
169 | fn deserialize(deserializer: D) -> Result
170 | where
171 | D: Deserializer<'de>,
172 | {
173 | #[derive(Deserialize)]
174 | struct Aux {
175 | tree_depth: usize,
176 | vds_hashes: Vec,
177 | }
178 | let aux = Aux::deserialize(deserializer)?;
179 | VDSet::new_from_vds_hashes(aux.tree_depth, aux.vds_hashes).map_err(serde::de::Error::custom)
180 | }
181 | }
182 |
--------------------------------------------------------------------------------
/src/backends/plonky2/circuits/hash.rs:
--------------------------------------------------------------------------------
1 | use plonky2::{
2 | hash::{
3 | hash_types::{HashOutTarget, RichField, NUM_HASH_OUT_ELTS},
4 | hashing::PlonkyPermutation,
5 | },
6 | iop::target::Target,
7 | plonk::config::AlgebraicHasher,
8 | };
9 |
10 | use crate::{backends::plonky2::basetypes::CircuitBuilder, middleware::F};
11 |
12 | /// Precompute the hash state by absorbing all full chunks from `inputs` and return the reminder
13 | /// elements that didn't fit into a chunk.
14 | pub fn precompute_hash_state>(inputs: &[F]) -> (P, &[F]) {
15 | let (inputs, inputs_rem) = inputs.split_at((inputs.len() / P::RATE) * P::RATE);
16 | let mut perm = P::new(core::iter::repeat(F::ZERO));
17 |
18 | // Absorb all inputs up to the biggest multiple of RATE.
19 | for input_chunk in inputs.chunks(P::RATE) {
20 | perm.set_from_slice(input_chunk, 0);
21 | perm.permute();
22 | }
23 |
24 | (perm, inputs_rem)
25 | }
26 |
27 | /// Hash `inputs` starting from a circuit-constant `perm` state.
28 | pub fn hash_from_state_circuit, P: PlonkyPermutation>(
29 | builder: &mut CircuitBuilder,
30 | perm: P,
31 | inputs: &[Target],
32 | ) -> HashOutTarget {
33 | let mut state =
34 | H::AlgebraicPermutation::new(perm.as_ref().iter().map(|v| builder.constant(*v)));
35 |
36 | // Absorb all input chunks.
37 | for input_chunk in inputs.chunks(H::AlgebraicPermutation::RATE) {
38 | // Overwrite the first r elements with the inputs. This differs from a standard sponge,
39 | // where we would xor or add in the inputs. This is a well-known variant, though,
40 | // sometimes called "overwrite mode".
41 | state.set_from_slice(input_chunk, 0);
42 | state = builder.permute::(state);
43 | }
44 |
45 | let num_outputs = NUM_HASH_OUT_ELTS;
46 | // Squeeze until we have the desired number of outputs.
47 | let mut outputs = Vec::with_capacity(num_outputs);
48 | loop {
49 | for &s in state.squeeze() {
50 | outputs.push(s);
51 | if outputs.len() == num_outputs {
52 | return HashOutTarget::from_vec(outputs);
53 | }
54 | }
55 | state = builder.permute::(state);
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/src/backends/plonky2/circuits/metrics.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | collections::HashMap,
3 | sync::{LazyLock, Mutex},
4 | };
5 |
6 | use plonky2::plonk::circuit_builder::CircuitBuilder;
7 |
8 | use crate::{backends::plonky2::basetypes::D, middleware::F};
9 |
/// Global gate-count registry used by the `measure_gates_*` macros.
pub static METRICS: LazyLock<Mutex<Metrics>> = LazyLock::new(|| Mutex::new(Metrics::default()));

/// Accumulates (measure name, gate count) samples; `stack` holds the names of
/// currently-open measures so nested measures report slash-separated paths.
#[derive(Default)]
pub struct Metrics {
    gates: Vec<(String, usize)>,
    stack: Vec<String>,
}

/// Guard returned by `Metrics::begin`; must be finished with `Metrics::end`.
pub struct MetricsMeasure {
    name: String,
    start_num_gates: usize,
    ended: bool,
}
23 |
impl Drop for MetricsMeasure {
    // Dropping a measure that was never passed to `Metrics::end` is a
    // programming error (the gate delta would silently go unrecorded), so we
    // fail loudly instead.
    fn drop(&mut self) {
        if !self.ended {
            panic!("Measure \"{}\" not ended", self.name);
        }
    }
}
31 |
32 | impl Metrics {
33 | #[must_use]
34 | pub fn begin(
35 | &mut self,
36 | builder: &CircuitBuilder,
37 | name: impl Into,
38 | ) -> MetricsMeasure {
39 | let name = name.into();
40 | self.stack.push(name);
41 | MetricsMeasure {
42 | name: self.stack.join("/"),
43 | start_num_gates: builder.num_gates(),
44 | ended: false,
45 | }
46 | }
47 | pub fn end(&mut self, builder: &CircuitBuilder, mut measure: MetricsMeasure) {
48 | self.stack.pop();
49 | measure.ended = true;
50 | let num_gates = builder.num_gates();
51 | let delta_gates = num_gates - measure.start_num_gates;
52 | self.gates.push((measure.name.clone(), delta_gates));
53 | }
54 | pub fn print(&self) {
55 | println!("Gate count:");
56 | let mut count = HashMap::new();
57 | let mut list = Vec::new();
58 | for (name, num_gates) in &self.gates {
59 | let (n, gates) = count.entry(name).or_insert((0, 0));
60 | if *n == 0 {
61 | list.push(name);
62 | }
63 | *n += 1;
64 | *gates += num_gates;
65 | }
66 | for name in list.iter().rev() {
67 | let (n, total_gates) = count.get(name).expect("key inserted in previous loop");
68 | let avg_gates: f64 = (*total_gates as f64) / (*n as f64);
69 | println!("- {}: {} x {:.01} = {}", name, n, avg_gates, total_gates);
70 | }
71 | }
72 | pub fn reset(&mut self) {
73 | *self = Self::default()
74 | }
75 | }
76 |
#[cfg(feature = "metrics")]
pub mod measure_macros {
    /// Starts a gate-count measurement on the global `METRICS` registry;
    /// evaluates to a `MetricsMeasure` guard that must later be passed to
    /// `measure_gates_end!`.
    #[macro_export]
    macro_rules! measure_gates_begin {
        ($builder:expr, $name:expr) => {{
            use $crate::backends::plonky2::circuits::metrics::METRICS;
            let mut metrics = METRICS.lock().unwrap();
            metrics.begin($builder, $name)
        }};
    }

    /// Ends a measurement started with `measure_gates_begin!`, recording the
    /// gate delta under the measure's name.
    #[macro_export]
    macro_rules! measure_gates_end {
        ($builder:expr, $measure:expr) => {{
            use $crate::backends::plonky2::circuits::metrics::METRICS;
            let mut metrics = METRICS.lock().unwrap();
            metrics.end($builder, $measure);
        }};
    }

    /// Clears all recorded measurements.
    #[macro_export]
    macro_rules! measure_gates_reset {
        () => {{
            use $crate::backends::plonky2::circuits::metrics::METRICS;
            let mut metrics = METRICS.lock().unwrap();
            metrics.reset();
        }};
    }

    /// Prints the aggregated gate counts collected so far.
    #[macro_export]
    macro_rules! measure_gates_print {
        () => {{
            use $crate::backends::plonky2::circuits::metrics::METRICS;
            let metrics = METRICS.lock().unwrap();
            metrics.print();
        }};
    }
}
115 |
#[cfg(not(feature = "metrics"))]
pub mod measure_macros {
    /// No-op stand-in when the "metrics" feature is disabled; expands to `()`
    /// so the guard binding at call sites still type-checks.
    #[macro_export]
    macro_rules! measure_gates_begin {
        ($builder:expr, $name:expr) => {
            ()
        };
    }

    /// No-op: just consumes the `()` guard produced by `measure_gates_begin!`.
    #[macro_export]
    macro_rules! measure_gates_end {
        ($builder:expr, $measure:expr) => {
            let _ = $measure;
        };
    }

    /// No-op when the "metrics" feature is disabled.
    #[macro_export]
    macro_rules! measure_gates_reset {
        () => {};
    }

    /// Prints a notice that gate counting is disabled.
    #[macro_export]
    macro_rules! measure_gates_print {
        () => {{
            println!("Gate count disabled: \"metrics\" feature not enabled.");
        }};
    }
}
144 |
--------------------------------------------------------------------------------
/src/backends/plonky2/circuits/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod common;
2 | pub mod hash;
3 | pub mod mainpod;
4 | pub mod metrics;
5 | pub mod mux_table;
6 | pub mod utils;
7 |
--------------------------------------------------------------------------------
/src/backends/plonky2/circuits/mux_table.rs:
--------------------------------------------------------------------------------
1 | use std::iter;
2 |
3 | use itertools::Itertools;
4 | use plonky2::{
5 | field::{extension::Extendable, types::Field},
6 | hash::{
7 | hash_types::{HashOutTarget, RichField},
8 | poseidon::{PoseidonHash, PoseidonPermutation},
9 | },
10 | iop::{
11 | generator::{GeneratedValues, SimpleGenerator},
12 | target::{BoolTarget, Target},
13 | witness::{PartitionWitness, Witness, WitnessWrite},
14 | },
15 | plonk::circuit_data::CommonCircuitData,
16 | util::serialization::{Buffer, IoResult, Read, Write},
17 | };
18 |
19 | use crate::{
20 | backends::plonky2::{
21 | basetypes::CircuitBuilder,
22 | circuits::{
23 | common::{CircuitBuilderPod, Flattenable, IndexTarget},
24 | hash::{hash_from_state_circuit, precompute_hash_state},
25 | },
26 | },
27 | measure_gates_begin, measure_gates_end,
28 | middleware::{Params, F},
29 | };
30 |
31 | // This structure allows multiplexing multiple tables into one by using tags. The table entries
32 | // are computed by hashing the concatenation of the tag with the flattened target, with zero
33 | // padding to normalize the size of all flattened entries. We use zero-padding on then reverse the
34 | // array so that smaller entries can skip the initial hashes by using the precomputed hash state of
35 | // the prefixed zeroes.
36 | // The table offers an indexing API that returns a flattened entry that includes the "unhashing",
37 | // this allows doing a single lookup for different possible tagged entries at the same time.
38 | pub struct MuxTableTarget {
39 | params: Params,
40 | max_flattened_entry_len: usize,
41 | hashed_tagged_entries: Vec,
42 | tagged_entries: Vec>,
43 | }
44 |
45 | impl MuxTableTarget {
46 | pub fn new(params: &Params, max_flattened_entry_len: usize) -> Self {
47 | Self {
48 | params: params.clone(),
49 | max_flattened_entry_len,
50 | hashed_tagged_entries: Vec::new(),
51 | tagged_entries: Vec::new(),
52 | }
53 | }
54 |
55 | #[allow(clippy::len_without_is_empty)]
56 | pub fn len(&self) -> usize {
57 | self.hashed_tagged_entries.len()
58 | }
59 |
60 | pub fn push(&mut self, builder: &mut CircuitBuilder, tag: u32, entry: &T) {
61 | let flattened_entry = entry.flatten();
62 | self.push_flattened(builder, tag, &flattened_entry);
63 | }
64 |
65 | pub fn push_flattened(
66 | &mut self,
67 | builder: &mut CircuitBuilder,
68 | tag: u32,
69 | flattened_entry: &[Target],
70 | ) {
71 | let measure = measure_gates_begin!(builder, "HashTaggedTblEntry");
72 | assert!(flattened_entry.len() <= self.max_flattened_entry_len);
73 | let flattened = [&[builder.constant(F(tag as u64))], flattened_entry].concat();
74 | self.tagged_entries.push(flattened.clone());
75 |
76 | let tagged_entry_max_len = 1 + self.max_flattened_entry_len;
77 | let front_pad_elts = iter::repeat(F::ZERO)
78 | .take(tagged_entry_max_len - flattened.len())
79 | .collect_vec();
80 |
81 | let (perm, front_pad_elts_rem) =
82 | precompute_hash_state::>(&front_pad_elts);
83 |
84 | let rev_flattened = flattened.iter().rev().copied();
85 | // Precompute the Poseidon state for the initial padding chunks
86 | let inputs = front_pad_elts_rem
87 | .iter()
88 | .map(|v| builder.constant(*v))
89 | .chain(rev_flattened)
90 | .collect_vec();
91 | let hash =
92 | hash_from_state_circuit::>(builder, perm, &inputs);
93 |
94 | measure_gates_end!(builder, measure);
95 | self.hashed_tagged_entries.push(hash);
96 | }
97 |
98 | pub fn get(&self, builder: &mut CircuitBuilder, index: &IndexTarget) -> TableEntryTarget {
99 | let measure = measure_gates_begin!(builder, "GetTaggedTblEntry");
100 | let entry_hash = builder.vec_ref(&self.params, &self.hashed_tagged_entries, index);
101 |
102 | let mut rev_resolved_tagged_flattened =
103 | builder.add_virtual_targets(1 + self.max_flattened_entry_len);
104 | let query_hash =
105 | builder.hash_n_to_hash_no_pad::(rev_resolved_tagged_flattened.clone());
106 | builder.connect_flattenable(&entry_hash, &query_hash);
107 | rev_resolved_tagged_flattened.reverse();
108 | let resolved_tagged_flattened = rev_resolved_tagged_flattened;
109 |
110 | builder.add_simple_generator(TableGetGenerator {
111 | index: index.clone(),
112 | tagged_entries: self.tagged_entries.clone(),
113 | get_tagged_entry: resolved_tagged_flattened.clone(),
114 | });
115 | measure_gates_end!(builder, measure);
116 | TableEntryTarget {
117 | params: self.params.clone(),
118 | tagged_flattened_entry: resolved_tagged_flattened,
119 | }
120 | }
121 | }
122 |
123 | #[derive(Debug, Clone, Default)]
124 | pub struct TableGetGenerator {
125 | index: IndexTarget,
126 | tagged_entries: Vec>,
127 | get_tagged_entry: Vec,
128 | }
129 |
130 | impl, const D: usize> SimpleGenerator for TableGetGenerator {
131 | fn id(&self) -> String {
132 | "TableGetGenerator".to_string()
133 | }
134 |
135 | fn dependencies(&self) -> Vec {
136 | [self.index.low, self.index.high]
137 | .into_iter()
138 | .chain(self.tagged_entries.iter().flatten().copied())
139 | .collect()
140 | }
141 |
142 | fn run_once(
143 | &self,
144 | witness: &PartitionWitness,
145 | out_buffer: &mut GeneratedValues,
146 | ) -> anyhow::Result<()> {
147 | let index_low = witness.get_target(self.index.low);
148 | let index_high = witness.get_target(self.index.high);
149 | let index = (index_low + index_high * F::from_canonical_usize(1 << 6)).to_canonical_u64();
150 |
151 | let entry = witness.get_targets(&self.tagged_entries[index as usize]);
152 |
153 | for (target, value) in self.get_tagged_entry.iter().zip(
154 | entry
155 | .iter()
156 | .chain(iter::repeat(&F::ZERO).take(self.get_tagged_entry.len())),
157 | ) {
158 | out_buffer.set_target(*target, *value)?;
159 | }
160 |
161 | Ok(())
162 | }
163 |
164 | fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> {
165 | dst.write_usize(self.index.max_array_len)?;
166 | dst.write_target(self.index.low)?;
167 | dst.write_target(self.index.high)?;
168 |
169 | dst.write_usize(self.tagged_entries.len())?;
170 | for tagged_entry in &self.tagged_entries {
171 | dst.write_target_vec(tagged_entry)?;
172 | }
173 |
174 | dst.write_target_vec(&self.get_tagged_entry)
175 | }
176 |
177 | fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult {
178 | let index = IndexTarget {
179 | max_array_len: src.read_usize()?,
180 | low: src.read_target()?,
181 | high: src.read_target()?,
182 | };
183 | let len = src.read_usize()?;
184 | let mut tagged_entries = Vec::with_capacity(len);
185 | for _ in 0..len {
186 | tagged_entries.push(src.read_target_vec()?);
187 | }
188 | let get_tagged_entry = src.read_target_vec()?;
189 |
190 | Ok(Self {
191 | index,
192 | tagged_entries,
193 | get_tagged_entry,
194 | })
195 | }
196 | }
197 |
198 | pub struct TableEntryTarget {
199 | params: Params,
200 | tagged_flattened_entry: Vec,
201 | }
202 |
203 | impl TableEntryTarget {
204 | pub fn as_type(
205 | &self,
206 | builder: &mut CircuitBuilder,
207 | tag: u32,
208 | ) -> (BoolTarget, T) {
209 | let tag_target = self.tagged_flattened_entry[0];
210 | let flattened_entry = &self.tagged_flattened_entry[1..];
211 | let entry = T::from_flattened(&self.params, &flattened_entry[..T::size(&self.params)]);
212 | let tag_expect = builder.constant(F(tag as u64));
213 | let tag_ok = builder.is_equal(tag_expect, tag_target);
214 | (tag_ok, entry)
215 | }
216 | }
217 |
--------------------------------------------------------------------------------
/src/backends/plonky2/circuits/utils.rs:
--------------------------------------------------------------------------------
1 | use plonky2::{
2 | field::extension::Extendable,
3 | hash::hash_types::RichField,
4 | iop::{
5 | generator::{GeneratedValues, SimpleGenerator},
6 | target::Target,
7 | witness::{PartitionWitness, Witness},
8 | },
9 | plonk::circuit_data::CommonCircuitData,
10 | util::serialization::{Buffer, IoError, IoResult, Read, Write},
11 | };
12 |
13 | /// Plonky2 generator that allows debugging values assigned to targets. This generator doesn't
14 | /// actually generate any value and doesn't assign any witness. Instead it can be registered to
15 | /// monitor targets and print their values once they are available.
16 | ///
17 | /// Example usage:
18 | /// ```rust,ignore
19 | /// builder.add_simple_generator(DebugGenerator::new(
20 | /// format!("values_{}", i),
21 | /// vec![v1, v2, v3],
22 | /// ));
23 | /// ```
24 | #[derive(Debug, Default, Clone)]
25 | pub struct DebugGenerator {
26 | pub(crate) name: String,
27 | pub(crate) xs: Vec,
28 | }
29 |
30 | impl DebugGenerator {
31 | pub fn new(name: String, xs: Vec) -> Self {
32 | Self { name, xs }
33 | }
34 | }
35 |
36 | impl, const D: usize> SimpleGenerator for DebugGenerator {
37 | fn id(&self) -> String {
38 | "DebugGenerator".to_string()
39 | }
40 |
41 | fn dependencies(&self) -> Vec {
42 | self.xs.clone()
43 | }
44 |
45 | fn run_once(
46 | &self,
47 | witness: &PartitionWitness,
48 | _out_buffer: &mut GeneratedValues,
49 | ) -> anyhow::Result<()> {
50 | let xs = witness.get_targets(&self.xs);
51 |
52 | println!("debug: values of {}", self.name);
53 | for (i, x) in xs.iter().enumerate() {
54 | println!("- {:03}: {}", i, x);
55 | }
56 | Ok(())
57 | }
58 |
59 | fn serialize(&self, dst: &mut Vec, _common_data: &CommonCircuitData) -> IoResult<()> {
60 | dst.write_usize(self.name.len())?;
61 | dst.write_all(self.name.as_bytes())?;
62 | dst.write_target_vec(&self.xs)
63 | }
64 |
65 | fn deserialize(src: &mut Buffer, _common_data: &CommonCircuitData) -> IoResult {
66 | let name_len = src.read_usize()?;
67 | let mut name_buf = vec![0; name_len];
68 | src.read_exact(&mut name_buf)?;
69 | let name = String::from_utf8(name_buf).map_err(|_| IoError)?;
70 | let xs = src.read_target_vec()?;
71 | Ok(Self { name, xs })
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/src/backends/plonky2/emptypod.rs:
--------------------------------------------------------------------------------
1 | use itertools::Itertools;
2 | use plonky2::{
3 | hash::hash_types::HashOutTarget,
4 | iop::witness::{PartialWitness, WitnessWrite},
5 | plonk::{
6 | circuit_data::{self, CircuitConfig},
7 | proof::ProofWithPublicInputs,
8 | },
9 | };
10 | use serde::{Deserialize, Serialize};
11 |
12 | use crate::{
13 | backends::plonky2::{
14 | basetypes::{CircuitBuilder, Proof, C, D},
15 | cache_get_standard_rec_main_pod_common_circuit_data,
16 | circuits::{
17 | common::{Flattenable, StatementTarget},
18 | mainpod::{calculate_statements_hash_circuit, PI_OFFSET_STATEMENTS_HASH},
19 | },
20 | deserialize_proof, deserialize_verifier_only,
21 | error::{Error, Result},
22 | hash_common_data,
23 | mainpod::{self, calculate_statements_hash},
24 | recursion::pad_circuit,
25 | serialization::{
26 | CircuitDataSerializer, VerifierCircuitDataSerializer, VerifierOnlyCircuitDataSerializer,
27 | },
28 | serialize_proof, serialize_verifier_only,
29 | },
30 | cache::{self, CacheEntry},
31 | middleware::{
32 | self, Hash, IntroPredicateRef, Params, Pod, PodType, Statement, ToFields, VDSet,
33 | VerifierOnlyCircuitData, EMPTY_HASH, F, HASH_SIZE,
34 | },
35 | timed,
36 | };
37 |
38 | fn empty_statement() -> Statement {
39 | Statement::Intro(
40 | IntroPredicateRef {
41 | name: "empty".to_string(),
42 | args_len: 0,
43 | verifier_data_hash: EMPTY_HASH,
44 | },
45 | vec![],
46 | )
47 | }
48 |
49 | #[derive(Clone, Serialize, Deserialize)]
50 | pub struct EmptyPodVerifyTarget {
51 | vds_root: HashOutTarget,
52 | }
53 |
54 | impl EmptyPodVerifyTarget {
55 | pub fn new_virtual(builder: &mut CircuitBuilder) -> Self {
56 | Self {
57 | vds_root: builder.add_virtual_hash(),
58 | }
59 | }
60 | pub fn set_targets(&self, pw: &mut PartialWitness, vds_root: Hash) -> Result<()> {
61 | Ok(pw.set_target_arr(&self.vds_root.elements, &vds_root.0)?)
62 | }
63 | }
64 |
// Registers the EmptyPod's public inputs: the hash of its single (constant)
// intro statement, followed by the VD-set root.
fn verify_empty_pod_circuit(
    params: &Params,
    builder: &mut CircuitBuilder,
    empty_pod: &EmptyPodVerifyTarget,
) {
    // The empty statement is a compile-time constant, so it is baked into the
    // circuit as constants rather than provided as witness.
    let empty_statement = StatementTarget::from_flattened(
        params,
        &builder.constants(&empty_statement().to_fields(params)),
    );
    let sts_hash = calculate_statements_hash_circuit(params, builder, &[empty_statement]);
    builder.register_public_inputs(&sts_hash.elements);
    builder.register_public_inputs(&empty_pod.vds_root.elements);
}
78 |
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct EmptyPod {
    params: Params,
    // Hash of the pod's public statements (its single intro statement).
    sts_hash: Hash,
    // Verifying key of the EmptyPod circuit.
    verifier_only: VerifierOnlyCircuitDataSerializer,
    // Hash identifying the common circuit data shape.
    common_hash: String,
    vd_set: VDSet,
    proof: Proof,
}
88 |
89 | type CircuitData = circuit_data::CircuitData;
90 |
/// Returns the (cached) EmptyPod circuit targets and circuit data, building
/// them on first use.
pub fn cache_get_standard_empty_pod_circuit_data(
) -> CacheEntry<(EmptyPodVerifyTarget, CircuitDataSerializer)> {
    cache::get("standard_empty_pod_circuit_data", &(), |_| {
        let (target, circuit_data) = build().expect("successful build");
        (target, CircuitDataSerializer(circuit_data))
    })
    .expect("cache ok")
}
99 |
100 | pub fn cache_get_standard_empty_pod_verifier_circuit_data(
101 | ) -> CacheEntry {
102 | cache::get("standard_empty_pod_verifier_circuit_data", &(), |_| {
103 | let (_, standard_empty_pod_circuit_data) = &*cache_get_standard_empty_pod_circuit_data();
104 | VerifierCircuitDataSerializer(standard_empty_pod_circuit_data.verifier_data().clone())
105 | })
106 | .expect("cache ok")
107 | }
108 |
109 | fn build() -> Result<(EmptyPodVerifyTarget, CircuitData)> {
110 | let params = Params::default();
111 |
112 | #[cfg(not(feature = "zk"))]
113 | let config = CircuitConfig::standard_recursion_config();
114 | #[cfg(feature = "zk")]
115 | let config = CircuitConfig::standard_recursion_zk_config();
116 |
117 | let mut builder = CircuitBuilder::new(config);
118 | let empty_pod = EmptyPodVerifyTarget::new_virtual(&mut builder);
119 | verify_empty_pod_circuit(¶ms, &mut builder, &empty_pod);
120 | let common_circuit_data = &*cache_get_standard_rec_main_pod_common_circuit_data();
121 | pad_circuit(&mut builder, common_circuit_data);
122 |
123 | let data = timed!("EmptyPod build", builder.build::());
124 | assert_eq!(common_circuit_data.0, data.common);
125 | Ok((empty_pod, data))
126 | }
127 |
128 | impl EmptyPod {
129 | fn new(params: &Params, vd_set: VDSet) -> Result {
130 | let (empty_pod_verify_target, data) = &*cache_get_standard_empty_pod_circuit_data();
131 |
132 | let mut pw = PartialWitness::::new();
133 | empty_pod_verify_target.set_targets(&mut pw, vd_set.root())?;
134 | let proof = timed!("EmptyPod prove", data.prove(pw)?);
135 | let sts_hash = {
136 | let v = &proof.public_inputs
137 | [PI_OFFSET_STATEMENTS_HASH..PI_OFFSET_STATEMENTS_HASH + HASH_SIZE];
138 | Hash([v[0], v[1], v[2], v[3]])
139 | };
140 | let common_hash = hash_common_data(&data.common).expect("hash ok");
141 | Ok(EmptyPod {
142 | params: params.clone(),
143 | verifier_only: VerifierOnlyCircuitDataSerializer(data.verifier_only.clone()),
144 | common_hash,
145 | sts_hash,
146 | vd_set,
147 | proof: proof.proof,
148 | })
149 | }
150 | pub fn new_boxed(params: &Params, vd_set: VDSet) -> Box {
151 | let default_params = Params::default();
152 | assert_eq!(default_params.id_params(), params.id_params());
153 |
154 | let empty_pod = cache::get(
155 | "empty_pod",
156 | &(default_params, vd_set),
157 | |(params, vd_set)| Self::new(params, vd_set.clone()).expect("prove EmptyPod"),
158 | )
159 | .expect("cache ok");
160 | Box::new(empty_pod.clone())
161 | }
162 | }
163 |
// Serialized form of an EmptyPod: base64-ish string encodings produced by the
// backend's serialize_* helpers. NOTE(review): exact encodings live in
// serialize_proof/serialize_verifier_only — confirm there.
#[derive(Serialize, Deserialize)]
struct Data {
    proof: String,
    verifier_only: String,
    common_hash: String,
}
170 |
171 | impl Pod for EmptyPod {
172 | fn params(&self) -> &Params {
173 | &self.params
174 | }
175 | fn verify(&self) -> Result<()> {
176 | let statements = self
177 | .pub_self_statements()
178 | .into_iter()
179 | .map(mainpod::Statement::from)
180 | .collect_vec();
181 | let sts_hash = calculate_statements_hash(&statements, &self.params);
182 | if sts_hash != self.sts_hash {
183 | return Err(Error::statements_hash_not_equal(self.sts_hash, sts_hash));
184 | }
185 |
186 | let public_inputs = sts_hash
187 | .to_fields(&self.params)
188 | .iter()
189 | .chain(self.vd_set.root().0.iter())
190 | .cloned()
191 | .collect_vec();
192 |
193 | let standard_empty_pod_verifier_data = cache_get_standard_empty_pod_verifier_circuit_data();
194 | standard_empty_pod_verifier_data
195 | .verify(ProofWithPublicInputs {
196 | proof: self.proof.clone(),
197 | public_inputs,
198 | })
199 | .map_err(|e| Error::plonky2_proof_fail("EmptyPod", e))
200 | }
201 |
202 | fn statements_hash(&self) -> Hash {
203 | self.sts_hash
204 | }
205 | fn pod_type(&self) -> (usize, &'static str) {
206 | (PodType::Empty as usize, "Empty")
207 | }
208 |
209 | fn pub_self_statements(&self) -> Vec {
210 | vec![empty_statement()]
211 | }
212 |
213 | fn verifier_data(&self) -> VerifierOnlyCircuitData {
214 | self.verifier_only.0.clone()
215 | }
216 | fn common_hash(&self) -> String {
217 | self.common_hash.clone()
218 | }
219 | fn proof(&self) -> Proof {
220 | self.proof.clone()
221 | }
222 | fn vd_set(&self) -> &VDSet {
223 | &self.vd_set
224 | }
225 |
226 | fn serialize_data(&self) -> serde_json::Value {
227 | serde_json::to_value(Data {
228 | proof: serialize_proof(&self.proof),
229 | verifier_only: serialize_verifier_only(&self.verifier_only),
230 | common_hash: self.common_hash.clone(),
231 | })
232 | .expect("serialization to json")
233 | }
234 | fn deserialize_data(
235 | params: Params,
236 | data: serde_json::Value,
237 | vd_set: VDSet,
238 | sts_hash: Hash,
239 | ) -> Result {
240 | let data: Data = serde_json::from_value(data)?;
241 | let common_circuit_data = cache_get_standard_rec_main_pod_common_circuit_data();
242 | let proof = deserialize_proof(&common_circuit_data, &data.proof)?;
243 | let verifier_only = deserialize_verifier_only(&data.verifier_only)?;
244 | Ok(Self {
245 | params,
246 | sts_hash,
247 | verifier_only: VerifierOnlyCircuitDataSerializer(verifier_only),
248 | common_hash: data.common_hash,
249 | vd_set,
250 | proof,
251 | })
252 | }
253 | }
254 |
#[cfg(test)]
pub mod tests {
    use super::*;

    /// Smoke test: a freshly built `EmptyPod` must pass its own `verify`.
    #[test]
    fn test_empty_pod() {
        let params = Params::default();

        // NOTE(review): fixed mojibake `¶ms` (mis-decoded `&params`).
        let empty_pod = EmptyPod::new_boxed(&params, VDSet::new(8, &[]).unwrap());
        empty_pod.verify().unwrap();
    }
}
267 |
--------------------------------------------------------------------------------
/src/backends/plonky2/error.rs:
--------------------------------------------------------------------------------
1 | use std::{backtrace::Backtrace, fmt::Debug};
2 |
3 | use crate::middleware::Hash;
4 |
5 | pub type Result = core::result::Result;
6 |
/// Domain-level verification errors; wrapped by `Error::Inner` together with
/// a captured backtrace (see the `new!` macro below).
#[derive(thiserror::Error, Debug)]
pub enum InnerError {
    /// The statements hash recomputed from a POD's public statements does not
    /// match the stored one: (expected, found).
    #[error("Statements hash does not match, expected {0}, found {1}")]
    StsHashNotEqual(Hash, Hash),

    // POD related
    #[error("verification failed: POD does not have type statement")]
    NotTypeStatement,
    #[error("repeated ValueOf")]
    RepeatedValueOf,
    #[error("Statement did not check")]
    StatementNotCheck,
    #[error("Key not found")]
    KeyNotFound,

    // Other
    /// Free-form message; constructed via `Error::custom`.
    #[error("{0}")]
    Custom(String),
}
26 |
27 | #[derive(thiserror::Error)]
28 | pub enum Error {
29 | #[error("Inner: {inner}\n{backtrace}")]
30 | Inner {
31 | inner: Box,
32 | backtrace: Box,
33 | },
34 | #[error("anyhow::Error: {0}")]
35 | Anyhow(#[from] anyhow::Error),
36 | #[error("Plonky2 proof failed to verify {0}: {1}")]
37 | Plonky2ProofFail(String, anyhow::Error),
38 | #[error("base64::DecodeError: {0}")]
39 | Base64Decode(#[from] base64::DecodeError),
40 | #[error("serde_json::Error: {0}")]
41 | SerdeJson(#[from] serde_json::Error),
42 | #[error(transparent)]
43 | Tree(#[from] crate::backends::plonky2::primitives::merkletree::error::TreeError),
44 | #[error(transparent)]
45 | Middleware(#[from] crate::middleware::Error),
46 | }
47 |
48 | impl Debug for Error {
49 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
50 | std::fmt::Display::fmt(self, f)
51 | }
52 | }
53 |
/// Builds an `Error::Inner` from an `InnerError`, capturing a backtrace at
/// the call site.
macro_rules! new {
    ($inner:expr) => {
        Error::Inner {
            inner: Box::new($inner),
            backtrace: Box::new(Backtrace::capture()),
        }
    };
}
use InnerError::*;
// Constructor helpers: each wraps the matching `InnerError` variant with a
// captured backtrace via `new!`.
// NOTE(review): `impl Into` below lost its type argument in extraction
// (presumably `impl Into<String>` — confirm against VCS).
impl Error {
    /// Ad-hoc error with a free-form message.
    pub fn custom(s: String) -> Self {
        new!(Custom(s))
    }
    /// Plonky2 verification failure; `context` names the failing pod/circuit.
    pub fn plonky2_proof_fail(context: impl Into, e: anyhow::Error) -> Self {
        Self::Plonky2ProofFail(context.into(), e)
    }
    pub fn key_not_found() -> Self {
        new!(KeyNotFound)
    }
    pub fn statement_not_check() -> Self {
        new!(StatementNotCheck)
    }
    pub fn repeated_value_of() -> Self {
        new!(RepeatedValueOf)
    }
    pub fn not_type_statement() -> Self {
        new!(NotTypeStatement)
    }
    /// `expected` is the hash stored in the pod, `found` the recomputed one.
    pub fn statements_hash_not_equal(expected: Hash, found: Hash) -> Self {
        new!(StsHashNotEqual(expected, found))
    }
}
86 |
--------------------------------------------------------------------------------
/src/backends/plonky2/mainpod/operation.rs:
--------------------------------------------------------------------------------
1 | use std::fmt;
2 |
3 | use serde::{Deserialize, Serialize};
4 |
5 | use crate::{
6 | backends::plonky2::{
7 | error::{Error, Result},
8 | mainpod::{SignedBy, Statement},
9 | primitives::merkletree::{MerkleClaimAndProof, MerkleTreeStateTransitionProof},
10 | },
11 | middleware::{self, OperationType, Params},
12 | };
13 |
/// Reference to an operation argument: either unused (`None`) or an index
/// into the POD's statement list.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OperationArg {
    None,
    Index(usize),
}
19 |
20 | impl OperationArg {
21 | pub fn is_none(&self) -> bool {
22 | matches!(self, OperationArg::None)
23 | }
24 |
25 | pub fn as_usize(&self) -> usize {
26 | match self {
27 | Self::None => 0,
28 | Self::Index(i) => *i,
29 | }
30 | }
31 | }
32 |
/// Reference to auxiliary (non-statement) operation data. Each variant holds
/// an index into its per-kind table; `table_index` below maps it to an
/// absolute position in the flat aux table.
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum OperationAux {
    None,
    MerkleProofIndex(usize),
    PublicKeyOfIndex(usize),
    SignedByIndex(usize),
    MerkleTreeStateTransitionProofIndex(usize),
    CustomPredVerifyIndex(usize),
}
42 |
impl OperationAux {
    // The aux table is a single flat array laid out as consecutive regions,
    // one per aux kind, each sized by the corresponding `Params` maximum:
    //   [zero entry | merkle proofs | public-key-of | signed-by
    //    | merkle state transitions | custom predicate verifications]
    // The `table_offset_*` helpers return the start of each region.
    fn table_offset_merkle_proof(_params: &Params) -> usize {
        // At index 0 we store a zero entry
        1
    }
    fn table_offset_public_key_of(params: &Params) -> usize {
        Self::table_offset_merkle_proof(params) + params.max_merkle_proofs_containers
    }
    fn table_offset_signed_by(params: &Params) -> usize {
        Self::table_offset_public_key_of(params) + params.max_public_key_of
    }
    fn table_offset_merkle_tree_state_transition_proof(params: &Params) -> usize {
        Self::table_offset_signed_by(params) + params.max_signed_by
    }
    fn table_offset_custom_pred_verify(params: &Params) -> usize {
        Self::table_offset_merkle_tree_state_transition_proof(params)
            + params.max_merkle_tree_state_transition_proofs_containers
    }
    /// Total number of entries in the aux table: the leading zero entry plus
    /// every region's capacity (must stay in sync with the offsets above).
    pub(crate) fn table_size(params: &Params) -> usize {
        1 + params.max_merkle_proofs_containers
            + params.max_public_key_of
            + params.max_signed_by
            + params.max_merkle_tree_state_transition_proofs_containers
            + params.max_custom_predicate_verifications
    }
    /// Absolute index of this aux entry in the flat table; `None` maps to the
    /// shared zero entry at index 0.
    pub fn table_index(&self, params: &Params) -> usize {
        match self {
            Self::None => 0,
            Self::MerkleProofIndex(i) => Self::table_offset_merkle_proof(params) + *i,
            Self::PublicKeyOfIndex(i) => Self::table_offset_public_key_of(params) + *i,
            Self::SignedByIndex(i) => Self::table_offset_signed_by(params) + *i,
            Self::MerkleTreeStateTransitionProofIndex(i) => {
                Self::table_offset_merkle_tree_state_transition_proof(params) + *i
            }
            Self::CustomPredVerifyIndex(i) => Self::table_offset_custom_pred_verify(params) + *i,
        }
    }
}
81 |
/// Backend representation of an operation: (operation type, argument slots,
/// auxiliary data reference).
/// NOTE(review): the `Vec` element type was lost in extraction; per `args()`
/// below it is presumably `Vec<OperationArg>` — confirm against VCS.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation(pub OperationType, pub Vec, pub OperationAux);
84 |
impl Operation {
    /// The operation's type (cloned).
    pub fn op_type(&self) -> OperationType {
        self.0.clone()
    }
    /// The raw argument slots, including `None`s.
    pub fn args(&self) -> &[OperationArg] {
        &self.1
    }
    /// The auxiliary data reference.
    pub fn aux(&self) -> &OperationAux {
        &self.2
    }
    /// Resolves index-based arguments and the aux reference into a concrete
    /// `middleware::Operation`, looking indices up in the supplied tables.
    /// NOTE(review): generic parameters in this body were lost in extraction
    /// (`Result`, `collect::>>`); restore from VCS before editing.
    pub fn deref(
        &self,
        statements: &[Statement],
        signatures: &[SignedBy],
        merkle_proofs: &[MerkleClaimAndProof],
        merkle_tree_state_transition_proofs: &[MerkleTreeStateTransitionProof],
    ) -> Result {
        // Drop `None` slots and convert each referenced backend statement to
        // its middleware form, failing on the first ill-formed statement.
        let deref_args = self
            .1
            .iter()
            .flat_map(|arg| match arg {
                OperationArg::None => None,
                OperationArg::Index(i) => {
                    let st: Result =
                        statements[*i].clone().try_into();
                    Some(st)
                }
            })
            .collect::>>()?;
        // Resolve the aux reference. Kinds whose data is checked elsewhere
        // (custom predicate verification, public-key-of) carry no middleware
        // aux payload.
        let deref_aux = match self.2 {
            OperationAux::None => crate::middleware::OperationAux::None,
            OperationAux::CustomPredVerifyIndex(_) => crate::middleware::OperationAux::None,
            OperationAux::MerkleProofIndex(i) => crate::middleware::OperationAux::MerkleProof(
                merkle_proofs
                    .get(i)
                    .ok_or(Error::custom(format!("Missing Merkle proof index {}", i)))?
                    .proof
                    .clone(),
            ),
            OperationAux::MerkleTreeStateTransitionProofIndex(i) => {
                crate::middleware::OperationAux::MerkleTreeStateTransitionProof(
                    merkle_tree_state_transition_proofs
                        .get(i)
                        .ok_or(Error::custom(format!(
                            "Missing Merkle state transition proof index {}",
                            i
                        )))?
                        .clone(),
                )
            }
            OperationAux::SignedByIndex(i) => crate::middleware::OperationAux::Signature(
                signatures
                    .get(i)
                    .ok_or(Error::custom(format!("Missing SignedBy data index {}", i)))?
                    .sig
                    .clone(),
            ),
            OperationAux::PublicKeyOfIndex(_) => crate::middleware::OperationAux::None,
        };
        Ok(middleware::Operation::op(
            self.0.clone(),
            &deref_args,
            &deref_aux,
        )?)
    }
}
151 |
152 | impl fmt::Display for Operation {
153 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
154 | write!(f, "{:?} ", self.0)?;
155 | for (i, arg) in self.1.iter().enumerate() {
156 | if f.alternate() || !arg.is_none() {
157 | if i != 0 {
158 | write!(f, " ")?;
159 | }
160 | match arg {
161 | OperationArg::None => write!(f, "none")?,
162 | OperationArg::Index(i) => write!(f, "{:02}", i)?,
163 | }
164 | }
165 | }
166 | match self.2 {
167 | OperationAux::None => (),
168 | OperationAux::MerkleProofIndex(i) => write!(f, " merkle_proof_{:02}", i)?,
169 | OperationAux::CustomPredVerifyIndex(i) => write!(f, " custom_pred_verify_{:02}", i)?,
170 | OperationAux::PublicKeyOfIndex(i) => write!(f, " public_key_of_{:02}", i)?,
171 | OperationAux::SignedByIndex(i) => write!(f, " signed_by_{:02}", i)?,
172 | OperationAux::MerkleTreeStateTransitionProofIndex(i) => {
173 | write!(f, " merkle_tree_state_transition_proof_{:02}", i)?
174 | }
175 | }
176 | Ok(())
177 | }
178 | }
179 |
--------------------------------------------------------------------------------
/src/backends/plonky2/mainpod/statement.rs:
--------------------------------------------------------------------------------
1 | use std::{fmt, iter};
2 |
3 | use serde::{Deserialize, Serialize};
4 |
5 | use crate::{
6 | backends::plonky2::error::{Error, Result},
7 | middleware::{self, NativePredicate, Params, Predicate, StatementArg, ToFields, Value},
8 | };
9 |
/// Backend representation of a statement: a predicate plus its argument list.
/// NOTE(review): the `Vec` element type was lost in extraction; per `args()`
/// below it is presumably `Vec<StatementArg>` — confirm against VCS.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Statement(pub Predicate, pub Vec);

// `Eq` is implemented manually rather than derived — presumably a field in
// the argument type blocks `derive(Eq)`; confirm before changing.
impl Eq for Statement {}
14 |
15 | impl Statement {
16 | pub fn is_none(&self) -> bool {
17 | self.0 == Predicate::Native(NativePredicate::None)
18 | }
19 | pub fn predicate(&self) -> Predicate {
20 | self.0.clone()
21 | }
22 | /// Argument method. Trailing Nones are filtered out.
23 | pub fn args(&self) -> Vec {
24 | let maybe_last_arg_index = (0..self.1.len()).rev().find(|i| !self.1[*i].is_none());
25 | match maybe_last_arg_index {
26 | None => vec![],
27 | Some(i) => self.1[0..i + 1].to_vec(),
28 | }
29 | }
30 | }
31 |
impl ToFields for Statement {
    /// Field encoding: the predicate's fields followed by exactly
    /// `params.max_statement_args` argument encodings — the argument list is
    /// padded with `StatementArg::None` (or truncated) to that fixed width.
    fn to_fields(&self, params: &Params) -> Vec {
        let mut fields = self.0.to_fields(params);
        fields.extend(
            self.1
                .iter()
                .chain(iter::repeat(&StatementArg::None))
                .take(params.max_statement_args)
                .flat_map(|arg| arg.to_fields(params)),
        );
        fields
    }
}
45 |
// Fallible conversion from the backend statement to the middleware one:
// checks that the argument list matches the arity expected by the predicate.
// NOTE(review): generic parameters (e.g. `TryFrom<Statement>`) were lost in
// extraction; restore from VCS before editing.
impl TryFrom for middleware::Statement {
    type Error = Error;
    fn try_from(s: Statement) -> Result {
        type S = middleware::Statement;
        type NP = NativePredicate;
        type SA = StatementArg;
        // Arguments with trailing `None`s stripped; arity is checked against
        // this trimmed list.
        let proper_args = s.args();
        Ok(match s.0 {
            Predicate::Native(np) => match (np, &proper_args.as_slice()) {
                (NP::None, &[]) => S::None,
                (NP::Equal, &[a1, a2]) => S::Equal(a1.try_into()?, a2.try_into()?),
                (NP::NotEqual, &[a1, a2]) => S::NotEqual(a1.try_into()?, a2.try_into()?),
                (NP::LtEq, &[a1, a2]) => S::LtEq(a1.try_into()?, a2.try_into()?),
                (NP::Lt, &[a1, a2]) => S::Lt(a1.try_into()?, a2.try_into()?),
                (NP::Contains, &[a1, a2, a3]) => {
                    S::Contains(a1.try_into()?, a2.try_into()?, a3.try_into()?)
                }
                (NP::NotContains, &[a1, a2]) => S::NotContains(a1.try_into()?, a2.try_into()?),
                (NP::SumOf, &[a1, a2, a3]) => {
                    S::SumOf(a1.try_into()?, a2.try_into()?, a3.try_into()?)
                }
                (NP::ProductOf, &[a1, a2, a3]) => {
                    S::ProductOf(a1.try_into()?, a2.try_into()?, a3.try_into()?)
                }
                (NP::MaxOf, &[a1, a2, a3]) => {
                    S::MaxOf(a1.try_into()?, a2.try_into()?, a3.try_into()?)
                }
                (NP::HashOf, &[a1, a2, a3]) => {
                    S::HashOf(a1.try_into()?, a2.try_into()?, a3.try_into()?)
                }
                (NP::PublicKeyOf, &[a1, a2]) => S::PublicKeyOf(a1.try_into()?, a2.try_into()?),
                (NP::SignedBy, &[a1, a2]) => S::SignedBy(a1.try_into()?, a2.try_into()?),
                (NP::ContainerInsert, &[a1, a2, a3, a4]) => S::ContainerInsert(
                    a1.try_into()?,
                    a2.try_into()?,
                    a3.try_into()?,
                    a4.try_into()?,
                ),
                (NP::ContainerUpdate, &[a1, a2, a3, a4]) => S::ContainerUpdate(
                    a1.try_into()?,
                    a2.try_into()?,
                    a3.try_into()?,
                    a4.try_into()?,
                ),
                (NP::ContainerDelete, &[a1, a2, a3]) => {
                    S::ContainerDelete(a1.try_into()?, a2.try_into()?, a3.try_into()?)
                }
                // Any arity mismatch falls through to an error.
                _ => Err(Error::custom(format!(
                    "Ill-formed statement expression {:?}",
                    s
                )))?,
            },
            Predicate::Custom(cpr) => {
                // Custom statements keep only literal argument values; other
                // arg kinds cannot appear here, hence the unreachable.
                let vs: Vec = proper_args
                    .into_iter()
                    .filter_map(|arg| match arg {
                        SA::None => None,
                        SA::Literal(v) => Some(v),
                        _ => unreachable!(),
                    })
                    .collect();
                S::Custom(cpr, vs)
            }
            Predicate::Intro(ir) => {
                // Same literal-only rule as for custom predicates.
                let vs: Vec = proper_args
                    .into_iter()
                    .filter_map(|arg| match arg {
                        SA::None => None,
                        SA::Literal(v) => Some(v),
                        _ => unreachable!(),
                    })
                    .collect();
                S::Intro(ir, vs)
            }
            Predicate::BatchSelf(_) => {
                // Batch-self references must be resolved before conversion.
                unreachable!()
            }
        })
    }
}
126 |
127 | impl From for Statement {
128 | fn from(s: middleware::Statement) -> Self {
129 | match s.predicate() {
130 | middleware::Predicate::Native(c) => Statement(
131 | middleware::Predicate::Native(c),
132 | s.args().into_iter().collect(),
133 | ),
134 | middleware::Predicate::Custom(cpr) => Statement(
135 | middleware::Predicate::Custom(cpr),
136 | s.args().into_iter().collect(),
137 | ),
138 | middleware::Predicate::Intro(ir) => Statement(
139 | middleware::Predicate::Intro(ir),
140 | s.args().into_iter().collect(),
141 | ),
142 | middleware::Predicate::BatchSelf(_) => unreachable!(),
143 | }
144 | }
145 | }
146 |
147 | impl fmt::Display for Statement {
148 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
149 | write!(f, "{:?} ", self.0)?;
150 | for (i, arg) in self.1.iter().enumerate() {
151 | if f.alternate() || !arg.is_none() {
152 | if i != 0 {
153 | write!(f, " ")?;
154 | }
155 | arg.fmt(f)?;
156 | }
157 | }
158 | Ok(())
159 | }
160 | }
161 |
--------------------------------------------------------------------------------
/src/backends/plonky2/mock/emptypod.rs:
--------------------------------------------------------------------------------
1 | use itertools::Itertools;
2 |
3 | use crate::{
4 | backends::plonky2::{
5 | basetypes::{Proof, VerifierOnlyCircuitData},
6 | error::{Error, Result},
7 | mainpod::{self, calculate_statements_hash},
8 | },
9 | middleware::{Hash, IntroPredicateRef, Params, Pod, PodType, Statement, VDSet, EMPTY_HASH},
10 | };
11 |
/// Mock (proof-less) empty POD: carries only parameters, the hash of its
/// single introduction statement, and the verifier-data set. `verify` merely
/// recomputes the statements hash — there is no cryptographic proof.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct MockEmptyPod {
    params: Params,
    sts_hash: Hash,
    vd_set: VDSet,
}
18 |
19 | fn empty_statement() -> Statement {
20 | Statement::Intro(
21 | IntroPredicateRef {
22 | name: "mock_empty".to_string(),
23 | args_len: 0,
24 | verifier_data_hash: EMPTY_HASH,
25 | },
26 | vec![],
27 | )
28 | }
29 |
impl MockEmptyPod {
    /// Builds the mock empty pod, precomputing the hash of its single
    /// introduction statement.
    /// NOTE(review): the `Box` type argument was lost in extraction
    /// (presumably `Box<dyn Pod>` — confirm against VCS).
    pub fn new_boxed(params: &Params, vd_set: VDSet) -> Box {
        let statements = [mainpod::Statement::from(empty_statement())];
        let sts_hash = calculate_statements_hash(&statements, params);
        Box::new(Self {
            params: params.clone(),
            sts_hash,
            vd_set,
        })
    }
}
41 |
impl Pod for MockEmptyPod {
    fn params(&self) -> &Params {
        &self.params
    }
    /// Recomputes the statements hash from the pod's public statements and
    /// checks it against the stored one; mock pods have no proof to verify.
    fn verify(&self) -> Result<()> {
        let statements = self
            .pub_self_statements()
            .into_iter()
            .map(mainpod::Statement::from)
            .collect_vec();
        let sts_hash = calculate_statements_hash(&statements, &self.params);
        if sts_hash != self.sts_hash {
            return Err(Error::statements_hash_not_equal(self.sts_hash, sts_hash));
        }
        Ok(())
    }
    fn statements_hash(&self) -> Hash {
        self.sts_hash
    }
    fn pod_type(&self) -> (usize, &'static str) {
        (PodType::MockEmpty as usize, "MockEmpty")
    }
    fn pub_self_statements(&self) -> Vec {
        vec![empty_statement()]
    }

    // No real circuit backs a mock pod, so the empty hash acts as a sentinel.
    fn verifier_data_hash(&self) -> Hash {
        EMPTY_HASH
    }
    fn verifier_data(&self) -> VerifierOnlyCircuitData {
        panic!("MockEmptyPod can't be verified in a recursive MainPod circuit");
    }
    fn common_hash(&self) -> String {
        panic!("MockEmptyPod can't be verified in a recursive MainPod circuit");
    }
    fn proof(&self) -> Proof {
        panic!("MockEmptyPod can't be verified in a recursive MainPod circuit");
    }
    fn vd_set(&self) -> &VDSet {
        &self.vd_set
    }
    /// Mock pods carry no extra serialized payload.
    fn serialize_data(&self) -> serde_json::Value {
        serde_json::Value::Null
    }
    fn deserialize_data(
        params: Params,
        _data: serde_json::Value,
        vd_set: VDSet,
        sts_hash: Hash,
    ) -> Result {
        Ok(Self {
            params,
            sts_hash,
            vd_set,
        })
    }
}
99 |
#[cfg(test)]
pub mod tests {
    use super::*;

    /// Smoke test: a freshly built `MockEmptyPod` must pass its own `verify`.
    #[test]
    fn test_mock_empty_pod() {
        let params = Params::default();

        // NOTE(review): fixed mojibake `¶ms` (mis-decoded `&params`).
        let empty_pod = MockEmptyPod::new_boxed(&params, VDSet::new(8, &[]).unwrap());
        empty_pod.verify().unwrap();
    }
}
112 |
--------------------------------------------------------------------------------
/src/backends/plonky2/mock/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod emptypod;
2 | pub mod mainpod;
3 |
--------------------------------------------------------------------------------
/src/backends/plonky2/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod basetypes;
2 | pub mod circuits;
3 | pub mod emptypod;
4 | mod error;
5 | pub mod mainpod;
6 | pub mod mock;
7 | pub mod primitives;
8 | pub mod recursion;
9 | pub mod serialization;
10 | pub mod signer;
11 |
12 | use std::iter;
13 |
14 | use base64::{prelude::BASE64_STANDARD, Engine};
15 | pub use error::*;
16 | use plonky2::{
17 | field::{
18 | extension::quadratic::QuadraticExtension,
19 | types::{Field, Field64},
20 | },
21 | hash::hash_types::HashOut,
22 | plonk::vars::EvaluationVars,
23 | util::serialization::{Buffer, Read},
24 | };
25 | use rand::prelude::*;
26 | use rand_chacha::ChaCha20Rng;
27 | use serde::{ser, Deserialize, Serialize};
28 | use sha2::{Digest, Sha256};
29 |
30 | use crate::{
31 | backends::plonky2::{
32 | basetypes::{CommonCircuitData, Proof, VerifierOnlyCircuitData, F},
33 | circuits::mainpod::{MainPodVerifyTarget, NUM_PUBLIC_INPUTS},
34 | recursion::RecursiveCircuit,
35 | serialization::{CommonCircuitDataSerializer, Pod2GateSerializer},
36 | },
37 | cache::{self, CacheEntry},
38 | middleware::Params,
39 | timed,
40 | };
41 |
42 | pub fn cache_get_standard_rec_main_pod_common_circuit_data(
43 | ) -> CacheEntry {
44 | let params = Params::default();
45 | cache::get(
46 | "standard_rec_main_pod_common_circuit_data",
47 | ¶ms,
48 | |params| {
49 | let circuit_data = timed!(
50 | "recursive MainPod circuit_data",
51 | RecursiveCircuit::::target_and_circuit_data(
52 | params.max_input_pods,
53 | NUM_PUBLIC_INPUTS,
54 | params
55 | )
56 | .expect("calculate circuit_data")
57 | );
58 | CommonCircuitDataSerializer(circuit_data.1.common)
59 | },
60 | )
61 | .expect("cache ok")
62 | }
63 |
/// Encodes raw bytes as standard (padded) base64.
pub fn serialize_bytes(bytes: &[u8]) -> String {
    BASE64_STANDARD.encode(bytes)
}
67 |
68 | pub fn deserialize_bytes(data: &str) -> Result> {
69 | BASE64_STANDARD.decode(data).map_err(|e| {
70 | Error::custom(format!(
71 | "Failed to decode data from base64: {}. Value: {}",
72 | e, data
73 | ))
74 | })
75 | }
76 |
77 | pub fn deserialize_proof(common: &CommonCircuitData, proof: &str) -> Result {
78 | let decoded = deserialize_bytes(proof)?;
79 | let mut buf = Buffer::new(&decoded);
80 | let proof = buf.read_proof(common).map_err(|e| {
81 | Error::custom(format!(
82 | "Failed to read proof from buffer: {}. Value: {}",
83 | e, proof
84 | ))
85 | })?;
86 |
87 | Ok(proof)
88 | }
89 |
/// Serializes `VerifierOnlyCircuitData` to base64. Writing into a `Vec`
/// cannot fail for lack of space, hence the `expect`.
pub fn serialize_verifier_only(verifier_only: &VerifierOnlyCircuitData) -> String {
    let bytes = verifier_only.to_bytes().expect("write to Vec");
    serialize_bytes(&bytes)
}
94 |
95 | pub fn deserialize_verifier_only(verifier_only: &str) -> Result {
96 | let decoded = deserialize_bytes(verifier_only)?;
97 | let verifier_only = VerifierOnlyCircuitData::from_bytes(&decoded).map_err(|e| {
98 | Error::custom(format!(
99 | "Failed to read VerifierOnlyCircuitData from buffer: {}. Value: {}",
100 | e, verifier_only
101 | ))
102 | })?;
103 |
104 | Ok(verifier_only)
105 | }
106 |
107 | pub fn serialize_proof(proof: &Proof) -> String {
108 | let mut buffer = Vec::new();
109 | use plonky2::util::serialization::Write;
110 | buffer.write_proof(proof).unwrap();
111 | serialize_bytes(&buffer)
112 | }
113 |
/// Samples `len` field elements from `rng` by rejection sampling: u64 draws
/// >= the field order are discarded, so the result is unbiased.
/// NOTE(review): the return type's generics were lost in extraction
/// (presumably `Vec<F>` — confirm against VCS).
fn rand_vec(rng: &mut impl RngCore, len: usize) -> Vec {
    iter::repeat_with(|| rng.next_u64())
        .filter(|v| *v < F::ORDER)
        .map(F::from_canonical_u64)
        .take(len)
        .collect()
}
121 |
122 | fn base(r: F, xs: &[F]) -> F {
123 | let mut res = F::ZERO;
124 | for x in xs.iter().rev() {
125 | res *= r;
126 | res += *x;
127 | }
128 | res
129 | }
130 |
/// Computes a fingerprint for every gate in `common`: each gate's unfiltered
/// constraint evaluations at a fixed pseudo-random evaluation point are
/// folded (via `base`) into a single field element, paired with the gate id.
/// Deterministic by construction (fixed RNG seed), so fingerprints can be
/// compared across runs/machines.
fn gate_fingerprints(common: &CommonCircuitData) -> Vec<(String, F)> {
    type Ext = QuadraticExtension;
    let config = &common.config;
    // Fixed seed: fingerprints must be reproducible.
    let mut rng = ChaCha20Rng::seed_from_u64(42);
    let r = rand_vec(&mut rng, 1)[0];
    let local_constants: Vec = rand_vec(&mut rng, config.num_constants)
        .into_iter()
        .map(Ext::from)
        .collect();
    let local_wires: Vec = rand_vec(&mut rng, config.num_wires)
        .into_iter()
        .map(Ext::from)
        .collect();
    let public_inputs_hash = HashOut::from_vec(rand_vec(&mut rng, 4));
    let vars = EvaluationVars {
        local_constants: &local_constants,
        local_wires: &local_wires,
        public_inputs_hash: &public_inputs_hash,
    };
    let mut fingerprints = Vec::new();
    for gate in &common.gates {
        // Keep only the base-field component of each extension evaluation.
        let eval: Vec = gate
            .0
            .eval_unfiltered(vars)
            .into_iter()
            .map(|e| e.0[0])
            .collect();
        fingerprints.push((gate.0.id(), base(r, &eval)));
    }
    fingerprints
}
162 |
/// Produces a short hex identifier for a circuit's `CommonCircuitData`:
/// SHA-256 over a JSON document containing the serialized common data plus
/// per-gate fingerprints, truncated to the first 32 hex chars (128 bits).
pub fn hash_common_data(common: &CommonCircuitData) -> serde_json::Result {
    #[derive(Serialize, Deserialize)]
    pub struct CommonFingerprintData {
        common: String,
        gate_fingerprints: Vec<(String, F)>,
    }

    let gate_serializer = Pod2GateSerializer {};
    let bytes = common
        .to_bytes(&gate_serializer)
        .map_err(ser::Error::custom)?;
    let gate_fingerprints = gate_fingerprints(common);
    let data = CommonFingerprintData {
        common: serialize_bytes(&bytes),
        gate_fingerprints,
    };

    let json = serde_json::to_string(&data)?;
    let json_hash = Sha256::digest(&json);
    let json_hash_str_long = format!("{:x}", json_hash);
    // 128 bits is plenty to identify a circuit uniquely in practice.
    let json_hash_str = json_hash_str_long[..32].to_string();
    Ok(json_hash_str)
}
186 |
--------------------------------------------------------------------------------
/src/backends/plonky2/primitives/ec/gates/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod curve;
2 | pub mod field;
3 |
--------------------------------------------------------------------------------
/src/backends/plonky2/primitives/ec/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod bits;
2 | pub mod curve;
3 | pub mod field;
4 | pub mod gates;
5 | pub mod schnorr;
6 |
--------------------------------------------------------------------------------
/src/backends/plonky2/primitives/merkletree/error.rs:
--------------------------------------------------------------------------------
1 | //! tree errors
2 |
3 | use std::{backtrace::Backtrace, fmt::Debug};
4 |
5 | pub type TreeResult = core::result::Result;
6 |
/// Merkle-tree domain errors; wrapped by `TreeError::Inner` together with a
/// captured backtrace (see the `new!` macro below).
#[derive(Debug, thiserror::Error)]
pub enum TreeInnerError {
    #[error("key not found")]
    KeyNotFound,
    #[error("key already exists")]
    KeyExists,
    #[error("max depth reached")]
    MaxDepth,
    #[error("proof of {0} does not verify")]
    ProofFail(String), // inclusion / exclusion
    #[error("invalid {0} proof")]
    InvalidProof(String),
    // TODO(review): variant name has a typo ("Proog" -> "Proof"). Renaming a
    // variant of this pub enum is a breaking change for external matchers, so
    // only flagging it here.
    #[error("invalid state transition proof argument: {0}")]
    InvalidStateTransitionProogArg(String),
    #[error("state transition proof does not verify, reason: {0}")]
    StateTransitionProofFail(String),
    #[error("key too short (key length: {0}) for the max_depth: {1}")]
    TooShortKey(usize, usize),
}
26 |
27 | #[derive(thiserror::Error)]
28 | pub enum TreeError {
29 | #[error("Inner: {inner}\n{backtrace}")]
30 | Inner {
31 | inner: Box,
32 | backtrace: Box,
33 | },
34 | #[error("anyhow::Error: {0}")]
35 | Anyhow(#[from] anyhow::Error),
36 | }
37 |
38 | impl Debug for TreeError {
39 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
40 | std::fmt::Display::fmt(self, f)
41 | }
42 | }
43 |
/// Builds a `TreeError::Inner` from a `TreeInnerError`, capturing a backtrace
/// at the call site.
macro_rules! new {
    ($inner:expr) => {
        TreeError::Inner {
            inner: Box::new($inner),
            backtrace: Box::new(Backtrace::capture()),
        }
    };
}
use TreeInnerError::*;
impl TreeError {
    /// Returns the wrapped `TreeInnerError`, if this is an `Inner` error
    /// (lets callers match on the domain error while ignoring the backtrace).
    pub fn inner(&self) -> Option<&TreeInnerError> {
        match self {
            Self::Inner { inner, .. } => Some(inner),
            _ => None,
        }
    }
    // Constructor helpers: each wraps the matching `TreeInnerError` variant
    // with a captured backtrace via `new!`.
    pub(crate) fn key_not_found() -> Self {
        new!(KeyNotFound)
    }
    pub(crate) fn key_exists() -> Self {
        new!(KeyExists)
    }
    pub(crate) fn max_depth() -> Self {
        new!(MaxDepth)
    }
    /// `obj` names what failed to verify ("inclusion" / "exclusion").
    pub(crate) fn proof_fail(obj: String) -> Self {
        new!(ProofFail(obj))
    }
    pub(crate) fn invalid_proof(obj: String) -> Self {
        new!(InvalidProof(obj))
    }
    pub(crate) fn invalid_state_transition_proof_arg(reason: String) -> Self {
        new!(InvalidStateTransitionProogArg(reason))
    }
    pub(crate) fn state_transition_fail(reason: String) -> Self {
        new!(StateTransitionProofFail(reason))
    }
    pub(crate) fn too_short_key(depth: usize, max_depth: usize) -> Self {
        new!(TooShortKey(depth, max_depth))
    }
}
85 |
--------------------------------------------------------------------------------
/src/backends/plonky2/primitives/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod ec;
2 | pub mod merkletree;
3 | pub mod signature;
4 |
--------------------------------------------------------------------------------
/src/backends/plonky2/primitives/signature/circuit.rs:
--------------------------------------------------------------------------------
1 | #![allow(unused)]
2 | use lazy_static::lazy_static;
3 | use plonky2::{
4 | field::types::Field,
5 | hash::{
6 | hash_types::{HashOut, HashOutTarget},
7 | poseidon::PoseidonHash,
8 | },
9 | iop::{
10 | target::{BoolTarget, Target},
11 | witness::{PartialWitness, WitnessWrite},
12 | },
13 | plonk::{
14 | circuit_builder::CircuitBuilder,
15 | circuit_data::{
16 | CircuitConfig, CircuitData, ProverCircuitData, VerifierCircuitData,
17 | VerifierCircuitTarget,
18 | },
19 | config::Hasher,
20 | proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget},
21 | },
22 | };
23 | use serde::{Deserialize, Serialize};
24 |
25 | use crate::{
26 | backends::plonky2::{
27 | basetypes::{C, D},
28 | circuits::common::{CircuitBuilderPod, ValueTarget},
29 | error::Result,
30 | primitives::ec::{
31 | curve::{CircuitBuilderElliptic, Point, PointTarget, WitnessWriteCurve},
32 | schnorr::{CircuitBuilderSchnorr, Signature, SignatureTarget, WitnessWriteSchnorr},
33 | },
34 | },
35 | measure_gates_begin, measure_gates_end,
36 | middleware::{Hash, Proof, RawValue, EMPTY_HASH, EMPTY_VALUE, F, VALUE_SIZE},
37 | };
38 |
39 | // TODO: This is a very simple wrapper over the signature verification implemented on
40 | // `SignatureTarget`. I think we can remove this and use it directly. Also we're not using the
41 | // `enabled` flag, so it should be straight-forward to remove this.
/// Circuit targets for conditionally verifying a Schnorr signature:
/// public key, message, signature, and an `enabled` flag that gates the
/// verification constraint (see `verify_signature_circuit`).
#[derive(Clone, Serialize, Deserialize)]
pub struct SignatureVerifyTarget {
    // `enabled` determines if the signature verification is enabled
    pub(crate) enabled: BoolTarget,
    pub(crate) pk: PointTarget,
    pub(crate) msg: ValueTarget,
    // proof of the SignatureInternalCircuit (=signature::Signature.0)
    pub(crate) sig: SignatureTarget,
}
51 |
/// Adds constraints enforcing: if `signature.enabled` is set then `sig` must
/// be a valid Schnorr signature over `msg` by `pk`; when disabled, nothing is
/// enforced.
/// NOTE(review): `CircuitBuilder`'s type arguments were lost in extraction
/// (presumably `CircuitBuilder<F, D>` — confirm against VCS).
pub fn verify_signature_circuit(
    builder: &mut CircuitBuilder,
    signature: &SignatureVerifyTarget,
) {
    let measure = measure_gates_begin!(builder, "SignatureVerify");
    let verified = signature.sig.verify(
        builder,
        HashOutTarget::from(signature.msg.elements),
        &signature.pk,
    );
    // enabled * verified - enabled == enabled * (verified - 1): zero exactly
    // when enabled == 0 or verified == 1, i.e. "enabled implies verified".
    let result = builder.mul_sub(
        signature.enabled.target,
        verified.target,
        signature.enabled.target,
    );
    builder.assert_zero(result);
    measure_gates_end!(builder, measure);
}
70 |
impl SignatureVerifyTarget {
    /// Allocates fresh virtual targets for all fields; `enabled` uses the
    /// "safe" bool variant, which constrains the wire to 0/1.
    pub fn new_virtual(builder: &mut CircuitBuilder) -> Self {
        SignatureVerifyTarget {
            enabled: builder.add_virtual_bool_target_safe(),
            pk: builder.add_virtual_point_target(),
            msg: builder.add_virtual_value(),
            sig: builder.add_virtual_schnorr_signature_target(),
        }
    }
    /// assigns the given values to the targets
    pub fn set_targets(
        &self,
        pw: &mut PartialWitness,
        enabled: bool,
        pk: Point,
        msg: RawValue,
        signature: Signature,
    ) -> Result<()> {
        pw.set_bool_target(self.enabled, enabled)?;
        pw.set_point_target(&self.pk, &pk)?;
        pw.set_target_arr(&self.msg.elements, &msg.0)?;
        pw.set_signature_target(&self.sig, &signature)?;

        Ok(())
    }
}
97 |
#[cfg(test)]
pub mod tests {
    use num_bigint::RandBigInt;

    use super::*;
    use crate::{
        backends::plonky2::primitives::ec::{curve::GROUP_ORDER, schnorr::SecretKey},
        middleware::Hash,
    };

    // NOTE(review): turbofish type arguments (e.g. `CircuitBuilder::<F, D>`,
    // `PartialWitness::<F>`, `build::<C>()`) were lost in extraction
    // throughout this module; restore from VCS before editing.

    /// With `enabled = true`, a valid signature must produce a provable and
    /// verifiable circuit.
    #[test]
    fn test_signature_gadget_enabled() -> Result<()> {
        // generate a valid signature
        let sk = SecretKey::new_rand();
        let pk = sk.public_key();
        let msg = RawValue::from(42);
        let nonce = 1337u64.into();
        let sig = sk.sign(msg, &nonce);
        assert!(sig.verify(pk, msg), "Should verify");

        // circuit
        let config = CircuitConfig::standard_recursion_zk_config();
        let mut builder = CircuitBuilder::::new(config);
        let mut pw = PartialWitness::::new();

        let targets = SignatureVerifyTarget::new_virtual(&mut builder);
        verify_signature_circuit(&mut builder, &targets);
        targets.set_targets(&mut pw, true, pk, msg, sig)?;

        // generate & verify proof
        let data = builder.build::();
        let proof = data.prove(pw)?;
        data.verify(proof.clone())?;

        Ok(())
    }

    /// With a mismatched message, proving must fail when `enabled = true`
    /// but succeed when `enabled = false` (verification gated off).
    #[test]
    fn test_signature_gadget_disabled() -> Result<()> {
        // generate a valid signature
        let sk = SecretKey::new_rand();
        let pk = sk.public_key();
        let msg = RawValue::from(42);
        let nonce = 600613u64.into();
        let sig = sk.sign(msg, &nonce);
        // verification should pass
        let v = sig.verify(pk, msg);
        assert!(v, "should verify");

        // replace the message, so that verifications should fail
        let msg = RawValue::from(24);
        // expect signature native verification to fail
        let v = sig.verify(pk, RawValue::from(24));
        assert!(!v, "should fail to verify");

        // circuit
        let config = CircuitConfig::standard_recursion_zk_config();
        let mut builder = CircuitBuilder::::new(config);
        let targets = SignatureVerifyTarget::new_virtual(&mut builder);
        verify_signature_circuit(&mut builder, &targets);
        let mut pw = PartialWitness::::new();
        targets.set_targets(&mut pw, true, pk, msg, sig.clone())?; // enabled=true

        // generate proof, and expect it to fail
        let data = builder.build::();
        assert!(data.prove(pw).is_err()); // expect prove to fail

        // build the circuit again, but now disable the selector ('enabled')
        // that disables the in-circuit signature verification (ie.
        // `enabled=false`)
        let config = CircuitConfig::standard_recursion_zk_config();
        let mut builder = CircuitBuilder::::new(config);
        let mut pw = PartialWitness::::new();

        let targets = SignatureVerifyTarget::new_virtual(&mut builder);
        verify_signature_circuit(&mut builder, &targets);
        targets.set_targets(&mut pw, false, pk, msg, sig)?; // enabled=false

        // generate & verify proof
        let data = builder.build::();
        let proof = data.prove(pw)?;
        data.verify(proof.clone())?;

        Ok(())
    }
}
184 |
--------------------------------------------------------------------------------
/src/backends/plonky2/primitives/signature/mod.rs:
--------------------------------------------------------------------------------
//! Proof-based signatures using Plonky2 proofs, following
//! the design document originally linked here (NOTE(review): the angle-bracket autolink URL was lost during extraction — restore it).
3 |
4 | pub mod circuit;
5 | pub use circuit::*;
6 |
--------------------------------------------------------------------------------
/src/backends/plonky2/recursion/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod circuit;
2 | pub use circuit::{
3 | common_data_for_recursion, hash_verifier_data, new_params, new_params_padded, pad_circuit,
4 | prove_rec_circuit, InnerCircuit, RecursiveCircuit, RecursiveCircuitTarget, RecursiveParams,
5 | VerifiedProofTarget,
6 | };
7 |
--------------------------------------------------------------------------------
/src/backends/plonky2/signer.rs:
--------------------------------------------------------------------------------
1 | use num_bigint::{BigUint, RandBigInt};
2 | use rand::rngs::OsRng;
3 |
4 | use crate::{
5 | backends::plonky2::primitives::ec::{
6 | curve::{Point as PublicKey, GROUP_ORDER},
7 | schnorr::{SecretKey, Signature},
8 | },
9 | middleware::{self, RawValue},
10 | timed,
11 | };
12 |
13 | pub struct Signer(pub SecretKey);
14 |
15 | impl Signer {
16 | pub(crate) fn sign_with_nonce(&self, nonce: BigUint, msg: RawValue) -> Signature {
17 | let signature: Signature = timed!("SignedPod::sign", self.0.sign(msg, &nonce));
18 | signature
19 | }
20 | }
21 |
impl middleware::Signer for Signer {
    /// Signs `msg` with a random nonce drawn from the OS RNG, sampled
    /// uniformly below the curve group order.
    fn sign(&self, msg: RawValue) -> Signature {
        let nonce = OsRng.gen_biguint_below(&GROUP_ORDER);
        self.sign_with_nonce(nonce, msg)
    }

    /// Returns the public key (curve point) derived from the wrapped secret key.
    fn public_key(&self) -> PublicKey {
        self.0.public_key()
    }
}
32 |
--------------------------------------------------------------------------------
/src/bin/mainpod_circuit_info.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 |
3 | use anyhow::anyhow;
4 | use pod2::{
5 | backends::plonky2::{
6 | hash_common_data, mainpod::cache_get_rec_main_pod_verifier_circuit_data,
7 | recursion::circuit::hash_verifier_data,
8 | },
9 | middleware::{Hash, Params},
10 | };
11 | use serde::Serialize;
12 | use sha2::{Digest, Sha256};
13 |
/// Circuit identification info printed (as JSON) by the `circuit-info` subcommand.
#[derive(Serialize)]
struct Info {
    // Truncated (first 32 hex chars) SHA-256 of the JSON-serialized `Params`.
    params_hash: String,
    // Hash of the verifier-only circuit data (see `hash_verifier_data`).
    verifier_hash: Hash,
    // Hash of the circuit's common data (see `hash_common_data`).
    common_hash: String,
}
20 |
21 | fn main() -> anyhow::Result<()> {
22 | let args: Vec = env::args().collect();
23 |
24 | let params = Params::default();
25 | match args.get(1).map(|s| s.as_str()) {
26 | Some("params") => {
27 | let params_json = serde_json::to_string_pretty(¶ms)?;
28 | println!("{params_json}");
29 | }
30 | Some("circuit-info") => {
31 | let params_json = serde_json::to_string(¶ms)?;
32 | let params_json_hash = Sha256::digest(¶ms_json);
33 | let params_json_hash_str_long = format!("{params_json_hash:x}");
34 | let params_json_hash_str = params_json_hash_str_long[..32].to_string();
35 |
36 | let vd = &*cache_get_rec_main_pod_verifier_circuit_data(¶ms);
37 | let info = Info {
38 | params_hash: params_json_hash_str,
39 | verifier_hash: Hash(hash_verifier_data(&vd.verifier_only).elements),
40 | common_hash: hash_common_data(&vd.common)?,
41 | };
42 | let json = serde_json::to_string_pretty(&info)?;
43 | println!("{json}");
44 | }
45 | _ => {
46 | return Err(anyhow!(
47 | "Invalid arguments. Usage: {} params/circuit-info",
48 | args[0]
49 | ));
50 | }
51 | }
52 | Ok(())
53 | }
54 |
--------------------------------------------------------------------------------
/src/cache/disk.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | fs::{create_dir_all, rename, File, TryLockError},
3 | io::{Error, ErrorKind, Read, Write},
4 | ops::Deref,
5 | thread, time,
6 | };
7 |
8 | use directories::BaseDirs;
9 | use serde::{de::DeserializeOwned, Serialize};
10 | use sha2::{Digest, Sha256};
11 |
/// A value retrieved from (or freshly stored into) the disk cache.
/// Owns the deserialized artifact and dereferences to it.
pub struct CacheEntry<T> {
    value: T,
}
15 |
16 | impl Deref for CacheEntry {
17 | type Target = T;
18 |
19 | fn deref(&self) -> &Self::Target {
20 | &self.value
21 | }
22 | }
23 |
24 | /// Get the artifact named `name` from the disk cache. If it doesn't exist, it will be built by
25 | /// calling `build_fn` and stored.
26 | /// The artifact is indexed by git commit first and then by `params: P` second.
27 | pub fn get(
28 | name: &str,
29 | params: &P,
30 | build_fn: fn(&P) -> T,
31 | ) -> Result, Box> {
32 | let commit_hash_str = env!("VERGEN_GIT_SHA");
33 | let params_json = serde_json::to_string(params)?;
34 | let params_json_hash = Sha256::digest(¶ms_json);
35 | let params_json_hash_str_long = format!("{:x}", params_json_hash);
36 | let params_json_hash_str = format!("{}", ¶ms_json_hash_str_long[..32]);
37 | let log_name = format!("{}/{}/{}.cbor", commit_hash_str, params_json_hash_str, name);
38 | log::debug!("getting {} from the disk cache", log_name);
39 |
40 | let base_dirs =
41 | BaseDirs::new().ok_or(Error::new(ErrorKind::Other, "no valid home directory"))?;
42 | let user_cache_dir = base_dirs.cache_dir();
43 | let pod2_cache_dir = user_cache_dir.join("pod2");
44 | let commit_cache_dir = pod2_cache_dir.join(&commit_hash_str);
45 | create_dir_all(&commit_cache_dir)?;
46 |
47 | let cache_dir = commit_cache_dir.join(¶ms_json_hash_str);
48 | create_dir_all(&cache_dir)?;
49 |
50 | // Store the params.json if it doesn't exist for better debuggability
51 | let params_path = cache_dir.join("params.json");
52 | if !params_path.try_exists()? {
53 | // First write the file to .tmp and then rename to avoid a corrupted file if we crash in
54 | // the middle of the write.
55 | let params_path_tmp = cache_dir.join("params.json.tmp");
56 | let mut file = File::create(¶ms_path_tmp)?;
57 | file.write_all(params_json.as_bytes())?;
58 | rename(params_path_tmp, params_path)?;
59 | }
60 |
61 | let cache_path = cache_dir.join(format!("{}.cbor", name));
62 | let cache_path_tmp = cache_dir.join(format!("{}.cbor.tmp", name));
63 |
64 | // First try to open the cached file. If it exists we assume a previous build+cache succeeded
65 | // so we read, deserialize it and return it.
66 | // If it doesn't exist we open a corresponding tmp file and try to acquire it exclusively. If
67 | // we can't acquire it means another process is building the artifact so we retry again in 100
68 | // ms. If we acquire the lock we build the artifact store it in the tmp file and finally
69 | // rename it to the final cached file. This way the final cached file either exists and is
70 | // complete or doesn't exist at all (in case of a crash the corruputed file will be tmp).
71 |
72 | loop {
73 | let mut file = match File::open(&cache_path) {
74 | Ok(file) => file,
75 | Err(err) => {
76 | if err.kind() == ErrorKind::NotFound {
77 | let mut file_tmp = File::create(&cache_path_tmp)?;
78 | match file_tmp.try_lock() {
79 | Ok(_) => (),
80 | Err(TryLockError::WouldBlock) => {
81 | // Lock not acquired. Another process is building the artifact, let's
82 | // try again in 100 ms.
83 | thread::sleep(time::Duration::from_millis(100));
84 | continue;
85 | }
86 | Err(TryLockError::Error(err)) => return Err(Box::new(err)),
87 | }
88 | // Exclusive lock acquired, build the artifact, serialize it and store it.
89 | log::info!("building {} and storing to the disk cache", log_name);
90 | let start = std::time::Instant::now();
91 | let data = build_fn(params);
92 | let elapsed = std::time::Instant::now() - start;
93 | log::debug!("built {} in {:?}", log_name, elapsed);
94 | let data_cbor = minicbor_serde::to_vec(&data)?;
95 | // First write the file to .tmp and then rename to avoid a corrupted file if we
96 | // crash in the middle of the write.
97 | file_tmp.write_all(&data_cbor)?;
98 | rename(cache_path_tmp, cache_path)?;
99 | return Ok(CacheEntry { value: data });
100 | } else {
101 | return Err(Box::new(err));
102 | }
103 | }
104 | };
105 | log::debug!("found {} in the disk cache", log_name);
106 |
107 | let start = std::time::Instant::now();
108 | let mut data_cbor = Vec::new();
109 | file.read_to_end(&mut data_cbor)?;
110 | let elapsed = std::time::Instant::now() - start;
111 | log::debug!("read {} from disk in {:?}", log_name, elapsed);
112 |
113 | let start = std::time::Instant::now();
114 | let data: T = minicbor_serde::from_slice(&data_cbor)?;
115 | let elapsed = std::time::Instant::now() - start;
116 | log::debug!("deserialized {} in {:?}", log_name, elapsed);
117 |
118 | return Ok(CacheEntry { value: data });
119 | }
120 | }
121 |
--------------------------------------------------------------------------------
/src/cache/mem.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | any::Any,
3 | collections::HashMap,
4 | ops::Deref,
5 | sync::{LazyLock, Mutex},
6 | thread, time,
7 | };
8 |
9 | use serde::{de::DeserializeOwned, Serialize};
10 | use sha2::{Digest, Sha256};
11 |
// Global in-memory cache. Maps a `params-hash/name` key to either:
// - `None`: a thread is currently building this artifact, or
// - `Some(&'static ...)`: a leaked, type-erased reference to the built artifact
//   (leaked via `Box::leak` so entries can hand out `&'static` references).
#[allow(clippy::type_complexity)]
static CACHE: LazyLock<Mutex<HashMap<String, Option<&'static (dyn Any + Send + Sync)>>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));
15 |
16 | pub struct CacheEntry {
17 | value: &'static T,
18 | }
19 |
20 | impl Deref for CacheEntry {
21 | type Target = T;
22 |
23 | fn deref(&self) -> &Self::Target {
24 | self.value
25 | }
26 | }
27 |
28 | /// Get the artifact named `name` from the memory cache. If it doesn't exist, it will be built by
29 | /// calling `build_fn` and stored.
30 | /// The artifact is indexed by `params: P`.
31 | pub fn get(
32 | name: &str,
33 | params: &P,
34 | build_fn: fn(&P) -> T,
35 | ) -> Result, Box> {
36 | let params_json = serde_json::to_string(params)?;
37 | let params_json_hash = Sha256::digest(¶ms_json);
38 | let params_json_hash_str_long = format!("{:x}", params_json_hash);
39 | let key = format!("{}/{}", ¶ms_json_hash_str_long[..32], name);
40 | log::debug!("getting {} from the mem cache", name);
41 |
42 | loop {
43 | let mut cache = CACHE.lock()?;
44 | if let Some(entry) = cache.get(&key) {
45 | if let Some(boxed_data) = entry {
46 | if let Some(data) = (*boxed_data as &dyn Any).downcast_ref::() {
47 | log::debug!("found {} in the mem cache", name);
48 | return Ok(CacheEntry { value: data });
49 | } else {
50 | panic!(
51 | "type={} doesn't match the type in the cached boxed value with name={}",
52 | std::any::type_name::(),
53 | name
54 | );
55 | }
56 | } else {
57 | // Another thread is building this entry, let's retry again in 100 ms
58 | drop(cache); // release the lock
59 | thread::sleep(time::Duration::from_millis(100));
60 | continue;
61 | }
62 | }
63 | // No entry in the cache, let's put a `None` to signal that we're building the
64 | // artifact, release the lock, build the artifact and insert it. We do this to avoid
65 | // locking for a long time.
66 | cache.insert(key.clone(), None);
67 | drop(cache); // release the lock
68 | log::info!("building {} and storing to the mem cache", name);
69 | let start = std::time::Instant::now();
70 | let data = build_fn(params);
71 | let elapsed = std::time::Instant::now() - start;
72 | log::debug!("built {} in {:?}", name, elapsed);
73 |
74 | CACHE.lock()?.insert(key, Some(Box::leak(Box::new(data))));
75 | // Call `get` again and this time we'll retrieve the data from the cache
76 | return get(name, params, build_fn);
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/src/cache/mod.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "disk_cache")]
2 | mod disk;
3 | #[cfg(feature = "disk_cache")]
4 | pub use disk::{get, CacheEntry};
5 |
6 | #[cfg(feature = "mem_cache")]
7 | mod mem;
8 | #[cfg(feature = "mem_cache")]
9 | pub use mem::{get, CacheEntry};
10 |
--------------------------------------------------------------------------------
/src/examples/custom.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 |
3 | use hex::ToHex;
4 |
5 | use crate::{
6 | frontend::{PodRequest, Result},
7 | lang::parse,
8 | middleware::{CustomPredicateBatch, Params},
9 | };
10 |
11 | /// Instantiates an ETHDos batch
12 | pub fn eth_dos_batch(params: &Params) -> Result> {
13 | let input = r#"
14 | eth_friend(src, dst, private: attestation) = AND(
15 | SignedBy(attestation, src)
16 | Contains(attestation, "attestation", dst)
17 | )
18 |
19 | eth_dos_base(src, dst, distance) = AND(
20 | Equal(src, dst)
21 | Equal(distance, 0)
22 | )
23 |
24 | eth_dos_ind(src, dst, distance, private: shorter_distance, intermed) = AND(
25 | eth_dos(src, intermed, shorter_distance)
26 | SumOf(distance, shorter_distance, 1)
27 | eth_friend(intermed, dst)
28 | )
29 |
30 | eth_dos(src, dst, distance) = OR(
31 | eth_dos_base(src, dst, distance)
32 | eth_dos_ind(src, dst, distance)
33 | )
34 | "#;
35 | let batch = parse(input, params, &[]).expect("lang parse").custom_batch;
36 | println!("a.0. {}", batch.predicates[0]);
37 | println!("a.1. {}", batch.predicates[1]);
38 | println!("a.2. {}", batch.predicates[2]);
39 | println!("a.3. {}", batch.predicates[3]);
40 | Ok(batch)
41 | }
42 |
43 | pub fn eth_dos_request() -> Result {
44 | let batch = eth_dos_batch(&Params::default())?;
45 | let batch_id = batch.id().encode_hex::();
46 | let input = format!(
47 | r#"
48 | use batch _, _, _, eth_dos from 0x{batch_id}
49 | REQUEST(
50 | eth_dos(src, dst, distance)
51 | )
52 | "#,
53 | );
54 | let parsed = parse(&input, &Params::default(), &[batch])?;
55 | Ok(parsed.request)
56 | }
57 |
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: the ETHDos Podlang source parses into a predicate batch
    // under the default parameters.
    #[test]
    fn test_eth_friend_batch() {
        let params = Params::default();
        eth_dos_batch(&params).unwrap();
    }
}
68 |
--------------------------------------------------------------------------------
/src/frontend/error.rs:
--------------------------------------------------------------------------------
1 | use std::{backtrace::Backtrace, fmt::Debug};
2 |
3 | use crate::middleware::{BackendError, Statement, StatementTmpl, Value};
4 |
5 | pub type Result = core::result::Result;
6 |
7 | fn display_wc_map(wc_map: &[Option]) -> String {
8 | let mut out = String::new();
9 | use std::fmt::Write;
10 | for (i, v) in wc_map.iter().enumerate() {
11 | write!(out, "- {}: ", i).unwrap();
12 | if let Some(v) = v {
13 | writeln!(out, "{}", v).unwrap();
14 | } else {
15 | writeln!(out, "none").unwrap();
16 | }
17 | }
18 | out
19 | }
20 |
21 | #[derive(thiserror::Error, Debug)]
22 | pub enum InnerError {
23 | #[error("{0} {1} is over the limit {2}")]
24 | MaxLength(String, usize, usize),
25 | #[error("{0} doesn't match {1:#}.\nWildcard map:\n{map}\nInternal error: {3}", map=display_wc_map(.2))]
26 | StatementsDontMatch(
27 | Statement,
28 | StatementTmpl,
29 | Vec