├── .cargo
│   └── config.toml
├── .clippy.toml
├── .config
│   └── nextest.toml
├── .github
│   ├── PERF_REGRESSION.md
│   ├── SOLIDITY_COMPAT_ISSUE.md
│   ├── dependabot.yml
│   ├── tables.toml
│   └── workflows
│       ├── bench-pr-comment.yml
│       ├── docs.yml
│       ├── gpu-bench.yml
│       ├── gpu-ci.yml
│       ├── nightly.yml
│       ├── repo-sync.yml
│       ├── rust.yml
│       └── solidity.yml
├── .gitignore
├── .lycheeignore
├── Cargo.toml
├── LICENSE
├── README.md
├── ThirdPartyNotices.txt
├── _typos.toml
├── benches
│   ├── bench.env
│   ├── common
│   │   ├── mod.rs
│   │   └── supernova
│   │       ├── bench.rs
│   │       ├── mod.rs
│   │       └── targets.rs
│   ├── compressed-snark-supernova.rs
│   ├── compressed-snark.rs
│   ├── compute-digest.rs
│   ├── justfile
│   ├── pcs.rs
│   ├── recursive-snark-supernova.rs
│   ├── recursive-snark.rs
│   ├── sha256.rs
│   └── supernova-ci.rs
├── build.rs
├── deny.toml
├── examples
│   ├── and.rs
│   ├── hashchain.rs
│   └── minroot.rs
├── rust-toolchain.toml
├── rustfmt.toml
└── src
    ├── bellpepper
    │   ├── mod.rs
    │   ├── r1cs.rs
    │   ├── shape_cs.rs
    │   ├── solver.rs
    │   └── test_shape_cs.rs
    ├── circuit.rs
    ├── constants.rs
    ├── cyclefold
    │   ├── circuit.rs
    │   ├── gadgets.rs
    │   ├── mod.rs
    │   ├── nifs.rs
    │   ├── nova_circuit.rs
    │   ├── snark.rs
    │   └── util.rs
    ├── digest.rs
    ├── errors.rs
    ├── gadgets
    │   ├── ecc.rs
    │   ├── mod.rs
    │   ├── nonnative
    │   │   ├── bignat.rs
    │   │   ├── mod.rs
    │   │   └── util.rs
    │   ├── r1cs.rs
    │   └── utils.rs
    ├── lib.rs
    ├── nifs.rs
    ├── provider
    │   ├── bn256_grumpkin.rs
    │   ├── hyperkzg.rs
    │   ├── ipa_pc.rs
    │   ├── keccak.rs
    │   ├── kzg_commitment.rs
    │   ├── mod.rs
    │   ├── non_hiding_zeromorph.rs
    │   ├── pasta.rs
    │   ├── pedersen.rs
    │   ├── poseidon.rs
    │   ├── secp_secq.rs
    │   ├── tests
    │   │   ├── ipa_pc.rs
    │   │   └── mod.rs
    │   ├── traits.rs
    │   └── util
    │       ├── fb_msm.rs
    │       └── mod.rs
    ├── r1cs
    │   ├── mod.rs
    │   ├── sparse.rs
    │   └── util.rs
    ├── spartan
    │   ├── batched.rs
    │   ├── batched_ppsnark.rs
    │   ├── macros.rs
    │   ├── math.rs
    │   ├── mod.rs
    │   ├── polys
    │   │   ├── eq.rs
    │   │   ├── identity.rs
    │   │   ├── masked_eq.rs
    │   │   ├── mod.rs
    │   │   ├── multilinear.rs
    │   │   ├── power.rs
    │   │   └── univariate.rs
    │   ├── ppsnark.rs
    │   ├── snark.rs
    │   └── sumcheck
    │       ├── engine.rs
    │       └── mod.rs
    ├── supernova
    │   ├── Readme.md
    │   ├── circuit.rs
    │   ├── error.rs
    │   ├── mod.rs
    │   ├── snark.rs
    │   ├── test.rs
    │   └── utils.rs
    └── traits
        ├── circuit.rs
        ├── commitment.rs
        ├── evaluation.rs
        ├── mod.rs
        └── snark.rs

/.cargo/config.toml:
--------------------------------------------------------------------------------
1 | [alias]
2 | # Collection of project-wide clippy lints. This is done via an alias because
3 | # clippy doesn't currently allow for specifying project-wide lints in a
4 | # configuration file.
This is a similar workaround to the ones presented here: 5 | # 6 | # TODO: add support for --all-features 7 | xclippy = [ 8 | "clippy", "--all-targets", "--", 9 | "-Wclippy::all", 10 | "-Wclippy::assertions_on_result_states", 11 | "-Wclippy::cast_lossless", 12 | "-Wclippy::checked_conversions", 13 | "-Wclippy::dbg_macro", 14 | "-Wclippy::disallowed_methods", 15 | "-Wclippy::derive_partial_eq_without_eq", 16 | "-Wclippy::filter_map_next", 17 | "-Wclippy::flat_map_option", 18 | "-Wclippy::from_iter_instead_of_collect", 19 | "-Wclippy::inefficient_to_string", 20 | "-Wclippy::large_stack_arrays", 21 | "-Wclippy::large_types_passed_by_value", 22 | "-Wclippy::macro_use_imports", 23 | "-Wclippy::manual_assert", 24 | "-Wclippy::manual_ok_or", 25 | "-Wclippy::map_flatten", 26 | "-Wclippy::map_unwrap_or", 27 | "-Wclippy::match_same_arms", 28 | "-Wclippy::match_wild_err_arm", 29 | "-Wclippy::needless_borrow", 30 | "-Wclippy::needless_continue", 31 | "-Wclippy::needless_for_each", 32 | "-Wclippy::needless_pass_by_value", 33 | "-Wclippy::option_option", 34 | "-Wclippy::same_functions_in_if_condition", 35 | "-Wclippy::single_match_else", 36 | "-Wclippy::trait_duplication_in_bounds", 37 | "-Wclippy::unnecessary_mut_passed", 38 | "-Wclippy::unnecessary_wraps", 39 | "-Wclippy::use_self", 40 | "-Wmissing_debug_implementations", 41 | "-Wnonstandard_style", 42 | "-Wrust_2018_idioms", 43 | "-Wtrivial_numeric_casts", 44 | "-Wunused_lifetimes", 45 | "-Wunused_qualifications", 46 | ] 47 | -------------------------------------------------------------------------------- /.clippy.toml: -------------------------------------------------------------------------------- 1 | type-complexity-threshold = 9999 2 | too-many-arguments-threshold = 20 3 | disallowed-methods = [ 4 | # we are strict about size checks in iterators 5 | { path = "core::iter::traits::iterator::Iterator::zip", reason = "use itertools::zip_eq instead" }, 6 | { path = "rayon::iter::IndexedParallelIterator::zip", reason = "use rayon::iter::IndexedParallelIterator::zip_eq instead" }, 7 | ] -------------------------------------------------------------------------------- /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [profile.ci] 2 | # Print out output for failing tests as soon as they fail, and also at the end 3 | # of the run (for easy scrollability). 4 | failure-output = "immediate-final" 5 | # Show skipped tests in the CI output. 6 | status-level = "skip" 7 | # Do not cancel the test run on the first failure. 
 8 | fail-fast = false
 9 | # Mark tests as slow after 5 minutes; kill them after 20 minutes (4 periods of 300s)
10 | slow-timeout = { period = "300s", terminate-after = 4 }
11 | # Retry failed tests once; a test that then passes is marked as flaky
12 | retries = 1
--------------------------------------------------------------------------------
/.github/PERF_REGRESSION.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: ":rotating_light: Performance regression in #{{ env.PR_NUMBER }}"
3 | labels: P-Performance, automated issue
4 | ---
5 | Regression >= {{ env.NOISE_THRESHOLD }}% found during merge of: #{{ env.PR_NUMBER }}
6 | Commit: {{ env.GIT_SHA }}
7 | Triggered by: {{ env.WORKFLOW_URL }}
--------------------------------------------------------------------------------
/.github/SOLIDITY_COMPAT_ISSUE.md:
--------------------------------------------------------------------------------
1 | Compatibility with the [Arecibo](https://github.com/argumentcomputer/arecibo) dependency has been broken by commit [`__COMMIT__`](__COMMIT_URL__) from __PR_URL__.
2 | 
3 | Check the [Solidity compatibility workflow run](__WORKFLOW_URL__) for details.
4 | 
5 | This issue was raised by the workflow at __WORKFLOW_FILE__.
6 | 
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
 1 | version: 2
 2 | updates:
 3 |   - package-ecosystem: cargo
 4 |     directory: /
 5 |     pull-request-branch-name:
 6 |       separator: "-"
 7 |     schedule:
 8 |       interval: weekly
 9 |     groups:
10 |       rust-dependencies:
11 |         patterns:
12 |           - "*"
13 |         update-types:
14 |           - "minor"
15 |           - "patch"
16 |     open-pull-requests-limit: 5
17 | 
18 |   - package-ecosystem: "github-actions"
19 |     directory: "/"
20 |     schedule:
21 |       interval: "weekly"
22 | 
--------------------------------------------------------------------------------
/.github/tables.toml:
--------------------------------------------------------------------------------
1 | [table_comments]
2 | 
3 | [top_comments]
4 | Overview = """
5 | This benchmark report shows the Arecibo GPU benchmarks.
6 | """ -------------------------------------------------------------------------------- /.github/workflows/bench-pr-comment.yml: -------------------------------------------------------------------------------- 1 | # Creates a PR benchmark comment with a comparison to the base branch 2 | name: Benchmark pull requests 3 | on: 4 | issue_comment: 5 | types: [created] 6 | 7 | concurrency: 8 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 9 | cancel-in-progress: true 10 | 11 | jobs: 12 | benchmark: 13 | name: Comparative PR benchmark comment 14 | if: 15 | github.event.issue.pull_request 16 | && github.event.issue.state == 'open' 17 | && (contains(github.event.comment.body, '!benchmark') || contains(github.event.comment.body, '!gpu-benchmark')) 18 | && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') 19 | uses: argumentcomputer/ci-workflows/.github/workflows/bench-pr-comment.yml@main 20 | with: 21 | default-runner: "self-hosted,gpu-bench" 22 | default-benches: "supernova-ci" 23 | default-env: "BENCH_OUTPUT=pr-comment BENCH_NUM_CONS=16384,524288" 24 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Generate and deploy crate docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - dev 7 | paths: 8 | - "**.rs" 9 | - "Cargo.toml" 10 | - "Cargo.lock" 11 | workflow_dispatch: 12 | 13 | jobs: 14 | docs: 15 | uses: argumentcomputer/ci-workflows/.github/workflows/docs.yml@main -------------------------------------------------------------------------------- /.github/workflows/gpu-bench.yml: -------------------------------------------------------------------------------- 1 | # Run regression check only when attempting to merge, shown as skipped status check beforehand 2 | name: GPU benchmark regression test 3 | 4 | on: 5 | pull_request: 6 | types: [opened, synchronize, reopened, ready_for_review] 7 | branches: [dev] 8 | merge_group: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | # Run comparative benchmark against dev, open issue on regression 16 | gpu-benchmark: 17 | if: github.event_name != 'pull_request' || github.event.action == 'enqueued' 18 | name: Run benchmarks on GPU 19 | runs-on: [self-hosted, gpu-bench] 20 | steps: 21 | - uses: actions/checkout@v4 22 | with: 23 | repository: argumentcomputer/ci-workflows 24 | - uses: ./.github/actions/gpu-setup 25 | with: 26 | gpu-framework: 'cuda' 27 | - uses: ./.github/actions/ci-env 28 | - uses: actions/checkout@v4 29 | # Install dependencies 30 | - uses: dtolnay/rust-toolchain@stable 31 | - uses: Swatinem/rust-cache@v2 32 | - uses: taiki-e/install-action@v2 33 | with: 34 | tool: just@1.22 35 | - name: Install criterion 36 | run: | 37 | cargo install cargo-criterion 38 | cargo install criterion-table 39 | - name: Set bench output format and base SHA 40 | run: | 41 | echo "BENCH_OUTPUT=commit-comment" | tee -a $GITHUB_ENV 42 | echo "BENCH_NUM_CONS=16384,1038732" | tee -a $GITHUB_ENV 43 | echo "BASE_COMMIT=${{ github.event.merge_group.base_sha }}" | tee -a $GITHUB_ENV 44 | GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader,nounits | tail -n1) 45 | echo "GPU_ID=$(echo $GPU_NAME | awk '{ print $NF }')" | tee -a $GITHUB_ENV 46 | echo "GPU_NAME=$GPU_NAME" | tee -a $GITHUB_ENV 47 | # Checkout base branch for comparative bench 48 | - 
uses: actions/checkout@v4 49 | with: 50 | ref: dev 51 | path: dev 52 | # Copy the script so the base can bench with the same parameters 53 | - name: Run GPU bench on base branch 54 | run: | 55 | # Copy justfile to dev, overwriting existing config with that of PR branch 56 | cp ../benches/justfile . 57 | # Run benchmark 58 | just gpu-bench-ci supernova-ci 59 | # Copy bench output to PR branch 60 | cp supernova-ci-${{ env.BASE_COMMIT }}.json .. 61 | working-directory: ${{ github.workspace }}/dev 62 | - name: Run GPU bench on PR branch 63 | run: | 64 | just gpu-bench-ci supernova-ci 65 | cp supernova-ci-${{ github.sha }}.json .. 66 | working-directory: ${{ github.workspace }}/benches 67 | - name: copy the benchmark template and prepare it with data 68 | run: | 69 | cp .github/tables.toml . 70 | # Get CPU model 71 | CPU_MODEL=$(grep '^model name' /proc/cpuinfo | head -1 | awk -F ': ' '{ print $2 }') 72 | # Get vCPU count 73 | NUM_VCPUS=$(nproc --all) 74 | # Get total RAM in GB 75 | TOTAL_RAM=$(grep MemTotal /proc/meminfo | awk '{$2=$2/(1024^2); print int($2), "GB RAM";}') 76 | WORKFLOW_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" 77 | 78 | # Use conditionals to ensure that only non-empty variables are inserted 79 | [[ ! -z "${{ env.GPU_NAME }}" ]] && sed -i "/^\"\"\"$/i ${{ env.GPU_NAME }}" tables.toml 80 | [[ ! -z "$CPU_MODEL" ]] && sed -i "/^\"\"\"$/i $CPU_MODEL" tables.toml 81 | [[ ! -z "$NUM_VCPUS" ]] && sed -i "/^\"\"\"$/i $NUM_VCPUS vCPUs" tables.toml 82 | [[ ! -z "$TOTAL_RAM" ]] && sed -i "/^\"\"\"$/i $TOTAL_RAM" tables.toml 83 | sed -i "/^\"\"\"$/i Workflow run: $WORKFLOW_URL" tables.toml 84 | echo "WORKFLOW_URL=$WORKFLOW_URL" | tee -a $GITHUB_ENV 85 | working-directory: ${{ github.workspace }} 86 | # Create a `criterion-table` and write in commit comment 87 | - name: Run `criterion-table` 88 | run: | 89 | cat supernova-ci-${{ env.BASE_COMMIT }}.json supernova-ci-${{ github.sha }}.json | criterion-table > BENCHMARKS.md 90 | - name: Write bench on commit comment 91 | uses: peter-evans/commit-comment@v3 92 | with: 93 | body-path: BENCHMARKS.md 94 | # TODO: Set `$BENCH_NOISE_THRESHOLD` via `cardinalby/export-env-action` or hardcode to 1.3 95 | # Check for a slowdown >= `$BENCH_NOISE_THRESHOLD` (fallback is 30%/1.3x). If so, open an issue but don't block merge 96 | # Since we are parsing for slowdowns, we simply add 1 to the noise threshold decimal to get the regression factor 97 | - name: Check for perf regression 98 | id: regression-check 99 | run: | 100 | REGRESSIONS=$(grep -o '[0-9.]*x slower' BENCHMARKS.md | cut -d 'x' -f1) 101 | echo $REGRESSIONS 102 | 103 | if [ ! 
-z "${{ env.BENCH_NOISE_THRESHOLD}}" ]; then 104 | REGRESSION_FACTOR=$(echo "${{ env.BENCH_NOISE_THRESHOLD }}+1" | bc) 105 | else 106 | REGRESSION_FACTOR=1.3 107 | fi 108 | echo "NOISE_THRESHOLD=$(echo "($REGRESSION_FACTOR-1)*100" | bc)" | tee -a $GITHUB_ENV 109 | 110 | for r in $REGRESSIONS 111 | do 112 | if (( $(echo "$r >= $REGRESSION_FACTOR" | bc -l) )) 113 | then 114 | echo "regression=true" | tee -a $GITHUB_OUTPUT 115 | fi 116 | done 117 | # Not possible to use ${{ github.event.number }} with the `merge_group` trigger 118 | - name: Get PR number from merge branch 119 | if: steps.regression-check.outputs.regression == 'true' 120 | run: | 121 | echo "PR_NUMBER=$(echo ${{ github.event.merge_group.head_ref }} | sed -e 's/.*pr-\(.*\)-.*/\1/')" | tee -a $GITHUB_ENV 122 | - uses: JasonEtco/create-an-issue@v2 123 | if: steps.regression-check.outputs.regression == 'true' 124 | env: 125 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 126 | PR_NUMBER: ${{ env.PR_NUMBER }} 127 | GIT_SHA: ${{ github.sha }} 128 | WORKFLOW_URL: ${{ env.WORKFLOW_URL }} 129 | NOISE_THRESHOLD: ${{ env.NOISE_THRESHOLD }} 130 | with: 131 | update_existing: true 132 | filename: .github/PERF_REGRESSION.md -------------------------------------------------------------------------------- /.github/workflows/gpu-ci.yml: -------------------------------------------------------------------------------- 1 | # Runs the test suite on a self-hosted GPU machine with CUDA enabled 2 | name: GPU tests 3 | 4 | on: 5 | pull_request: 6 | types: [opened, synchronize, reopened, ready_for_review] 7 | branches: [dev] 8 | merge_group: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | cuda: 16 | name: Rust tests on CUDA 17 | if: github.event_name != 'pull_request' || github.event.action == 'enqueued' 18 | runs-on: [self-hosted, gpu-ci] 19 | steps: 20 | - uses: actions/checkout@v4 21 | with: 22 | repository: argumentcomputer/ci-workflows 23 | - uses: ./.github/actions/gpu-setup 24 | with: 25 | gpu-framework: 'cuda' 26 | - uses: ./.github/actions/ci-env 27 | - uses: actions/checkout@v4 28 | with: 29 | submodules: recursive 30 | - uses: dtolnay/rust-toolchain@stable 31 | - uses: taiki-e/install-action@nextest 32 | - uses: Swatinem/rust-cache@v2 33 | - name: CUDA tests 34 | run: | 35 | cargo nextest run --profile ci --release --features cuda,asm 36 | -------------------------------------------------------------------------------- /.github/workflows/nightly.yml: -------------------------------------------------------------------------------- 1 | # Nightly sanity checks 2 | name: nightly 3 | 4 | on: 5 | workflow_dispatch: {} 6 | # Once per day at 00:00 UTC 7 | schedule: 8 | - cron: "0 0 * * *" 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | 13 | jobs: 14 | unused-dependencies: 15 | uses: argumentcomputer/ci-workflows/.github/workflows/unused-deps.yml@main 16 | 17 | rust-version-check: 18 | uses: argumentcomputer/ci-workflows/.github/workflows/rust-version-check.yml@main 19 | 20 | typos: 21 | uses: argumentcomputer/ci-workflows/.github/workflows/typos.yml@main -------------------------------------------------------------------------------- /.github/workflows/repo-sync.yml: -------------------------------------------------------------------------------- 1 | on: 2 | schedule: 3 | - cron: "*/15 * * * *" 4 | workflow_dispatch: 5 | 6 | jobs: 7 | repo-sync: 8 | name: Sync changes from upstream Nova 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: 
actions/checkout@v4 12 | with: 13 | persist-credentials: false 14 | - name: repo-sync 15 | uses: repo-sync/github-sync@v2 16 | with: 17 | source_repo: "https://github.com/microsoft/nova.git" 18 | source_branch: "main" 19 | destination_branch: "main" 20 | github_token: ${{ secrets.GITHUB_TOKEN }} 21 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Build and Test Nova 2 | 3 | on: 4 | merge_group: 5 | pull_request: 6 | types: [opened, synchronize, reopened, ready_for_review] 7 | branches: 8 | - dev 9 | - 'feat/**' 10 | - release-candidate 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | build: 18 | runs-on: buildjet-8vcpu-ubuntu-2204 19 | steps: 20 | - uses: actions/checkout@v4 21 | with: 22 | repository: argumentcomputer/ci-workflows 23 | - uses: ./.github/actions/ci-env 24 | - uses: actions/checkout@v4 25 | - uses: dtolnay/rust-toolchain@stable 26 | - uses: taiki-e/install-action@nextest 27 | - uses: Swatinem/rust-cache@v2 28 | - name: Check the minroot example compiles under the abomonate feature 29 | run: cargo check --profile dev-ci --features "abomonate" --examples 30 | - name: Check benches compile under the flamegraph feature 31 | run: cargo check --profile dev-ci --features "flamegraph" --benches 32 | - name: Build, with benches & examples 33 | run: cargo build --profile dev-ci --benches --examples 34 | - name: Linux Tests in parallel, with nextest profile ci and cargo profile dev-ci 35 | run: | 36 | cargo nextest run --profile ci --workspace --cargo-profile dev-ci 37 | - name: Run test_pp_digest with the asm feature 38 | run: | 39 | cargo nextest run --profile ci --workspace --cargo-profile dev-ci --features "asm" -E 'test(test_pp_digest)' 40 | 41 | check-lurk-compiles: 42 | uses: argumentcomputer/ci-workflows/.github/workflows/check-lurk-compiles.yml@main 43 | with: 44 | runner: "buildjet-8vcpu-ubuntu-2204" 45 | packages: "pkg-config libssl-dev protobuf-compiler libprotobuf-dev" 46 | 47 | # Rustfmt, clippy, doctests 48 | code-quality: 49 | uses: argumentcomputer/ci-workflows/.github/workflows/lints.yml@main 50 | 51 | # Checks `cargo build --target wasm32-unknown-unknown` 52 | wasm: 53 | uses: argumentcomputer/ci-workflows/.github/workflows/wasm.yml@main 54 | 55 | # Checks MSRV specified by `rust-version` in `Cargo.toml` 56 | msrv: 57 | uses: argumentcomputer/ci-workflows/.github/workflows/msrv.yml@main 58 | 59 | # Check documentation links aren't broken 60 | link-checker: 61 | uses: argumentcomputer/ci-workflows/.github/workflows/links-check.yml@main 62 | with: 63 | fail-fast: true 64 | 65 | # Lint dependencies for licensing and auditing issues as per https://github.com/argumentcomputer/arecibo/blob/main/deny.toml 66 | licenses-audits: 67 | uses: argumentcomputer/ci-workflows/.github/workflows/licenses-audits.yml@main 68 | -------------------------------------------------------------------------------- /.github/workflows/solidity.yml: -------------------------------------------------------------------------------- 1 | # TODO: Reusify and combine with the `check-lurk-compiles` action and/or make a reusable open-issue action 2 | # 3 | # This workflow runs autogenerated `solidity-verifier` compatibility tests on Arecibo PRs and notifies if compatibility is broken 4 | # The workflow is intended to be a required status check only to make 
sure the Rust test & basic job steps work 5 | # It is NOT intended to block breaking changes from merging, only to noisily surface them for review 6 | # 7 | # If the Rust template fails to generate the Solidity test or the job errors for another reason, the workflow fails immediately 8 | # If the Solidity test fails on `pull_request`, it writes a PR comment to ensure the author/reviewer are notified 9 | # If the Solidity test fails on `merge_group`, it opens an issue in `solidity-verifier` downstream that compatibility has been broken 10 | # `merge_group` failures should only happen intentionally when breaking changes need to be merged in Arecibo 11 | # 12 | # Implementation note: 13 | # `falnyr/replace-env-vars-action`, `micalevisk/last-issue-action` and `peter-evans/create-issue-from-file` replace 14 | # equivalent functionality in `JasonEtco/create-an-issue`. We can't use the latter because it doesn't allow creating 15 | # the issue in another repo. See https://github.com/JasonEtco/create-an-issue/issues/40 16 | name: Test `solidity-verifier` compatibility 17 | 18 | on: 19 | merge_group: 20 | pull_request: 21 | types: [opened, synchronize, reopened, ready_for_review] 22 | branches: 23 | - dev 24 | - 'feat/**' 25 | - release-candidate 26 | 27 | concurrency: 28 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 29 | cancel-in-progress: true 30 | 31 | jobs: 32 | solidity-compat: 33 | runs-on: buildjet-16vcpu-ubuntu-2204 34 | steps: 35 | - uses: actions/checkout@v4 36 | with: 37 | repository: argumentcomputer/ci-workflows 38 | - uses: ./.github/actions/ci-env 39 | - uses: actions/checkout@v4 40 | - uses: dtolnay/rust-toolchain@stable 41 | - uses: taiki-e/install-action@nextest 42 | - uses: Swatinem/rust-cache@v2 43 | - name: Run Solidity test generator 44 | run: | 45 | # Runs all tests with the `test_solidity_compatibility` prefix, e.g. `test_solidity_compatibility_ipa` 46 | cargo nextest run -E 'test(test_solidity_compatibility)' --release --run-ignored all --nocapture > test-output 47 | working-directory: ${{ github.workspace }} 48 | - name: Check out `solidity-verifier` for tests 49 | uses: actions/checkout@v4 50 | with: 51 | repository: argumentcomputer/solidity-verifier 52 | path: solidity-verifier 53 | submodules: recursive 54 | - name: Install Foundry 55 | uses: foundry-rs/foundry-toolchain@v1 56 | with: 57 | version: nightly 58 | - name: Prep Solidity test files 59 | run: | 60 | # Get test names from output, extracting the final word after the final `_` in the test output 61 | # E.g. `test provider::tests::ipa_pc::test::test_solidity_compatibility_ipa ... 
ok` returns `ipa` 62 | # Expects all tests to live in the `provider` module, but can be changed/strengthened in future 63 | TEST_NAMES=$(grep 'test provider::*' test-output | awk -F'[_(.*?)\b...]' '{ print $(NF-3) }') 64 | echo "$TEST_NAMES" 65 | 66 | # Print output of each test to `.t.sol` file 67 | awk -v names="$TEST_NAMES" 'BEGIN { 68 | file_counter = 0 69 | buffer = "" 70 | # Convert test names to array 71 | split(names, elements, " ") 72 | for (i in elements) { 73 | print "Element:", elements[i] 74 | } 75 | } 76 | 77 | /running 1 test/ { 78 | between = 1 79 | buffer = "" 80 | } 81 | 82 | between { 83 | buffer = buffer $0 ORS 84 | } 85 | 86 | /^test provider.*$/ { 87 | between = 0 88 | 89 | if (buffer != "") { 90 | ++file_counter 91 | print buffer > elements[file_counter]".t.sol" 92 | buffer = "" 93 | } 94 | }' test-output 95 | 96 | # Clean up 97 | shopt -s nullglob 98 | for file in *.t.sol; do 99 | cat $file | sed '1d' | head -n -2 > tmp.file && mv tmp.file solidity-verifier/test/$file 100 | done 101 | shopt -u nullglob 102 | working-directory: ${{ github.workspace }} 103 | - name: Run Forge tests 104 | id: solidity-test 105 | run: forge test -vvv 106 | working-directory: ${{ github.workspace }}/solidity-verifier 107 | continue-on-error: true 108 | # Prepares env vars for use in a PR comment or issue in `solidity-verifier` 109 | - name: Set env vars 110 | if: steps.solidity-test.outcome != 'success' 111 | run: | 112 | if [[ "${{ github.event_name }}" == "pull_request" ]]; then 113 | COMMIT=$(echo ${{ github.event.pull_request.head.sha }} | cut -c -7) 114 | PR_NUMBER=${{ github.event.pull_request.number }} 115 | else 116 | COMMIT=$(echo ${{ github.event.merge_group.head_sha }} | cut -c -7) 117 | PR_NUMBER=$(echo ${{ github.event.merge_group.head_ref }} | sed -e 's/.*pr-\(.*\)-.*/\1/') 118 | fi 119 | GITHUB_URL=https://github.com/${{ github.repository }} 120 | WORKFLOW_URL=$GITHUB_URL/actions/runs/${{ github.run_id }} 121 | echo "WORKFLOW_FILE=$WORKFLOW_URL/workflow" | tee -a $GITHUB_ENV 122 | echo "WORKFLOW_URL=$WORKFLOW_URL" | tee -a $GITHUB_ENV 123 | echo "COMMIT_URL=$GITHUB_URL/commit/$COMMIT" | tee -a $GITHUB_ENV 124 | echo "PR_URL=$GITHUB_URL/pull/$PR_NUMBER" | tee -a $GITHUB_ENV 125 | echo "COMMIT=$COMMIT" | tee -a $GITHUB_ENV 126 | # Comment on PR when test fails on `pull_request` 127 | - name: Comment on failing run 128 | if: steps.solidity-test.outcome != 'success' && github.event_name == 'pull_request' 129 | uses: peter-evans/create-or-update-comment@v4 130 | with: 131 | issue-number: ${{ github.event.pull_request.number }} 132 | body: | 133 | `solidity-verifier` compatibility test failed :x: 134 | 135 | ${{ env.WORKFLOW_URL }} 136 | # Substitutes env vars for their values in `SOLIDITY_COMPAT_ISSUE.md` 137 | - uses: falnyr/replace-env-vars-action@master 138 | if: steps.solidity-test.outcome != 'success' && (github.event_name != 'pull_request' || github.event.action == 'enqueued') 139 | env: 140 | WORKFLOW_URL: ${{ env.WORKFLOW_URL }} 141 | WORKFLOW_FILE: ${{ env.WORKFLOW_FILE }} 142 | COMMIT: ${{ env.COMMIT }} 143 | COMMIT_URL: ${{ env.COMMIT_URL }} 144 | PR_URL: ${{ env.PR_URL }} 145 | with: 146 | filename: .github/SOLIDITY_COMPAT_ISSUE.md 147 | # Finds the last open issue matching given labels 148 | - name: Find the last open compatibility issue 149 | id: last-issue 150 | if: steps.solidity-test.outcome != 'success' && (github.event_name != 'pull_request' || github.event.action == 'enqueued') 151 | uses: micalevisk/last-issue-action@v2 152 | with: 153 | repository: 
argumentcomputer/solidity-verifier 154 | state: open 155 | # Find the last updated open issue that has these labels: 156 | labels: | 157 | compatibility 158 | debt 159 | automated issue 160 | # Update existing issue in `solidity-verifier` or create new one 161 | - uses: peter-evans/create-issue-from-file@v5 162 | if: steps.solidity-test.outcome != 'success' && (github.event_name != 'pull_request' || github.event.action == 'enqueued') 163 | with: 164 | token: ${{ secrets.REPO_TOKEN }} 165 | repository: argumentcomputer/solidity-verifier 166 | issue-number: ${{ steps.last-issue.outputs.issue-number }} 167 | title: ":rotating_light: Arecibo compatibility is broken" 168 | content-filepath: .github/SOLIDITY_COMPAT_ISSUE.md 169 | labels: | 170 | compatibility 171 | debt 172 | automated issue 173 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | # Configurations for VSCode 13 | .vscode/ 14 | 15 | # Configuration for Jetbrains 16 | .idea/ -------------------------------------------------------------------------------- /.lycheeignore: -------------------------------------------------------------------------------- 1 | __COMMIT_URL__ 2 | __WORKFLOW_URL__ 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "arecibo" 3 | version = "0.2.0" 4 | authors = ["Srinath Setty ", "Lurk Engineering "] 5 | edition = "2021" 6 | description = "Recursive zkSNARKs without trusted setup" 7 | documentation = "https://docs.rs/arecibo/" 8 | readme = "README.md" 9 | repository = "https://github.com/argumentcomputer/arecibo" 10 | license-file = "LICENSE" 11 | keywords = ["zkSNARKs", "cryptography", "proofs"] 12 | rust-version="1.79" 13 | 14 | [dependencies] 15 | bellpepper-core = { version = "0.4.0", default-features = false } 16 | bellpepper = { git="https://github.com/argumentcomputer/bellpepper", branch="dev", default-features = false } 17 | ff = { version = "0.13.0", features = ["derive"] } 18 | digest = "0.10" 19 | halo2curves = { version = "0.6.0", features = ["bits", "derive_serde"] } 20 | sha3 = "0.10" 21 | rayon = "1.7" 22 | rand_core = { version = "0.6", default-features = false } 23 | rand_chacha = "0.3" 24 | subtle = "2.5" 25 | pasta_curves = { version = "0.5.0", features = ["repr-c", "serde"] } 26 | neptune = { git = "https://github.com/argumentcomputer/neptune", branch="dev", default-features = false, features = ["abomonation"] } 27 | generic-array = "1.0.0" 28 | num-bigint = { version = "0.4", features = ["serde", "rand"] } 29 | num-traits = "0.2" 30 | num-integer = "0.1" 31 | serde = { version = "1.0", features = ["derive", "rc"] } 32 | bincode = "1.3" 33 | bitvec = "1.0" 34 | byteorder = "1.4.3" 35 | thiserror = "1.0" 36 | group = "0.13.0" 37 | pairing = "0.23.0" 38 | abomonation = "0.7.3" 39 | abomonation_derive = { version = "0.1.0", package = "abomonation_derive_ng" } 40 | tracing = "0.1.37" 41 | cfg-if = "1.0.0" 42 | once_cell = "1.18.0" 43 | itertools = "0.13.0" # 
zip_eq 44 | rand = "0.8.5" 45 | ref-cast = "1.0.20" # allocation-less conversion in multilinear polys 46 | derive_more = "0.99.17" # lightens impl macros for pasta 47 | static_assertions = "1.1.0" 48 | rayon-scan = "0.1.0" 49 | 50 | [target.'cfg(any(target_arch = "x86_64", target_arch = "aarch64"))'.dependencies] 51 | # grumpkin-msm has been patched to support MSMs for the pasta curve cycle 52 | # see: https://github.com/argumentcomputer/grumpkin-msm/pull/3 53 | grumpkin-msm = { git = "https://github.com/argumentcomputer/grumpkin-msm", branch = "dev" } 54 | 55 | [target.'cfg(target_arch = "wasm32")'.dependencies] 56 | getrandom = { version = "0.2.0", default-features = false, features = ["js"] } 57 | 58 | [target.'cfg(not(target_arch = "wasm32"))'.dependencies] 59 | proptest = "1.2.0" 60 | pprof = { version = "0.13", optional = true } # in benches under feature "flamegraph" 61 | 62 | [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] 63 | criterion = { version = "0.5", features = ["html_reports"] } 64 | 65 | [dev-dependencies] 66 | flate2 = "1.0" 67 | hex = "0.4.3" 68 | sha2 = "0.10.7" 69 | tracing-test = "0.2.4" 70 | expect-test = "1.4.1" 71 | anyhow = "1.0.72" 72 | tap = "1.0.1" 73 | tracing-texray = "0.2.0" 74 | tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } 75 | handlebars = "6.0.0" 76 | serde_json = "1.0.1" 77 | 78 | [build-dependencies] 79 | vergen = { version = "8", features = ["build", "git", "gitcl"] } 80 | 81 | [[bench]] 82 | name = "recursive-snark" 83 | harness = false 84 | 85 | [[bench]] 86 | name = "compressed-snark" 87 | harness = false 88 | 89 | [[bench]] 90 | name = "compute-digest" 91 | harness = false 92 | 93 | [[bench]] 94 | name = "sha256" 95 | harness = false 96 | 97 | [[bench]] 98 | name = "recursive-snark-supernova" 99 | harness = false 100 | 101 | [[bench]] 102 | name = "compressed-snark-supernova" 103 | harness = false 104 | 105 | [[bench]] 106 | name = "supernova-ci" 107 | harness = false 108 | 109 | [[bench]] 110 | name = "pcs" 111 | harness = false 112 | 113 | [features] 114 | default = [] 115 | abomonate = [] 116 | asm = ["halo2curves/asm"] 117 | # Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. 118 | portable = ["grumpkin-msm/portable"] 119 | cuda = ["grumpkin-msm/cuda"] 120 | flamegraph = ["pprof/flamegraph", "pprof/criterion"] 121 | 122 | [profile.dev-ci] 123 | inherits = "dev" 124 | # By compiling dependencies with optimizations, performing tests gets much faster. 125 | opt-level = 3 126 | lto = "thin" 127 | incremental = false 128 | codegen-units = 16 129 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | Copyright (c) 2023 Argument Computer Corporation 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 
15 | 
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE
23 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # Nova: High-speed recursive arguments from folding schemes
 2 | 
 3 | > [!NOTE]
 4 | > This repository is a fork of the original hosted at [https://github.com/microsoft/nova](https://github.com/microsoft/nova). It's an incubator for experimenting with more advanced variants of the original software and working out the kinks in them.
 5 | 
 6 | > [!IMPORTANT]
 7 | > This fork is always kept up to date with the original repository. Occasionally, this repository backports its own contributions to the original, after an incubation time. Note that until back-ported, the changes in the present repository have not undergone the same level of review.
 8 | 
 9 | 
10 | Nova is a high-speed recursive SNARK (a SNARK is a type of cryptographic proof system that enables a prover to prove a mathematical statement to a verifier with a short proof and succinct verification, and a recursive SNARK enables producing proofs that prove statements about prior proofs).
11 | 
12 | More precisely, Nova achieves [incrementally verifiable computation (IVC)](https://iacr.org/archive/tcc2008/49480001/49480001.pdf), a powerful cryptographic primitive that allows a prover to produce a proof of correct execution of "long running" sequential computations in an incremental fashion. For example, IVC enables the following: The prover takes as input a proof $\pi_i$ proving the first $i$ steps of its computation and then updates it to produce a proof $\pi_{i+1}$ proving the correct execution of the first $i + 1$ steps. Crucially, the prover's work to update the proof does not depend on the number of steps executed thus far, and the verifier's work to verify a proof does not grow with the number of steps in the computation. IVC schemes including Nova have a wide variety of applications such as Rollups, verifiable delay functions (VDFs), succinct blockchains, incrementally verifiable versions of [verifiable state machines](https://eprint.iacr.org/2020/758.pdf), and, more generally, proofs of (virtual) machine executions (e.g., EVM, RISC-V).
13 | 
14 | A distinctive aspect of Nova is that it is the simplest recursive proof system in the literature, yet it provides the fastest prover. Furthermore, it achieves the smallest verifier circuit (a key metric to minimize in this context): the circuit is constant-sized and its size is about 10,000 multiplication gates. Nova is constructed from a simple primitive called a *folding scheme*, a cryptographic primitive that reduces the task of checking two NP statements into the task of checking a single NP statement.
15 | 
16 | ## Details of the library
17 | This repository provides `nova-snark`, a Rust library implementation of Nova over a cycle of elliptic curves. Our code supports three curve cycles: (1) Pallas/Vesta, (2) BN254/Grumpkin, and (3) secp/secq.
18 | 
19 | At its core, Nova relies on a commitment scheme for vectors. Compressing IVC proofs using Spartan relies on interpreting commitments to vectors as commitments to multilinear polynomials and proving evaluations of committed polynomials. Our code implements three commitment schemes and evaluation arguments:
20 | 1. Pedersen commitments with IPA-based evaluation argument (supported on all three curve cycles),
21 | 2. HyperKZG commitments and evaluation argument (supported on curves with pairings, e.g., BN254), and
22 | 3. KZG commitments with a [Zeromorph](https://eprint.iacr.org/2023/917) evaluation argument (supported on curves equipped with a pairing).
23 | 
24 | For more details on using HyperKZG / Zeromorph, please see the test `test_ivc_nontrivial_with_compression`. The HyperKZG instantiation requires a universal trusted setup (the so-called "powers of tau"). In the `setup` method in `src/provider/hyperkzg.rs`, one can load group elements produced in an existing KZG trusted setup (that was created for other proof systems based on univariate polynomials such as Plonk or variants), but the library does not currently do so (please see [this](https://github.com/microsoft/Nova/issues/270) issue).
25 | 
26 | We also implement a SNARK, based on [Spartan](https://eprint.iacr.org/2019/550.pdf), to compress IVC proofs produced by Nova. There are two variants, one that does *not* use any preprocessing and another that uses preprocessing of circuits to ensure that the verifier's run time does not depend on the size of the step circuit.
27 | 
28 | > [!NOTE]
29 | > This library features an implementation of Zeromorph, exclusively available here until the related changes are integrated with the official Nova repository via pull request [#301](https://github.com/microsoft/Nova/pull/301).
30 | > 
31 | > Additionally, we've enhanced Nova to support Supernova, offering a variant that is currently unique to this repository. This advanced capability will remain exclusive here until pull request [#283](https://github.com/microsoft/Nova/pull/283) is merged into the official Nova codebase.
32 | > 
33 | > Our implementation of HyperKZG incorporates notable performance enhancements inspired by Shplonk (BDFG20), as detailed in [this paper](https://eprint.iacr.org/2020/081). These improvements are specifically designed to enhance efficiency and speed.
34 | 
35 | ## Supported front-ends
36 | A front-end is a tool to take a high-level program and turn it into an intermediate representation (e.g., a circuit) that can be used to prove executions of the program on concrete inputs. There are three supported ways to write high-level programs in a form that can be proven with Nova.
37 | 
38 | 1. bellpepper: The native APIs of Nova accept circuits expressed with bellpepper, a Rust toolkit to express circuits. See [minroot.rs](https://github.com/microsoft/Nova/blob/main/examples/minroot.rs) or [sha256.rs](https://github.com/microsoft/Nova/blob/main/benches/sha256.rs) for examples; a minimal sketch of such a step circuit is shown after this list.
39 | 
40 | 2. Circom: A DSL and a compiler to transform high-level programs expressed in its language into circuits. There exists middleware to turn the output of Circom into a form suitable for proving with Nova. See [Nova Scotia](https://github.com/nalinbhardwaj/Nova-Scotia) and [Circom Scotia](https://github.com/argumentcomputer/circom-scotia). In the future, we will add examples in the Nova repository to use these tools with Nova.
41 | 
42 | 3. [Lurk](https://github.com/argumentcomputer/lurk-rs): A Lisp dialect and a universal circuit to execute programs expressed in Lurk. The universal circuit can be proven with Nova.
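To give a flavor of the native API, here is a minimal sketch of a bellpepper step circuit, modeled on the `NonTrivialCircuit` used in this repository's benches. The trait shape follows `arecibo::traits::circuit::StepCircuit`; the circuit name and constraint are illustrative rather than a drop-in recipe:

```rust
use arecibo::traits::circuit::StepCircuit;
use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError};
use ff::PrimeField;

// A step function computing z_{i+1} = z_i^2: one squaring constraint per step.
#[derive(Clone, Debug)]
struct SquareCircuit<F: PrimeField> {
  _p: std::marker::PhantomData<F>,
}

impl<F: PrimeField> StepCircuit<F> for SquareCircuit<F> {
  // The step circuit consumes and produces a single field element.
  fn arity(&self) -> usize {
    1
  }

  // Allocate the constraint `y = x * x` and return the next state.
  fn synthesize<CS: ConstraintSystem<F>>(
    &self,
    cs: &mut CS,
    z: &[AllocatedNum<F>],
  ) -> Result<Vec<AllocatedNum<F>>, SynthesisError> {
    let y = z[0].square(cs.namespace(|| "x_sq"))?;
    Ok(vec![y])
  }
}
```

Such a circuit is then handed to `PublicParams::setup` and proven step by step with a `RecursiveSNARK`, as done in the examples linked above.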
43 | 
44 | In the future, we plan to support [Noir](https://noir-lang.org/), a Rust-like DSL and a compiler to transform Noir programs into an IR. See [this](https://github.com/microsoft/Nova/issues/275) GitHub issue for details.
45 | 
46 | ## Tests and examples
47 | By default, we enable the `asm` feature of an underlying library (which boosts performance by up to 50\%). If the library fails to build or run, one can pass `--no-default-features` to `cargo` commands noted below.
48 | 
49 | To run tests (we recommend the release mode to drastically shorten run times):
50 | ```text
51 | cargo test --release
52 | ```
53 | 
54 | To run an example:
55 | ```text
56 | cargo run --release --example minroot
57 | ```
58 | 
59 | ## Specs and Documentation
60 | 
61 | - [SuperNova](./src/supernova/Readme.md)
62 | 
63 | ## References
64 | The following paper, which appeared at CRYPTO 2022, provides details of the Nova proof system and a proof of security:
65 | 
66 | [Nova: Recursive Zero-Knowledge Arguments from Folding Schemes](https://eprint.iacr.org/2021/370) \
67 | Abhiram Kothapalli, Srinath Setty, and Ioanna Tzialla \
68 | CRYPTO 2022
69 | 
70 | For efficiency, our implementation of the Nova proof system is instantiated over a cycle of elliptic curves. The following paper specifies that instantiation and provides a proof of security:
71 | 
72 | [Revisiting the Nova Proof System on a Cycle of Curves](https://eprint.iacr.org/2023/969) \
73 | Wilson Nguyen, Dan Boneh, and Srinath Setty \
74 | IACR ePrint 2023/969
75 | 
76 | This repository implements SuperNova, published in:
77 | [SuperNova: Proving universal machine executions without universal circuits](https://eprint.iacr.org/2022/1758) \
78 | Abhiram Kothapalli and Srinath Setty \
79 | IACR ePrint 2022/1758
80 | 
81 | ## Acknowledgments
82 | See the contributors list [here](https://github.com/argumentcomputer/arecibo/graphs/contributors)
83 | 
--------------------------------------------------------------------------------
/ThirdPartyNotices.txt:
--------------------------------------------------------------------------------
 1 | THIRD PARTY NOTICES
 2 | 
 3 | This repository incorporates material as listed below or described in the code.
 4 | 
 5 | ------------------------------------------------------------
 6 | https://github.com/microsoft/Nova
 7 | 
 8 | Licensed under MIT
 9 | 
10 | MIT License
11 | 
12 | Copyright (c) Microsoft Corporation.
13 | 
14 | Permission is hereby granted, free of charge, to any person obtaining a copy
15 | of this software and associated documentation files (the "Software"), to deal
16 | in the Software without restriction, including without limitation the rights
17 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
18 | copies of the Software, and to permit persons to whom the Software is
19 | furnished to do so, subject to the following conditions:
20 | 
21 | The above copyright notice and this permission notice shall be included in all
22 | copies or substantial portions of the Software.
23 | 
24 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 27 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 28 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 29 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 | SOFTWARE 31 | 32 | ------------------------------------------------------------ 33 | https://github.com/espressosystems/jellyfish 34 | https://github.com/han0110/plonkish/ 35 | https://github.com/espressosystems/hyperplonk 36 | 37 | Licensed under MIT 38 | 39 | MIT License 40 | 41 | Copyright (c) 2022 Espresso Systems, han0110 42 | 43 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated 44 | documentation files (the "Software"), to deal in the Software without restriction, including without limitation 45 | the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 46 | permit persons to whom the Software is furnished to do so, subject to the following conditions: 47 | 48 | The above copyright notice and this permission notice shall be included in all copies or substantial 49 | portions of the Software. 50 | 51 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 52 | WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 53 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 54 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 55 | 56 | ------------------------------------------------------------ 57 | https://github.com/AztecProtocol/aztec-packages/ 58 | 59 | Licensed under Apache 2.0 60 | 61 | Copyright 2022 Aztec 62 | 63 | Licensed under the Apache License, Version 2.0 (the "License"); 64 | you may not use this file except in compliance with the License. 65 | You may obtain a copy of the License at 66 | 67 | http://www.apache.org/licenses/LICENSE-2.0 68 | 69 | Unless required by applicable law or agreed to in writing, software 70 | distributed under the License is distributed on an "AS IS" BASIS, 71 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 72 | See the License for the specific language governing permissions and 73 | limitations under the License. -------------------------------------------------------------------------------- /_typos.toml: -------------------------------------------------------------------------------- 1 | [default] 2 | extend-ignore-identifiers-re = [ 3 | # *sigh* this is load-bearing 4 | "[Aa]bomonation", 5 | ] 6 | -------------------------------------------------------------------------------- /benches/bench.env: -------------------------------------------------------------------------------- 1 | # Arecibo config, used only in `justfile` by default 2 | BENCH_NUM_CONS=16384,1048576 3 | BENCH_OUTPUT=commit-comment -------------------------------------------------------------------------------- /benches/common/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod supernova; 2 | 3 | use anyhow::anyhow; 4 | use criterion::BenchmarkId; 5 | 6 | // TODO: Why Copy and &'static str over String? 
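// `BenchParams` identifies a bench run: the step-circuit size, plus the git commit date
// and SHA of the build; `bench_id` below folds these into the reported benchmark IDs.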
 7 | #[derive(Clone, Debug, Copy)]
 8 | pub(crate) struct BenchParams {
 9 |   pub step_size: usize,
10 |   pub date: &'static str,
11 |   pub sha: &'static str,
12 | }
13 | impl BenchParams {
14 |   pub(crate) fn bench_id(&self, name: &str) -> BenchmarkId {
15 |     let output_type = output_type_env().unwrap_or("stdout".into());
16 |     match output_type.as_ref() {
17 |       "pr-comment" => BenchmarkId::new(name, format!("StepCircuitSize-{}", self.step_size)),
18 |       "commit-comment" => {
19 |         let mut short_sha = self.sha.to_owned();
20 |         short_sha.truncate(7);
21 |         BenchmarkId::new(
22 |           format!("ref={}", short_sha),
23 |           format!("{}-NumCons-{}", name, self.step_size),
24 |         )
25 |       }
26 |       // TODO: refine "gh-pages"
27 |       _ => BenchmarkId::new(
28 |         name,
29 |         format!(
30 |           "StepCircuitSize-{}-{}-{}",
31 |           self.step_size, self.sha, self.date
32 |         ),
33 |       ),
34 |     }
35 |   }
36 | }
37 | 
38 | fn output_type_env() -> anyhow::Result<String> {
39 |   std::env::var("BENCH_OUTPUT").map_err(|e| anyhow!("BENCH_OUTPUT env var isn't set: {e}"))
40 | }
41 | 
42 | pub(crate) fn noise_threshold_env() -> anyhow::Result<f64> {
43 |   std::env::var("BENCH_NOISE_THRESHOLD")
44 |     .map_err(|e| anyhow!("BENCH_NOISE_THRESHOLD env var isn't set: {e}"))
45 |     .and_then(|nt| {
46 |       nt.parse::<f64>()
47 |         .map_err(|e| anyhow!("Failed to parse noise threshold: {e}"))
48 |     })
49 | }
50 | 
--------------------------------------------------------------------------------
/benches/common/supernova/bench.rs:
--------------------------------------------------------------------------------
 1 | // Code is considered dead unless used in all benchmark targets
 2 | #![allow(dead_code)]
 3 | 
 4 | use crate::common::supernova::{
 5 |   num_cons, NonUniformBench, SnarkType, E1, E2, NUM_CONS_VERIFIER_CIRCUIT_PRIMARY, NUM_SAMPLES,
 6 | };
 7 | use crate::common::{noise_threshold_env, BenchParams};
 8 | use arecibo::{
 9 |   provider::{Bn256EngineKZG, GrumpkinEngine},
10 |   supernova::NonUniformCircuit,
11 |   supernova::{snark::CompressedSNARK, PublicParams, RecursiveSNARK},
12 |   traits::{
13 |     snark::RelaxedR1CSSNARKTrait,
14 |     snark::{default_ck_hint, BatchedRelaxedR1CSSNARKTrait},
15 |     Engine,
16 |   },
17 | };
18 | use criterion::{measurement::WallTime, *};
19 | 
20 | /// Benchmarks the SNARK at a provided number of constraints
21 | ///
22 | /// Parameters
23 | /// - `num_augmented_circuits`: the number of augmented circuits in this configuration
24 | /// - `group`: the criterion benchmark group
25 | /// - `num_cons`: the number of constraints in the step circuit
26 | pub fn bench_snark_internal_with_arity<
27 |   S1: BatchedRelaxedR1CSSNARKTrait<E1>,
28 |   S2: RelaxedR1CSSNARKTrait<E2>,
29 | >(
30 |   group: &mut BenchmarkGroup<'_, WallTime>,
31 |   num_augmented_circuits: usize,
32 |   num_cons: usize,
33 |   snark_type: SnarkType,
34 | ) {
35 |   let bench: NonUniformBench<E1> = match snark_type {
36 |     SnarkType::Recursive => NonUniformBench::<E1>::new(2, num_cons),
37 |     SnarkType::Compressed => NonUniformBench::<E1>::new(num_augmented_circuits, num_cons),
38 |   };
39 |   let pp = match snark_type {
40 |     SnarkType::Recursive => PublicParams::setup(&bench, &*default_ck_hint(), &*default_ck_hint()),
41 |     SnarkType::Compressed => PublicParams::setup(&bench, &*S1::ck_floor(), &*S2::ck_floor()),
42 |   };
43 | 
44 |   // TODO: Can we use the same number of warmup steps for recursive and compressed?
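  // First fold a number of warm-up steps into the RecursiveSNARK, so that the
  // prove/verify calls timed below operate on a realistic, non-trivial instance.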
 45 |   let num_warmup_steps = match snark_type {
 46 |     SnarkType::Recursive => 10,
 47 |     SnarkType::Compressed => 3,
 48 |   };
 49 |   let z0_primary = vec![<E1 as Engine>::Scalar::from(2u64)];
 50 |   let z0_secondary = vec![<E2 as Engine>::Scalar::from(2u64)];
 51 |   let mut recursive_snark_option: Option<RecursiveSNARK<E1>> = None;
 52 |   let mut selected_augmented_circuit = 0;
 53 | 
 54 |   for _ in 0..num_warmup_steps {
 55 |     let mut recursive_snark = recursive_snark_option.unwrap_or_else(|| {
 56 |       RecursiveSNARK::new(
 57 |         &pp,
 58 |         &bench,
 59 |         &bench.primary_circuit(0),
 60 |         &bench.secondary_circuit(),
 61 |         &z0_primary,
 62 |         &z0_secondary,
 63 |       )
 64 |       .unwrap()
 65 |     });
 66 | 
 67 |     if selected_augmented_circuit == 0 || selected_augmented_circuit == 1 {
 68 |       recursive_snark
 69 |         .prove_step(
 70 |           &pp,
 71 |           &bench.primary_circuit(selected_augmented_circuit),
 72 |           &bench.secondary_circuit(),
 73 |         )
 74 |         .expect("Prove step failed");
 75 | 
 76 |       recursive_snark
 77 |         .verify(&pp, &z0_primary, &z0_secondary)
 78 |         .expect("Verify failed");
 79 |     } else {
 80 |       unimplemented!()
 81 |     }
 82 | 
 83 |     selected_augmented_circuit = (selected_augmented_circuit + 1) % num_augmented_circuits;
 84 |     recursive_snark_option = Some(recursive_snark)
 85 |   }
 86 | 
 87 |   assert!(recursive_snark_option.is_some());
 88 |   let recursive_snark = recursive_snark_option.unwrap();
 89 | 
 90 |   let bench_params = BenchParams {
 91 |     step_size: num_cons,
 92 |     date: env!("VERGEN_GIT_COMMIT_DATE"),
 93 |     sha: env!("VERGEN_GIT_SHA"),
 94 |   };
 95 | 
 96 |   match snark_type {
 97 |     SnarkType::Compressed => {
 98 |       let (prover_key, verifier_key) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap();
 99 |       // Benchmark the prove time
100 |       group.bench_function(bench_params.bench_id("Prove"), |b| {
101 |         b.iter(|| {
102 |           CompressedSNARK::<_, S1, S2>::prove(
103 |             black_box(&pp),
104 |             black_box(&prover_key),
105 |             black_box(&recursive_snark),
106 |           )
107 |           .unwrap();
108 |         })
109 |       });
110 | 
111 |       let compressed_snark =
112 |         CompressedSNARK::<_, S1, S2>::prove(&pp, &prover_key, &recursive_snark).unwrap();
113 |       // Benchmark the verification time
114 |       group.bench_function(bench_params.bench_id("Verify"), |b| {
115 |         b.iter(|| {
116 |           black_box(&compressed_snark)
117 |             .verify(
118 |               black_box(&pp),
119 |               black_box(&verifier_key),
120 |               black_box(&z0_primary),
121 |               black_box(&z0_secondary),
122 |             )
123 |             .unwrap();
124 |         })
125 |       });
126 |     }
127 |     SnarkType::Recursive => {
128 |       // Benchmark the prove time
129 |       group.bench_function(bench_params.bench_id("Prove"), |b| {
130 |         b.iter(|| {
131 |           black_box(&mut recursive_snark.clone())
132 |             .prove_step(
133 |               black_box(&pp),
134 |               &bench.primary_circuit(0),
135 |               &bench.secondary_circuit(),
136 |             )
137 |             .unwrap();
138 |         })
139 |       });
140 | 
141 |       // Benchmark the verification time
142 |       group.bench_function(bench_params.bench_id("Verify"), |b| {
143 |         b.iter(|| {
144 |           black_box(&mut recursive_snark.clone())
145 |             .verify(
146 |               black_box(&pp),
147 |               black_box(&[<E1 as Engine>::Scalar::from(2u64)]),
148 |               black_box(&[<E2 as Engine>::Scalar::from(2u64)]),
149 |             )
150 |             .unwrap();
151 |         })
152 |       });
153 |     }
154 |   }
155 | }
156 | 
157 | pub fn run_bench<S1: BatchedRelaxedR1CSSNARKTrait<E1>, S2: RelaxedR1CSSNARKTrait<E2>>(
158 |   c: &mut Criterion,
159 |   group_name: &str,
160 |   arity: usize,
161 |   snark_type: SnarkType,
162 | ) {
163 |   // we vary the number of constraints in the step circuit
164 |   for &num_cons_in_augmented_circuit in num_cons().iter() {
165 |     // number of constraints in the step circuit
166 |     let num_cons = num_cons_in_augmented_circuit
167 |       .checked_sub(NUM_CONS_VERIFIER_CIRCUIT_PRIMARY)
168 |       .expect("Negative `num_cons`, constraint numbers out of date!");
169 | 
170 |     let mut group = c.benchmark_group(group_name);
171 |     group.sample_size(NUM_SAMPLES);
172 |     group.noise_threshold(noise_threshold_env().unwrap_or(0.3));
173 | 
174 |     bench_snark_internal_with_arity::<S1, S2>(&mut group, arity, num_cons, snark_type);
175 | 
176 |     group.finish();
177 |   }
178 | }
179 | 
--------------------------------------------------------------------------------
/benches/common/supernova/mod.rs:
--------------------------------------------------------------------------------
  1 | // Code is considered dead unless used in all benchmark targets
  2 | #![allow(dead_code)]
  3 | 
  4 | pub mod bench;
  5 | pub mod targets;
  6 | 
  7 | use anyhow::anyhow;
  8 | use arecibo::{
  9 |   supernova::{NonUniformCircuit, StepCircuit, TrivialTestCircuit},
 10 |   traits::{CurveCycleEquipped, Dual, Engine},
 11 | };
 12 | use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError};
 13 | use core::marker::PhantomData;
 14 | use ff::PrimeField;
 15 | use halo2curves::bn256::Bn256;
 16 | 
 17 | pub type E1 = arecibo::provider::Bn256EngineKZG;
 18 | pub type E2 = arecibo::provider::GrumpkinEngine;
 19 | pub type EE1 = arecibo::provider::hyperkzg::EvaluationEngine<Bn256, E1>;
 20 | pub type EE2 = arecibo::provider::ipa_pc::EvaluationEngine<E2>;
 21 | // SNARKs without computation commitments
 22 | pub type S1 = arecibo::spartan::batched::BatchedRelaxedR1CSSNARK<E1, EE1>;
 23 | pub type S2 = arecibo::spartan::snark::RelaxedR1CSSNARK<E2, EE2>;
 24 | // SNARKs with computation commitments
 25 | pub type SS1 = arecibo::spartan::batched_ppsnark::BatchedRelaxedR1CSSNARK<E1, EE1>;
 26 | pub type SS2 = arecibo::spartan::ppsnark::RelaxedR1CSSNARK<E2, EE2>;
 27 | 
 28 | // This should match the value in test_supernova_recursive_circuit_pasta
 29 | // Note `NUM_CONS_VERIFIER_CIRCUIT_PRIMARY` is different for Nova and Supernova
 30 | // TODO: This should also be a table matching the num_augmented_circuits in the below
 31 | pub const NUM_CONS_VERIFIER_CIRCUIT_PRIMARY: usize = 9844;
 32 | pub const NUM_SAMPLES: usize = 10;
 33 | 
 34 | #[derive(Copy, Clone)]
 35 | pub enum SnarkType {
 36 |   Recursive,
 37 |   Compressed,
 38 | }
 39 | 
 40 | // TODO: Move this up a level to `common/mod.rs`, then integrate with non-Supernova benches
 41 | pub fn num_cons() -> Vec<usize> {
 42 |   num_cons_env().unwrap_or_else(|_| {
 43 |     vec![
 44 |       NUM_CONS_VERIFIER_CIRCUIT_PRIMARY,
 45 |       16384,
 46 |       32768,
 47 |       65536,
 48 |       131072,
 49 |       262144,
 50 |       524288,
 51 |       1048576,
 52 |     ]
 53 |   })
 54 | }
 55 | 
 56 | fn num_cons_env() -> anyhow::Result<Vec<usize>> {
 57 |   std::env::var("BENCH_NUM_CONS")
 58 |     .map_err(|e| anyhow!("BENCH_NUM_CONS env var not set: {e}"))
 59 |     .and_then(|rc| {
 60 |       let vec: anyhow::Result<Vec<usize>> = rc
 61 |         .split(',')
 62 |         .map(|rc| {
 63 |           rc.parse::<usize>()
 64 |             .map_err(|e| anyhow!("Failed to parse constraint number: {e}"))
 65 |         })
 66 |         .collect();
 67 |       vec
 68 |     })
 69 | }
 70 | 
 71 | pub struct NonUniformBench<E1>
 72 | where
 73 |   E1: CurveCycleEquipped,
 74 | {
 75 |   num_circuits: usize,
 76 |   num_cons: usize,
 77 |   _p: PhantomData<E1>,
 78 | }
 79 | 
 80 | impl<E1> NonUniformBench<E1>
 81 | where
 82 |   E1: CurveCycleEquipped,
 83 | {
 84 |   fn new(num_circuits: usize, num_cons: usize) -> Self {
 85 |     Self {
 86 |       num_circuits,
 87 |       num_cons,
 88 |       _p: PhantomData,
 89 |     }
 90 |   }
 91 | }
 92 | 
 93 | impl<E1> NonUniformCircuit<E1> for NonUniformBench<E1>
 94 | where
 95 |   E1: CurveCycleEquipped,
 96 | {
 97 |   type C1 = NonTrivialTestCircuit<E1::Scalar>;
 98 |   type C2 = TrivialTestCircuit<<Dual<E1> as Engine>::Scalar>;
 99 | 
100 |   fn num_circuits(&self) -> usize {
101 |     self.num_circuits
102 |   }
103 | 
104 |   fn primary_circuit(&self, circuit_index: usize) -> Self::C1 {
Self::C1 {
   105 |     assert!(
   106 |       circuit_index < self.num_circuits,
   107 |       "Circuit index out of bounds: asked for {circuit_index}, but there are only {} circuits.",
   108 |       self.num_circuits
   109 |     );
   110 | 
   111 |     NonTrivialTestCircuit::new(self.num_cons)
   112 |   }
   113 | 
   114 |   fn secondary_circuit(&self) -> Self::C2 {
   115 |     Default::default()
   116 |   }
   117 | }
   118 | 
   119 | #[derive(Clone, Debug, Default)]
   120 | pub struct NonTrivialTestCircuit<F> {
   121 |   num_cons: usize,
   122 |   _p: PhantomData<F>,
   123 | }
   124 | 
   125 | impl<F> NonTrivialTestCircuit<F>
   126 | where
   127 |   F: PrimeField,
   128 | {
   129 |   pub fn new(num_cons: usize) -> Self {
   130 |     Self {
   131 |       num_cons,
   132 |       _p: Default::default(),
   133 |     }
   134 |   }
   135 | }
   136 | impl<F> StepCircuit<F> for NonTrivialTestCircuit<F>
   137 | where
   138 |   F: PrimeField,
   139 | {
   140 |   fn arity(&self) -> usize {
   141 |     1
   142 |   }
   143 | 
   144 |   fn circuit_index(&self) -> usize {
   145 |     0
   146 |   }
   147 | 
   148 |   fn synthesize<CS: ConstraintSystem<F>>(
   149 |     &self,
   150 |     cs: &mut CS,
   151 |     pc: Option<&AllocatedNum<F>>,
   152 |     z: &[AllocatedNum<F>],
   153 |   ) -> Result<(Option<AllocatedNum<F>>, Vec<AllocatedNum<F>>), SynthesisError> {
   154 |     // Consider the equation `x^(2^num_cons) = y`, where `x` is the input and `y` the output: each iteration squares the running value.
   155 |     let mut x = z[0].clone();
   156 |     let mut y = x.clone();
   157 |     for i in 0..self.num_cons {
   158 |       y = x.square(cs.namespace(|| format!("x_sq_{i}")))?;
   159 |       x = y.clone();
   160 |     }
   161 |     Ok((pc.cloned(), vec![y]))
   162 |   }
   163 | }
   164 | 
--------------------------------------------------------------------------------
/benches/common/supernova/targets.rs:
--------------------------------------------------------------------------------
     1 | // Code is considered dead unless used in all benchmark targets
     2 | #![allow(dead_code)]
     3 | use criterion::Criterion;
     4 | 
     5 | use crate::common::supernova::{bench::run_bench, SnarkType, S1, S2, SS1, SS2};
     6 | 
     7 | // Recursive Supernova SNARK benchmarks
     8 | pub fn bench_one_augmented_circuit_recursive_snark(c: &mut Criterion) {
     9 |   run_bench::<S1, S2>(c, "RecursiveSNARK-NIVC-1", 1, SnarkType::Recursive)
    10 | }
    11 | 
    12 | pub fn bench_two_augmented_circuit_recursive_snark(c: &mut Criterion) {
    13 |   run_bench::<S1, S2>(c, "RecursiveSNARK-NIVC-2", 2, SnarkType::Recursive)
    14 | }
    15 | 
    16 | // Compressed Supernova SNARK benchmarks
    17 | pub fn bench_one_augmented_circuit_compressed_snark(c: &mut Criterion) {
    18 |   run_bench::<S1, S2>(c, "CompressedSNARK-NIVC-1", 1, SnarkType::Compressed)
    19 | }
    20 | 
    21 | pub fn bench_two_augmented_circuit_compressed_snark(c: &mut Criterion) {
    22 |   run_bench::<S1, S2>(c, "CompressedSNARK-NIVC-2", 2, SnarkType::Compressed)
    23 | }
    24 | 
    25 | pub fn bench_two_augmented_circuit_compressed_snark_with_computational_commitments(
    26 |   c: &mut Criterion,
    27 | ) {
    28 |   run_bench::<SS1, SS2>(
    29 |     c,
    30 |     "CompressedSNARK-NIVC-Commitments-2",
    31 |     2,
    32 |     SnarkType::Compressed,
    33 |   )
    34 | }
    35 | 
--------------------------------------------------------------------------------
/benches/compressed-snark-supernova.rs:
--------------------------------------------------------------------------------
     1 | use criterion::*;
     2 | use std::time::Duration;
     3 | 
     4 | mod common;
     5 | use common::supernova::targets::{
     6 |   bench_one_augmented_circuit_compressed_snark, bench_two_augmented_circuit_compressed_snark,
     7 |   bench_two_augmented_circuit_compressed_snark_with_computational_commitments,
     8 | };
     9 | 
    10 | // To run these benchmarks, first download `criterion` with `cargo install cargo-criterion`.
    11 | // Then `cargo criterion --bench compressed-snark-supernova`. The results are located in `target/criterion/data/`.
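A note on the sizes these targets sweep: `run_bench` above subtracts the fixed verifier-circuit overhead from each requested augmented-circuit size, so the smallest entry of `num_cons()` yields an (almost) empty step circuit. A small sketch of that arithmetic (illustrative only; `step_circuit_cons` is not a crate item):

```rust
fn step_circuit_cons(augmented: usize) -> Option<usize> {
  // SuperNova's primary verifier circuit contributes 9844 constraints (see `mod.rs` above).
  const NUM_CONS_VERIFIER_CIRCUIT_PRIMARY: usize = 9844;
  augmented.checked_sub(NUM_CONS_VERIFIER_CIRCUIT_PRIMARY)
}

fn main() {
  assert_eq!(step_circuit_cons(16384), Some(6540)); // 2^14 constraints in total
  assert_eq!(step_circuit_cons(9844), Some(0));     // smallest entry in the sweep
  assert_eq!(step_circuit_cons(1024), None);        // `run_bench` would panic here
}
```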
    12 | // For flamegraphs, run `cargo criterion --bench compressed-snark-supernova --features flamegraph -- --profile-time <secs>`.
    13 | // The results are located in `target/criterion/profile/`.
    14 | cfg_if::cfg_if! {
    15 |   if #[cfg(feature = "flamegraph")] {
    16 |     criterion_group! {
    17 |       name = compressed_snark_supernova;
    18 |       config = Criterion::default().warm_up_time(Duration::from_millis(3000)).with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None)));
    19 |       targets = bench_one_augmented_circuit_compressed_snark, bench_two_augmented_circuit_compressed_snark, bench_two_augmented_circuit_compressed_snark_with_computational_commitments
    20 |     }
    21 |   } else {
    22 |     criterion_group! {
    23 |       name = compressed_snark_supernova;
    24 |       config = Criterion::default().warm_up_time(Duration::from_millis(3000));
    25 |       targets = bench_one_augmented_circuit_compressed_snark, bench_two_augmented_circuit_compressed_snark, bench_two_augmented_circuit_compressed_snark_with_computational_commitments
    26 |     }
    27 |   }
    28 | }
    29 | 
    30 | criterion_main!(compressed_snark_supernova);
    31 | 
--------------------------------------------------------------------------------
/benches/compute-digest.rs:
--------------------------------------------------------------------------------
     1 | use std::{marker::PhantomData, time::Duration};
     2 | 
     3 | use arecibo::{
     4 |   provider::{Bn256EngineKZG, GrumpkinEngine},
     5 |   traits::{
     6 |     circuit::{StepCircuit, TrivialCircuit},
     7 |     snark::default_ck_hint,
     8 |     Engine,
     9 |   },
    10 |   PublicParams,
    11 | };
    12 | use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError};
    13 | use criterion::{black_box, criterion_group, criterion_main, Criterion};
    14 | use ff::PrimeField;
    15 | 
    16 | type E1 = Bn256EngineKZG;
    17 | type E2 = GrumpkinEngine;
    18 | type C1 = NonTrivialCircuit<<E1 as Engine>::Scalar>;
    19 | type C2 = TrivialCircuit<<E2 as Engine>::Scalar>;
    20 | 
    21 | criterion_group! {
    22 |   name = compute_digest;
    23 |   config = Criterion::default().warm_up_time(Duration::from_millis(3000)).sample_size(10);
    24 |   targets = bench_compute_digest
    25 | }
    26 | 
    27 | criterion_main!(compute_digest);
    28 | 
    29 | fn bench_compute_digest(c: &mut Criterion) {
    30 |   c.bench_function("compute_digest", |b| {
    31 |     b.iter(|| {
    32 |       PublicParams::<E1>::setup(
    33 |         black_box(&C1::new(10)),
    34 |         black_box(&C2::default()),
    35 |         black_box(&*default_ck_hint()),
    36 |         black_box(&*default_ck_hint()),
    37 |       )
    38 |     })
    39 |   });
    40 | }
    41 | 
    42 | #[derive(Clone, Debug, Default)]
    43 | struct NonTrivialCircuit<F: PrimeField> {
    44 |   num_cons: usize,
    45 |   _p: PhantomData<F>,
    46 | }
    47 | 
    48 | impl<F: PrimeField> NonTrivialCircuit<F> {
    49 |   pub fn new(num_cons: usize) -> Self {
    50 |     Self {
    51 |       num_cons,
    52 |       _p: PhantomData,
    53 |     }
    54 |   }
    55 | }
    56 | impl<F: PrimeField> StepCircuit<F> for NonTrivialCircuit<F> {
    57 |   fn arity(&self) -> usize {
    58 |     1
    59 |   }
    60 | 
    61 |   fn synthesize<CS: ConstraintSystem<F>>(
    62 |     &self,
    63 |     cs: &mut CS,
    64 |     z: &[AllocatedNum<F>],
    65 |   ) -> Result<Vec<AllocatedNum<F>>, SynthesisError> {
    66 |     // Consider the equation `x^(2^num_cons) = y`, where `x` is the input and `y` the output: each iteration squares the running value.
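    // Each `square` call below enforces exactly one R1CS constraint of the
    // form x * x = y, so the loop contributes `num_cons` constraints to the
    // step circuit; `C1::new(10)` above therefore sizes this circuit at ten
    // such constraints.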
67 | let mut x = z[0].clone(); 68 | let mut y = x.clone(); 69 | for i in 0..self.num_cons { 70 | y = x.square(cs.namespace(|| format!("x_sq_{i}")))?; 71 | x = y.clone(); 72 | } 73 | Ok(vec![y]) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /benches/justfile: -------------------------------------------------------------------------------- 1 | # Install with `cargo install just` 2 | # Usage: `just ` 3 | set dotenv-load 4 | set dotenv-filename := "bench.env" 5 | set ignore-comments := true 6 | 7 | commit := `git rev-parse HEAD` 8 | 9 | # Run CPU benchmarks 10 | bench +benches: 11 | #!/bin/sh 12 | [ $(uname -m) = "x86_64" ] && FEATURES="asm" || FEATURES="default" 13 | 14 | for bench in {{benches}}; do 15 | cargo criterion --bench $bench --features $FEATURES 16 | done 17 | 18 | # Run CUDA benchmarks on GPU 19 | gpu-bench +benches: 20 | #!/bin/sh 21 | # The `compute`/`sm` number corresponds to the Nvidia GPU architecture 22 | # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable 23 | # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ 24 | export CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | sed 's/\.//g') 25 | export EC_GPU_CUDA_NVCC_ARGS="--fatbin --gpu-architecture=sm_$CUDA_ARCH --generate-code=arch=compute_$CUDA_ARCH,code=sm_$CUDA_ARCH" 26 | export EC_GPU_FRAMEWORK="cuda" 27 | [ $(uname -m) = "x86_64" ] && FEATURES="cuda, asm" || FEATURES="cuda" 28 | 29 | for bench in {{benches}}; do 30 | cargo criterion --bench $bench --features "cuda" 31 | done 32 | 33 | # Run CUDA benchmarks on GPU, tuned for CI on Linux x86_64 34 | gpu-bench-ci +benches: 35 | #!/bin/sh 36 | printenv PATH 37 | [ $(uname -m) = "x86_64" ] && FEATURES="cuda, asm" || FEATURES="cuda" 38 | 39 | for bench in {{benches}}; do 40 | cargo criterion --bench $bench --features $FEATURES --message-format=json > "$bench-{{commit}}".json 41 | done 42 | 43 | comparative-bench +benches: 44 | #!/bin/sh 45 | # Initialize FEATURES based on architecture 46 | [ $(uname -m) = "x86_64" ] && FEATURES="asm" || FEATURES="" 47 | # Append cuda to FEATURES if nvcc is found 48 | if which nvcc > /dev/null; then 49 | FEATURES="${FEATURES:+$FEATURES,}cuda" 50 | fi 51 | # add default if FEATURES is empty 52 | FEATURES="${FEATURES:-default}" 53 | 54 | for bench in {{benches}}; do 55 | cargo criterion --bench $bench --features $FEATURES --message-format=json > "$bench-{{commit}}".json 56 | done 57 | -------------------------------------------------------------------------------- /benches/pcs.rs: -------------------------------------------------------------------------------- 1 | use arecibo::provider::{ 2 | hyperkzg::EvaluationEngine as MLEvaluationEngine, 3 | ipa_pc::EvaluationEngine as IPAEvaluationEngine, non_hiding_zeromorph::ZMPCS, 4 | }; 5 | use arecibo::provider::{Bn256EngineIPA, Bn256EngineKZG, Bn256EngineZM}; 6 | use arecibo::spartan::polys::multilinear::MultilinearPolynomial; 7 | use arecibo::traits::{ 8 | commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine, 9 | TranscriptEngineTrait, 10 | }; 11 | use criterion::{criterion_group, criterion_main, Bencher, BenchmarkId, Criterion, SamplingMode}; 12 | use ff::Field; 13 | use halo2curves::bn256::Bn256; 14 | use rand::rngs::StdRng; 15 | use rand_core::{CryptoRng, RngCore, SeedableRng}; 16 | use std::any::type_name; 17 | use std::sync::Arc; 18 | use std::time::Duration; 19 | 20 | // To run these benchmarks, first 
download `criterion` with `cargo install cargo-criterion`.
    21 | // Then `cargo criterion --bench pcs`.
    22 | // For flamegraphs, run `cargo criterion --bench pcs --features flamegraph -- --profile-time <secs>`.
    23 | // The results are located in `target/criterion/profile/`.
    24 | cfg_if::cfg_if! {
    25 |   if #[cfg(feature = "flamegraph")] {
    26 |     criterion_group! {
    27 |       name = pcs;
    28 |       config = Criterion::default().warm_up_time(Duration::from_millis(3000)).with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))).measurement_time(Duration::from_secs(100));
    29 |       targets = bench_pcs
    30 |     }
    31 |   } else {
    32 |     criterion_group! {
    33 |       name = pcs;
    34 |       config = Criterion::default().warm_up_time(Duration::from_millis(3000)).measurement_time(Duration::from_secs(100));
    35 |       targets = bench_pcs
    36 |     }
    37 |   }
    38 | }
    39 | 
    40 | criterion_main!(pcs);
    41 | 
    42 | const NUM_VARS_TEST_VECTOR: [usize; 6] = [10, 12, 14, 16, 18, 20];
    43 | 
    44 | struct BenchAssets<E: Engine, EE: EvaluationEngineTrait<E>> {
    45 |   poly: MultilinearPolynomial<<E as Engine>::Scalar>,
    46 |   point: Vec<<E as Engine>::Scalar>,
    47 |   eval: <E as Engine>::Scalar,
    48 |   ck: Arc<<<E as Engine>::CE as CommitmentEngineTrait<E>>::CommitmentKey>,
    49 |   commitment: <<E as Engine>::CE as CommitmentEngineTrait<E>>::Commitment,
    50 |   prover_key: <EE as EvaluationEngineTrait<E>>::ProverKey,
    51 |   verifier_key: <EE as EvaluationEngineTrait<E>>::VerifierKey,
    52 |   proof: Option<<EE as EvaluationEngineTrait<E>>::EvaluationArgument>,
    53 | }
    54 | 
    55 | /// Returns a random polynomial and a point, and calculates the polynomial's evaluation at that point.
    56 | pub fn random_poly_with_eval<E: Engine, R: RngCore + CryptoRng>(
    57 |   num_vars: usize,
    58 |   mut rng: &mut R,
    59 | ) -> (
    60 |   MultilinearPolynomial<<E as Engine>::Scalar>,
    61 |   Vec<<E as Engine>::Scalar>,
    62 |   <E as Engine>::Scalar,
    63 | ) {
    64 |   // Generate random polynomial and point.
    65 |   let poly = MultilinearPolynomial::random(num_vars, &mut rng);
    66 |   let point = (0..num_vars)
    67 |     .map(|_| <E as Engine>::Scalar::random(&mut rng))
    68 |     .collect::<Vec<_>>();
    69 | 
    70 |   // Calculate the evaluation of the polynomial at the point.
    71 |   let eval = MultilinearPolynomial::evaluate_with(poly.evaluations(), &point);
    72 | 
    73 |   (poly, point, eval)
    74 | }
    75 | 
    76 | impl<E: Engine, EE: EvaluationEngineTrait<E>> BenchAssets<E, EE> {
    77 |   pub(crate) fn from_num_vars<R: RngCore + CryptoRng>(num_vars: usize, rng: &mut R) -> Self {
    78 |     let (poly, point, eval) = random_poly_with_eval::<E, R>(num_vars, rng);
    79 | 
    80 |     // Mock commitment key.
    81 |     let ck = E::CE::setup(b"test", 1 << num_vars);
    82 |     let ck = Arc::new(ck);
    83 |     // Commits to the provided vector using the provided generators.
    84 |     let commitment = E::CE::commit(&ck, poly.evaluations());
    85 | 
    86 |     let (prover_key, verifier_key) = EE::setup(ck.clone());
    87 | 
    88 |     // Generate proof so that we can bench verification.
    89 |     let proof = EE::prove(
    90 |       &ck,
    91 |       &prover_key,
    92 |       &mut E::TE::new(b"TestEval"),
    93 |       &commitment,
    94 |       poly.evaluations(),
    95 |       &point,
    96 |       &eval,
    97 |     )
    98 |     .unwrap();
    99 | 
   100 |     Self {
   101 |       poly,
   102 |       point,
   103 |       eval,
   104 |       ck,
   105 |       commitment,
   106 |       prover_key,
   107 |       verifier_key,
   108 |       proof: Some(proof),
   109 |     }
   110 |   }
   111 | }
   112 | 
   113 | // Macro to generate benchmark code for multiple evaluation engine types
   114 | macro_rules! 
benchmark_all_engines { 115 | ($criterion:expr, $test_vector:expr, $proving_fn:expr, $verifying_fn:expr, $( ($assets:ident, $eval_engine:ty) ),*) => { 116 | for num_vars in $test_vector.iter() { 117 | let mut rng = rand::rngs::StdRng::seed_from_u64(*num_vars as u64); 118 | 119 | $( 120 | let $assets: BenchAssets<_, $eval_engine> = BenchAssets::from_num_vars::(*num_vars, &mut rng); 121 | )* 122 | 123 | // Proving group 124 | let mut proving_group = $criterion.benchmark_group(format!("PCS-Proving {}", num_vars)); 125 | proving_group 126 | .sampling_mode(SamplingMode::Auto) 127 | .sample_size(10); 128 | 129 | $( 130 | proving_group.bench_with_input(BenchmarkId::new(type_name::<$eval_engine>(), num_vars), &num_vars, |b, _| { 131 | $proving_fn(b, &$assets); 132 | }); 133 | )* 134 | 135 | proving_group.finish(); 136 | 137 | // Verifying group 138 | let mut verifying_group = $criterion.benchmark_group(format!("PCS-Verifying {}", num_vars)); 139 | verifying_group 140 | .sampling_mode(SamplingMode::Auto) 141 | .sample_size(10); 142 | 143 | $( 144 | verifying_group.bench_with_input(BenchmarkId::new(type_name::<$eval_engine>(), num_vars), &num_vars, |b, _| { 145 | $verifying_fn(b, &$assets); 146 | }); 147 | )* 148 | 149 | verifying_group.finish(); 150 | } 151 | }; 152 | } 153 | 154 | fn bench_pcs(c: &mut Criterion) { 155 | benchmark_all_engines!( 156 | c, 157 | NUM_VARS_TEST_VECTOR, 158 | bench_pcs_proving_internal, 159 | bench_pcs_verifying_internal, 160 | (ipa_assets, IPAEvaluationEngine), 161 | (hyperkzg_assets, MLEvaluationEngine), 162 | (zm_assets, ZMPCS) 163 | ); 164 | } 165 | 166 | fn bench_pcs_proving_internal>( 167 | b: &mut Bencher<'_>, 168 | bench_assets: &BenchAssets, 169 | ) { 170 | // Bench generate proof. 171 | b.iter(|| { 172 | EE::prove( 173 | &bench_assets.ck, 174 | &bench_assets.prover_key, 175 | &mut E::TE::new(b"TestEval"), 176 | &bench_assets.commitment, 177 | bench_assets.poly.evaluations(), 178 | &bench_assets.point, 179 | &bench_assets.eval, 180 | ) 181 | .unwrap(); 182 | }); 183 | } 184 | 185 | fn bench_pcs_verifying_internal>( 186 | b: &mut Bencher<'_>, 187 | bench_assets: &BenchAssets, 188 | ) { 189 | // Bench verify proof. 190 | b.iter(|| { 191 | EE::verify( 192 | &bench_assets.verifier_key, 193 | &mut E::TE::new(b"TestEval"), 194 | &bench_assets.commitment, 195 | &bench_assets.point, 196 | &bench_assets.eval, 197 | bench_assets.proof.as_ref().unwrap(), 198 | ) 199 | .unwrap(); 200 | }); 201 | } 202 | -------------------------------------------------------------------------------- /benches/recursive-snark-supernova.rs: -------------------------------------------------------------------------------- 1 | use criterion::*; 2 | use std::time::Duration; 3 | 4 | mod common; 5 | use common::supernova::targets::{ 6 | bench_one_augmented_circuit_recursive_snark, bench_two_augmented_circuit_recursive_snark, 7 | }; 8 | 9 | // To run these benchmarks, first download `criterion` with `cargo install cargo-criterion`. 10 | // Then `cargo criterion --bench recursive-snark-supernova`. The results are located in `target/criterion/data/`. 11 | // For flamegraphs, run `cargo criterion --bench recursive-snark-supernova --features flamegraph -- --profile-time `. 12 | // The results are located in `target/criterion/profile/`. 13 | cfg_if::cfg_if! { 14 | if #[cfg(feature = "flamegraph")] { 15 | criterion_group! 
{ 16 | name = recursive_snark_supernova; 17 | config = Criterion::default().warm_up_time(Duration::from_millis(3000)).with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); 18 | targets = bench_one_augmented_circuit_recursive_snark, bench_two_augmented_circuit_recursive_snark 19 | } 20 | } else { 21 | criterion_group! { 22 | name = recursive_snark_supernova; 23 | config = Criterion::default().warm_up_time(Duration::from_millis(3000)); 24 | targets = bench_one_augmented_circuit_recursive_snark, bench_two_augmented_circuit_recursive_snark 25 | } 26 | } 27 | } 28 | 29 | criterion_main!(recursive_snark_supernova); 30 | -------------------------------------------------------------------------------- /benches/recursive-snark.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_snake_case)] 2 | use arecibo::{ 3 | provider::{Bn256EngineKZG, GrumpkinEngine}, 4 | traits::{ 5 | circuit::{StepCircuit, TrivialCircuit}, 6 | snark::default_ck_hint, 7 | Engine, 8 | }, 9 | PublicParams, RecursiveSNARK, 10 | }; 11 | use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; 12 | use core::marker::PhantomData; 13 | use criterion::*; 14 | use ff::PrimeField; 15 | use std::time::Duration; 16 | 17 | mod common; 18 | use common::{noise_threshold_env, BenchParams}; 19 | 20 | type E1 = Bn256EngineKZG; 21 | type E2 = GrumpkinEngine; 22 | 23 | // To run these benchmarks, first download `criterion` with `cargo install cargo-criterion`. 24 | // Then `cargo criterion --bench recursive-snark`. The results are located in `target/criterion/data/`. 25 | // For flamegraphs, run `cargo criterion --bench recursive-snark --features flamegraph -- --profile-time `. 26 | // The results are located in `target/criterion/profile/`. 27 | cfg_if::cfg_if! { 28 | if #[cfg(feature = "flamegraph")] { 29 | criterion_group! { 30 | name = recursive_snark; 31 | config = Criterion::default().warm_up_time(Duration::from_millis(3000)).with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); 32 | targets = bench_recursive_snark 33 | } 34 | } else { 35 | criterion_group! 
{ 36 | name = recursive_snark; 37 | config = Criterion::default().warm_up_time(Duration::from_millis(3000)); 38 | targets = bench_recursive_snark 39 | } 40 | } 41 | } 42 | 43 | criterion_main!(recursive_snark); 44 | 45 | // This should match the value for the primary in test_recursive_circuit_pasta 46 | const NUM_CONS_VERIFIER_CIRCUIT_PRIMARY: usize = 9825; 47 | const NUM_SAMPLES: usize = 10; 48 | 49 | fn bench_recursive_snark(c: &mut Criterion) { 50 | // we vary the number of constraints in the step circuit 51 | for &num_cons_in_augmented_circuit in [ 52 | NUM_CONS_VERIFIER_CIRCUIT_PRIMARY, 53 | 16384, 54 | 32768, 55 | 65536, 56 | 131072, 57 | 262144, 58 | 524288, 59 | 1048576, 60 | ] 61 | .iter() 62 | { 63 | // number of constraints in the step circuit 64 | let num_cons = num_cons_in_augmented_circuit - NUM_CONS_VERIFIER_CIRCUIT_PRIMARY; 65 | 66 | let mut group = c.benchmark_group("RecursiveSNARK"); 67 | group.sample_size(NUM_SAMPLES); 68 | group.noise_threshold(noise_threshold_env().unwrap_or(0.05)); 69 | 70 | let c_primary = NonTrivialCircuit::new(num_cons); 71 | let c_secondary = TrivialCircuit::default(); 72 | 73 | // Produce public parameters 74 | let pp = PublicParams::::setup( 75 | &c_primary, 76 | &c_secondary, 77 | &*default_ck_hint(), 78 | &*default_ck_hint(), 79 | ) 80 | .unwrap(); 81 | 82 | // Bench time to produce a recursive SNARK; 83 | // we execute a certain number of warm-up steps since executing 84 | // the first step is cheaper than other steps owing to the presence of 85 | // a lot of zeros in the satisfying assignment 86 | let num_warmup_steps = 10; 87 | let mut recursive_snark: RecursiveSNARK = RecursiveSNARK::new( 88 | &pp, 89 | &c_primary, 90 | &c_secondary, 91 | &[::Scalar::from(2u64)], 92 | &[::Scalar::from(2u64)], 93 | ) 94 | .unwrap(); 95 | 96 | for i in 0..num_warmup_steps { 97 | recursive_snark 98 | .prove_step(&pp, &c_primary, &c_secondary) 99 | .unwrap(); 100 | 101 | // verify the recursive snark at each step of recursion 102 | recursive_snark 103 | .verify( 104 | &pp, 105 | i + 1, 106 | &[::Scalar::from(2u64)], 107 | &[::Scalar::from(2u64)], 108 | ) 109 | .unwrap(); 110 | } 111 | 112 | let bench_params = BenchParams { 113 | step_size: num_cons, 114 | date: env!("VERGEN_GIT_COMMIT_DATE"), 115 | sha: env!("VERGEN_GIT_SHA"), 116 | }; 117 | 118 | group.bench_function(bench_params.bench_id("Prove"), |b| { 119 | b.iter(|| { 120 | // produce a recursive SNARK for a step of the recursion 121 | black_box(&mut recursive_snark.clone()) 122 | .prove_step( 123 | black_box(&pp), 124 | black_box(&c_primary), 125 | black_box(&c_secondary), 126 | ) 127 | .unwrap(); 128 | }) 129 | }); 130 | 131 | // Benchmark the verification time 132 | group.bench_function(bench_params.bench_id("Verify"), |b| { 133 | b.iter(|| { 134 | black_box(&recursive_snark) 135 | .verify( 136 | black_box(&pp), 137 | black_box(num_warmup_steps), 138 | black_box(&[::Scalar::from(2u64)]), 139 | black_box(&[::Scalar::from(2u64)]), 140 | ) 141 | .unwrap(); 142 | }); 143 | }); 144 | group.finish(); 145 | } 146 | } 147 | 148 | #[derive(Clone, Debug, Default)] 149 | struct NonTrivialCircuit { 150 | num_cons: usize, 151 | _p: PhantomData, 152 | } 153 | 154 | impl NonTrivialCircuit { 155 | pub fn new(num_cons: usize) -> Self { 156 | Self { 157 | num_cons, 158 | _p: PhantomData, 159 | } 160 | } 161 | } 162 | impl StepCircuit for NonTrivialCircuit { 163 | fn arity(&self) -> usize { 164 | 1 165 | } 166 | 167 | fn synthesize>( 168 | &self, 169 | cs: &mut CS, 170 | z: &[AllocatedNum], 171 | ) -> Result>, 
SynthesisError> {
   172 |     // Consider the equation `x^(2^num_cons) = y`, where `x` is the input and `y` the output: each iteration squares the running value.
   173 |     let mut x = z[0].clone();
   174 |     let mut y = x.clone();
   175 |     for i in 0..self.num_cons {
   176 |       y = x.square(cs.namespace(|| format!("x_sq_{i}")))?;
   177 |       x = y.clone();
   178 |     }
   179 |     Ok(vec![y])
   180 |   }
   181 | }
   182 | 
--------------------------------------------------------------------------------
/benches/sha256.rs:
--------------------------------------------------------------------------------
     1 | //! Benchmarks Nova's prover for proving SHA-256 with varying sized messages.
     2 | //! We run a single step with the step performing the entire computation.
     3 | //! This code invokes a hand-written SHA-256 gadget from bellman/bellperson.
     4 | //! It also uses code from bellman/bellperson to compare circuit-generated digest with sha2 crate's output
     5 | #![allow(non_snake_case)]
     6 | use arecibo::{
     7 |   provider::{Bn256EngineKZG, GrumpkinEngine},
     8 |   traits::{
     9 |     circuit::{StepCircuit, TrivialCircuit},
    10 |     snark::default_ck_hint,
    11 |     Engine,
    12 |   },
    13 |   PublicParams, RecursiveSNARK,
    14 | };
    15 | use bellpepper::gadgets::{sha256::sha256, Assignment};
    16 | use bellpepper_core::{
    17 |   boolean::{AllocatedBit, Boolean},
    18 |   num::{AllocatedNum, Num},
    19 |   ConstraintSystem, SynthesisError,
    20 | };
    21 | use core::marker::PhantomData;
    22 | use core::time::Duration;
    23 | use criterion::*;
    24 | use ff::{PrimeField, PrimeFieldBits};
    25 | use sha2::{Digest, Sha256};
    26 | 
    27 | type E1 = Bn256EngineKZG;
    28 | type E2 = GrumpkinEngine;
    29 | 
    30 | #[derive(Clone, Debug)]
    31 | struct Sha256Circuit<Scalar: PrimeField> {
    32 |   preimage: Vec<u8>,
    33 |   _p: PhantomData<Scalar>,
    34 | }
    35 | 
    36 | impl<Scalar: PrimeField> Sha256Circuit<Scalar> {
    37 |   pub fn new(preimage: Vec<u8>) -> Self {
    38 |     Self {
    39 |       preimage,
    40 |       _p: PhantomData,
    41 |     }
    42 |   }
    43 | }
    44 | 
    45 | impl<Scalar: PrimeField + PrimeFieldBits> StepCircuit<Scalar> for Sha256Circuit<Scalar> {
    46 |   fn arity(&self) -> usize {
    47 |     1
    48 |   }
    49 | 
    50 |   fn synthesize<CS: ConstraintSystem<Scalar>>(
    51 |     &self,
    52 |     cs: &mut CS,
    53 |     _z: &[AllocatedNum<Scalar>],
    54 |   ) -> Result<Vec<AllocatedNum<Scalar>>, SynthesisError> {
    55 |     let mut z_out: Vec<AllocatedNum<Scalar>> = Vec::new();
    56 | 
    57 |     let bit_values: Vec<_> = self
    58 |       .preimage
    59 |       .clone()
    60 |       .into_iter()
    61 |       .flat_map(|byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8))
    62 |       .map(Some)
    63 |       .collect();
    64 |     assert_eq!(bit_values.len(), self.preimage.len() * 8);
    65 | 
    66 |     let preimage_bits = bit_values
    67 |       .into_iter()
    68 |       .enumerate()
    69 |       .map(|(i, b)| AllocatedBit::alloc(cs.namespace(|| format!("preimage bit {i}")), b))
    70 |       .map(|b| b.map(Boolean::from))
    71 |       .collect::<Result<Vec<_>, _>>()?;
    72 | 
    73 |     let hash_bits = sha256(cs.namespace(|| "sha256"), &preimage_bits)?;
    74 | 
    75 |     for (i, hash_bits) in hash_bits.chunks(256_usize).enumerate() {
    76 |       let mut num = Num::<Scalar>::zero();
    77 |       let mut coeff = Scalar::ONE;
    78 |       for bit in hash_bits {
    79 |         num = num.add_bool_with_coeff(CS::one(), bit, coeff);
    80 | 
    81 |         coeff = coeff.double();
    82 |       }
    83 | 
    84 |       let hash = AllocatedNum::alloc(cs.namespace(|| format!("input {i}")), || {
    85 |         Ok(*num.get_value().get()?)
86 | })?; 87 | 88 | // num * 1 = hash 89 | cs.enforce( 90 | || format!("packing constraint {i}"), 91 | |_| num.lc(Scalar::ONE), 92 | |lc| lc + CS::one(), 93 | |lc| lc + hash.get_variable(), 94 | ); 95 | z_out.push(hash); 96 | } 97 | 98 | // sanity check with the hasher 99 | let mut hasher = Sha256::new(); 100 | hasher.update(&self.preimage); 101 | let hash_result = hasher.finalize(); 102 | 103 | let mut s = hash_result 104 | .iter() 105 | .flat_map(|&byte| (0..8).rev().map(move |i| (byte >> i) & 1u8 == 1u8)); 106 | 107 | for b in hash_bits { 108 | match b { 109 | Boolean::Is(b) => { 110 | assert!(s.next().unwrap() == b.get_value().unwrap()); 111 | } 112 | Boolean::Not(b) => { 113 | assert!(s.next().unwrap() != b.get_value().unwrap()); 114 | } 115 | Boolean::Constant(_b) => { 116 | panic!("Can't reach here") 117 | } 118 | } 119 | } 120 | 121 | Ok(z_out) 122 | } 123 | } 124 | 125 | criterion_group! { 126 | name = recursive_snark; 127 | config = Criterion::default().warm_up_time(Duration::from_millis(3000)); 128 | targets = bench_recursive_snark 129 | } 130 | 131 | criterion_main!(recursive_snark); 132 | 133 | fn bench_recursive_snark(c: &mut Criterion) { 134 | // Test vectors 135 | let circuits = vec![ 136 | Sha256Circuit::new(vec![0u8; 1 << 6]), 137 | Sha256Circuit::new(vec![0u8; 1 << 7]), 138 | Sha256Circuit::new(vec![0u8; 1 << 8]), 139 | Sha256Circuit::new(vec![0u8; 1 << 9]), 140 | Sha256Circuit::new(vec![0u8; 1 << 10]), 141 | Sha256Circuit::new(vec![0u8; 1 << 11]), 142 | Sha256Circuit::new(vec![0u8; 1 << 12]), 143 | Sha256Circuit::new(vec![0u8; 1 << 13]), 144 | Sha256Circuit::new(vec![0u8; 1 << 14]), 145 | Sha256Circuit::new(vec![0u8; 1 << 15]), 146 | Sha256Circuit::new(vec![0u8; 1 << 16]), 147 | ]; 148 | 149 | for circuit_primary in circuits { 150 | let mut group = c.benchmark_group(format!( 151 | "NovaProve-Sha256-message-len-{}", 152 | circuit_primary.preimage.len() 153 | )); 154 | group.sample_size(10); 155 | 156 | // Produce public parameters 157 | let ttc = TrivialCircuit::default(); 158 | let pp = PublicParams::::setup( 159 | &circuit_primary, 160 | &ttc, 161 | &*default_ck_hint(), 162 | &*default_ck_hint(), 163 | ) 164 | .unwrap(); 165 | 166 | let circuit_secondary = TrivialCircuit::default(); 167 | let z0_primary = vec![::Scalar::from(2u64)]; 168 | let z0_secondary = vec![::Scalar::from(2u64)]; 169 | 170 | group.bench_function("Prove", |b| { 171 | b.iter(|| { 172 | let mut recursive_snark = RecursiveSNARK::new( 173 | black_box(&pp), 174 | black_box(&circuit_primary), 175 | black_box(&circuit_secondary), 176 | black_box(&z0_primary), 177 | black_box(&z0_secondary), 178 | ) 179 | .unwrap(); 180 | 181 | // produce a recursive SNARK for a step of the recursion 182 | recursive_snark 183 | .prove_step( 184 | black_box(&pp), 185 | black_box(&circuit_primary), 186 | black_box(&circuit_secondary), 187 | ) 188 | .unwrap(); 189 | }) 190 | }); 191 | group.finish(); 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /benches/supernova-ci.rs: -------------------------------------------------------------------------------- 1 | use criterion::*; 2 | 3 | use std::time::Duration; 4 | 5 | mod common; 6 | use common::supernova::targets::{ 7 | bench_two_augmented_circuit_compressed_snark_with_computational_commitments, 8 | bench_two_augmented_circuit_recursive_snark, 9 | }; 10 | 11 | // To run these benchmarks, first download `criterion` with `cargo install cargo-criterion`. 12 | // Then `cargo criterion --bench recursive-snark-supernova`. 
The results are located in `target/criterion/data/`. 13 | // For flamegraphs, run `cargo criterion --bench recursive-snark-supernova --features flamegraph -- --profile-time `. 14 | // The results are located in `target/criterion/profile/`. 15 | cfg_if::cfg_if! { 16 | if #[cfg(feature = "flamegraph")] { 17 | criterion_group! { 18 | name = supernova_ci; 19 | config = Criterion::default().warm_up_time(Duration::from_millis(3000)).with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); 20 | targets = bench_two_augmented_circuit_recursive_snark, bench_two_augmented_circuit_compressed_snark_with_computational_commitments 21 | } 22 | } else { 23 | criterion_group! { 24 | name = supernova_ci; 25 | config = Criterion::default().warm_up_time(Duration::from_millis(3000)); 26 | targets = bench_two_augmented_circuit_recursive_snark, bench_two_augmented_circuit_compressed_snark_with_computational_commitments 27 | } 28 | } 29 | } 30 | 31 | criterion_main!(supernova_ci); 32 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use vergen::EmitBuilder; 3 | 4 | fn main() -> Result<(), Box> { 5 | // Emit the instructions 6 | EmitBuilder::builder().all_git().emit()?; 7 | Ok(()) 8 | } 9 | -------------------------------------------------------------------------------- /examples/hashchain.rs: -------------------------------------------------------------------------------- 1 | //! This example proves the knowledge of preimage to a hash chain tail, with a configurable number of elements per hash chain node. 2 | //! The output of each step tracks the current tail of the hash chain 3 | use arecibo::{ 4 | provider::{Bn256EngineKZG, GrumpkinEngine}, 5 | traits::{ 6 | circuit::{StepCircuit, TrivialCircuit}, 7 | snark::RelaxedR1CSSNARKTrait, 8 | Engine, Group, 9 | }, 10 | CompressedSNARK, PublicParams, RecursiveSNARK, 11 | }; 12 | use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; 13 | use ff::Field; 14 | use flate2::{write::ZlibEncoder, Compression}; 15 | use generic_array::typenum::U24; 16 | use halo2curves::bn256::Bn256; 17 | use neptune::{ 18 | circuit2::Elt, 19 | sponge::{ 20 | api::{IOPattern, SpongeAPI, SpongeOp}, 21 | circuit::SpongeCircuit, 22 | vanilla::{Mode::Simplex, Sponge, SpongeTrait}, 23 | }, 24 | Strength, 25 | }; 26 | use std::time::Instant; 27 | 28 | type E1 = Bn256EngineKZG; 29 | type E2 = GrumpkinEngine; 30 | type EE1 = arecibo::provider::hyperkzg::EvaluationEngine; 31 | type EE2 = arecibo::provider::ipa_pc::EvaluationEngine; 32 | type S1 = arecibo::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK 33 | type S2 = arecibo::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK 34 | 35 | #[derive(Clone, Debug)] 36 | struct HashChainCircuit { 37 | num_elts_per_step: usize, 38 | x_i: Vec, 39 | } 40 | 41 | impl HashChainCircuit { 42 | // produces a preimage to be hashed 43 | fn new(num_elts_per_step: usize) -> Self { 44 | let mut rng = rand::thread_rng(); 45 | let x_i = (0..num_elts_per_step) 46 | .map(|_| G::Scalar::random(&mut rng)) 47 | .collect::>(); 48 | 49 | Self { 50 | num_elts_per_step, 51 | x_i, 52 | } 53 | } 54 | } 55 | 56 | impl StepCircuit for HashChainCircuit { 57 | fn arity(&self) -> usize { 58 | 1 59 | } 60 | 61 | fn synthesize>( 62 | &self, 63 | cs: &mut CS, 64 | z_in: &[AllocatedNum], 65 | ) -> Result>, SynthesisError> { 66 | // z_in provides the 
running digest 67 | assert_eq!(z_in.len(), 1); 68 | 69 | // allocate x_i 70 | let x_i = (0..self.num_elts_per_step) 71 | .map(|i| AllocatedNum::alloc(cs.namespace(|| format!("x_{}", i)), || Ok(self.x_i[i]))) 72 | .collect::, _>>()?; 73 | 74 | // concatenate z_in and x_i 75 | let mut m = z_in.to_vec(); 76 | m.extend(x_i); 77 | 78 | let elt = m 79 | .iter() 80 | .map(|x| Elt::Allocated(x.clone())) 81 | .collect::>(); 82 | 83 | let num_absorbs = 1 + self.num_elts_per_step as u32; 84 | 85 | let parameter = IOPattern(vec![SpongeOp::Absorb(num_absorbs), SpongeOp::Squeeze(1u32)]); 86 | 87 | let pc = Sponge::::api_constants(Strength::Standard); 88 | let mut ns = cs.namespace(|| "ns"); 89 | 90 | let z_out = { 91 | let mut sponge = SpongeCircuit::new_with_constants(&pc, Simplex); 92 | let acc = &mut ns; 93 | 94 | sponge.start(parameter, None, acc); 95 | SpongeAPI::absorb(&mut sponge, num_absorbs, &elt, acc); 96 | 97 | let output = SpongeAPI::squeeze(&mut sponge, 1, acc); 98 | sponge.finish(acc).unwrap(); 99 | Elt::ensure_allocated(&output[0], &mut ns.namespace(|| "ensure allocated"), true)? 100 | }; 101 | 102 | Ok(vec![z_out]) 103 | } 104 | } 105 | 106 | /// cargo run --release --example hashchain 107 | fn main() { 108 | println!("========================================================="); 109 | println!("Nova-based hashchain example"); 110 | println!("========================================================="); 111 | 112 | let num_steps = 10; 113 | for num_elts_per_step in [1024, 2048, 4096] { 114 | // number of instances of AND per Nova's recursive step 115 | let circuit_primary = HashChainCircuit::<::GE>::new(num_elts_per_step); 116 | let circuit_secondary = TrivialCircuit::default(); 117 | 118 | // produce public parameters 119 | let start = Instant::now(); 120 | println!("Producing public parameters..."); 121 | let pp = PublicParams::::setup( 122 | &circuit_primary, 123 | &circuit_secondary, 124 | &*S1::ck_floor(), 125 | &*S2::ck_floor(), 126 | ) 127 | .unwrap(); 128 | println!("PublicParams::setup, took {:?} ", start.elapsed()); 129 | 130 | println!( 131 | "Number of constraints per step (primary circuit): {}", 132 | pp.num_constraints().0 133 | ); 134 | println!( 135 | "Number of constraints per step (secondary circuit): {}", 136 | pp.num_constraints().1 137 | ); 138 | 139 | println!( 140 | "Number of variables per step (primary circuit): {}", 141 | pp.num_variables().0 142 | ); 143 | println!( 144 | "Number of variables per step (secondary circuit): {}", 145 | pp.num_variables().1 146 | ); 147 | 148 | // produce non-deterministic advice 149 | type C1 = HashChainCircuit<::GE>; 150 | 151 | let circuits = (0..num_steps) 152 | .map(|_| C1::new(num_elts_per_step)) 153 | .collect::>(); 154 | 155 | // produce a recursive SNARK 156 | println!( 157 | "Generating a RecursiveSNARK with {num_elts_per_step} field elements per hashchain node..." 
158 | ); 159 | let mut recursive_snark: RecursiveSNARK = RecursiveSNARK::::new( 160 | &pp, 161 | &circuits[0], 162 | &circuit_secondary, 163 | &[::Scalar::zero()], 164 | &[::Scalar::zero()], 165 | ) 166 | .unwrap(); 167 | 168 | for (i, circuit_primary) in circuits.iter().enumerate() { 169 | let start = Instant::now(); 170 | recursive_snark 171 | .prove_step(&pp, circuit_primary, &circuit_secondary) 172 | .unwrap(); 173 | 174 | println!("RecursiveSNARK::prove {} : took {:?} ", i, start.elapsed()); 175 | } 176 | 177 | // verify the recursive SNARK 178 | println!("Verifying a RecursiveSNARK..."); 179 | let res = recursive_snark.verify( 180 | &pp, 181 | num_steps, 182 | &[::Scalar::ZERO], 183 | &[::Scalar::ZERO], 184 | ); 185 | println!("RecursiveSNARK::verify: {:?}", res.is_ok(),); 186 | res.unwrap(); 187 | 188 | // produce a compressed SNARK 189 | println!("Generating a CompressedSNARK using Spartan with HyperKZG..."); 190 | let (pk, vk) = CompressedSNARK::<_, S1, S2>::setup(&pp).unwrap(); 191 | 192 | let start = Instant::now(); 193 | 194 | let res = CompressedSNARK::<_, S1, S2>::prove(&pp, &pk, &recursive_snark); 195 | println!( 196 | "CompressedSNARK::prove: {:?}, took {:?}", 197 | res.is_ok(), 198 | start.elapsed() 199 | ); 200 | assert!(res.is_ok()); 201 | let compressed_snark = res.unwrap(); 202 | 203 | let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default()); 204 | bincode::serialize_into(&mut encoder, &compressed_snark).unwrap(); 205 | let compressed_snark_encoded = encoder.finish().unwrap(); 206 | println!( 207 | "CompressedSNARK::len {:?} bytes", 208 | compressed_snark_encoded.len() 209 | ); 210 | 211 | // verify the compressed SNARK 212 | println!("Verifying a CompressedSNARK..."); 213 | let start = Instant::now(); 214 | let res = compressed_snark.verify( 215 | &vk, 216 | num_steps, 217 | &[::Scalar::ZERO], 218 | &[::Scalar::ZERO], 219 | ); 220 | println!( 221 | "CompressedSNARK::verify: {:?}, took {:?}", 222 | res.is_ok(), 223 | start.elapsed() 224 | ); 225 | res.unwrap(); 226 | println!("========================================================="); 227 | } 228 | } 229 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | # The default profile includes rustc, rust-std, cargo, rust-docs, rustfmt and clippy. 3 | profile = "default" 4 | channel = "1.79" 5 | targets = [ "wasm32-unknown-unknown" ] 6 | 7 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | tab_spaces = 2 3 | newline_style = "Unix" 4 | use_try_shorthand = true 5 | -------------------------------------------------------------------------------- /src/bellpepper/mod.rs: -------------------------------------------------------------------------------- 1 | //! Support for generating R1CS from [Bellpepper]. 2 | //! 3 | //! 
[Bellpepper]: https://github.com/argumentcomputer/bellpepper 4 | 5 | pub mod r1cs; 6 | pub mod shape_cs; 7 | pub mod solver; 8 | pub mod test_shape_cs; 9 | 10 | #[cfg(test)] 11 | mod tests { 12 | use crate::{ 13 | bellpepper::{ 14 | r1cs::{NovaShape, NovaWitness}, 15 | shape_cs::ShapeCS, 16 | solver::SatisfyingAssignment, 17 | }, 18 | provider::{Bn256EngineKZG, PallasEngine, Secp256k1Engine}, 19 | traits::{snark::default_ck_hint, Engine}, 20 | }; 21 | use bellpepper_core::{num::AllocatedNum, ConstraintSystem}; 22 | use ff::PrimeField; 23 | 24 | fn synthesize_alloc_bit>(cs: &mut CS) { 25 | // get two bits as input and check that they are indeed bits 26 | let a = AllocatedNum::alloc_infallible(cs.namespace(|| "a"), || Fr::ONE); 27 | let _ = a.inputize(cs.namespace(|| "a is input")); 28 | cs.enforce( 29 | || "check a is 0 or 1", 30 | |lc| lc + CS::one() - a.get_variable(), 31 | |lc| lc + a.get_variable(), 32 | |lc| lc, 33 | ); 34 | let b = AllocatedNum::alloc_infallible(cs.namespace(|| "b"), || Fr::ONE); 35 | let _ = b.inputize(cs.namespace(|| "b is input")); 36 | cs.enforce( 37 | || "check b is 0 or 1", 38 | |lc| lc + CS::one() - b.get_variable(), 39 | |lc| lc + b.get_variable(), 40 | |lc| lc, 41 | ); 42 | } 43 | 44 | fn test_alloc_bit_with() { 45 | // First create the shape 46 | let mut cs: ShapeCS = ShapeCS::new(); 47 | synthesize_alloc_bit(&mut cs); 48 | let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint()); 49 | 50 | // Now get the assignment 51 | let mut cs = SatisfyingAssignment::::new(); 52 | synthesize_alloc_bit(&mut cs); 53 | let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck).unwrap(); 54 | 55 | // Make sure that this is satisfiable 56 | shape.is_sat(&ck, &inst, &witness).unwrap(); 57 | } 58 | 59 | #[test] 60 | fn test_alloc_bit() { 61 | test_alloc_bit_with::(); 62 | test_alloc_bit_with::(); 63 | test_alloc_bit_with::(); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/bellpepper/r1cs.rs: -------------------------------------------------------------------------------- 1 | //! Support for generating R1CS using bellpepper. 2 | 3 | #![allow(non_snake_case)] 4 | 5 | use super::{shape_cs::ShapeCS, solver::SatisfyingAssignment, test_shape_cs::TestShapeCS}; 6 | use crate::{ 7 | errors::NovaError, 8 | r1cs::{commitment_key, CommitmentKeyHint, R1CSInstance, R1CSShape, R1CSWitness, SparseMatrix}, 9 | traits::Engine, 10 | CommitmentKey, 11 | }; 12 | use bellpepper_core::{Index, LinearCombination}; 13 | use ff::PrimeField; 14 | 15 | /// `NovaWitness` provide a method for acquiring an `R1CSInstance` and `R1CSWitness` from implementers. 16 | pub trait NovaWitness { 17 | /// Return an instance and witness, given a shape and ck. 18 | fn r1cs_instance_and_witness( 19 | self, 20 | shape: &R1CSShape, 21 | ck: &CommitmentKey, 22 | ) -> Result<(R1CSInstance, R1CSWitness), NovaError>; 23 | } 24 | 25 | /// `NovaShape` provides methods for acquiring `R1CSShape` and `CommitmentKey` from implementers. 26 | pub trait NovaShape { 27 | /// Return an appropriate `R1CSShape` and `CommitmentKey` structs. 28 | /// A `CommitmentKeyHint` should be provided to help guide the construction of the `CommitmentKey`. 29 | /// This parameter is documented in `r1cs::R1CS::commitment_key`. 30 | fn r1cs_shape_and_key(&self, ck_hint: &CommitmentKeyHint) -> (R1CSShape, CommitmentKey) { 31 | let S = self.r1cs_shape(); 32 | let ck = commitment_key(&S, ck_hint); 33 | 34 | (S, ck) 35 | } 36 | /// Return an appropriate `R1CSShape`. 
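  ///
  /// A typical flow (a sketch mirroring the `test_alloc_bit_with` test in
  /// `bellpepper/mod.rs` above): synthesize the circuit once into a `ShapeCS`
  /// to fix the shape and commitment key, then again into a
  /// `SatisfyingAssignment` to produce an instance/witness pair.
  ///
  /// ```ignore
  /// let mut cs: ShapeCS<E> = ShapeCS::new();
  /// synthesize_alloc_bit(&mut cs);
  /// let (shape, ck) = cs.r1cs_shape_and_key(&*default_ck_hint());
  ///
  /// let mut cs = SatisfyingAssignment::<E>::new();
  /// synthesize_alloc_bit(&mut cs);
  /// let (inst, witness) = cs.r1cs_instance_and_witness(&shape, &ck)?;
  /// shape.is_sat(&ck, &inst, &witness)?;
  /// ```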
37 | fn r1cs_shape(&self) -> R1CSShape; 38 | } 39 | 40 | impl NovaWitness for SatisfyingAssignment { 41 | fn r1cs_instance_and_witness( 42 | self, 43 | shape: &R1CSShape, 44 | ck: &CommitmentKey, 45 | ) -> Result<(R1CSInstance, R1CSWitness), NovaError> { 46 | let (input_assignment, aux_assignment) = self.to_assignments(); 47 | let W = R1CSWitness::::new(shape, aux_assignment)?; 48 | let X = input_assignment[1..].to_owned(); 49 | 50 | let comm_W = W.commit(ck); 51 | 52 | let instance = R1CSInstance::::new(shape, comm_W, X)?; 53 | 54 | Ok((instance, W)) 55 | } 56 | } 57 | 58 | macro_rules! impl_nova_shape { 59 | ( $name:ident) => { 60 | impl NovaShape for $name 61 | where 62 | E::Scalar: PrimeField, 63 | { 64 | fn r1cs_shape(&self) -> R1CSShape { 65 | let mut A = SparseMatrix::::empty(); 66 | let mut B = SparseMatrix::::empty(); 67 | let mut C: SparseMatrix<::Scalar> = SparseMatrix::::empty(); 68 | 69 | let mut num_cons_added = 0; 70 | let mut X = (&mut A, &mut B, &mut C, &mut num_cons_added); 71 | let num_inputs = self.num_inputs(); 72 | let num_constraints = self.num_constraints(); 73 | let num_vars = self.num_aux(); 74 | 75 | for constraint in self.constraints.iter() { 76 | add_constraint( 77 | &mut X, 78 | num_vars, 79 | &constraint.0, 80 | &constraint.1, 81 | &constraint.2, 82 | ); 83 | } 84 | assert_eq!(num_cons_added, num_constraints); 85 | 86 | A.cols = num_vars + num_inputs; 87 | B.cols = num_vars + num_inputs; 88 | C.cols = num_vars + num_inputs; 89 | 90 | // Don't count One as an input for shape's purposes. 91 | let res = R1CSShape::new(num_constraints, num_vars, num_inputs - 1, A, B, C); 92 | res.unwrap() 93 | } 94 | } 95 | }; 96 | } 97 | 98 | impl_nova_shape!(ShapeCS); 99 | impl_nova_shape!(TestShapeCS); 100 | 101 | fn add_constraint( 102 | X: &mut ( 103 | &mut SparseMatrix, 104 | &mut SparseMatrix, 105 | &mut SparseMatrix, 106 | &mut usize, 107 | ), 108 | num_vars: usize, 109 | a_lc: &LinearCombination, 110 | b_lc: &LinearCombination, 111 | c_lc: &LinearCombination, 112 | ) { 113 | let (A, B, C, nn) = X; 114 | let n = **nn; 115 | assert_eq!(n, A.num_rows(), "A: invalid shape"); 116 | assert_eq!(n, B.num_rows(), "B: invalid shape"); 117 | assert_eq!(n, C.num_rows(), "C: invalid shape"); 118 | 119 | let add_constraint_component = |index: Index, coeff: &S, M: &mut SparseMatrix| { 120 | // we add constraints to the matrix only if the associated coefficient is non-zero 121 | if *coeff != S::ZERO { 122 | match index { 123 | Index::Input(idx) => { 124 | // Inputs come last, with input 0, representing 'one', 125 | // at position num_vars within the witness vector. 126 | let idx = idx + num_vars; 127 | M.data.push(*coeff); 128 | M.indices.push(idx); 129 | } 130 | Index::Aux(idx) => { 131 | M.data.push(*coeff); 132 | M.indices.push(idx); 133 | } 134 | } 135 | } 136 | }; 137 | 138 | for (index, coeff) in a_lc.iter() { 139 | add_constraint_component(index.0, coeff, A); 140 | } 141 | A.indptr.push(A.indices.len()); 142 | 143 | for (index, coeff) in b_lc.iter() { 144 | add_constraint_component(index.0, coeff, B) 145 | } 146 | B.indptr.push(B.indices.len()); 147 | 148 | for (index, coeff) in c_lc.iter() { 149 | add_constraint_component(index.0, coeff, C) 150 | } 151 | C.indptr.push(C.indices.len()); 152 | 153 | **nn += 1; 154 | } 155 | -------------------------------------------------------------------------------- /src/bellpepper/shape_cs.rs: -------------------------------------------------------------------------------- 1 | //! Support for generating R1CS shape using bellpepper. 
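An aside on the data layout used by `add_constraint` above: the A, B, and C matrices are built in CSR (compressed sparse row) form, where `indptr[i]..indptr[i+1]` delimits row `i`'s entries in `indices`/`data` (assuming, as `SparseMatrix::empty()` appears to guarantee, that `indptr` starts as `[0]`). A minimal sketch of reading a row back (illustrative only; `Csr` and `row` are hypothetical names, not crate items):

```rust
struct Csr<S> {
  data: Vec<S>,        // non-zero coefficients, row-major
  indices: Vec<usize>, // column index of each coefficient
  indptr: Vec<usize>,  // indptr[i]..indptr[i+1] spans row i; starts as [0]
}

fn row<S: Clone>(m: &Csr<S>, i: usize) -> Vec<(usize, S)> {
  (m.indptr[i]..m.indptr[i + 1])
    .map(|k| (m.indices[k], m.data[k].clone()))
    .collect()
}
```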
2 | 3 | use crate::traits::Engine; 4 | use bellpepper_core::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable}; 5 | use ff::PrimeField; 6 | 7 | /// `ShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a circuit. 8 | pub struct ShapeCS 9 | where 10 | E::Scalar: PrimeField, 11 | { 12 | /// All constraints added to the `ShapeCS`. 13 | pub constraints: Vec<( 14 | LinearCombination, 15 | LinearCombination, 16 | LinearCombination, 17 | )>, 18 | inputs: usize, 19 | aux: usize, 20 | } 21 | 22 | impl ShapeCS { 23 | /// Create a new, default `ShapeCS`, 24 | pub fn new() -> Self { 25 | Self::default() 26 | } 27 | 28 | /// Returns the number of constraints defined for this `ShapeCS`. 29 | pub fn num_constraints(&self) -> usize { 30 | self.constraints.len() 31 | } 32 | 33 | /// Returns the number of inputs defined for this `ShapeCS`. 34 | pub fn num_inputs(&self) -> usize { 35 | self.inputs 36 | } 37 | 38 | /// Returns the number of aux inputs defined for this `ShapeCS`. 39 | pub fn num_aux(&self) -> usize { 40 | self.aux 41 | } 42 | } 43 | 44 | impl Default for ShapeCS { 45 | fn default() -> Self { 46 | Self { 47 | constraints: vec![], 48 | inputs: 1, 49 | aux: 0, 50 | } 51 | } 52 | } 53 | 54 | impl ConstraintSystem for ShapeCS { 55 | type Root = Self; 56 | 57 | fn alloc(&mut self, _annotation: A, _f: F) -> Result 58 | where 59 | F: FnOnce() -> Result, 60 | A: FnOnce() -> AR, 61 | AR: Into, 62 | { 63 | self.aux += 1; 64 | 65 | Ok(Variable::new_unchecked(Index::Aux(self.aux - 1))) 66 | } 67 | 68 | fn alloc_input(&mut self, _annotation: A, _f: F) -> Result 69 | where 70 | F: FnOnce() -> Result, 71 | A: FnOnce() -> AR, 72 | AR: Into, 73 | { 74 | self.inputs += 1; 75 | 76 | Ok(Variable::new_unchecked(Index::Input(self.inputs - 1))) 77 | } 78 | 79 | fn enforce(&mut self, _annotation: A, a: LA, b: LB, c: LC) 80 | where 81 | A: FnOnce() -> AR, 82 | AR: Into, 83 | LA: FnOnce(LinearCombination) -> LinearCombination, 84 | LB: FnOnce(LinearCombination) -> LinearCombination, 85 | LC: FnOnce(LinearCombination) -> LinearCombination, 86 | { 87 | let a = a(LinearCombination::zero()); 88 | let b = b(LinearCombination::zero()); 89 | let c = c(LinearCombination::zero()); 90 | 91 | self.constraints.push((a, b, c)); 92 | } 93 | 94 | fn push_namespace(&mut self, _name_fn: N) 95 | where 96 | NR: Into, 97 | N: FnOnce() -> NR, 98 | { 99 | } 100 | 101 | fn pop_namespace(&mut self) {} 102 | 103 | fn get_root(&mut self) -> &mut Self::Root { 104 | self 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/bellpepper/solver.rs: -------------------------------------------------------------------------------- 1 | //! Support for generating R1CS witness using bellpepper. 2 | 3 | use crate::traits::Engine; 4 | 5 | use bellpepper::util_cs::witness_cs::WitnessCS; 6 | 7 | /// A `ConstraintSystem` which calculates witness values for a concrete instance of an R1CS circuit. 8 | pub type SatisfyingAssignment = WitnessCS<::Scalar>; 9 | -------------------------------------------------------------------------------- /src/constants.rs: -------------------------------------------------------------------------------- 1 | //! 
Global Nova constants 2 | 3 | pub(crate) const NUM_CHALLENGE_BITS: usize = 128; 4 | pub(crate) const BN_LIMB_WIDTH: usize = 64; 5 | pub(crate) const BN_N_LIMBS: usize = 4; 6 | pub(crate) const NUM_FE_WITHOUT_IO_FOR_CRHF: usize = 9 + NIO_NOVA_FOLD * BN_N_LIMBS; 7 | pub(crate) const NUM_FE_WITHOUT_IO_FOR_NOVA_FOLD: usize = 7; 8 | pub(crate) const NUM_FE_FOR_RO: usize = 9; 9 | pub(crate) const NIO_NOVA_FOLD: usize = 2; 10 | pub(crate) const NUM_FE_IN_EMULATED_POINT: usize = 2 * BN_N_LIMBS + 1; 11 | pub(crate) const NIO_CYCLE_FOLD: usize = 4; // 1 per point (3) + scalar 12 | 13 | /// Bit size of Nova field element hashes 14 | pub const NUM_HASH_BITS: usize = 250; 15 | -------------------------------------------------------------------------------- /src/cyclefold/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module defines CycleFold folding scheme and its related functions. 2 | 3 | mod circuit; 4 | mod gadgets; 5 | mod nova_circuit; 6 | mod util; 7 | 8 | pub mod nifs; 9 | pub mod snark; 10 | -------------------------------------------------------------------------------- /src/cyclefold/nifs.rs: -------------------------------------------------------------------------------- 1 | //! This module defines the needed wrong-field NIFS prover 2 | 3 | use std::marker::PhantomData; 4 | 5 | use crate::{ 6 | constants::{NIO_CYCLE_FOLD, NUM_CHALLENGE_BITS, NUM_FE_IN_EMULATED_POINT}, 7 | errors::NovaError, 8 | gadgets::scalar_as_base, 9 | r1cs::{R1CSInstance, R1CSShape, R1CSWitness, RelaxedR1CSInstance, RelaxedR1CSWitness}, 10 | traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROConstants, ROTrait}, 11 | CommitmentKey, CompressedCommitment, 12 | }; 13 | 14 | use super::util::{absorb_cyclefold_r1cs, absorb_primary_commitment, absorb_primary_r1cs}; 15 | 16 | /// A SNARK that holds the proof of a step of an incremental computation of the primary circuit 17 | /// in the CycleFold folding scheme. 18 | /// The difference of this folding scheme from the Nova NIFS in `src/nifs.rs` is that this 19 | #[derive(Debug)] 20 | pub struct PrimaryNIFS 21 | where 22 | E1: Engine::Scalar>, 23 | E2: Engine::Scalar>, 24 | { 25 | pub(crate) comm_T: CompressedCommitment, 26 | _p: PhantomData, 27 | } 28 | 29 | impl PrimaryNIFS 30 | where 31 | E1: Engine::Scalar>, 32 | E2: Engine::Scalar>, 33 | { 34 | /// Takes a relaxed R1CS instance-witness pair (U1, W1) and an R1CS instance-witness pair (U2, W2) 35 | /// and folds them into a new relaxed R1CS instance-witness pair (U, W) and a commitment to the 36 | /// cross term T. It also provides the challenge r used to fold the instances. 
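For reference, `prove` below performs the standard Nova fold of a plain R1CS instance ($u_2 = 1$, $E_2 = 0$) into the relaxed accumulator; schematically, the relations realized by `U1.fold` / `W1.fold` are (a sketch, with $\bar W, \bar E$ the witness and error-term commitments):

$$
\bar W \leftarrow \bar W_1 + r\,\bar W_2, \qquad
\bar E \leftarrow \bar E_1 + r\,\bar T, \qquad
u \leftarrow u_1 + r, \qquad
X \leftarrow X_1 + r\,X_2,
$$

where $r$ is the challenge squeezed from the random oracle after absorbing the pp digest, $U_2$, and $\bar T$.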
37 | pub fn prove( 38 | ck: &CommitmentKey, 39 | ro_consts: &ROConstants, 40 | pp_digest: &E1::Scalar, 41 | S: &R1CSShape, 42 | U1: &RelaxedR1CSInstance, 43 | W1: &RelaxedR1CSWitness, 44 | U2: &R1CSInstance, 45 | W2: &R1CSWitness, 46 | ) -> Result< 47 | ( 48 | Self, 49 | (RelaxedR1CSInstance, RelaxedR1CSWitness), 50 | E1::Scalar, 51 | ), 52 | NovaError, 53 | > { 54 | let arity = U1.X.len(); 55 | 56 | if arity != U2.X.len() { 57 | return Err(NovaError::InvalidInputLength); 58 | } 59 | 60 | let mut ro = E2::RO::new( 61 | ro_consts.clone(), 62 | 1 + NUM_FE_IN_EMULATED_POINT + arity + NUM_FE_IN_EMULATED_POINT, // pp_digest + u.W + u.X + T 63 | ); 64 | 65 | ro.absorb(*pp_digest); 66 | 67 | absorb_primary_r1cs::(U2, &mut ro); 68 | 69 | let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; 70 | 71 | absorb_primary_commitment::(&comm_T, &mut ro); 72 | 73 | let r = scalar_as_base::(ro.squeeze(NUM_CHALLENGE_BITS)); 74 | 75 | let U = U1.fold(U2, &comm_T, &r); 76 | 77 | let W = W1.fold(W2, &T, &r)?; 78 | 79 | Ok(( 80 | Self { 81 | comm_T: comm_T.compress(), 82 | _p: PhantomData, 83 | }, 84 | (U, W), 85 | r, 86 | )) 87 | } 88 | } 89 | 90 | /// A SNARK that holds the proof of a step of an incremental computation of the CycleFold circuit 91 | /// The difference of this folding scheme from the Nova NIFS in `src/nifs.rs` is that this folding 92 | /// prover and verifier must fold in the `RelaxedR1CSInstance` accumulator because the optimization 93 | /// in the 94 | #[derive(Debug)] 95 | pub struct CycleFoldNIFS { 96 | pub(crate) comm_T: CompressedCommitment, 97 | } 98 | 99 | impl CycleFoldNIFS { 100 | /// Folds an R1CS instance/witness pair (U2, W2) into a relaxed R1CS instance/witness (U1, W1) 101 | /// returning the new folded accumulator and a commitment to the cross-term. 102 | pub fn prove( 103 | ck: &CommitmentKey, 104 | ro_consts: &ROConstants, 105 | pp_digest: &E::Scalar, 106 | S: &R1CSShape, 107 | U1: &RelaxedR1CSInstance, 108 | W1: &RelaxedR1CSWitness, 109 | U2: &R1CSInstance, 110 | W2: &R1CSWitness, 111 | ) -> Result<(Self, (RelaxedR1CSInstance, RelaxedR1CSWitness)), NovaError> { 112 | // Check `U1` and `U2` have the same arity 113 | if U2.X.len() != NIO_CYCLE_FOLD || U1.X.len() != NIO_CYCLE_FOLD { 114 | return Err(NovaError::InvalidInputLength); 115 | } 116 | 117 | // initialize a new RO 118 | let mut ro = E::RO::new( 119 | ro_consts.clone(), 120 | 46, // 1 + (3 + 3 + 1 + NIO_CYCLE_FOLD * BN_N_LIMBS) + (3 + NIO_CYCLE_FOLD * BN_N_LIMBS) + 3, // digest + (U) + (u) + T 121 | ); 122 | 123 | // append the digest of pp to the transcript 124 | ro.absorb(scalar_as_base::(*pp_digest)); 125 | 126 | // append U1 to the transcript. 
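    // (Absorbing the running accumulator here binds the squeezed challenge
    // to the entire folding history; the plain Nova NIFS gets this binding
    // for free via the instance hash carried in the circuit IO.)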
127 | // NOTE: this must be here because the IO for `U2` does not have the data of the hash of `U1` 128 | U1.absorb_in_ro(&mut ro); 129 | 130 | // append U2 to transcript 131 | absorb_cyclefold_r1cs(U2, &mut ro); 132 | 133 | // compute a commitment to the cross-term 134 | let (T, comm_T) = S.commit_T(ck, U1, W1, U2, W2)?; 135 | 136 | // append `comm_T` to the transcript and obtain a challenge 137 | comm_T.absorb_in_ro(&mut ro); 138 | 139 | // compute a challenge from the RO 140 | let r = ro.squeeze(NUM_CHALLENGE_BITS); 141 | 142 | // fold the instance using `r` and `comm_T` 143 | let U = U1.fold(U2, &comm_T, &r); 144 | 145 | // fold the witness using `r` and `T` 146 | let W = W1.fold(W2, &T, &r)?; 147 | 148 | // return the folded instance and witness 149 | Ok(( 150 | Self { 151 | comm_T: comm_T.compress(), 152 | }, 153 | (U, W), 154 | )) 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /src/cyclefold/util.rs: -------------------------------------------------------------------------------- 1 | //! This module defines some useful utilities for RO absorbing, and the Folding data used in the 2 | //! CycleFold folding scheme. 3 | 4 | use crate::{ 5 | constants::{BN_LIMB_WIDTH, BN_N_LIMBS}, 6 | gadgets::{f_to_nat, nat_to_limbs, scalar_as_base}, 7 | r1cs::{R1CSInstance, RelaxedR1CSInstance}, 8 | traits::{commitment::CommitmentTrait, AbsorbInROTrait, Engine, ROTrait}, 9 | Commitment, 10 | }; 11 | 12 | use ff::Field; 13 | use serde::{Deserialize, Serialize}; 14 | 15 | /// Absorb a commitment over engine `E1` into an RO over engine `E2` by absorbing the limbs 16 | pub(super) fn absorb_primary_commitment( 17 | comm: &impl CommitmentTrait, 18 | ro: &mut impl ROTrait, 19 | ) where 20 | E1: Engine::Scalar>, 21 | E2: Engine::Scalar>, 22 | { 23 | let (x, y, is_infinity) = comm.to_coordinates(); 24 | 25 | let x_limbs = nat_to_limbs(&f_to_nat(&x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); 26 | let y_limbs = nat_to_limbs(&f_to_nat(&y), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); 27 | 28 | for limb in x_limbs { 29 | ro.absorb(scalar_as_base::(limb)); 30 | } 31 | for limb in y_limbs { 32 | ro.absorb(scalar_as_base::(limb)); 33 | } 34 | if is_infinity { 35 | ro.absorb(::Scalar::ONE); 36 | } else { 37 | ro.absorb(::Scalar::ZERO); 38 | } 39 | } 40 | 41 | pub(super) fn absorb_primary_r1cs( 42 | u: &R1CSInstance, 43 | ro: &mut impl ROTrait, 44 | ) where 45 | E1: Engine::Scalar>, 46 | E2: Engine::Scalar>, 47 | { 48 | absorb_primary_commitment::(&u.comm_W, ro); 49 | for x in &u.X { 50 | ro.absorb(*x); 51 | } 52 | } 53 | 54 | pub(super) fn absorb_cyclefold_r1cs(u: &R1CSInstance, ro: &mut E::RO) { 55 | u.comm_W.absorb_in_ro(ro); 56 | u.X.iter().for_each(|x| { 57 | let limbs: Vec = nat_to_limbs(&f_to_nat(x), BN_LIMB_WIDTH, BN_N_LIMBS).unwrap(); 58 | limbs 59 | .into_iter() 60 | .for_each(|limb| ro.absorb(scalar_as_base::(limb))); 61 | }); 62 | } 63 | 64 | pub(super) fn absorb_primary_relaxed_r1cs(U: &RelaxedR1CSInstance, ro: &mut E2::RO) 65 | where 66 | E1: Engine::Scalar>, 67 | E2: Engine::Scalar>, 68 | { 69 | absorb_primary_commitment::(&U.comm_W, ro); 70 | absorb_primary_commitment::(&U.comm_E, ro); 71 | ro.absorb(U.u); 72 | for e in &U.X { 73 | ro.absorb(*e); 74 | } 75 | } 76 | 77 | #[derive(Debug, Serialize, Deserialize)] 78 | #[serde(bound = "")] 79 | pub(super) struct FoldingData { 80 | pub U: RelaxedR1CSInstance, 81 | pub u: R1CSInstance, 82 | pub T: Commitment, 83 | } 84 | 85 | impl FoldingData { 86 | pub fn new(U: RelaxedR1CSInstance, u: R1CSInstance, T: Commitment) -> 
87 | Self { U, u, T }
88 | }
89 | }
90 |
--------------------------------------------------------------------------------
/src/digest.rs:
--------------------------------------------------------------------------------
1 | use bincode::Options;
2 | use ff::PrimeField;
3 | use serde::Serialize;
4 | use sha3::{Digest, Sha3_256};
5 | use std::io;
6 | use std::marker::PhantomData;
7 |
8 | use crate::constants::NUM_HASH_BITS;
9 |
10 | /// Trait for components with potentially discrete digests to be included in their container's digest.
11 | pub trait Digestible {
12 | /// Write the byte representation of Self in a byte buffer
13 | fn write_bytes<W: Sized + io::Write>(&self, byte_sink: &mut W) -> Result<(), io::Error>;
14 | }
15 |
16 | /// Marker trait to be implemented for types that implement `Digestible` and `Serialize`.
17 | /// Their instances will be serialized to bytes then digested.
18 | pub trait SimpleDigestible: Serialize {}
19 |
20 | impl<T: SimpleDigestible> Digestible for T {
21 | fn write_bytes<W: Sized + io::Write>(&self, byte_sink: &mut W) -> Result<(), io::Error> {
22 | let config = bincode::DefaultOptions::new()
23 | .with_little_endian()
24 | .with_fixint_encoding();
25 | // Note: bincode recursively length-prefixes every field!
26 | config
27 | .serialize_into(byte_sink, self)
28 | .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
29 | }
30 | }
31 |
32 | pub struct DigestComputer<'a, F, T> {
33 | inner: &'a T,
34 | _phantom: PhantomData<F>,
35 | }
36 |
37 | impl<'a, F: PrimeField, T: Digestible> DigestComputer<'a, F, T> {
38 | fn hasher() -> Sha3_256 {
39 | Sha3_256::new()
40 | }
41 |
42 | fn map_to_field(digest: &[u8]) -> F {
43 | let bv = (0..NUM_HASH_BITS).map(|i| {
44 | let (byte_pos, bit_pos) = (i / 8, i % 8);
45 | let bit = (digest[byte_pos] >> bit_pos) & 1;
46 | bit == 1
47 | });
48 |
49 | // turn the bit vector into a scalar
50 | let mut digest = F::ZERO;
51 | let mut coeff = F::ONE;
52 | for bit in bv {
53 | if bit {
54 | digest += coeff;
55 | }
56 | coeff += coeff;
57 | }
58 | digest
59 | }
60 |
61 | /// Create a new `DigestComputer`
62 | pub fn new(inner: &'a T) -> Self {
63 | DigestComputer {
64 | inner,
65 | _phantom: PhantomData,
66 | }
67 | }
68 |
69 | /// Compute the digest of a `Digestible` instance.
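///
/// A minimal usage sketch (`MyParams` is a hypothetical type implementing
/// `SimpleDigestible`, and `F` stands for any `PrimeField`):
///
/// ```ignore
/// let params = MyParams::default();
/// let digest: F = DigestComputer::<F, _>::new(&params).digest()?;
/// ```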
70 | pub fn digest(&self) -> Result<F, io::Error> {
71 | let mut hasher = Self::hasher();
72 | self.inner.write_bytes(&mut hasher)?;
73 | let bytes: [u8; 32] = hasher.finalize().into();
74 | Ok(Self::map_to_field(&bytes))
75 | }
76 | }
77 |
78 | #[cfg(test)]
79 | mod tests {
80 | use super::{DigestComputer, SimpleDigestible};
81 | use crate::{provider::PallasEngine, traits::Engine};
82 | use ff::Field;
83 | use once_cell::sync::OnceCell;
84 | use serde::{Deserialize, Serialize};
85 |
86 | type E = PallasEngine;
87 |
88 | #[derive(Serialize, Deserialize)]
89 | struct S<E: Engine> {
90 | i: usize,
91 | #[serde(skip, default = "OnceCell::new")]
92 | digest: OnceCell<E::Scalar>,
93 | }
94 |
95 | impl<E: Engine> SimpleDigestible for S<E> {}
96 |
97 | impl<E: Engine> S<E> {
98 | fn new(i: usize) -> Self {
99 | Self {
100 | i,
101 | digest: OnceCell::new(),
102 | }
103 | }
104 |
105 | fn digest(&self) -> E::Scalar {
106 | self
107 | .digest
108 | .get_or_try_init(|| DigestComputer::new(self).digest())
109 | .cloned()
110 | .unwrap()
111 | }
112 | }
113 |
114 | #[test]
115 | fn test_digest_field_not_ingested_in_computation() {
116 | let s1 = S::<E>::new(42);
117 |
118 | // let's set up a struct with a weird digest field to make sure the digest computation does not depend on it
119 | let oc = OnceCell::new();
120 | oc.set(<E as Engine>::Scalar::ONE).unwrap();
121 |
122 | let s2: S<E> = S { i: 42, digest: oc };
123 |
124 | assert_eq!(
125 | DigestComputer::<<E as Engine>::Scalar, _>::new(&s1)
126 | .digest()
127 | .unwrap(),
128 | DigestComputer::<<E as Engine>::Scalar, _>::new(&s2)
129 | .digest()
130 | .unwrap()
131 | );
132 |
133 | // note: because of the semantics of `OnceCell::get_or_try_init`, the above
134 | // equality will not result in `s1.digest() == s2.digest()`
135 | assert_ne!(
136 | s2.digest(),
137 | DigestComputer::<<E as Engine>::Scalar, _>::new(&s2)
138 | .digest()
139 | .unwrap()
140 | );
141 | }
142 |
143 | #[test]
144 | fn test_digest_impervious_to_serialization() {
145 | let good_s = S::<E>::new(42);
146 |
147 | // let's set up a struct with a weird digest field to confuse deserializers
148 | let oc = OnceCell::new();
149 | oc.set(<E as Engine>::Scalar::ONE).unwrap();
150 |
151 | let bad_s: S<E> = S { i: 42, digest: oc };
152 | // this justifies the adjective "bad"
153 | assert_ne!(good_s.digest(), bad_s.digest());
154 |
155 | let naughty_bytes = bincode::serialize(&bad_s).unwrap();
156 |
157 | let retrieved_s: S<E> = bincode::deserialize(&naughty_bytes).unwrap();
158 | assert_eq!(good_s.digest(), retrieved_s.digest())
159 | }
160 | }
161 |
--------------------------------------------------------------------------------
/src/errors.rs:
--------------------------------------------------------------------------------
1 | //! This module defines errors returned by the library.
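//!
//! Fallible APIs in this crate return `Result<T, NovaError>`, so callers can match on
//! individual variants. Illustrative only; `shape.is_sat(...)` stands in for any
//! fallible call:
//!
//! ```ignore
//! match shape.is_sat(&ck, &instance, &witness) {
//!   Err(NovaError::UnSatIndex(i)) => eprintln!("constraint {i} is unsatisfied"),
//!   Err(e) => eprintln!("{e}"),
//!   Ok(()) => {}
//! }
//! ```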
2 | use core::fmt::Debug;
3 | use thiserror::Error;
4 |
5 | /// Errors returned by Nova
6 | #[derive(Debug, Eq, PartialEq, Error)]
7 | #[non_exhaustive]
8 | pub enum NovaError {
9 | /// returned if the supplied row or col in (row,col,val) tuple is out of range
10 | #[error("InvalidIndex")]
11 | InvalidIndex,
12 | /// returned if the step circuit calls inputize or alloc_io in its synthesize method
13 | /// instead of passing output with the return value
14 | #[error("InvalidStepCircuitIO")]
15 | InvalidStepCircuitIO,
16 | /// returned if the supplied input is not of the right length
17 | #[error("InvalidInputLength")]
18 | InvalidInputLength,
19 | /// returned if the supplied witness is not of the right length
20 | #[error("InvalidWitnessLength")]
21 | InvalidWitnessLength,
22 | /// returned if the supplied witness is not a satisfying witness to a given shape and instance
23 | #[error("UnSat")]
24 | UnSat,
25 | /// returned if the supplied witness is not a satisfying witness to a given shape and instance, with the index of the unsatisfied constraint
26 | #[error("UnSatIndex")]
27 | UnSatIndex(usize),
28 | /// returned when the supplied compressed commitment cannot be decompressed
29 | #[error("DecompressionError")]
30 | DecompressionError,
31 | /// returned if proof verification fails
32 | #[error("ProofVerifyError")]
33 | ProofVerifyError,
34 | /// returned if the provided commitment key is not of sufficient length
35 | #[error("InvalidCommitmentKeyLength")]
36 | InvalidCommitmentKeyLength,
37 | /// returned if the provided number of steps is zero
38 | #[error("InvalidNumSteps")]
39 | InvalidNumSteps,
40 | /// returned if there is an error in the proof/verification of a PCS
41 | #[error("PCSError")]
42 | PCSError(#[from] PCSError),
43 | /// returned when an invalid sum-check proof is provided
44 | #[error("InvalidSumcheckProof")]
45 | InvalidSumcheckProof,
46 | /// returned when the initial input to an incremental computation differs from a previously declared arity
47 | #[error("InvalidInitialInputLength")]
48 | InvalidInitialInputLength,
49 | /// returned when the step execution produces an output whose length differs from a previously declared arity
50 | #[error("InvalidStepOutputLength")]
51 | InvalidStepOutputLength,
52 | /// returned when the transcript engine encounters an overflow of the round number
53 | #[error("InternalTranscriptError")]
54 | InternalTranscriptError,
55 | /// returned when the multiset check fails
56 | #[error("InvalidMultisetProof")]
57 | InvalidMultisetProof,
58 | /// returned when the product proof check fails
59 | #[error("InvalidProductProof")]
60 | InvalidProductProof,
61 | /// returned when the consistency check between the public IO and the witness assignment fails
62 | #[error("IncorrectWitness")]
63 | IncorrectWitness,
64 | /// returned when an error occurs during circuit synthesis
65 | #[error("SynthesisError: {0}")]
66 | SynthesisError(String),
67 | /// returned when there is an error creating a digest
68 | #[error("DigestError")]
69 | DigestError,
70 | /// returned when the prover cannot prove the provided statement due to completeness error
71 | #[error("InternalError")]
72 | InternalError,
73 | }
74 |
75 | /// Errors specific to the Polynomial commitment scheme
76 | #[derive(Debug, Eq, PartialEq, Error)]
77 | pub enum PCSError {
78 | /// returned when an invalid PCS evaluation argument is provided
79 | #[error("InvalidPCS")]
80 | InvalidPCS,
81 | /// returned when there is a Zeromorph error
82 | #[error("ZMError")]
83 | ZMError,
84 | /// returned when a length check fails in a PCS
85 | #[error("LengthError")]
86 | LengthError,
87 | }
88 |
89 | impl From<bellpepper_core::SynthesisError> for NovaError {
90 | fn from(err: bellpepper_core::SynthesisError) -> Self {
91 | Self::SynthesisError(err.to_string())
92 | }
93 | }
94 |
--------------------------------------------------------------------------------
/src/gadgets/mod.rs:
--------------------------------------------------------------------------------
1 | //! This module implements various gadgets necessary for Nova and applications built with Nova.
2 | mod ecc;
3 | pub(crate) use ecc::AllocatedPoint;
4 |
5 | mod nonnative;
6 | pub(crate) use nonnative::{bignat::nat_to_limbs, bignat::BigNat, util::f_to_nat, util::Num};
7 |
8 | mod r1cs;
9 | pub(crate) use r1cs::{
10 | conditionally_select_alloc_relaxed_r1cs, conditionally_select_vec_allocated_relaxed_r1cs_instance,
11 | };
12 | pub(crate) use r1cs::{AllocatedR1CSInstance, AllocatedRelaxedR1CSInstance};
13 |
14 | mod utils;
15 | #[cfg(test)]
16 | pub(crate) use utils::alloc_one;
17 | pub(crate) use utils::{
18 | alloc_bignat_constant, alloc_num_equals, alloc_scalar_as_base, alloc_zero,
19 | conditionally_select_allocated_bit, conditionally_select_bignat, le_bits_to_num, scalar_as_base,
20 | };
--------------------------------------------------------------------------------
/src/gadgets/nonnative/mod.rs:
--------------------------------------------------------------------------------
1 | //! This module implements various gadgets necessary for doing non-native arithmetic
2 | //! Code in this module is adapted from [bellman-bignat](https://github.com/alex-ozdemir/bellman-bignat), which is licensed under MIT
3 |
4 | use bellpepper_core::SynthesisError;
5 | use ff::PrimeField;
6 |
7 | trait OptionExt<T> {
8 | fn grab(&self) -> Result<&T, SynthesisError>;
9 | }
10 |
11 | impl<T> OptionExt<T> for Option<T> {
12 | fn grab(&self) -> Result<&T, SynthesisError> {
13 | self.as_ref().ok_or(SynthesisError::AssignmentMissing)
14 | }
15 | }
16 |
17 | trait BitAccess {
18 | fn get_bit(&self, i: usize) -> Option<bool>;
19 | }
20 |
21 | impl<Scalar: PrimeField> BitAccess for Scalar {
22 | fn get_bit(&self, i: usize) -> Option<bool> {
23 | if i as u32 >= Scalar::NUM_BITS {
24 | return None;
25 | }
26 |
27 | let (byte_pos, bit_pos) = (i / 8, i % 8);
28 | let byte = self.to_repr().as_ref()[byte_pos];
29 | let bit = byte >> bit_pos & 1;
30 | Some(bit == 1)
31 | }
32 | }
33 |
34 | pub mod bignat;
35 | pub mod util;
--------------------------------------------------------------------------------
/src/provider/bn256_grumpkin.rs:
--------------------------------------------------------------------------------
1 | //! This module implements the Nova traits for `bn256::Point`, `bn256::Scalar`, `grumpkin::Point`, `grumpkin::Scalar`.
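//!
//! A sketch of selecting this cycle (engine types as defined in `crate::provider`;
//! the crate alias `arecibo` is an assumption):
//!
//! ```ignore
//! use arecibo::provider::{Bn256EngineIPA, GrumpkinEngine};
//! type E1 = Bn256EngineIPA; // primary: BN254 (bn256)
//! type E2 = GrumpkinEngine; // secondary: Grumpkin, completing the 2-cycle
//! ```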
2 | use crate::{ 3 | impl_traits, 4 | provider::{traits::DlogGroup, util::msm::cpu_best_msm}, 5 | traits::{Group, PrimeFieldExt, TranscriptReprTrait}, 6 | }; 7 | use digest::{ExtendableOutput, Update}; 8 | use ff::{FromUniformBytes, PrimeField}; 9 | use group::{cofactor::CofactorCurveAffine, Curve, Group as AnotherGroup}; 10 | #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] 11 | use grumpkin_msm::{bn256 as bn256_msm, grumpkin as grumpkin_msm}; 12 | // Remove this when https://github.com/zcash/pasta_curves/issues/41 resolves 13 | use halo2curves::{bn256::G2Affine, CurveAffine, CurveExt}; 14 | use num_bigint::BigInt; 15 | use num_traits::Num; 16 | use rayon::prelude::*; 17 | use sha3::Shake256; 18 | use std::io::Read; 19 | 20 | // Thus compile-time assertions checks important assumptions in the memory representation 21 | // of group data that supports the use of Abomonation. 22 | static_assertions::assert_eq_size!(G2Affine, [u64; 16]); 23 | 24 | /// Re-exports that give access to the standard aliases used in the code base, for bn256 25 | pub mod bn256 { 26 | pub use halo2curves::bn256::{ 27 | Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point, 28 | }; 29 | } 30 | 31 | /// Re-exports that give access to the standard aliases used in the code base, for grumpkin 32 | pub mod grumpkin { 33 | pub use halo2curves::grumpkin::{ 34 | Fq as Base, Fr as Scalar, G1Affine as Affine, G1Compressed as Compressed, G1 as Point, 35 | }; 36 | } 37 | 38 | #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] 39 | impl_traits!( 40 | bn256, 41 | "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", 42 | "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", 43 | bn256_msm 44 | ); 45 | #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] 46 | impl_traits!( 47 | bn256, 48 | "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", 49 | "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47" 50 | ); 51 | 52 | #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] 53 | impl_traits!( 54 | grumpkin, 55 | "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", 56 | "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", 57 | grumpkin_msm 58 | ); 59 | #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] 60 | impl_traits!( 61 | grumpkin, 62 | "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", 63 | "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001" 64 | ); 65 | 66 | #[cfg(test)] 67 | mod tests { 68 | use ff::Field; 69 | use rand::thread_rng; 70 | 71 | use crate::provider::{ 72 | bn256_grumpkin::{bn256, grumpkin}, 73 | traits::DlogGroup, 74 | util::msm::cpu_best_msm, 75 | }; 76 | 77 | #[test] 78 | fn test_bn256_msm_correctness() { 79 | let npoints = 1usize << 16; 80 | let points = bn256::Point::from_label(b"test", npoints); 81 | 82 | let mut rng = thread_rng(); 83 | let scalars = (0..npoints) 84 | .map(|_| bn256::Scalar::random(&mut rng)) 85 | .collect::>(); 86 | 87 | let cpu_msm = cpu_best_msm(&points, &scalars); 88 | let gpu_msm = bn256::Point::vartime_multiscalar_mul(&scalars, &points); 89 | 90 | assert_eq!(cpu_msm, gpu_msm); 91 | } 92 | 93 | #[test] 94 | fn test_grumpkin_msm_correctness() { 95 | let npoints = 1usize << 16; 96 | let points = grumpkin::Point::from_label(b"test", npoints); 97 | 98 | let mut rng = thread_rng(); 99 | let scalars = (0..npoints) 100 | .map(|_| grumpkin::Scalar::random(&mut rng)) 101 | .collect::>(); 102 | 
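// The accelerated MSM path (compiled in on x86_64/aarch64 for large inputs) must
// agree with the straightforward CPU implementation on identical inputs.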
103 | let cpu_msm = cpu_best_msm(&points, &scalars);
104 | let gpu_msm = grumpkin::Point::vartime_multiscalar_mul(&scalars, &points);
105 |
106 | assert_eq!(cpu_msm, gpu_msm);
107 | }
108 | }
--------------------------------------------------------------------------------
/src/provider/mod.rs:
--------------------------------------------------------------------------------
1 | //! This module implements Nova's traits using several different combinations of curves and commitment schemes.
2 |
3 | // public modules to be used as an evaluation engine with Spartan
4 | pub mod hyperkzg;
5 | pub mod ipa_pc;
6 | pub mod non_hiding_zeromorph;
7 |
8 | // crate-public modules, made crate-public mostly for tests
9 | pub(crate) mod bn256_grumpkin;
10 | mod pasta;
11 | mod pedersen;
12 | pub(crate) mod poseidon;
13 | pub(crate) mod secp_secq;
14 | pub(crate) mod traits;
15 | // a non-hiding variant of {kzg, zeromorph}
16 | mod kzg_commitment;
17 | pub(crate) mod util;
18 |
19 | // crate-private modules
20 | mod keccak;
21 | mod tests;
22 |
23 | use crate::{
24 | provider::{
25 | bn256_grumpkin::{bn256, grumpkin},
26 | keccak::Keccak256Transcript,
27 | pedersen::CommitmentEngine as PedersenCommitmentEngine,
28 | poseidon::{PoseidonRO, PoseidonROCircuit},
29 | secp_secq::{secp256k1, secq256k1},
30 | },
31 | traits::{CurveCycleEquipped, Engine},
32 | };
33 | use halo2curves::bn256::Bn256;
34 | use pasta_curves::{pallas, vesta};
35 |
36 | use self::kzg_commitment::KZGCommitmentEngine;
37 |
38 | /// An implementation of the Nova `Engine` trait with Grumpkin curve and Pedersen commitment scheme
39 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
40 | pub struct GrumpkinEngine;
41 |
42 | /// An implementation of the Nova `Engine` trait with BN254 curve and Pedersen commitment scheme
43 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
44 | pub struct Bn256EngineIPA;
45 |
46 | impl Engine for Bn256EngineIPA {
47 | type Base = bn256::Base;
48 | type Scalar = bn256::Scalar;
49 | type GE = bn256::Point;
50 | type RO = PoseidonRO<Self::Base, Self::Scalar>;
51 | type ROCircuit = PoseidonROCircuit<Self::Base>;
52 | type TE = Keccak256Transcript<Self>;
53 | type CE = PedersenCommitmentEngine<Self>;
54 | }
55 |
56 | impl Engine for GrumpkinEngine {
57 | type Base = grumpkin::Base;
58 | type Scalar = grumpkin::Scalar;
59 | type GE = grumpkin::Point;
60 | type RO = PoseidonRO<Self::Base, Self::Scalar>;
61 | type ROCircuit = PoseidonROCircuit<Self::Base>;
62 | type TE = Keccak256Transcript<Self>;
63 | type CE = PedersenCommitmentEngine<Self>;
64 | }
65 |
66 | /// An implementation of the Nova `Engine` trait with BN254 curve and Zeromorph commitment scheme
67 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
68 | pub struct Bn256EngineZM;
69 |
70 | impl Engine for Bn256EngineZM {
71 | type Base = bn256::Base;
72 | type Scalar = bn256::Scalar;
73 | type GE = bn256::Point;
74 | type RO = PoseidonRO<Self::Base, Self::Scalar>;
75 | type ROCircuit = PoseidonROCircuit<Self::Base>;
76 | type TE = Keccak256Transcript<Self>;
77 | type CE = KZGCommitmentEngine<Bn256>;
78 | }
79 | /// An implementation of Nova traits with HyperKZG over the BN256 curve
80 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
81 | pub struct Bn256EngineKZG;
82 |
83 | impl Engine for Bn256EngineKZG {
84 | type Base = bn256::Base;
85 | type Scalar = bn256::Scalar;
86 | type GE = bn256::Point;
87 | type RO = PoseidonRO<Self::Base, Self::Scalar>;
88 | type ROCircuit = PoseidonROCircuit<Self::Base>;
89 | type TE = Keccak256Transcript<Self>;
90 | type CE = KZGCommitmentEngine<Bn256>;
91 | }
92 |
93 | impl CurveCycleEquipped for Bn256EngineIPA {
94 | type Secondary = GrumpkinEngine;
95 | }
96 |
97 | impl CurveCycleEquipped for Bn256EngineKZG {
98 | type Secondary = GrumpkinEngine;
99 | }
100 |
101 | impl CurveCycleEquipped for Bn256EngineZM {
102 | type Secondary = GrumpkinEngine;
103 | }
104 |
105 | /// An implementation of the Nova `Engine` trait with Secp256k1 curve and Pedersen commitment scheme
106 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
107 | pub struct Secp256k1Engine;
108 |
109 | /// An implementation of the Nova `Engine` trait with Secq256k1 curve and Pedersen commitment scheme
110 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
111 | pub struct Secq256k1Engine;
112 |
113 | impl Engine for Secp256k1Engine {
114 | type Base = secp256k1::Base;
115 | type Scalar = secp256k1::Scalar;
116 | type GE = secp256k1::Point;
117 | type RO = PoseidonRO<Self::Base, Self::Scalar>;
118 | type ROCircuit = PoseidonROCircuit<Self::Base>;
119 | type TE = Keccak256Transcript<Self>;
120 | type CE = PedersenCommitmentEngine<Self>;
121 | }
122 |
123 | impl Engine for Secq256k1Engine {
124 | type Base = secq256k1::Base;
125 | type Scalar = secq256k1::Scalar;
126 | type GE = secq256k1::Point;
127 | type RO = PoseidonRO<Self::Base, Self::Scalar>;
128 | type ROCircuit = PoseidonROCircuit<Self::Base>;
129 | type TE = Keccak256Transcript<Self>;
130 | type CE = PedersenCommitmentEngine<Self>;
131 | }
132 |
133 | impl CurveCycleEquipped for Secp256k1Engine {
134 | type Secondary = Secq256k1Engine;
135 | }
136 |
137 | /// An implementation of the Nova `Engine` trait with Pallas curve and Pedersen commitment scheme
138 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
139 | pub struct PallasEngine;
140 |
141 | /// An implementation of the Nova `Engine` trait with Vesta curve and Pedersen commitment scheme
142 | #[derive(Clone, Copy, Debug, Eq, PartialEq)]
143 | pub struct VestaEngine;
144 |
145 | impl Engine for PallasEngine {
146 | type Base = pallas::Base;
147 | type Scalar = pallas::Scalar;
148 | type GE = pallas::Point;
149 | type RO = PoseidonRO<Self::Base, Self::Scalar>;
150 | type ROCircuit = PoseidonROCircuit<Self::Base>;
151 | type TE = Keccak256Transcript<Self>;
152 | type CE = PedersenCommitmentEngine<Self>;
153 | }
154 |
155 | impl Engine for VestaEngine {
156 | type Base = vesta::Base;
157 | type Scalar = vesta::Scalar;
158 | type GE = vesta::Point;
159 | type RO = PoseidonRO<Self::Base, Self::Scalar>;
160 | type ROCircuit = PoseidonROCircuit<Self::Base>;
161 | type TE = Keccak256Transcript<Self>;
162 | type CE = PedersenCommitmentEngine<Self>;
163 | }
164 |
165 | impl CurveCycleEquipped for PallasEngine {
166 | type Secondary = VestaEngine;
167 | }
168 |
169 | #[cfg(test)]
170 | mod test {
171 | use crate::provider::{
172 | bn256_grumpkin::{bn256, grumpkin},
173 | secp_secq::{secp256k1, secq256k1},
174 | traits::DlogGroup,
175 | util::msm::cpu_best_msm,
176 | };
177 | use digest::{ExtendableOutput, Update};
178 | use group::{ff::Field, Curve, Group};
179 | use halo2curves::{CurveAffine, CurveExt};
180 | use itertools::Itertools as _;
181 | use pasta_curves::{pallas, vesta};
182 | use rand_core::OsRng;
183 | use sha3::Shake256;
184 | use std::io::Read;
185 |
186 | macro_rules! impl_cycle_pair_test {
187 | ($curve:ident) => {
188 | fn from_label_serial(label: &'static [u8], n: usize) -> Vec<$curve::Affine> {
189 | let mut shake = Shake256::default();
190 | shake.update(label);
191 | let mut reader = shake.finalize_xof();
192 | (0..n)
193 | .map(|_| {
194 | let mut uniform_bytes = [0u8; 32];
195 | reader.read_exact(&mut uniform_bytes).unwrap();
196 | let hash = $curve::Point::hash_to_curve("from_uniform_bytes");
197 | hash(&uniform_bytes).to_affine()
198 | })
199 | .collect()
200 | }
201 |
202 | let label = b"test_from_label";
203 | for n in [
204 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 1021,
205 | ] {
206 | let ck_par = <$curve::Point as DlogGroup>::from_label(label, n);
207 | let ck_ser = from_label_serial(label, n);
208 | assert_eq!(ck_par.len(), n);
209 | assert_eq!(ck_ser.len(), n);
210 | assert_eq!(ck_par, ck_ser);
211 | }
212 | };
213 | }
214 |
215 | fn test_msm_with<F: Field, A: CurveAffine<ScalarExt = F>>() {
216 | let n = 8;
217 | let coeffs = (0..n).map(|_| F::random(OsRng)).collect::<Vec<_>>();
218 | let bases = (0..n)
219 | .map(|_| A::from(A::generator() * F::random(OsRng)))
220 | .collect::<Vec<_>>();
221 | let naive = coeffs
222 | .iter()
223 | .zip_eq(bases.iter())
224 | .fold(A::CurveExt::identity(), |acc, (coeff, base)| {
225 | acc + *base * coeff
226 | });
227 |
228 | assert_eq!(naive, cpu_best_msm(&bases, &coeffs))
229 | }
230 |
231 | #[test]
232 | fn test_msm() {
233 | test_msm_with::<pallas::Scalar, pallas::Affine>();
234 | test_msm_with::<vesta::Scalar, vesta::Affine>();
235 | test_msm_with::<bn256::Scalar, bn256::Affine>();
236 | test_msm_with::<grumpkin::Scalar, grumpkin::Affine>();
237 | test_msm_with::<secp256k1::Scalar, secp256k1::Affine>();
238 | test_msm_with::<secq256k1::Scalar, secq256k1::Affine>();
239 | }
240 |
241 | #[test]
242 | fn test_bn256_from_label() {
243 | impl_cycle_pair_test!(bn256);
244 | }
245 |
246 | #[test]
247 | fn test_pallas_from_label() {
248 | impl_cycle_pair_test!(pallas);
249 | }
250 |
251 | #[test]
252 | fn test_secp256k1_from_label() {
253 | impl_cycle_pair_test!(secp256k1);
254 | }
255 | }
--------------------------------------------------------------------------------
/src/provider/pasta.rs:
--------------------------------------------------------------------------------
1 | //! This module implements the Nova traits for `pallas::Point`, `pallas::Scalar`, `vesta::Point`, `vesta::Scalar`.
2 | use crate::{
3 | provider::{traits::DlogGroup, util::msm::cpu_best_msm},
4 | traits::{Group, PrimeFieldExt, TranscriptReprTrait},
5 | };
6 | use derive_more::{From, Into};
7 | use digest::{ExtendableOutput, Update};
8 | use ff::{FromUniformBytes, PrimeField};
9 | use group::{prime::PrimeCurveAffine, Curve};
10 | use num_bigint::BigInt;
11 | use num_traits::Num;
12 | use pasta_curves::{
13 | self,
14 | arithmetic::{CurveAffine, CurveExt},
15 | pallas, vesta,
16 | };
17 | use rayon::prelude::*;
18 | use serde::{Deserialize, Serialize};
19 | use sha3::Shake256;
20 | use std::io::Read;
21 |
22 | /// A wrapper for compressed group elements of pallas
23 | #[derive(Clone, Copy, Debug, Eq, From, Into, PartialEq, Serialize, Deserialize)]
24 | pub struct PallasCompressedElementWrapper([u8; 32]);
25 |
26 | /// A wrapper for compressed group elements of vesta
27 | #[derive(Clone, Copy, Debug, Eq, From, Into, PartialEq, Serialize, Deserialize)]
28 | pub struct VestaCompressedElementWrapper([u8; 32]);
29 |
30 | macro_rules! impl_traits {
31 | (
32 | $name:ident,
33 | $name_compressed:ident,
34 | $order_str:literal,
35 | $base_str:literal
36 | ) => {
37 | // These compile-time assertions check important assumptions in the memory representation
38 | // of group data that supports the use of Abomonation.
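// (An affine pasta point stores two 256-bit base-field coordinates, i.e. 2 x 4 u64
// limbs = [u64; 8]; a projective point carries a third coordinate, giving [u64; 12].)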
39 | static_assertions::assert_eq_size!($name::Affine, [u64; 8]); 40 | static_assertions::assert_eq_size!($name::Point, [u64; 12]); 41 | 42 | impl Group for $name::Point { 43 | type Base = $name::Base; 44 | type Scalar = $name::Scalar; 45 | 46 | fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { 47 | let A = $name::Point::a(); 48 | let B = $name::Point::b(); 49 | let order = BigInt::from_str_radix($order_str, 16).unwrap(); 50 | let base = BigInt::from_str_radix($base_str, 16).unwrap(); 51 | 52 | (A, B, order, base) 53 | } 54 | } 55 | 56 | impl DlogGroup for $name::Point { 57 | type ScalarExt = $name::Scalar; 58 | type AffineExt = $name::Affine; 59 | type Compressed = $name_compressed; 60 | 61 | #[tracing::instrument( 62 | skip_all, 63 | level = "trace", 64 | name = "<_ as Group>::vartime_multiscalar_mul" 65 | )] 66 | fn vartime_multiscalar_mul(scalars: &[Self::ScalarExt], bases: &[Self::Affine]) -> Self { 67 | #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] 68 | if scalars.len() >= 128 { 69 | grumpkin_msm::pasta::$name(bases, scalars) 70 | } else { 71 | cpu_best_msm(bases, scalars) 72 | } 73 | #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] 74 | cpu_best_msm(bases, scalars) 75 | } 76 | 77 | fn from_label(label: &'static [u8], n: usize) -> Vec { 78 | let mut shake = Shake256::default(); 79 | shake.update(label); 80 | let mut reader = shake.finalize_xof(); 81 | let mut uniform_bytes_vec = Vec::new(); 82 | for _ in 0..n { 83 | let mut uniform_bytes = [0u8; 32]; 84 | reader.read_exact(&mut uniform_bytes).unwrap(); 85 | uniform_bytes_vec.push(uniform_bytes); 86 | } 87 | let ck_proj: Vec<$name::Point> = (0..n) 88 | .into_par_iter() 89 | .map(|i| { 90 | let hash = $name::Point::hash_to_curve("from_uniform_bytes"); 91 | hash(&uniform_bytes_vec[i]) 92 | }) 93 | .collect(); 94 | 95 | let num_threads = rayon::current_num_threads(); 96 | if ck_proj.len() > num_threads { 97 | let chunk = (ck_proj.len() as f64 / num_threads as f64).ceil() as usize; 98 | (0..num_threads) 99 | .into_par_iter() 100 | .flat_map(|i| { 101 | let start = i * chunk; 102 | let end = if i == num_threads - 1 { 103 | ck_proj.len() 104 | } else { 105 | core::cmp::min((i + 1) * chunk, ck_proj.len()) 106 | }; 107 | if end > start { 108 | let mut ck = vec![$name::Affine::identity(); end - start]; 109 | ::batch_normalize(&ck_proj[start..end], &mut ck); 110 | ck 111 | } else { 112 | vec![] 113 | } 114 | }) 115 | .collect() 116 | } else { 117 | let mut ck = vec![$name::Affine::identity(); n]; 118 | ::batch_normalize(&ck_proj, &mut ck); 119 | ck 120 | } 121 | } 122 | 123 | fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { 124 | let coordinates = self.to_affine().coordinates(); 125 | if coordinates.is_some().unwrap_u8() == 1 { 126 | (*coordinates.unwrap().x(), *coordinates.unwrap().y(), false) 127 | } else { 128 | (Self::Base::zero(), Self::Base::zero(), true) 129 | } 130 | } 131 | } 132 | 133 | impl PrimeFieldExt for $name::Scalar { 134 | fn from_uniform(bytes: &[u8]) -> Self { 135 | let bytes_arr: [u8; 64] = bytes.try_into().unwrap(); 136 | $name::Scalar::from_uniform_bytes(&bytes_arr) 137 | } 138 | } 139 | 140 | impl TranscriptReprTrait for $name_compressed { 141 | fn to_transcript_bytes(&self) -> Vec { 142 | self.0.to_vec() 143 | } 144 | } 145 | 146 | impl TranscriptReprTrait for $name::Scalar { 147 | fn to_transcript_bytes(&self) -> Vec { 148 | self.to_repr().to_vec() 149 | } 150 | } 151 | 152 | impl TranscriptReprTrait for $name::Affine { 153 | fn to_transcript_bytes(&self) -> Vec 
{ 154 | let (x, y, is_infinity_byte) = { 155 | let coordinates = self.coordinates(); 156 | if coordinates.is_some().unwrap_u8() == 1 { 157 | ( 158 | *coordinates.unwrap().x(), 159 | *coordinates.unwrap().y(), 160 | u8::from(false), 161 | ) 162 | } else { 163 | ($name::Base::zero(), $name::Base::zero(), u8::from(true)) 164 | } 165 | }; 166 | 167 | x.to_repr() 168 | .into_iter() 169 | .chain(y.to_repr().into_iter()) 170 | .chain(std::iter::once(is_infinity_byte)) 171 | .collect() 172 | } 173 | } 174 | }; 175 | } 176 | 177 | impl_traits!( 178 | pallas, 179 | PallasCompressedElementWrapper, 180 | "40000000000000000000000000000000224698fc0994a8dd8c46eb2100000001", 181 | "40000000000000000000000000000000224698fc094cf91b992d30ed00000001" 182 | ); 183 | 184 | impl_traits!( 185 | vesta, 186 | VestaCompressedElementWrapper, 187 | "40000000000000000000000000000000224698fc094cf91b992d30ed00000001", 188 | "40000000000000000000000000000000224698fc0994a8dd8c46eb2100000001" 189 | ); 190 | 191 | #[cfg(test)] 192 | mod tests { 193 | use ff::Field; 194 | use pasta_curves::{pallas, vesta}; 195 | use rand::thread_rng; 196 | 197 | use crate::provider::{traits::DlogGroup, util::msm::cpu_best_msm}; 198 | 199 | #[test] 200 | fn test_pallas_msm_correctness() { 201 | let npoints = 1usize << 16; 202 | let points = pallas::Point::from_label(b"test", npoints); 203 | 204 | let mut rng = thread_rng(); 205 | let scalars = (0..npoints) 206 | .map(|_| pallas::Scalar::random(&mut rng)) 207 | .collect::>(); 208 | 209 | let cpu_msm = cpu_best_msm(&points, &scalars); 210 | let gpu_msm = pallas::Point::vartime_multiscalar_mul(&scalars, &points); 211 | 212 | assert_eq!(cpu_msm, gpu_msm); 213 | } 214 | 215 | #[test] 216 | fn test_vesta_msm_correctness() { 217 | let npoints = 1usize << 16; 218 | let points = vesta::Point::from_label(b"test", npoints); 219 | 220 | let mut rng = thread_rng(); 221 | let scalars = (0..npoints) 222 | .map(|_| vesta::Scalar::random(&mut rng)) 223 | .collect::>(); 224 | 225 | let cpu_msm = cpu_best_msm(&points, &scalars); 226 | let gpu_msm = vesta::Point::vartime_multiscalar_mul(&scalars, &points); 227 | 228 | assert_eq!(cpu_msm, gpu_msm); 229 | } 230 | } 231 | -------------------------------------------------------------------------------- /src/provider/secp_secq.rs: -------------------------------------------------------------------------------- 1 | //! This module implements the Nova traits for `secp::Point`, `secp::Scalar`, `secq::Point`, `secq::Scalar`. 
2 | use crate::{ 3 | impl_traits, 4 | provider::{traits::DlogGroup, util::msm::cpu_best_msm}, 5 | traits::{Group, PrimeFieldExt, TranscriptReprTrait}, 6 | }; 7 | use digest::{ExtendableOutput, Update}; 8 | use ff::{FromUniformBytes, PrimeField}; 9 | use group::{cofactor::CofactorCurveAffine, Curve, Group as AnotherGroup}; 10 | use num_bigint::BigInt; 11 | use num_traits::Num; 12 | use pasta_curves::arithmetic::{CurveAffine, CurveExt}; 13 | use rayon::prelude::*; 14 | use sha3::Shake256; 15 | use std::io::Read; 16 | /// Re-exports that give access to the standard aliases used in the code base, for secp 17 | pub mod secp256k1 { 18 | pub use halo2curves::secp256k1::{ 19 | Fp as Base, Fq as Scalar, Secp256k1 as Point, Secp256k1Affine as Affine, 20 | Secp256k1Compressed as Compressed, 21 | }; 22 | } 23 | 24 | /// Re-exports that give access to the standard aliases used in the code base, for secq 25 | pub mod secq256k1 { 26 | pub use halo2curves::secq256k1::{ 27 | Fp as Base, Fq as Scalar, Secq256k1 as Point, Secq256k1Affine as Affine, 28 | Secq256k1Compressed as Compressed, 29 | }; 30 | } 31 | 32 | impl_traits!( 33 | secp256k1, 34 | "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 35 | "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" 36 | ); 37 | 38 | impl_traits!( 39 | secq256k1, 40 | "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 41 | "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141" 42 | ); 43 | -------------------------------------------------------------------------------- /src/provider/tests/ipa_pc.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod test { 3 | use crate::provider::ipa_pc::EvaluationEngine; 4 | use crate::provider::tests::solidity_compatibility_utils::{ 5 | compressed_commitment_to_json, ec_points_to_json, field_elements_to_json, 6 | generate_pcs_solidity_unit_test_data, 7 | }; 8 | 9 | use crate::provider::GrumpkinEngine; 10 | use group::Curve; 11 | 12 | use crate::provider::pedersen::{CommitmentKey, CommitmentKeyExtTrait}; 13 | use handlebars::Handlebars; 14 | use serde_json::json; 15 | use serde_json::{Map, Value}; 16 | 17 | static IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE: &str = " 18 | // SPDX-License-Identifier: Apache-2.0 19 | pragma solidity ^0.8.16; 20 | import \"@std/Test.sol\"; 21 | import \"src/blocks/grumpkin/Grumpkin.sol\"; 22 | import \"src/blocks/EqPolynomial.sol\"; 23 | import \"src/Utilities.sol\"; 24 | import \"src/blocks/IpaPcs.sol\"; 25 | 26 | contract IpaTest is Test { 27 | function composeIpaInput() public pure returns (InnerProductArgument.IpaInputGrumpkin memory) { 28 | Grumpkin.GrumpkinAffinePoint[] memory ck_v = new Grumpkin.GrumpkinAffinePoint[]({{ len ck_v }}); 29 | {{ #each ck_v }} ck_v[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} 30 | 31 | Grumpkin.GrumpkinAffinePoint[] memory ck_s = new Grumpkin.GrumpkinAffinePoint[]({{ len ck_s }}); 32 | {{ #each ck_s }} ck_s[{{ i }}]=Grumpkin.GrumpkinAffinePoint({{ x }}, {{y}});\n {{ /each }} 33 | 34 | uint256[] memory point = new uint256[]({{ len point }}); 35 | {{ #each point }} point[{{ i }}]={{ val }};\n {{ /each }} 36 | 37 | uint256[] memory L_vec = new uint256[]({{ len L_vec }}); 38 | {{ #each L_vec }} L_vec[{{ i }}]={{ compressed }};\n {{ /each }} 39 | 40 | uint256[] memory R_vec = new uint256[]({{ len R_vec }}); 41 | {{ #each R_vec }} R_vec[{{ i }}]={{ compressed }};\n {{ /each }} 42 | 43 | uint256 a_hat = {{ a_hat }}; 44 | 45 | // 
InnerProductInstance
46 | Grumpkin.GrumpkinAffinePoint memory commitment = Grumpkin.GrumpkinAffinePoint({{ commitment_x }}, {{ commitment_y }});
47 |
48 | uint256 eval = {{ eval }};
49 |
50 | return InnerProductArgument.IpaInputGrumpkin(ck_v, ck_s, point, L_vec, R_vec, commitment, eval, a_hat);
51 | }
52 |
53 | function testIpaGrumpkinVerification_{{ num_vars }}_Variables() public {
54 | InnerProductArgument.IpaInputGrumpkin memory input = composeIpaInput();
55 | assertTrue(InnerProductArgument.verifyGrumpkin(input, getTranscript()));
56 | }
57 |
58 | function getTranscript() public pure returns (KeccakTranscriptLib.KeccakTranscript memory) {
59 | // b\"TestEval\" in Rust
60 | uint8[] memory label = new uint8[](8);
61 | label[0] = 0x54;
62 | label[1] = 0x65;
63 | label[2] = 0x73;
64 | label[3] = 0x74;
65 | label[4] = 0x45;
66 | label[5] = 0x76;
67 | label[6] = 0x61;
68 | label[7] = 0x6c;
69 |
70 | KeccakTranscriptLib.KeccakTranscript memory keccak_transcript = KeccakTranscriptLib.instantiate(label);
71 | return keccak_transcript;
72 | }
73 | }
74 | ";
75 |
76 | // To generate Solidity unit-test:
77 | // cargo test test_solidity_compatibility_ipa --release -- --ignored --nocapture > ipa.t.sol
78 | #[test]
79 | #[ignore]
80 | fn test_solidity_compatibility_ipa() {
81 | let num_vars = 2;
82 |
83 | // Secondary part of verification is IPA over Grumpkin
84 | let (commitment, point, eval, proof, vk) =
85 | generate_pcs_solidity_unit_test_data::<_, EvaluationEngine<GrumpkinEngine>>(num_vars);
86 |
87 | let num_vars_string = format!("{}", num_vars);
88 | let eval_string = format!("{:?}", eval);
89 | let commitment_x_string = format!("{:?}", commitment.comm.to_affine().x);
90 | let commitment_y_string = format!("{:?}", commitment.comm.to_affine().y);
91 | let proof_a_hat_string = format!("{:?}", proof.a_hat);
92 |
93 | let r_vec = CommitmentKey::<GrumpkinEngine>::reinterpret_commitments_as_ck(&proof.R_vec)
94 | .expect("can't reinterpret R_vec");
95 | let l_vec = CommitmentKey::<GrumpkinEngine>::reinterpret_commitments_as_ck(&proof.L_vec)
96 | .expect("can't reinterpret L_vec");
97 |
98 | let r_vec_array = compressed_commitment_to_json::<GrumpkinEngine>(&r_vec.ck);
99 | let l_vec_array = compressed_commitment_to_json::<GrumpkinEngine>(&l_vec.ck);
100 | let point_array = field_elements_to_json::<GrumpkinEngine>(&point);
101 | let ckv_array = ec_points_to_json::<GrumpkinEngine>(&vk.ck_v.ck);
102 | let cks_array = ec_points_to_json::<GrumpkinEngine>(&vk.ck_s.ck);
103 |
104 | let mut map = Map::new();
105 | map.insert("num_vars".to_string(), Value::String(num_vars_string));
106 | map.insert("eval".to_string(), Value::String(eval_string));
107 | map.insert(
108 | "commitment_x".to_string(),
109 | Value::String(commitment_x_string),
110 | );
111 | map.insert(
112 | "commitment_y".to_string(),
113 | Value::String(commitment_y_string),
114 | );
115 | map.insert("R_vec".to_string(), Value::Array(r_vec_array));
116 | map.insert("L_vec".to_string(), Value::Array(l_vec_array));
117 | map.insert("a_hat".to_string(), Value::String(proof_a_hat_string));
118 | map.insert("point".to_string(), Value::Array(point_array));
119 | map.insert("ck_v".to_string(), Value::Array(ckv_array));
120 | map.insert("ck_s".to_string(), Value::Array(cks_array));
121 |
122 | let mut reg = Handlebars::new();
123 | reg
124 | .register_template_string("ipa.t.sol", IPA_COMPATIBILITY_UNIT_TESTING_TEMPLATE)
125 | .expect("can't register template");
126 |
127 | let solidity_unit_test_source = reg.render("ipa.t.sol", &json!(map)).expect("can't render");
128 | println!("{}", solidity_unit_test_source);
129 | }
130 | }
131 |
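// The same Handlebars pattern in miniature: register a template string, then render
// it against a `serde_json` map. This standalone sketch is illustrative only; the
// template text and module name are not part of the fixture above.
#[cfg(test)]
mod render_sketch {
  use handlebars::Handlebars;
  use serde_json::json;

  #[test]
  fn renders_a_fixture_line() {
    let mut reg = Handlebars::new();
    reg
      .register_template_string("line", "uint256 eval = {{ eval }};")
      .expect("can't register template");
    // `render` substitutes each `{{ key }}` with the corresponding JSON value
    let rendered = reg
      .render("line", &json!({ "eval": "0x2a" }))
      .expect("can't render");
    assert_eq!(rendered, "uint256 eval = 0x2a;");
  }
}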
-------------------------------------------------------------------------------- /src/provider/tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod ipa_pc; 2 | 3 | #[cfg(test)] 4 | pub mod solidity_compatibility_utils { 5 | use crate::provider::traits::DlogGroup; 6 | use crate::spartan::polys::multilinear::MultilinearPolynomial; 7 | use crate::traits::{ 8 | commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine, 9 | }; 10 | use group::prime::PrimeCurve; 11 | use group::prime::PrimeCurveAffine; 12 | use group::GroupEncoding; 13 | use rand::rngs::StdRng; 14 | use serde_json::{Map, Value}; 15 | use std::sync::Arc; 16 | 17 | pub(crate) fn generate_pcs_solidity_unit_test_data>( 18 | num_vars: usize, 19 | ) -> ( 20 | >::Commitment, 21 | Vec, 22 | E::Scalar, 23 | EE::EvaluationArgument, 24 | EE::VerifierKey, 25 | ) { 26 | use rand_core::SeedableRng; 27 | 28 | let mut rng = StdRng::seed_from_u64(num_vars as u64); 29 | 30 | let (poly, point, eval) = 31 | crate::provider::util::test_utils::random_poly_with_eval::(num_vars, &mut rng); 32 | 33 | // Mock commitment key. 34 | let ck = E::CE::setup(b"test", 1 << num_vars); 35 | let ck_arc = Arc::new(ck.clone()); 36 | // Commits to the provided vector using the provided generators. 37 | let commitment = E::CE::commit(&ck_arc, poly.evaluations()); 38 | 39 | let (proof, vk) = prove_verify_solidity::(ck_arc, &commitment, &poly, &point, &eval); 40 | 41 | (commitment, point, eval, proof, vk) 42 | } 43 | 44 | fn prove_verify_solidity>( 45 | ck: Arc<<::CE as CommitmentEngineTrait>::CommitmentKey>, 46 | commitment: &<::CE as CommitmentEngineTrait>::Commitment, 47 | poly: &MultilinearPolynomial<::Scalar>, 48 | point: &[::Scalar], 49 | eval: &::Scalar, 50 | ) -> (EE::EvaluationArgument, EE::VerifierKey) { 51 | use crate::traits::TranscriptEngineTrait; 52 | 53 | // Generate Prover and verifier key for given commitment key. 54 | let ock = ck.clone(); 55 | let (prover_key, verifier_key) = EE::setup(ck); 56 | 57 | // Generate proof. 58 | let mut prover_transcript = E::TE::new(b"TestEval"); 59 | let proof: EE::EvaluationArgument = EE::prove( 60 | &*ock, 61 | &prover_key, 62 | &mut prover_transcript, 63 | commitment, 64 | poly.evaluations(), 65 | point, 66 | eval, 67 | ) 68 | .unwrap(); 69 | let pcp = prover_transcript.squeeze(b"c").unwrap(); 70 | 71 | // Verify proof. 72 | let mut verifier_transcript = E::TE::new(b"TestEval"); 73 | EE::verify( 74 | &verifier_key, 75 | &mut verifier_transcript, 76 | commitment, 77 | point, 78 | eval, 79 | &proof, 80 | ) 81 | .unwrap(); 82 | let pcv = verifier_transcript.squeeze(b"c").unwrap(); 83 | 84 | // Check if the prover transcript and verifier transcript are kept in the same state. 
85 | assert_eq!(pcp, pcv);
86 |
87 | (proof, verifier_key)
88 | }
89 |
90 | pub(crate) fn field_elements_to_json<E: Engine>(field_elements: &[E::Scalar]) -> Vec<Value> {
91 | let mut value_vector = vec![];
92 | field_elements.iter().enumerate().for_each(|(i, fe)| {
93 | let mut value = Map::new();
94 | value.insert("i".to_string(), Value::String(i.to_string()));
95 | value.insert("val".to_string(), Value::String(format!("{:?}", fe)));
96 | value_vector.push(Value::Object(value));
97 | });
98 | value_vector
99 | }
100 |
101 | pub(crate) fn ec_points_to_json<E>(ec_points: &[<E::GE as PrimeCurve>::Affine]) -> Vec<Value>
102 | where
103 | E: Engine,
104 | E::GE: DlogGroup,
105 | {
106 | let mut value_vector = vec![];
107 | ec_points.iter().enumerate().for_each(|(i, ec_point)| {
108 | let mut value = Map::new();
109 | let coordinates_info = ec_point.to_curve().to_coordinates();
110 | let not_infinity = !coordinates_info.2;
111 | assert!(not_infinity);
112 | value.insert("i".to_string(), Value::String(i.to_string()));
113 | value.insert(
114 | "x".to_string(),
115 | Value::String(format!("{:?}", coordinates_info.0)),
116 | );
117 | value.insert(
118 | "y".to_string(),
119 | Value::String(format!("{:?}", coordinates_info.1)),
120 | );
121 | value_vector.push(Value::Object(value));
122 | });
123 | value_vector
124 | }
125 |
126 | pub(crate) fn compressed_commitment_to_json<E>(
127 | ec_points: &[<E::GE as PrimeCurve>::Affine],
128 | ) -> Vec<Value>
129 | where
130 | E: Engine,
131 | E::GE: DlogGroup,
132 | {
133 | let mut value_vector = vec![];
134 | ec_points.iter().enumerate().for_each(|(i, ec_point)| {
135 | let mut value = Map::new();
136 | let compressed_commitment_info = ec_point.to_curve().to_bytes();
137 | let mut data = compressed_commitment_info.as_ref().to_vec();
138 | data.reverse();
139 |
140 | value.insert("i".to_string(), Value::String(i.to_string()));
141 | value.insert(
142 | "compressed".to_string(),
143 | Value::String(format!("0x{}", hex::encode(data))),
144 | );
145 | value_vector.push(Value::Object(value));
146 | });
147 | value_vector
148 | }
149 | }
--------------------------------------------------------------------------------
/src/provider/traits.rs:
--------------------------------------------------------------------------------
1 | use crate::traits::{Group, TranscriptReprTrait};
2 | use group::prime::PrimeCurveAffine;
3 | use group::{prime::PrimeCurve, GroupEncoding};
4 | use serde::{Deserialize, Serialize};
5 | use std::fmt::Debug;
6 | use std::ops::Mul;
7 |
8 | /// A trait that defines extensions to the Group trait
9 | pub trait DlogGroup:
10 | Group<Scalar = <Self as DlogGroup>::ScalarExt>
11 | + Serialize
12 | + for<'de> Deserialize<'de>
13 | + PrimeCurve<Scalar = <Self as DlogGroup>::ScalarExt, Affine = <Self as DlogGroup>::AffineExt>
14 | {
15 | type ScalarExt;
16 | type AffineExt: Clone
17 | + Debug
18 | + Eq
19 | + Serialize
20 | + for<'de> Deserialize<'de>
21 | + Sync
22 | + Send
23 | // technical bounds, should disappear when associated_type_bounds stabilizes
24 | + Mul<Self::ScalarExt, Output = Self>
25 | + PrimeCurveAffine;
26 | type Compressed: Clone
27 | + Debug
28 | + Eq
29 | + From<<Self as GroupEncoding>::Repr>
30 | + Into<<Self as GroupEncoding>::Repr>
31 | + Serialize
32 | + for<'de> Deserialize<'de>
33 | + Sync
34 | + Send
35 | + TranscriptReprTrait<Self>;
36 |
37 | /// A method to compute a multiexponentiation
38 | fn vartime_multiscalar_mul(scalars: &[Self::ScalarExt], bases: &[Self::AffineExt]) -> Self;
39 |
40 | /// Produce a vector of group elements using a static label
41 | fn from_label(label: &'static [u8], n: usize) -> Vec<Self::AffineExt>;
42 |
43 | /// Returns the affine coordinates (x, y, infinity) for the point
44 | fn to_coordinates(&self) -> (<Self as Group>::Base, <Self as Group>::Base, bool);
45 | }
46 |
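// For intuition: any `vartime_multiscalar_mul` implementation must agree with the
// naive fold below. This helper is an illustrative reference sketch, not part of
// the crate, and leans on the `Mul` bound on `AffineExt` reconstructed above.
#[cfg(test)]
#[allow(dead_code)]
pub(crate) fn naive_msm<G: DlogGroup>(scalars: &[G::ScalarExt], bases: &[G::AffineExt]) -> G {
  use group::Group as _;
  // zip_eq panics on length mismatch, matching this crate's strict size checks
  itertools::zip_eq(scalars.iter(), bases.iter())
    .fold(G::identity(), |acc, (k, g)| acc + *g * *k)
}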
/// This implementation behaves in ways specific to the halo2curves suite of curves in: 48 | // - to_coordinates, 49 | // - vartime_multiscalar_mul, where it does not call into accelerated implementations. 50 | // A specific reimplementation exists for the pasta curves in their own module. 51 | #[macro_export] 52 | macro_rules! impl_traits { 53 | ( 54 | $name:ident, 55 | $order_str:literal, 56 | $base_str:literal 57 | ) => { 58 | $crate::impl_traits!($name, $order_str, $base_str, cpu_best_msm); 59 | }; 60 | ( 61 | $name:ident, 62 | $order_str:literal, 63 | $base_str:literal, 64 | $large_msm_method: ident 65 | ) => { 66 | // These compile-time assertions check important assumptions in the memory representation 67 | // of group data that supports the use of Abomonation. 68 | static_assertions::assert_eq_size!($name::Affine, [u64; 8]); 69 | static_assertions::assert_eq_size!($name::Point, [u64; 12]); 70 | 71 | impl Group for $name::Point { 72 | type Base = $name::Base; 73 | type Scalar = $name::Scalar; 74 | 75 | fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { 76 | let A = $name::Point::a(); 77 | let B = $name::Point::b(); 78 | let order = BigInt::from_str_radix($order_str, 16).unwrap(); 79 | let base = BigInt::from_str_radix($base_str, 16).unwrap(); 80 | 81 | (A, B, order, base) 82 | } 83 | } 84 | 85 | impl DlogGroup for $name::Point { 86 | type ScalarExt = $name::Scalar; 87 | type AffineExt = $name::Affine; 88 | // note: for halo2curves implementations, $name::Compressed == <$name::Point as GroupEncoding>::Repr 89 | // so the blanket impl From for T and impl Into apply. 90 | type Compressed = $name::Compressed; 91 | 92 | fn vartime_multiscalar_mul(scalars: &[Self::ScalarExt], bases: &[Self::AffineExt]) -> Self { 93 | #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] 94 | if scalars.len() >= 128 { 95 | $large_msm_method(bases, scalars) 96 | } else { 97 | cpu_best_msm(bases, scalars) 98 | } 99 | #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] 100 | cpu_best_msm(bases, scalars) 101 | } 102 | 103 | fn from_label(label: &'static [u8], n: usize) -> Vec { 104 | let mut shake = Shake256::default(); 105 | shake.update(label); 106 | let mut reader = shake.finalize_xof(); 107 | let mut uniform_bytes_vec = Vec::new(); 108 | for _ in 0..n { 109 | let mut uniform_bytes = [0u8; 32]; 110 | reader.read_exact(&mut uniform_bytes).unwrap(); 111 | uniform_bytes_vec.push(uniform_bytes); 112 | } 113 | let gens_proj: Vec<$name::Point> = (0..n) 114 | .into_par_iter() 115 | .map(|i| { 116 | let hash = $name::Point::hash_to_curve("from_uniform_bytes"); 117 | hash(&uniform_bytes_vec[i]) 118 | }) 119 | .collect(); 120 | 121 | let num_threads = rayon::current_num_threads(); 122 | if gens_proj.len() > num_threads { 123 | let chunk = (gens_proj.len() as f64 / num_threads as f64).ceil() as usize; 124 | (0..num_threads) 125 | .into_par_iter() 126 | .flat_map(|i| { 127 | let start = i * chunk; 128 | let end = if i == num_threads - 1 { 129 | gens_proj.len() 130 | } else { 131 | core::cmp::min((i + 1) * chunk, gens_proj.len()) 132 | }; 133 | if end > start { 134 | let mut gens = vec![$name::Affine::identity(); end - start]; 135 | ::batch_normalize(&gens_proj[start..end], &mut gens); 136 | gens 137 | } else { 138 | vec![] 139 | } 140 | }) 141 | .collect() 142 | } else { 143 | let mut gens = vec![$name::Affine::identity(); n]; 144 | ::batch_normalize(&gens_proj, &mut gens); 145 | gens 146 | } 147 | } 148 | 149 | fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { 150 | let 
coordinates = self.to_affine().coordinates(); 151 | if coordinates.is_some().unwrap_u8() == 1 && ($name::Point::identity() != *self) { 152 | (*coordinates.unwrap().x(), *coordinates.unwrap().y(), false) 153 | } else { 154 | (Self::Base::zero(), Self::Base::zero(), true) 155 | } 156 | } 157 | } 158 | 159 | impl PrimeFieldExt for $name::Scalar { 160 | fn from_uniform(bytes: &[u8]) -> Self { 161 | let bytes_arr: [u8; 64] = bytes.try_into().unwrap(); 162 | $name::Scalar::from_uniform_bytes(&bytes_arr) 163 | } 164 | } 165 | 166 | impl TranscriptReprTrait for $name::Compressed { 167 | fn to_transcript_bytes(&self) -> Vec { 168 | self.as_ref().to_vec() 169 | } 170 | } 171 | 172 | impl TranscriptReprTrait for $name::Scalar { 173 | fn to_transcript_bytes(&self) -> Vec { 174 | self.to_repr().to_vec() 175 | } 176 | } 177 | 178 | impl TranscriptReprTrait for $name::Affine { 179 | fn to_transcript_bytes(&self) -> Vec { 180 | let (x, y, is_infinity_byte) = { 181 | let coordinates = self.coordinates(); 182 | if coordinates.is_some().unwrap_u8() == 1 && ($name::Affine::identity() != *self) { 183 | let c = coordinates.unwrap(); 184 | (*c.x(), *c.y(), u8::from(false)) 185 | } else { 186 | ($name::Base::zero(), $name::Base::zero(), u8::from(false)) 187 | } 188 | }; 189 | 190 | x.to_repr() 191 | .into_iter() 192 | .chain(y.to_repr().into_iter()) 193 | .chain(std::iter::once(is_infinity_byte)) 194 | .collect() 195 | } 196 | } 197 | }; 198 | } 199 | -------------------------------------------------------------------------------- /src/provider/util/fb_msm.rs: -------------------------------------------------------------------------------- 1 | /// # Fixed-base Scalar Multiplication 2 | /// 3 | /// This module provides an implementation of fixed-base scalar multiplication on elliptic curves. 4 | /// 5 | /// The multiplication is optimized through a windowed method, where scalars are broken into fixed-size 6 | /// windows, pre-computation tables are generated, and results are efficiently combined. 7 | use ff::{PrimeField, PrimeFieldBits}; 8 | use group::{prime::PrimeCurve, Curve}; 9 | 10 | use rayon::prelude::*; 11 | 12 | /// Determines the window size for scalar multiplication based on the number of scalars. 13 | /// 14 | /// This is used to balance between pre-computation and number of point additions. 15 | pub(crate) fn get_mul_window_size(num_scalars: usize) -> usize { 16 | if num_scalars < 32 { 17 | 3 18 | } else { 19 | (num_scalars as f64).ln().ceil() as usize 20 | } 21 | } 22 | 23 | /// Generates a table of multiples of a base point `g` for use in windowed scalar multiplication. 24 | /// 25 | /// This pre-computes multiples of a base point for each window and organizes them 26 | /// into a table for quick lookup during the scalar multiplication process. The table is a vector 27 | /// of vectors, each inner vector corresponding to a window and containing the multiples of `g` 28 | /// for that window. 
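///
/// For example, with `scalar_size = 8` and `window = 3` there are
/// `outerc = ceil(8 / 3) = 3` windows; the first two tables hold `2^3 = 8`
/// multiples each, while the last holds only `2^(8 - 2*3) = 4`, matching
/// `last_in_window` below.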
29 | pub(crate) fn get_window_table<T>(
30 | scalar_size: usize,
31 | window: usize,
32 | g: T,
33 | ) -> Vec<Vec<T::AffineRepr>>
34 | where
35 | T: Curve,
36 | T::AffineRepr: Send,
37 | {
38 | let in_window = 1 << window;
39 | // Number of outer iterations needed to cover the entire scalar
40 | let outerc = (scalar_size + window - 1) / window;
41 |
42 | // Number of multiples of the window's "outer point" needed for each window (fewer for the last window)
43 | let last_in_window = 1 << (scalar_size - (outerc - 1) * window);
44 |
45 | let mut multiples_of_g = vec![vec![T::identity(); in_window]; outerc];
46 |
47 | // Compute the multiples of g for each window
48 | // g_outers = [ 2^{k*window}*g for k in 0..outerc]
49 | let mut g_outer = g;
50 | let mut g_outers = Vec::with_capacity(outerc);
51 | for _ in 0..outerc {
52 | g_outers.push(g_outer);
53 | for _ in 0..window {
54 | g_outer = g_outer.double();
55 | }
56 | }
57 | multiples_of_g
58 | .par_iter_mut()
59 | .enumerate()
60 | .zip_eq(g_outers)
61 | .for_each(|((outer, multiples_of_g), g_outer)| {
62 | let cur_in_window = if outer == outerc - 1 {
63 | last_in_window
64 | } else {
65 | in_window
66 | };
67 |
68 | // multiples_of_g = [id, g_outer, 2*g_outer, 3*g_outer, ...],
69 | // where g_outer = 2^{outer*window}*g
70 | let mut g_inner = T::identity();
71 | for inner in multiples_of_g.iter_mut().take(cur_in_window) {
72 | *inner = g_inner;
73 | g_inner.add_assign(&g_outer);
74 | }
75 | });
76 | multiples_of_g
77 | .par_iter()
78 | .map(|s| s.iter().map(|s| s.to_affine()).collect())
79 | .collect()
80 | }
81 |
82 | /// Performs the actual windowed scalar multiplication using a pre-computed table of points.
83 | ///
84 | /// Given a scalar and a table of pre-computed multiples of a base point, this function
85 | /// efficiently computes the scalar multiplication by breaking the scalar into windows and
86 | /// adding the corresponding multiples from the table.
87 | fn windowed_mul<T>(
88 | outerc: usize,
89 | window: usize,
90 | multiples_of_g: &[Vec<T::AffineRepr>],
91 | scalar: &T::Scalar,
92 | ) -> T
93 | where
94 | T: PrimeCurve,
95 | T::Scalar: PrimeFieldBits,
96 | {
97 | let modulus_size = <T::Scalar as PrimeField>::NUM_BITS as usize;
98 | let scalar_val: Vec<bool> = scalar.to_le_bits().into_iter().collect();
99 |
100 | let mut res = T::identity();
101 | for outer in 0..outerc {
102 | let mut inner = 0usize;
103 | for i in 0..window {
104 | if outer * window + i < modulus_size && scalar_val[outer * window + i] {
105 | inner |= 1 << i;
106 | }
107 | }
108 | res.add_assign(&multiples_of_g[outer][inner]);
109 | }
110 | res
111 | }
112 |
113 | /// Computes multiple scalar multiplications simultaneously using the windowed method.
114 | pub(crate) fn multi_scalar_mul<T>(
115 | scalar_size: usize,
116 | window: usize,
117 | table: &[Vec<T::AffineRepr>],
118 | v: &[T::Scalar],
119 | ) -> Vec<T>
120 | where
121 | T: PrimeCurve,
122 | T::Scalar: PrimeFieldBits,
123 | {
124 | let outerc = (scalar_size + window - 1) / window;
125 | assert!(outerc <= table.len());
126 |
127 | v.par_iter()
128 | .map(|e| windowed_mul::<T>(outerc, window, table, e))
129 | .collect::<Vec<T>>()
130 | }
--------------------------------------------------------------------------------
/src/provider/util/mod.rs:
--------------------------------------------------------------------------------
1 | //! Utilities for provider module.
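//!
//! For instance, `field::batch_invert` inverts a whole vector with a single field
//! inversion plus multiplications, and errors on any zero entry (illustrative):
//!
//! ```ignore
//! let inv = field::batch_invert(vec![F::from(2u64), F::from(4u64)])?;
//! assert_eq!(inv[0], F::from(2u64).invert().unwrap());
//! ```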
2 | pub(in crate::provider) mod fb_msm;
3 | pub mod msm {
4 | use halo2curves::msm::best_multiexp;
5 | use halo2curves::CurveAffine;
6 |
7 | // this argument swap is useful until Rust gets named arguments
8 | // and saves significant complexity in macro code
9 | pub fn cpu_best_msm<C: CurveAffine>(bases: &[C], scalars: &[C::Scalar]) -> C::Curve {
10 | best_multiexp(scalars, bases)
11 | }
12 | }
13 |
14 | pub mod field {
15 | use crate::errors::NovaError;
16 | use ff::{BatchInverter, Field};
17 |
18 | #[inline]
19 | pub fn batch_invert<F: Field>(mut v: Vec<F>) -> Result<Vec<F>, NovaError> {
20 | // we only allocate the scratch space if every element of v is nonzero
21 | let mut scratch_space = v
22 | .iter()
23 | .map(|x| {
24 | if !x.is_zero_vartime() {
25 | Ok(*x)
26 | } else {
27 | Err(NovaError::InternalError)
28 | }
29 | })
30 | .collect::<Result<Vec<_>, _>>()?;
31 | let _ = BatchInverter::invert_with_external_scratch(&mut v, &mut scratch_space[..]);
32 | Ok(v)
33 | }
34 | }
35 |
36 | pub mod iterators {
37 | use ff::Field;
38 | use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
39 | use rayon_scan::ScanParallelIterator;
40 | use std::iter::DoubleEndedIterator;
41 | use std::{
42 | borrow::Borrow,
43 | ops::{AddAssign, MulAssign},
44 | };
45 |
46 | pub trait DoubleEndedIteratorExt: DoubleEndedIterator {
47 | /// This function employs Horner's scheme and core traits to create a combination of an iterator input with the powers
48 | /// of a provided coefficient.
49 | fn rlc<T, F>(&mut self, coefficient: &F) -> T
50 | where
51 | T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T>,
52 | Self::Item: Borrow<T>,
53 | {
54 | let mut iter = self.rev();
55 | let Some(fst) = iter.next() else {
56 | panic!("input iterator should not be empty")
57 | };
58 |
59 | iter.fold(fst.borrow().clone(), |mut acc, item| {
60 | acc *= coefficient;
61 | acc += item.borrow();
62 | acc
63 | })
64 | }
65 | }
66 |
67 | impl<I: DoubleEndedIterator> DoubleEndedIteratorExt for I {}
68 |
69 | pub trait IndexedParallelIteratorExt: IndexedParallelIterator {
70 | /// This function uses core traits to create a combination of an iterator input with the powers
71 | /// of a provided coefficient.
72 | fn rlc<T, F>(self, coefficient: &F) -> T
73 | where
74 | F: Field,
75 | Self::Item: Borrow<T>,
76 | T: Clone + for<'a> MulAssign<&'a F> + for<'r> AddAssign<&'r T> + Send + Sync,
77 | {
78 | debug_assert!(self.len() > 0);
79 | // generate an iterator of powers of the right length
80 | let v = {
81 | let mut v = vec![*coefficient; self.len()];
82 | v[0] = F::ONE;
83 | v
84 | };
85 | // the collect is due to Scan being unindexed
86 | let powers: Vec<_> = v.into_par_iter().scan(|a, b| *a * *b, F::ONE).collect();
87 |
88 | self
89 | .zip_eq(powers.into_par_iter())
90 | .map(|(pt, val)| {
91 | let mut pt = pt.borrow().clone();
92 | pt *= &val;
93 | pt
94 | })
95 | .reduce_with(|mut a, b| {
96 | a += &b;
97 | a
98 | })
99 | .unwrap()
100 | }
101 | }
102 |
103 | impl<I: IndexedParallelIterator> IndexedParallelIteratorExt for I {}
104 | }
105 |
106 | #[cfg(test)]
107 | pub mod test_utils {
108 | //! Contains utilities for testing and benchmarking.
109 | use crate::spartan::polys::multilinear::MultilinearPolynomial;
110 | use crate::traits::{
111 | commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine,
112 | };
113 | use ff::Field;
114 | use rand::rngs::StdRng;
115 | use rand_core::{CryptoRng, RngCore};
116 | use std::sync::Arc;
117 |
118 | /// Returns a random polynomial and a point, and calculates the polynomial's evaluation at that point.
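///
/// An illustrative call (any `Engine` `E`, with a seeded rng):
///
/// ```ignore
/// let mut rng = StdRng::seed_from_u64(3);
/// let (poly, point, eval) = random_poly_with_eval::<E, _>(3, &mut rng);
/// assert_eq!(poly.evaluate(&point), eval);
/// ```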
#[cfg(test)]
pub mod test_utils {
  //! Contains utilities for testing and benchmarking.
  use crate::spartan::polys::multilinear::MultilinearPolynomial;
  use crate::traits::{
    commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine,
  };
  use ff::Field;
  use rand::rngs::StdRng;
  use rand_core::{CryptoRng, RngCore};
  use std::sync::Arc;

  /// Returns a random polynomial and a point, and computes the polynomial's evaluation at that point.
  pub(crate) fn random_poly_with_eval<E: Engine, R: RngCore + CryptoRng>(
    num_vars: usize,
    mut rng: &mut R,
  ) -> (
    MultilinearPolynomial<<E as Engine>::Scalar>,
    Vec<<E as Engine>::Scalar>,
    <E as Engine>::Scalar,
  ) {
    // Generate random polynomial and point.
    let poly = MultilinearPolynomial::random(num_vars, &mut rng);
    let point = (0..num_vars)
      .map(|_| <E as Engine>::Scalar::random(&mut rng))
      .collect::<Vec<_>>();

    // Compute the evaluation of the polynomial at the point.
    let eval = poly.evaluate(&point);

    (poly, point, eval)
  }

  /// Methods used to test the prove and verify flow of [`MultilinearPolynomial`] Commitment Schemes
  /// (PCS).
  ///
  /// Generates a random polynomial and point from a seed to test a proving/verifying flow of one
  /// of our [`EvaluationEngine`]s.
  pub(crate) fn prove_verify_from_num_vars<E: Engine, EE: EvaluationEngineTrait<E>>(
    num_vars: usize,
  ) {
    use rand_core::SeedableRng;

    let mut rng = StdRng::seed_from_u64(num_vars as u64);

    let (poly, point, eval) = random_poly_with_eval::<E, StdRng>(num_vars, &mut rng);

    // Mock commitment key.
    let ck = E::CE::setup(b"test", 1 << num_vars);
    let ck = Arc::new(ck);
    // Commits to the provided vector using the provided generators.
    let commitment = E::CE::commit(&ck, poly.evaluations());

    prove_verify_with::<E, EE>(ck, &commitment, &poly, &point, &eval, true)
  }

  fn prove_verify_with<E: Engine, EE: EvaluationEngineTrait<E>>(
    ck: Arc<<<E as Engine>::CE as CommitmentEngineTrait<E>>::CommitmentKey>,
    commitment: &<<E as Engine>::CE as CommitmentEngineTrait<E>>::Commitment,
    poly: &MultilinearPolynomial<<E as Engine>::Scalar>,
    point: &[<E as Engine>::Scalar],
    eval: &<E as Engine>::Scalar,
    evaluate_bad_proof: bool,
  ) {
    use crate::traits::TranscriptEngineTrait;
    use std::ops::Add;

    // Generate prover and verifier keys for the given commitment key.
    let ock = ck.clone();
    let (prover_key, verifier_key) = EE::setup(ck);

    // Generate proof.
    let mut prover_transcript = E::TE::new(b"TestEval");
    let proof = EE::prove(
      &*ock,
      &prover_key,
      &mut prover_transcript,
      commitment,
      poly.evaluations(),
      point,
      eval,
    )
    .unwrap();
    let pcp = prover_transcript.squeeze(b"c").unwrap();

    // Verify proof.
    let mut verifier_transcript = E::TE::new(b"TestEval");
    EE::verify(
      &verifier_key,
      &mut verifier_transcript,
      commitment,
      point,
      eval,
      &proof,
    )
    .unwrap();
    let pcv = verifier_transcript.squeeze(b"c").unwrap();

    // Check if the prover transcript and verifier transcript are kept in the same state.
    assert_eq!(pcp, pcv);

    if evaluate_bad_proof {
      // Generate another point to verify proof. Also produce eval.
      let altered_verifier_point = point
        .iter()
        .map(|s| s.add(<E as Engine>::Scalar::ONE))
        .collect::<Vec<_>>();
      let altered_verifier_eval =
        MultilinearPolynomial::evaluate_with(poly.evaluations(), &altered_verifier_point);

      // Verify proof, should fail.
      let mut verifier_transcript = E::TE::new(b"TestEval");
      assert!(EE::verify(
        &verifier_key,
        &mut verifier_transcript,
        commitment,
        &altered_verifier_point,
        &altered_verifier_eval,
        &proof,
      )
      .is_err());
    }
  }
}
-------------------------------------------------------------------------------- /src/r1cs/util.rs: --------------------------------------------------------------------------------
use ff::PrimeField;
use group::Group;
#[cfg(not(target_arch = "wasm32"))]
use proptest::prelude::*;

/// Wrapper struct around a field element that implements additional traits
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FWrap<F: PrimeField>(pub F);

impl<F: PrimeField> Copy for FWrap<F> {}

#[cfg(not(target_arch = "wasm32"))]
/// Trait implementation for generating `FWrap` instances with proptest
impl<F: PrimeField> Arbitrary for FWrap<F> {
  type Parameters = ();
  type Strategy = BoxedStrategy<Self>;

  fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
    use rand::rngs::StdRng;
    use rand_core::SeedableRng;

    let strategy = any::<[u8; 32]>()
      .prop_map(|seed| Self(F::random(StdRng::from_seed(seed))))
      .no_shrink();
    strategy.boxed()
  }
}

/// Wrapper struct around a Group element that implements additional traits
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct GWrap<G: Group>(pub G);

impl<G: Group> Copy for GWrap<G> {}

#[cfg(not(target_arch = "wasm32"))]
/// Trait implementation for generating `GWrap` instances with proptest
impl<G: Group> Arbitrary for GWrap<G> {
  type Parameters = ();
  type Strategy = BoxedStrategy<Self>;

  fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
    use rand::rngs::StdRng;
    use rand_core::SeedableRng;

    let strategy = any::<[u8; 32]>()
      .prop_map(|seed| Self(G::random(StdRng::from_seed(seed))))
      .no_shrink();
    strategy.boxed()
  }
}
-------------------------------------------------------------------------------- /src/spartan/macros.rs: --------------------------------------------------------------------------------
/// Macros to give syntactic sugar for zipWith pattern and variants.
///
/// ```ignore
/// use crate::spartan::zip_with;
/// use itertools::Itertools as _; // we use zip_eq to zip!
/// let v = vec![0, 1, 2];
/// let w = vec![2, 3, 4];
/// let y = vec![4, 5, 6];
///
/// // Using the `zip_with!` macro to zip three iterators together and apply a closure
/// // that sums the elements of each iterator.
/// let res = zip_with!((v.iter(), w.iter(), y.iter()), |a, b, c| a + b + c)
///   .collect::<Vec<_>>();
///
/// println!("{:?}", res); // Output: [6, 9, 12]
/// ```

#[macro_export]
macro_rules! zip_with {
  // no iterator projection specified: the macro assumes the arguments *are* iterators
  // ```ignore
  // zip_with!((iter1, iter2, iter3), |a, b, c| a + b + c) ->
  //   iter1.zip_eq(iter2.zip_eq(iter3)).map(|(a, (b, c))| a + b + c)
  // ```
  //
  // iterator projection specified: use it on each argument
  // ```ignore
  // zip_with!(par_iter, (vec1, vec2, vec3), |a, b, c| a + b + c) ->
  //   vec1.par_iter().zip_eq(vec2.par_iter().zip_eq(vec3.par_iter())).map(|(a, (b, c))| a + b + c)
  // ```
  ($($f:ident,)? ($e:expr $(, $rest:expr)*), $($move:ident)? |$($i:ident),+ $(,)?| $($work:tt)*) => {{
    $crate::zip_with!($($f,)? ($e $(, $rest)*), map, $($move)? |$($i),+| $($work)*)
  }};
  // no iterator projection specified: the macro assumes the arguments *are* iterators
  // optional zipping function specified as well: use it instead of map
  // ```ignore
  // zip_with!((iter1, iter2, iter3), for_each, |a, b, c| a + b + c) ->
  //   iter1.zip_eq(iter2.zip_eq(iter3)).for_each(|(a, (b, c))| a + b + c)
  // ```
  //
  //
  // iterator projection specified: use it on each argument
  // optional zipping function specified as well: use it instead of map
  // ```ignore
  // zip_with!(par_iter, (vec1, vec2, vec3), for_each, |a, b, c| a + b + c) ->
  //   vec1.par_iter().zip_eq(vec2.par_iter().zip_eq(vec3.par_iter())).for_each(|(a, (b, c))| a + b + c)
  // ```
  ($($f:ident,)? ($e:expr $(, $rest:expr)*), $worker:ident, $($move:ident,)? |$($i:ident),+ $(,)?| $($work:tt)*) => {{
    $crate::zip_all!($($f,)? ($e $(, $rest)*))
      .$worker($($move)? |$crate::nested_idents!($($i),+)| {
        $($work)*
      })
  }};
}

/// Like `zip_with` but use `for_each` instead of `map`.
#[macro_export]
macro_rules! zip_with_for_each {
  // no iterator projection specified: the macro assumes the arguments *are* iterators
  // ```ignore
  // zip_with_for_each!((iter1, iter2, iter3), |a, b, c| a + b + c) ->
  //   iter1.zip_eq(iter2.zip_eq(iter3)).for_each(|(a, (b, c))| a + b + c)
  // ```
  //
  // iterator projection specified: use it on each argument
  // ```ignore
  // zip_with_for_each!(par_iter, (vec1, vec2, vec3), |a, b, c| a + b + c) ->
  //   vec1.par_iter().zip_eq(vec2.par_iter().zip_eq(vec3.par_iter())).for_each(|(a, (b, c))| a + b + c)
  // ```
  ($($f:ident,)? ($e:expr $(, $rest:expr)*), $($move:ident)? |$($i:ident),+ $(,)?| $($work:tt)*) => {{
    $crate::zip_with!($($f,)? ($e $(, $rest)*), for_each, $($move)? |$($i),+| $($work)*)
  }};
}

// Foldright-like nesting for idents (a, b, c) -> (a, (b, c))
#[doc(hidden)]
#[macro_export]
macro_rules! nested_idents {
  ($a:ident, $b:ident) => {
    ($a, $b)
  };
  ($first:ident, $($rest:ident),+) => {
    ($first, $crate::nested_idents!($($rest),+))
  };
}

// Fold-right like zipping, with an optional function `f` to apply to each argument
#[doc(hidden)]
#[macro_export]
macro_rules! zip_all {
  (($e:expr,)) => {
    $e
  };
  ($f:ident, ($e:expr,)) => {
    $e.$f()
  };
  ($f:ident, ($first:expr, $second:expr $(, $rest:expr)*)) => {
    ($first.$f().zip_eq($crate::zip_all!($f, ($second, $( $rest),*))))
  };
  (($first:expr, $second:expr $(, $rest:expr)*)) => {
    ($first.zip_eq($crate::zip_all!(($second, $( $rest),*))))
  };
}
-------------------------------------------------------------------------------- /src/spartan/math.rs: --------------------------------------------------------------------------------
pub trait Math {
  fn log_2(self) -> usize;
}

impl Math for usize {
  fn log_2(self) -> usize {
    assert_ne!(self, 0);

    if self.is_power_of_two() {
      (1usize.leading_zeros() - self.leading_zeros()) as Self
    } else {
      (0usize.leading_zeros() - self.leading_zeros()) as Self
    }
  }
}
-------------------------------------------------------------------------------- /src/spartan/polys/eq.rs: --------------------------------------------------------------------------------
//! `EqPolynomial`: Represents multilinear extension of equality polynomials, evaluated based on binary input values.
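// Editor's note (illustrative, not part of the original source): for r = [r0, r1],
// `evals_from_points` below produces the table
//   [ (1-r0)(1-r1), (1-r0)*r1, r0*(1-r1), r0*r1 ],
// i.e. entry i equals eq(bits(i), r) with i read as a big-endian bit string; this is
// why the test below expects index 5 = 0b101 to be the only 1 for r = [1, 0, 1].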

use ff::PrimeField;
use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator};

/// Represents the multilinear extension polynomial (MLE) of the equality polynomial $eq(x,e)$, denoted as $\tilde{eq}(x, e)$.
///
/// The polynomial is defined by the formula:
/// $$
/// \tilde{eq}(x, e) = \prod_{i=1}^m(e_i * x_i + (1 - e_i) * (1 - x_i))
/// $$
///
/// Each element in the vector `r` corresponds to a component $e_i$, representing a bit from the binary representation of an input value $e$.
/// This polynomial evaluates to 1 if every component $x_i$ equals its corresponding $e_i$, and 0 otherwise.
///
/// For instance, for e = 6 (with a binary representation of 0b110), the vector r would be [1, 1, 0].
#[derive(Debug)]
pub struct EqPolynomial<Scalar> {
  pub(in crate::spartan) r: Vec<Scalar>,
}

impl<Scalar: PrimeField> EqPolynomial<Scalar> {
  /// Creates a new `EqPolynomial` from a vector of Scalars `r`.
  ///
  /// Each Scalar in `r` corresponds to a bit from the binary representation of an input value `e`.
  pub const fn new(r: Vec<Scalar>) -> Self {
    Self { r }
  }

  /// Evaluates the `EqPolynomial` at a given point `rx`.
  ///
  /// This function computes the value of the polynomial at the point specified by `rx`.
  /// It expects `rx` to have the same length as the internal vector `r`.
  ///
  /// Panics if `rx` and `r` have different lengths.
  pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
    assert_eq!(self.r.len(), rx.len());
    (0..rx.len())
      .map(|i| self.r[i] * rx[i] + (Scalar::ONE - self.r[i]) * (Scalar::ONE - rx[i]))
      .product()
  }

  /// Evaluates the `EqPolynomial` at all the `2^|r|` points in its domain.
  ///
  /// Returns a vector of Scalars, each corresponding to the polynomial evaluation at a specific point.
  #[must_use = "this returns an expensive vector and leaves self unchanged"]
  pub fn evals(&self) -> Vec<Scalar> {
    Self::evals_from_points(&self.r)
  }

  /// Evaluates the `EqPolynomial` from the `2^|r|` points in its domain, without creating an intermediate polynomial
  /// representation.
  ///
  /// Returns a vector of Scalars, each corresponding to the polynomial evaluation at a specific point.
  pub fn evals_from_points(r: &[Scalar]) -> Vec<Scalar> {
    let ell = r.len();
    let mut evals: Vec<Scalar> = vec![Scalar::ZERO; (2_usize).pow(ell as u32)];
    let mut size = 1;
    evals[0] = Scalar::ONE;

    for r in r.iter().rev() {
      let (evals_left, evals_right) = evals.split_at_mut(size);
      let (evals_right, _) = evals_right.split_at_mut(size);

      evals_left
        .par_iter_mut()
        .zip_eq(evals_right.par_iter_mut())
        .for_each(|(x, y)| {
          *y = *x * r;
          *x -= &*y;
        });

      size *= 2;
    }

    evals
  }
}

impl<Scalar: PrimeField> FromIterator<Scalar> for EqPolynomial<Scalar> {
  fn from_iter<I: IntoIterator<Item = Scalar>>(iter: I) -> Self {
    let r: Vec<_> = iter.into_iter().collect();
    Self { r }
  }
}

#[cfg(test)]
mod tests {
  use crate::provider;

  use super::*;
  use pasta_curves::Fp;

  fn test_eq_polynomial_with<F: PrimeField>() {
    let eq_poly = EqPolynomial::<F>::new(vec![F::ONE, F::ZERO, F::ONE]);
    let y = eq_poly.evaluate(vec![F::ONE, F::ONE, F::ONE].as_slice());
    assert_eq!(y, F::ZERO);

    let y = eq_poly.evaluate(vec![F::ONE, F::ZERO, F::ONE].as_slice());
    assert_eq!(y, F::ONE);

    let eval_list = eq_poly.evals();
    for (i, &coeff) in eval_list.iter().enumerate().take((2_usize).pow(3)) {
      if i == 5 {
        assert_eq!(coeff, F::ONE);
      } else {
        assert_eq!(coeff, F::ZERO);
      }
    }
  }

  #[test]
  fn test_eq_polynomial() {
    test_eq_polynomial_with::<Fp>();
    test_eq_polynomial_with::<provider::bn256_grumpkin::bn256::Scalar>();
    test_eq_polynomial_with::<provider::secp_secq::secp256k1::Scalar>();
  }
}
-------------------------------------------------------------------------------- /src/spartan/polys/identity.rs: --------------------------------------------------------------------------------
use core::marker::PhantomData;
use ff::PrimeField;

pub struct IdentityPolynomial<Scalar: PrimeField> {
  ell: usize,
  _p: PhantomData<Scalar>,
}

impl<Scalar: PrimeField> IdentityPolynomial<Scalar> {
  pub fn new(ell: usize) -> Self {
    Self {
      ell,
      _p: PhantomData,
    }
  }

  pub fn evaluate(&self, r: &[Scalar]) -> Scalar {
    assert_eq!(self.ell, r.len());
    let mut power_of_two = 1_u64;
    (0..self.ell)
      .rev()
      .map(|i| {
        let result = Scalar::from(power_of_two) * r[i];
        power_of_two *= 2;
        result
      })
      .sum()
  }
}
-------------------------------------------------------------------------------- /src/spartan/polys/masked_eq.rs: --------------------------------------------------------------------------------
//! `MaskedEqPolynomial`: Represents the `eq` polynomial over n variables, where the first 2^m entries are 0.

use crate::spartan::polys::eq::EqPolynomial;
use ff::PrimeField;
use itertools::zip_eq;

/// Represents the multilinear extension polynomial (MLE) of the equality polynomial $eqₘ(x,r)$
/// over n variables, where the first 2^m evaluations are 0.
///
/// The polynomial is defined by the formula:
/// eqₘ(x,r) = eq(x,r) - ( ∏_{0 ≤ i < n-m} (1−rᵢ)(1−xᵢ) )⋅( ∏_{n-m ≤ i < n} (1−rᵢ)(1−xᵢ) + rᵢ⋅xᵢ )
#[derive(Debug)]
pub struct MaskedEqPolynomial<'a, Scalar> {
  eq: &'a EqPolynomial<Scalar>,
  num_masked_vars: usize,
}

impl<'a, Scalar: PrimeField> MaskedEqPolynomial<'a, Scalar> {
  /// Creates a new `MaskedEqPolynomial` from a vector of Scalars `r` of size n, with the number of
  /// masked variables m = `num_masked_vars`.
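  // Editor's note (illustrative, not part of the original source): with n = 2 variables
  // and m = 1 masked variable, r = [r0, r1] yields the evaluation table
  //   [ 0, 0, r0*(1-r1), r0*r1 ],
  // i.e. the ordinary eq table with its first 2^m = 2 entries zeroed out.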
  pub const fn new(eq: &'a EqPolynomial<Scalar>, num_masked_vars: usize) -> Self {
    MaskedEqPolynomial {
      eq,
      num_masked_vars,
    }
  }

  /// Evaluates the `MaskedEqPolynomial` at a given point `rx`.
  ///
  /// This function computes the value of the polynomial at the point specified by `rx`.
  /// It expects `rx` to have the same length as the internal vector `r`.
  ///
  /// Panics if `rx` and `r` have different lengths.
  pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
    let r = &self.eq.r;
    assert_eq!(r.len(), rx.len());
    let split_idx = r.len() - self.num_masked_vars;

    let (r_lo, r_hi) = r.split_at(split_idx);
    let (rx_lo, rx_hi) = rx.split_at(split_idx);
    let eq_lo = zip_eq(r_lo, rx_lo)
      .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx))
      .product::<Scalar>();
    let eq_hi = zip_eq(r_hi, rx_hi)
      .map(|(r, rx)| *r * rx + (Scalar::ONE - r) * (Scalar::ONE - rx))
      .product::<Scalar>();
    let mask_lo = zip_eq(r_lo, rx_lo)
      .map(|(r, rx)| (Scalar::ONE - r) * (Scalar::ONE - rx))
      .product::<Scalar>();

    (eq_lo - mask_lo) * eq_hi
  }

  /// Evaluates the `MaskedEqPolynomial` at all the `2^|r|` points in its domain.
  ///
  /// Returns a vector of Scalars, each corresponding to the polynomial evaluation at a specific point.
  pub fn evals(&self) -> Vec<Scalar> {
    Self::evals_from_points(&self.eq.r, self.num_masked_vars)
  }

  /// Evaluates the `MaskedEqPolynomial` from the `2^|r|` points in its domain, without creating an intermediate polynomial
  /// representation.
  ///
  /// Returns a vector of Scalars, each corresponding to the polynomial evaluation at a specific point.
  fn evals_from_points(r: &[Scalar], num_masked_vars: usize) -> Vec<Scalar> {
    let mut evals = EqPolynomial::evals_from_points(r);

    // replace the first 2^m evaluations with 0
    let num_masked_evals = 1 << num_masked_vars;
    evals[..num_masked_evals]
      .iter_mut()
      .for_each(|e| *e = Scalar::ZERO);

    evals
  }
}

#[cfg(test)]
mod tests {
  use crate::provider;

  use super::*;
  use crate::spartan::polys::eq::EqPolynomial;
  use pasta_curves::Fp;
  use rand_chacha::ChaCha20Rng;
  use rand_core::{CryptoRng, RngCore, SeedableRng};

  fn test_masked_eq_polynomial_with<F: PrimeField, R: RngCore + CryptoRng>(
    num_vars: usize,
    num_masked_vars: usize,
    mut rng: &mut R,
  ) {
    let num_masked_evals = 1 << num_masked_vars;

    // random point
    let r = std::iter::from_fn(|| Some(F::random(&mut rng)))
      .take(num_vars)
      .collect::<Vec<_>>();
    // evaluation point
    let rx = std::iter::from_fn(|| Some(F::random(&mut rng)))
      .take(num_vars)
      .collect::<Vec<_>>();

    let poly_eq = EqPolynomial::new(r);
    let poly_eq_evals = poly_eq.evals();

    let masked_eq_poly = MaskedEqPolynomial::new(&poly_eq, num_masked_vars);
    let masked_eq_poly_evals = masked_eq_poly.evals();

    // ensure the first 2^m entries are 0
    assert_eq!(
      masked_eq_poly_evals[..num_masked_evals],
      vec![F::ZERO; num_masked_evals]
    );
    // ensure the remaining evaluations match eq(r)
    assert_eq!(
      masked_eq_poly_evals[num_masked_evals..],
      poly_eq_evals[num_masked_evals..]
    );

    // compute the evaluation at rx succinctly
    let masked_eq_eval = masked_eq_poly.evaluate(&rx);

    // compute the evaluation as an MLE
    let rx_evals = EqPolynomial::evals_from_points(&rx);
    let expected_masked_eq_eval = zip_eq(rx_evals, masked_eq_poly_evals)
      .map(|(rx, r)| rx * r)
      .sum();

    assert_eq!(masked_eq_eval, expected_masked_eq_eval);
  }

  #[test]
  fn test_masked_eq_polynomial() {
    let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
    let num_vars = 5;
    let num_masked_vars = 2;
    test_masked_eq_polynomial_with::<Fp, _>(num_vars, num_masked_vars, &mut rng);
    test_masked_eq_polynomial_with::<provider::bn256_grumpkin::bn256::Scalar, _>(
      num_vars,
      num_masked_vars,
      &mut rng,
    );
    test_masked_eq_polynomial_with::<provider::secp_secq::secp256k1::Scalar, _>(
      num_vars,
      num_masked_vars,
      &mut rng,
    );
  }
}
-------------------------------------------------------------------------------- /src/spartan/polys/mod.rs: --------------------------------------------------------------------------------
//! This module contains the definitions of polynomial types used in the Spartan SNARK.
pub(crate) mod eq;
pub(crate) mod identity;
pub(crate) mod masked_eq;
pub mod multilinear;
pub(crate) mod power;
pub(crate) mod univariate;
-------------------------------------------------------------------------------- /src/spartan/polys/power.rs: --------------------------------------------------------------------------------
//! `PowPolynomial`: Represents multilinear extension of power polynomials

use crate::spartan::polys::eq::EqPolynomial;
use ff::PrimeField;
use std::iter::successors;

/// Represents the multilinear extension polynomial (MLE) of the equality polynomial $pow(x,t)$, denoted as $\tilde{pow}(x, t)$.
///
/// The polynomial is defined by the formula:
/// $$
/// \tilde{power}(x, t) = \prod_{i=1}^m(1 + (t^{2^i} - 1) * x_i)
/// $$
pub struct PowPolynomial<Scalar: PrimeField> {
  eq: EqPolynomial<Scalar>,
}

impl<Scalar: PrimeField> PowPolynomial<Scalar> {
  /// Creates a new `PowPolynomial` from a Scalar `t`.
  pub fn new(t: &Scalar, ell: usize) -> Self {
    // t_pow = [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}]
    let t_pow = Self::squares(t, ell);

    Self {
      eq: EqPolynomial::new(t_pow),
    }
  }

  /// Creates the following powers of `t`:
  /// [t^{2^0}, t^{2^1}, ..., t^{2^{ell-1}}]
  pub fn squares(t: &Scalar, ell: usize) -> Vec<Scalar> {
    successors(Some(*t), |p: &Scalar| Some(p.square()))
      .take(ell)
      .collect::<Vec<_>>()
  }

  /// Creates the evals corresponding to a `PowPolynomial` from an already-existing vector of powers.
  /// `powers.len() >= ell` must hold.
  pub(crate) fn evals_with_powers(powers: &[Scalar], ell: usize) -> Vec<Scalar> {
    let t_pow = powers[..ell].to_vec();
    EqPolynomial::evals_from_points(&t_pow)
  }

  /// Evaluates the `PowPolynomial` at a given point `rx`.
  ///
  /// This function computes the value of the polynomial at the point specified by `rx`.
  /// It expects `rx` to have the same length as the internal vector `t_pow`.
  ///
  /// Panics if `rx` and `t_pow` have different lengths.
  pub fn evaluate(&self, rx: &[Scalar]) -> Scalar {
    self.eq.evaluate(rx)
  }

  /// Returns the underlying coordinates, i.e. the powers of `t`.
  pub fn coordinates(self) -> Vec<Scalar> {
    self.eq.r
  }

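  // Editor's note (illustrative, not part of the original source): `squares(&t, 4)`
  // above returns [t, t^2, t^4, t^8]; each entry is the square of its predecessor,
  // so the powers t^(2^i) are produced with ell - 1 squarings rather than generic
  // exponentiations.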
  /// Evaluates the `PowPolynomial` at all the `2^|t_pow|` points in its domain.
  ///
  /// Returns a vector of Scalars, each corresponding to the polynomial evaluation at a specific point.
  pub fn evals(&self) -> Vec<Scalar> {
    self.eq.evals()
  }
}

impl<Scalar: PrimeField> From<PowPolynomial<Scalar>> for EqPolynomial<Scalar> {
  fn from(polynomial: PowPolynomial<Scalar>) -> Self {
    polynomial.eq
  }
}
-------------------------------------------------------------------------------- /src/supernova/error.rs: --------------------------------------------------------------------------------
//! This module defines errors returned by the library.
use core::fmt::Debug;
use thiserror::Error;

use crate::errors::NovaError;

/// Errors returned by Nova
#[derive(Debug, Eq, PartialEq, Error)]
pub enum SuperNovaError {
  /// Nova error
  #[error("NovaError")]
  NovaError(#[from] NovaError),
  /// missing commitment key
  #[error("MissingCK")]
  MissingCK,
  /// Extended error for supernova
  #[error("UnSatIndex")]
  UnSatIndex(&'static str, usize),
}
-------------------------------------------------------------------------------- /src/supernova/utils.rs: --------------------------------------------------------------------------------
use bellpepper_core::{
  boolean::{AllocatedBit, Boolean},
  num::AllocatedNum,
  ConstraintSystem, LinearCombination, SynthesisError,
};
use ff::PrimeField;
use itertools::Itertools as _;

use crate::{
  constants::NIO_NOVA_FOLD,
  gadgets::{conditionally_select_alloc_relaxed_r1cs, AllocatedRelaxedR1CSInstance},
  traits::Engine,
};

/// Return the element of `a` given by the indicator bit in `selector_vec`.
///
/// This function assumes `selector_vec` has been properly constrained, i.e., that exactly one entry is equal to 1.
//
// NOTE: When `a` has more than 5 elements (estimated), it will be cheaper to use a multicase gadget.
//
// We should plan to rely on a well-designed gadget offering a common interface but that adapts its implementation based
// on the size of inputs (known at synthesis time). The threshold size depends on the size of the elements of `a`. The
// larger the elements, the fewer are needed before multicase becomes cost-effective.
pub fn get_from_vec_alloc_relaxed_r1cs<E: Engine, CS: ConstraintSystem<<E as Engine>::Base>>(
  mut cs: CS,
  a: &[AllocatedRelaxedR1CSInstance<E, NIO_NOVA_FOLD>],
  selector_vec: &[Boolean],
) -> Result<AllocatedRelaxedR1CSInstance<E, NIO_NOVA_FOLD>, SynthesisError> {
  assert_eq!(a.len(), selector_vec.len());

  // Compare all instances in `a` to the first one
  let first: AllocatedRelaxedR1CSInstance<E, NIO_NOVA_FOLD> = a
    .first()
    .cloned()
    .ok_or_else(|| SynthesisError::IncompatibleLengthVector("empty vec length".to_string()))?;

  // Since `selector_vec` is correct, only one entry is 1.
  // If selector_vec[0] is 1, then all `conditionally_select` will return `first`.
  // Otherwise, the correct instance will be selected.
  // TODO: reformulate when iterator_try_reduce stabilizes
  let selected = a
    .iter()
    .zip_eq(selector_vec.iter())
    .enumerate()
    .skip(1)
    .try_fold(first, |matched, (i, (candidate, equal_bit))| {
      conditionally_select_alloc_relaxed_r1cs(
        cs.namespace(|| format!("next_matched_allocated-{:?}", i)),
        candidate,
        &matched,
        equal_bit,
      )
    })?;

  Ok(selected)
}

/// Compute a selector vector `s` of size `num_indices`, such that
/// `s[i] == 1` if i == `target_index` and 0 otherwise.
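// Editor's note (illustrative, not part of the original source): for num_indices = 4
// and target_index = 2, the allocated selector is [0, 0, 1, 0]. The two constraints
// below suffice because each entry is already boolean-constrained by `AllocatedBit`:
// sum(selector[i]) = 1 forces exactly one entry to be 1, and
// sum(i * selector[i]) = target_index pins down which one; a target outside
// 0..num_indices makes the system unsatisfiable, as the tests below check.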
pub fn get_selector_vec_from_index<F: PrimeField, CS: ConstraintSystem<F>>(
  mut cs: CS,
  target_index: &AllocatedNum<F>,
  num_indices: usize,
) -> Result<Vec<Boolean>, SynthesisError> {
  assert_ne!(num_indices, 0);

  // Compute the selector vector non-deterministically
  let selector = (0..num_indices)
    .map(|idx| {
      // b <- idx == target_index
      Ok(Boolean::Is(AllocatedBit::alloc(
        cs.namespace(|| format!("allocate s_{:?}", idx)),
        target_index.get_value().map(|v| v == F::from(idx as u64)),
      )?))
    })
    .collect::<Result<Vec<_>, SynthesisError>>()?;

  // Enforce ∑ selector[i] = 1
  {
    let selected_sum = selector.iter().fold(LinearCombination::zero(), |lc, bit| {
      lc + &bit.lc(CS::one(), F::ONE)
    });
    cs.enforce(
      || "exactly-one-selection",
      |_| selected_sum,
      |lc| lc + CS::one(),
      |lc| lc + CS::one(),
    );
  }

  // Enforce `target_index - ∑ i * selector[i] = 0`
  {
    let selected_value = selector
      .iter()
      .enumerate()
      .fold(LinearCombination::zero(), |lc, (i, bit)| {
        lc + &bit.lc(CS::one(), F::from(i as u64))
      });
    cs.enforce(
      || "target_index - ∑ i * selector[i] = 0",
      |lc| lc,
      |lc| lc,
      |lc| lc + target_index.get_variable() - &selected_value,
    );
  }

  Ok(selector)
}

#[cfg(test)]
mod test {
  use crate::provider::PallasEngine;

  use super::*;
  use bellpepper_core::test_cs::TestConstraintSystem;
  use pasta_curves::pallas::Base;

  #[test]
  fn test_get_from_vec_alloc_relaxed_r1cs_bounds() {
    let n = 3;
    for selected in 0..(2 * n) {
      let mut cs = TestConstraintSystem::<Base>::new();

      let allocated_target = AllocatedNum::alloc_infallible(&mut cs.namespace(|| "target"), || {
        Base::from(selected as u64)
      });

      let selector_vec = get_selector_vec_from_index(&mut cs, &allocated_target, n).unwrap();

      let vec = (0..n)
        .map(|i| {
          AllocatedRelaxedR1CSInstance::<PallasEngine, NIO_NOVA_FOLD>::default(
            &mut cs.namespace(|| format!("elt-{i}")),
            4,
            64,
          )
          .unwrap()
        })
        .collect::<Vec<_>>();

      get_from_vec_alloc_relaxed_r1cs(&mut cs.namespace(|| "test-fn"), &vec, &selector_vec)
        .unwrap();

      if selected < n {
        assert!(cs.is_satisfied())
      } else {
        // If selected is out of range, the circuit must be unsatisfied.
        assert!(!cs.is_satisfied())
      }
    }
  }

  #[test]
  fn test_get_selector() {
    for n in 1..4 {
      for selected in 0..(2 * n) {
        let mut cs = TestConstraintSystem::<Base>::new();

        let allocated_target =
          AllocatedNum::alloc_infallible(&mut cs.namespace(|| "target"), || {
            Base::from(selected as u64)
          });

        let selector_vec = get_selector_vec_from_index(&mut cs, &allocated_target, n).unwrap();

        if selected < n {
          // Check that the selector bits are correct
          assert_eq!(selector_vec.len(), n);
          for (i, bit) in selector_vec.iter().enumerate() {
            assert_eq!(bit.get_value().unwrap(), i == selected);
          }

          assert!(cs.is_satisfied());
        } else {
          // If selected is out of range, the circuit must be unsatisfied.
          assert!(!cs.is_satisfied());
        }
      }
    }
  }
}
-------------------------------------------------------------------------------- /src/traits/circuit.rs: --------------------------------------------------------------------------------
//! This module defines traits that a step function must implement
use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError};
use core::marker::PhantomData;
use ff::PrimeField;

/// A helper trait for a step of the incremental computation (i.e., circuit for F)
pub trait StepCircuit<F: PrimeField>: Send + Sync + Clone {
  /// Return the number of inputs or outputs of each step
  /// (this method is called only at circuit synthesis time)
  /// `synthesize` and `output` methods are expected to take as
  /// input a vector of size equal to arity and output a vector of size equal to arity
  fn arity(&self) -> usize;

  /// Synthesize the circuit for a computation step and return variable
  /// that corresponds to the output of the step `z_{i+1}`
  fn synthesize<CS: ConstraintSystem<F>>(
    &self,
    cs: &mut CS,
    z: &[AllocatedNum<F>],
  ) -> Result<Vec<AllocatedNum<F>>, SynthesisError>;
}

/// A trivial step circuit that simply returns the input
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct TrivialCircuit<F: PrimeField> {
  _p: PhantomData<F>,
}

impl<F: PrimeField> StepCircuit<F> for TrivialCircuit<F> {
  fn arity(&self) -> usize {
    1
  }

  fn synthesize<CS: ConstraintSystem<F>>(
    &self,
    _cs: &mut CS,
    z: &[AllocatedNum<F>],
  ) -> Result<Vec<AllocatedNum<F>>, SynthesisError> {
    Ok(z.to_vec())
  }
}
-------------------------------------------------------------------------------- /src/traits/commitment.rs: --------------------------------------------------------------------------------
//! This module defines a collection of traits that define the behavior of a commitment engine
//! We require the commitment engine to provide a commitment to vectors with a single group element
use crate::{
  errors::NovaError,
  traits::{AbsorbInROTrait, Engine, TranscriptReprTrait},
};
use abomonation::Abomonation;
use core::{
  fmt::Debug,
  ops::{Add, Mul, MulAssign},
};
use serde::{Deserialize, Serialize};

/// A helper trait for types implementing scalar multiplication.
pub trait ScalarMul<Rhs, Output = Self>: Mul<Rhs, Output = Output> + MulAssign<Rhs> {}

impl<T, Rhs, Output> ScalarMul<Rhs, Output> for T where T: Mul<Rhs, Output = Output> + MulAssign<Rhs>
{}

/// This trait defines the behavior of the commitment
pub trait CommitmentTrait<E: Engine>:
  Clone
  + Copy
  + Debug
  + Default
  + PartialEq
  + Eq
  + Send
  + Sync
  + TranscriptReprTrait<E::GE>
  + Serialize
  + for<'de> Deserialize<'de>
  + Abomonation
  + AbsorbInROTrait<E>
  + Add<Self, Output = Self>
  + ScalarMul<E::Scalar>
{
  /// Holds the type of the compressed commitment
  type CompressedCommitment: Clone
    + Debug
    + PartialEq
    + Eq
    + Send
    + Sync
    + TranscriptReprTrait<E::GE>
    + Serialize
    + for<'de> Deserialize<'de>;

  /// Compresses self into a compressed commitment
  fn compress(&self) -> Self::CompressedCommitment;

  /// Returns the coordinate representation of the commitment
  fn to_coordinates(&self) -> (E::Base, E::Base, bool);

  /// Decompresses a compressed commitment into a commitment
  fn decompress(c: &Self::CompressedCommitment) -> Result<Self, NovaError>;
}

/// A trait that helps determine the length of a structure.
/// Note this does not impose any memory representation constraints on the structure.
pub trait Len {
  /// Returns the length of the structure.
  fn length(&self) -> usize;
}

/// A trait that ties different pieces of the commitment generation together
pub trait CommitmentEngineTrait<E: Engine>: Clone + Send + Sync {
  /// Holds the type of the commitment key
  /// The key should quantify its length in terms of group generators.
  type CommitmentKey: Len
    + Clone
    + PartialEq
    + Debug
    + Send
    + Sync
    + Serialize
    + for<'de> Deserialize<'de>
    + Abomonation;

  /// Holds the type of the commitment
  type Commitment: CommitmentTrait<E>;

  /// Samples a new commitment key of a specified size
  fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey;

  /// Commits to the provided vector using the provided generators
  fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment;
}
-------------------------------------------------------------------------------- /src/traits/evaluation.rs: --------------------------------------------------------------------------------
//! This module defines a collection of traits that define the behavior of a polynomial evaluation engine
//! A vector of size N is treated as a multilinear polynomial in \log{N} variables,
//! and a commitment provided by the commitment engine is treated as a multilinear polynomial commitment
use std::sync::Arc;

use crate::{
  errors::NovaError,
  traits::{commitment::CommitmentEngineTrait, Engine},
};
use serde::{Deserialize, Serialize};

/// A trait that ties different pieces of the commitment evaluation together
pub trait EvaluationEngineTrait<E: Engine>: Clone + Send + Sync {
  /// A type that holds the prover key
  type ProverKey: Send + Sync;

  /// A type that holds the verifier key
  type VerifierKey: Send
    + Sync
    // required for easy Digest computation purposes, could be relaxed to
    // [`crate::digest::Digestible`]
    + Serialize;

  /// A type that holds the evaluation argument
  type EvaluationArgument: Clone + Send + Sync + Serialize + for<'de> Deserialize<'de>;

  /// A method to perform any additional setup needed to produce proofs of evaluations
  ///
  /// **Note:** This method should be cheap and should not copy most of the
  /// commitment key. Look at `CommitmentEngineTrait::setup` for generating SRS data.
  fn setup(
    ck: Arc<<<E as Engine>::CE as CommitmentEngineTrait<E>>::CommitmentKey>,
  ) -> (Self::ProverKey, Self::VerifierKey);

  /// A method to prove the evaluation of a multilinear polynomial
  fn prove(
    ck: &<<E as Engine>::CE as CommitmentEngineTrait<E>>::CommitmentKey,
    pk: &Self::ProverKey,
    transcript: &mut E::TE,
    comm: &<<E as Engine>::CE as CommitmentEngineTrait<E>>::Commitment,
    poly: &[E::Scalar],
    point: &[E::Scalar],
    eval: &E::Scalar,
  ) -> Result<Self::EvaluationArgument, NovaError>;

  /// A method to verify the purported evaluation of a multilinear polynomial
  fn verify(
    vk: &Self::VerifierKey,
    transcript: &mut E::TE,
    comm: &<<E as Engine>::CE as CommitmentEngineTrait<E>>::Commitment,
    point: &[E::Scalar],
    eval: &E::Scalar,
    arg: &Self::EvaluationArgument,
  ) -> Result<(), NovaError>;
}
-------------------------------------------------------------------------------- /src/traits/mod.rs: --------------------------------------------------------------------------------
//! This module defines various traits required by the users of the library to implement.
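// Editor's note (illustrative, not part of the original source): as a concrete
// instance of the cycle-pairing traits below, `crate::provider` equips the pasta
// engines with each other, so `Dual<PallasEngine> = VestaEngine` and
// `Dual<Dual<PallasEngine>> = PallasEngine`.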
use crate::errors::NovaError;
use abomonation::Abomonation;
use bellpepper_core::{boolean::AllocatedBit, num::AllocatedNum, ConstraintSystem, SynthesisError};
use core::fmt::Debug;
use ff::{PrimeField, PrimeFieldBits};
use num_bigint::BigInt;
use serde::{Deserialize, Serialize};

pub mod commitment;

use commitment::CommitmentEngineTrait;

/// Represents an element of a group
/// This is currently tailored for an elliptic curve group
pub trait Group: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq {
  /// A type representing an element of the base field of the group
  type Base: PrimeFieldBits + Serialize + for<'de> Deserialize<'de>;

  /// A type representing an element of the scalar field of the group
  type Scalar: PrimeFieldBits + PrimeFieldExt + Send + Sync + Serialize + for<'de> Deserialize<'de>;

  /// Returns A, B, the order of the group, the size of the base field as big integers
  fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt);
}

/// A collection of engines that are required by the library
pub trait Engine: Clone + Copy + Debug + Send + Sync + Sized + Eq + PartialEq {
  /// A type representing an element of the base field of the group
  type Base: PrimeFieldBits + TranscriptReprTrait<Self::GE> + Serialize + for<'de> Deserialize<'de>;

  /// A type representing an element of the scalar field of the group
  type Scalar: PrimeFieldBits
    + PrimeFieldExt
    + Send
    + Sync
    + TranscriptReprTrait<Self::GE>
    + Serialize
    + for<'de> Deserialize<'de>;

  /// A type that represents an element of the group
  type GE: Group<Base = Self::Base, Scalar = Self::Scalar> + Serialize + for<'de> Deserialize<'de>;

  /// A type that represents a circuit-friendly sponge that consumes elements
  /// from the base field and squeezes out elements of the scalar field
  type RO: ROTrait<Self::Base, Self::Scalar>;

  /// An alternate implementation of `Self::RO` in the circuit model
  type ROCircuit: ROCircuitTrait<Self::Base>;

  /// A type that provides a generic Fiat-Shamir transcript to be used when externalizing proofs
  type TE: TranscriptEngineTrait<Self>;

  /// A type that defines a commitment engine over scalars in the group
  type CE: CommitmentEngineTrait<Self>;
}

/// This is a convenience trait to pair engines whose fields are in a curve cycle relationship
pub trait CurveCycleEquipped: Engine {
  /// The secondary `Engine` of `Self`
  type Secondary: Engine<Base = <Self as Engine>::Scalar, Scalar = <Self as Engine>::Base>;
}

/// Convenience projection to the secondary `Engine` of a `CurveCycleEquipped`
pub type Dual<E> = <E as CurveCycleEquipped>::Secondary;

/// A helper trait to absorb different objects in RO
pub trait AbsorbInROTrait<E: Engine> {
  /// Absorbs the value in the provided RO
  fn absorb_in_ro(&self, ro: &mut E::RO);
}

/// A helper trait that defines the behavior of a hash function that we use as an RO
pub trait ROTrait<Base: PrimeField, Scalar: PrimeField> {
  /// The circuit alter ego of this trait impl - this constrains it to use the same constants
  type CircuitRO: ROCircuitTrait<Base, Constants = Self::Constants>;

  /// A type representing constants/parameters associated with the hash function
  type Constants: Debug
    + Default
    + Clone
    + PartialEq
    + Send
    + Sync
    + Serialize
    + for<'de> Deserialize<'de>
    + Abomonation;

  /// Initializes the hash function
  fn new(constants: Self::Constants, num_absorbs: usize) -> Self;

  /// Adds a scalar to the internal state
  fn absorb(&mut self, e: Base);

  /// Returns a challenge of `num_bits` by hashing the internal state
  fn squeeze(&mut self, num_bits: usize) -> Scalar;
}

/// A helper trait that defines the behavior of a hash function that we use as an RO in the circuit model
pub trait ROCircuitTrait<Base: PrimeField> {
  /// the vanilla alter ego of this trait - this constrains it to use the same constants
  type NativeRO<T: PrimeField>: ROTrait<Base, T, Constants = Self::Constants>;

  /// A type representing constants/parameters associated with the hash function on this Base field
  type Constants: Debug
    + Default
    + Clone
    + PartialEq
    + Send
    + Sync
    + Serialize
    + for<'de> Deserialize<'de>
    + Abomonation;

  /// Initializes the hash function
  fn new(constants: Self::Constants, num_absorbs: usize) -> Self;

  /// Adds a scalar to the internal state
  fn absorb(&mut self, e: &AllocatedNum<Base>);

  /// Returns a challenge of `num_bits` by hashing the internal state
  fn squeeze<CS: ConstraintSystem<Base>>(
    &mut self,
    cs: CS,
    num_bits: usize,
  ) -> Result<Vec<AllocatedBit>, SynthesisError>;
}

/// An alias for constants associated with `E::RO`
pub type ROConstants<E> =
  <<E as Engine>::RO as ROTrait<<E as Engine>::Base, <E as Engine>::Scalar>>::Constants;

/// An alias for constants associated with `E::ROCircuit`
pub type ROConstantsCircuit<E> =
  <<E as Engine>::ROCircuit as ROCircuitTrait<<E as Engine>::Base>>::Constants;

/// This trait allows types to implement how they want to be added to `TranscriptEngine`
pub trait TranscriptReprTrait<G: Group>: Send + Sync {
  /// returns a byte representation of self to be added to the transcript
  fn to_transcript_bytes(&self) -> Vec<u8>;
}

/// This trait defines the behavior of a transcript engine compatible with Spartan
pub trait TranscriptEngineTrait<E: Engine>: Send + Sync {
  /// initializes the transcript
  fn new(label: &'static [u8]) -> Self;

  /// returns a scalar element of the group as a challenge
  fn squeeze(&mut self, label: &'static [u8]) -> Result<E::Scalar, NovaError>;

  /// absorbs any type that implements `TranscriptReprTrait` under a label
  fn absorb<T: TranscriptReprTrait<E::GE>>(&mut self, label: &'static [u8], o: &T);

  /// adds a domain separator
  fn dom_sep(&mut self, bytes: &'static [u8]);
}

/// Defines additional methods on `PrimeField` objects
pub trait PrimeFieldExt: PrimeField {
  /// Returns a scalar representing the bytes
  fn from_uniform(bytes: &[u8]) -> Self;
}

impl<G: Group, T: TranscriptReprTrait<G>> TranscriptReprTrait<G> for &[T] {
  fn to_transcript_bytes(&self) -> Vec<u8> {
    self
      .iter()
      .flat_map(|t| t.to_transcript_bytes())
      .collect::<Vec<u8>>()
  }
}

pub mod circuit;
pub mod evaluation;
pub mod snark;
-------------------------------------------------------------------------------- /src/traits/snark.rs: --------------------------------------------------------------------------------
//! This module defines a collection of traits that define the behavior of a `zkSNARK` for `RelaxedR1CS`
use crate::{
  errors::NovaError,
  r1cs::{R1CSShape, RelaxedR1CSInstance, RelaxedR1CSWitness},
  traits::Engine,
  CommitmentKey,
};

use serde::{Deserialize, Serialize};
use std::sync::Arc;

/// Public parameter creation takes a size hint. This size hint carries the particular requirements of
/// the final compressing SNARK the user expects to use with these public parameters; the below
/// is a sensible default, which is to not require any more bases than the usual (the maximum of the number of
/// variables and constraints of the involved R1CS circuit).
pub fn default_ck_hint<E: Engine>() -> Box<dyn for<'a> Fn(&'a R1CSShape<E>) -> usize> {
  // The default is to not put an additional floor on the size of the commitment key
  Box::new(|_shape: &R1CSShape<E>| 0)
}

/// A trait that defines the behavior of a `zkSNARK`
pub trait RelaxedR1CSSNARKTrait<E: Engine>:
  Send + Sync + Serialize + for<'de> Deserialize<'de>
{
  /// A type that represents the prover's key
  type ProverKey: Send + Sync;

  /// A type that represents the verifier's key
  type VerifierKey: Send + Sync + Serialize;

  /// This associated function (not a method) provides a hint that offers
  /// a minimum sizing cue for the commitment key used by this SNARK
  /// implementation. The commitment key passed in setup should then
  /// be at least as large as this hint.
  fn ck_floor() -> Box<dyn for<'a> Fn(&'a R1CSShape<E>) -> usize> {
    // The default is to not put an additional floor on the size of the commitment key
    default_ck_hint()
  }

  /// Produces the keys for the prover and the verifier
  fn setup(
    ck: Arc<CommitmentKey<E>>,
    S: &R1CSShape<E>,
  ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>;

  /// Produces a new SNARK for a relaxed R1CS
  fn prove(
    ck: &CommitmentKey<E>,
    pk: &Self::ProverKey,
    S: &R1CSShape<E>,
    U: &RelaxedR1CSInstance<E>,
    W: &RelaxedR1CSWitness<E>,
  ) -> Result<Self, NovaError>;

  /// Verifies a SNARK for a relaxed R1CS
  fn verify(&self, vk: &Self::VerifierKey, U: &RelaxedR1CSInstance<E>) -> Result<(), NovaError>;
}

/// A trait that defines the behavior of a `zkSNARK` to prove knowledge of satisfying witness to batches of relaxed R1CS instances.
pub trait BatchedRelaxedR1CSSNARKTrait<E: Engine>:
  Send + Sync + Serialize + for<'de> Deserialize<'de>
{
  /// A type that represents the prover's key
  type ProverKey: Send + Sync;

  /// A type that represents the verifier's key
  type VerifierKey: Send + Sync + DigestHelperTrait<E>;

  /// This associated function (not a method) provides a hint that offers
  /// a minimum sizing cue for the commitment key used by this SNARK
  /// implementation. The commitment key passed in setup should then
  /// be at least as large as this hint.
  fn ck_floor() -> Box<dyn for<'a> Fn(&'a R1CSShape<E>) -> usize> {
    default_ck_hint()
  }

  /// Produces the keys for the prover and the verifier
  ///
  /// **Note:** This method should be cheap and should not copy most of the
  /// commitment key. Look at `CommitmentEngineTrait::setup` for generating SRS data.
  fn setup(
    ck: Arc<CommitmentKey<E>>,
    S: Vec<&R1CSShape<E>>,
  ) -> Result<(Self::ProverKey, Self::VerifierKey), NovaError>;

  /// Produces a new SNARK for a batch of relaxed R1CS
  fn prove(
    ck: &CommitmentKey<E>,
    pk: &Self::ProverKey,
    S: Vec<&R1CSShape<E>>,
    U: &[RelaxedR1CSInstance<E>],
    W: &[RelaxedR1CSWitness<E>],
  ) -> Result<Self, NovaError>;

  /// Verifies a SNARK for a batch of relaxed R1CS
  fn verify(&self, vk: &Self::VerifierKey, U: &[RelaxedR1CSInstance<E>]) -> Result<(), NovaError>;
}

/// A helper trait that defines the behavior of a verifier key of `zkSNARK`
pub trait DigestHelperTrait<E: Engine> {
  /// Returns the digest of the verifier's key
  fn digest(&self) -> E::Scalar;
}
--------------------------------------------------------------------------------