├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ └── bounty.md └── workflows │ ├── artifacts.yaml │ ├── release.yaml │ └── test.yml ├── .gitignore ├── .mocharc.json ├── .rustfmt.toml ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── builds ├── target_256b │ ├── http_verification_256b.circom │ ├── json_extraction_256b.circom │ └── plaintext_authentication_256b.circom └── target_512b │ ├── http_verification_512b.circom │ ├── json_extraction_512b.circom │ └── plaintext_authentication_512b.circom ├── circomkit.json ├── circuits.json ├── circuits ├── chacha20 │ ├── authentication.circom │ ├── chacha-qr.circom │ ├── chacha-round.circom │ └── chacha20.circom ├── http │ ├── machine.circom │ ├── parser.circom │ └── verification.circom ├── json │ ├── extraction.circom │ ├── hash_machine.circom │ ├── language.circom │ ├── machine.circom │ └── parser.circom ├── test │ ├── chacha20 │ │ ├── authentication.test.ts │ │ └── chacha20.test.ts │ ├── common │ │ ├── chacha.ts │ │ ├── http.ts │ │ ├── index.ts │ │ └── poseidon.ts │ ├── full │ │ ├── full.test.ts │ │ └── testCase.test.ts │ ├── http │ │ ├── parser.test.ts │ │ └── verification.test.ts │ ├── json │ │ ├── extraction.test.ts │ │ ├── index.ts │ │ ├── parser.test.ts │ │ ├── parsing_types.test.ts │ │ ├── stack.test.ts │ │ └── values.test.ts │ └── utils │ │ ├── array.test.ts │ │ ├── hash.test.ts │ │ └── operators.test.ts └── utils │ ├── array.circom │ ├── bits.circom │ ├── functions.circom │ ├── hash.circom │ └── operators.circom ├── create-pp ├── Cargo.toml └── src │ └── main.rs ├── docs ├── http.md ├── images │ ├── v0.7.0.png │ ├── v0.7.5.jpg │ └── v0.9.0.jpg └── json.md ├── examples ├── http │ ├── get_request.http │ ├── get_response.http │ ├── github_response.http │ ├── large_request.http │ ├── large_response.http │ ├── post_request.http │ ├── reddit_request.http │ ├── spotify_top_artists_request.http │ └── spotify_top_artists_response.http └── json │ ├── array_only.json │ ├── binance.json │ ├── empty.json │ ├── primitives.json │ ├── primitives_array.json │ ├── reddit.json │ ├── spotify.json │ ├── string_escape.json │ ├── value_array.json │ ├── value_array_object.json │ ├── value_object.json │ └── venmo.json ├── package-lock.json ├── package.json ├── rust-toolchain.toml ├── tsconfig.json └── witness-generator ├── Cargo.toml └── src ├── error.rs ├── http ├── mod.rs └── parser.rs ├── json ├── mod.rs └── parser.rs ├── lib.rs └── mock.rs /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bounty.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bounty 3 | about: Create a bounty for contributors 4 | title: '' 5 | labels: 'bounty' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Bounty description** 11 | 12 | A clear and concise description of the project; should be accessible to a contributor with minimal context. 13 | 14 | **Implementation requirements** 15 | 16 | A clear and comprehensive list of the requirements for the bounty to be considered complete. 17 | 18 | - [ ] Task 1 19 | - Subtasks (if relevant) 20 | - [ ] Task 2 21 | - Subtasks (if relevant) 22 | - [ ] Task 3 23 | - Subtasks (if relevant) 24 | 25 | **Bonus features** 26 | 27 | Any additional features that will enhance the value of the bounty. 
28 | 
29 | - [ ] Bonus Feature 1
30 | - [ ] Bonus Feature 2
31 | 
32 | **Resources**
33 | 
34 | A list of potentially-useful resources. This list should contain fewer than 5 total resources.
35 | 
36 | **Criteria**
37 | 
38 | For timely submissions of bounties that meet the implementation requirements, a bounty of at least $250 will be awarded to the contributor. Additional bounty amounts are available and will be awarded based on the following criteria:
39 | 
40 | 1. Completion of any of the bonus features listed above and/or other bonus features that improve the quality of the submission.
41 | 2. Correctness and security: A thorough review of the implementation should convince our team that it is correct and secure, with all requirements met.
42 | 3. Code clarity and quality: Succinct, easy-to-follow code with appropriate naming conventions. Utilize Rust's type system for flexibility and security (e.g., compile-time checks where possible), and avoid external crates. Optimizations should be a lower priority than clarity, but can be included behind a feature flag as a bonus.
43 | 4. Documentation quality: Provide comprehensive READMEs, Cargo docs, and inline comments where the code itself is not self-explanatory. Prioritize clarity and readability.
44 | 
--------------------------------------------------------------------------------
/.github/workflows/artifacts.yaml:
--------------------------------------------------------------------------------
1 | name: build-artifacts
2 | 
3 | concurrency:
4 |   group: ${{ github.workflow }}-${{ github.ref }}
5 |   cancel-in-progress: true
6 | 
7 | on:
8 |   pull_request:
9 |     branches: [ "main" ]
10 | 
11 | jobs:
12 |   check-version:
13 |     name: Check package.json version update
14 |     runs-on: ubuntu-latest
15 | 
16 |     steps:
17 |       - name: Checkout code
18 |         uses: actions/checkout@v4
19 |         with:
20 |           fetch-depth: 2
21 | 
22 |       - name: Fetch main branch
23 |         run: |
24 |           git fetch origin main
25 | 
26 |       - name: Compare package.json version with main
27 |         id: version_check
28 |         run: |
29 |           PR_VERSION=$(jq -r .version package.json)
30 |           MAIN_VERSION=$(git show origin/main:package.json | jq -r .version)
31 |           echo "PR version: $PR_VERSION"
32 |           echo "Main version: $MAIN_VERSION"
33 |           if [ "$PR_VERSION" == "$MAIN_VERSION" ]; then
34 |             echo "Error: package.json version has not been updated in this PR."
35 |             exit 1
36 |           else
37 |             echo "package.json version has been updated in this PR."
38 |           fi
39 | 
40 |   build:
41 |     runs-on: ubuntu-latest
42 |     needs: check-version
43 | 
44 |     steps:
45 |       - name: Checkout code
46 |         uses: actions/checkout@v4
47 |         with:
48 |           fetch-depth: 0
49 | 
50 |       - name: Install Protocol Buffers
51 |         run: |
52 |           sudo apt-get update
53 |           sudo apt-get install -y protobuf-compiler libprotobuf-dev
54 | 
55 |       - name: Configure Git for Private Repos
56 |         run: |
57 |           git config --global url."https://${{ secrets.PAT }}@github.com/".insteadOf "https://github.com/"
58 | 
59 |       - name: Install Rust
60 |         uses: dtolnay/rust-toolchain@master
61 |         with:
62 |           toolchain: nightly-2024-10-28
63 | 
64 |       - name: Rust Cache
65 |         uses: Swatinem/rust-cache@v2
66 |         with:
67 |           cache-on-failure: true
68 | 
69 |       - name: Install Circom
70 |         run: |
71 |           CIRCOM_VERSION=2.1.9
72 |           curl -L https://github.com/iden3/circom/releases/download/v$CIRCOM_VERSION/circom-linux-amd64 -o circom
73 |           chmod +x circom
74 |           sudo mv circom /usr/local/bin/
75 |           circom --version
76 | 
77 |       - name: Install Node.js dependencies
78 |         run: |
79 |           npm ci
80 | 
81 |       - name: Get package version
82 |         id: package_version
83 |         run: |
84 |           VERSION=$(node -p "require('./package.json').version")
85 |           echo "VERSION=$VERSION" >> $GITHUB_ENV
86 | 
87 |       - name: Setup circom-witnesscalc
88 |         run: |
89 |           cd .. && git clone https://github.com/pluto/circom-witnesscalc.git
90 |           cd circom-witnesscalc
91 |           cargo install --path .
92 |           echo $(which build-circuit)
93 | 
94 |       - name: Build circuits using Makefile
95 |         run: |
96 |           make debug # Show what will be processed
97 |           make build # Build the circuits
98 |           make check # Check all circuits are built
99 | 
100 |       - name: Build and run parameter generator
101 |         run: |
102 |           rustup install nightly
103 | 
104 |           # Build the parameter generator
105 |           cargo build --release
106 | 
107 |           # Generate parameters using makefile target
108 |           make params
109 |           echo "Successfully generated all parameter files"
110 | 
111 |       - name: Create release artifacts
112 |         run: |
113 |           # First verify parameter files were created
114 |           for target_dir in builds/target_*b; do
115 |             size=$(basename "$target_dir" | sed 's/target_//')
116 |             # (parameter files for each size were generated above by `make params`)
117 | 
118 |             echo "Successfully generated all parameter files for ${size}"
119 | 
120 |             # Create zip archive for this target size
121 |             if [ -d "$target_dir/artifacts" ]; then
122 |               echo "Creating archive for $size"
123 |               ( cd "$target_dir/artifacts" && \
124 |                 find . -type f -name "*.wasm" -exec bash -c 'mv "$1" "$(dirname "$1")/../"' _ {} \; &&\
125 |                 rm -rf *_js &&\
126 |                 zip -r "../../../circom-artifacts-${size}-v${{ env.VERSION }}.zip" . 
) 127 | fi 128 | done 129 | 130 | - name: Clean build artifacts 131 | if: always() 132 | run: make clean 133 | 134 | - name: Upload artifacts 135 | if: github.event_name == 'pull_request' 136 | uses: actions/upload-artifact@v4 137 | with: 138 | name: circom-artifacts-v${{ env.VERSION }} 139 | path: circom-artifacts-*-v${{ env.VERSION }}.zip 140 | retention-days: 14 141 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | permissions: 9 | contents: write 10 | pull-requests: write 11 | issues: write 12 | id-token: write 13 | 14 | jobs: 15 | release: 16 | name: Release 17 | runs-on: ubuntu-latest 18 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v4 22 | with: 23 | fetch-depth: 0 24 | 25 | - uses: "google-github-actions/auth@v2" 26 | with: 27 | project_id: "web-prover-circuits-fa4cf3" 28 | workload_identity_provider: "projects/530011589985/locations/global/workloadIdentityPools/github/providers/web-prover-circuits" 29 | 30 | - name: Setup Node.js 31 | uses: actions/setup-node@v4 32 | with: 33 | node-version: "lts/*" 34 | 35 | - name: Install dependencies 36 | run: npm ci 37 | 38 | - name: Get package version 39 | id: package_version 40 | run: | 41 | VERSION=$(node -p "require('./package.json').version") 42 | echo "VERSION=$VERSION" >> $GITHUB_ENV 43 | 44 | - name: Get Latest Successful build-circuits Run ID 45 | id: get_run_id 46 | env: 47 | GH_TOKEN: ${{ github.token }} 48 | run: | 49 | # Fetch the latest successful run of the build-artifacts workflow on main 50 | response=$(gh api -X GET /repos/${{ github.repository }}/actions/workflows/artifacts.yaml/runs \ 51 | -f status=success -q '.workflow_runs | map(select(.name == "build-artifacts")) | .[0].id') 52 | echo "run_id=${response}" >> $GITHUB_ENV 53 | 54 | - name: Download Build Artifacts 55 | uses: actions/download-artifact@v4 56 | with: 57 | name: circom-artifacts-v${{ env.VERSION }} 58 | path: ./artifacts 59 | github-token: ${{ secrets.GITHUB_TOKEN }} 60 | run-id: ${{ env.run_id }} 61 | 62 | - name: Prepare Release Notes 63 | run: | 64 | echo "Automated release of compiled Circom circuits" > release_notes.md 65 | echo "Version: ${{ env.VERSION }}" >> release_notes.md 66 | echo "Commit: ${{ github.sha }}" >> release_notes.md 67 | echo "Artifacts included:" >> release_notes.md 68 | for zip in artifacts/circom-artifacts-*-v${{ env.VERSION }}.zip; do 69 | basename "$zip" >> release_notes.md 70 | done 71 | 72 | # Create release with all artifact files 73 | - name: Upload Release Assets 74 | uses: softprops/action-gh-release@v2 75 | with: 76 | files: artifacts/circom-artifacts-*-v${{ env.VERSION }}.zip 77 | tag_name: v${{ env.VERSION }} 78 | body_path: release_notes.md 79 | env: 80 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 81 | 82 | - name: Prepare artifacts for Google Cloud Storage Bucket 83 | run: | 84 | # Unzip, then gzip each individual file (but remove .gz suffix), delete zip files 85 | for zip_file in artifacts/circom-artifacts-*-v${{ env.VERSION }}.zip; do 86 | unzip "$zip_file" -d "${zip_file%.zip}" 87 | (cd "${zip_file%.zip}" && gzip -7 * && for file in *.gz; do mv "$file" "${file%.gz}"; done) 88 | done 89 | rm artifacts/*.zip 90 | 91 | - uses: "google-github-actions/upload-cloud-storage@v2" 92 | with: 93 | path: "artifacts" 94 | 
destination: "web-prover-circuits.pluto.xyz/${{ github.sha }}" 95 | parent: false 96 | gzip: false # already gzipped 97 | headers: |- 98 | content-type: application/octet-stream 99 | content-encoding: gzip 100 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: circom 2 | 3 | concurrency: 4 | group: ${{ github.workflow }}-${{ github.ref }} 5 | cancel-in-progress: true 6 | 7 | on: 8 | push: 9 | branches: [ main ] 10 | pull_request: 11 | branches: [ main ] 12 | 13 | jobs: 14 | test: 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Use Node.js 21 | uses: actions/setup-node@v4 22 | with: 23 | node-version: '20' 24 | 25 | - name: Install dependencies 26 | run: | 27 | npm install 28 | npm install -g snarkjs 29 | 30 | - name: Download and install Circom 31 | run: | 32 | CIRCOM_VERSION=2.1.9 33 | curl -L https://github.com/iden3/circom/releases/download/v$CIRCOM_VERSION/circom-linux-amd64 -o circom 34 | chmod +x circom 35 | sudo mv circom /usr/local/bin/ 36 | circom --version 37 | 38 | - name: Run tests 39 | run: npm run test -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/target 2 | 3 | node_modules/* 4 | 5 | # Circomkit generated 6 | build/* 7 | ptau/* 8 | circuits/test/*.circom 9 | circuits/main/* 10 | 11 | # Rust generated 12 | inputs/**/*.json 13 | !inputs/search/witness.json 14 | 15 | # Circom-witnesscalc generated 16 | ir_log/* 17 | log_input_signals.txt 18 | *.bin 19 | *.r1cs 20 | builds/**/artifacts/ 21 | 22 | # MacOS folks 23 | **/.DS_Store -------------------------------------------------------------------------------- /.mocharc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extension": [ 3 | "ts" 4 | ], 5 | "require": "ts-node/register", 6 | "spec": "circuits/test/**/*.test.ts", 7 | "timeout": 100000, 8 | "exit": true 9 | } -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Rustfmt configuration 2 | # Opinionated whitespace and tabs. The most important of these are imports and width settings. 3 | # Others may want to borrow or change these to their own liking. 4 | # https://rust-lang.github.io/rustfmt 5 | 6 | # version-related 7 | unstable_features=true 8 | use_try_shorthand=true # replace any `try!` (2015 Rust) with `?` 9 | 10 | # misc formatting 11 | condense_wildcard_suffixes =true # replace: (a,b,_,_)=(1, 2, 3, 4); -> (a,b,..)=(1, 2, 3, 4); 12 | format_code_in_doc_comments =true # format code blocks in doc comments 13 | format_macro_matchers =true # $a: ident -> $a:ident 14 | format_strings =true # break and insert newlines for long string literals 15 | match_block_trailing_comma =true # include comma in match blocks after '}' 16 | normalize_comments =true # convert /*..*/ to //.. where possible 17 | reorder_impl_items =true # move `type` and `const` declarations to top of impl block 18 | struct_field_align_threshold=20 # align struct arguments' types vertically 19 | use_field_init_shorthand =true # struct initialization short {x: x} -> {x} 20 | 21 | # reduce whitespace 22 | blank_lines_upper_bound=1 # default: 1. Sometimes useful to change to 0 to condense a file. 
23 | brace_style ="PreferSameLine" # prefer starting `{` without inserting extra \n
24 | fn_single_line =true # if it's a short 1-liner, let it be a short 1-liner
25 | match_arm_blocks =false # remove unnecessary {} in match arms
26 | newline_style ="Unix" # not auto, we won the culture war. \n over \r\n
27 | overflow_delimited_expr=true # prefer ]); to ]\n);
28 | where_single_line =true # put where on a single line if possible
29 | 
30 | # imports preferences
31 | group_imports ="StdExternalCrate" # create import groupings for std, external libs, and internal deps
32 | imports_granularity="Crate" # aggressively group imports
33 | 
34 | # width settings: everything to 100
35 | comment_width =100 # default: 80
36 | inline_attribute_width=60 # inlines #[cfg(test)]\nmod test -> #[cfg(test)] mod test
37 | max_width =100 # default: 100
38 | use_small_heuristics ="Max" # don't ever newline short of `max_width`.
39 | wrap_comments =true # wrap comments at `comment_width`
40 | # format_strings = true # wrap strings at `max_length`
41 | 
42 | # tabs and spaces
43 | hard_tabs =false # (def: false) use spaces over tabs
44 | tab_spaces=2 # 2 > 4, it's just math.
45 | 
46 | ignore=["tls"]
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | 
2 | # Change Log
3 | All notable changes to this project will be documented in this file.
4 | 
5 | The format is based on [Keep a Changelog](http://keepachangelog.com/)
6 | and this project adheres to [Semantic Versioning](http://semver.org/).
7 | 
8 | ## [0.7.0] - 2024-12-3
9 | 
10 | ### Added
11 | ### Changed
12 | #### Circuit Builds
13 | #### Artifacts
14 | - **Circuit sizes:**
15 |   - `plaintext_authentication_1024b`:
16 |     - non-linear constraints: `383,102`
17 |     - R1CS file: `123.4MB`
18 |     - Graph file: `19.9MB`
19 |   - `http_verification_1024b`:
20 |     - non-linear constraints: `121,376`
21 |     - R1CS file: `80.7MB`
22 |     - Graph file: `4.4MB`
23 |     - **WARNING:** Extremely slow build with `--O2` flag. Need to investigate.
24 |   - `json_extraction_1024b`:
25 |     - non-linear constraints: `452,683`
26 |     - R1CS file: `90.3MB`
27 |     - Graph file: `13.2MB`
28 |   - **Total size:** `243.7MB`
29 | - **Circuit param file sizes (SNARK):**
30 |   - `aux_params`: `62.2MB`
31 |   - `prover_key`: `50.3MB`
32 |   - `verifier_key`: `415.3MB`
33 | 
34 | ### Notes
35 | 
36 | ---
37 | 
38 | ## [0.6.0] - 2024-12-3
39 | 
40 | ### Added
41 | 
42 | ### Changed
43 | #### Circuit Builds
44 | - Removed `512b` build path
45 | - Removed `aes_gctr_nivc_*b.circom` from build
46 | 
47 | #### Artifacts
48 | - Adjusted circuit names:
49 |   - `aes_gctr_nivc` and `chacha20-nivc` replaced with a more suitable name: `plaintext_authentication`
50 |     - Runs with `512b` per fold
51 |   - `http_nivc` replaced with a more suitable name: `http_verification`
52 | 
53 | ### Notes
54 | - **Total circuits:** 5
55 | - **Circuit sizes:**
56 |   - `plaintext_authentication_1024b`
57 |     - non-linear constraints: `365,484`
58 |     - linear-constraints: `40,463`
59 |     - Theoretical storage size: `(40,463 + 365,484) * 3 * 32 bytes = 38,970,912 bytes ≈ 39 MB`
60 |     - R1CS file: `121.3MB`
61 |     - Graph file: `13.1MB`
62 |     - **WARNINGS:** Yes. Run `circomkit compile plaintext_authentication_1024b`
63 |   - `http_verification_1024b`:
64 |     - non-linear constraints: `546,895` **(WARNING: greater than `2^19 == 524,288`)**
65 |     - linear-constraints: `543,804`
66 |     - Theoretical storage size: `(546,895 + 543,804) * 3 * 32 bytes = 104,707,104 bytes ≈ 105 MB`
67 |     - R1CS file: `246.4MB`
68 |     - Graph file: `16.5MB`
69 |     - **WARNINGS:** Yes. Run `circomkit compile http_verification_1024b`
70 |   - `json_mask_object_1024b`:
71 |     - non-linear constraints: `550,001` **(WARNING: greater than `2^19 == 524,288`)**
72 |     - linear-constraints: `316,205`
73 |     - Theoretical storage size: `(550,001 + 316,205) * 3 * 32 bytes = 83,155,776 bytes ≈ 83 MB`
74 |     - R1CS file: `109MB`
75 |     - Graph file: `9.3MB`
76 |     - **WARNINGS:** Yes. Run `circomkit compile json_mask_object_1024b`
77 |   - `json_mask_array_index_1024b`:
78 |     - non-linear constraints: `295,146`
79 |     - linear-constraints: `194,082`
80 |     - Theoretical storage size: `(295,146 + 194,082) * 3 * 32 bytes = 46,965,888 bytes ≈ 47 MB`
81 |     - R1CS file: `67.4MB`
82 |     - Graph file: `7.4MB`
83 |     - **WARNINGS:** Yes. Run `circomkit compile json_mask_array_index_1024b`
84 |   - `json_extract_value_1024b`:
85 |     - non-linear constraints: `32,039`
86 |     - linear-constraints: `18,644`
87 |     - Theoretical storage size: `(32,039 + 18,644) * 3 * 32 bytes = 4,865,568 bytes ≈ 4.8 MB`
88 |     - R1CS file: `11.1MB`
89 |     - Graph file: `949KB`
90 | - **Estimated expanded R1CS base memory requirements:** `2^{20} * 32 * 5 ~ 168MB`
91 | - **Circuit param file sizes (SNARK):**
92 |   - `aux_params`: `115.1MB`
93 |   - `prover_key`: `100.7MB`
94 |   - `verifier_key`: `780.3MB`
95 | - **Circuit param file sizes (ppSNARK):**
96 |   - `aux_params`: `836MB` **(WARNING: THIS IS LARGE)**
97 |   - `prover_key`: `5.86GB` **(WARNING: THIS IS EXTREMELY LARGE!!!)**
98 |   - `verifier_key`: `16.8MB`
99 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace.package]
2 | version = "0.1.0"
3 | edition = "2021"
4 | 
5 | [workspace]
6 | members = ["witness-generator", "create-pp"]
7 | resolver = "2"
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Find all target directories
2 | TARGET_DIRS := $(wildcard builds/target_*b)
3 | 
4 | # Find all .circom files in those directories
5 | CIRCOM_FILES := $(wildcard $(addsuffix /*_*b.circom,$(TARGET_DIRS)))
6 | 
7 | # Extract target sizes (e.g., "512b", "1024b") from directory names
8 | TARGET_SIZES := $(patsubst builds/target_%,%,$(TARGET_DIRS))
9 | 
10 | 
11 | # Create artifacts directories
12 | $(shell mkdir -p $(addsuffix /artifacts,$(TARGET_DIRS)))
13 | 
14 | # Default target
15 | .PHONY: all clean
16 | all: build check params
17 | 
18 | # Build target
19 | .PHONY: build
20 | build:
21 | 	@set -e; \
22 | 	for circuit in $(CIRCOM_FILES); do \
23 | 		echo "Processing $${circuit}..."; \
24 | 		circom "$${circuit}" --r1cs --wasm --O2 -o "$$(dirname $${circuit})/artifacts" -l node_modules; \
25 | 		build-circuit "$${circuit}" "$$(dirname $${circuit})/artifacts/$$(basename $${circuit} .circom).bin" -l node_modules; \
26 | 		echo "====================xxxxxxxxxx===================="; \
27 | 	done
28 | 
29 | # Parameters target
30 | .PHONY: params
31 | params:
32 | 	@for target_dir in $(TARGET_DIRS); do \
33 | 		size=$$(basename "$$target_dir" | sed 's/target_//' | sed 's/b//'); \
34 | 		echo "Generating parameters for $${size}b with ROM length 100..."; \
35 | 		cargo +nightly run --release -p create-pp -- "$$target_dir/artifacts" "$${size}b" "100" || exit 1; \
36 | 	done
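37 | # For reference, each pass of the `params` loop above amounts to a direct call to
38 | # the `create-pp` binary; e.g., for the 512b target (a sketch, assuming the
39 | # builds/target_512b/artifacts directory has already been built):
40 | #   cargo +nightly run --release -p create-pp -- builds/target_512b/artifacts 512b 100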
41 | 
42 | .PHONY: check
43 | check:
44 | 	@echo "Checking that all .bin artifacts exist..."
45 | 	@set -e; \
46 | 	for circuit in $(CIRCOM_FILES); do \
47 | 		f1="$$(dirname $${circuit})/artifacts/$$(basename $${circuit} .circom).bin"; \
48 | 		f2="$$(dirname $${circuit})/artifacts/$$(basename $${circuit} .circom).r1cs"; \
49 | 		if [ ! -f "$${f1}" ] || [ ! -f "$${f2}" ]; then \
50 | 			echo "ERROR: Missing artifact '$${f1}', '$${f2}'"; \
51 | 			exit 1; \
52 | 		else \
53 | 			echo "OK: $${f1}, $${f2}"; \
54 | 		fi; \
55 | 	done
56 | 	@echo "All artifacts present!"
57 | 
58 | # Clean target
59 | clean:
60 | 	rm -rf $(addsuffix /artifacts,$(TARGET_DIRS))
61 | 
62 | # Debug target to show what files were found
63 | .PHONY: debug
64 | debug:
65 | 	@echo "Found directories:"
66 | 	@echo $(TARGET_DIRS)
67 | 	@echo "\nFound circom files:"
68 | 	@echo $(CIRCOM_FILES)
69 | 	@echo "\nFound target sizes:"
70 | 	@echo $(TARGET_SIZES)
71 | 	@echo "\nArtifacts will be generated in:"
72 | 	@echo $(addsuffix /artifacts,$(TARGET_DIRS))
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | <h1 align="center">
2 |   Web-Prover Circuits
3 | </h1>
4 | 
5 | <div align="center">
6 | 
7 |   <!-- badge: Contributors -->
8 | 
9 | 
10 |   <!-- badge: Tests -->
11 | 
12 | </div>
13 | 
14 | > [!WARNING]
15 | > ⚠️ Repository No Longer Maintained ⚠️
16 | > 
17 | > This repository has been archived and is no longer maintained.
18 | > All development has moved to the [noir-web-prover-circuits](https://github.com/pluto/noir-web-prover-circuits) repository under the Pluto organization.
19 | 
20 | ## Overview
21 | 
22 | `web-prover-circuits` is a project focused on implementing parsers and extractors/selective-disclosure for various data formats inside zero-knowledge circuits.
23 | Specifically, these are designed to be used in an NIVC folding scheme.
24 | Currently, our program layout looks like this:
25 | ![v0.9.0](docs/images/v0.9.0.png)
26 | 
27 | ## Repository Structure
28 | 
29 | - `circuits/`: Current implementation of circuits
30 |   - `chacha20`: ChaCha encryption circuit
31 |   - `http`: HTTP parser and extractor
32 |   - `json`: JSON parser and extractor
33 |     - `json` has its own documentation [here](docs/json.md)
34 |   - `utils`: Utility circuits
35 |   - `test`: Circuit tests
36 | - `create-pp/`: Rust public-params creation binary
37 | - `examples/`: Reference examples for JSON and HTTP parsers
38 | 
39 | Documentation, in general, can be found in the `docs` directory.
40 | 
41 | ## Getting Started
42 | 
43 | ### Prerequisites
44 | 
45 | To use this repo, you will need to install the following dependencies.
46 | These instructions should work on Linux/GNU and macOS, but aren't guaranteed to work on Windows.
47 | 
48 | #### Install Rust
49 | To install Rust, run:
50 | ```sh
51 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
52 | exec $SHELL
53 | ```
54 | Check that it is installed by running:
55 | ```sh
56 | rustc --version && cargo --version
57 | ```
58 | to see the path to your Rust compiler and Cargo package manager.
59 | 
60 | #### Install Circom
61 | Succinctly, `cd` to a directory of your choosing and run:
62 | ```sh
63 | git clone https://github.com/iden3/circom.git
64 | cd circom
65 | cargo build --release
66 | cargo install --path circom
67 | ```
68 | in order to install `circom` globally.
69 | 
70 | #### Install Node
71 | First, install `nvm` by running:
72 | ```sh
73 | curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | bash
74 | exec $SHELL
75 | ```
76 | Now, with `nvm` installed, run:
77 | ```sh
78 | nvm install --lts
79 | nvm use --lts
80 | node --version && npm --version
81 | ```
82 | 
83 | #### Node packages
84 | From the root of the repository, you can now run:
85 | ```sh
86 | npm install
87 | ```
88 | which will install all the necessary packages for working with Circom.
89 | This includes the executables `circomkit`, `snarkjs`, and `mocha`, which are accessible through `npx`.
90 | 
91 | ##### Circomkit
92 | This repository uses `circomkit` to manage Circom circuits.
93 | To see what you can do with `circomkit`, we suggest running:
94 | ```
95 | npx circomkit help
96 | ```
97 | `circomkit` can essentially do everything you would want to do with these circuits, though we can't guarantee all commands work properly.
98 | 
99 | **Example:**
100 | For example, to compile the 1024-byte `plaintext_authentication` circuit, you can run the following from the repository root:
101 | ```
102 | npx circomkit compile plaintext_authentication_1024b
103 | ```
104 | which implicitly looks up the corresponding entry in `circuits.json` to find the circuit's source file, template, and parameters.
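105 | 
106 | As a point of reference, the `plaintext_authentication_1024b` entry in this repository's `circuits.json` currently reads:
107 | ```json
108 | "plaintext_authentication_1024b": {
109 |   "file": "chacha20/nivc/chacha20_nivc",
110 |   "template": "ChaCha20_NIVC",
111 |   "params": [1024]
112 | }
113 | ```
114 | Each top-level key names a buildable circuit: `file` and `template` select the Circom source and template, and `params` holds the template's instantiation arguments (here, the 1024-byte data size).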
115 | 
116 | If you are having trouble with `circomkit`, consider consulting its documentation.
117 | 
118 | ##### Mocha
119 | `mocha` will also have been installed by the `npm install` above.
120 | Running
121 | ```sh
122 | npx mocha
123 | ```
124 | will run every circuit test.
125 | To filter tests, you can use the `-g` flag (very helpful!).
126 | 
127 | ## License
128 | 
129 | Licensed under the Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
130 | 
131 | ## Contributing
132 | 
133 | We welcome contributions to our open-source projects. If you want to contribute or follow along with contributor discussions, join our [main Telegram channel](https://t.me/pluto_xyz/1) to chat about Pluto's development.
134 | 
135 | Our contributor guidelines can be found in [CONTRIBUTING.md](./CONTRIBUTING.md). A good starting point is issues labelled 'bounty' in our repositories.
136 | 
137 | Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be licensed as above, without any additional terms or conditions.
138 | 
--------------------------------------------------------------------------------
/builds/target_256b/http_verification_256b.circom:
--------------------------------------------------------------------------------
1 | pragma circom 2.1.9;
2 | 
3 | include "../../circuits/http/verification.circom";
4 | 
5 | component main { public [step_in] } = HTTPVerification(256, 25, 11);
6 | 
--------------------------------------------------------------------------------
/builds/target_256b/json_extraction_256b.circom:
--------------------------------------------------------------------------------
1 | pragma circom 2.1.9;
2 | 
3 | include "../../circuits/json/extraction.circom";
4 | 
5 | component main { public [step_in] } = JSONExtraction(256, 12, 11);
--------------------------------------------------------------------------------
/builds/target_256b/plaintext_authentication_256b.circom:
--------------------------------------------------------------------------------
1 | pragma circom 2.1.9;
2 | 
3 | include "../../circuits/chacha20/authentication.circom";
4 | 
5 | component main { public [step_in] } = PlaintextAuthentication(256, 11);
--------------------------------------------------------------------------------
/builds/target_512b/http_verification_512b.circom:
--------------------------------------------------------------------------------
1 | pragma circom 2.1.9;
2 | 
3 | include "../../circuits/http/verification.circom";
4 | 
5 | component main { public [step_in] } = HTTPVerification(512, 25, 11);
6 | 
--------------------------------------------------------------------------------
/builds/target_512b/json_extraction_512b.circom:
--------------------------------------------------------------------------------
1 | pragma circom 2.1.9;
2 | 
3 | include "../../circuits/json/extraction.circom";
4 | 
5 | component main { public [step_in] } = JSONExtraction(512, 12, 11);
--------------------------------------------------------------------------------
/builds/target_512b/plaintext_authentication_512b.circom:
--------------------------------------------------------------------------------
1 | pragma circom 2.1.9;
2 | 
3 | include "../../circuits/chacha20/authentication.circom";
4 | 
5 | component main { public [step_in] } = PlaintextAuthentication(512, 11);
--------------------------------------------------------------------------------
/circomkit.json:
--------------------------------------------------------------------------------
1 | {
2 |   "version": "2.1.9",
3 |   "proofSystem": "groth16",
4 |   "curve": "bn128",
5 |   "includes": [
6 |     "node_modules"
7 
| ], 8 | "optimization": 2 9 | } -------------------------------------------------------------------------------- /circuits.json: -------------------------------------------------------------------------------- 1 | { 2 | "plaintext_authentication_1024b": { 3 | "file": "chacha20/nivc/chacha20_nivc", 4 | "template": "ChaCha20_NIVC", 5 | "params": [ 6 | 1024 7 | ] 8 | }, 9 | "http_verification_1024b": { 10 | "file": "http/verification", 11 | "template": "HTTPVerification", 12 | "params": [ 13 | 1024, 14 | 25 15 | ] 16 | }, 17 | "json_extraction_1024b": { 18 | "file": "json/extraction", 19 | "template": "JSONExtraction", 20 | "params": [ 21 | 1024, 22 | 10 23 | ] 24 | } 25 | } -------------------------------------------------------------------------------- /circuits/chacha20/authentication.circom: -------------------------------------------------------------------------------- 1 | // initially from https://github.com/reclaimprotocol/zk-symmetric-crypto 2 | // modified for our needs 3 | pragma circom 2.1.9; 4 | 5 | include "chacha-round.circom"; 6 | include "chacha-qr.circom"; 7 | include "../utils/bits.circom"; 8 | include "../utils/hash.circom"; 9 | include "../utils/array.circom"; 10 | include "circomlib/circuits/poseidon.circom"; 11 | 12 | 13 | /** ChaCha20 in counter mode */ 14 | // Chacha20 opperates a 4x4 matrix of 32-bit words where the first 4 words are constants: C 15 | // and the next 8 words are the 256 bit key: K. The next 2 words are the block counter: # 16 | // and the last 2 words are the nonce: N. 17 | // +---+---+---+---+ 18 | // | C | C | C | C | 19 | // +---+---+---+---+ 20 | // | K | K | K | K | 21 | // +---+---+---+---+ 22 | // | K | K | K | K | 23 | // +---+---+---+---+ 24 | // | # | N | N | N | 25 | // +---+---+---+---+ 26 | // paramaterized by `DATA_BYTES` which is the plaintext length in bytes 27 | template PlaintextAuthentication(DATA_BYTES, PUBLIC_IO_LENGTH) { 28 | // key => 8 32-bit words = 32 bytes 29 | signal input key[8][32]; 30 | // nonce => 3 32-bit words = 12 bytes 31 | signal input nonce[3][32]; 32 | // counter => 32-bit word to apply w nonce 33 | signal input counter[32]; 34 | 35 | // the below can be both ciphertext or plaintext depending on the direction 36 | // in => N 32-bit words => N 4 byte words 37 | signal input plaintext[DATA_BYTES]; 38 | 39 | signal input ciphertext_digest; 40 | 41 | // step_in should be the ciphertext digest + the HTTP digests + JSON seq digest 42 | signal input step_in[PUBLIC_IO_LENGTH]; 43 | 44 | // step_out should be the plaintext digest 45 | signal output step_out[PUBLIC_IO_LENGTH]; 46 | 47 | signal isPadding[DATA_BYTES]; // == 1 in the case we hit padding number 48 | signal plaintextBits[DATA_BYTES / 4][32]; 49 | component toBits[DATA_BYTES / 4]; 50 | for (var i = 0 ; i < DATA_BYTES / 4 ; i++) { 51 | toBits[i] = fromWords32ToLittleEndian(); 52 | for (var j = 0 ; j < 4 ; j++) { 53 | isPadding[i * 4 + j] <== IsEqual()([plaintext[i * 4 + j], -1]); 54 | toBits[i].words[j] <== (1 - isPadding[i * 4 + j]) * plaintext[i*4 + j]; 55 | } 56 | plaintextBits[i] <== toBits[i].data; 57 | } 58 | 59 | var tmp[16][32] = [ 60 | [ 61 | // constant 0x61707865 62 | 0, 1, 1, 0, 0, 0, 0, 1, 0, 63 | 1, 1, 1, 0, 0, 0, 0, 0, 1, 64 | 1, 1, 1, 0, 0, 0, 0, 1, 1, 65 | 0, 0, 1, 0, 1 66 | ], 67 | [ 68 | // constant 0x3320646e 69 | 0, 0, 1, 1, 0, 0, 1, 1, 0, 70 | 0, 1, 0, 0, 0, 0, 0, 0, 1, 71 | 1, 0, 0, 1, 0, 0, 0, 1, 1, 72 | 0, 1, 1, 1, 0 73 | ], 74 | [ 75 | // constant 0x79622d32 76 | 0, 1, 1, 1, 1, 0, 0, 1, 0, 77 | 1, 1, 0, 0, 0, 1, 0, 0, 0, 78 | 1, 0, 1, 
1, 0, 1, 0, 0, 1, 79 | 1, 0, 0, 1, 0 80 | ], 81 | [ 82 | // constant 0x6b206574 83 | 0, 1, 1, 0, 1, 0, 1, 1, 0, 84 | 0, 1, 0, 0, 0, 0, 0, 0, 1, 85 | 1, 0, 0, 1, 0, 1, 0, 1, 1, 86 | 1, 0, 1, 0, 0 87 | ], 88 | key[0], key[1], key[2], key[3], 89 | key[4], key[5], key[6], key[7], 90 | counter, nonce[0], nonce[1], nonce[2] 91 | ]; 92 | 93 | // 1 in 32-bit words 94 | signal one[32]; 95 | one <== [ 96 | 0, 0, 0, 0, 0, 0, 0, 0, 97 | 0, 0, 0, 0, 0, 0, 0, 0, 98 | 0, 0, 0, 0, 0, 0, 0, 0, 99 | 0, 0, 0, 0, 0, 0, 0, 1 100 | ]; 101 | 102 | var i = 0; 103 | var j = 0; 104 | 105 | // do the ChaCha20 rounds 106 | // rounds opperates on 4 words at a time 107 | component rounds[DATA_BYTES / 64]; 108 | component xors[DATA_BYTES]; 109 | component counter_adder[DATA_BYTES / 64 - 1]; 110 | 111 | signal cipherText[DATA_BYTES / 4][32]; 112 | 113 | for(i = 0; i < DATA_BYTES / 64; i++) { 114 | rounds[i] = Round(); 115 | rounds[i].in <== tmp; 116 | // XOR block with input 117 | for(j = 0; j < 16; j++) { 118 | xors[i*16 + j] = XorBits(32); 119 | xors[i*16 + j].a <== plaintextBits[i*16 + j]; 120 | xors[i*16 + j].b <== rounds[i].out[j]; 121 | cipherText[i*16 + j] <== xors[i*16 + j].out; 122 | } 123 | 124 | if(i < DATA_BYTES / 64 - 1) { 125 | counter_adder[i] = AddBits(32); 126 | counter_adder[i].a <== tmp[12]; 127 | counter_adder[i].b <== one; 128 | 129 | // increment the counter 130 | tmp[12] = counter_adder[i].out; 131 | } 132 | } 133 | 134 | component toCiphertextBytes[DATA_BYTES / 4]; 135 | signal bigEndianCiphertext[DATA_BYTES]; 136 | 137 | for (var i = 0 ; i < DATA_BYTES / 4 ; i++) { 138 | toCiphertextBytes[i] = fromLittleEndianToWords32(); 139 | for (var j = 0 ; j < 32 ; j++) { 140 | toCiphertextBytes[i].data[j] <== cipherText[i][j]; 141 | } 142 | for (var j = 0 ; j < 4 ; j++) { 143 | bigEndianCiphertext[i*4 + j] <== isPadding[i * 4 + j] * (-1 - toCiphertextBytes[i].words[j]) + toCiphertextBytes[i].words[j]; // equal to: (isPadding[i * 4 + j] * (-1)) + (1 - isPadding[i * 4 + j]) * toCiphertextBytes[i].words[j]; 144 | } 145 | } 146 | 147 | // for (var i = 0 ; i < DATA_BYTES ; i++) { 148 | // log("bigEndianCiphertext[",i,"]", bigEndianCiphertext[i]); 149 | // } 150 | 151 | // Count the number of non-padding bytes 152 | signal ciphertext_digest_pow[DATA_BYTES+1]; 153 | ciphertext_digest_pow[0] <== step_in[1]; 154 | signal mult_factor[DATA_BYTES]; 155 | // Sets any padding bytes to zero (which are presumably at the end) so they don't accum into the poly hash 156 | signal zeroed_plaintext[DATA_BYTES]; 157 | for(var i = 0 ; i < DATA_BYTES ; i++) { 158 | zeroed_plaintext[i] <== (1 - isPadding[i]) * plaintext[i]; 159 | mult_factor[i] <== (1 - isPadding[i]) * ciphertext_digest + isPadding[i]; 160 | ciphertext_digest_pow[i+1] <== ciphertext_digest_pow[i] * mult_factor[i]; 161 | } 162 | signal part_ciphertext_digest <== DataHasherWithSeed(DATA_BYTES)(step_in[10],bigEndianCiphertext); 163 | 164 | // log("part_ciphertext_digest: ", part_ciphertext_digest); 165 | 166 | signal plaintext_digest <== PolynomialDigestWithCounter(DATA_BYTES)(zeroed_plaintext, ciphertext_digest, step_in[1]); 167 | 168 | // log("plaintext_digest: ", plaintext_digest); 169 | 170 | step_out[0] <== step_in[0] + step_in[10] - part_ciphertext_digest + plaintext_digest; 171 | step_out[1] <== ciphertext_digest_pow[DATA_BYTES]; 172 | // TODO: I was lazy and put this at the end instead of in a better spot 173 | step_out[10] <== part_ciphertext_digest; 174 | 175 | // reset HTTP Verification inputs 176 | step_out[2] <== step_in[2]; // Ciphertext digest POW 
accumulator 177 | step_out[3] <== PolynomialDigest(8)([1, 0, 0, 0, 0, 0, 0, 1], ciphertext_digest); // default Machine state digest 178 | for (var i = 4 ; i < PUBLIC_IO_LENGTH - 1 ; i++) { 179 | if (i == 6) { 180 | step_out[i] <== 0; // Body ciphertext digest pow counter 181 | } else { 182 | step_out[i] <== step_in[i]; 183 | } 184 | } 185 | 186 | // for (var i = 0; i < PUBLIC_IO_LENGTH ; i++) { 187 | // log("step_out[",i,"]", step_out[i]); 188 | // } 189 | // log("xxxxxx Authentication Done xxxxxx"); 190 | } -------------------------------------------------------------------------------- /circuits/chacha20/chacha-qr.circom: -------------------------------------------------------------------------------- 1 | // initially from https://github.com/reclaimprotocol/zk-symmetric-crypto 2 | // modified for our needs 3 | pragma circom 2.1.9; 4 | 5 | include "../utils/bits.circom"; 6 | 7 | /** 8 | * Perform ChaCha Quarter Round 9 | * Assume 4 words of 32 bits each 10 | * Each word must be little endian 11 | */ 12 | template QR() { 13 | signal input in[4][32]; 14 | signal output out[4][32]; 15 | 16 | var tmp[4][32] = in; 17 | 18 | // a += b 19 | component add1 = AddBits(32); 20 | add1.a <== tmp[0]; 21 | add1.b <== tmp[1]; 22 | 23 | tmp[0] = add1.out; 24 | 25 | // d ^= a 26 | component xor1 = XorBits(32); 27 | xor1.a <== tmp[3]; 28 | xor1.b <== tmp[0]; 29 | tmp[3] = xor1.out; 30 | 31 | // d = RotateLeft32BitsUnsafe(d, 16) 32 | component rot1 = RotateLeftBits(32, 16); 33 | rot1.in <== tmp[3]; 34 | tmp[3] = rot1.out; 35 | 36 | // c += d 37 | component add2 = AddBits(32); 38 | add2.a <== tmp[2]; 39 | add2.b <== tmp[3]; 40 | tmp[2] = add2.out; 41 | 42 | // b ^= c 43 | component xor2 = XorBits(32); 44 | xor2.a <== tmp[1]; 45 | xor2.b <== tmp[2]; 46 | tmp[1] = xor2.out; 47 | 48 | // b = RotateLeft32BitsUnsafe(b, 12) 49 | component rot2 = RotateLeftBits(32, 12); 50 | rot2.in <== tmp[1]; 51 | tmp[1] = rot2.out; 52 | 53 | // a += b 54 | component add3 = AddBits(32); 55 | add3.a <== tmp[0]; 56 | add3.b <== tmp[1]; 57 | tmp[0] = add3.out; 58 | 59 | // d ^= a 60 | component xor3 = XorBits(32); 61 | xor3.a <== tmp[3]; 62 | xor3.b <== tmp[0]; 63 | tmp[3] = xor3.out; 64 | 65 | // d = RotateLeft32BitsUnsafe(d, 8) 66 | component rot3 = RotateLeftBits(32, 8); 67 | rot3.in <== tmp[3]; 68 | tmp[3] = rot3.out; 69 | 70 | // c += d 71 | component add4 = AddBits(32); 72 | add4.a <== tmp[2]; 73 | add4.b <== tmp[3]; 74 | tmp[2] = add4.out; 75 | 76 | // b ^= c 77 | component xor4 = XorBits(32); 78 | xor4.a <== tmp[1]; 79 | xor4.b <== tmp[2]; 80 | tmp[1] = xor4.out; 81 | 82 | // b = RotateLeft32BitsUnsafe(b, 7) 83 | component rot4 = RotateLeftBits(32, 7); 84 | rot4.in <== tmp[1]; 85 | tmp[1] = rot4.out; 86 | 87 | out <== tmp; 88 | } -------------------------------------------------------------------------------- /circuits/chacha20/chacha-round.circom: -------------------------------------------------------------------------------- 1 | // initially from https://github.com/reclaimprotocol/zk-symmetric-crypto 2 | // modified for our needs 3 | pragma circom 2.1.9; 4 | 5 | include "./chacha-qr.circom"; 6 | include "../utils/bits.circom"; 7 | 8 | template Round() { 9 | // in => 16 32-bit words 10 | signal input in[16][32]; 11 | // out => 16 32-bit words 12 | signal output out[16][32]; 13 | 14 | var tmp[16][32] = in; 15 | 16 | component rounds[10 * 8]; 17 | component finalAdd[16]; 18 | // i-th round 19 | var i = 0; 20 | // col loop counter 21 | var j = 0; 22 | // counter for the rounds array 23 | var k = 0; 24 | for(i = 0; i < 10; i++) 
{ 25 | // columns of the matrix in a loop 26 | // 0, 4, 8, 12 27 | // 1, 5, 9, 13 28 | // 2, 6, 10, 14 29 | // 3, 7, 11, 15 30 | for(j = 0; j < 4; j++) { 31 | rounds[k] = QR(); 32 | rounds[k].in[0] <== tmp[j]; 33 | rounds[k].in[1] <== tmp[j + 4]; 34 | rounds[k].in[2] <== tmp[j + 8]; 35 | rounds[k].in[3] <== tmp[j + 12]; 36 | 37 | tmp[j] = rounds[k].out[0]; 38 | tmp[j + 4] = rounds[k].out[1]; 39 | tmp[j + 8] = rounds[k].out[2]; 40 | tmp[j + 12] = rounds[k].out[3]; 41 | 42 | k ++; 43 | } 44 | 45 | // 4 diagnals 46 | // 0, 5, 10, 15 47 | rounds[k] = QR(); 48 | rounds[k].in[0] <== tmp[0]; 49 | rounds[k].in[1] <== tmp[5]; 50 | rounds[k].in[2] <== tmp[10]; 51 | rounds[k].in[3] <== tmp[15]; 52 | 53 | tmp[0] = rounds[k].out[0]; 54 | tmp[5] = rounds[k].out[1]; 55 | tmp[10] = rounds[k].out[2]; 56 | tmp[15] = rounds[k].out[3]; 57 | 58 | k ++; 59 | 60 | // 1, 6, 11, 12 61 | rounds[k] = QR(); 62 | rounds[k].in[0] <== tmp[1]; 63 | rounds[k].in[1] <== tmp[6]; 64 | rounds[k].in[2] <== tmp[11]; 65 | rounds[k].in[3] <== tmp[12]; 66 | 67 | tmp[1] = rounds[k].out[0]; 68 | tmp[6] = rounds[k].out[1]; 69 | tmp[11] = rounds[k].out[2]; 70 | tmp[12] = rounds[k].out[3]; 71 | 72 | k ++; 73 | 74 | // 2, 7, 8, 13 75 | rounds[k] = QR(); 76 | rounds[k].in[0] <== tmp[2]; 77 | rounds[k].in[1] <== tmp[7]; 78 | rounds[k].in[2] <== tmp[8]; 79 | rounds[k].in[3] <== tmp[13]; 80 | 81 | tmp[2] = rounds[k].out[0]; 82 | tmp[7] = rounds[k].out[1]; 83 | tmp[8] = rounds[k].out[2]; 84 | tmp[13] = rounds[k].out[3]; 85 | 86 | k ++; 87 | 88 | // 3, 4, 9, 14 89 | rounds[k] = QR(); 90 | rounds[k].in[0] <== tmp[3]; 91 | rounds[k].in[1] <== tmp[4]; 92 | rounds[k].in[2] <== tmp[9]; 93 | rounds[k].in[3] <== tmp[14]; 94 | 95 | tmp[3] = rounds[k].out[0]; 96 | tmp[4] = rounds[k].out[1]; 97 | tmp[9] = rounds[k].out[2]; 98 | tmp[14] = rounds[k].out[3]; 99 | 100 | k ++; 101 | } 102 | 103 | // add the result to the input 104 | for(i = 0; i < 16; i++) { 105 | finalAdd[i] = AddBits(32); 106 | finalAdd[i].a <== tmp[i]; 107 | finalAdd[i].b <== in[i]; 108 | tmp[i] = finalAdd[i].out; 109 | } 110 | 111 | out <== tmp; 112 | } 113 | -------------------------------------------------------------------------------- /circuits/chacha20/chacha20.circom: -------------------------------------------------------------------------------- 1 | // initially from https://github.com/reclaimprotocol/zk-symmetric-crypto 2 | // modified for our needs 3 | pragma circom 2.1.9; 4 | 5 | include "./chacha-round.circom"; 6 | include "./chacha-qr.circom"; 7 | include "../utils/bits.circom"; 8 | 9 | /** ChaCha20 in counter mode */ 10 | // Chacha20 opperates a 4x4 matrix of 32-bit words where the first 4 words are constants: C 11 | // and the next 8 words are the 256 bit key: K. The next 2 words are the block counter: # 12 | // and the last 2 words are the nonce: N. 
13 | // +---+---+---+---+ 14 | // | C | C | C | C | 15 | // +---+---+---+---+ 16 | // | K | K | K | K | 17 | // +---+---+---+---+ 18 | // | K | K | K | K | 19 | // +---+---+---+---+ 20 | // | # | N | N | N | 21 | // +---+---+---+---+ 22 | // paramaterized by n which is the number of 32-bit words to encrypt 23 | template ChaCha20(N) { 24 | // key => 8 32-bit words = 32 bytes 25 | signal input key[8][32]; 26 | // nonce => 3 32-bit words = 12 bytes 27 | signal input nonce[3][32]; 28 | // counter => 32-bit word to apply w nonce 29 | signal input counter[32]; 30 | 31 | // the below can be both ciphertext or plaintext depending on the direction 32 | // in => N 32-bit words => N 4 byte words 33 | signal input in[N][32]; 34 | // out => N 32-bit words => N 4 byte words 35 | signal output out[N][32]; 36 | 37 | var tmp[16][32] = [ 38 | [ 39 | // constant 0x61707865 40 | 0, 1, 1, 0, 0, 0, 0, 1, 0, 41 | 1, 1, 1, 0, 0, 0, 0, 0, 1, 42 | 1, 1, 1, 0, 0, 0, 0, 1, 1, 43 | 0, 0, 1, 0, 1 44 | ], 45 | [ 46 | // constant 0x3320646e 47 | 0, 0, 1, 1, 0, 0, 1, 1, 0, 48 | 0, 1, 0, 0, 0, 0, 0, 0, 1, 49 | 1, 0, 0, 1, 0, 0, 0, 1, 1, 50 | 0, 1, 1, 1, 0 51 | ], 52 | [ 53 | // constant 0x79622d32 54 | 0, 1, 1, 1, 1, 0, 0, 1, 0, 55 | 1, 1, 0, 0, 0, 1, 0, 0, 0, 56 | 1, 0, 1, 1, 0, 1, 0, 0, 1, 57 | 1, 0, 0, 1, 0 58 | ], 59 | [ 60 | // constant 0x6b206574 61 | 0, 1, 1, 0, 1, 0, 1, 1, 0, 62 | 0, 1, 0, 0, 0, 0, 0, 0, 1, 63 | 1, 0, 0, 1, 0, 1, 0, 1, 1, 64 | 1, 0, 1, 0, 0 65 | ], 66 | key[0], key[1], key[2], key[3], 67 | key[4], key[5], key[6], key[7], 68 | counter, nonce[0], nonce[1], nonce[2] 69 | ]; 70 | 71 | // 1 in 32-bit words 72 | signal one[32]; 73 | one <== [ 74 | 0, 0, 0, 0, 0, 0, 0, 0, 75 | 0, 0, 0, 0, 0, 0, 0, 0, 76 | 0, 0, 0, 0, 0, 0, 0, 0, 77 | 0, 0, 0, 0, 0, 0, 0, 1 78 | ]; 79 | 80 | var i = 0; 81 | var j = 0; 82 | 83 | // do the ChaCha20 rounds 84 | component rounds[N/16]; 85 | component xors[N]; 86 | component counter_adder[N/16 - 1]; 87 | 88 | for(i = 0; i < N/16; i++) { 89 | rounds[i] = Round(); 90 | rounds[i].in <== tmp; 91 | // XOR block with input 92 | for(j = 0; j < 16; j++) { 93 | xors[i*16 + j] = XorBits(32); 94 | xors[i*16 + j].a <== in[i*16 + j]; 95 | xors[i*16 + j].b <== rounds[i].out[j]; 96 | out[i*16 + j] <== xors[i*16 + j].out; 97 | } 98 | 99 | if(i < N/16 - 1) { 100 | counter_adder[i] = AddBits(32); 101 | counter_adder[i].a <== tmp[12]; 102 | counter_adder[i].b <== one; 103 | 104 | // increment the counter 105 | tmp[12] = counter_adder[i].out; 106 | } 107 | } 108 | } -------------------------------------------------------------------------------- /circuits/http/machine.circom: -------------------------------------------------------------------------------- 1 | pragma circom 2.1.9; 2 | 3 | include "../utils/array.circom"; 4 | 5 | template HttpStateUpdate() { 6 | signal input parsing_start; // flag that counts up to 3 for each value in the start line 7 | signal input parsing_header; // Flag + Counter for what header line we are in 8 | signal input parsing_field_name; // flag that tells if parsing header field name 9 | signal input parsing_field_value; // flag that tells if parsing header field value 10 | signal input parsing_body; // Flag when we are inside body 11 | signal input line_status; // Flag that counts up to 4 to read a double CRLF 12 | signal input byte; 13 | 14 | signal output next_parsing_start; 15 | signal output next_parsing_header; 16 | signal output next_parsing_field_name; 17 | signal output next_parsing_field_value; 18 | signal output next_parsing_body; 19 | signal output 
next_line_status; 20 | 21 | //---------------------------------------------------------------------------------// 22 | // check if we read space: 32 or colon: 58 23 | component readSP = IsEqual(); 24 | readSP.in <== [byte, 32]; 25 | component readColon = IsEqual(); 26 | readColon.in <== [byte, 58]; 27 | 28 | // Check if what we just read is a CR / LF 29 | component readCR = IsEqual(); 30 | readCR.in <== [byte, 13]; 31 | component readLF = IsEqual(); 32 | readLF.in <== [byte, 10]; 33 | 34 | signal notCRAndLF <== (1 - readCR.out) * (1 - readLF.out); 35 | //---------------------------------------------------------------------------------// 36 | 37 | //---------------------------------------------------------------------------------// 38 | // Check if we had read previously CR / LF or multiple 39 | component prevReadCR = IsEqual(); 40 | prevReadCR.in <== [line_status, 1]; 41 | component prevReadCRLF = IsEqual(); 42 | prevReadCRLF.in <== [line_status, 2]; 43 | component prevReadCRLFCR = IsEqual(); 44 | prevReadCRLFCR.in <== [line_status, 3]; 45 | 46 | signal readCRLF <== prevReadCR.out * readLF.out; 47 | signal readCRLFCR <== prevReadCRLF.out * readCR.out; 48 | signal readCRLFCRLF <== prevReadCRLFCR.out * readLF.out; 49 | //---------------------------------------------------------------------------------// 50 | 51 | //---------------------------------------------------------------------------------// 52 | // Take current state and CRLF info to update state 53 | signal state[4] <== [parsing_start, parsing_header, parsing_field_value, parsing_body]; 54 | component stateChange = StateChange(); 55 | stateChange.readCR <== readCR.out; 56 | stateChange.prevReadCRLF <== prevReadCRLF.out; 57 | stateChange.readCRLF <== readCRLF; 58 | stateChange.readCRLFCR <== readCRLFCR; 59 | stateChange.readCRLFCRLF <== readCRLFCRLF; 60 | stateChange.readSP <== readSP.out; 61 | stateChange.readColon <== readColon.out; 62 | stateChange.state <== state; 63 | 64 | component nextState = ArrayAdd(5); 65 | nextState.lhs <== [state[0], state[1], parsing_field_name, parsing_field_value, parsing_body]; 66 | nextState.rhs <== stateChange.out; 67 | //---------------------------------------------------------------------------------// 68 | 69 | next_parsing_start <== nextState.out[0]; 70 | next_parsing_header <== nextState.out[1]; 71 | next_parsing_field_name <== nextState.out[2]; 72 | next_parsing_field_value <== nextState.out[3]; 73 | next_parsing_body <== nextState.out[4]; 74 | signal cancelTerm <== line_status * (notCRAndLF + readCRLFCRLF); 75 | next_line_status <== (line_status + readCR.out + readCRLF - cancelTerm) * (1 - next_parsing_body); 76 | } 77 | 78 | // TODO: 79 | // - multiple space between start line values 80 | // - header value parsing doesn't handle SPACE between colon and actual value 81 | template StateChange() { 82 | signal input prevReadCRLF; 83 | signal input readCR; 84 | signal input readCRLF; 85 | signal input readCRLFCR; 86 | signal input readCRLFCRLF; 87 | signal input readSP; 88 | signal input readColon; 89 | signal input state[4]; 90 | signal output out[5]; 91 | 92 | // GreaterEqThan(2) because start line can have at most 3 values for request or response 93 | signal isParsingStart <== GreaterEqThan(2)([state[0], 1]); 94 | // increment parsing start counter on reading SP 95 | signal incrementParsingStart <== readSP * isParsingStart; 96 | // disable parsing start on reading CRLF 97 | signal disableParsingStart <== readCR * state[0]; 98 | 99 | // enable parsing header on reading CRLF 100 | // signal 
enableParsingHeader <== readCRLF * isParsingStart; 101 | // check if we are parsing header 102 | // Allows for max headers to be 2^5 = 32 103 | signal isParsingHeader <== GreaterEqThan(5)([state[1], 1]); 104 | // increment parsing header counter on CRLF and parsing header 105 | signal incrementParsingHeader <== prevReadCRLF * (1 - readCRLFCR); 106 | // disable parsing header on reading CRLF-CRLF 107 | signal disableParsingHeader <== readCRLFCRLF * state[1]; 108 | // parsing field value when parsing header and read Colon `:` 109 | signal readColonNotInFieldValue <== readColon * (1 - state[2]); 110 | signal isParsingFieldValue <== isParsingHeader * readColonNotInFieldValue; 111 | 112 | // parsing body when reading CRLF-CRLF and parsing header 113 | signal enableParsingBody <== readCRLFCRLF * isParsingHeader; 114 | 115 | // disable the parsing field value if we should increment parsing header and were previously parsing field value too 116 | signal disableParsingFieldValue <== readCR * state[2]; 117 | 118 | // parsing_start = out[0] = increment start - disable start 119 | // parsing_header = out[1] = (increment header - disable header) * parsing body 120 | // parsing_field_name = out[2] = (increment header - parsing field value) * parsing body 121 | // parsing_field_value = out[3] = (parsing field value - disable parsing field value) * parsing body 122 | // parsing_body = out[4] = enable body 123 | out <== [ 124 | (incrementParsingStart - disableParsingStart), 125 | (incrementParsingHeader - disableParsingHeader) * (1 - state[3]), 126 | (incrementParsingHeader - isParsingFieldValue) * (1 - state[3]), 127 | (isParsingFieldValue - disableParsingFieldValue) * (1 - state[3]), 128 | enableParsingBody 129 | ]; 130 | } -------------------------------------------------------------------------------- /circuits/http/parser.circom: -------------------------------------------------------------------------------- 1 | include "machine.circom"; 2 | 3 | template Parser(DATA_BYTES) { 4 | signal input data[DATA_BYTES]; 5 | 6 | component State[DATA_BYTES]; 7 | State[0] = HttpStateUpdate(); 8 | State[0].byte <== data[0]; 9 | State[0].parsing_start <== 1; 10 | State[0].parsing_header <== 0; 11 | State[0].parsing_field_name <== 0; 12 | State[0].parsing_field_value <== 0; 13 | State[0].parsing_body <== 0; 14 | State[0].line_status <== 0; 15 | 16 | log("-------------------------------------------------"); 17 | log("byte: ", data[0]); 18 | log("-------------------------------------------------"); 19 | log("State[", 0, "].next_parsing_start =", State[0].next_parsing_start); 20 | log("State[", 0, "].next_parsing_header =", State[0].next_parsing_header); 21 | log("State[", 0, "].next_parsing_field_name =", State[0].next_parsing_field_name); 22 | log("State[", 0, "].next_parsing_field_value =", State[0].next_parsing_field_value); 23 | log("State[", 0, "].next_parsing_body =", State[0].next_parsing_body); 24 | log("State[", 0, "].next_line_status =", State[0].next_line_status); 25 | log("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"); 26 | 27 | for(var data_idx = 1; data_idx < DATA_BYTES; data_idx++) { 28 | State[data_idx] = HttpStateUpdate(); 29 | State[data_idx].byte <== data[data_idx]; 30 | State[data_idx].parsing_start <== State[data_idx - 1].next_parsing_start; 31 | State[data_idx].parsing_header <== State[data_idx - 1].next_parsing_header; 32 | State[data_idx].parsing_field_name <== State[data_idx - 1].next_parsing_field_name; 33 | State[data_idx].parsing_field_value <== State[data_idx - 
1].next_parsing_field_value; 34 | State[data_idx].parsing_body <== State[data_idx - 1].next_parsing_body; 35 | State[data_idx].line_status <== State[data_idx - 1].next_line_status; 36 | log("-------------------------------------------------"); 37 | log("byte: ", data[data_idx]); 38 | log("-------------------------------------------------"); 39 | log("State[", data_idx, "].next_parsing_start =", State[data_idx].next_parsing_start); 40 | log("State[", data_idx, "].next_parsing_header =", State[data_idx].next_parsing_header); 41 | log("State[", data_idx, "].next_parsing_field_name =", State[data_idx].next_parsing_field_name); 42 | log("State[", data_idx, "].next_parsing_field_value =", State[data_idx].next_parsing_field_value); 43 | log("State[", data_idx, "].next_parsing_body =", State[data_idx].next_parsing_body); 44 | log("State[", data_idx, "].next_line_status =", State[data_idx].next_line_status); 45 | log("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"); 46 | } 47 | 48 | // Verify machine ends in a valid state 49 | State[DATA_BYTES - 1].next_parsing_start === 0; 50 | State[DATA_BYTES - 1].next_parsing_header === 0; 51 | State[DATA_BYTES - 1].next_parsing_field_name === 0; 52 | State[DATA_BYTES - 1].next_parsing_field_value === 0; 53 | State[DATA_BYTES - 1].next_parsing_body === 1; 54 | State[DATA_BYTES - 1].next_line_status === 0; 55 | 56 | } -------------------------------------------------------------------------------- /circuits/http/verification.circom: -------------------------------------------------------------------------------- 1 | pragma circom 2.1.9; 2 | 3 | include "circomlib/circuits/comparators.circom"; 4 | include "machine.circom"; 5 | include "../utils/hash.circom"; 6 | 7 | template HTTPVerification(DATA_BYTES, MAX_NUMBER_OF_HEADERS, PUBLIC_IO_LENGTH) { 8 | signal input step_in[PUBLIC_IO_LENGTH]; 9 | signal output step_out[PUBLIC_IO_LENGTH]; 10 | 11 | // next_parsing_start, next_parsing_header, next_parsing_field_name, next_parsing_field_value, next_parsing_body, next_line_status, line_digest, main_monomial 12 | signal input machine_state[8]; 13 | 14 | signal input ciphertext_digest; 15 | 16 | // step_in[2] = ciphertext_digest ** plaintext_ctr 17 | signal ciphertext_digest_pow[DATA_BYTES+1]; 18 | ciphertext_digest_pow[0] <== step_in[2]; 19 | signal mult_factor[DATA_BYTES]; 20 | 21 | signal input data[DATA_BYTES]; 22 | signal isPadding[DATA_BYTES]; // == 1 in the case we hit padding number 23 | signal zeroed_data[DATA_BYTES]; 24 | for (var i = 0 ; i < DATA_BYTES ; i++) { 25 | isPadding[i] <== IsEqual()([data[i], -1]); 26 | zeroed_data[i] <== (1 - isPadding[i]) * data[i]; 27 | mult_factor[i] <== (1 - isPadding[i]) * ciphertext_digest + isPadding[i]; 28 | ciphertext_digest_pow[i+1] <== ciphertext_digest_pow[i] * mult_factor[i]; 29 | } 30 | signal pt_digest <== PolynomialDigestWithCounter(DATA_BYTES)(zeroed_data, ciphertext_digest, step_in[2]); 31 | // log("inner plaintext_digest: ", pt_digest); 32 | 33 | // Contains digests of start line and all intended headers (up to `MAX_NUMBER_OF_HEADERS`) 34 | signal input main_digests[MAX_NUMBER_OF_HEADERS + 1]; 35 | signal not_contained[MAX_NUMBER_OF_HEADERS + 1]; 36 | for(var i = 0 ; i < MAX_NUMBER_OF_HEADERS + 1 ; i++) { 37 | not_contained[i] <== IsZero()(main_digests[i]); 38 | } 39 | 40 | // assertions: 41 | // - check step_in[3] = machine state hash digest 42 | signal machine_state_digest <== PolynomialDigest(8)(machine_state, ciphertext_digest); 43 | step_in[3] === machine_state_digest; 44 | // - check step_in[4] = start line 
hash digest + all header hash digests 45 | // TODO: I don't like this `MAX_NUMBER_OF_HEADERS + 1` now. It should just be `NUMBER_OF_STATEMENTS_TO_LOCK` or something 46 | signal option_hash[MAX_NUMBER_OF_HEADERS + 1]; 47 | signal main_digests_hashed[MAX_NUMBER_OF_HEADERS + 1]; 48 | var accumulated_main_digests_hashed = 0; 49 | for(var i = 0 ; i < MAX_NUMBER_OF_HEADERS + 1 ; i++) { 50 | option_hash[i] <== Poseidon(1)([main_digests[i]]); 51 | main_digests_hashed[i] <== (1 - not_contained[i]) * option_hash[i]; 52 | accumulated_main_digests_hashed += main_digests_hashed[i]; 53 | } 54 | step_in[4] === accumulated_main_digests_hashed; 55 | 56 | // populate the state machine with the previous state 57 | component State[DATA_BYTES]; 58 | State[0] = HttpStateUpdate(); 59 | State[0].byte <== data[0]; 60 | State[0].parsing_start <== machine_state[0]; 61 | State[0].parsing_header <== machine_state[1]; 62 | State[0].parsing_field_name <== machine_state[2]; 63 | State[0].parsing_field_value <== machine_state[3]; 64 | State[0].parsing_body <== machine_state[4]; 65 | State[0].line_status <== machine_state[5]; 66 | for(var data_idx = 1; data_idx < DATA_BYTES; data_idx++) { 67 | State[data_idx] = HttpStateUpdate(); 68 | State[data_idx].byte <== data[data_idx]; 69 | State[data_idx].parsing_start <== State[data_idx - 1].next_parsing_start; 70 | State[data_idx].parsing_header <== State[data_idx - 1].next_parsing_header; 71 | State[data_idx].parsing_field_name <== State[data_idx - 1].next_parsing_field_name; 72 | State[data_idx].parsing_field_value <== State[data_idx - 1].next_parsing_field_value; 73 | State[data_idx].parsing_body <== State[data_idx - 1].next_parsing_body; 74 | State[data_idx].line_status <== State[data_idx - 1].next_line_status; 75 | } 76 | 77 | 78 | signal main_monomials[DATA_BYTES]; 79 | main_monomials[0] <== machine_state[7]; 80 | 81 | signal is_line_change[DATA_BYTES-1]; 82 | signal was_cleared[DATA_BYTES-1]; 83 | signal not_body_and_not_line_change[DATA_BYTES-1]; 84 | 85 | signal rescaled_or_was_cleared[DATA_BYTES-1]; 86 | for(var i = 0 ; i < DATA_BYTES - 1 ; i++) { 87 | is_line_change[i] <== Contains(2)(data[i + 1], [10, 13]); // capture if we hit an end line sequence 88 | was_cleared[i] <== IsZero()(main_monomials[i]); 89 | not_body_and_not_line_change[i] <== (1 - State[i + 1].parsing_body) * (1 - is_line_change[i]); 90 | rescaled_or_was_cleared[i] <== (main_monomials[i] * ciphertext_digest + was_cleared[i]); 91 | main_monomials[i + 1] <== not_body_and_not_line_change[i] * rescaled_or_was_cleared[i]; 92 | } 93 | 94 | signal is_match[DATA_BYTES]; 95 | signal contains[DATA_BYTES]; 96 | signal is_zero[DATA_BYTES]; 97 | signal monomial_is_zero[DATA_BYTES]; 98 | signal accum_prev[DATA_BYTES]; 99 | var num_matched = 0; 100 | signal line_digest[DATA_BYTES + 1]; 101 | // Set this to what the previous digest was 102 | line_digest[0] <== machine_state[6]; 103 | for(var i = 0 ; i < DATA_BYTES ; i++) { 104 | monomial_is_zero[i] <== IsZero()(main_monomials[i]); 105 | accum_prev[i] <== (1 - monomial_is_zero[i]) * line_digest[i]; 106 | line_digest[i+1] <== accum_prev[i] + data[i] * main_monomials[i]; 107 | is_zero[i] <== IsZero()(line_digest[i+1]); 108 | contains[i] <== Contains(MAX_NUMBER_OF_HEADERS + 1)(line_digest[i+1], main_digests); 109 | is_match[i] <== (1 - is_zero[i]) * contains[i]; 110 | num_matched += is_match[i]; 111 | } 112 | 113 | // BODY 114 | // step_in[6] is the (ciphertext digest ** body_ctr) 115 | signal body_ctr_is_zero <== IsEqual()([step_in[6], 0]); 116 | signal 
initial_pow_accumulation <== step_in[6] * ciphertext_digest; 117 | signal pow_accumulation <== initial_pow_accumulation + body_ctr_is_zero * State[0].parsing_body; // pow_accumulation = 0 if we are not in the body 118 | 119 | // log("pow_accumulation: ", pow_accumulation); 120 | signal body_monomials_pow_accumulation[DATA_BYTES]; // power of monomials for the body 121 | signal body_monomials[DATA_BYTES]; // power of monomials for the body 122 | signal body_ctr[DATA_BYTES]; // body counter 123 | signal body_switch[DATA_BYTES -1]; // switch to add the previous monomial or not 124 | signal body_digest[DATA_BYTES]; // body digest 125 | body_monomials[0] <== pow_accumulation; // (ciphertext_digest ** body_ctr) * State.parsing_body (0 if we are not in the body) 126 | body_ctr[0] <== body_ctr_is_zero * State[0].parsing_body + (1 - body_ctr_is_zero); // checks if we are in the body 127 | // log("body_ctr[0] = ", body_ctr[0]); 128 | // Set this to what the previous digest was 129 | body_digest[0] <== body_monomials[0] * zeroed_data[0]; 130 | for(var i = 0 ; i < DATA_BYTES - 1 ; i++) { 131 | // log("State[",i+1,"].parsing_body: ", State[i+1].parsing_body); 132 | body_ctr[i + 1] <== body_ctr[i] + State[i + 1].parsing_body * (1 - isPadding[i + 1]); 133 | body_switch[i] <== IsEqual()([body_ctr[i + 1], 1]); // check if we are in the body 134 | // log("body_switch[",i,"] = ", body_switch[i]); 135 | body_monomials_pow_accumulation[i] <== body_monomials[i] * ciphertext_digest + body_switch[i]; // add the previous monomial if we are in the body 136 | body_monomials[i + 1] <== (body_monomials[i] - body_monomials_pow_accumulation[i]) * isPadding[i + 1] + body_monomials_pow_accumulation[i]; // do not update monomials if padding 137 | body_digest[i + 1] <== body_digest[i] + body_monomials[i + 1] * zeroed_data[i + 1]; // add the monomial to the digest 138 | // log("body_digest[",i+1,"] = ", body_digest[i+1]); 139 | } 140 | 141 | // Note: the body digest computed here is only a diff; the other component was already added into step_in[0] by an earlier circuit 142 | step_out[0] <== step_in[0] - pt_digest + body_digest[DATA_BYTES - 1]; 143 | step_out[1] <== step_in[1]; 144 | step_out[2] <== ciphertext_digest_pow[DATA_BYTES]; 145 | // pass machine state to next iteration 146 | step_out[3] <== PolynomialDigest(8)( 147 | [State[DATA_BYTES - 1].next_parsing_start, 148 | State[DATA_BYTES - 1].next_parsing_header, 149 | State[DATA_BYTES - 1].next_parsing_field_name, 150 | State[DATA_BYTES - 1].next_parsing_field_value, 151 | State[DATA_BYTES - 1].next_parsing_body, 152 | State[DATA_BYTES - 1].next_line_status, 153 | line_digest[DATA_BYTES], 154 | main_monomials[DATA_BYTES - 1] * ciphertext_digest 155 | ], 156 | ciphertext_digest 157 | ); 158 | step_out[4] <== step_in[4]; 159 | step_out[5] <== step_in[5] - num_matched; // Matches are no longer asserted here; subtract so that later circuits can check the remaining count 160 | step_out[6] <== body_monomials[DATA_BYTES - 1]; 161 | 162 | step_out[7] <== 1; // TODO: can I continue this counter? 163 | step_out[8] <== 0; // TODO: This is a hack to make the circuit work.
We should remove this in the future 164 | for (var i = 9 ; i < PUBLIC_IO_LENGTH ; i++) { 165 | step_out[i] <== step_in[i]; 166 | } 167 | 168 | // log("next_parsing_start: ", State[DATA_BYTES - 1].next_parsing_start); 169 | // log("next_parsing_header: ", State[DATA_BYTES - 1].next_parsing_header); 170 | // log("next_parsing_field_name: ", State[DATA_BYTES - 1].next_parsing_field_name); 171 | // log("next_parsing_field_value: ", State[DATA_BYTES - 1].next_parsing_field_value); 172 | // log("next_parsing_body: ", State[DATA_BYTES - 1].next_parsing_body); 173 | // log("next_line_status: ", State[DATA_BYTES - 1].next_line_status); 174 | // log("line_digest: ", line_digest[DATA_BYTES]); 175 | // log("main_monomial: ", main_monomials[DATA_BYTES - 1] * ciphertext_digest); 176 | // log("body_digest: ", body_digest[DATA_BYTES - 1]); 177 | 178 | // for (var i = 0 ; i < PUBLIC_IO_LENGTH ; i++) { 179 | // log("step_out[",i,"] = ", step_out[i]); 180 | // } 181 | // log("xxxxx HTTP Verification Done xxxxx"); 182 | } 183 | -------------------------------------------------------------------------------- /circuits/json/extraction.circom: -------------------------------------------------------------------------------- 1 | pragma circom 2.1.9; 2 | 3 | include "../utils/bits.circom"; 4 | include "hash_machine.circom"; 5 | 6 | template JSONExtraction(DATA_BYTES, MAX_STACK_HEIGHT, PUBLIC_IO_LENGTH) { 7 | signal input data[DATA_BYTES]; 8 | signal input ciphertext_digest; 9 | signal input sequence_digest; // todo(sambhav): should sequence digest be 0 for first json circuit? 10 | signal input value_digest; 11 | signal input state[MAX_STACK_HEIGHT * 4 + 4]; 12 | 13 | signal input step_in[PUBLIC_IO_LENGTH]; 14 | signal output step_out[PUBLIC_IO_LENGTH]; 15 | 16 | //--------------------------------------------------------------------------------------------// 17 | 18 | // assertions: 19 | // step_in[5] === 0; // HTTP statements matched // TODO: either remove this or send a public io var 20 | signal input_state_digest <== PolynomialDigest(MAX_STACK_HEIGHT * 4 + 4)(state, ciphertext_digest); 21 | step_in[8] === input_state_digest; 22 | signal sequence_digest_hashed <== Poseidon(1)([sequence_digest]); 23 | step_in[9] === sequence_digest_hashed; 24 | 25 | 26 | component State[DATA_BYTES]; 27 | 28 | // Set up monomials for stack/tree digesting 29 | signal monomials[3 * MAX_STACK_HEIGHT]; 30 | monomials[0] <== 1; 31 | for(var i = 1 ; i < 3 * MAX_STACK_HEIGHT ; i++) { 32 | monomials[i] <== monomials[i - 1] * ciphertext_digest; 33 | } 34 | signal intermediate_digest[DATA_BYTES][3 * MAX_STACK_HEIGHT]; 35 | signal state_digest[DATA_BYTES]; 36 | 37 | var total_matches = 0; 38 | signal sequence_is_matched[DATA_BYTES]; 39 | signal value_is_matched[DATA_BYTES]; 40 | signal sequence_and_value_matched[DATA_BYTES]; 41 | for(var data_idx = 0; data_idx < DATA_BYTES; data_idx++) { 42 | if(data_idx == 0) { 43 | State[0] = StateUpdateHasher(MAX_STACK_HEIGHT); 44 | for(var i = 0; i < MAX_STACK_HEIGHT; i++) { 45 | State[0].stack[i] <== [state[i*2],state[i*2+1]]; 46 | State[0].tree_hash[i] <== [state[MAX_STACK_HEIGHT*2 + i*2],state[MAX_STACK_HEIGHT*2 + i*2 + 1]]; 47 | } 48 | State[0].byte <== data[0]; 49 | State[0].polynomial_input <== ciphertext_digest; 50 | State[0].monomial <== state[MAX_STACK_HEIGHT*4]; 51 | State[0].parsing_string <== state[MAX_STACK_HEIGHT*4 + 1]; 52 | State[0].parsing_primitive <== state[MAX_STACK_HEIGHT*4 + 2]; 53 | State[0].escaped <== state[MAX_STACK_HEIGHT*4 + 3]; 54 | } else { 55 | State[data_idx] = 
StateUpdateHasher(MAX_STACK_HEIGHT); 56 | State[data_idx].byte <== data[data_idx]; 57 | State[data_idx].polynomial_input <== ciphertext_digest; 58 | State[data_idx].stack <== State[data_idx - 1].next_stack; 59 | State[data_idx].tree_hash <== State[data_idx - 1].next_tree_hash; 60 | State[data_idx].monomial <== State[data_idx - 1].next_monomial; 61 | State[data_idx].parsing_string <== State[data_idx - 1].next_parsing_string; 62 | State[data_idx].parsing_primitive <== State[data_idx - 1].next_parsing_primitive; 63 | State[data_idx].escaped <== State[data_idx - 1].next_escaped; 64 | } 65 | 66 | // Digest the whole stack and key tree hash 67 | var accumulator = 0; 68 | for(var i = 0 ; i < MAX_STACK_HEIGHT ; i++) { 69 | intermediate_digest[data_idx][3 * i] <== State[data_idx].next_stack[i][0] * monomials[3 * i]; 70 | intermediate_digest[data_idx][3 * i + 1] <== State[data_idx].next_stack[i][1] * monomials[3 * i + 1]; 71 | intermediate_digest[data_idx][3 * i + 2] <== State[data_idx].next_tree_hash[i][0] * monomials[3 * i + 2]; 72 | accumulator += intermediate_digest[data_idx][3 * i] + intermediate_digest[data_idx][3 * i + 1] + intermediate_digest[data_idx][3 * i + 2]; 73 | } 74 | state_digest[data_idx] <== accumulator; 75 | sequence_is_matched[data_idx] <== IsEqual()([state_digest[data_idx], sequence_digest]); 76 | 77 | // Now check if the value digest appears 78 | var value_digest_in_stack = 0; 79 | for(var i = 0 ; i < MAX_STACK_HEIGHT ; i++) { 80 | // Only a single value can be present, and it is at index 1, so we can just accumulate 81 | value_digest_in_stack += State[data_idx].next_tree_hash[i][1]; 82 | } 83 | value_is_matched[data_idx] <== IsEqual()([value_digest, value_digest_in_stack]); 84 | sequence_and_value_matched[data_idx] <== sequence_is_matched[data_idx] * value_is_matched[data_idx]; 85 | total_matches += sequence_and_value_matched[data_idx]; 86 | 87 | // Debugging 88 | // log("State[", data_idx, "].byte =", State[data_idx].byte); 89 | // for(var i = 0; i { 5 | describe("qtr-round", () => { 6 | let circuit: WitnessTester<["in"], ["out"]>; 7 | it("should perform qtr-round", async () => { 8 | circuit = await circomkit.WitnessTester(`QR`, { 9 | file: "chacha20/chacha-qr", 10 | template: "QR", 11 | }); 12 | // Test case from RFC https://www.rfc-editor.org/rfc/rfc7539.html#section-2.1 13 | let input = [ 14 | hexToBits("0x11111111"), 15 | hexToBits("0x01020304"), 16 | hexToBits("0x9b8d6f43"), 17 | hexToBits("0x01234567") 18 | ]; 19 | let expected = [ 20 | hexToBits("0xea2a92f4"), 21 | hexToBits("0xcb1cf8ce"), 22 | hexToBits("0x4581472e"), 23 | hexToBits("0x5881c4bb") 24 | ]; 25 | await circuit.expectPass({ in: input }, { out: expected }); 26 | }); 27 | }); 28 | 29 | describe("full-round", () => { 30 | let circuit: WitnessTester<["in"], ["out"]>; 31 | it("should perform full round", async () => { 32 | circuit = await circomkit.WitnessTester(`Round`, { 33 | file: "chacha20/chacha-round", 34 | template: "Round", 35 | }); 36 | // Test case from RFC https://www.rfc-editor.org/rfc/rfc7539.html#section-2.1 37 | let input = [ 38 | hexToBits("61707865"), hexToBits("3320646e"), hexToBits("79622d32"), hexToBits("6b206574"), 39 | hexToBits("03020100"), hexToBits("07060504"), hexToBits("0b0a0908"), hexToBits("0f0e0d0c"), 40 | hexToBits("13121110"), hexToBits("17161514"), hexToBits("1b1a1918"), hexToBits("1f1e1d1c"), 41 | hexToBits("00000001"), hexToBits("09000000"), hexToBits("4a000000"), hexToBits("00000000") 42 | ]; 43 | let expected = [ 44 | hexToBits("e4e7f110"), hexToBits("15593bd1"),
hexToBits("1fdd0f50"), hexToBits("c47120a3"), 45 | hexToBits("c7f4d1c7"), hexToBits("0368c033"), hexToBits("9aaa2204"), hexToBits("4e6cd4c3"), 46 | hexToBits("466482d2"), hexToBits("09aa9f07"), hexToBits("05d7c214"), hexToBits("a2028bd9"), 47 | hexToBits("d19c12b5"), hexToBits("b94e16de"), hexToBits("e883d0cb"), hexToBits("4e3c50a2") 48 | ]; 49 | await circuit.expectPass({ in: input }, { out: expected }); 50 | }); 51 | }); 52 | 53 | // this is failing right now 54 | describe("2 block test", () => { 55 | let circuit: WitnessTester<["key", "nonce", "counter", "in"], ["out"]>; 56 | it("should perform encryption", async () => { 57 | circuit = await circomkit.WitnessTester(`ChaCha20`, { 58 | file: "chacha20/chacha20", 59 | template: "ChaCha20", 60 | params: [16] // number of 32-bit words in the key, 512 / 32 = 16 61 | }); 62 | // Test case from RCF https://www.rfc-editor.org/rfc/rfc7539.html#section-2.4.2 63 | // the input encoding here is not the most intuitive. inputs are serialized as little endian. 64 | // i.e. "e4e7f110" is serialized as "10 f1 e7 e4". So the way i am reading in inputs is 65 | // to ensure that every 32 bit word is byte reversed before being turned into bits. 66 | // i think this should be easy when we compute witness in rust. 67 | let test = { 68 | keyBytes: Buffer.from( 69 | [ 70 | 0x00, 0x01, 0x02, 0x03, 71 | 0x04, 0x05, 0x06, 0x07, 72 | 0x08, 0x09, 0x0a, 0x0b, 73 | 0x0c, 0x0d, 0x0e, 0x0f, 74 | 0x10, 0x11, 0x12, 0x13, 75 | 0x14, 0x15, 0x16, 0x17, 76 | 0x18, 0x19, 0x1a, 0x1b, 77 | 0x1c, 0x1d, 0x1e, 0x1f 78 | ] 79 | ), 80 | nonceBytes: Buffer.from( 81 | [ 82 | 0x00, 0x00, 0x00, 0x00, 83 | 0x00, 0x00, 0x00, 0x4a, 84 | 0x00, 0x00, 0x00, 0x00 85 | ] 86 | ), 87 | counter: 1, 88 | plaintextBytes: Buffer.from( 89 | [ 90 | 0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c, 91 | 0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73, 92 | 0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39, 0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63, 93 | 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f, 94 | ] 95 | ), 96 | ciphertextBytes: Buffer.from( 97 | [ 98 | 0x6e, 0x2e, 0x35, 0x9a, 0x25, 0x68, 0xf9, 0x80, 0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69, 0x81, 99 | 0xe9, 0x7e, 0x7a, 0xec, 0x1d, 0x43, 0x60, 0xc2, 0x0a, 0x27, 0xaf, 0xcc, 0xfd, 0x9f, 0xae, 0x0b, 100 | 0xf9, 0x1b, 0x65, 0xc5, 0x52, 0x47, 0x33, 0xab, 0x8f, 0x59, 0x3d, 0xab, 0xcd, 0x62, 0xb3, 0x57, 101 | 0x16, 0x39, 0xd6, 0x24, 0xe6, 0x51, 0x52, 0xab, 0x8f, 0x53, 0x0c, 0x35, 0x9f, 0x08, 0x61, 0xd8 102 | ] 103 | ) 104 | } 105 | const ciphertextBits = uintArray32ToBits(toUint32Array(test.ciphertextBytes)) 106 | const plaintextBits = uintArray32ToBits(toUint32Array(test.plaintextBytes)) 107 | const counterBits = uintArray32ToBits([test.counter])[0] 108 | await circuit.expectPass({ 109 | key: uintArray32ToBits(toUint32Array(test.keyBytes)), 110 | nonce: uintArray32ToBits(toUint32Array(test.nonceBytes)), 111 | counter: counterBits, 112 | in: plaintextBits, 113 | }, { out: ciphertextBits }); 114 | 115 | /// decryption since symmetric 116 | const w2 = await circuit.expectPass({ 117 | key: uintArray32ToBits(toUint32Array(test.keyBytes)), 118 | nonce: uintArray32ToBits(toUint32Array(test.nonceBytes)), 119 | counter: counterBits, 120 | in: ciphertextBits, 121 | }, { out: plaintextBits }); 122 | }); 123 | }); 124 | }); -------------------------------------------------------------------------------- 
/circuits/test/common/chacha.ts: -------------------------------------------------------------------------------- 1 | export function to_nonce(iv: Uint8Array, seq: number): Uint8Array { 2 | let nonce = new Uint8Array(12); 3 | nonce.fill(0); 4 | 5 | // nonce[4..].copy_from_slice(&seq.to_be_bytes()); 6 | const seqBytes = new Uint8Array(new BigUint64Array([BigInt(seq)]).buffer).reverse(); 7 | nonce.set(seqBytes, 4); 8 | 9 | nonce.forEach((_, i) => { 10 | nonce[i] ^= iv[i]; 11 | }); 12 | 13 | return nonce; 14 | } -------------------------------------------------------------------------------- /circuits/test/common/http.ts: -------------------------------------------------------------------------------- 1 | import { PolynomialDigest, toByte } from "."; 2 | import { join } from "path"; 3 | import { readFileSync } from "fs"; 4 | 5 | export function defaultHttpMachineState(polynomial_input: bigint): [number[], bigint] { 6 | let state = Array(8).fill(0); 7 | state[0] = 1; 8 | state[7] = 1; 9 | 10 | let digest = PolynomialDigest(state, polynomial_input, BigInt(0)); 11 | return [state, digest]; 12 | } 13 | 14 | export function readLockFile<T>(filename: string): T { 15 | const filePath = join(__dirname, "..", "..", "..", "examples", "http", "lockfile", filename); 16 | const jsonString = readFileSync(filePath, 'utf-8'); 17 | const jsonData = JSON.parse(jsonString); 18 | return jsonData; 19 | } 20 | 21 | export function getHeaders(data: Request | Response): [string, string][] { 22 | const headers: [string, string][] = []; 23 | let i = 1; 24 | while (true) { 25 | const nameKey = `headerName${i}`; 26 | const valueKey = `headerValue${i}`; 27 | if (nameKey in data && valueKey in data) { 28 | headers.push([data[nameKey], data[valueKey]]); 29 | i++; 30 | } else { 31 | break; 32 | } 33 | } 34 | return headers; 35 | } 36 | 37 | export interface Request { 38 | method: string, 39 | target: string, 40 | version: string, 41 | [key: string]: string, 42 | } 43 | 44 | export interface Response { 45 | version: string, 46 | status: string, 47 | message: string, 48 | [key: string]: string, 49 | } 50 | 51 | export function readHTTPInputFile(filename: string) { 52 | const filePath = join(__dirname, "..", "..", "..", "examples", "http", filename); 53 | let data = readFileSync(filePath, 'utf-8'); 54 | 55 | let input = toByte(data); 56 | 57 | // Split headers and body, accounting for possible lack of body 58 | const parts = data.split('\r\n\r\n'); 59 | const headerSection = parts[0]; 60 | const bodySection = parts.length > 1 ? parts[1] : ''; 61 | 62 | // Function to parse headers into a dictionary 63 | function parseHeaders(headerLines: string[]) { 64 | const headers: { [id: string]: string } = {}; 65 | 66 | headerLines.forEach(line => { 67 | const [key, value] = line.split(/:\s(.+)/); 68 | if (key) headers[key] = value ?
value : ''; 69 | }); 70 | 71 | return headers; 72 | } 73 | 74 | // Parse the headers 75 | const headerLines = headerSection.split('\r\n'); 76 | const initialLine = headerLines[0].split(' '); 77 | const headers = parseHeaders(headerLines.slice(1)); 78 | 79 | // Parse the body, if JSON response 80 | let responseBody = {}; 81 | if (headers["content-type"] && headers["content-type"].startsWith("application/json") && bodySection) { 82 | try { 83 | responseBody = JSON.parse(bodySection); 84 | } catch (e) { 85 | console.error("Failed to parse JSON body:", e); 86 | } 87 | } 88 | 89 | // Combine headers and body into an object 90 | return { 91 | input: input, 92 | initialLine: initialLine, 93 | headers: headers, 94 | body: responseBody, 95 | bodyBytes: toByte(bodySection || ''), 96 | }; 97 | } -------------------------------------------------------------------------------- /circuits/test/common/poseidon.ts: -------------------------------------------------------------------------------- 1 | import { poseidon1, poseidon10, poseidon11, poseidon12, poseidon3, poseidon4, poseidon5, poseidon6, poseidon7, poseidon8, poseidon9, poseidon13, poseidon14, poseidon15, poseidon16, poseidon2 } from "poseidon-lite"; 2 | 3 | export function PoseidonModular(input: Array<bigint>): bigint { 4 | let chunks = Math.ceil(input.length / 16); 5 | let result: bigint = BigInt(0); 6 | 7 | for (var i = 0; i < chunks; i++) { 8 | let chunk_hash: bigint = BigInt(0); 9 | if (i == chunks - 1) { 10 | switch (input.length % 16) { 11 | case 0: 12 | chunk_hash = poseidon16(input.slice(i * 16, (i + 1) * 16)); 13 | break; 14 | case 1: 15 | chunk_hash = poseidon1(input.slice(i * 16, (i + 1) * 16)); 16 | break; 17 | case 2: 18 | chunk_hash = poseidon2(input.slice(i * 16, (i + 1) * 16)); 19 | break; 20 | case 3: 21 | chunk_hash = poseidon3(input.slice(i * 16, (i + 1) * 16)); 22 | break; 23 | case 4: 24 | chunk_hash = poseidon4(input.slice(i * 16, (i + 1) * 16)); 25 | break; 26 | case 5: 27 | chunk_hash = poseidon5(input.slice(i * 16, (i + 1) * 16)); 28 | break; 29 | case 6: 30 | chunk_hash = poseidon6(input.slice(i * 16, (i + 1) * 16)); 31 | break; 32 | case 7: 33 | chunk_hash = poseidon7(input.slice(i * 16, (i + 1) * 16)); 34 | break; 35 | case 8: 36 | chunk_hash = poseidon8(input.slice(i * 16, (i + 1) * 16)); 37 | break; 38 | case 9: 39 | chunk_hash = poseidon9(input.slice(i * 16, (i + 1) * 16)); 40 | break; 41 | case 10: 42 | chunk_hash = poseidon10(input.slice(i * 16, (i + 1) * 16)); 43 | break; 44 | case 11: 45 | chunk_hash = poseidon11(input.slice(i * 16, (i + 1) * 16)); 46 | break; 47 | case 12: 48 | chunk_hash = poseidon12(input.slice(i * 16, (i + 1) * 16)); 49 | break; 50 | case 13: 51 | chunk_hash = poseidon13(input.slice(i * 16, (i + 1) * 16)); 52 | break; 53 | case 14: 54 | chunk_hash = poseidon14(input.slice(i * 16, (i + 1) * 16)); 55 | break; 56 | case 15: 57 | chunk_hash = poseidon15(input.slice(i * 16, (i + 1) * 16)); 58 | break; 59 | 60 | default: 61 | break; 62 | } 63 | } else { 64 | chunk_hash = poseidon16(input.slice(i * 16, (i + 1) * 16)); 65 | } 66 | if (i == 0) { 67 | result = chunk_hash; 68 | } else { 69 | result = poseidon2([result, chunk_hash]); 70 | } 71 | } 72 | 73 | return result; 74 | } 75 | 76 | export function DataHasher(input: number[], seed: bigint): bigint { 77 | let hashes: bigint[] = [seed]; // Initialize the hash chain with the seed 78 | 79 | for (let i = 0; i < Math.ceil(input.length / 16); i++) { 80 | let packedInput = BigInt(0); 81 | let isPaddedChunk = 0; 82 | 83 | // Allow for using unpadded input: 84 | let
innerLoopLength = 16; 85 | let lengthRemaining = input.length - 16 * i; 86 | if (lengthRemaining < 16) { 87 | innerLoopLength = lengthRemaining; 88 | } 89 | // Pack 16 bytes into a single number 90 | for (let j = 0; j < innerLoopLength; j++) { 91 | if (input[16 * i + j] != -1) { 92 | packedInput += BigInt(input[16 * i + j]) * BigInt(2 ** (8 * j)); 93 | } else { 94 | isPaddedChunk += 1; 95 | } 96 | } 97 | 98 | // Compute next hash using previous hash and packed input, but if the whole block was padding, don't do it 99 | if (isPaddedChunk == innerLoopLength) { 100 | hashes.push(hashes[i]); 101 | } else { 102 | hashes.push(PoseidonModular([hashes[i], packedInput])); 103 | } 104 | } 105 | 106 | // Return the last hash 107 | return hashes[Math.ceil(input.length / 16)]; 108 | } 109 | -------------------------------------------------------------------------------- /circuits/test/http/parser.test.ts: -------------------------------------------------------------------------------- 1 | import { circomkit, WitnessTester } from "../common"; 2 | 3 | 4 | const HTTP_BYTES = [ 5 | 72, 84, 84, 80, 47, 49, 46, 49, 32, 50, 48, 48, 32, 79, 75, 13, 10, 67, 111, 110, 110, 101, 99, 6 | 116, 105, 111, 110, 58, 32, 99, 108, 111, 115, 101, 13, 10, 67, 111, 110, 116, 101, 110, 116, 7 | 45, 76, 101, 110, 103, 116, 104, 58, 32, 50, 50, 13, 10, 67, 97, 99, 104, 101, 45, 67, 111, 8 | 110, 116, 114, 111, 108, 58, 32, 109, 97, 120, 45, 97, 103, 101, 61, 51, 48, 48, 13, 10, 67, 9 | 111, 110, 116, 101, 110, 116, 45, 83, 101, 99, 117, 114, 105, 116, 121, 45, 80, 111, 108, 105, 10 | 99, 121, 58, 32, 100, 101, 102, 97, 117, 108, 116, 45, 115, 114, 99, 32, 39, 110, 111, 110, 11 | 101, 39, 59, 32, 115, 116, 121, 108, 101, 45, 115, 114, 99, 32, 39, 117, 110, 115, 97, 102, 12 | 101, 45, 105, 110, 108, 105, 110, 101, 39, 59, 32, 115, 97, 110, 100, 98, 111, 120, 13, 10, 67, 13 | 111, 110, 116, 101, 110, 116, 45, 84, 121, 112, 101, 58, 32, 116, 101, 120, 116, 47, 112, 108, 14 | 97, 105, 110, 59, 32, 99, 104, 97, 114, 115, 101, 116, 61, 117, 116, 102, 45, 56, 13, 10, 69, 15 | 84, 97, 103, 58, 32, 34, 101, 48, 101, 54, 53, 49, 48, 99, 49, 102, 99, 49, 51, 98, 51, 97, 54, 16 | 51, 97, 99, 98, 99, 48, 54, 49, 53, 101, 101, 48, 55, 97, 52, 57, 53, 50, 56, 55, 51, 97, 56, 17 | 100, 97, 55, 55, 48, 50, 55, 100, 48, 48, 52, 49, 50, 102, 99, 99, 102, 49, 97, 53, 99, 101, 18 | 50, 57, 34, 13, 10, 83, 116, 114, 105, 99, 116, 45, 84, 114, 97, 110, 115, 112, 111, 114, 116, 19 | 45, 83, 101, 99, 117, 114, 105, 116, 121, 58, 32, 109, 97, 120, 45, 97, 103, 101, 61, 51, 49, 20 | 53, 51, 54, 48, 48, 48, 13, 10, 88, 45, 67, 111, 110, 116, 101, 110, 116, 45, 84, 121, 112, 21 | 101, 45, 79, 112, 116, 105, 111, 110, 115, 58, 32, 110, 111, 115, 110, 105, 102, 102, 13, 10, 22 | 88, 45, 70, 114, 97, 109, 101, 45, 79, 112, 116, 105, 111, 110, 115, 58, 32, 100, 101, 110, 23 | 121, 13, 10, 88, 45, 88, 83, 83, 45, 80, 114, 111, 116, 101, 99, 116, 105, 111, 110, 58, 32, 24 | 49, 59, 32, 109, 111, 100, 101, 61, 98, 108, 111, 99, 107, 13, 10, 88, 45, 71, 105, 116, 72, 25 | 117, 98, 45, 82, 101, 113, 117, 101, 115, 116, 45, 73, 100, 58, 32, 55, 56, 51, 49, 58, 51, 50, 26 | 55, 52, 49, 52, 58, 49, 50, 70, 57, 69, 54, 58, 49, 65, 51, 51, 67, 50, 58, 54, 55, 54, 52, 54, 27 | 56, 70, 49, 13, 10, 65, 99, 99, 101, 112, 116, 45, 82, 97, 110, 103, 101, 115, 58, 32, 98, 121, 28 | 116, 101, 115, 13, 10, 68, 97, 116, 101, 58, 32, 84, 104, 117, 44, 32, 49, 57, 32, 68, 101, 99, 29 | 32, 50, 48, 50, 52, 32, 50, 49, 58, 51, 53, 58, 53, 57, 32, 71, 77, 84, 13, 10, 86, 105, 97, 30 | 
58, 32, 49, 46, 49, 32, 118, 97, 114, 110, 105, 115, 104, 13, 10, 88, 45, 83, 101, 114, 118, 31 | 101, 100, 45, 66, 121, 58, 32, 99, 97, 99, 104, 101, 45, 104, 121, 100, 49, 49, 48, 48, 48, 51, 32 | 52, 45, 72, 89, 68, 13, 10, 88, 45, 67, 97, 99, 104, 101, 58, 32, 72, 73, 84, 13, 10, 88, 45, 33 | 67, 97, 99, 104, 101, 45, 72, 105, 116, 115, 58, 32, 48, 13, 10, 88, 45, 84, 105, 109, 101, 34 | 114, 58, 32, 83, 49, 55, 51, 52, 54, 52, 52, 49, 54, 48, 46, 53, 54, 48, 57, 53, 51, 44, 86, 35 | 83, 48, 44, 86, 69, 49, 13, 10, 86, 97, 114, 121, 58, 32, 65, 117, 116, 104, 111, 114, 105, 36 | 122, 97, 116, 105, 111, 110, 44, 65, 99, 99, 101, 112, 116, 45, 69, 110, 99, 111, 100, 105, 37 | 110, 103, 44, 79, 114, 105, 103, 105, 110, 13, 10, 65, 99, 99, 101, 115, 115, 45, 67, 111, 110, 38 | 116, 114, 111, 108, 45, 65, 108, 108, 111, 119, 45, 79, 114, 105, 103, 105, 110, 58, 32, 42, 39 | 13, 10, 67, 114, 111, 115, 115, 45, 79, 114, 105, 103, 105, 110, 45, 82, 101, 115, 111, 117, 40 | 114, 99, 101, 45, 80, 111, 108, 105, 99, 121, 58, 32, 99, 114, 111, 115, 115, 45, 111, 114, 41 | 105, 103, 105, 110, 13, 10, 88, 45, 70, 97, 115, 116, 108, 121, 45, 82, 101, 113, 117, 101, 42 | 115, 116, 45, 73, 68, 58, 32, 50, 48, 97, 101, 102, 56, 55, 48, 50, 53, 102, 54, 56, 52, 98, 43 | 101, 55, 54, 50, 53, 55, 102, 49, 53, 98, 102, 102, 53, 97, 55, 57, 50, 97, 99, 49, 53, 97, 97, 44 | 100, 50, 13, 10, 69, 120, 112, 105, 114, 101, 115, 58, 32, 84, 104, 117, 44, 32, 49, 57, 32, 45 | 68, 101, 99, 32, 50, 48, 50, 52, 32, 50, 49, 58, 52, 48, 58, 53, 57, 32, 71, 77, 84, 13, 10, 46 | 83, 111, 117, 114, 99, 101, 45, 65, 103, 101, 58, 32, 49, 53, 51, 13, 10, 13, 10, 123, 10, 32, 47 | 32, 34, 104, 101, 108, 108, 111, 34, 58, 32, 34, 119, 111, 114, 108, 100, 34, 10, 125, 48 | ]; 49 | 50 | // HTTP/1.1 200 OK 51 | // Connection: close 52 | // Content-Length: 22 53 | // Cache-Control: max-age=300 54 | // Content-Security-Policy: default-src 'none'; style-src 'unsafe-inline'; sandbox 55 | // Content-Type: text/plain; charset=utf-8 56 | // ETag: "e0e6510c1fc13b3a63acbc0615ee07a4952873a8da77027d00412fccf1a5ce29" 57 | // Strict-Transport-Security: max-age=31536000 58 | // X-Content-Type-Options: nosniff 59 | // X-Frame-Options: deny 60 | // X-XSS-Protection: 1; mode=block 61 | // X-GitHub-Request-Id: 7831:327414:12F9E6:1A33C2:676468F1 62 | // Accept-Ranges: bytes 63 | // Date: Thu, 19 Dec 2024 21:35:59 GMT 64 | // Via: 1.1 varnish 65 | // X-Served-By: cache-hyd1100034-HYD 66 | // X-Cache: HIT 67 | // X-Cache-Hits: 0 68 | // X-Timer: S1734644160.560953,VS0,VE1 69 | // Vary: Authorization,Accept-Encoding,Origin 70 | // Access-Control-Allow-Origin: * 71 | // Cross-Origin-Resource-Policy: cross-origin 72 | // X-Fastly-Request-ID: 20aef87025f684be76257f15bff5a792ac15aad2 73 | // Expires: Thu, 19 Dec 2024 21:40:59 GMT 74 | // Source-Age: 153 75 | 76 | // { 77 | // "hello": "world" 78 | // } 79 | 80 | describe("HTTP Parser", async () => { 81 | let HTTPParser: WitnessTester<["data"], []>; 82 | before(async () => { 83 | HTTPParser = await circomkit.WitnessTester("http_nivc", { 84 | file: "http/parser", 85 | template: "Parser", 86 | params: [HTTP_BYTES.length] 87 | }); 88 | }); 89 | 90 | it("witness: example", async () => { 91 | await HTTPParser.expectPass({ 92 | data: HTTP_BYTES, 93 | }); 94 | }); 95 | 96 | }); -------------------------------------------------------------------------------- /circuits/test/json/index.ts: -------------------------------------------------------------------------------- 1 | // constants.ts 2 | 3 | export const 
Delimiters = { 4 | // ASCII char: `{` 5 | START_BRACE: 123, 6 | // ASCII char: `}` 7 | END_BRACE: 125, 8 | // ASCII char `[` 9 | START_BRACKET: 91, 10 | // ASCII char `]` 11 | END_BRACKET: 93, 12 | // ASCII char `"` 13 | QUOTE: 34, 14 | // ASCII char `:` 15 | COLON: 58, 16 | // ASCII char `,` 17 | COMMA: 44, 18 | }; 19 | 20 | export const WhiteSpace = { 21 | // ASCII char: `\n` 22 | NEWLINE: 10, 23 | // ASCII char: ` ` 24 | SPACE: 32, 25 | }; 26 | 27 | export const Numbers = { 28 | ZERO: 48, 29 | ONE: 49, 30 | TWO: 50, 31 | THREE: 51, 32 | FOUR: 52, 33 | FIVE: 53, 34 | SIX: 54, 35 | SEVEN: 55, 36 | EIGHT: 56, 37 | NINE: 57 38 | } 39 | 40 | export const Escape = { 41 | // ASCII char: `\` 42 | BACKSLASH: 92, 43 | }; 44 | 45 | export const INITIAL_IN = { 46 | byte: 0, 47 | stack: [[0, 0], [0, 0], [0, 0], [0, 0]], 48 | parsing_string: 0, 49 | parsing_number: 0, 50 | escaped: 0, 51 | }; 52 | 53 | export const INITIAL_OUT = { 54 | next_stack: INITIAL_IN.stack, 55 | next_parsing_string: INITIAL_IN.parsing_string, 56 | next_parsing_number: INITIAL_IN.parsing_number, 57 | next_escaped: INITIAL_IN.escaped 58 | }; -------------------------------------------------------------------------------- /circuits/test/json/parser.test.ts: -------------------------------------------------------------------------------- 1 | import { circomkit, WitnessTester, readJSONInputFile } from "../common"; 2 | 3 | describe("JSON Parser", () => { 4 | let circuit: WitnessTester<["data"]>; 5 | 6 | it(`array only input`, async () => { 7 | let filename = "array_only"; 8 | let [input, keyUnicode, output] = readJSONInputFile(`${filename}.json`, []); 9 | 10 | circuit = await circomkit.WitnessTester(`Parser`, { 11 | file: "json/parser", 12 | template: "Parser", 13 | params: [input.length, 2], 14 | }); 15 | 16 | await circuit.expectPass({ 17 | data: input 18 | }); 19 | }); 20 | 21 | it(`object input`, async () => { 22 | let filename = "value_object"; 23 | let [input, keyUnicode, output] = readJSONInputFile(`${filename}.json`, []); 24 | 25 | circuit = await circomkit.WitnessTester(`Parser`, { 26 | file: "json/parser", 27 | template: "Parser", 28 | params: [input.length, 3], 29 | }); 30 | 31 | await circuit.expectPass({ 32 | data: input 33 | }); 34 | }); 35 | 36 | it(`string_escape input`, async () => { 37 | let filename = "string_escape"; 38 | let [input, keyUnicode, output] = readJSONInputFile(`${filename}.json`, []); 39 | 40 | circuit = await circomkit.WitnessTester(`Parser`, { 41 | file: "json/parser", 42 | template: "Parser", 43 | params: [input.length, 3], 44 | }); 45 | 46 | await circuit.expectPass({ 47 | data: input 48 | }); 49 | }); 50 | }) -------------------------------------------------------------------------------- /circuits/test/json/parsing_types.test.ts: -------------------------------------------------------------------------------- 1 | import { circomkit, WitnessTester, generateDescription } from "../common"; 2 | import { Delimiters, WhiteSpace, INITIAL_IN, INITIAL_OUT } from '.'; 3 | 4 | describe("StateUpdate", () => { 5 | let circuit: WitnessTester< 6 | ["byte", "stack", "parsing_string", "parsing_number"], 7 | ["next_stack", "next_parsing_string", "next_parsing_number"] 8 | >; 9 | 10 | function generatePassCase(input: any, expected: any, desc: string) { 11 | const description = generateDescription(input); 12 | 13 | it(`(valid) witness: ${description}\n${desc}`, async () => { 14 | await circuit.expectPass(input, expected); 15 | }); 16 | } 17 | 18 | before(async () => { 19 | circuit = await 
circomkit.WitnessTester(`StateUpdate`, { 20 | file: "json/machine", 21 | template: "StateUpdate", 22 | params: [4], 23 | }); 24 | }); 25 | 26 | //-TEST_1----------------------------------------------------------// 27 | // init: ZEROS then read `do_nothing` byte 28 | // expect: ZEROS 29 | generatePassCase(INITIAL_IN, INITIAL_OUT, ">>>> `NUL` read"); 30 | 31 | 32 | //-TEST_2----------------------------------------------------------// 33 | // state: stack == [[1, 0], [0, 0], [0, 0], [0, 0]] 34 | // read: `"` 35 | // expect: parsing_string --> 1 36 | let in_object_find_key = { ...INITIAL_IN }; 37 | in_object_find_key.stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 38 | in_object_find_key.byte = Delimiters.QUOTE; 39 | let in_object_find_key_out = { ...INITIAL_OUT }; 40 | in_object_find_key_out.next_stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 41 | in_object_find_key_out.next_parsing_string = 1; 42 | generatePassCase(in_object_find_key, 43 | in_object_find_key_out, 44 | ">>>> `\"` read" 45 | ); 46 | 47 | //-TEST_3----------------------------------------------------------// 48 | // state: stack = [[1, 0], [0, 0], [0, 0], [0, 0]], parsing_string == 1 49 | // read: ` ` 50 | // expect: NIL 51 | let in_key = { ...INITIAL_IN }; 52 | in_key.stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 53 | in_key.parsing_string = 1; 54 | in_key.byte = WhiteSpace.SPACE; 55 | let in_key_out = { ...INITIAL_OUT }; 56 | in_key_out.next_stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 57 | in_key_out.next_parsing_string = 1; 58 | generatePassCase(in_key, in_key_out, ">>>> ` ` read"); 59 | 60 | //-TEST_4----------------------------------------------------------// 61 | // init: stack == [[1, 0], [0, 0], [0, 0], [0, 0]] 62 | // read: `"` 63 | // expect: parsing_string --> 0 64 | // 65 | let in_key_to_exit = { ...INITIAL_IN }; 66 | in_key_to_exit.stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 67 | in_key_to_exit.parsing_string = 1 68 | in_key_to_exit.byte = Delimiters.QUOTE; 69 | let in_key_to_exit_out = { ...INITIAL_OUT }; 70 | in_key_to_exit_out.next_stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 71 | generatePassCase(in_key_to_exit, in_key_to_exit_out, "`\"` read"); 72 | 73 | //-TEST_5----------------------------------------------------------// 74 | // state: stack == [[1, 1], [0, 0], [0, 0], [0, 0]] 75 | // read: `"` 76 | // expect: parsing_string --> 1 77 | let in_tree_find_value = { ...INITIAL_IN }; 78 | in_tree_find_value.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 79 | in_tree_find_value.byte = Delimiters.QUOTE; 80 | let in_tree_find_value_out = { ...INITIAL_OUT }; 81 | in_tree_find_value_out.next_stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 82 | in_tree_find_value_out.next_parsing_string = 1; 83 | generatePassCase(in_tree_find_value, 84 | in_tree_find_value_out, 85 | ">>>> `\"` read" 86 | ); 87 | 88 | //-TEST_6----------------------------------------------------------// 89 | // state: stack == [[1, 1], [0, 0], [0, 0], [0, 0]];, parsing_string == 1 90 | // read: `"` 91 | // expect: parsing_string == 0, 92 | let in_value_to_exit = { ...INITIAL_IN }; 93 | in_value_to_exit.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 94 | in_value_to_exit.parsing_string = 1; 95 | in_value_to_exit.byte = Delimiters.QUOTE; 96 | let in_value_to_exit_out = { ...INITIAL_OUT }; 97 | in_value_to_exit_out.next_stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 98 | generatePassCase(in_value_to_exit, 99 | in_value_to_exit_out, 100 | ">>>> `\"` is read" 101 | ); 102 | 103 | }); 104 | 105 | 106 | 107 | -------------------------------------------------------------------------------- 
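The stack transitions exercised in the tests above, and in stack.test.ts below, can be summarized with a small reference model. This is a hedged sketch for intuition only, not code from this repo: it assumes the encoding visible in the test vectors ([1, 0] = inside an object parsing a key, [1, 1] = inside an object parsing a value, [2, i] = inside an array at element i, [0, 0] = empty slot), and it ignores the parsing_string/parsing_number flags as well as the overflow and underflow constraints the circuit enforces.

type StackEntry = [number, number];

// Hypothetical reference model of StateUpdate's stack rewriting; not the circuit itself.
function nextStackSketch(stack: StackEntry[], byte: number): StackEntry[] {
  const s = stack.map((e) => [...e] as StackEntry);
  let top = -1; // index of the highest occupied slot, -1 when the stack is empty
  for (let i = 0; i < s.length; i++) if (s[i][0] !== 0) top = i;
  if (byte === 123) s[top + 1] = [1, 0];                                // `{` pushes an object frame
  else if (byte === 91) s[top + 1] = [2, 0];                            // `[` pushes an array frame
  else if ((byte === 125 || byte === 93) && top >= 0) s[top] = [0, 0];  // `}` or `]` pops a frame
  else if (byte === 58 && top >= 0) s[top] = [1, 1];                    // `:` moves an object to value parsing
  else if (byte === 44 && top >= 0)                                     // `,` resets an object to key parsing,
    s[top] = s[top][0] === 1 ? [1, 0] : [2, s[top][1] + 1];             // or advances an array index
  return s;
}

For example, nextStackSketch([[1, 0], [0, 0], [0, 0], [0, 0]], 58) returns [[1, 1], [0, 0], [0, 0], [0, 0]], matching TEST_6 in stack.test.ts below.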
/circuits/test/json/stack.test.ts: -------------------------------------------------------------------------------- 1 | import { circomkit, WitnessTester, generateDescription } from "../common"; 2 | import { Delimiters, INITIAL_IN, INITIAL_OUT } from '.'; 3 | 4 | describe("GetTopOfStack", () => { 5 | let circuit: WitnessTester<["stack"], ["value", "pointer"]>; 6 | before(async () => { 7 | circuit = await circomkit.WitnessTester(`GetTopOfStack`, { 8 | file: "json/machine", 9 | template: "GetTopOfStack", 10 | params: [4], 11 | }); 12 | }); 13 | 14 | function generatePassCase(input: any, expected: any) { 15 | const description = generateDescription(input); 16 | 17 | it(`(valid) witness: ${description}`, async () => { 18 | await circuit.expectPass(input, expected); 19 | }); 20 | } 21 | 22 | generatePassCase({ stack: [[1, 0], [2, 0], [3, 1], [4, 2]] }, { value: [4, 2], pointer: 4 }); 23 | 24 | generatePassCase({ stack: [[1, 0], [2, 1], [0, 0], [0, 0]] }, { value: [2, 1], pointer: 2 }); 25 | 26 | generatePassCase({ stack: [[0, 0], [0, 0], [0, 0], [0, 0]] }, { value: [0, 0], pointer: 0 }); 27 | }); 28 | 29 | describe("StateUpdate :: RewriteStack", () => { 30 | let circuit: WitnessTester< 31 | ["byte", "stack", "parsing_string", "parsing_number"], 32 | ["next_stack", "next_parsing_string", "next_parsing_number"] 33 | >; 34 | before(async () => { 35 | circuit = await circomkit.WitnessTester(`StateUpdate`, { 36 | file: "json/machine", 37 | template: "StateUpdate", 38 | params: [4], 39 | }); 40 | }); 41 | 42 | function generatePassCase(input: any, expected: any, desc: string) { 43 | const description = generateDescription(input); 44 | 45 | it(`(valid) witness: ${description}\n${desc}`, async () => { 46 | await circuit.expectPass(input, expected); 47 | }); 48 | } 49 | 50 | function generateFailCase(input: any, desc: string) { 51 | const description = generateDescription(input); 52 | 53 | it(`(invalid) witness: ${description}\n${desc}`, async () => { 54 | await circuit.expectFail(input); 55 | }); 56 | } 57 | 58 | //-TEST_1----------------------------------------------------------// 59 | // init: stack == [[0, 0], [0, 0], [0, 0], [0, 0]] 60 | // read: `{` 61 | // expect: stack --> [[1, 0], [0, 0], [0, 0], [0, 0]] 62 | let read_start_brace = { ...INITIAL_IN }; 63 | read_start_brace.byte = Delimiters.START_BRACE; 64 | let read_start_brace_out = { ...INITIAL_OUT }; 65 | read_start_brace_out.next_stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 66 | generatePassCase(read_start_brace, 67 | read_start_brace_out, 68 | ">>>> `{` read" 69 | ); 70 | 71 | //-TEST_2----------------------------------------------------------// 72 | // state: stack == [[1, 0], [0, 0], [0, 0], [0, 0]] 73 | // read: `{` 74 | // expect: stack --> [[1, 0], [1, 0], [0, 0], [0, 0]] 75 | let in_object = { ...INITIAL_IN }; 76 | in_object.stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 77 | in_object.byte = Delimiters.START_BRACE; 78 | let in_object_out = { ...INITIAL_OUT }; 79 | in_object_out.next_stack = [[1, 0], [1, 0], [0, 0], [0, 0]]; 80 | generatePassCase(in_object, in_object_out, ">>>> `{` read"); 81 | 82 | //-TEST_3----------------------------------------------------------// 83 | // state: stack == [[1, 0], [0, 0], [0, 0], [0, 0]] 84 | // read: `}` 85 | // expect: stack --> [[0, 0], [0, 0], [0, 0], [0, 0]] 86 | let in_object_to_leave = { ...INITIAL_IN }; 87 | in_object_to_leave.stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 88 | in_object_to_leave.byte = Delimiters.END_BRACE; 89 | let in_object_to_leave_out = { ...INITIAL_OUT }; 90 |
generatePassCase(in_object_to_leave, 91 | in_object_to_leave_out, 92 | ">>>> `}` read" 93 | ); 94 | 95 | //-TEST_4----------------------------------------------------------// 96 | // init: stack == [[1, 0], [0, 0], [0, 0], [0, 0]] 97 | // read: `[` 98 | // expect: stack --> [[1, 0], [2, 0], [0, 0], [0, 0]] 99 | let in_object_to_read_start_bracket = { ...INITIAL_IN }; 100 | in_object_to_read_start_bracket.byte = Delimiters.START_BRACKET; 101 | in_object_to_read_start_bracket.stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 102 | let in_object_to_read_start_bracket_out = { ...INITIAL_OUT }; 103 | in_object_to_read_start_bracket_out.next_stack = [[1, 0], [2, 0], [0, 0], [0, 0]]; 104 | generatePassCase(in_object_to_read_start_bracket, 105 | in_object_to_read_start_bracket_out, 106 | ">>>> `[` read" 107 | ); 108 | 109 | //-TEST_5----------------------------------------------------------// 110 | // init: stack == [[1, 0], [2, 0], [0, 0], [0, 0]] 111 | // read: `]` 112 | // expect: stack --> [[1, 0], [0, 0], [0, 0], [0, 0]] 113 | let in_object_and_array = { ...INITIAL_IN }; 114 | in_object_and_array.byte = Delimiters.END_BRACKET; 115 | in_object_and_array.stack = [[1, 0], [2, 0], [0, 0], [0, 0]]; 116 | let in_object_and_array_out = { ...INITIAL_OUT }; 117 | in_object_and_array_out.next_stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 118 | generatePassCase(in_object_and_array, 119 | in_object_and_array_out, 120 | ">>>> `]` read" 121 | ); 122 | 123 | //-TEST_6-----------------------------------------------------------// 124 | // state: stack == [[1, 0], [0, 0], [0, 0], [0, 0]] 125 | // read: `:` 126 | // expect: stack --> [[1, 1], [0, 0], [0, 0], [0, 0]] 127 | let parsed_key_wait_to_parse_value = { ...INITIAL_IN }; 128 | parsed_key_wait_to_parse_value.stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 129 | parsed_key_wait_to_parse_value.byte = Delimiters.COLON; 130 | let parsed_key_wait_to_parse_value_out = { ...INITIAL_OUT }; 131 | parsed_key_wait_to_parse_value_out.next_stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 132 | generatePassCase(parsed_key_wait_to_parse_value, 133 | parsed_key_wait_to_parse_value_out, 134 | ">>>> `:` read" 135 | ); 136 | 137 | //-TEST_7----------------------------------------------------------// 138 | // init: stack == [[1, 1], [0, 0], [0, 0], [0, 0]], then read `,` 139 | // expect: stack --> [[1, 0], [0, 0], [0, 0], [0, 0]] 140 | let in_object_and_value = { ...INITIAL_IN }; 141 | in_object_and_value.byte = Delimiters.COMMA; 142 | in_object_and_value.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 143 | let in_object_and_value_out = { ...INITIAL_OUT }; 144 | in_object_and_value_out.next_stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 145 | generatePassCase(in_object_and_value, 146 | in_object_and_value_out, 147 | ">>>> `,` read" 148 | ); 149 | 150 | //-TEST_8----------------------------------------------------------// 151 | // init: stack == [[1, 1], [0, 0], [0, 0], [0, 0]] 152 | // read: `}` 153 | // expect: stack --> [[0, 0], [0, 0], [0, 0], [0, 0]] 154 | let in_object_and_value_to_leave_object = { ...INITIAL_IN }; 155 | in_object_and_value_to_leave_object.byte = Delimiters.END_BRACE; 156 | in_object_and_value_to_leave_object.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 157 | let in_object_and_value_to_leave_object_out = { ...INITIAL_OUT }; 158 | in_object_and_value_to_leave_object_out.next_stack = [[0, 0], [0, 0], [0, 0], [0, 0]]; 159 | generatePassCase(in_object_and_value_to_leave_object, 160 | in_object_and_value_to_leave_object_out, 161 | ">>>> `}` read" 162 | ); 163 | 164 |
//-TEST_9----------------------------------------------------------// 165 | // idea: Inside a number value after a key in an object. 166 | // state: stack == [[1, 1], [0, 0], [0, 0], [0, 0]], parsing_number == 1 167 | // read: `,` 168 | // expect: pointer --> 2 169 | // stack --> [[1, 0], [0, 0], [0, 0], [0, 0]] 170 | // parsing_number --> 0 171 | let inside_number = { ...INITIAL_IN }; 172 | inside_number.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 173 | inside_number.parsing_number = 1; 174 | inside_number.byte = Delimiters.COMMA; 175 | let inside_number_out = { ...INITIAL_OUT }; 176 | inside_number_out.next_stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 177 | generatePassCase(inside_number, inside_number_out, ">>>> `,` read"); 178 | 179 | 180 | // TODO: FAIL CASES, ADD STACK UNDERFLOW CASES TOO and RENUMBER 181 | //-TEST_3----------------------------------------------------------// 182 | // state: INIT 183 | // read: `}` 184 | // expect: FAIL (stack underflow) 185 | let read_end_brace = { ...INITIAL_IN }; 186 | read_end_brace.byte = Delimiters.END_BRACE; 187 | generateFailCase(read_end_brace, 188 | ">>>> `}` read --> (stack underflow)" 189 | ); 190 | 191 | //-TEST_9----------------------------------------------------------// 192 | // init: stack == [[1, 0], [1, 0], [1, 0], [1, 0]] 193 | // expect: FAIL, STACK OVERFLOW 194 | let in_max_stack = { ...INITIAL_IN }; 195 | in_max_stack.byte = Delimiters.START_BRACE; 196 | in_max_stack.stack = [[1, 0], [1, 0], [1, 0], [1, 0]]; 197 | generateFailCase(in_max_stack, ">>>> `{` read --> (stack overflow)"); 198 | 199 | //-TEST_10----------------------------------------------------------// 200 | // init: stack == [[1, 0], [1, 0], [1, 0], [1, 0]] 201 | // expect: FAIL, STACK OVERFLOW 202 | let in_max_stack_2 = { ...INITIAL_IN }; 203 | in_max_stack_2.byte = Delimiters.START_BRACKET; 204 | in_max_stack_2.stack = [[1, 0], [1, 0], [1, 0], [1, 0]]; 205 | generateFailCase(in_max_stack_2, ">>>> `[` read --> (stack overflow)"); 206 | 207 | // TODO: This requires a more careful check of the stack that popping clears the current value.
Use an IsZero 208 | // //-TEST_3----------------------------------------------------------// 209 | // // init: stack == [1,0,0,0] 210 | // // read: `]` 211 | // // expect: FAIL, INVALID CHAR 212 | // let in_object_to_read_start_bracket = { ...INITIAL_IN }; 213 | // in_object_to_read_start_bracket.byte = Delimiters.START_BRACKET; 214 | // in_object_to_read_start_bracket.pointer = 1; 215 | // in_object_to_read_start_bracket.stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 216 | // let in_object_to_read_start_bracket_out = { ...INITIAL_OUT }; 217 | // in_object_to_read_start_bracket_out.next_pointer = 2; 218 | // in_object_to_read_start_bracket_out.next_stack = [[1, 0], [2, 0], [0, 0], [0, 0]]; 219 | // generatePassCase(in_object_to_read_start_bracket, 220 | // in_object_to_read_start_bracket_out, 221 | // ">>>> `[` read" 222 | // ); 223 | }); -------------------------------------------------------------------------------- /circuits/test/json/values.test.ts: -------------------------------------------------------------------------------- 1 | import { circomkit, WitnessTester, generateDescription } from "../common"; 2 | import { Delimiters, WhiteSpace, Numbers, Escape, INITIAL_IN, INITIAL_OUT } from '.'; 3 | 4 | describe("StateUpdate :: Values", () => { 5 | let circuit: WitnessTester< 6 | ["byte", "pointer", "stack", "parsing_string", "parsing_number", "escaped"], 7 | ["next_pointer", "next_stack", "next_parsing_string", "next_parsing_number", "next_escaped"] 8 | >; 9 | before(async () => { 10 | circuit = await circomkit.WitnessTester(`StateUpdate`, { 11 | file: "json/machine", 12 | template: "StateUpdate", 13 | params: [4], 14 | }); 15 | }); 16 | 17 | function generatePassCase(input: any, expected: any, desc: string) { 18 | const description = generateDescription(input); 19 | 20 | it(`(valid) witness: ${description}\n${desc}`, async () => { 21 | await circuit.expectPass(input, expected); 22 | }); 23 | } 24 | 25 | describe("StateUpdate :: Values :: Number", () => { 26 | //-TEST_1----------------------------------------------------------// 27 | // idea: Read a number value after a key in an object. 28 | // state: stack == [[1, 1], [0, 0], [0, 0], [0, 0]] 29 | // read: `0` 30 | // expect: stack --> [[1, 1], [0, 0], [0, 0], [0, 0]] 31 | // parsing_number --> 1 32 | let read_number = { ...INITIAL_IN }; 33 | read_number.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 34 | read_number.byte = Numbers.ZERO; 35 | let read_number_out = { ...INITIAL_OUT }; 36 | read_number_out.next_stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 37 | read_number_out.next_parsing_number = 1; 38 | generatePassCase(read_number, read_number_out, ">>>> `0` read"); 39 | 40 | // // TODO: Note that reading a space while reading a number will not throw an error! 41 | 42 | //-TEST_2----------------------------------------------------------// 43 | // idea: Inside a number value after a key in an object.
44 | // state: stack == [[1, 1], [0, 0], [0, 0], [0, 0]], parsing_number == 1 45 | // read: `1` 46 | // expect: stack --> [[1, 1], [0, 0], [0, 0], [0, 0]] 47 | // parsing_number --> 1 48 | let inside_number_continue = { ...INITIAL_IN }; 49 | inside_number_continue.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 50 | inside_number_continue.parsing_number = 1; 51 | inside_number_continue.byte = Numbers.ONE; 52 | let inside_number_continue_out = { ...INITIAL_OUT }; 53 | inside_number_continue_out.next_stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 54 | inside_number_continue_out.next_parsing_number = 1; 55 | generatePassCase(inside_number_continue, inside_number_continue_out, ">>>> `1` read"); 56 | 57 | //-TEST_3----------------------------------------------------------// 58 | // idea: Inside a number value after a key in an object. 59 | // state: stack == [[1, 1], [0, 0], [0, 0], [0, 0]], parsing_number == 1 60 | // read: ` ` 61 | // expect: stack --> [[1, 1], [0, 0], [0, 0], [0, 0]] 62 | // parsing_number --> 0 63 | let inside_number_exit = { ...INITIAL_IN }; 64 | inside_number_exit.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 65 | inside_number_exit.parsing_number = 1; 66 | inside_number_exit.byte = WhiteSpace.SPACE; 67 | let inside_number_exit_out = { ...INITIAL_OUT }; 68 | inside_number_exit_out.next_stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 69 | inside_number_exit_out.next_parsing_number = 0; 70 | generatePassCase(inside_number_exit, inside_number_exit_out, ">>>> ` ` read"); 71 | 72 | //-TEST_4----------------------------------------------------------// 73 | // idea: Inside a number value after a key in an object. 74 | // state: stack == [[1, 1], [0, 0], [0, 0], [0, 0]], parsing_number == 1 75 | // read: `$` 76 | // expect: stack --> [[1, 1], [0, 0], [0, 0], [0, 0]] 77 | // parsing_number --> 0 78 | let inside_number_exit2 = { ...INITIAL_IN }; 79 | inside_number_exit2.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 80 | inside_number_exit2.parsing_number = 1; 81 | inside_number_exit2.byte = 36; // Dollar sign `$` 82 | let inside_number_exit2_out = { ...INITIAL_OUT }; 83 | inside_number_exit2_out.next_stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 84 | inside_number_exit2_out.next_parsing_number = 0; 85 | generatePassCase(inside_number_exit2, inside_number_exit2_out, ">>>> `$` read"); 86 | }); 87 | 88 | describe("StateUpdate :: Values :: String", () => { 89 | //-TEST_4----------------------------------------------------------// 90 | // idea: Inside a string value inside an object 91 | // state: stack == [[1, 1], [0, 0], [0, 0], [0, 0]], parsing_string == 1 92 | // read: `,` 93 | // expect: stack --> [[1, 0], [0, 0], [0, 0], [0, 0]] 94 | // parsing_string --> 1 95 | let inside_number = { ...INITIAL_IN }; 96 | inside_number.stack = [[1, 1], [0, 0], [0, 0], [0, 0]]; 97 | inside_number.parsing_string = 1; 98 | inside_number.byte = Delimiters.COMMA; 99 | let inside_number_out = { ...INITIAL_OUT }; 100 | inside_number_out.next_stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 101 | inside_number_out.next_parsing_string = 1; 102 | generatePassCase(inside_number, inside_number_out, ">>>> `,` read"); 103 | }); 104 | 105 | describe("StateUpdate :: Values :: Array", () => { 106 | // Internal array parsing -----------------------------------------// 107 | 108 | //-TEST_10----------------------------------------------------------// 109 | // init: stack == [[1, 0], [2, 0], [0, 0], [0, 0]] 110 | // read: `,` 111 | // expect: stack --> [[1, 0], [2, 1], [0, 0], [0, 0]] 112 | let in_arr = { ...INITIAL_IN }; 113 | in_arr.stack = [[1, 0],
[2, 0], [0, 0], [0, 0]]; 114 | in_arr.byte = Delimiters.COMMA; 115 | let in_arr_out = { ...INITIAL_OUT }; 116 | in_arr_out.next_stack = [[1, 0], [2, 1], [0, 0], [0, 0]]; 117 | generatePassCase(in_arr, in_arr_out, ">>>> `,` read"); 118 | 119 | //-TEST_10----------------------------------------------------------// 120 | // init: stack == [[1, 0], [2, 1], [0, 0], [0, 0]] 121 | // read: `]` 122 | // expect: stack --> [[1, 0], [0, 0], [0, 0], [0, 0]] 123 | let in_arr_idx_to_leave = { ...INITIAL_IN }; 124 | in_arr_idx_to_leave.stack = [[1, 0], [2, 1], [0, 0], [0, 0]]; 125 | in_arr_idx_to_leave.byte = Delimiters.END_BRACKET; 126 | let in_arr_idx_to_leave_out = { ...INITIAL_OUT }; 127 | in_arr_idx_to_leave_out.next_stack = [[1, 0], [0, 0], [0, 0], [0, 0]]; 128 | generatePassCase(in_arr_idx_to_leave, in_arr_idx_to_leave_out, ">>>> `]` read"); 129 | }); 130 | }); -------------------------------------------------------------------------------- /circuits/test/utils/array.test.ts: -------------------------------------------------------------------------------- 1 | import { circomkit, WitnessTester } from "../common"; 2 | 3 | describe("IsEqualArray", () => { 4 | let circuit: WitnessTester<["in"], ["out"]>; 5 | before(async () => { 6 | circuit = await circomkit.WitnessTester(`IsEqualArray`, { 7 | file: "utils/array", 8 | template: "IsEqualArray", 9 | params: [3], 10 | }); 11 | }); 12 | 13 | it("witness: [[0,0,0],[0,0,0]]", async () => { 14 | await circuit.expectPass( 15 | { in: [[0, 0, 0], [0, 0, 0]] }, 16 | { out: 1 } 17 | ); 18 | }); 19 | 20 | it("witness: [[1,420,69],[1,420,69]]", async () => { 21 | await circuit.expectPass( 22 | { in: [[1, 420, 69], [1, 420, 69]] }, 23 | { out: 1 }, 24 | ); 25 | }); 26 | 27 | it("witness: [[0,0,0],[1,420,69]]", async () => { 28 | await circuit.expectPass( 29 | { in: [[0, 0, 0], [1, 420, 69]] }, 30 | { out: 0 }, 31 | ); 32 | }); 33 | 34 | it("witness: [[1,420,0],[1,420,69]]", async () => { 35 | await circuit.expectPass( 36 | { in: [[1, 420, 0], [1, 420, 69]] }, 37 | { out: 0 }, 38 | ); 39 | }); 40 | 41 | it("witness: [[1,0,69],[1,420,69]]", async () => { 42 | await circuit.expectPass( 43 | { in: [[1, 0, 69], [1, 420, 69]] }, 44 | { out: 0 }, 45 | ); 46 | }); 47 | 48 | it("witness: [[0,420,69],[1,420,69]]", async () => { 49 | await circuit.expectPass( 50 | { in: [[0, 420, 69], [1, 420, 69]] }, 51 | { out: 0 }, 52 | ); 53 | }); 54 | }); 55 | 56 | describe("Contains", () => { 57 | let circuit: WitnessTester<["in", "array"], ["out"]>; 58 | before(async () => { 59 | circuit = await circomkit.WitnessTester(`Contains`, { 60 | file: "utils/array", 61 | template: "Contains", 62 | params: [3], 63 | }); 64 | }); 65 | 66 | it("witness: in = 0, array = [0,1,2]", async () => { 67 | await circuit.expectPass( 68 | { in: 0, array: [0, 1, 2] }, 69 | { out: 1 } 70 | ); 71 | }); 72 | 73 | it("witness: in = 1, array = [0,1,2]", async () => { 74 | await circuit.expectPass( 75 | { in: 1, array: [0, 1, 2] }, 76 | { out: 1 } 77 | ); 78 | }); 79 | 80 | it("witness: in = 2, array = [0,1,2]", async () => { 81 | await circuit.expectPass( 82 | { in: 2, array: [0, 1, 2] }, 83 | { out: 1 } 84 | ); 85 | }); 86 | 87 | it("witness: in = 42069, array = [0,1,2]", async () => { 88 | await circuit.expectPass( 89 | { in: 42069, array: [0, 1, 2] }, 90 | { out: 0 } 91 | ); 92 | }); 93 | 94 | }); 95 | 96 | describe("ArrayAdd", () => { 97 | let circuit: WitnessTester<["lhs", "rhs"], ["out"]>; 98 | before(async () => { 99 | circuit = await circomkit.WitnessTester(`ArrayAdd`, { 100 | file: "utils/array", 101 | 
template: "ArrayAdd", 102 | params: [3], 103 | }); 104 | }); 105 | 106 | it("witness: lhs = [0,1,2], rhs = [3,5,7]", async () => { 107 | await circuit.expectPass( 108 | { lhs: [0, 1, 2], rhs: [3, 5, 7] }, 109 | { out: [3, 6, 9] } 110 | ); 111 | }); 112 | 113 | }); 114 | 115 | describe("ArrayMul", () => { 116 | let circuit: WitnessTester<["lhs", "rhs"], ["out"]>; 117 | before(async () => { 118 | circuit = await circomkit.WitnessTester(`ArrayMul`, { 119 | file: "utils/array", 120 | template: "ArrayMul", 121 | params: [3], 122 | }); 123 | }); 124 | 125 | it("witness: lhs = [0,1,2], rhs = [3,5,7]", async () => { 126 | await circuit.expectPass( 127 | { lhs: [0, 1, 2], rhs: [3, 5, 7] }, 128 | { out: [0, 5, 14] } 129 | ); 130 | }); 131 | 132 | }); 133 | 134 | describe("GenericArrayAdd", () => { 135 | let circuit: WitnessTester<["arrays"], ["out"]>; 136 | before(async () => { 137 | circuit = await circomkit.WitnessTester(`ArrayAdd`, { 138 | file: "utils/array", 139 | template: "GenericArrayAdd", 140 | params: [3, 2], 141 | }); 142 | }); 143 | 144 | it("witness: arrays = [[0,1,2],[3,5,7]]", async () => { 145 | await circuit.expectPass( 146 | { arrays: [[0, 1, 2], [3, 5, 7]] }, 147 | { out: [3, 6, 9] } 148 | ); 149 | }); 150 | 151 | }); 152 | 153 | describe("fromLittleEndianToWords32", () => { 154 | let circuit: WitnessTester<["data"], ["words"]>; 155 | it("fromLittleEndianToWords32", async () => { 156 | circuit = await circomkit.WitnessTester(`fromLittleEndianToWords32`, { 157 | file: "utils/array", 158 | template: "fromLittleEndianToWords32", 159 | }); 160 | 161 | let input = [ 162 | 0, 1, 0, 1, 0, 0, 0, 0, 0, 163 | 1, 0, 1, 0, 1, 0, 0, 0, 1, 164 | 0, 1, 0, 1, 0, 0, 0, 1, 0, 165 | 0, 1, 0, 0, 0 166 | ]; 167 | await circuit.expectPass({ data: input }, { words: [72, 84, 84, 80] }) 168 | }); 169 | }); 170 | 171 | describe("fromWords32ToLittleEndian", () => { 172 | let circuit: WitnessTester<["words"], ["data"]>; 173 | it("fromWords32ToLittleEndian", async () => { 174 | circuit = await circomkit.WitnessTester(`fromWords32ToLittleEndian`, { 175 | file: "utils/array", 176 | template: "fromWords32ToLittleEndian", 177 | }); 178 | 179 | let input = [72, 84, 84, 80]; 180 | await circuit.expectPass({ words: input }, { 181 | data: [ 182 | 0, 1, 0, 1, 0, 0, 0, 0, 0, 183 | 1, 0, 1, 0, 1, 0, 0, 0, 1, 184 | 0, 1, 0, 1, 0, 0, 0, 1, 0, 185 | 0, 1, 0, 0, 0 186 | ] 187 | }) 188 | }); 189 | }); 190 | 191 | -------------------------------------------------------------------------------- /circuits/test/utils/hash.test.ts: -------------------------------------------------------------------------------- 1 | import assert from "assert"; 2 | import { circomkit, http_response_plaintext, http_start_line, PolynomialDigest, WitnessTester } from "../common"; 3 | import { DataHasher, PoseidonModular } from "../common/poseidon"; 4 | import { poseidon1 } from "poseidon-lite"; 5 | 6 | 7 | describe("DataHasher", () => { 8 | let circuit: WitnessTester<["in"], ["out"]>; 9 | 10 | before(async () => { 11 | circuit = await circomkit.WitnessTester(`DataHasher`, { 12 | file: "utils/hash", 13 | template: "DataHasher", 14 | params: [16], 15 | }); 16 | }); 17 | 18 | let all_zero_hash = BigInt("14744269619966411208579211824598458697587494354926760081771325075741142829156"); 19 | it("witness: in = [0,...x16]", async () => { 20 | const input = Array(16).fill(0); 21 | await circuit.expectPass( 22 | { in: input }, 23 | { out: all_zero_hash } 24 | ); 25 | }); 26 | // Check that TS version of DataHasher also is correct 27 | 
assert.deepEqual(DataHasher(Array(16).fill(0), BigInt(0)), all_zero_hash);
28 | 
29 |     it("witness: in = [-1,...x16]", async () => {
30 |         const input = Array(16).fill(-1);
31 |         await circuit.expectPass(
32 |             { in: input },
33 |             { out: 0 }
34 |         );
35 |     });
36 |     // Check that TS version of DataHasher also is correct
37 |     assert.deepEqual(DataHasher(Array(16).fill(-1), BigInt(0)), 0);
38 | 
39 |     it("witness: in = [1,0,...x15]", async () => {
40 |         let input = Array(16).fill(0);
41 |         input[0] = 1;
42 |         const hash = PoseidonModular([0, 1]);
43 |         await circuit.expectPass(
44 |             { in: input },
45 |             { out: hash }
46 |         );
47 |     });
48 | 
49 | 
50 |     it("witness: in = [0,0,...x15,1]", async () => {
51 |         let input = Array(16).fill(0);
52 |         input[15] = 1;
53 |         const hash = PoseidonModular([0, "1329227995784915872903807060280344576"]);
54 |         await circuit.expectPass(
55 |             { in: input },
56 |             { out: hash }
57 |         );
58 |     });
59 | });
60 | 
61 | const padded_http_start_line = http_start_line.concat(Array(320 - http_start_line.length).fill(-1));
62 | 
63 | describe("DataHasherHTTP", () => {
64 |     let circuit: WitnessTester<["in"], ["out"]>;
65 |     let circuit_small: WitnessTester<["in"], ["out"]>;
66 | 
67 |     before(async () => {
68 |         circuit = await circomkit.WitnessTester(`DataHasher`, {
69 |             file: "utils/hash",
70 |             template: "DataHasher",
71 |             params: [320],
72 |         });
73 | 
74 |         circuit_small = await circomkit.WitnessTester(`DataHasher`, {
75 |             file: "utils/hash",
76 |             template: "DataHasher",
77 |             params: [32],
78 |         });
79 |     });
80 | 
81 |     it("witness: HTTP bytes", async () => {
82 |         let hash = DataHasher(http_response_plaintext, BigInt(0));
83 |         assert.deepEqual(String(hash), "2195365663909569734943279727560535141179588918483111718403427949138562480675");
84 |         await circuit.expectPass({ in: http_response_plaintext }, { out: "2195365663909569734943279727560535141179588918483111718403427949138562480675" });
85 |     });
86 | 
87 |     let padded_hash = DataHasher(padded_http_start_line, BigInt(0));
88 |     it("witness: padded HTTP start line", async () => {
89 |         await circuit.expectPass({ in: padded_http_start_line }, { out: padded_hash });
90 |     });
91 | 
92 |     let hash = DataHasher(http_start_line, BigInt(0));
93 |     it("witness: unpadded HTTP start line", async () => {
94 |         await circuit_small.expectPass({ in: http_start_line.concat(Array(32 - http_start_line.length).fill(-1)) }, { out: hash });
95 |     });
96 | });
97 | 
98 | describe("PolynomialDigest", () => {
99 |     let circuit: WitnessTester<["bytes", "polynomial_input"], ["digest"]>;
100 | 
101 |     before(async () => {
102 |         circuit = await circomkit.WitnessTester(`PolynomialDigest`, {
103 |             file: "utils/hash",
104 |             template: "PolynomialDigest",
105 |             params: [4],
106 |         });
107 |     });
108 | 
109 |     it("witness: bytes = [0,0,0,0], polynomial_input = 0", async () => {
110 |         const bytes = [0, 0, 0, 0];
111 |         const polynomial_input = 0;
112 | 
113 |         await circuit.expectPass(
114 |             { bytes, polynomial_input },
115 |             { digest: 0 }
116 |         );
117 |     });
118 | 
119 |     it("witness: bytes = [1,2,3,4], polynomial_input = 7", async () => {
120 |         const bytes = [1, 2, 3, 4];
121 |         const polynomial_input = 7;
122 | 
123 |         await circuit.expectPass(
124 |             { bytes, polynomial_input },
125 |             { digest: 1 + 2 * 7 + 3 * 7 ** 2 + 4 * 7 ** 3 }
126 |         );
127 |     });
128 | 
129 |     it("witness: bytes = [4*random], polynomial_input = random", async () => {
130 |         const bytes = Array.from({ length: 4 }, () => Math.floor(Math.random() * 256));
131 |         const polynomial_input = poseidon1([BigInt(Math.floor(Math.random() *
694206942069420))]); 132 | const digest = PolynomialDigest(bytes, polynomial_input, BigInt(0)); 133 | 134 | await circuit.expectPass( 135 | { bytes, polynomial_input }, 136 | { digest } 137 | ); 138 | }); 139 | 140 | }); 141 | 142 | describe("PolynomialDigestWithCounter", () => { 143 | let circuit: WitnessTester<["bytes", "polynomial_input", "pow_ctr"], ["digest"]>; 144 | 145 | before(async () => { 146 | circuit = await circomkit.WitnessTester(`PolynomialDigestWithCounter`, { 147 | file: "utils/hash", 148 | template: "PolynomialDigestWithCounter", 149 | params: [4], 150 | }); 151 | }); 152 | 153 | it("witness: bytes = [1,2,3,4], polynomial_input = 7, pow_ctr = 1", async () => { 154 | const bytes = [1, 2, 3, 4]; 155 | const polynomial_input = 7; 156 | 157 | await circuit.expectPass( 158 | { bytes, polynomial_input, pow_ctr: 1 }, 159 | { digest: 1 + 2 * 7 + 3 * 7 ** 2 + 4 * 7 ** 3 } 160 | ); 161 | }); 162 | 163 | it("witness: bytes = [1,2,3,4], polynomial_input = 7, pow_ctr = 7**2", async () => { 164 | const bytes = [1, 2, 3, 4]; 165 | const polynomial_input = 7; 166 | 167 | await circuit.expectPass( 168 | { bytes, polynomial_input, pow_ctr: 7 ** 2 }, 169 | { digest: 1 * 7 ** 2 + 2 * 7 ** 3 + 3 * 7 ** 4 + 4 * 7 ** 5 } 170 | ); 171 | }); 172 | }); -------------------------------------------------------------------------------- /circuits/test/utils/operators.test.ts: -------------------------------------------------------------------------------- 1 | import { circomkit, WitnessTester } from "../common"; 2 | 3 | describe("SwitchArray", () => { 4 | let circuit: WitnessTester<["case", "branches", "vals"], ["match", "out"]>; 5 | before(async () => { 6 | circuit = await circomkit.WitnessTester(`SwitchArray`, { 7 | file: "utils/operators", 8 | template: "SwitchArray", 9 | params: [3, 2], 10 | }); 11 | }); 12 | 13 | it("witness: case = 0, branches = [0, 1, 2], vals = [[69,0], [420,1], [1337,2]]", async () => { 14 | await circuit.expectPass( 15 | { case: 0, branches: [0, 1, 2], vals: [[69, 0], [420, 1], [1337, 2]] }, 16 | { match: 1, out: [69, 0] }, 17 | ); 18 | }); 19 | 20 | it("witness: case = 1, branches = [0, 1, 2], vals = [[69,0], [420,1], [1337,2]]", async () => { 21 | await circuit.expectPass( 22 | { case: 1, branches: [0, 1, 2], vals: [[69, 0], [420, 1], [1337, 2]] }, 23 | { match: 1, out: [420, 1] }, 24 | ); 25 | }); 26 | 27 | it("witness: case = 2, branches = [0, 1, 2], vals = [[69,0], [420,1], [1337,2]]", async () => { 28 | await circuit.expectPass( 29 | { case: 2, branches: [0, 1, 2], vals: [[69, 0], [420, 1], [1337, 2]] }, 30 | { match: 1, out: [1337, 2] }, 31 | ); 32 | }); 33 | 34 | it("witness: case = 3, branches = [0, 1, 2], vals = [[69,0], [420,1], [1337,2]]", async () => { 35 | await circuit.expectPass( 36 | { case: 3, branches: [0, 1, 2], vals: [[69, 0], [420, 1], [1337, 2]] }, 37 | { match: 0, out: [0, 0] } 38 | ); 39 | }); 40 | 41 | it("witness: case = 420, branches = [69, 420, 1337], vals = [[10,3], [20,5], [30,7]]", async () => { 42 | await circuit.expectPass( 43 | { case: 420, branches: [69, 420, 1337], vals: [[10, 3], [20, 5], [30, 7]] }, 44 | { match: 1, out: [20, 5] } 45 | ); 46 | }); 47 | 48 | it("witness: case = 0, branches = [69, 420, 1337], vals = [[10,3], [20,5], [30,7]]", async () => { 49 | await circuit.expectPass( 50 | { case: 0, branches: [69, 420, 1337], vals: [[10, 3], [20, 5], [30, 7]] }, 51 | { match: 0, out: [0, 0] } 52 | ); 53 | }); 54 | 55 | }); 56 | 57 | describe("Switch", () => { 58 | let circuit: WitnessTester<["case", "branches", "vals"], ["match", 
"out"]>; 59 | before(async () => { 60 | circuit = await circomkit.WitnessTester(`Switch`, { 61 | file: "utils/operators", 62 | template: "Switch", 63 | params: [3], 64 | }); 65 | }); 66 | 67 | it("witness: case = 0, branches = [0, 1, 2], vals = [69, 420, 1337]", async () => { 68 | await circuit.expectPass( 69 | { case: 0, branches: [0, 1, 2], vals: [69, 420, 1337] }, 70 | { match: 1, out: 69 }, 71 | ); 72 | }); 73 | 74 | it("witness: case = 1, branches = [0, 1, 2], vals = [69, 420, 1337]", async () => { 75 | await circuit.expectPass( 76 | { case: 1, branches: [0, 1, 2], vals: [69, 420, 1337] }, 77 | { match: 1, out: 420 }, 78 | ); 79 | }); 80 | 81 | it("witness: case = 2, branches = [0, 1, 2], vals = [69, 420, 1337]", async () => { 82 | await circuit.expectPass( 83 | { case: 2, branches: [0, 1, 2], vals: [69, 420, 1337] }, 84 | { match: 1, out: 1337 }, 85 | ); 86 | }); 87 | 88 | it("witness: case = 3, branches = [0, 1, 2], vals = [69, 420, 1337]", async () => { 89 | await circuit.expectPass( 90 | { case: 3, branches: [0, 1, 2], vals: [69, 420, 1337] }, 91 | { match: 0, out: 0 }, 92 | ); 93 | }); 94 | 95 | 96 | }); 97 | 98 | describe("InRange", () => { 99 | let circuit: WitnessTester<["in", "range"], ["out"]>; 100 | before(async () => { 101 | circuit = await circomkit.WitnessTester(`InRange`, { 102 | file: "utils/operators", 103 | template: "InRange", 104 | params: [8], 105 | }); 106 | }); 107 | 108 | it("witness: in = 1, range = [0,2]", async () => { 109 | await circuit.expectPass( 110 | { in: 1, range: [0, 2] }, 111 | { out: 1 } 112 | ); 113 | }); 114 | 115 | it("witness: in = 69, range = [128,255]", async () => { 116 | await circuit.expectPass( 117 | { in: 69, range: [128, 255] }, 118 | { out: 0 } 119 | ); 120 | }); 121 | 122 | it("witness: in = 200, range = [128,255]", async () => { 123 | await circuit.expectPass( 124 | { in: 1, range: [0, 2] }, 125 | { out: 1 } 126 | ); 127 | }); 128 | }); -------------------------------------------------------------------------------- /circuits/utils/array.circom: -------------------------------------------------------------------------------- 1 | pragma circom 2.1.9; 2 | 3 | include "circomlib/circuits/comparators.circom"; 4 | include "circomlib/circuits/gates.circom"; 5 | include "circomlib/circuits/mux1.circom"; 6 | 7 | /* 8 | This template is an indicator for two equal array inputs. 9 | 10 | # Params: 11 | - `n`: the length of arrays to compare 12 | 13 | # Inputs: 14 | - `in[2][n]`: two arrays of `n` numbers 15 | 16 | # Outputs: 17 | - `out`: either `0` or `1` 18 | - `1` if `in[0]` is equal to `in[1]` as arrays (i.e., component by component) 19 | - `0` otherwise 20 | */ 21 | template IsEqualArray(n) { 22 | signal input in[2][n]; 23 | signal output out; 24 | 25 | var accum = 0; 26 | component equalComponent[n]; 27 | 28 | for(var i = 0; i < n; i++) { 29 | equalComponent[i] = IsEqual(); 30 | equalComponent[i].in[0] <== in[0][i]; 31 | equalComponent[i].in[1] <== in[1][i]; 32 | accum += equalComponent[i].out; 33 | } 34 | 35 | component totalEqual = IsEqual(); 36 | totalEqual.in[0] <== n; 37 | totalEqual.in[1] <== accum; 38 | out <== totalEqual.out; 39 | } 40 | 41 | 42 | /* 43 | This template is an indicator for if an array contains an element. 
44 | 
45 | # Params:
46 | - `n`: the size of the array to search through
47 | 
48 | # Inputs:
49 | - `in`: a number
50 | - `array[n]`: the array we want to search through
51 | 
52 | # Outputs:
53 | - `out`: either `0` or `1`
54 |     - `1` if `in` is found inside `array`
55 |     - `0` otherwise
56 | */
57 | template Contains(n) {
58 |     assert(n > 0);
59 |     /*
60 |     If `n` were as large as the field characteristic `p`, this template could
61 |     return the wrong value if every element in `array` was equal to `in`.
62 |     This is EXTREMELY unlikely, and iterating this high is impossible anyway,
63 |     but it is better to check than miss something, so we bound it by `2**254` for now.
64 |     */
65 |     assert(n < 2**254);
66 |     signal input in;
67 |     signal input array[n];
68 |     signal output out;
69 | 
70 |     signal accum[n+1];
71 |     accum[0] <== 1;
72 |     // Accumulate the running product of (array[i] - in); no per-element components are needed.
73 |     for(var i = 0; i < n; i++) {
74 |         accum[i+1] <== accum[i] * (array[i] - in);
75 |     }
76 | 
77 |     component someEqual = IsZero();
78 |     someEqual.in <== accum[n];
79 | 
80 |     out <== someEqual.out;
81 | }
82 | 
83 | /*
84 | This template adds two arrays component by component.
85 | 
86 | # Params:
87 | - `n`: the length of the arrays to add
88 | 
89 | # Inputs:
90 | - `lhs[n]`, `rhs[n]`: two arrays of `n` numbers
91 | 
92 | # Outputs:
93 | - `out[n]`: the array sum value
94 | */
95 | template ArrayAdd(n) {
96 |     signal input lhs[n];
97 |     signal input rhs[n];
98 |     signal output out[n];
99 | 
100 |     for(var i = 0; i < n; i++) {
101 |         out[i] <== lhs[i] + rhs[i];
102 |     }
103 | }
104 | 
105 | /*
106 | This template multiplies two arrays component by component.
107 | 
108 | # Params:
109 | - `n`: the length of the arrays to multiply
110 | 
111 | # Inputs:
112 | - `lhs[n]`, `rhs[n]`: two arrays of `n` numbers
113 | 
114 | # Outputs:
115 | - `out[n]`: the array multiplication value
116 | */
117 | template ArrayMul(n) {
118 |     signal input lhs[n];
119 |     signal input rhs[n];
120 |     signal output out[n];
121 | 
122 |     for(var i = 0; i < n; i++) {
123 |         out[i] <== lhs[i] * rhs[i];
124 |     }
125 | }
126 | 
127 | /*
128 | This template adds an arbitrary number of arrays component by component.
129 | 
130 | # Params:
131 | - `m`: the length of the arrays to add
132 | - `n`: the number of arrays to add
133 | 
134 | # Inputs:
135 | - `arrays[n][m]`: `n` arrays of `m` numbers
136 | 
137 | # Outputs:
138 | - `out[m]`: the sum of all the arrays
139 | */
140 | template GenericArrayAdd(m,n) {
141 |     signal input arrays[n][m];
142 |     signal output out[m];
143 | 
144 |     var accum[m];
145 |     for(var i = 0; i < m; i++) {
146 |         for(var j = 0; j < n; j++) {
147 |             accum[i] += arrays[j][i];
148 |         }
149 |     }
150 |     out <== accum;
151 | }
152 | 
153 | /*
154 | This template multiplies each component of an array by a scalar value.
155 | 
156 | # Params:
157 | - `n`: the length of the array
158 | 
159 | # Inputs:
160 | - `array[n]`: an array of `n` numbers
161 | - `scalar`: the scalar value to multiply each component by
162 | # Outputs:
163 | - `out[n]`: the scalar multiplied array
164 | */
165 | template ScalarArrayMul(n) {
166 |     signal input array[n];
167 |     signal input scalar;
168 |     signal output out[n];
169 | 
170 |     for(var i = 0; i < n; i++) {
171 |         out[i] <== scalar * array[i];
172 |     }
173 | }
174 | 
175 | /*
176 | This template sums over the elements in an array.
177 | # Params:
178 | - `n`: the length of the array
179 | 
180 | # Inputs:
181 | - `nums[n]`: an array of `n` numbers
182 | 
183 | # Outputs:
184 | - `sum`: the sum of the array elements
185 | */
186 | template SumMultiple(n) {
187 |     signal input nums[n];
188 |     signal output sum;
189 | 
190 |     signal sums[n];
191 |     sums[0] <== nums[0];
192 | 
193 |     for(var i=1; i<n; i++) {
194 |         sums[i] <== sums[i-1] + nums[i];
195 |     }
196 | 
197 |     sum <== sums[n-1];
198 | }
233 | // 0, 1, 0, 1, 0, 0, 0, 0, => 80
234 | // 0, 1, 0, 1, 0, 1, 0, 0, => 84
235 | // 0, 1, 0, 1, 0, 1, 0, 0, => 84
236 | // 0, 1, 0, 0, 1, 0, 0, 0, => 72
237 | // should be encoded as
238 | // 72, 84, 84, 80
239 | template fromLittleEndianToWords32() {
240 |     signal input data[32];
241 |     signal output words[4];
242 |     component Bits2Num[4];
243 |     for(var i = 3; i >= 0; i--) {
244 |         Bits2Num[i] = Bits2Num(8);
245 |         for(var j = 7; j >= 0; j--) {
246 |             Bits2Num[i].in[7-j] <== data[i*8 + j];
247 |         }
248 |         words[3-i] <== Bits2Num[i].out;
249 |     }
250 | }
251 | template fromWords32ToLittleEndian() {
252 |     signal input words[4];
253 |     signal output data[32];
254 |     component Num2Bits[4];
255 | 
256 |     for(var i = 3; i >= 0; i--) {
257 |         Num2Bits[i] = Num2Bits(8);
258 |         Num2Bits[i].in <== words[3-i];
259 | 
260 |         for(var j = 7; j >= 0; j--) {
261 |             data[i*8 + j] <== Num2Bits[i].out[7-j];
262 |         }
263 |     }
264 | }
265 | 
266 | 
--------------------------------------------------------------------------------
/circuits/utils/bits.circom:
--------------------------------------------------------------------------------
1 | pragma circom 2.1.9;
2 | 
3 | include "circomlib/circuits/bitify.circom";
4 | 
5 | // initially from https://github.com/reclaimprotocol/zk-symmetric-crypto
6 | // modified for our needs
7 | 
8 | /**
9 |  * Add N bit numbers together
10 |  * copied in from: https://github.com/iden3/circomlib/blob/master/circuits/binsum.circom
11 |  * but rewritten slightly to reduce the final number of wires & labels
12 |  * and possibly look at reducing the number of constraints
13 |  */
14 | template AddBits(BITS) {
15 |     signal input a[BITS];
16 |     signal input b[BITS];
17 |     signal output out[BITS];
18 |     signal carrybit;
19 | 
20 |     var lin = 0;
21 |     var lout = 0;
22 | 
23 |     var k;
24 |     var j = 0;
25 | 
26 |     var e2;
27 | 
28 |     // create e2, which
29 |     // tracks the numerical value of 2^k
30 |     e2 = 1;
31 |     for (k = BITS - 1; k >= 0; k--) {
32 |         lin += (a[k] + b[k]) * e2;
33 |         e2 *= 2;
34 |     }
35 | 
36 |     e2 = 1;
37 |     for (k = BITS - 1; k >= 0; k--) {
38 |         out[k] <-- (lin >> j) & 1;
39 |         // Ensure out is binary
40 |         out[k] * (out[k] - 1) === 0;
41 |         lout += out[k] * e2;
42 |         e2 *= 2;
43 |         j += 1;
44 |     }
45 | 
46 |     carrybit <-- (lin >> j) & 1;
47 |     // Ensure carrybit is binary
48 |     carrybit * (carrybit - 1) === 0;
49 |     lout += carrybit * e2;
50 | 
51 |     // Ensure the sum matches
52 |     lin === lout;
53 | }
54 | 
55 | /**
56 |  * Rotate left a BITS bit integer L bits
57 |  */
58 | template RotateLeftBits(BITS, L) {
59 |     signal input in[BITS];
60 |     signal output out[BITS];
61 |     for (var i = 0; i < BITS; i++) {
62 |         out[i] <== in[(i + L) % BITS];
63 |     }
64 | }
65 | 
66 | /**
67 |  * XOR BITS-bit words
68 |  */
69 | template XorBits(BITS) {
70 |     signal input
a[BITS];
71 |     signal input b[BITS];
72 |     signal output out[BITS];
73 | 
74 |     for (var k=0; k<BITS; k++) {
75 |         out[k] <== a[k] + b[k] - 2*a[k]*b[k];
76 |     }
77 | }
--------------------------------------------------------------------------------
/circuits/utils/functions.circom:
--------------------------------------------------------------------------------
1 | pragma circom 2.1.9;
2 | 
3 | function log2Ceil(a) {
4 |     var n = a - 1;
5 |     var r = 0;
6 | 
7 |     while (n > 0) {
8 |         r++;
9 |         n \= 2;
10 |     }
11 | 
12 |     return r;
13 | }
--------------------------------------------------------------------------------
/circuits/utils/hash.circom:
--------------------------------------------------------------------------------
1 | pragma circom 2.1.9;
2 | 
3 | include "circomlib/circuits/poseidon.circom";
4 | include "./array.circom";
5 | include "./functions.circom";
6 | 
7 | template MaskedByteStreamDigest(DATA_BYTES) {
8 |     signal input in[DATA_BYTES];
9 |     signal output out;
10 | 
11 |     signal hashes[DATA_BYTES + 1];
12 |     signal option_hash[DATA_BYTES];
13 |     signal not_to_hash[DATA_BYTES];
14 |     hashes[0] <== 0;
15 |     for(var i = 0 ; i < DATA_BYTES ; i++) {
16 |         not_to_hash[i] <== IsEqual()([in[i], -1]);
17 |         option_hash[i] <== Poseidon(2)([hashes[i],in[i]]);
18 |         hashes[i+1] <== not_to_hash[i] * (hashes[i] - option_hash[i]) + option_hash[i]; // same as: (1 - not_to_hash[i]) * option_hash[i] + not_to_hash[i] * hashes[i];
19 |     }
20 |     out <== hashes[DATA_BYTES];
21 | }
22 | 
23 | // TODO (autoparallel): This could be modified to support an arbitrary length while combining 31 bytes at a time instead of 16
24 | template DataHasher(DATA_BYTES) {
25 |     // TODO: add this assert back after witnesscalc supports
26 |     // assert(DATA_BYTES % 16 == 0);
27 |     signal input in[DATA_BYTES];
28 |     signal output out;
29 | 
30 |     signal not_to_hash[DATA_BYTES \ 16];
31 |     signal option_hash[DATA_BYTES \ 16];
32 |     signal hashes[DATA_BYTES \ 16 + 1];
33 |     signal isPadding[DATA_BYTES];
34 |     hashes[0] <== 0;
35 |     for(var i = 0 ; i < DATA_BYTES \ 16 ; i++) {
36 |         var packedInput = 0;
37 |         var isPaddedChunk = 0;
38 |         for(var j = 0 ; j < 16 ; j++) {
39 |             /*
40 |             If in[16 * i + j] is ever -1 we get `isPadding[16 * i + j] === 1` and since we add this
41 |             we get zero which does not change `packedInput`.
42 |             */
43 |             isPadding[16 * i + j] <== IsEqual()([in[16 * i + j], -1]);
44 |             isPaddedChunk += isPadding[16 * i + j];
45 |             packedInput += (in[16 * i + j] + isPadding[16 * i + j]) * 2**(8*j);
46 |         }
47 |         not_to_hash[i] <== IsEqual()([isPaddedChunk, 16]);
48 |         option_hash[i] <== Poseidon(2)([hashes[i],packedInput]);
49 |         hashes[i+1] <== not_to_hash[i] * (hashes[i] - option_hash[i]) + option_hash[i]; // same as: (1 - not_to_hash[i]) * option_hash[i] + not_to_hash[i] * hashes[i];
50 |     }
51 |     out <== hashes[DATA_BYTES \ 16];
52 | }
53 | 
54 | // TODO: Lazy so i made a new template when we should probably just reuse the other and refactor elsewhere
55 | template DataHasherWithSeed(DATA_BYTES) {
56 |     // TODO: add this assert back after witnesscalc supports
57 |     // assert(DATA_BYTES % 16 == 0);
58 |     signal input seed;
59 |     signal input in[DATA_BYTES];
60 |     signal output out;
61 | 
62 |     signal not_to_hash[DATA_BYTES \ 16];
63 |     signal option_hash[DATA_BYTES \ 16];
64 |     signal hashes[DATA_BYTES \ 16 + 1];
65 |     signal isPadding[DATA_BYTES];
66 |     hashes[0] <== seed;
67 |     for(var i = 0 ; i < DATA_BYTES \ 16 ; i++) {
68 |         var packedInput = 0;
69 |         var isPaddedChunk = 0;
70 |         for(var j = 0 ; j < 16 ; j++) {
71 |             /*
72 |             If in[16 * i + j] is ever -1 we get `isPadding[16 * i + j] === 1` and since we add this
73 |             we get zero which does not change `packedInput`.
74 |             */
75 |             isPadding[16 * i + j] <== IsEqual()([in[16 * i + j], -1]);
76 |             isPaddedChunk += isPadding[16 * i + j];
77 |             packedInput += (in[16 * i + j] + isPadding[16 * i + j]) * 2**(8*j);
78 |         }
79 |         not_to_hash[i] <== IsEqual()([isPaddedChunk, 16]);
80 |         option_hash[i] <== Poseidon(2)([hashes[i],packedInput]);
81 |         hashes[i+1] <== not_to_hash[i] * (hashes[i] - option_hash[i]) + option_hash[i]; // same as: (1 - not_to_hash[i]) * option_hash[i] + not_to_hash[i] * hashes[i];
82 |     }
83 |     out <== hashes[DATA_BYTES \ 16];
84 | }
85 | 
86 | template PolynomialDigest(N) {
87 |     signal input bytes[N];
88 |     signal input polynomial_input;
89 | 
90 |     signal output digest;
91 | 
92 |     signal monomials[N];
93 |     signal terms[N];
94 |     monomials[0] <== 1;
95 |     terms[0] <== bytes[0] * monomials[0];
96 |     var accumulation = terms[0];
97 |     for(var i = 1 ; i < N ; i++) {
98 |         monomials[i] <== monomials[i - 1] * polynomial_input;
99 |         terms[i] <== monomials[i] * bytes[i];
100 |         accumulation += terms[i];
101 |     }
102 |     digest <== accumulation;
103 | }
104 | 
105 | template PolynomialDigestWithCounter(N) {
106 |     signal input bytes[N];
107 |     signal input polynomial_input;
108 |     signal input pow_ctr;
109 | 
110 |     var logN = log2Ceil(N);
111 | 
112 |     signal output digest;
113 | 
114 |     signal monomials[N];
115 |     signal terms[N];
116 | 
117 |     // monomials[0] = polynomial_input ** counter
118 |     monomials[0] <== pow_ctr;
119 |     terms[0] <== bytes[0] * monomials[0];
120 |     var accumulation = terms[0];
121 |     for(var i = 1 ; i < N ; i++) {
122 |         monomials[i] <== monomials[i - 1] * polynomial_input;
123 |         terms[i] <== monomials[i] * bytes[i];
124 |         accumulation += terms[i];
125 |     }
126 |     digest <== accumulation;
127 | }
--------------------------------------------------------------------------------
/circuits/utils/operators.circom:
--------------------------------------------------------------------------------
1 | /*
2 | # `utils`
3 | This module consists of helper templates for convenience.
4 | It mostly extends the `bitify` and `comparators` modules from Circomlib.
5 | 
6 | ## Layout
7 | The key ingredients of `utils` are:
8 |  - `ASCII`: Verify if an input array contains valid ASCII values (e.g., u8 vals).
9 |  - `IsEqualArray`: Check if two arrays are equal component by component.
10 |  - `Contains`: Check if an element is contained in a given array.
11 |  - `ArrayAdd`: Add two arrays together component by component.
12 |  - `ArrayMul`: Multiply two arrays together component by component.
13 |  - `GenericArrayAdd`: Add together an arbitrary amount of arrays.
14 |  - `ScalarArrayMul`: Multiply each array element by a scalar value.
15 |  - `InRange`: Check if a given number is in a given range.
16 |  - `Switch`: Return a scalar value given a specific case.
17 |  - `SwitchArray`: Return an array given a specific case.
18 | 
19 | 
20 | ## Testing
21 | Tests for this module are located in the file: `./test/utils/operators.test.ts`
22 | */
23 | 
24 | pragma circom 2.1.9;
25 | 
26 | include "circomlib/circuits/bitify.circom";
27 | include "circomlib/circuits/comparators.circom";
28 | include "array.circom";
29 | 
30 | 
31 | /*
32 | This template checks if a given `n`-bit value is contained in a range of `n`-bit values
33 | 
34 | # Params:
35 | - `n`: the number of bits to use
36 | 
37 | # Inputs:
38 | - `in`: the number to check; `range[2]`: the lower and upper bounds of the range, respectively
39 | 
40 | # Outputs:
41 | - `out`: either `0` or `1`
42 |     - `1` if `in` is within the range
43 |     - `0` otherwise
44 | */
45 | template InRange(n) {
46 |     signal input in;
47 |     signal input range[2];
48 |     signal output out;
49 | 
50 |     component gte = GreaterEqThan(n);
51 |     gte.in <== [in, range[0]];
52 | 
53 |     component lte = LessEqThan(n);
54 |     lte.in <== [in, range[1]];
55 | 
56 |     out <== gte.out * lte.out;
57 | }
58 | 
59 | /*
60 | This template creates an exhaustive switch statement from a list of branch values.
61 | # Params:
62 | - `n`: the number of switch cases
63 | 
64 | # Inputs:
65 | - `case`: which case of the switch to select
66 | - `branches[n]`: the values that enable taking different branches in the switch
67 |     (e.g., if `branches[i] == 10` then if `case == 10` we set `out == vals[i]`)
68 | - `vals[n]`: the value that is emitted for a given switch case
69 |     (e.g., `vals[i]` is emitted on `case == branches[i]`)
70 | 
71 | # Outputs
72 | - `match`: is set to `0` if `case` does not match on any of `branches`
73 | - `out`: the selected output value if one of `branches` is selected (will be `0` otherwise)
74 |     ^^^^^^ BEWARE OF THIS FACT ABOVE!
75 | */
76 | template Switch(n) {
77 |     assert(n > 0);
78 |     signal input case;
79 |     signal input branches[n];
80 |     signal input vals[n];
81 |     signal output match;
82 |     signal output out;
83 | 
84 | 
85 |     // Verify that the `case` is in the possible set of branches
86 |     component indicator[n];
87 |     component matchChecker = Contains(n);
88 |     signal temp_val[n];
89 |     var sum;
90 |     for(var i = 0; i < n; i++) {
91 |         indicator[i] = IsZero();
92 |         indicator[i].in <== case - branches[i];
93 |         matchChecker.array[i] <== 1 - indicator[i].out;
94 |         temp_val[i] <== indicator[i].out * vals[i];
95 |         sum += temp_val[i];
96 |     }
97 |     matchChecker.in <== 0;
98 |     match <== matchChecker.out;
99 | 
100 |     out <== sum;
101 | }
102 | 
103 | /*
104 | This template creates an exhaustive switch statement from a list of branch values.
105 | # Params:
106 | - `m`: the number of switch cases
107 | - `n`: the output array length
108 | 
109 | # Inputs:
110 | 
111 | - `case`: which case of the switch to select
112 | - `branches[m]`: the values that enable taking different branches in the switch
113 |     (e.g., if `branches[i] == 10` then if `case == 10` we set `out == vals[i]`)
114 | - `vals[m][n]`: the value that is emitted for a given switch case
115 |     (e.g., the `vals[i]` array is emitted on `case == branches[i]`)
116 | 
117 | # Outputs
118 | - `match`: is set to `0` if `case` does not match on any of `branches`
119 | - `out[n]`: the selected output value if one of `branches` is selected (will be `[0,0,...]` otherwise)
120 |     ^^^^^^ BEWARE OF THIS FACT ABOVE!
121 | */ 122 | template SwitchArray(m, n) { 123 | assert(m > 0); 124 | assert(n > 0); 125 | signal input case; 126 | signal input branches[m]; 127 | signal input vals[m][n]; 128 | signal output match; 129 | signal output out[n]; 130 | 131 | 132 | // Verify that the `case` is in the possible set of branches 133 | component indicator[m]; 134 | component matchChecker = Contains(m); 135 | signal component_out[m][n]; 136 | var sum[n]; 137 | for(var i = 0; i < m; i++) { 138 | indicator[i] = IsZero(); 139 | indicator[i].in <== case - branches[i]; 140 | matchChecker.array[i] <== 1 - indicator[i].out; 141 | for(var j = 0; j < n; j++) { 142 | component_out[i][j] <== indicator[i].out * vals[i][j]; 143 | sum[j] += component_out[i][j]; 144 | } 145 | } 146 | matchChecker.in <== 0; 147 | match <== matchChecker.out; 148 | 149 | out <== sum; 150 | } 151 | 152 | -------------------------------------------------------------------------------- /create-pp/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "create-pp" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | [dependencies] 7 | proofs = { git = "https://github.com/pluto/web-prover", rev = "d8c81d5" } 8 | anyhow = "1.0" 9 | 10 | [[bin]] 11 | name = "create_pp" 12 | path = "src/main.rs" 13 | -------------------------------------------------------------------------------- /create-pp/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::{self, File}, 3 | io::Write, 4 | path::{Path, PathBuf}, 5 | }; 6 | 7 | use anyhow::{Context, Result}; 8 | 9 | struct CircuitFiles { 10 | r1cs_path: PathBuf, 11 | } 12 | 13 | const BASE_CIRCUIT_NAMES: &[&str] = 14 | &["plaintext_authentication", "http_verification", "json_extraction"]; 15 | 16 | fn read_file(path: &Path) -> Result> { 17 | fs::read(path).with_context(|| format!("Failed to read file: {}", path.display())) 18 | } 19 | 20 | fn load_circuit_files(artifacts_dir: &Path, target_size: &str) -> Result> { 21 | BASE_CIRCUIT_NAMES 22 | .iter() 23 | .map(|name| { 24 | let circuit_name = format!("{name}_{target_size}"); 25 | let r1cs_path = artifacts_dir.join(format!("{circuit_name}.r1cs")); 26 | 27 | // Verify files exist before proceeding 28 | if !r1cs_path.exists() { 29 | anyhow::bail!("R1CS file not found: {}", r1cs_path.display()); 30 | } 31 | 32 | Ok(CircuitFiles { r1cs_path }) 33 | }) 34 | .collect() 35 | } 36 | 37 | fn main() -> Result<()> { 38 | let args: Vec = std::env::args().collect(); 39 | if args.len() != 4 { 40 | anyhow::bail!("Usage: {} ", args[0]); 41 | } 42 | 43 | let artifacts_dir = PathBuf::from(&args[1]); 44 | let target_size = &args[2]; 45 | let max_rom_length = args[3].parse().context("Failed to parse max_rom_length as number")?; 46 | 47 | println!("Processing circuits for target size: {target_size}"); 48 | println!("Loading circuit files from: {}", artifacts_dir.display()); 49 | println!("Using max ROM length: {max_rom_length}"); 50 | 51 | let circuit_files = load_circuit_files(&artifacts_dir, target_size)?; 52 | 53 | let r1cs_files = circuit_files 54 | .iter() 55 | .map(|cf| { 56 | let data = read_file(&cf.r1cs_path)?; 57 | Ok(proofs::program::data::R1CSType::Raw(data)) 58 | }) 59 | .collect::>>()?; 60 | 61 | println!("Generating `BackendData`..."); 62 | 63 | let setup = proofs::setup::setup(&r1cs_files, max_rom_length); 64 | 65 | let output_file = 66 | artifacts_dir.join(format!("serialized_setup_{target_size}_rom_length_{max_rom_length}.bin",)); 67 | 
println!("Writing output to: {}", output_file.display());
68 | 
69 |   let mut file = File::create(&output_file)
70 |     .with_context(|| format!("Failed to create output file: {}", output_file.display()))?;
71 | 
72 |   file
73 |     .write_all(&setup)
74 |     .with_context(|| format!("Failed to write to output file: {}", output_file.display()))?;
75 | 
76 |   println!("Successfully completed setup for target size: {target_size}");
77 |   Ok(())
78 | }
--------------------------------------------------------------------------------
/docs/http.md:
--------------------------------------------------------------------------------
1 | # HTTP Extractor
2 | 
3 | > [!WARNING] Deprecated docs
4 | > These docs are out-of-date with the current architecture, which uses a hash-based approach. They will be updated later. More details can be found in the [circuits](../circuits/http/verification.circom).
5 | 
6 | HTTP is a stricter and better-defined specification than JSON, and thus its parser is much simpler than JSON's.
7 | 
8 | Proof generation for the HTTP extractor is broken into:
9 | - [Parser](../circuits/http/parser/machine.circom): state parser based on a stack machine
10 | - [Interpreter](../circuits/http/interpreter.circom): interpretation of the stack machine to represent different HTTP states.
11 | - [Locker](../circuits/http/locker.circom): locks the start line and headers in an HTTP file
12 | - [codegen](../src/codegen/http.rs): generates a locker circuit that locks the start line and headers and extracts the response
13 | 
14 | ## Parser
15 | 
16 | We follow [RFC 9112](https://httpwg.org/specs/rfc9112.html) to represent and understand HTTP state in the parser.
17 | 
18 | The parser is divided into two files:
19 | - [Language](../circuits/json/parser/language.circom): HTTP language syntax
20 | - [Machine](../circuits/json/parser/machine.circom): stack machine responsible for updating state
21 | 
22 | The HTTP parser state consists of:
23 | - `parsing_start`: flag that counts up to 3 for each value in the start line. A Request has `[method, target, version]` and a Response has `[version, status, message]`.
24 | - `parsing_header`: flag + counter for each new header
25 | - `parsing_field_name`: flag tracking if inside a field name
26 | - `parsing_field_value`: flag tracking whether inside a field value
27 | - `parsing_body`: flag tracking if inside the body
28 | - `line_status`: flag counting double CRLF
29 | 
30 | We advise going through the detailed [tests](../circuits/test/http/locker.test.ts) to understand HTTP state parsing.
31 | 
32 | ## Interpreter
33 | The interpreter builds the following high-level circuits to understand the parser state:
34 | - `inStartLine`: whether the parser is inside the start line
35 | - `inStartMiddle`: whether the parser is inside the second value of the start line
36 | - `inStartEnd`: whether the parser is inside the last value of the start line
37 | - `MethodMatch`: matches a method at a specified index
38 | - `HeaderFieldNameValueMatch`: match a header field name and value
39 | - `HeaderFieldNameMatch`: match a header field name
40 | 
41 | ## Codegen
42 | A [lockfile](../examples/http/lockfile/) needs to be supplied when generating the code through the `pabuild` CLI, and it should follow certain rules.
43 | 
44 | ```json
45 | {
46 |     "version": "HTTP/1.1",
47 |     "status": "200",
48 |     "message": "OK",
49 |     "headerName1": "Content-Type",
50 |     "headerValue1": "application/json"
51 | }
52 | ```
53 | 
54 | It should mention the start line values, depending on whether it is a Request or Response file, and the header field names and values to be matched.
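To make the matching rules concrete, here is a minimal TypeScript sketch of the constraints such a lockfile encodes. The `Lockfile` interface and `checkLockfile` helper are illustrative names of ours, not part of this repository; the real checks are enforced in-circuit by the generated Circom template.

```ts
// Illustrative only: what a response lockfile "locks" about an HTTP response.
interface Lockfile {
  version: string;       // e.g. "HTTP/1.1"
  status: string;        // e.g. "200"
  message: string;       // e.g. "OK"
  [key: string]: string; // headerName{i} / headerValue{i} pairs
}

function checkLockfile(lock: Lockfile, startLine: string, headers: Map<string, string>): boolean {
  // The start line must match `version status message`, e.g. `HTTP/1.1 200 OK`.
  if (startLine !== `${lock.version} ${lock.status} ${lock.message}`) return false;
  // Every headerName{i}/headerValue{i} pair must appear verbatim among the headers.
  for (let i = 1; lock[`headerName${i}`] !== undefined; i++) {
    if (headers.get(lock[`headerName${i}`]) !== lock[`headerValue${i}`]) return false;
  }
  return true;
}
```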
55 | 
56 | Codegen generates a Circom template that matches the lockfile values and, if the lockfile is for response data, extracts the response body.
57 | 
58 | ## Extractor
59 | Extracting the response body is done by checking whether the parser state is inside the body and creating a mask to determine the starting bytes. Shifting the body by the starting byte index gives the response body.
--------------------------------------------------------------------------------
/docs/images/v0.7.0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pluto/web-prover-circuits/c0b31bf925fb8a0bec939e8b8167d1a8f8ad2703/docs/images/v0.7.0.png
--------------------------------------------------------------------------------
/docs/images/v0.7.5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pluto/web-prover-circuits/c0b31bf925fb8a0bec939e8b8167d1a8f8ad2703/docs/images/v0.7.5.jpg
--------------------------------------------------------------------------------
/docs/images/v0.9.0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pluto/web-prover-circuits/c0b31bf925fb8a0bec939e8b8167d1a8f8ad2703/docs/images/v0.9.0.jpg
--------------------------------------------------------------------------------
/docs/json.md:
--------------------------------------------------------------------------------
1 | # JSON extractor
2 | 
3 | The extractor module provides circuits to generate proofs of arbitrary values in a JSON file. To achieve this, proof generation is broken into the following components:
4 | - [parser](../circuits/json/parser/): state parser based on a stack machine
5 | - [interpreter](../circuits/json/interpreter.circom): high-level interpretation of JSON state
6 | - [codegen](../src/bin/codegen.rs): extractor circuit generation
7 | - [extractor](../circuits/main/extractor.circom): extracting the value for a specific key inside a JSON
8 | 
9 | ## Parser
10 | The parser is divided into three files:
11 | - [Language](../circuits/json/parser/language.circom): JSON language syntax
12 | - [Parser](../circuits/json/parser/parser.circom): initialises the parser and parses individual bytes
13 | - [Machine](../circuits/json/parser/machine.circom): stack machine responsible for updating state
14 | 
15 | The state of the JSON parser consists of:
16 | - `stack` with a maximum `MAX_STACK_HEIGHT` argument
17 | - `parsing_string`
18 | - `parsing_number`
19 | 
20 | Let's take a simple [example](../examples/json/test/value_string.json): `{ "k": "v" }`. The parser initialises the stack with `[0, 0]` and starts iterating through each byte one-by-one.
21 | 
22 | 1. `0`: detects `START_BRACKET: {`, so we're inside a key, and updates the stack to `[1, 0]`
23 | 2. `3`: detects a `QUOTE: "` and toggles `parsing_string` to `1`
24 | 3. `4`: detects another `QUOTE` and toggles `parsing_string` back to `0`
25 | 4. `5`: detects `COLON` and updates the stack to `[1, 1]`, which means we're now inside a value
26 | 5. `7`: detects a `QUOTE` again and toggles `parsing_string`, which is toggled back on `9`
27 | 6. 
`11`: detects `CLOSING_BRACKET: }` and resets stack back to `[0, 0]` 28 | 29 | ``` 30 | State[ 0 ].byte = 123 31 | State[ 0 ].stack[ 0 ] = [ 1 ][ 0 ] 32 | State[ 0 ].parsing_string = 0 33 | State[ 0 ].parsing_number = 0 34 | mask 0 35 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 36 | State[ 1 ].byte = 32 37 | State[ 1 ].stack[ 0 ] = [ 1 ][ 0 ] 38 | State[ 1 ].parsing_string = 0 39 | State[ 1 ].parsing_number = 0 40 | mask 0 41 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 42 | State[ 2 ].byte = 34 43 | State[ 2 ].stack[ 0 ] = [ 1 ][ 0 ] 44 | State[ 2 ].parsing_string = 1 45 | State[ 2 ].parsing_number = 0 46 | mask 0 47 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 48 | State[ 3 ].byte = 107 49 | State[ 3 ].stack[ 0 ] = [ 1 ][ 0 ] 50 | State[ 3 ].parsing_string = 1 51 | State[ 3 ].parsing_number = 0 52 | mask 0 53 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 54 | State[ 4 ].byte = 34 55 | State[ 4 ].stack[ 0 ] = [ 1 ][ 0 ] 56 | State[ 4 ].parsing_string = 0 57 | State[ 4 ].parsing_number = 0 58 | mask 0 59 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 60 | State[ 5 ].byte = 58 61 | State[ 5 ].stack[ 0 ] = [ 1 ][ 1 ] 62 | State[ 5 ].parsing_string = 0 63 | State[ 5 ].parsing_number = 0 64 | mask 0 65 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 66 | State[ 6 ].byte = 32 67 | State[ 6 ].stack[ 0 ] = [ 1 ][ 1 ] 68 | State[ 6 ].parsing_string = 0 69 | State[ 6 ].parsing_number = 0 70 | mask 0 71 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 72 | State[ 7 ].byte = 34 73 | State[ 7 ].stack[ 0 ] = [ 1 ][ 1 ] 74 | State[ 7 ].parsing_string = 1 75 | State[ 7 ].parsing_number = 0 76 | mask 34 77 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 78 | State[ 8 ].byte = 118 79 | State[ 8 ].stack[ 0 ] = [ 1 ][ 1 ] 80 | State[ 8 ].parsing_string = 1 81 | State[ 8 ].parsing_number = 0 82 | mask 118 83 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 84 | State[ 9 ].byte = 34 85 | State[ 9 ].stack[ 0 ] = [ 1 ][ 1 ] 86 | State[ 9 ].parsing_string = 0 87 | State[ 9 ].parsing_number = 0 88 | mask 0 89 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 90 | State[ 10 ].byte = 32 91 | State[ 10 ].stack[ 0 ] = [ 1 ][ 1 ] 92 | State[ 10 ].parsing_string = 0 93 | State[ 10 ].parsing_number = 0 94 | mask 0 95 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 96 | State[ 11 ].stack[ 0 ] = [ 0 ][ 0 ] 97 | State[ 11 ].parsing_string = 0 98 | State[ 11 ].parsing_number = 0 99 | xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx 100 | value_starting_index 7 101 | value[ 0 ]= 118 102 | ``` 103 | 104 | Logic for parser: 105 | - Iterate through each byte. 106 | - Determine current byte token, and form instruction. 107 | - determine what is the current byte in signals: start brace, end brace, start bracket, end bracket, colon, comma, number, quote, other (whitespace) 108 | - create an instruction by multiplying arrays together 109 | - form a state mask based on current state 110 | - multiply instruction and mask together to calculate whether reading or writing value to stack. 111 | - rewrite stack using new instruction 112 | - stack[0] can change when pushing (read start brace or bracket) / popping (read end brace or bracket) 113 | - stack[1] can change when readColon / readComma 114 | 115 | Let's deep dive into interpreter and extractor. 116 | 117 | ## Interpreter 118 | Interpreter builds high-level circuits on top of stack to understand state better. 
It provides the following templates:
119 | - `InsideKey`
120 | - `InsideValueAtTop` & `InsideValue`
121 | - `InsideArrayIndexAtTop` & `InsideArrayIndex`
122 | - `NextKVPair` & `NextKVPairAtDepth`
123 | - `KeyMatch` & `KeyMatchAtDepth`
124 | 
125 | ## Codegen
126 | To handle JSON keys of arbitrary depth, we need to generate circuits on-the-fly using some metadata.
127 | 
128 | ```json
129 | {
130 |     "keys": [
131 |         "a"
132 |     ],
133 |     "value_type": "string"
134 | }
135 | ```
136 | 
137 | Each new key in `keys` is associated with a depth in the parser stack, i.e. key `a` has depth `0`, and the value type of `a` is a `string`.
138 | Using this, a Rust program generates a circuit that can extract any key at depth 0 (and not just the key `a`) whose value type is a string.
139 | 
140 | ## Extractor
141 | To extract a key at a specific depth and value type, we provide
142 | 
143 | arguments:
144 | - `DATA_BYTES`: data length in bytes
145 | - `MAX_STACK_HEIGHT`: maximum stack height possible during parsing of `data`. Equal to the maximum number of open brackets `{, [` in the data.
146 | - `keyLen{i}`: ith key length in bytes, if the key is a string
147 | - `index{i}`: ith key array index
148 | - `depth{i}`: ith key's stack depth
149 | - `maxValueLen`: maximum value length
150 | 
151 | inputs:
152 | - `data`: data in a bytes array of `DATA_BYTES` length
153 | - `key{i}`: key i in a bytes array of `keyLen{i}` length
154 | 
155 | output:
156 | - `value`: value of the specified key
157 | 
158 | The extractor performs the following operations:
159 | - parse data byte-by-byte using the parser
160 | - use the interpreter to gather more information on the current state, i.e. whether we're parsing a key or a value
161 | - if `parsing_key`, then it matches each key in the `is_key{i}_match` signal
162 | - if `parsing_value`, then it checks whether we're inside the correct values at each depth, i.e.
163 |     - if the key looks like `a.0.b.0`, then the value of the stack at depth `0` should be `[1, 1]`, at depth `1` should be `[2, 0]`, and so on.
164 | - if the key matches, then we need to propagate this result to the value of that key.
165 |     - We use the interpreter's `NextKVPair` template to determine when we start parsing the next key pair again in `is_next_pair_at_depth{i}`
166 |     - In the previous example,
167 |         - the key match (`byte = 107`) happened at state 3, so we toggle `is_key1_match_for_value[3]` to true.
168 |         - At state 4, `is_key1_match[4]` will return false, but, since we're not yet parsing the next key pair, we want `is_key1_match_for_value[4] = true` as well.
169 |         - So, we just use the previous index's `is_key1_match_for_value` value, i.e. `is_key1_match_for_value[4] = is_key1_match_for_value[3] * is_next_pair[4]`
170 |         - as soon as we hit the next pair, we toggle this bit back to 0 and wait for a key match again.
171 | - To extract the value, we create a `mask` around that value (see the sketch at the end of this document).
172 |     - `mask[i] = data[i] * parsing_value[i] * is_value_match[i]`, i.e. we're inside the correct value and the key matched for this value.
173 |     - Then, we just shift `data` by `value_starting_bytes` to the left and truncate the `data` length to `maxValueLen`.
174 | 
175 | We encourage you to look at the [tests](../circuits/test/json/) if you need a deeper understanding of the [examples](../examples/json/test/).
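As a rough illustration, here is a small TypeScript sketch of that mask-and-shift step. `extractValue` is a hypothetical helper of ours (the real logic is enforced in-circuit), assuming `parsingValue[i]` and `isValueMatch[i]` are the 0/1 indicator signals described above.

```ts
// Illustrative mask-and-shift extraction over a byte array.
function extractValue(
  data: number[],
  parsingValue: number[], // 1 iff the parser is inside the correct-depth value at byte i
  isValueMatch: number[], // 1 iff the matched key's result has been propagated to byte i
  maxValueLen: number,
): number[] {
  // mask[i] = data[i] * parsing_value[i] * is_value_match[i]
  const mask = data.map((byte, i) => byte * parsingValue[i] * isValueMatch[i]);
  // value_starting_index: the first non-zero byte of the mask.
  const start = mask.findIndex((byte) => byte !== 0);
  if (start === -1) return Array(maxValueLen).fill(0);
  // Shift left by the starting index and truncate to maxValueLen.
  const value = mask.slice(start, start + maxValueLen);
  return value.concat(Array(maxValueLen - value.length).fill(0));
}
```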
-------------------------------------------------------------------------------- /examples/http/get_request.http: -------------------------------------------------------------------------------- 1 | GET /api HTTP/1.1 2 | Accept: application/json 3 | Host: localhost -------------------------------------------------------------------------------- /examples/http/get_response.http: -------------------------------------------------------------------------------- 1 | HTTP/1.1 200 OK 2 | Content-Type: application/json 3 | Content-Length: 19 4 | 5 | {"success":"true"} -------------------------------------------------------------------------------- /examples/http/github_response.http: -------------------------------------------------------------------------------- 1 | HTTP/1.1 200 OK 2 | Connection: close 3 | Content-Length: 22 4 | Cache-Control: max-age=300 5 | Content-Security-Policy: default-src 'none'; style-src 'unsafe-inline'; sandbox 6 | Content-Type: text/plain; charset=utf-8 7 | ETag: "e0e6510c1fc13b3a63acbc0615ee07a4952873a8da77027d00412fccf1a5ce29" 8 | Strict-Transport-Security: max-age=31536000 9 | X-Content-Type-Options: nosniff 10 | X-Frame-Options: deny 11 | X-XSS-Protection: 1; mode=block 12 | X-GitHub-Request-Id: 7831:327414:12F9E6:1A33C2:676468F1 13 | Accept-Ranges: bytes 14 | Date: Thu, 19 Dec 2024 21:35:59 GMT 15 | Via: 1.1 varnish 16 | X-Served-By: cache-hyd1100034-HYD 17 | X-Cache: HIT 18 | X-Cache-Hits: 0 19 | X-Timer: S1734644160.560953,VS0,VE1 20 | Vary: Authorization,Accept-Encoding,Origin 21 | Access-Control-Allow-Origin: * 22 | Cross-Origin-Resource-Policy: cross-origin 23 | X-Fastly-Request-ID: 20aef87025f684be76257f15bff5a792ac15aad2 24 | Expires: Thu, 19 Dec 2024 21:40:59 GMT 25 | Source-Age: 153 26 | 27 | { 28 | "hello": "world" 29 | } -------------------------------------------------------------------------------- /examples/http/large_request.http: -------------------------------------------------------------------------------- 1 | POST /api/v1/data HTTP/1.1 2 | Host: example.com 3 | Content-Type: application/json 4 | User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 5 | Accept: application/json 6 | Accept-Encoding: gzip, deflate, br 7 | Accept-Language: en-US,en;q=0.9 8 | Cache-Control: no-cache 9 | Connection: keep-alive 10 | Cookie: session=abc123; user_id=12345; theme=dark 11 | Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ 12 | X-Request-ID: 7b92f2b0-69f6-11ee-8c99-0242ac120002 13 | X-Forwarded-For: 203.0.113.195 14 | Content-Length: 989 15 | 16 | { 17 | "metadata": { 18 | "requestId": "7b92f2b0-69f6-11ee-8c99-0242ac120002", 19 | "timestamp": "2025-01-27T10:15:30Z", 20 | "source": "web-client", 21 | "version": "1.0.0" 22 | }, 23 | "data": { 24 | "user": { 25 | "id": "usr_12345", 26 | "email": "john.doe@example.com", 27 | "preferences": { 28 | "notifications": { 29 | "email": true, 30 | "push": false, 31 | "sms": true 32 | }, 33 | "timezone": "America/New_York", 34 | "language": "en-US" 35 | } 36 | }, 37 | "items": [ 38 | { 39 | "id": "item_001", 40 | "name": "Product A", 41 | "quantity": 2, 42 | "price": 29.99 43 | }, 44 | { 45 | "id": "item_002", 46 | "name": "Product B", 47 | "quantity": 1, 48 | "price": 49.99 49 | } 50 | ] 51 | } 52 | } -------------------------------------------------------------------------------- /examples/http/large_response.http: -------------------------------------------------------------------------------- 1 | HTTP/1.1 200 OK 
2 | Date: Mon, 27 Jan 2025 10:15:31 GMT 3 | Content-Type: application/json 4 | Content-Length: 1760 5 | Server: nginx/1.18.0 6 | Cache-Control: no-cache, no-store, must-revalidate 7 | Pragma: no-cache 8 | Expires: 0 9 | X-Request-ID: 7b92f2b0-69f6-11ee-8c99-0242ac120002 10 | X-Frame-Options: DENY 11 | X-Content-Type-Options: nosniff 12 | X-XSS-Protection: 1; mode=block 13 | Strict-Transport-Security: max-age=31536000; includeSubDomains 14 | Access-Control-Allow-Origin: * 15 | Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS 16 | Access-Control-Allow-Headers: Content-Type, Authorization 17 | 18 | { 19 | "status": "success", 20 | "code": 200, 21 | "requestId": "7b92f2b0-69f6-11ee-8c99-0242ac120002", 22 | "timestamp": "2025-01-27T10:15:31Z", 23 | "data": { 24 | "orderDetails": { 25 | "orderId": "ord_67890", 26 | "status": "confirmed", 27 | "createdAt": "2025-01-27T10:15:30Z", 28 | "updatedAt": "2025-01-27T10:15:31Z", 29 | "totalAmount": 109.97, 30 | "currency": "USD", 31 | "items": [ 32 | { 33 | "id": "item_001", 34 | "name": "Product A", 35 | "quantity": 2, 36 | "price": 29.99, 37 | "subtotal": 59.98, 38 | "metadata": { 39 | "sku": "SKU123456", 40 | "category": "Electronics", 41 | "weight": "0.5kg", 42 | "dimensions": "10x5x2cm" 43 | } 44 | }, 45 | { 46 | "id": "item_002", 47 | "name": "Product B", 48 | "quantity": 1, 49 | "price": 49.99, 50 | "subtotal": 49.99, 51 | "metadata": { 52 | "sku": "SKU789012", 53 | "category": "Accessories", 54 | "weight": "0.3kg", 55 | "dimensions": "15x8x4cm" 56 | } 57 | } 58 | ] 59 | }, 60 | "metadata": { 61 | "processingTime": "125ms", 62 | "serverRegion": "us-east-1", 63 | "apiVersion": "2024-01", 64 | "debug": { 65 | "cache": "miss", 66 | "database": "primary", 67 | "loadBalancer": "lb-01" 68 | } 69 | } 70 | } 71 | } -------------------------------------------------------------------------------- /examples/http/post_request.http: -------------------------------------------------------------------------------- 1 | POST /contact_form.php HTTP/1.1 2 | Host: developer.mozilla.org 3 | Content-Length: 64 4 | Content-Type: application/x-www-form-urlencoded 5 | 6 | name=Joe%20User&request=Send%20me%20one%20of%20your%20catalogue -------------------------------------------------------------------------------- /examples/http/reddit_request.http: -------------------------------------------------------------------------------- 1 | POST https://gql.reddit.com/ HTTP/1.1 2 | authorization: Bearer 
eyJhbGciOiJSUzI1NiIsImtpZCI6IlNIQTI1NjpzS3dsMnlsV0VtMjVmcXhwTU40cWY4MXE2OWFFdWFyMnpLMUdhVGxjdWNZIiwidHlwIjoiSldUIn0.eyJzdWIiOiJ1c2VyIiwiZXhwIjoxNzM5MzcyMDExLjUzMjg3OSwiaWF0IjoxNzM5Mjg1NjExLjUzMjg3OSwianRpIjoiRjV5NU9ROHFnTlNRM290eGNCdWlpZjRxVnZqZnlRIiwiY2lkIjoiMFItV0FNaHVvby1NeVEiLCJsaWQiOiJ0Ml90YXppNm1rIiwiYWlkIjoidDJfdGF6aTZtayIsImxjYSI6MTUxNjY4NDYwMTUxMywic2NwIjoiZUp4a2tkR090REFJaGQtRmE1X2dmNVVfbTAxdGNZYXNMUWFvazNuN0RWb2NrNzA3Y0Q0cEhQOURLb3FGRENaWGdxbkFCRmdUclREQlJ1VDluTG0zZzJpTmU4dFlzWm5DQkZtd0ZEcmttTEdzaVFRbWVKSWF5eHNtb0lMTnlGeXV0R05OTFQwUUpxaGNNcmVGSHBjMm9ia2JpNTZkR0ZXNXJEeW9zVmZsMHRqR0ZMWW54amNicXcycHVDNm5Na25MUXZrc1h2VGpOOVczOXZtel9TYTBKOE9LcXVtQjNobEpDRzRzZnBpbTNkOVRrNTZ0Q3hhMTkzcVEydWQ2M0s1OTFpdzBPN2VmNl9sckl4bVhZMmgtSnZ0MzF5LWhBNDg4THpQcUFFYXM0VWNaZG1RZF9sVUhVTG1nSkdNSjR0TUk1TXJsMjM4SnRtdlR2OGJ0RXo5OE0tS21OX3pXRE5SekNlTFFwX0gxR3dBQV9fOFExZVRSIiwicmNpZCI6Im0wbHNSSmFIdDZWdDZLRklZZW1XTGRGVGJGYV9GU1RxRDgwWTRrMS1VekUiLCJmbG8iOjJ9.rK51OmevkhnzKEdabkJijC8T0mXYhw_fxIfCNcUXVq7yUNFZ343EnRHbqRmmqxLhw3VFSg5Ow3mF6FwsOcWyoQ2oSQD98Eokylu6BoxjjmHsv5Wqilgomk53CLmnxjmtliX9FiYDneUCtu8vguJ1c7cw44NKAFX3Z8rz0clN0hXIBfF1_8_KPOS3iBYNItB1wUt5ycEX82oOOIeCmKJ6osG7GBzewfBmDaVr04Ya46sK5XKE1QzoE1-27AEfxeUUd--SBfSGufLcDtT8mEbdWpOzwhjiUXhbcPyWa8oWD8vXeMJ-uy1l0zbZLA0qFXLD8BM-PBjmeUtN7siNCMo8HQ 3 | user-agent: Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Mobile Safari/537.36 4 | content-type: application/json 5 | host: gql.reddit.com 6 | accept-encoding: identity 7 | connection: close 8 | accept: */* 9 | content-length: 51 10 | 11 | {"id":"db6eb1356b13","variables":{"name":"sample"}} -------------------------------------------------------------------------------- /examples/http/spotify_top_artists_request.http: -------------------------------------------------------------------------------- 1 | GET /v1/me/top/artists?time_range=medium_term&limit=1 HTTP/1.1 2 | Host: api.spotify.com 3 | Authorization: Bearer BQBXRpIm2NL08akEiaxB5l42eiq6Zd9Q0S2-0Q4k0CMoa5u8o_ah_Ddjxt6Mv3226AEDyKYcFPpgw_6Asg-Y2hJpcuMya8wzqyqgiV-KH0vcEq7EFzODXoaBxsB0wryVCWDF6p5dqcpIHOz4QJqQa9mUA6sFzYNyECglT-BGcRe_N9f_3aqYTGQ-kkE-devPkPkEfDcbziT6mOzJfGRzLw -------------------------------------------------------------------------------- /examples/http/spotify_top_artists_response.http: -------------------------------------------------------------------------------- 1 | HTTP/1.1 200 OK 2 | content-type: application/json; charset=utf-8 3 | content-encoding: gzip 4 | Transfer-Encoding: chunked 5 | 6 | { 7 | "data": {"items": [{"data": "Artist","profile": {"name": "Taylor Swift"}}]} 8 | } -------------------------------------------------------------------------------- /examples/json/array_only.json: -------------------------------------------------------------------------------- 1 | [ 42, { "a" : "b" } , [ 0 , 1 ] , "foobar"] -------------------------------------------------------------------------------- /examples/json/binance.json: -------------------------------------------------------------------------------- 1 | {"code":"000000","message":null,"messageDetail":null,"data":{"binanceUid":197716620,"squareUid":"197716620","avatar":"https://public.nftstatic.com/static/nft/res/nft-cex/S3/1698378428368_x0i3hlmbus130x1yc3k0kfrcnz7hjqx0.png","displayName":"Odis Drozda 
lPPq","biography":null,"role":11,"profileImage":null,"verificationType":0,"verificationDescription":null,"totalFollowerCount":0,"totalListedPostCount":0,"accountLang":"en","badge":null,"totalLikeCount":null,"totalShareCount":null,"totalBitCount":null,"totalArticleCount":null,"totalFollowCount":0,"isFollowed":null,"followsYou":false,"allowPushNotification":null,"liveStatusVO":null,"badgeVO":null,"tippingControl":0,"tippingNotification":0,"badgeInfos":null,"username":"Square-Creator-665d163bc568","followersForShow":null,"lastFollowerCount":null,"totalEachFollowerCount":null,"blockType":null,"createTime":null,"modifyTime":null,"hasCopyTradingEntrance":null,"copyTradingIosDeepLink":null,"copyTradingType":null,"copyTradingAndroidDeepLink":null,"copyTradingWebDeepLink":null,"userShareLink":null,"accountStatus":null,"punishStartTime":null,"punishEndTime":null,"punishReason":null,"resetUsername":null,"resetDisplayName":null,"resetAvatar":null,"userTags":null,"holdTokens":null,"tradeTokens":null,"holdingStatus":null,"publicHoldingScope":null,"status":1,"auditStatus":2,"privilege":0,"subAccount":false,"level":0,"note":null,"termsOfUseVersion":"none","totalPostCount":0,"totalViewCount":0,"userProfileLink":"bnc://app.binance.com/mp/app?appId=znf9fpiMh6ufdU3vDtAvi4&startPagePath=cGFnZXMvYnV6ei1wcm9maWxlL2luZGV4&startPageQuery=dWlkPTE5NzcxNjYyMCZ1c2VybmFtZT1TcXVhcmUtQ3JlYXRvci02NjVkMTYzYmM1Njg&sceneValue=1100","isKol":true,"banStatus":0,"updateBiographyTime":null,"updateBiographyLimitationDay":365,"videoEnable":false,"hasMyTradingEntrance":null,"myTradingIosDeepLink":null,"myTradingAndroidDeepLink":null,"myTradingWebDeepLink":null,"defaultTabType":null,"contentLocatedFollowingTabExpGroup":null,"checkinEntranceStartTime":null,"checkinEntranceEndTime":null},"success":true} -------------------------------------------------------------------------------- /examples/json/empty.json: -------------------------------------------------------------------------------- 1 | {"object":{},"arr":[]} -------------------------------------------------------------------------------- /examples/json/primitives.json: -------------------------------------------------------------------------------- 1 | {"null": null, "false": false, "true": true, "num1": 2.0E-1, "num2": 2.0e+1} -------------------------------------------------------------------------------- /examples/json/primitives_array.json: -------------------------------------------------------------------------------- 1 | [null,false,true,2.0E-1,2.0e+1] -------------------------------------------------------------------------------- /examples/json/reddit.json: -------------------------------------------------------------------------------- 1 | { 2 | "data": { 3 | "redditorInfoByName": [ 4 | { 5 | "id": "t1_bepsb", 6 | "karma": { 7 | "fromAwardsGiven": 10, 8 | "fromAwardsReceived": 70, 9 | "fromComments": 95, 10 | "fromPosts": 228, 11 | "total": 3281 12 | } 13 | }, 14 | { 15 | "id": "t2_bepsb", 16 | "karma": { 17 | "fromAwardsGiven": 0, 18 | "fromAwardsReceived": 470, 19 | "fromComments": 9583, 20 | "fromPosts": 13228, 21 | "total": 23281 22 | } 23 | } 24 | ] 25 | } 26 | } -------------------------------------------------------------------------------- /examples/json/spotify.json: -------------------------------------------------------------------------------- 1 | {"data":{"me":{"profile":{"topArtists":{"__typename":"ArtistPageV2","items":[{"data":{"__typename":"Artist","profile":{"name":"Pink 
Floyd"},"uri":"spotify:artist:0k17h0D3J5VfsdmQ1iZtE9","visuals":{"avatarImage":{"sources":[{"height":625,"url":"https://i.scdn.co/image/d011c95081cd9a329e506abd7ded47535d524a07","width":640},{"height":63,"url":"https://i.scdn.co/image/ec1fb7127168dbaa962404031409c5a293b95ec6","width":64},{"height":195,"url":"https://i.scdn.co/image/f0a39a8a196a87a7236bdcf8a8708f6d5d3547cc","width":200},{"height":977,"url":"https://i.scdn.co/image/e69f71e2be4b67b82af90fb8e9d805715e0684fa","width":1000}]}}}},{"data":{"__typename":"Artist","profile":{"name":"Karl Schintz"},"uri":"spotify:artist:1nsmmigdcrwlaJE81ebiun","visuals":{"avatarImage":{"sources":[{"height":640,"url":"https://i.scdn.co/image/ab67616d0000b2736f61a4b07afef775b5f6fe73","width":640},{"height":64,"url":"https://i.scdn.co/image/ab67616d000048516f61a4b07afef775b5f6fe73","width":64},{"height":300,"url":"https://i.scdn.co/image/ab67616d00001e026f61a4b07afef775b5f6fe73","width":300}]}}}},{"data":{"__typename":"Artist","profile":{"name":"The Strokes"},"uri":"spotify:artist:0epOFNiUfyON9EYx7Tpr6V","visuals":{"avatarImage":{"sources":[{"height":640,"url":"https://i.scdn.co/image/ab6761610000e5ebc3b137793230f4043feb0089","width":640},{"height":160,"url":"https://i.scdn.co/image/ab6761610000f178c3b137793230f4043feb0089","width":160},{"height":320,"url":"https://i.scdn.co/image/ab67616100005174c3b137793230f4043feb0089","width":320}]}}}},{"data":{"__typename":"Artist","profile":{"name":"Led Zeppelin"},"uri":"spotify:artist:36QJpDe2go2KgaRleHCDTp","visuals":{"avatarImage":{"sources":[{"height":600,"url":"https://i.scdn.co/image/207803ce008388d3427a685254f9de6a8f61dc2e","width":600},{"height":64,"url":"https://i.scdn.co/image/16eb3cdae0d824b520ac17710e943a99d3ef6602","width":64},{"height":200,"url":"https://i.scdn.co/image/b0248a44865493e6a03832aa89854ada16ff07a8","width":200}]}}}}],"totalCount":4}}}},"extensions":{}} -------------------------------------------------------------------------------- /examples/json/string_escape.json: -------------------------------------------------------------------------------- 1 | {"a": "\"b\""} -------------------------------------------------------------------------------- /examples/json/value_array.json: -------------------------------------------------------------------------------- 1 | { "k" : [ 420 , 69 , 4200 , 600 ] , "b" : [ "ab" , "ba" , "ccc" , "d" ] } -------------------------------------------------------------------------------- /examples/json/value_array_object.json: -------------------------------------------------------------------------------- 1 | { "a" : [ { "b" : [ 1 , 4 ] } , { "c" : "b" } ] } -------------------------------------------------------------------------------- /examples/json/value_object.json: -------------------------------------------------------------------------------- 1 | { "a" : { "d" : "e" , "e" : "c" } , "e" : { "f" : "a" , "e" : "2" } , "g" : { "h" : { "a" : "c" } } , "ab" : "foobar" , "bc" : 42 , "dc" : [ 0 , 1 , "a" ] } -------------------------------------------------------------------------------- /examples/json/venmo.json: -------------------------------------------------------------------------------- 1 | { 2 | "data": { 3 | "profile": { 4 | "identity": { 5 | "balance": { 6 | "userBalance": { 7 | "value": 523.69 8 | } 9 | } 10 | } 11 | } 12 | } 13 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "web-prover-circuits", 3 | 
"description": "ZK Circuits for WebProofs", 4 | "version": "0.10.0", 5 | "license": "Apache-2.0", 6 | "repository": { 7 | "type": "git", 8 | "url": "https://github.com/pluto/web-prover-circuits.git" 9 | }, 10 | "scripts": { 11 | "test": "npx mocha", 12 | "par-test": "npx mocha --parallel" 13 | }, 14 | "dependencies": { 15 | "circomlib": "^2.0.5" 16 | }, 17 | "devDependencies": { 18 | "@semantic-release/commit-analyzer": "^11.1.0", 19 | "@semantic-release/git": "^10.0.1", 20 | "@semantic-release/github": "^9.2.6", 21 | "@semantic-release/npm": "^11.0.2", 22 | "@semantic-release/release-notes-generator": "^12.1.0", 23 | "@types/chai": "^5.0.1", 24 | "@types/mocha": "^10.0.1", 25 | "@types/node": "22.5.4", 26 | "circomkit": "0.3.0", 27 | "mocha": "10.7.3", 28 | "poseidon-lite": "0.3.0", 29 | "semantic-release": "^23.0.2", 30 | "snarkjs": "0.7.4", 31 | "ts-node": "^10.9.1", 32 | "typescript": "5.6.2" 33 | } 34 | } -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "nightly" 3 | profile = "default" 4 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "esModuleInterop": true, 4 | "resolveJsonModule": true, 5 | "forceConsistentCasingInFileNames": true, 6 | "strict": true 7 | } 8 | } -------------------------------------------------------------------------------- /witness-generator/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "web-proof-circuits-witness-generator" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | [dependencies] 7 | anyhow = "1.0" 8 | serde = "1.0" 9 | serde_json = "1.0" 10 | client-side-prover = { git = "https://github.com/pluto/client-side-prover", rev = "8e7eb839e901dcee416179116bb0f9c4f7ae683c" } 11 | ff = { version = "0.13", default-features = false, features = ["derive"] } 12 | thiserror = "2.0" 13 | light-poseidon = { git = "https://github.com/pluto/light-poseidon" } 14 | ark-bn254 = "=0.4.0" 15 | halo2curves = "0.6.1" 16 | sppark = "=0.1.10" 17 | num-bigint = "0.4" 18 | 19 | [dev-dependencies] 20 | rstest = "0.24" 21 | -------------------------------------------------------------------------------- /witness-generator/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | #[derive(Error, Debug)] 4 | pub enum WitnessGeneratorError { 5 | #[error("{0}")] 6 | CatchAll(String), 7 | #[error(transparent)] 8 | SerdeJson(#[from] serde_json::Error), 9 | #[error("{0}")] 10 | JsonParser(String), 11 | #[error("json key not found: {0}")] 12 | JsonKeyError(String), 13 | } 14 | -------------------------------------------------------------------------------- /witness-generator/src/http/mod.rs: -------------------------------------------------------------------------------- 1 | use serde::Serialize; 2 | 3 | use super::*; 4 | pub mod parser; 5 | 6 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 7 | pub struct HttpMachine { 8 | pub header_num: usize, 9 | pub status: HttpStatus, 10 | pub line_digest: F, 11 | pub line_monomial: F, 12 | } 13 | 14 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize)] 15 | #[serde(into = "[String; 8]")] 16 | pub struct RawHttpMachine { 17 | pub parsing_start: F, 18 | pub 
parsing_header: F,
19 |   pub parsing_field_name: F,
20 |   pub parsing_field_value: F,
21 |   pub parsing_body: F,
22 |   pub line_status: F,
23 |   pub line_digest: F,
24 |   pub line_monomial: F,
25 | }
26 | 
27 | /// Implement `From<RawHttpMachine>` for `[String; 8]`
28 | impl From<RawHttpMachine> for [String; 8] {
29 |   fn from(machine: RawHttpMachine) -> Self {
30 |     [
31 |       field_element_to_base10_string(machine.parsing_start),
32 |       field_element_to_base10_string(machine.parsing_header),
33 |       field_element_to_base10_string(machine.parsing_field_name),
34 |       field_element_to_base10_string(machine.parsing_field_value),
35 |       field_element_to_base10_string(machine.parsing_body),
36 |       field_element_to_base10_string(machine.line_status),
37 |       field_element_to_base10_string(machine.line_digest),
38 |       field_element_to_base10_string(machine.line_monomial),
39 |     ]
40 |   }
41 | }
42 | 
43 | impl From<HttpMachine> for RawHttpMachine {
44 |   fn from(value: HttpMachine) -> Self {
45 |     let mut raw_http_machine = RawHttpMachine {
46 |       line_digest: value.line_digest,
47 |       parsing_header: F::from(value.header_num as u64),
48 |       line_monomial: value.line_monomial,
49 |       ..Default::default()
50 |     };
51 |     match value.status {
52 |       HttpStatus::ParsingStart(start_line_location) => match start_line_location {
53 |         StartLineLocation::Beginning => raw_http_machine.parsing_start = F::ONE,
54 |         StartLineLocation::Middle => raw_http_machine.parsing_start = F::from(2),
55 |         StartLineLocation::End => raw_http_machine.parsing_start = F::from(3),
56 |       },
57 |       HttpStatus::ParsingHeader(name_or_value) => match name_or_value {
58 |         NameOrValue::Name => {
59 |           raw_http_machine.parsing_field_name = F::ONE;
60 |           raw_http_machine.parsing_field_value = F::ZERO;
61 |         },
62 |         NameOrValue::Value => {
63 |           raw_http_machine.parsing_field_name = F::ZERO;
64 |           raw_http_machine.parsing_field_value = F::ONE;
65 |         },
66 |       },
67 |       HttpStatus::ParsingBody => raw_http_machine.parsing_body = F::ONE,
68 |       HttpStatus::LineStatus(line_status) => match line_status {
69 |         LineStatus::CR => raw_http_machine.line_status = F::ONE,
70 |         LineStatus::CRLF => raw_http_machine.line_status = F::from(2),
71 |         LineStatus::CRLFCR => raw_http_machine.line_status = F::from(3),
72 |       },
73 |     }
74 |     raw_http_machine
75 |   }
76 | }
77 | 
78 | impl RawHttpMachine {
79 |   pub fn initial_state() -> Self {
80 |     Self { parsing_start: F::ONE, line_monomial: F::ONE, ..Default::default() }
81 |   }
82 | 
83 |   pub fn flatten(&self) -> [F; 8] {
84 |     [
85 |       self.parsing_start,
86 |       self.parsing_header,
87 |       self.parsing_field_name,
88 |       self.parsing_field_value,
89 |       self.parsing_body,
90 |       self.line_status,
91 |       self.line_digest,
92 |       self.line_monomial,
93 |     ]
94 |   }
95 | }
96 | 
97 | #[derive(Debug, Clone, Copy, PartialEq, Eq)]
98 | pub enum HttpStatus {
99 |   ParsingStart(StartLineLocation),
100 |   ParsingHeader(NameOrValue),
101 |   ParsingBody,
102 |   LineStatus(LineStatus),
103 | }
104 | 
105 | #[derive(Debug, Clone, Copy, PartialEq, Eq)]
106 | pub enum NameOrValue {
107 |   Name,
108 |   Value,
109 | }
110 | 
111 | #[derive(Debug, Clone, Copy, PartialEq, Eq)]
112 | pub enum StartLineLocation {
113 |   Beginning,
114 |   Middle,
115 |   End,
116 | }
117 | 
118 | #[derive(Debug, Clone, Copy, PartialEq, Eq)]
119 | pub enum LineStatus {
120 |   CR,
121 |   CRLF,
122 |   CRLFCR,
123 | }
124 | 
125 | pub enum HttpMaskType {
126 |   StartLine,
127 |   Header(usize),
128 |   Body,
129 | }
130 | 
131 | // TODO: Note, HTTP does not require a `:` and space between the name and value of a header, so we
132 | // will have to deal with this somehow, but for now I'm assuming there's a space
133 | pub fn headers_to_bytes(headers: &HashMap<String, String>) -> impl Iterator<Item = Vec<u8>> + '_ {
134 |   headers.iter().map(|(k, v)| format!("{k}: {v}").into_bytes())
135 | }
136 | 
137 | /// Computes private inputs for the HTTP circuit.
138 | /// # Arguments
139 | /// - `plaintext`: the plaintext HTTP request/response padded with `-1` to nearest power of 2
140 | /// - `mask_at`: the [`HttpMaskType`] of the HTTP request/response to mask
141 | /// # Returns
142 | /// - the masked HTTP request/response
143 | pub fn compute_http_witness(plaintext: &[u8], mask_at: HttpMaskType) -> Vec<u8> {
144 |   let mut result = Vec::new();
145 |   match mask_at {
146 |     HttpMaskType::StartLine => {
147 |       // Find the first CRLF sequence
148 |       for i in 0..plaintext.len().saturating_sub(1) {
149 |         if plaintext[i] == b'\r' && plaintext[i + 1] == b'\n' {
150 |           result = plaintext[..i].to_vec();
151 |           break;
152 |         }
153 |       }
154 |     },
155 |     HttpMaskType::Header(idx) => {
156 |       let mut current_header = 0;
157 |       let mut start_pos = 0;
158 | 
159 |       // Skip the start line
160 |       for i in 0..plaintext.len().saturating_sub(1) {
161 |         if plaintext[i] == b'\r' && plaintext[i + 1] == b'\n' {
162 |           start_pos = i + 2;
163 |           break;
164 |         }
165 |       }
166 | 
167 |       // Find the specified header
168 |       let mut header_start_pos = start_pos;
169 |       for i in start_pos..plaintext.len().saturating_sub(1) {
170 |         if plaintext[i] == b'\r' && plaintext[i + 1] == b'\n' {
171 |           if current_header == idx {
172 |             // Copy the header line (excluding the CRLF)
173 |             result = plaintext[header_start_pos..i].to_vec();
174 |             break;
175 |           }
176 | 
177 |           // Check for end of headers (double CRLF)
178 |           if i + 3 < plaintext.len() && plaintext[i + 2] == b'\r' && plaintext[i + 3] == b'\n' {
179 |             break;
180 |           }
181 | 
182 |           current_header += 1;
183 |           header_start_pos = i + 2;
184 |         }
185 |       }
186 |     },
187 |     HttpMaskType::Body => {
188 |       // Find double CRLF that marks start of body
189 |       for i in 0..plaintext.len().saturating_sub(3) {
190 |         if plaintext[i] == b'\r'
191 |           && plaintext[i + 1] == b'\n'
192 |           && plaintext[i + 2] == b'\r'
193 |           && plaintext[i + 3] == b'\n'
194 |         {
195 |           // Copy everything after the double CRLF
196 |           let body_start = i + 4;
197 |           if body_start < plaintext.len() {
198 |             result = plaintext[body_start..].to_vec();
199 |           }
200 |           break;
201 |         }
202 |       }
203 |     },
204 |   }
205 |   result
206 | }
207 | 
208 | pub fn compute_http_header_witness(plaintext: &[u8], name: &[u8]) -> (usize, Vec<u8>) {
209 |   let mut result = Vec::new();
210 | 
211 |   let mut current_header = 0;
212 |   let mut current_header_name = vec![];
213 |   let mut start_pos = 0;
214 | 
215 |   // Skip the start line
216 |   for i in 1..plaintext.len().saturating_sub(1) {
217 |     if plaintext[i] == b'\r' && plaintext[i + 1] == b'\n' {
218 |       start_pos = i + 2;
219 |       break;
220 |     }
221 |   }
222 | 
223 |   // Find the specified header
224 |   let mut header_start_pos = start_pos;
225 |   for i in start_pos..plaintext.len().saturating_sub(1) {
226 |     // find header name
227 |     if plaintext[i] == b':' {
228 |       current_header_name = plaintext[header_start_pos..i].to_vec();
229 |     }
230 |     // find next header line
231 |     if plaintext[i] == b'\r' && plaintext[i + 1] == b'\n' {
232 |       if current_header_name == name {
233 |         // Copy the header line (excluding the CRLF)
234 |         result = plaintext[header_start_pos..i].to_vec();
235 |         break;
236 |       }
237 | 
238 |       // Check for end of headers (double CRLF)
239 |       if i + 3 < plaintext.len() && plaintext[i + 2] == b'\r' && plaintext[i + 3] == b'\n' {
240 |         break;
241 |       }
242 | 
243 |       current_header += 1;
244 |       header_start_pos = i + 2;
245 |     }
246 |   }
247 | 
248 |   (current_header, result)
249 | }
250 | 
251 | #[cfg(test)]
252 | mod tests {
253 |   use super::*;
254 | 
255 |   #[test]
256 |   fn test_compute_http_witness_start_line() {
257 |     let bytes = compute_http_witness(RESPONSE_PLAINTEXT.as_bytes(), HttpMaskType::StartLine);
258 |     assert_eq!(bytes, RESPONSE_START_LINE.as_bytes());
259 |   }
260 | 
261 |   #[test]
262 |   fn test_compute_http_witness_header_0() {
263 |     let bytes = compute_http_witness(RESPONSE_PLAINTEXT.as_bytes(), HttpMaskType::Header(0));
264 |     assert_eq!(bytes, RESPONSE_HEADER_0.as_bytes());
265 |   }
266 | 
267 |   #[test]
268 |   fn test_compute_http_witness_header_1() {
269 |     let bytes = compute_http_witness(RESPONSE_PLAINTEXT.as_bytes(), HttpMaskType::Header(1));
270 |     assert_eq!(bytes, RESPONSE_HEADER_1.as_bytes());
271 |   }
272 | 
273 |   #[test]
274 |   fn test_compute_http_witness_body() {
275 |     let bytes = compute_http_witness(RESPONSE_PLAINTEXT.as_bytes(), HttpMaskType::Body);
276 |     assert_eq!(bytes, RESPONSE_BODY.as_bytes());
277 |   }
278 | 
279 |   #[test]
280 |   fn test_compute_http_witness_name() {
281 |     let (index, bytes_from_name) =
282 |       compute_http_header_witness(RESPONSE_PLAINTEXT.as_bytes(), "Transfer-Encoding".as_bytes());
283 |     let bytes_from_index =
284 |       compute_http_witness(RESPONSE_PLAINTEXT.as_bytes(), HttpMaskType::Header(2));
285 |     assert_eq!(bytes_from_index, bytes_from_name);
286 |     assert_eq!(index, 2);
287 |   }
288 | 
289 |   #[test]
290 |   fn test_compute_http_witness_name_not_present() {
291 |     let (_, bytes_from_name) =
292 |       compute_http_header_witness(RESPONSE_PLAINTEXT.as_bytes(), "pluto-rocks".as_bytes());
293 |     assert!(bytes_from_name.is_empty());
294 |   }
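295 | 
296 |   // NOTE (editorial example): a minimal, self-contained sketch of how the two
297 |   // witness helpers above relate on a handcrafted request. The request bytes
298 |   // below are hypothetical (not one of the repo's fixtures); header lines are
299 |   // returned without their trailing CRLF.
300 |   #[test]
301 |   fn test_compute_http_witness_handcrafted() {
302 |     let plaintext = "GET / HTTP/1.1\r\nHost: example.com\r\nAccept: */*\r\n\r\nhello";
303 |     let (index, header) = compute_http_header_witness(plaintext.as_bytes(), b"Accept");
304 |     assert_eq!(index, 1);
305 |     assert_eq!(header, b"Accept: */*");
306 |     assert_eq!(compute_http_witness(plaintext.as_bytes(), HttpMaskType::Header(1)), header);
307 |     assert_eq!(compute_http_witness(plaintext.as_bytes(), HttpMaskType::Body), b"hello");
308 |   }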
309 | }
310 | 
--------------------------------------------------------------------------------
/witness-generator/src/http/parser.rs:
--------------------------------------------------------------------------------
1 | use super::*;
2 | 
3 | const SPACE: u8 = 32;
4 | const CR: u8 = 13;
5 | const LF: u8 = 10;
6 | const COLON: u8 = 58;
7 | 
8 | pub fn parse(bytes: &[u8], polynomial_input: F) -> Result<Vec<HttpMachine>, WitnessGeneratorError> {
9 |   let mut machine = HttpMachine {
10 |     header_num: 0,
11 |     status: HttpStatus::ParsingStart(StartLineLocation::Beginning),
12 |     line_digest: F::ZERO,
13 |     line_monomial: F::ONE,
14 |   };
15 | 
16 |   let mut output = vec![];
17 |   let mut line_ctr = 0;
18 |   for (_ctr, char) in bytes.iter().enumerate() {
19 |     // println!("-------------------------------------------------");
20 |     // println!("char: {:?}, {}", *char as char, *char);
21 |     // println!("-------------------------------------------------");
22 |     match (*char, machine.status) {
23 |       (SPACE, HttpStatus::ParsingStart(loc)) => {
24 |         match loc {
25 |           StartLineLocation::Beginning =>
26 |             machine.status = HttpStatus::ParsingStart(StartLineLocation::Middle),
27 |           StartLineLocation::Middle =>
28 |             machine.status = HttpStatus::ParsingStart(StartLineLocation::End),
29 |           StartLineLocation::End => {},
30 |         };
31 |         machine.line_digest += polynomial_input.pow([line_ctr]) * F::from(*char as u64);
32 |         line_ctr += 1;
33 |       },
34 |       (
35 |         CR,
36 |         HttpStatus::ParsingStart(StartLineLocation::End)
37 |         | HttpStatus::ParsingHeader(NameOrValue::Value),
38 |       ) => {
39 |         machine.status = HttpStatus::LineStatus(LineStatus::CR);
40 |         line_ctr = 0;
41 |         machine.line_digest = F::ZERO;
42 |       },
43 |       (CR, HttpStatus::LineStatus(LineStatus::CRLF)) => {
44 |         machine.status = HttpStatus::LineStatus(LineStatus::CRLFCR);
45 |         line_ctr = 0;
46 |         machine.line_digest = F::ZERO;
47 |       },
48 |       (LF, HttpStatus::LineStatus(LineStatus::CR)) => {
49 |         machine.status = HttpStatus::LineStatus(LineStatus::CRLF);
50 |         line_ctr = 0;
51 |         machine.line_digest = F::ZERO;
52 |       },
53 |       (LF, HttpStatus::LineStatus(LineStatus::CRLFCR)) => {
54 |         machine.status = HttpStatus::ParsingBody;
55 |         machine.header_num = 0;
56 |         line_ctr = 0;
57 |         machine.line_digest = F::ZERO;
58 |       },
59 |       (_, HttpStatus::LineStatus(LineStatus::CRLF)) => {
60 |         machine.status = HttpStatus::ParsingHeader(NameOrValue::Name);
61 |         machine.header_num += 1;
62 |         machine.line_digest += polynomial_input.pow([line_ctr]) * F::from(*char as u64);
63 |         line_ctr += 1;
64 |       },
65 |       (COLON, HttpStatus::ParsingHeader(NameOrValue::Name)) => {
66 |         machine.status = HttpStatus::ParsingHeader(NameOrValue::Value);
67 |         machine.line_digest += polynomial_input.pow([line_ctr]) * F::from(*char as u64);
68 |         line_ctr += 1;
69 |       },
70 |       (_, HttpStatus::ParsingBody) => {},
71 |       _ => {
72 |         machine.line_digest += polynomial_input.pow([line_ctr]) * F::from(*char as u64);
73 |         line_ctr += 1;
74 |       },
75 |     }
76 |     machine.line_monomial = if line_ctr == 0 { F::ZERO } else { polynomial_input.pow([line_ctr]) };
77 |     output.push(machine);
78 |     // let raw_state = RawHttpMachine::from(machine);
79 | 
80 |     // println!(
81 |     //   "state[ {ctr:?} ].parsing_start = {:?}",
82 |     //   BigUint::from_bytes_le(&raw_state.parsing_start.to_bytes())
83 |     // );
84 |     // println!(
85 |     //   "state[ {ctr:?} ].parsing_header = {:?}",
86 |     //   BigUint::from_bytes_le(&raw_state.parsing_header.to_bytes())
87 |     // );
88 |     // println!(
89 |     //   "state[ {ctr:?} ].parsing_field_name = {:?}",
90 |     //   BigUint::from_bytes_le(&raw_state.parsing_field_name.to_bytes())
91 |     // );
92 |     // println!(
93 |     //   "state[ {ctr:?} ].parsing_field_value = {:?}",
94 |     //   BigUint::from_bytes_le(&raw_state.parsing_field_value.to_bytes())
95 |     // );
96 |     // println!(
97 |     //   "state[ {ctr:?} ].parsing_body = {:?}",
98 |     //   BigUint::from_bytes_le(&raw_state.parsing_body.to_bytes())
99 |     // );
100 |     // println!(
101 |     //   "state[ {ctr:?} ].line_status = {:?}",
102 |     //   BigUint::from_bytes_le(&raw_state.line_status.to_bytes())
103 |     // );
104 |     // println!(
105 |     //   "state[ {ctr:?} ].inner_main_digest = {:?}",
106 |     //   BigUint::from_bytes_le(&raw_state.line_digest.to_bytes())
107 |     // );
108 |     // println!(
109 |     //   "state[ {ctr:?} ].line_monomial = {:?}",
110 |     //   BigUint::from_bytes_le(&raw_state.line_monomial.to_bytes())
111 |     // );
112 |     // println!("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
113 |   }
114 |   Ok(output)
115 | }
116 | 
117 | #[cfg(test)]
118 | mod tests {
119 |   use super::*;
120 | 
121 |   #[test]
122 |   pub fn test_parse_http() {
123 |     // It's funny to me every time
124 |     let polynomial_input = poseidon::<2>(&[F::from(69), F::from(420)]);
125 |     let states = parse(mock::RESPONSE_PLAINTEXT.as_bytes(), polynomial_input).unwrap();
126 |     assert_eq!(states.len(), mock::RESPONSE_PLAINTEXT.len());
127 | 
128 |     let machine_state = RawHttpMachine::from(states.last().unwrap().to_owned());
129 |     assert_eq!(machine_state.parsing_start, F::ZERO);
130 |     assert_eq!(machine_state.parsing_header, F::ZERO);
131 |     assert_eq!(machine_state.parsing_field_name, F::ZERO);
132 |     assert_eq!(machine_state.parsing_field_value, F::ZERO);
133 |     assert_eq!(machine_state.parsing_body, F::ONE);
134 |     assert_eq!(machine_state.line_status, F::from(0));
135 |     assert_eq!(machine_state.line_digest, F::from(0));
136 |     assert_eq!(machine_state.line_monomial, F::from(0));
137 |   }
138 | 
139 |   #[rstest]
140 |   #[case::github("github_response")]
141 |   #[case::reddit("reddit_request")]
142 |   pub fn test_parse_http_complex(#[case] filename: &str) {
143 |     // It's funny to me every time
144 |     let polynomial_input = poseidon::<2>(&[F::from(69), F::from(420)]);
145 | 
146 |     let input = std::fs::read(format!("../examples/http/{}.http", filename)).unwrap();
147 |     let states = parse(&input, polynomial_input).unwrap();
148 | 
149 |     let machine_state: [String; 8] = RawHttpMachine::from(states[511].to_owned()).into();
150 |     dbg!(machine_state);
151 | 
152 |     let machine_state = RawHttpMachine::from(states.last().unwrap().to_owned());
153 |     assert_eq!(machine_state.parsing_start, F::ZERO);
154 |     assert_eq!(machine_state.parsing_header, F::ZERO);
155 |     assert_eq!(machine_state.parsing_field_name, F::ZERO);
156 |     assert_eq!(machine_state.parsing_field_value, F::ZERO);
157 |     assert_eq!(machine_state.parsing_body, F::ONE);
158 |     assert_eq!(machine_state.line_status, F::from(0));
159 |     assert_eq!(machine_state.line_digest, F::from(0));
160 |     assert_eq!(machine_state.line_monomial, F::from(0));
161 |   }
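162 | 
163 |   // NOTE (editorial example): a minimal sketch on a handcrafted input rather than
164 |   // a fixture file. After the double CRLF the machine should sit in `ParsingBody`
165 |   // with the per-line digest and monomial reset to zero.
166 |   #[test]
167 |   pub fn test_parse_http_minimal() {
168 |     let polynomial_input = poseidon::<2>(&[F::from(69), F::from(420)]);
169 |     let states = parse(b"HTTP/1.1 200 OK\r\n\r\nhi", polynomial_input).unwrap();
170 |     let machine_state = RawHttpMachine::from(states.last().unwrap().to_owned());
171 |     assert_eq!(machine_state.parsing_body, F::ONE);
172 |     assert_eq!(machine_state.line_digest, F::ZERO);
173 |     assert_eq!(machine_state.line_monomial, F::ZERO);
174 |   }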
175 | }
176 | 
--------------------------------------------------------------------------------
/witness-generator/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! Used for computing the witnesses needed for HTTP and JSON elements of Web Proof NIVC
2 | //! hashchain-based circuits.
3 | #![allow(incomplete_features)]
4 | #![feature(generic_const_exprs)]
5 | 
6 | pub mod error;
7 | pub mod http;
8 | pub mod json;
9 | #[cfg(test)] pub(crate) mod mock;
10 | 
11 | use std::collections::HashMap;
12 | 
13 | use client_side_prover::traits::{Engine, Group};
14 | use ff::{Field, PrimeField};
15 | use light_poseidon::{Poseidon, PoseidonBytesHasher};
16 | use num_bigint::BigUint;
17 | #[cfg(test)] use rstest::rstest;
18 | use serde::{Deserialize, Serialize};
19 | use serde_json::Value;
20 | 
21 | pub use self::error::WitnessGeneratorError;
22 | #[cfg(test)] pub(crate) use self::mock::*;
23 | 
24 | pub type E = client_side_prover::provider::Bn256EngineKZG;
25 | pub type G = <E as Engine>::GE;
26 | pub type F = <G as Group>::Scalar;
27 | 
28 | /// Struct representing a byte or padding.
29 | #[derive(Debug, Clone, Copy, PartialEq)]
30 | pub enum ByteOrPad {
31 |   /// A byte.
32 |   Byte(u8),
33 |   /// Padding byte.
34 |   /// Substituted with `-1` as an `Fr` field element and `0` as a `u8` byte.
35 |   Pad,
36 | }
37 | 
38 | impl ByteOrPad {
39 |   /// Converts a slice of bytes to a vector of `ByteOrPad` with padding.
40 |   pub fn from_bytes_with_padding(bytes: &[u8], padding: usize) -> Vec<ByteOrPad> {
41 |     let mut result = bytes.iter().map(|&b| Self::Byte(b)).collect::<Vec<_>>();
42 |     result.extend(std::iter::repeat(Self::Pad).take(padding));
43 |     result
44 |   }
45 | 
46 |   pub fn pad_to_nearest_multiple(bytes: &[u8], multiple: usize) -> Vec<ByteOrPad> {
47 |     let padding = if bytes.len() % multiple == 0 { 0 } else { multiple - (bytes.len() % multiple) };
48 |     Self::from_bytes_with_padding(bytes, padding)
49 |   }
50 | }
51 | 
52 | impl From<u8> for ByteOrPad {
53 |   fn from(b: u8) -> Self { Self::Byte(b) }
54 | }
55 | 
56 | impl From<&u8> for ByteOrPad {
57 |   fn from(b: &u8) -> Self { Self::Byte(*b) }
58 | }
59 | 
60 | impl From<&ByteOrPad> for halo2curves::bn256::Fr {
61 |   fn from(b: &ByteOrPad) -> Self {
62 |     match b {
63 |       ByteOrPad::Byte(b) => Self::from(u64::from(*b)),
64 |       ByteOrPad::Pad => -Self::one(),
65 |     }
66 |   }
67 | }
68 | 
69 | /// Converts a field element to a base10 string.
70 | pub fn field_element_to_base10_string(fe: F) -> String {
71 |   BigUint::from_bytes_le(&fe.to_bytes()).to_str_radix(10)
72 | }
73 | 
74 | impl Serialize for ByteOrPad {
75 |   /// Converts to a field element via `From<&ByteOrPad>` and then to a base10 string
76 |   fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
77 |   where S: serde::Serializer {
78 |     serializer.serialize_str(field_element_to_base10_string(self.into()).as_str())
79 |   }
80 | }
81 | 
82 | impl PartialEq<u8> for ByteOrPad {
83 |   fn eq(&self, other: &u8) -> bool {
84 |     match self {
85 |       Self::Byte(b) => b == other,
86 |       Self::Pad => false,
87 |     }
88 |   }
89 | }
90 | 
91 | /// Packs a chunk of 16 bytes into a field element
92 | ///
93 | /// **Note**: if the chunk is fully padded, it will be ignored
94 | fn bytepack(bytes: &[ByteOrPad]) -> Option<F> {
95 |   let mut output = F::ZERO;
96 |   let mut is_padded_chunk = 0;
97 |   for (idx, byte) in bytes.iter().enumerate() {
98 |     let mut pow = F::ONE;
99 |     match byte {
100 |       ByteOrPad::Byte(byte) => {
101 |         output += F::from(u64::from(*byte)) * {
102 |           for _ in 0..(8 * idx) {
103 |             pow *= F::from(2);
104 |           }
105 |           pow
106 |         };
107 |       },
108 |       ByteOrPad::Pad => {
109 |         is_padded_chunk += 1;
110 |       },
111 |     }
112 |   }
113 | 
114 |   if is_padded_chunk == bytes.len() {
115 |     None
116 |   } else {
117 |     Some(output)
118 |   }
119 | }
120 | 
121 | pub fn poseidon<const N: usize>(preimage: &[F]) -> F {
122 |   let mut poseidon = Poseidon::<ark_bn254::Fr>::new_circom(N).unwrap();
123 | 
124 |   // Convert each field element to bytes and collect into a Vec
125 |   let byte_arrays: Vec<[u8; 32]> = preimage.iter().map(F::to_bytes).collect();
126 | 
127 |   // Create slice of references to the bytes
128 |   let byte_slices: Vec<&[u8]> = byte_arrays.iter().map(<[u8; 32]>::as_slice).collect();
129 | 
130 |   let hash: [u8; 32] = poseidon.hash_bytes_le(&byte_slices).unwrap();
131 | 
132 |   F::from_repr(hash).unwrap()
133 | }
134 | 
135 | /// Hashes byte array padded with -1 with Poseidon
136 | ///
137 | /// **Note**:
138 | /// - any chunk of 16 bytes that is fully padded with -1 will be ignored
139 | /// - check [`bytepack`] for more details
140 | pub fn data_hasher(preimage: &[ByteOrPad], seed: F) -> F {
141 |   // Pack the input bytes in chunks of 16 into field elements
142 |   let packed_inputs = preimage.chunks(16).map(bytepack).collect::<Vec<Option<F>>>();
143 | 
144 |   // Iterate over the packed inputs and hash them with Poseidon
145 |   let mut hash_val = seed;
146 |   for packed_input in packed_inputs {
147 |     if packed_input.is_none() {
148 |       continue;
149 |     }
150 |     hash_val = poseidon::<2>(&[hash_val, packed_input.unwrap()]);
151 |   }
152 |   hash_val
153 | }
154 | 
155 | pub fn polynomial_digest(bytes: &[u8], polynomial_input: F, counter: u64) -> F {
156 |   let mut monomial = if counter == 0 { F::ONE } else { polynomial_input.pow([counter]) };
157 |   let mut accumulated = F::ZERO;
158 |   for byte in bytes {
159 |     accumulated += F::from(u64::from(*byte)) * monomial;
160 |     monomial *= polynomial_input;
161 |   }
162 |   accumulated
163 | }
164 | 
165 | #[cfg(test)]
166 | mod tests {
167 | 
168 |   use super::*;
169 | 
170 |   #[test]
171 |   fn test_bytepack() {
172 |     let pack0 = bytepack(&[0, 0, 0].into_iter().map(ByteOrPad::from).collect::<Vec<_>>());
173 |     assert_eq!(pack0, Some(F::from(0)));
174 | 
175 |     let pack1 = bytepack(&[1, 0, 0].into_iter().map(ByteOrPad::from).collect::<Vec<_>>());
176 |     assert_eq!(pack1, Some(F::from(1)));
177 | 
178 |     let pack2 = bytepack(&[0, 1, 0].into_iter().map(ByteOrPad::from).collect::<Vec<_>>());
179 |     assert_eq!(pack2, Some(F::from(256)));
180 | 
181 |     let pack3 = bytepack(&[0, 0, 1].into_iter().map(ByteOrPad::from).collect::<Vec<_>>());
182 |     assert_eq!(pack3, Some(F::from(65536)));
183 | 
184 |     let pack4 = bytepack(&[ByteOrPad::Pad; 3]);
185 |     assert_eq!(pack4, None);
186 |   }
187 | 
188 |   #[test]
189 |   fn test_poseidon() {
190 |     // let hash = poseidon_chainer(&[bytepack(&[0]), bytepack(&[0])]);
191 |     let hash = poseidon::<2>(&[F::from(0), F::from(0)]);
192 |     assert_eq!(hash.to_bytes(), [
193 |       100, 72, 182, 70, 132, 238, 57, 168, 35, 213, 254, 95, 213, 36, 49, 220, 129, 228, 129, 123,
194 |       242, 195, 234, 60, 171, 158, 35, 158, 251, 245, 152, 32
195 |     ]);
196 | 
197 |     let hash = poseidon::<2>(&[F::from(69), F::from(420)]);
198 |     assert_eq!(hash.to_bytes(), [
199 |       10, 230, 247, 95, 9, 23, 36, 117, 25, 37, 98, 141, 178, 220, 241, 100, 187, 169, 126, 226,
200 |       80, 175, 17, 100, 232, 1, 29, 0, 165, 144, 139, 2,
201 |     ]);
202 |   }
203 | 
204 |   #[test]
205 |   fn test_data_hasher() {
206 |     let hash = data_hasher(&[ByteOrPad::Byte(0); 16], F::ZERO);
207 |     assert_eq!(
208 |       hash,
209 |       F::from_str_vartime(
210 |         "14744269619966411208579211824598458697587494354926760081771325075741142829156"
211 |       )
212 |       .unwrap()
213 |     );
214 | 
215 |     let hash = data_hasher(&[ByteOrPad::Pad; 16], F::ZERO);
216 |     assert_eq!(hash, F::ZERO);
217 | 
218 |     let mut hash_input = [ByteOrPad::Byte(0); 16];
219 |     hash_input[0] = ByteOrPad::Byte(1);
220 |     let hash = data_hasher(hash_input.as_ref(), F::ZERO);
221 |     assert_eq!(hash, poseidon::<2>([F::ZERO, F::ONE].as_ref()));
222 | 
223 |     hash_input = [ByteOrPad::Byte(0); 16];
224 |     hash_input[15] = ByteOrPad::Byte(1);
225 |     let hash = data_hasher(hash_input.as_ref(), F::ZERO);
226 |     assert_eq!(
227 |       hash,
228 |       poseidon::<2>(
229 |         [F::ZERO, F::from_str_vartime("1329227995784915872903807060280344576").unwrap()].as_ref()
230 |       )
231 |     );
232 |   }
233 | 
234 |   #[test]
235 |   fn test_polynomial_digest() {
236 |     let bytes = [1, 2, 3, 4, 5];
237 |     let digest_ctr_0 = polynomial_digest(&bytes, F::from(2), 0);
238 |     assert_eq!(
239 |       digest_ctr_0,
240 |       F::from(1 + 2 * 2 + 3 * 2_u64.pow(2) + 4 * 2_u64.pow(3) + 5 * 2_u64.pow(4))
241 |     );
242 | 
243 |     let digest_ctr_2 = polynomial_digest(&bytes, F::from(2), 2);
244 |     assert_eq!(
245 |       digest_ctr_2,
246 |       F::from(
247 |         2_u64.pow(2) + 2 * 2_u64.pow(3) + 3 * 2_u64.pow(4) + 4 * 2_u64.pow(5) + 5 * 2_u64.pow(6)
248 |       )
249 |     );
250 |   }
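251 | 
252 |   // NOTE (editorial example): a small added sketch of the padding helpers; the
253 |   // input bytes here are arbitrary. Padding to a multiple of 16 appends `Pad`
254 |   // entries, and `data_hasher` skips any 16-byte chunk that is entirely padding,
255 |   // so a fully-padded tail chunk does not change the digest.
256 |   #[test]
257 |   fn test_pad_to_nearest_multiple() {
258 |     let padded = ByteOrPad::pad_to_nearest_multiple(&[1u8; 20], 16);
259 |     assert_eq!(padded.len(), 32);
260 |     assert_eq!(padded[19], ByteOrPad::Byte(1));
261 |     assert_eq!(padded[20], ByteOrPad::Pad);
262 | 
263 |     let aligned = ByteOrPad::from_bytes_with_padding(&[0u8; 16], 16);
264 |     assert_eq!(data_hasher(&aligned, F::ZERO), data_hasher(&aligned[..16], F::ZERO));
265 |   }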
266 | }
267 | 
--------------------------------------------------------------------------------
/witness-generator/src/mock.rs:
--------------------------------------------------------------------------------
1 | pub(crate) const RESPONSE_PLAINTEXT: &str = "HTTP/1.1 200 OK\r
2 | content-type: application/json; charset=utf-8\r
3 | content-encoding: gzip\r
4 | Transfer-Encoding: chunked\r\n\r
5 | {\r
6 | \"data\": {\r
7 | \"items\": [\r
8 | {\r
9 | \"data\": \"Artist\",\r
10 | \"profile\": {\r
11 | \"name\": \"Taylor Swift\"\r
12 | }\r
13 | }\r
14 | ]\r
15 | }\r
16 | }";
17 | 
18 | pub(crate) const RESPONSE_START_LINE: &str = "HTTP/1.1 200 OK";
19 | 
20 | pub(crate) const RESPONSE_HEADER_0: &str = "content-type: application/json; charset=utf-8";
21 | 
22 | pub(crate) const RESPONSE_HEADER_1: &str = "content-encoding: gzip";
23 | 
24 | pub(crate) const RESPONSE_BODY: &str = "{\r
25 | \"data\": {\r
26 | \"items\": [\r
27 | {\r
28 | \"data\": \"Artist\",\r
29 | \"profile\": {\r
30 | \"name\": \"Taylor Swift\"\r
31 | }\r
32 | }\r
33 | ]\r
34 | }\r
35 | }";
36 | 
37 | pub(crate) const KEY_0: &str = "data";
38 | pub(crate) const KEY_1: &str = "items";
39 | pub(crate) const KEY_2: &str = "profile";
40 | pub(crate) const KEY_3: &str = "name";
41 | 
--------------------------------------------------------------------------------