├── .circleci └── config.yml ├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches ├── nova.rs └── vdf.rs ├── rust-toolchain └── src ├── lib.rs ├── minroot.rs └── nova ├── mod.rs └── proof.rs /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | parameters: 4 | nightly-version: 5 | type: string 6 | default: "nightly-2021-03-18" 7 | 8 | executors: 9 | default: 10 | docker: 11 | - image: filecoin/rust:latest 12 | working_directory: /mnt/crate 13 | resource_class: 2xlarge+ 14 | # In case someone should be run on a GPU instance, comment this section in, 15 | # change the `executor` of your job to `gpu` and make sure you also run the 16 | # `install-gpu-deps` command 17 | #gpu: 18 | # machine: 19 | # image: ubuntu-1604-cuda-10.1:201909-23 20 | # working_directory: ~/gpuci 21 | # resource_class: gpu.nvidia.medium 22 | 23 | commands: 24 | set-env-path: 25 | steps: 26 | - run: 27 | name: Set the PATH env variable 28 | command: | 29 | # Also put the Rust LLVM tools into the PATH. 30 | echo 'export PATH="$HOME:~/.cargo/bin:~/.rustup/toolchains/<< pipeline.parameters.nightly-version >>-x86_64-unknown-linux-gnu/lib/rustlib/x86_64-unknown-linux-gnu/bin:$PATH"' | tee --append $BASH_ENV 31 | source $BASH_ENV 32 | 33 | # This command is only needed if the GPU executor is used 34 | install-gpu-deps: 35 | steps: 36 | - run: 37 | name: Install libraries for GPU tests 38 | command: | 39 | sudo apt-get update -y 40 | sudo apt install -y ocl-icd-opencl-dev 41 | 42 | restore-workspace: 43 | steps: 44 | - attach_workspace: 45 | at: "." 46 | 47 | save-rustup-cache: 48 | steps: 49 | # Move things from the home directory to `/tmp` first, so that it can be 50 | # restored on executors that have a different home directory. 
51 | - run: cp -R ~/.cargo ~/.rustup /tmp/ 52 | - save_cache: 53 | name: "Save rustup cache" 54 | key: cargo-v1-{{ checksum "rust-toolchain" }}-{{ checksum "Cargo.toml" }}-{{ checksum "Cargo.lock" }} 55 | paths: 56 | - /tmp/.cargo 57 | - /tmp/.rustup 58 | 59 | restore-rustup-cache: 60 | steps: 61 | - restore_cache: 62 | name: "Restore rustup cache" 63 | key: cargo-v1-{{ checksum "rust-toolchain" }}-{{ checksum "Cargo.toml" }}-{{ checksum "Cargo.lock" }} 64 | # Cache might not be created yet, hence ignore if the move fails 65 | - run: cp -R /tmp/.cargo /tmp/.rustup ~/ || true 66 | 67 | jobs: 68 | cargo_fetch: 69 | executor: default 70 | steps: 71 | - checkout 72 | - run: 73 | name: Update submodules 74 | command: git submodule update --init --recursive 75 | - run: 76 | name: Calculate dependencies 77 | command: cargo generate-lockfile 78 | - restore-rustup-cache 79 | - run: cargo update 80 | - run: cargo fetch 81 | - run: rustup install $(cat rust-toolchain) 82 | - run: rustup default $(cat rust-toolchain) 83 | # A nightly build is needed for code coverage reporting 84 | - run: rustup toolchain install << pipeline.parameters.nightly-version >> 85 | - run: rustup component add --toolchain << pipeline.parameters.nightly-version >> llvm-tools-preview 86 | - run: rustc --version 87 | - persist_to_workspace: 88 | root: "." 
89 | paths: 90 | - Cargo.lock 91 | - save-rustup-cache 92 | 93 | rustfmt: 94 | executor: default 95 | steps: 96 | - checkout 97 | - restore-workspace 98 | - restore-rustup-cache 99 | - set-env-path 100 | - run: 101 | name: Run cargo fmt 102 | command: cargo fmt --all -- --check 103 | 104 | clippy: 105 | executor: default 106 | steps: 107 | - checkout 108 | - restore-workspace 109 | - restore-rustup-cache 110 | - set-env-path 111 | - run: 112 | name: Run cargo clippy 113 | command: cargo +$(cat rust-toolchain) clippy --all-targets --all-features --workspace -- -D warnings 114 | 115 | test: 116 | executor: default 117 | steps: 118 | - checkout 119 | - set-env-path 120 | - restore-workspace 121 | - restore-rustup-cache 122 | - run: 123 | command: cargo test 124 | 125 | coverage_run: 126 | executor: default 127 | environment: 128 | # Incremental build is not supported when profiling 129 | CARGO_INCREMENTAL: 0 130 | # -Zinstrument-coverage: enable llvm coverage instrumentation 131 | # -Ccodegen-units=1: building in parallel is not supported when profiling 132 | # -Copt-level=0: disable optimizations for more accurate coverage 133 | # -Clink-dead-code: dead code should be considered as not covered code 134 | # -Coverflow-checks=off: checking for overflow is not needed for coverage reporting 135 | # -Cinline-threshold=0: do not inline 136 | RUSTFLAGS: -Zinstrument-coverage -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Cinline-threshold=0 137 | # Make sure that each run of an executable creates a new profile file, with the default 138 | # name they would override each other 139 | LLVM_PROFILE_FILE: "%m.profraw" 140 | steps: 141 | - checkout 142 | - restore-workspace 143 | - restore-rustup-cache 144 | - set-env-path 145 | - run: 146 | name: Generate coverage report 147 | command: | 148 | RUST_LOG=info cargo +<< pipeline.parameters.nightly-version >> test -- --nocapture 149 | 150 | # Do *not* use sparse output. 
It leads to more lines that are not 151 | # taken into account at all 152 | llvm-profdata merge --output=default.profdata ./*.profraw 153 | 154 | # The compiled files contain the coverage information. From running the tests we don't 155 | # know what those files are called, hence use all files from the `./target/debug/deps` 156 | # directory which don't have an extension. 157 | OBJECT_FILES=$(find ./target/debug/deps/* -name '*' -not -name '*\.*' -printf '%p,'|head --bytes -1) 158 | # Only export the coverage of this project, we don't care about coverage of 159 | # dependencies 160 | llvm-cov export --ignore-filename-regex=".cargo|.rustup" --format=lcov -instr-profile=default.profdata --object=${OBJECT_FILES} > lcov.info 161 | 162 | workflows: 163 | version: 2.1 164 | 165 | test: 166 | jobs: 167 | - cargo_fetch 168 | - rustfmt: 169 | requires: 170 | - cargo_fetch 171 | - clippy: 172 | requires: 173 | - cargo_fetch 174 | - test: 175 | requires: 176 | - cargo_fetch 177 | - coverage_run: 178 | requires: 179 | - cargo_fetch 180 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | 13 | # Added by cargo 14 | # 15 | # already existing elements were commented out 16 | 17 | /target 18 | #Cargo.lock 19 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "vdf" 3 | version = "0.1.0" 4 | authors = ["porcuquine "] 5 | license = "MIT OR Apache-2.0" 
6 | edition = "2021" 7 | 8 | [dependencies] 9 | bellperson = { version = "0.22", default-features = false } 10 | crossbeam = "0.8" 11 | ff = "0.12.0" 12 | generic-array = "0.14.4" 13 | merlin = "2.0.0" 14 | neptune = { version = "7.2.0", default-features = false } 15 | nova = { package = "nova-snark", version = "0.8.0", default-features = false } 16 | pairing = "0.21" 17 | pasta_curves = { version = "0.4.0", features = ["repr-c"] } 18 | pasta-msm = "0.1.1" 19 | rand = "0.8" 20 | subtle = "2.3" 21 | 22 | [dev-dependencies] 23 | criterion = "0.3" 24 | rand_xorshift = "0.3.0" 25 | 26 | [[bench]] 27 | name = "vdf" 28 | harness = false 29 | 30 | [[bench]] 31 | name = "nova" 32 | harness = false 33 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2021 Protocol Labs 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at 4 | 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
8 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Protocol Labs 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # VDF 2 | A Sloth-based Verifiable Delay Function (VDF) evaluator and SNARK prover. 3 | 4 | # Status: Work-in-progress. 
5 | 6 | ## License 7 | 8 | The VDF Project is dual-licensed under Apache 2.0 and MIT terms: 9 | 10 | - Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 11 | - MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 12 | -------------------------------------------------------------------------------- /benches/nova.rs: -------------------------------------------------------------------------------- 1 | use pasta_curves::arithmetic::Field; 2 | use pasta_curves::pallas; 3 | 4 | use vdf::minroot::{MinRootVDF, PallasVDF, State, VanillaVDFProof}; 5 | 6 | use vdf::nova::proof::{make_nova_proof, PallasScalar, RawVanillaProof}; 7 | 8 | use vdf::TEST_SEED; 9 | 10 | use rand::SeedableRng; 11 | use rand_xorshift::XorShiftRng; 12 | 13 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, SamplingMode}; 14 | 15 | #[allow(clippy::many_single_char_names)] 16 | fn bench_nova_proof>(c: &mut Criterion, t: u64, n: u64) { 17 | let mut group = c.benchmark_group("nova"); 18 | group.sampling_mode(SamplingMode::Flat); 19 | 20 | let mut rng = XorShiftRng::from_seed(TEST_SEED); 21 | 22 | type F = pallas::Scalar; 23 | 24 | let x = Field::random(&mut rng); 25 | let y = F::zero(); 26 | let state = State { x, y, i: F::zero() }; 27 | 28 | let first_vanilla_proof = VanillaVDFProof::::eval_and_prove(state, t); 29 | 30 | let mut all_vanilla_proofs = Vec::with_capacity((n * t) as usize); 31 | all_vanilla_proofs.push(first_vanilla_proof.clone()); 32 | 33 | let final_vanilla_proof = (1..(n as usize)).fold(first_vanilla_proof, |acc, _| { 34 | let new_proof = VanillaVDFProof::::eval_and_prove(acc.result, t); 35 | all_vanilla_proofs.push(new_proof.clone()); 36 | acc.append(new_proof).expect("failed to append proof") 37 | }); 38 | 39 | assert_eq!( 40 | V::element(final_vanilla_proof.t), 41 | final_vanilla_proof.result.i 42 | ); 43 | assert_eq!(n * t, final_vanilla_proof.t); 44 | 
assert!(final_vanilla_proof.verify(state)); 45 | 46 | let raw_vanilla_proofs: Vec> = all_vanilla_proofs 47 | .iter() 48 | .map(|p| (p.clone()).into()) 49 | .collect(); 50 | 51 | let (shape, gens) = RawVanillaProof::::new_empty(raw_vanilla_proofs[0].t) 52 | .make_nova_shape_and_gens(); 53 | 54 | group.bench_function( 55 | BenchmarkId::new("nova-VDF-proof", format!("t={};n={}", t, n)), 56 | |b| { 57 | b.iter(|| make_nova_proof::(&raw_vanilla_proofs, &shape, &gens, false)); 58 | }, 59 | ); 60 | } 61 | 62 | fn bench_nova(c: &mut Criterion) { 63 | bench_nova_proof::(c, 10, 200); 64 | bench_nova_proof::(c, 100, 20); 65 | bench_nova_proof::(c, 1000, 2); 66 | } 67 | 68 | criterion_group! { 69 | name = nova; 70 | config = Criterion::default().sample_size(10); 71 | targets = bench_nova 72 | } 73 | 74 | criterion_main!(nova); 75 | -------------------------------------------------------------------------------- /benches/vdf.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, Criterion}; 2 | use pasta_curves::arithmetic::FieldExt; 3 | use pasta_curves::{pallas, vesta}; 4 | use vdf::minroot::{EvalMode, MinRootVDF, PallasVDF, State, VanillaVDFProof, VestaVDF}; 5 | 6 | fn bench_eval, F: FieldExt>(eval_mode: EvalMode, c: &mut Criterion, name: &str) { 7 | let t = 10000; 8 | let mut group = c.benchmark_group(format!("{}VDF-eval-{:?}-{}", name, eval_mode, t)); 9 | 10 | let x = State { 11 | x: V::element(123), 12 | y: V::element(321), 13 | i: F::zero(), 14 | }; 15 | 16 | group.bench_function("eval_and_prove", |b| { 17 | b.iter(|| { 18 | VanillaVDFProof::::eval_and_prove_with_mode(eval_mode, x, t); 19 | }); 20 | }); 21 | 22 | group.finish(); 23 | } 24 | 25 | fn bench_verify, F: FieldExt>(c: &mut Criterion, name: &str) { 26 | let t = 10000; 27 | let mut group = c.benchmark_group(format!("{}VDF-verify-{}", name, t)); 28 | 29 | let x = State { 30 | x: V::element(123), 31 | y: V::element(321), 32 | i: 
F::zero(), 33 | }; 34 | 35 | group.bench_function("verify", |b| { 36 | let proof = VanillaVDFProof::::eval_and_prove(x, t); 37 | 38 | b.iter(|| { 39 | proof.verify(x); 40 | }); 41 | }); 42 | group.finish(); 43 | } 44 | 45 | fn bench_pallas(c: &mut Criterion) { 46 | for eval_mode in EvalMode::all().iter() { 47 | bench_eval::(*eval_mode, c, "Pallas") 48 | } 49 | 50 | bench_verify::(c, "Pallas") 51 | } 52 | fn bench_vesta(c: &mut Criterion) { 53 | bench_eval::(EvalMode::LTRSequential, c, "Vesta"); 54 | bench_verify::(c, "Vesta") 55 | } 56 | 57 | criterion_group! { 58 | name = vdf; 59 | config = Criterion::default().sample_size(60); 60 | targets = bench_pallas, bench_vesta 61 | } 62 | 63 | criterion_main!(vdf); 64 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | nightly-2022-04-29 2 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod minroot; 2 | pub mod nova; 3 | 4 | pub const TEST_SEED: [u8; 16] = [42; 16]; 5 | -------------------------------------------------------------------------------- /src/minroot.rs: -------------------------------------------------------------------------------- 1 | use core::fmt::Debug; 2 | use ff::Field; 3 | 4 | use pasta_curves::{pallas, vesta}; 5 | use std::cell::UnsafeCell; 6 | use std::marker::PhantomData; 7 | use std::ops::{Add, Sub, SubAssign}; 8 | use std::sync::Arc; 9 | 10 | use nova::traits::Group; 11 | 12 | // Question: Should the naming of `PallasVDF` and `VestaVDF` be reversed? 
13 | 14 | #[derive(Debug, Clone, Copy, PartialEq)] 15 | pub enum EvalMode { 16 | LTRSequential, 17 | LTRAddChainSequential, 18 | RTLSequential, 19 | RTLAddChainSequential, 20 | } 21 | 22 | impl EvalMode { 23 | pub fn all() -> Vec { 24 | vec![ 25 | Self::LTRSequential, 26 | Self::LTRAddChainSequential, 27 | Self::RTLSequential, 28 | Self::RTLAddChainSequential, 29 | ] 30 | } 31 | } 32 | 33 | #[derive(Debug)] 34 | struct Sq(Arc>>); 35 | unsafe impl Send for Sq {} 36 | unsafe impl Sync for Sq {} 37 | 38 | /// Modulus is that of `Fq`, which is the base field of `Vesta` and scalar field of `Pallas`. 39 | #[derive(Debug, PartialEq)] 40 | pub struct PallasVDF { 41 | eval_mode: EvalMode, 42 | } 43 | 44 | impl MinRootVDF for PallasVDF { 45 | fn new_with_mode(eval_mode: EvalMode) -> Self { 46 | PallasVDF { eval_mode } 47 | } 48 | 49 | // To bench with this on 3970x: 50 | // RUSTFLAG="-C target-cpu=native -g" taskset -c 0,40 cargo bench 51 | fn eval(&mut self, x: State, t: u64) -> State { 52 | match self.eval_mode { 53 | EvalMode::LTRSequential 54 | | EvalMode::LTRAddChainSequential 55 | | EvalMode::RTLAddChainSequential 56 | | EvalMode::RTLSequential => self.simple_eval(x, t), 57 | } 58 | } 59 | 60 | fn element(n: u64) -> pallas::Scalar { 61 | pallas::Scalar::from(n) 62 | } 63 | 64 | fn exponent() -> [u64; 4] { 65 | FQ_RESCUE_INVALPHA 66 | } 67 | 68 | fn inverse_exponent() -> u64 { 69 | 5 70 | } 71 | 72 | /// Pallas' inverse_exponent is 5, so we can hardcode this. 
73 | fn inverse_step(x: pallas::Scalar) -> pallas::Scalar { 74 | x.mul(&x.square().square()) 75 | } 76 | 77 | fn forward_step(&mut self, x: pallas::Scalar) -> pallas::Scalar { 78 | match self.eval_mode { 79 | EvalMode::LTRSequential => self.forward_step_ltr_sequential(x), 80 | EvalMode::RTLSequential => self.forward_step_rtl_sequential(x), 81 | EvalMode::RTLAddChainSequential => self.forward_step_sequential_rtl_addition_chain(x), 82 | EvalMode::LTRAddChainSequential => self.forward_step_ltr_addition_chain(x), 83 | } 84 | } 85 | } 86 | 87 | impl PallasVDF { 88 | fn forward_step_ltr_addition_chain(&mut self, x: pallas::Scalar) -> pallas::Scalar { 89 | let sqr = |x: pallas::Scalar, i: u32| (0..i).fold(x, |x, _| x.square()); 90 | 91 | let mul = |x: pallas::Scalar, y| x.mul(y); 92 | let sqr_mul = |x, n, y: pallas::Scalar| y.mul(&sqr(x, n)); 93 | 94 | let q1 = x; 95 | let q10 = sqr(q1, 1); 96 | let q11 = mul(q10, &q1); 97 | let q101 = mul(q10, &q11); 98 | let q110 = sqr(q11, 1); 99 | let q111 = mul(q110, &q1); 100 | let q1001 = mul(q111, &q10); 101 | let q1111 = mul(q1001, &q110); 102 | let qr2 = sqr_mul(q110, 3, q11); 103 | let qr4 = sqr_mul(qr2, 8, qr2); 104 | let qr8 = sqr_mul(qr4, 16, qr4); 105 | let qr16 = sqr_mul(qr8, 32, qr8); 106 | let qr32 = sqr_mul(qr16, 64, qr16); 107 | let qr32a = sqr_mul(qr32, 5, q1001); 108 | let qr32b = sqr_mul(qr32a, 8, q111); 109 | let qr32c = sqr_mul(qr32b, 4, q1); 110 | let qr32d = sqr_mul(qr32c, 2, qr4); 111 | let qr32e = sqr_mul(qr32d, 7, q11); 112 | let qr32f = sqr_mul(qr32e, 6, q1001); 113 | let qr32g = sqr_mul(qr32f, 3, q101); 114 | let qr32h = sqr_mul(qr32g, 7, q101); 115 | let qr32i = sqr_mul(qr32h, 7, q111); 116 | let qr32j = sqr_mul(qr32i, 4, q111); 117 | let qr32k = sqr_mul(qr32j, 5, q1001); 118 | let qr32l = sqr_mul(qr32k, 5, q101); 119 | let qr32m = sqr_mul(qr32l, 3, q11); 120 | let qr32n = sqr_mul(qr32m, 4, q101); 121 | let qr32o = sqr_mul(qr32n, 3, q101); 122 | let qr32p = sqr_mul(qr32o, 6, q1111); 123 | let qr32q = 
sqr_mul(qr32p, 4, q1001); 124 | let qr32r = sqr_mul(qr32q, 6, q101); 125 | let qr32s = sqr_mul(qr32r, 37, qr8); 126 | sqr_mul(qr32s, 2, q1) 127 | } 128 | 129 | // Sequential RTL square-and-multiply. 130 | fn forward_step_rtl_sequential(&mut self, x: pallas::Scalar) -> pallas::Scalar { 131 | (0..254) 132 | .scan(x, |state, _| { 133 | let ret = *state; 134 | *state = (*state).square(); 135 | Some(ret) 136 | }) 137 | .fold( 138 | (Self::exponent(), pallas::Scalar::one(), 0), 139 | |(mut remaining, acc, count), elt| { 140 | let limb_index = count / 64; 141 | let limb = remaining[limb_index]; 142 | 143 | let one = (limb & 1) == 1; 144 | let acc = if one { acc.mul(&elt) } else { acc }; 145 | remaining[limb_index] = limb >> 1; 146 | 147 | (remaining, acc, count + 1) 148 | }, 149 | ) 150 | .1 151 | } 152 | 153 | // Sequential RTL square-and-multiply with optimized addition chain. 154 | fn forward_step_sequential_rtl_addition_chain(&mut self, x: pallas::Scalar) -> pallas::Scalar { 155 | let first_section_bit_count = 128; 156 | let acc = pallas::Scalar::one(); 157 | 158 | // First section is same as rtl without addition chain. 
159 | let (_, acc, _, square_acc) = (0..first_section_bit_count) 160 | .scan(x, |state, _| { 161 | let ret = *state; 162 | *state = (*state).square(); 163 | Some(ret) 164 | }) 165 | .fold( 166 | (Self::exponent(), acc, 0, pallas::Scalar::zero()), 167 | |(mut remaining, acc, count, _previous_elt), elt| { 168 | let limb_index = count / 64; 169 | let limb = remaining[limb_index]; 170 | 171 | let one = (limb & 1) == 1; 172 | let acc = if one { acc.mul(&elt) } else { acc }; 173 | remaining[limb_index] = limb >> 1; 174 | 175 | (remaining, acc, count + 1, elt) 176 | }, 177 | ); 178 | 179 | let square_acc = square_acc.mul(&square_acc.square()); 180 | let square_acc = square_acc.mul(&square_acc.square().square().square().square()); 181 | 182 | (0..122) 183 | .scan(square_acc, |state, _| { 184 | *state = (*state).square(); 185 | 186 | Some(*state) 187 | }) 188 | .fold((acc, 1), |(acc, count), elt| { 189 | if count % 8 == 1 { 190 | (acc.mul(&elt), count + 1) 191 | } else { 192 | (acc, count + 1) 193 | } 194 | }) 195 | .0 196 | } 197 | } 198 | 199 | /// Modulus is that of `Fp`, which is the base field of `Pallas and scalar field of Vesta. 200 | #[derive(Debug)] 201 | pub struct VestaVDF {} 202 | impl MinRootVDF for VestaVDF { 203 | fn new_with_mode(_eval_mode: EvalMode) -> Self { 204 | VestaVDF {} 205 | } 206 | 207 | fn element(n: u64) -> vesta::Scalar { 208 | vesta::Scalar::from(n) 209 | } 210 | 211 | fn exponent() -> [u64; 4] { 212 | FP_RESCUE_INVALPHA 213 | } 214 | 215 | fn inverse_exponent() -> u64 { 216 | 5 217 | } 218 | 219 | /// Vesta's inverse_exponent is 5, so we can hardcode this. 
220 | fn inverse_step(x: vesta::Scalar) -> vesta::Scalar { 221 | x.mul(&x.square().square()) 222 | } 223 | fn forward_step(&mut self, x: vesta::Scalar) -> vesta::Scalar { 224 | let sqr = |x: vesta::Scalar, i: u32| (0..i).fold(x, |x, _| x.square()); 225 | 226 | let mul = |x: vesta::Scalar, y| x.mul(y); 227 | let sqr_mul = |x, n, y: vesta::Scalar| y.mul(&sqr(x, n)); 228 | 229 | let p1 = x; 230 | let p10 = sqr(p1, 1); 231 | let p11 = mul(p10, &p1); 232 | let p101 = mul(p10, &p11); 233 | let p110 = sqr(p11, 1); 234 | let p111 = mul(p110, &p1); 235 | let p1001 = mul(p111, &p10); 236 | let p1111 = mul(p1001, &p110); 237 | let pr2 = sqr_mul(p110, 3, p11); 238 | let pr4 = sqr_mul(pr2, 8, pr2); 239 | let pr8 = sqr_mul(pr4, 16, pr4); 240 | let pr16 = sqr_mul(pr8, 32, pr8); 241 | let pr32 = sqr_mul(pr16, 64, pr16); 242 | let pr32a = sqr_mul(pr32, 5, p1001); 243 | let pr32b = sqr_mul(pr32a, 8, p111); 244 | let pr32c = sqr_mul(pr32b, 4, p1); 245 | let pr32d = sqr_mul(pr32c, 2, pr4); 246 | let pr32e = sqr_mul(pr32d, 7, p11); 247 | let pr32f = sqr_mul(pr32e, 6, p1001); 248 | let pr32g = sqr_mul(pr32f, 3, p101); 249 | let pr32h = sqr_mul(pr32g, 5, p1); 250 | let pr32i = sqr_mul(pr32h, 7, p101); 251 | let pr32j = sqr_mul(pr32i, 4, p11); 252 | let pr32k = sqr_mul(pr32j, 8, p111); 253 | let pr32l = sqr_mul(pr32k, 4, p1); 254 | let pr32m = sqr_mul(pr32l, 4, p111); 255 | let pr32n = sqr_mul(pr32m, 9, p1111); 256 | let pr32o = sqr_mul(pr32n, 8, p1111); 257 | let pr32p = sqr_mul(pr32o, 6, p1111); 258 | let pr32q = sqr_mul(pr32p, 2, p11); 259 | let pr32r = sqr_mul(pr32q, 34, pr8); 260 | sqr_mul(pr32r, 2, p1) 261 | } 262 | } 263 | 264 | // Question: Is this right, or is it the reverse? Which scalar fields' modulus do we want to target? 
265 | pub type TargetVDF<'a> = PallasVDF; 266 | 267 | #[derive(std::cmp::PartialEq, Debug, Clone, Copy)] 268 | pub struct State { 269 | pub x: T, 270 | pub y: T, 271 | pub i: T, 272 | } 273 | const FP_RESCUE_INVALPHA: [u64; 4] = [ 274 | 0xe0f0f3f0cccccccd, 275 | 0x4e9ee0c9a10a60e2, 276 | 0x3333333333333333, 277 | 0x3333333333333333, 278 | ]; 279 | 280 | const FQ_RESCUE_INVALPHA: [u64; 4] = [ 281 | 0xd69f2280cccccccd, 282 | 0x4e9ee0c9a143ba4a, 283 | 0x3333333333333333, 284 | 0x3333333333333333, 285 | ]; 286 | 287 | pub trait MinRootVDF: Debug 288 | where 289 | G: Group, 290 | { 291 | fn new() -> Self 292 | where 293 | Self: Sized, 294 | { 295 | Self::new_with_mode(Self::default_mode()) 296 | } 297 | 298 | fn new_with_mode(eval_mode: EvalMode) -> Self; 299 | 300 | fn default_mode() -> EvalMode { 301 | EvalMode::LTRSequential 302 | } 303 | 304 | /// Exponent used to take a root in the 'slow' direction. 305 | fn exponent() -> [u64; 4]; 306 | 307 | /// Exponent used in the 'fast' direction. 308 | fn inverse_exponent() -> u64; 309 | 310 | #[inline] 311 | /// The building block of a round in the slow, 'forward' direction. 312 | fn forward_step_ltr_sequential(&mut self, x: G::Scalar) -> G::Scalar { 313 | x.pow_vartime(Self::exponent()) 314 | } 315 | 316 | #[inline] 317 | /// The building block of a round in the slow, 'forward' direction. 318 | fn forward_step(&mut self, x: G::Scalar) -> G::Scalar { 319 | self.forward_step_ltr_sequential(x) 320 | } 321 | 322 | #[inline] 323 | /// The building block of a round in the fast, 'inverse' direction. 324 | fn inverse_step(x: G::Scalar) -> G::Scalar { 325 | x.pow_vartime([Self::inverse_exponent(), 0, 0, 0]) 326 | } 327 | 328 | /// one round in the slow/forward direction. 
329 | fn round(&mut self, s: State) -> State { 330 | State { 331 | x: self.forward_step(G::Scalar::add(s.x, s.y)), 332 | y: G::Scalar::add(s.x, s.i), 333 | i: G::Scalar::add(s.i, G::Scalar::one()), 334 | } 335 | } 336 | 337 | /// One round in the fast/inverse direction. 338 | fn inverse_round(s: State) -> State { 339 | let i = G::Scalar::sub(s.i, &G::Scalar::one()); 340 | let x = G::Scalar::sub(s.y, &i); 341 | let mut y = Self::inverse_step(s.x); 342 | y.sub_assign(&x); 343 | State { x, y, i } 344 | } 345 | 346 | /// Evaluate input `x` with time/difficulty parameter, `t` in the 347 | /// slow/forward direction. 348 | fn eval(&mut self, x: State, t: u64) -> State { 349 | self.simple_eval(x, t) 350 | } 351 | 352 | fn simple_eval(&mut self, x: State, t: u64) -> State { 353 | let mut acc = x; 354 | for _ in 0..t { 355 | acc = self.round(acc); 356 | } 357 | 358 | acc 359 | } 360 | 361 | /// Invert evaluation of output `x` with time/difficulty parameter, `t` in 362 | /// the fast/inverse direction. 363 | fn inverse_eval(x: State, t: u64) -> State { 364 | (0..t).fold(x, |acc, _| Self::inverse_round(acc)) 365 | } 366 | 367 | /// Quickly check that `result` is the result of having slowly evaluated 368 | /// `original` with time/difficulty parameter `t`. 
369 | fn check(result: State, t: u64, original: State) -> bool { 370 | original == Self::inverse_eval(result, t) 371 | } 372 | 373 | fn element(n: u64) -> G::Scalar; 374 | } 375 | 376 | #[derive(Debug, PartialEq)] 377 | pub struct Evaluation + Debug, G: Group> { 378 | pub result: State, 379 | pub t: u64, 380 | _v: PhantomData, 381 | } 382 | 383 | impl, G: Group> Clone for Evaluation { 384 | fn clone(&self) -> Self { 385 | Self { 386 | result: self.result, 387 | t: self.t, 388 | _v: PhantomData::::default(), 389 | } 390 | } 391 | } 392 | 393 | impl, G: Group> Evaluation { 394 | pub fn eval(x: State, t: u64) -> (Vec, Self) { 395 | let mut vdf = V::new(); 396 | let result = vdf.eval(x, t); 397 | 398 | let z0 = vec![result.x, result.y, result.i]; 399 | 400 | ( 401 | z0, 402 | Self { 403 | result, 404 | t, 405 | _v: PhantomData::, 406 | }, 407 | ) 408 | } 409 | 410 | pub fn eval_with_mode(eval_mode: EvalMode, x: State, t: u64) -> Self { 411 | let mut vdf = V::new_with_mode(eval_mode); 412 | let result = vdf.eval(x, t); 413 | Self { 414 | result, 415 | t, 416 | _v: PhantomData::, 417 | } 418 | } 419 | 420 | pub fn result(&self) -> State { 421 | self.result 422 | } 423 | 424 | pub fn verify(&self, original: State) -> bool { 425 | V::check(self.result, self.t, original) 426 | } 427 | 428 | pub fn append(&self, other: Self) -> Option { 429 | if other.verify(self.result) { 430 | Some(Self { 431 | result: other.result, 432 | t: self.t + other.t, 433 | _v: PhantomData::, 434 | }) 435 | } else { 436 | None 437 | } 438 | } 439 | } 440 | 441 | #[cfg(test)] 442 | mod tests { 443 | use super::*; 444 | use crate::TEST_SEED; 445 | 446 | use rand::SeedableRng; 447 | use rand_xorshift::XorShiftRng; 448 | 449 | #[test] 450 | fn test_exponents() { 451 | test_exponents_aux::(); 452 | test_exponents_aux::(); 453 | } 454 | 455 | fn test_exponents_aux, G: Group>() { 456 | assert_eq!(V::inverse_exponent(), 5); 457 | assert_eq!(V::inverse_exponent(), 5); 458 | } 459 | 460 | #[test] 461 | fn 
test_steps() { 462 | test_steps_aux::(); 463 | test_steps_aux::(); 464 | } 465 | 466 | fn test_steps_aux, G: Group>() { 467 | let mut rng = XorShiftRng::from_seed(TEST_SEED); 468 | let mut vdf = V::new(); 469 | 470 | for _ in 0..100 { 471 | let x = G::Scalar::random(&mut rng); 472 | let y = vdf.forward_step(x); 473 | let z = V::inverse_step(y); 474 | 475 | assert_eq!(x, z); 476 | } 477 | } 478 | 479 | #[test] 480 | fn test_eval() { 481 | println!("top"); 482 | test_eval_aux::(); 483 | } 484 | 485 | fn test_eval_aux, G: Group>() { 486 | for mode in EvalMode::all().iter() { 487 | test_eval_aux2::(*mode) 488 | } 489 | } 490 | 491 | fn test_eval_aux2, G: Group>(eval_mode: EvalMode) { 492 | let mut rng = XorShiftRng::from_seed(TEST_SEED); 493 | let mut vdf = V::new_with_mode(eval_mode); 494 | 495 | for _ in 0..10 { 496 | let t = 10; 497 | let x = G::Scalar::random(&mut rng); 498 | let y = G::Scalar::random(&mut rng); 499 | let x = State { 500 | x, 501 | y, 502 | i: G::Scalar::zero(), 503 | }; 504 | let result = vdf.eval(x, t); 505 | let again = V::inverse_eval(result, t); 506 | 507 | assert_eq!(x, again); 508 | assert!(V::check(result, t, x)); 509 | } 510 | } 511 | 512 | #[test] 513 | fn test_vanilla_proof() { 514 | test_vanilla_proof_aux::(); 515 | test_vanilla_proof_aux::(); 516 | } 517 | 518 | fn test_vanilla_proof_aux, G: Group>() { 519 | let mut rng = XorShiftRng::from_seed(TEST_SEED); 520 | 521 | let x = G::Scalar::random(&mut rng); 522 | let y = G::Scalar::zero(); 523 | let x = State { 524 | x, 525 | y, 526 | i: G::Scalar::zero(), 527 | }; 528 | let t = 4; 529 | let n = 3; 530 | 531 | let (_z0, first_proof) = Evaluation::::eval(x, t); 532 | 533 | let final_proof = (1..n).fold(first_proof, |acc, _| { 534 | let (_, new_proof) = Evaluation::::eval(acc.result, t); 535 | 536 | acc.append(new_proof).expect("failed to append proof") 537 | }); 538 | 539 | assert_eq!(V::element(final_proof.t), final_proof.result.i); 540 | assert_eq!(n * t, final_proof.t); 541 | 
assert!(final_proof.verify(x)); 542 | } 543 | } 544 | -------------------------------------------------------------------------------- /src/nova/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | non_snake_case, 3 | clippy::many_single_char_names, 4 | dead_code, 5 | clippy::type_complexity 6 | )] 7 | pub mod proof; 8 | -------------------------------------------------------------------------------- /src/nova/proof.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use bellperson::{ 4 | gadgets::{ 5 | boolean::Boolean, 6 | num::{AllocatedNum, Num}, 7 | }, 8 | ConstraintSystem, LinearCombination, SynthesisError, 9 | }; 10 | 11 | use ff::{Field, PrimeField}; 12 | 13 | use nova::{ 14 | errors::NovaError, 15 | traits::{ 16 | circuit::{StepCircuit, TrivialTestCircuit}, 17 | Group, 18 | }, 19 | CompressedSNARK, RecursiveSNARK, 20 | }; 21 | 22 | use pasta_curves::{pallas, vesta}; 23 | 24 | use crate::minroot::{Evaluation, MinRootVDF, State}; 25 | 26 | type G1 = pallas::Point; 27 | type G2 = vesta::Point; 28 | 29 | type S1 = pallas::Scalar; 30 | type S2 = vesta::Scalar; 31 | 32 | type SS1 = nova::spartan_with_ipa_pc::RelaxedR1CSSNARK; 33 | type SS2 = nova::spartan_with_ipa_pc::RelaxedR1CSSNARK; 34 | 35 | type C1 = InverseMinRootCircuit; 36 | type C2 = TrivialTestCircuit<::Scalar>; 37 | 38 | type NovaVDFPublicParams = nova::PublicParams< 39 | G1, 40 | G2, 41 | InverseMinRootCircuit, 42 | TrivialTestCircuit<::Scalar>, 43 | >; 44 | 45 | #[derive(Debug)] 46 | pub enum Error { 47 | Nova(NovaError), 48 | Synthesis(SynthesisError), 49 | } 50 | 51 | #[allow(clippy::large_enum_variant)] 52 | pub enum NovaVDFProof { 53 | Recursive(RecursiveSNARK), 54 | Compressed(CompressedSNARK), 55 | } 56 | 57 | #[derive(Clone, Debug)] 58 | pub struct InverseMinRootCircuit 59 | where 60 | G: Debug + Group, 61 | { 62 | pub inverse_exponent: u64, 63 | pub result: Option>, 64 | 
pub input: Option<State<G::Scalar>>,
    // Number of inverse rounds performed per step.
    pub t: u64,
}

impl<G: Group> InverseMinRootCircuit<G> {
    /// Builds a circuit instance from a vanilla evaluation proof and the
    /// state that preceded it (the circuit runs MinRoot *backwards*).
    fn new<V: MinRootVDF<G>>(v: &Evaluation<V, G>, previous_state: State<G::Scalar>) -> Self {
        InverseMinRootCircuit {
            inverse_exponent: V::inverse_exponent(),
            result: Some(v.result),
            input: Some(previous_state),
            t: v.t,
        }
    }
}

impl<G> StepCircuit<G::Scalar> for InverseMinRootCircuit<G>
where
    G: Group,
{
    fn arity(&self) -> usize {
        // z = [x, y, i]
        3
    }

    /// Synthesizes `t` inverse MinRoot rounds, threading (x, y, i) through
    /// and returning the final [x, y, i] as the step output.
    fn synthesize<CS>(
        &self,
        cs: &mut CS,
        z: &[AllocatedNum<G::Scalar>],
    ) -> Result<Vec<AllocatedNum<G::Scalar>>, SynthesisError>
    where
        CS: ConstraintSystem<G::Scalar>,
    {
        assert_eq!(self.arity(), z.len());

        let t = self.t;
        let mut x = z[0].clone();
        let mut y = z[1].clone();
        let i = z[2].clone();
        let mut i_num = Num::from(i);

        let mut final_x = x.clone();
        let mut final_y = y.clone();
        let mut final_i_num = i_num.clone();

        for j in 0..t {
            let (new_i, new_x, new_y) = inverse_round(
                &mut cs.namespace(|| format!("inverse_round_{}", j)),
                i_num,
                x,
                y,
            )?;
            final_x = new_x.clone();
            final_y = new_y.clone();
            final_i_num = new_i.clone();
            i_num = new_i;
            x = new_x;
            y = new_y;
        }

        // Materialize the final round counter as an allocated variable so it
        // can appear in the step output, and constrain it to equal the Num.
        let final_i = AllocatedNum::<G::Scalar>::alloc(&mut cs.namespace(|| "final_i"), || {
            final_i_num
                .get_value()
                .ok_or(SynthesisError::AssignmentMissing)
        })?;

        cs.enforce(
            || "final_i matches final_i_num",
            |lc| lc + final_i.get_variable(),
            |lc| lc + CS::one(),
            |_| final_i_num.lc(G::Scalar::one()),
        );

        let res = vec![final_x, final_y, final_i];

        // Fix: the original re-asserted `z.len()` here; it is the *output*
        // that must match the arity.
        assert_eq!(self.arity(), res.len());

        Ok(res)
    }

    fn output(&self, z: &[G::Scalar]) -> Vec<G::Scalar> {
        // sanity check: z must be the (forward) result we are inverting.
        let result = self.result.expect("result missing");
        let state = self.input.expect("state missing");

        debug_assert_eq!(z[0], result.x);
debug_assert_eq!(z[1], result.y);
        debug_assert_eq!(z[2], result.i);

        // The step's output is the *previous* state: this circuit runs the
        // VDF in reverse.
        vec![state.x, state.y, state.i]
    }
}

/// Synthesizes one inverse MinRoot round:
///   new_i = i - 1
///   new_x = y - new_i        (= y - i + 1)
///   new_y = x^5 - new_x
/// Only one multiplicative constraint is enforced here (x^5 = new_y + new_x,
/// with new_x expanded as y - i + 1); the two squarings that build x^4 add
/// their own constraints.
fn inverse_round<CS: ConstraintSystem<F>, F: PrimeField>(
    cs: &mut CS,
    i: Num<F>,
    x: AllocatedNum<F>,
    y: AllocatedNum<F>,
) -> Result<(Num<F>, AllocatedNum<F>, AllocatedNum<F>), SynthesisError> {
    // i = i - 1 (a Num, so no constraint is needed for the decrement)
    let new_i = i
        .clone()
        .add_bool_with_coeff(CS::one(), &Boolean::Constant(true), -F::from(1));

    // new_x = y - new_i = y - i + 1
    let new_x = AllocatedNum::<F>::alloc(&mut cs.namespace(|| "new_x"), || {
        if let (Some(y), Some(new_i)) = (y.get_value(), new_i.get_value()) {
            Ok(y - new_i)
        } else {
            Err(SynthesisError::AssignmentMissing)
        }
    })?;

    // tmp1 = x * x
    let tmp1 = x.square(&mut cs.namespace(|| "tmp1"))?;
    // tmp2 = tmp1 * tmp1 (= x^4)
    let tmp2 = tmp1.square(&mut cs.namespace(|| "tmp2"))?;

    // new_y = (tmp2 * x) - new_x, i.e. x^5 - new_x
    let new_y = AllocatedNum::<F>::alloc(&mut cs.namespace(|| "new_y"), || {
        if let (Some(x), Some(new_x), Some(tmp2)) =
            (x.get_value(), new_x.get_value(), tmp2.get_value())
        {
            Ok((tmp2 * x) - new_x)
        } else {
            Err(SynthesisError::AssignmentMissing)
        }
    })?;

    // Witness-side sanity checks (debug builds only):
    // new_y = (tmp2 * x) - new_x
    // (tmp2 * x) = new_y + new_x
    // (tmp2 * x) = new_y + y - i + 1
    if tmp2.get_value().is_some() {
        debug_assert_eq!(
            tmp2.get_value().ok_or(SynthesisError::AssignmentMissing)?
                * x.get_value().ok_or(SynthesisError::AssignmentMissing)?,
            new_y.get_value().ok_or(SynthesisError::AssignmentMissing)?
                + new_x.get_value().ok_or(SynthesisError::AssignmentMissing)?,
        );

        debug_assert_eq!(
            new_x.get_value().ok_or(SynthesisError::AssignmentMissing)?,
            y.get_value().ok_or(SynthesisError::AssignmentMissing)?
                - i.get_value().ok_or(SynthesisError::AssignmentMissing)?
+ F::one()
        );

        debug_assert_eq!(
            tmp2.get_value().ok_or(SynthesisError::AssignmentMissing)?
                * x.get_value().ok_or(SynthesisError::AssignmentMissing)?,
            new_y.get_value().ok_or(SynthesisError::AssignmentMissing)?
                + y.get_value().ok_or(SynthesisError::AssignmentMissing)?
                - i.get_value().ok_or(SynthesisError::AssignmentMissing)?
                + F::one()
        );
    }

    // Enforce x^5 = new_y + new_x, with new_x expanded as y - i + 1.
    cs.enforce(
        || "new_y + new_x = (tmp2 * x)",
        |lc| lc + tmp2.get_variable(),
        |lc| lc + x.get_variable(),
        |lc| {
            lc + new_y.get_variable() + y.get_variable() - &i.lc(1.into())
                + &LinearCombination::from_coeff(CS::one(), 1.into())
        },
    );

    Ok((new_i, new_x, new_y))
}

/// Generates Nova public parameters for steps of `num_iters_per_step`
/// inverse rounds.
pub fn public_params(num_iters_per_step: u64) -> NovaVDFPublicParams {
    let (circuit_primary, circuit_secondary) =
        InverseMinRootCircuit::<G1>::circuits(num_iters_per_step);

    // Fix: dropped a redundant `.clone()` on `circuit_secondary`, which was
    // moved here and never used again.
    NovaVDFPublicParams::setup(circuit_primary, circuit_secondary)
}

impl InverseMinRootCircuit<G1> {
    /// Returns the (primary, secondary) circuit pair used for setup/proving.
    pub fn circuits(
        num_iters_per_step: u64,
    ) -> (InverseMinRootCircuit<G1>, TrivialTestCircuit<S2>) {
        (
            Self::circuit_primary(num_iters_per_step),
            Self::circuit_secondary(),
        )
    }

    /// A shape-only primary circuit (no witness) for parameter generation.
    pub fn circuit_primary(num_iters_per_step: u64) -> InverseMinRootCircuit<G1> {
        InverseMinRootCircuit {
            inverse_exponent: 5,
            result: None,
            input: None,
            t: num_iters_per_step,
        }
    }

    /// The trivial (identity) secondary circuit for the other curve.
    pub fn circuit_secondary() -> TrivialTestCircuit<S2> {
        TrivialTestCircuit::default()
    }

    /// Evaluates the VDF forward for `num_steps` steps of
    /// `num_iters_per_step` iterations each, then builds the *reversed*
    /// list of inverse circuits that Nova will prove.
    pub fn eval_and_make_circuits<V: MinRootVDF<G1>>(
        _v: V,
        num_iters_per_step: u64,
        num_steps: usize,
        initial_state: State<S1>,
    ) -> (Vec<S1>, Vec<InverseMinRootCircuit<G1>>) {
        assert!(num_steps > 0);

        let (z0_primary, all_vanilla_proofs) = {
            let mut all_vanilla_proofs = Vec::with_capacity(num_steps);
            let mut state = initial_state;
            let mut
z0_primary_opt = None;
            for _ in 0..num_steps {
                let (z0, proof) = Evaluation::<V, G1>::eval(state, num_iters_per_step);
                state = proof.result;
                all_vanilla_proofs.push(proof);
                // Overwritten each step; after the loop this is the *last*
                // step's z0, which becomes Nova's primary input because the
                // circuits below are proved in reverse order.
                z0_primary_opt = Some(z0);
            }
            let z0_primary = z0_primary_opt.unwrap();
            (z0_primary, all_vanilla_proofs)
        };

        let circuits = {
            let mut previous_state = initial_state;
            let mut circuits = all_vanilla_proofs
                .iter()
                .map(|p| {
                    let rvp = Self::new(p, previous_state);
                    previous_state = rvp.result.unwrap();
                    rvp
                })
                .collect::<Vec<_>>();
            // The inverse circuits run the VDF backwards, so prove them in
            // reverse order.
            circuits.reverse();
            circuits
        };
        (z0_primary, circuits)
    }
}

impl NovaVDFProof {
    /// Folds each circuit into a `RecursiveSNARK` one `prove_step` at a
    /// time, returning the final recursive proof.
    pub fn prove_recursively(
        pp: &NovaVDFPublicParams,
        circuits: &[InverseMinRootCircuit<G1>],
        num_iters_per_step: u64,
        z0: Vec<S1>,
    ) -> Result<Self, Error> {
        let debug = false;
        let z0_primary = z0;
        let z0_secondary = Self::z0_secondary();

        let (_circuit_primary, circuit_secondary) =
            InverseMinRootCircuit::<G1>::circuits(num_iters_per_step);

        // produce a recursive SNARK
        let mut recursive_snark: Option<RecursiveSNARK<G1, G2, C1, C2>> = None;

        for (i, circuit_primary) in circuits.iter().enumerate() {
            if debug {
                // For debugging purposes, synthesize the circuit and check that the constraint system is satisfied.
use bellperson::util_cs::test_cs::TestConstraintSystem;
                let mut cs = TestConstraintSystem::<<G1 as Group>::Scalar>::new();

                let r = circuit_primary.result.unwrap();

                // Allocate z_i = [x, y, i] from the step's expected result.
                let zi_allocated = vec![
                    AllocatedNum::alloc(cs.namespace(|| format!("z{}_1", i)), || Ok(r.x))
                        .map_err(Error::Synthesis)?,
                    AllocatedNum::alloc(cs.namespace(|| format!("z{}_2", i)), || Ok(r.y))
                        .map_err(Error::Synthesis)?,
                    AllocatedNum::alloc(cs.namespace(|| format!("z{}_0", i)), || Ok(r.i))
                        .map_err(Error::Synthesis)?,
                ];

                circuit_primary
                    .synthesize(&mut cs, zi_allocated.as_slice())
                    .map_err(Error::Synthesis)?;

                assert!(cs.is_satisfied());
            }

            let res = RecursiveSNARK::prove_step(
                pp,
                recursive_snark,
                circuit_primary.clone(),
                circuit_secondary.clone(),
                z0_primary.clone(),
                z0_secondary.clone(),
            );
            if res.is_err() {
                dbg!(&res);
            }
            // Fix: removed `assert!(res.is_ok())`, which panicked on a
            // recoverable error the `?` below already propagates as
            // `Error::Nova`.
            recursive_snark = Some(res.map_err(Error::Nova)?);
        }

        Ok(Self::Recursive(recursive_snark.unwrap()))
    }

    /// Compresses a recursive proof with Spartan; compressing an
    /// already-compressed proof is a no-op.
    pub fn compress(self, pp: &NovaVDFPublicParams) -> Result<Self, Error> {
        match &self {
            Self::Recursive(recursive_snark) => Ok(Self::Compressed(
                CompressedSNARK::<_, _, _, _, SS1, SS2>::prove(pp, recursive_snark)
                    .map_err(Error::Nova)?,
            )),
            Self::Compressed(_) => Ok(self),
        }
    }

    /// Verifies the proof for `num_steps` steps from input `z0` and checks
    /// the claimed final outputs `zi` against the verified ones.
    pub fn verify(
        &self,
        pp: &NovaVDFPublicParams,
        num_steps: usize,
        z0: Vec<S1>,
        zi: &[S1],
    ) -> Result<bool, Error> {
        let (z0_primary, zi_primary) = (z0, zi);
        let z0_secondary = Self::z0_secondary();
        // The trivial secondary circuit is the identity, so its final
        // output equals its input.
        let zi_secondary = z0_secondary.clone();

        let (zi_primary_verified, zi_secondary_verified) = match self {
            Self::Recursive(p) => p.verify(pp, num_steps, z0_primary, z0_secondary),
            Self::Compressed(p) => p.verify(pp, num_steps, z0_primary, z0_secondary),
        }
        // Fix: no `From<NovaError> for Error` impl is in scope, so the bare
        // `?` on the match could not convert the error; map it explicitly.
        .map_err(Error::Nova)?;
Ok(zi_primary == zi_primary_verified && zi_secondary == zi_secondary_verified)
    }

    /// z0 for the trivial secondary circuit: a single zero scalar.
    fn z0_secondary() -> Vec<S2> {
        vec![<G2 as Group>::Scalar::zero()]
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::minroot::{PallasVDF, State};
    use crate::TEST_SEED;

    use rand::SeedableRng;
    use rand_xorshift::XorShiftRng;

    #[test]
    fn test_nova_proof() {
        test_nova_proof_aux::<PallasVDF>(5, 3);
    }

    /// End-to-end: evaluate the VDF, prove it recursively, verify, then
    /// compress and verify again.
    fn test_nova_proof_aux<V: MinRootVDF<G1> + PartialEq>(
        num_iters_per_step: u64,
        num_steps: usize,
    ) {
        let mut rng = XorShiftRng::from_seed(TEST_SEED);

        type F = S1;
        type G = G1;

        let x = Field::random(&mut rng);
        let y = F::zero();
        let initial_i = F::one();

        let initial_state = State { x, y, i: initial_i };
        // Expected final output of the (reversed) recursion: the initial
        // state the VDF started from.
        let zi = vec![x, y, initial_i];

        // produce public parameters
        let pp = public_params(num_iters_per_step);

        let (z0, circuits) = InverseMinRootCircuit::eval_and_make_circuits(
            V::new(),
            num_iters_per_step,
            num_steps,
            initial_state,
        );

        let recursive_snark =
            NovaVDFProof::prove_recursively(&pp, &circuits, num_iters_per_step, z0.clone())
                .unwrap();

        // verify the recursive SNARK
        let res = recursive_snark.verify(&pp, num_steps, z0.clone(), &zi);

        if res.is_err() {
            dbg!(&res);
        }
        assert!(res.unwrap());

        // produce a compressed SNARK
        let compressed_snark = recursive_snark.compress(&pp).unwrap();
        // verify the compressed SNARK.
        let res = compressed_snark.verify(&pp, num_steps, z0, &zi);
        // Fix: the original asserted only `res.is_ok()`, which passes even
        // when verification returns Ok(false); assert the bool itself.
        assert!(res.unwrap());
    }
}