├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── cuda ├── ArithmeticGate.cuh ├── BaseSumGate.cuh ├── CMakeLists.txt ├── Cargo.toml ├── ComparisonGate.cuh ├── ConstantGate.cuh ├── NoopGate.cuh ├── PoseidonGate.cuh ├── PublicInputGate.cuh ├── RandomAccessGate.cuh ├── U32AddManyGate.cuh ├── U32ArithmeticGate.cuh ├── U32RangeCheckGate.cuh ├── U32SubtractionGate.cuh ├── build.rs ├── constants.cuh ├── def.cuh ├── gates-def.cuh ├── plonky2_gpu.cu ├── plonky2_gpu_impl.cuh ├── src │ ├── lib.c │ └── lib.rs └── test.cu ├── ecdsa ├── Cargo.toml └── src │ ├── curve │ ├── curve_adds.rs │ ├── curve_msm.rs │ ├── curve_multiplication.rs │ ├── curve_summation.rs │ ├── curve_types.rs │ ├── ecdsa.rs │ ├── glv.rs │ ├── mod.rs │ └── secp256k1.rs │ ├── gadgets │ ├── biguint.rs │ ├── curve.rs │ ├── curve_fixed_base.rs │ ├── curve_msm.rs │ ├── curve_windowed_mul.rs │ ├── ecdsa.rs │ ├── glv.rs │ ├── mod.rs │ ├── nonnative.rs │ └── split_nonnative.rs │ └── lib.rs ├── evm ├── Cargo.toml ├── benches │ └── stack_manipulation.rs ├── src │ ├── all_stark.rs │ ├── arithmetic │ │ ├── add.rs │ │ ├── arithmetic_stark.rs │ │ ├── columns.rs │ │ ├── compare.rs │ │ ├── mod.rs │ │ ├── modular.rs │ │ ├── mul.rs │ │ ├── sub.rs │ │ └── utils.rs │ ├── bin │ │ └── assemble.rs │ ├── config.rs │ ├── constraint_consumer.rs │ ├── cpu │ │ ├── bootstrap_kernel.rs │ │ ├── columns │ │ │ ├── general.rs │ │ │ ├── mod.rs │ │ │ └── ops.rs │ │ ├── contextops.rs │ │ ├── control_flow.rs │ │ ├── cpu_stark.rs │ │ ├── decode.rs │ │ ├── dup_swap.rs │ │ ├── jumps.rs │ │ ├── kernel │ │ │ ├── aggregator.rs │ │ │ ├── assembler.rs │ │ │ ├── ast.rs │ │ │ ├── constants │ │ │ │ ├── context_metadata.rs │ │ │ │ ├── global_metadata.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── trie_type.rs │ │ │ │ └── txn_fields.rs │ │ │ ├── cost_estimator.rs │ │ │ ├── interpreter.rs │ │ │ ├── keccak_util.rs │ │ │ ├── mod.rs │ │ │ ├── opcodes.rs │ │ │ ├── optimizer.rs │ │ │ ├── parser.rs │ │ │ ├── stack │ │ │ │ ├── mod.rs │ │ │ │ ├── permutations.rs │ │ │ │ 
└── stack_manipulation.rs │ │ │ ├── tests │ │ │ │ ├── account_code.rs │ │ │ │ ├── balance.rs │ │ │ │ ├── core │ │ │ │ │ ├── create_addresses.rs │ │ │ │ │ ├── intrinsic_gas.rs │ │ │ │ │ ├── jumpdest_analysis.rs │ │ │ │ │ └── mod.rs │ │ │ │ ├── curve_ops.rs │ │ │ │ ├── ecrecover.rs │ │ │ │ ├── exp.rs │ │ │ │ ├── fields.rs │ │ │ │ ├── hash.rs │ │ │ │ ├── mod.rs │ │ │ │ ├── mpt │ │ │ │ │ ├── hash.rs │ │ │ │ │ ├── hex_prefix.rs │ │ │ │ │ ├── insert.rs │ │ │ │ │ ├── load.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── read.rs │ │ │ │ ├── packing.rs │ │ │ │ ├── ripemd.rs │ │ │ │ ├── rlp │ │ │ │ │ ├── decode.rs │ │ │ │ │ ├── encode.rs │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── num_bytes.rs │ │ │ │ └── transaction_parsing │ │ │ │ │ ├── mod.rs │ │ │ │ │ └── parse_type_0_txn.rs │ │ │ └── utils.rs │ │ ├── membus.rs │ │ ├── memio.rs │ │ ├── mod.rs │ │ ├── modfp254.rs │ │ ├── pc.rs │ │ ├── shift.rs │ │ ├── simple_logic │ │ │ ├── eq_iszero.rs │ │ │ ├── mod.rs │ │ │ └── not.rs │ │ ├── stack.rs │ │ ├── stack_bounds.rs │ │ └── syscalls.rs │ ├── cross_table_lookup.rs │ ├── generation │ │ ├── mod.rs │ │ ├── mpt.rs │ │ ├── prover_input.rs │ │ ├── rlp.rs │ │ └── state.rs │ ├── get_challenges.rs │ ├── keccak │ │ ├── columns.rs │ │ ├── constants.rs │ │ ├── keccak_stark.rs │ │ ├── logic.rs │ │ ├── mod.rs │ │ └── round_flags.rs │ ├── keccak_sponge │ │ ├── columns.rs │ │ ├── keccak_sponge_stark.rs │ │ └── mod.rs │ ├── lib.rs │ ├── logic.rs │ ├── lookup.rs │ ├── memory │ │ ├── columns.rs │ │ ├── memory_stark.rs │ │ ├── mod.rs │ │ └── segments.rs │ ├── permutation.rs │ ├── proof.rs │ ├── prover.rs │ ├── recursive_verifier.rs │ ├── stark.rs │ ├── stark_testing.rs │ ├── util.rs │ ├── vanishing_poly.rs │ ├── vars.rs │ ├── verifier.rs │ └── witness │ │ ├── errors.rs │ │ ├── mem_tx.rs │ │ ├── memory.rs │ │ ├── mod.rs │ │ ├── operation.rs │ │ ├── state.rs │ │ ├── traces.rs │ │ ├── transition.rs │ │ └── util.rs └── tests │ ├── empty_txn_list.rs │ └── transfer_to_new_addr.rs ├── field ├── Cargo.toml └── src │ ├── arch 
│ ├── mod.rs │ └── x86_64 │ │ ├── avx2_goldilocks_field.rs │ │ ├── avx512_goldilocks_field.rs │ │ └── mod.rs │ ├── batch_util.rs │ ├── cosets.rs │ ├── extension │ ├── algebra.rs │ ├── mod.rs │ ├── quadratic.rs │ ├── quartic.rs │ └── quintic.rs │ ├── fft.rs │ ├── field_testing.rs │ ├── goldilocks_extensions.rs │ ├── goldilocks_field.rs │ ├── interpolation.rs │ ├── inversion.rs │ ├── lib.rs │ ├── ops.rs │ ├── packable.rs │ ├── packed.rs │ ├── polynomial │ ├── division.rs │ └── mod.rs │ ├── prime_field_testing.rs │ ├── secp256k1_base.rs │ ├── secp256k1_scalar.rs │ ├── types.rs │ └── zero_poly_coset.rs ├── insertion ├── Cargo.toml └── src │ ├── insert_gadget.rs │ ├── insertion_gate.rs │ └── lib.rs ├── maybe_rayon ├── Cargo.toml └── src │ └── lib.rs ├── plonky2 ├── Cargo.toml ├── benches │ ├── allocator │ │ └── mod.rs │ ├── ffts.rs │ ├── field_arithmetic.rs │ ├── hashing.rs │ ├── merkle.rs │ ├── reverse_index_bits.rs │ └── transpose.rs ├── examples │ ├── bench_recursion.rs │ ├── factorial.rs │ ├── fibonacci.rs │ └── square_root.rs └── src │ ├── bin │ └── generate_constants.rs │ ├── fri │ ├── challenges.rs │ ├── mod.rs │ ├── oracle.rs │ ├── proof.rs │ ├── prover.rs │ ├── recursive_verifier.rs │ ├── reduction_strategies.rs │ ├── structure.rs │ ├── validate_shape.rs │ ├── verifier.rs │ └── witness_util.rs │ ├── gadgets │ ├── arithmetic.rs │ ├── arithmetic_extension.rs │ ├── hash.rs │ ├── mod.rs │ ├── polynomial.rs │ ├── random_access.rs │ ├── range_check.rs │ ├── select.rs │ ├── split_base.rs │ └── split_join.rs │ ├── gates │ ├── arithmetic_base.rs │ ├── arithmetic_extension.rs │ ├── base_sum.rs │ ├── constant.rs │ ├── exponentiation.rs │ ├── gate.rs │ ├── gate_testing.rs │ ├── high_degree_interpolation.rs │ ├── interpolation.rs │ ├── low_degree_interpolation.rs │ ├── mod.rs │ ├── multiplication_extension.rs │ ├── noop.rs │ ├── packed_util.rs │ ├── poseidon.rs │ ├── poseidon_mds.rs │ ├── public_input.rs │ ├── random_access.rs │ ├── reducing.rs │ ├── reducing_extension.rs │ 
├── selectors.rs │ └── util.rs │ ├── hash │ ├── arch │ │ ├── aarch64 │ │ │ ├── mod.rs │ │ │ └── poseidon_goldilocks_neon.rs │ │ ├── mod.rs │ │ └── x86_64 │ │ │ ├── mod.rs │ │ │ └── poseidon_goldilocks_avx2_bmi2.rs │ ├── hash_types.rs │ ├── hashing.rs │ ├── keccak.rs │ ├── merkle_proofs.rs │ ├── merkle_tree.rs │ ├── mod.rs │ ├── path_compression.rs │ ├── poseidon.rs │ ├── poseidon_crandall.rs │ └── poseidon_goldilocks.rs │ ├── iop │ ├── challenger.rs │ ├── ext_target.rs │ ├── generator.rs │ ├── mod.rs │ ├── target.rs │ ├── wire.rs │ └── witness.rs │ ├── lib.rs │ ├── plonk │ ├── circuit_builder.rs │ ├── circuit_data.rs │ ├── config.rs │ ├── copy_constraint.rs │ ├── get_challenges.rs │ ├── mod.rs │ ├── permutation_argument.rs │ ├── plonk_common.rs │ ├── proof.rs │ ├── prover.rs │ ├── validate_shape.rs │ ├── vanishing_poly.rs │ ├── vars.rs │ └── verifier.rs │ ├── recursion │ ├── conditional_recursive_verifier.rs │ ├── cyclic_recursion.rs │ ├── dummy_circuit.rs │ ├── mod.rs │ ├── recursive_verifier.rs │ └── tree_recursion.rs │ └── util │ ├── context_tree.rs │ ├── mod.rs │ ├── partial_products.rs │ ├── reducing.rs │ ├── serialization.rs │ ├── strided_view.rs │ └── timing.rs ├── starky ├── Cargo.toml └── src │ ├── config.rs │ ├── constraint_consumer.rs │ ├── fibonacci_stark.rs │ ├── get_challenges.rs │ ├── lib.rs │ ├── permutation.rs │ ├── proof.rs │ ├── prover.rs │ ├── recursive_verifier.rs │ ├── stark.rs │ ├── stark_testing.rs │ ├── util.rs │ ├── vanishing_poly.rs │ ├── vars.rs │ └── verifier.rs ├── system_zero ├── Cargo.toml ├── benches │ ├── allocator │ │ └── mod.rs │ └── lookup_permuted_cols.rs └── src │ ├── alu │ ├── addition.rs │ ├── bitops.rs │ ├── canonical.rs │ ├── division.rs │ ├── mod.rs │ ├── mul_add.rs │ ├── rotate_shift.rs │ └── subtraction.rs │ ├── core_registers.rs │ ├── lib.rs │ ├── lookup.rs │ ├── memory.rs │ ├── permutation_unit.rs │ ├── public_input_layout.rs │ ├── registers │ ├── alu.rs │ ├── boolean.rs │ ├── core.rs │ ├── logic.rs │ ├── lookup.rs │ 
├── memory.rs │ ├── mod.rs │ ├── permutation.rs │ ├── range_check_16.rs │ └── range_check_degree.rs │ └── system_zero.rs ├── u32 ├── Cargo.toml └── src │ ├── gadgets │ ├── arithmetic_u32.rs │ ├── mod.rs │ ├── multiple_comparison.rs │ └── range_check.rs │ ├── gates │ ├── add_many_u32.rs │ ├── arithmetic_u32.rs │ ├── comparison.rs │ ├── mod.rs │ ├── range_check_u32.rs │ └── subtraction_u32.rs │ ├── lib.rs │ └── witness.rs ├── util ├── Cargo.toml └── src │ ├── lib.rs │ └── transpose_util.rs └── waksman ├── Cargo.toml └── src ├── bimap.rs ├── gates ├── assert_le.rs ├── mod.rs └── switch.rs ├── lib.rs ├── permutation.rs └── sorting.rs /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | Cargo.lock 9 | 10 | # These are backup files generated by rustfmt 11 | **/*.rs.bk 12 | 13 | # MSVC Windows builds of rustc generate these, which store debugging information 14 | *.pdb 15 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["ecdsa", "evm", "field", "insertion", "maybe_rayon", "plonky2", "starky", "system_zero", "u32", "util", "waksman"] 3 | 4 | [profile.release] 5 | opt-level = 3 6 | #lto = "fat" 7 | #codegen-units = 1 8 | 9 | [profile.bench] 10 | opt-level = 3 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # Welcome to the `plonky2-gpu` Repository! 
3 | 4 | In the pursuit of cryptographic efficiency, we introduce `plonky2-gpu` — a GPU-accelerated version of the [Plonky2](https://github.com/0xPolygonZero/plonky2) project. This iteration leverages the CUDA framework and represents a comprehensive reengineering of the original Plonky2 codebase. It specifically focuses on optimizing three key computations: Fast Fourier Transform (FFT), Merkle tree construction, and polynomial manipulation. 5 | 6 | **Hardware Requirements:** 7 | - **CPU:** 8 cores 8 | - **RAM:** 16GB 9 | - **GPU:** NVIDIA 2080 Ti 10 | - **GPU RAM:** 12GB 11 | - **CUDA Version**: 12+ 12 | 13 | **Examples:** 14 | - [`Plonky2-25519`](https://github.com/sideprotocol/plonky2-ed25519): Discover the enhanced performance, specially optimized for ed25519 signatures. This accelerated implementation has notably reduced the proving time for ed25519 signatures from 45 seconds to just 5 seconds. Experience the optimized performance specifically tailored for ed25519 signatures. 15 | 16 | **DISCLAIMER: This implementation is currently in its early stages and is not deemed production-ready. 
Use for experimentation and exploration purposes only.** 17 | 18 | ## Contributor 19 | 20 | - [Side Labs](https://sidelabs.co) 21 | -------------------------------------------------------------------------------- /cuda/ArithmeticGate.cuh: -------------------------------------------------------------------------------- 1 | 2 | struct ArithmeticGate INHERIT_BASE { 3 | int num_ops; 4 | 5 | __device__ inline 6 | usize wire_ith_multiplicand_0(usize i) { 7 | return 4 * i; 8 | } 9 | __device__ inline 10 | usize wire_ith_multiplicand_1(usize i) { 11 | return 4 * i + 1; 12 | } 13 | __device__ inline 14 | usize wire_ith_addend(usize i) { 15 | return 4 * i + 2; 16 | } 17 | __device__ inline 18 | usize wire_ith_output(usize i) { 19 | return 4 * i + 3; 20 | } 21 | 22 | __device__ inline 23 | VIRTUAL int num_constraints() const OVERRIDE { 24 | return num_ops; 25 | } 26 | 27 | __device__ inline 28 | VIRTUAL void eval_unfiltered_base_packed( 29 | EvaluationVarsBasePacked vars, 30 | StridedConstraintConsumer yield_constr) OVERRIDE { 31 | auto const_0 = vars.local_constants[0]; 32 | auto const_1 = vars.local_constants[1]; 33 | 34 | for (int i = 0; i < num_ops; ++i) { 35 | auto multiplicand_0 = vars.local_wires[wire_ith_multiplicand_0(i)]; 36 | auto multiplicand_1 = vars.local_wires[wire_ith_multiplicand_1(i)]; 37 | auto addend = vars.local_wires[wire_ith_addend(i)]; 38 | auto output = vars.local_wires[wire_ith_output(i)]; 39 | auto computed_output = multiplicand_0 * multiplicand_1 * const_0 + addend * const_1; 40 | 41 | yield_constr.one(output - computed_output); 42 | } 43 | } 44 | 45 | 46 | }; 47 | 48 | -------------------------------------------------------------------------------- /cuda/BaseSumGate.cuh: -------------------------------------------------------------------------------- 1 | /// Returns the index of the `i`th limb wire. 
2 | 3 | 4 | template 5 | struct BaseSumGate INHERIT_BASE{ 6 | usize num_limbs; 7 | 8 | 9 | static constexpr usize WIRE_SUM = 0; 10 | static constexpr usize START_LIMBS = 1; 11 | 12 | __device__ inline 13 | Range limbs() { 14 | return Range{START_LIMBS, START_LIMBS + num_limbs}; 15 | } 16 | 17 | __device__ inline 18 | VIRTUAL int num_constraints() const OVERRIDE { 19 | return 1 + this->num_limbs; 20 | } 21 | 22 | __device__ inline 23 | VIRTUAL void eval_unfiltered_base_packed( 24 | EvaluationVarsBasePacked vars, 25 | StridedConstraintConsumer yield_constr) OVERRIDE { 26 | auto sum = vars.local_wires[WIRE_SUM]; 27 | auto limbs = vars.local_wires.view(this->limbs()); 28 | auto computed_sum = reduce_with_powers(limbs, GoldilocksField::from_canonical_u64(B)); 29 | 30 | // auto computed_sum = GoldilocksField{0}; 31 | // auto range = Range{0, limbs.len}; 32 | // for (int i = range.second-1; i >= range.first; --i) { 33 | // auto limb = [limbs](int i) ->GoldilocksField { 34 | // return limbs[i]; 35 | // }(i); 36 | // computed_sum = computed_sum * GoldilocksField::from_canonical_u64(B) + limb; 37 | //// if (vars.index == 1048576) { 38 | //// printf("i: %d, r: %d, ", vars.index, i); 39 | //// limb.print_hex("limb", GoldilocksField::colum_space); 40 | //// computed_sum.print_hex("computed_sum", GoldilocksField::newline); 41 | //// } 42 | // } 43 | 44 | // auto computed_sum = 45 | // reduce_with_powers(Range{0, limbs.len}, [limbs](int i) ->GoldilocksField { 46 | // return limbs[i]; 47 | // }, GoldilocksField::from_canonical_u64(B)); 48 | 49 | // auto index = vars.index; 50 | // if (index == 1048576) { 51 | // printf("i: %d, ", index); 52 | // sum.print_hex("sum", GoldilocksField::colum_space); 53 | // computed_sum.print_hex("computed_sum", GoldilocksField::newline); 54 | // } 55 | 56 | yield_constr.one(computed_sum - sum); 57 | 58 | for (auto limb: limbs) { 59 | GoldilocksField product = GoldilocksField{1}; 60 | for (int i = 0; i < B; ++i) { 61 | product *= limb - 
GoldilocksField::from_canonical_u64(i); 62 | } 63 | yield_constr.one(product); 64 | } 65 | 66 | } 67 | }; 68 | 69 | 70 | -------------------------------------------------------------------------------- /cuda/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.16) 2 | project(PLONKY2_CUDA CUDA) 3 | set(CMAKE_CUDA_STANDARD 17) 4 | set(CMAKE_CUDA_ARCHITECTURES 75) 5 | 6 | set(SRC_LIST 7 | plonky2_gpu.cu 8 | test.cu 9 | # plonky2_gpu.cu 10 | ) 11 | #add_definitions(-D__CUDA_ARCH__) 12 | 13 | #add_compile_options(-G) 14 | 15 | #add_library(plonky2_gpu SHARED ${SRC_LIST}) 16 | add_executable(cutest ${SRC_LIST}) 17 | 18 | -------------------------------------------------------------------------------- /cuda/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "plonky2_cuda" 3 | version = "0.1.0" 4 | authors = [ "unknown" ] 5 | description = "gpu accelerate for plonky2" 6 | edition = "2021" 7 | 8 | [build-dependencies.cc] 9 | version = "^1.0.70" 10 | 11 | [build-dependencies.which] 12 | version = "^4.0" 13 | -------------------------------------------------------------------------------- /cuda/ConstantGate.cuh: -------------------------------------------------------------------------------- 1 | struct ConstantGate INHERIT_BASE { 2 | usize num_consts; 3 | 4 | __device__ inline 5 | usize const_input(usize i) { 6 | assert(i < this->num_consts); 7 | return i; 8 | } 9 | 10 | __device__ inline 11 | usize wire_output(usize i) { 12 | assert(i < this->num_consts); 13 | return i; 14 | } 15 | 16 | __device__ inline 17 | VIRTUAL int num_constraints() const OVERRIDE { 18 | return this->num_consts; 19 | } 20 | 21 | __device__ inline 22 | VIRTUAL void eval_unfiltered_base_packed( 23 | EvaluationVarsBasePacked vars, 24 | StridedConstraintConsumer yield_constr) OVERRIDE { 25 | for (int i = 0; i < num_consts; ++i) { 26 | 
yield_constr.one(vars.local_constants[this->const_input(i)] - vars.local_wires[this->wire_output(i)]); 27 | } 28 | } 29 | 30 | }; 31 | 32 | -------------------------------------------------------------------------------- /cuda/NoopGate.cuh: -------------------------------------------------------------------------------- 1 | struct NoopGate INHERIT_BASE { 2 | __device__ inline 3 | VIRTUAL int num_constraints() const OVERRIDE { 4 | return 0; 5 | } 6 | 7 | 8 | __device__ inline 9 | VIRTUAL void eval_unfiltered_base_packed( 10 | EvaluationVarsBasePacked vars, 11 | StridedConstraintConsumer yield_constr) OVERRIDE { 12 | } 13 | 14 | }; 15 | 16 | -------------------------------------------------------------------------------- /cuda/PublicInputGate.cuh: -------------------------------------------------------------------------------- 1 | struct PublicInputGate :public Gate { 2 | 3 | __device__ inline 4 | VIRTUAL int num_constraints() const OVERRIDE { 5 | return 4; 6 | } 7 | 8 | __device__ inline 9 | VIRTUAL void eval_unfiltered_base_packed( 10 | EvaluationVarsBasePacked vars, 11 | StridedConstraintConsumer yield_constr) OVERRIDE { 12 | for (int i = 0; i < 4; ++i) { 13 | auto wire = i; 14 | auto hash_part = vars.public_inputs_hash.elements[i]; 15 | yield_constr.one(vars.local_wires[wire] - hash_part); 16 | } 17 | } 18 | 19 | }; 20 | 21 | -------------------------------------------------------------------------------- /cuda/U32RangeCheckGate.cuh: -------------------------------------------------------------------------------- 1 | struct U32RangeCheckGate INHERIT_BASE { 2 | usize num_input_limbs; 3 | typedef U32RangeCheckGate Self; 4 | 5 | static constexpr usize AUX_LIMB_BITS = 2; 6 | static constexpr usize BASE = 1 << AUX_LIMB_BITS; 7 | 8 | __device__ inline 9 | usize aux_limbs_per_input_limb() const { 10 | return ceil_div_usize(32, AUX_LIMB_BITS); 11 | } 12 | __device__ inline 13 | usize wire_ith_input_limb(usize i) { 14 | assert(i < this->num_input_limbs); 15 | return i; 16 
| } 17 | __device__ inline 18 | usize wire_ith_input_limb_jth_aux_limb(usize i, usize j) { 19 | assert(i < this->num_input_limbs); 20 | assert(j < this->aux_limbs_per_input_limb()); 21 | return this->num_input_limbs + this->aux_limbs_per_input_limb() * i + j; 22 | } 23 | 24 | __device__ inline 25 | VIRTUAL int num_constraints() const OVERRIDE { 26 | return this->num_input_limbs * (1 + this->aux_limbs_per_input_limb()); 27 | } 28 | 29 | __device__ inline 30 | VIRTUAL void eval_unfiltered_base_one( 31 | EvaluationVarsBasePacked vars, 32 | StridedConstraintConsumer yield_constr) OVERRIDE { 33 | auto base = GoldilocksField::from_canonical_usize(BASE); 34 | for (int i = 0; i < this->num_input_limbs; ++i) { 35 | auto input_limb = vars.local_wires[this->wire_ith_input_limb(i)]; 36 | auto aux_limbs_range = Range{0, this->aux_limbs_per_input_limb()}; 37 | auto aux_limbs = [vars, this, i](int j) -> GoldilocksField { 38 | return vars.local_wires[this->wire_ith_input_limb_jth_aux_limb(i, j)]; 39 | }; 40 | auto computed_sum = reduce_with_powers(aux_limbs_range, aux_limbs, base); 41 | 42 | yield_constr.one(computed_sum - input_limb); 43 | for (auto j: aux_limbs_range) { 44 | auto aux_limb = aux_limbs(j); 45 | GoldilocksField product = {1}; 46 | for (int k = 0; k < BASE; ++k) { 47 | product *= aux_limb - GoldilocksField::from_canonical_usize(k); 48 | } 49 | yield_constr.one(product); 50 | } 51 | } 52 | } 53 | 54 | __device__ inline 55 | VIRTUAL void eval_unfiltered_base_packed( 56 | EvaluationVarsBasePacked vars, 57 | StridedConstraintConsumer yield_constr) OVERRIDE { 58 | eval_unfiltered_base_one( 59 | vars, 60 | yield_constr 61 | ); 62 | } 63 | 64 | }; 65 | 66 | -------------------------------------------------------------------------------- /cuda/U32SubtractionGate.cuh: -------------------------------------------------------------------------------- 1 | struct U32SubtractionGate INHERIT_BASE { 2 | usize num_ops; 3 | typedef U32SubtractionGate Self; 4 | 5 | 6 | 7 | __device__ 
inline 8 | usize wire_ith_input_x(usize i) { 9 | assert(i < this->num_ops); 10 | return 5 * i; 11 | } 12 | __device__ inline 13 | usize wire_ith_input_y(usize i) { 14 | assert(i < this->num_ops); 15 | return 5 * i + 1; 16 | } 17 | __device__ inline 18 | usize wire_ith_input_borrow(usize i) { 19 | assert(i < this->num_ops); 20 | return 5 * i + 2; 21 | } 22 | 23 | __device__ inline 24 | usize wire_ith_output_result(usize i) { 25 | assert(i < this->num_ops); 26 | return 5 * i + 3; 27 | } 28 | __device__ inline 29 | usize wire_ith_output_borrow(usize i) { 30 | assert(i < this->num_ops); 31 | return 5 * i + 4; 32 | } 33 | 34 | __device__ inline 35 | static constexpr usize limb_bits() { 36 | return 2; 37 | } 38 | // We have limbs for the 32 bits of `output_result`. 39 | __device__ inline 40 | static constexpr usize num_limbs() { 41 | return 32 / Self::limb_bits(); 42 | } 43 | 44 | __device__ inline 45 | usize wire_ith_output_jth_limb(usize i, usize j) { 46 | assert(i < this->num_ops); 47 | assert(j < Self::num_limbs()); 48 | return 5 * this->num_ops + Self::num_limbs() * i + j; 49 | } 50 | 51 | 52 | 53 | 54 | 55 | __device__ inline 56 | VIRTUAL int num_constraints() const OVERRIDE { 57 | return this->num_ops * (3 + Self::num_limbs()); 58 | } 59 | 60 | __device__ inline 61 | VIRTUAL void eval_unfiltered_base_packed( 62 | EvaluationVarsBasePacked vars, 63 | StridedConstraintConsumer yield_constr) OVERRIDE { 64 | for (int i = 0; i < this->num_ops; ++i) { 65 | auto input_x = vars.local_wires[this->wire_ith_input_x(i)]; 66 | auto input_y = vars.local_wires[this->wire_ith_input_y(i)]; 67 | auto input_borrow = vars.local_wires[this->wire_ith_input_borrow(i)]; 68 | 69 | auto result_initial = input_x - input_y - input_borrow; 70 | auto base = GoldilocksField::from_canonical_u64(1ULL << 32); 71 | 72 | auto output_result = vars.local_wires[this->wire_ith_output_result(i)]; 73 | auto output_borrow = vars.local_wires[this->wire_ith_output_borrow(i)]; 74 | 75 | 
yield_constr.one(output_result - (result_initial + output_borrow * base)); 76 | 77 | // Range-check output_result to be at most 32 bits. 78 | auto combined_limbs = GoldilocksField{0}; 79 | auto limb_base = GoldilocksField::from_canonical_u64(1ULL << Self::limb_bits()); 80 | for (int j = Self::num_limbs()-1; j >=0; --j) { 81 | auto this_limb = vars.local_wires[this->wire_ith_output_jth_limb(i, j)]; 82 | auto max_limb = 1 << Self::limb_bits(); 83 | GoldilocksField product = {1}; 84 | for (int x = 0; x < max_limb; ++x) { 85 | product *= this_limb - GoldilocksField::from_canonical_usize(x); 86 | } 87 | yield_constr.one(product); 88 | 89 | combined_limbs = combined_limbs * limb_base + this_limb; 90 | } 91 | yield_constr.one(combined_limbs - output_result); 92 | 93 | // Range-check output_borrow to be one bit. 94 | yield_constr.one(output_borrow * (GoldilocksField{1} - output_borrow)); 95 | } 96 | } 97 | }; 98 | 99 | -------------------------------------------------------------------------------- /cuda/build.rs: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2019-2022 Aleo Systems Inc. 2 | // This file is part of the snarkVM library. 3 | 4 | // The snarkVM library is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // The snarkVM library is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with the snarkVM library. If not, see . 
16 | 17 | use std::{env, path::PathBuf}; 18 | 19 | fn main() { 20 | 21 | // Detect if there is CUDA compiler and engage "cuda" feature accordingly 22 | let nvcc = match env::var("NVCC") { 23 | Ok(var) => which::which(var), 24 | Err(_) => which::which("nvcc"), 25 | }; 26 | 27 | if nvcc.is_ok() { 28 | let mut nvcc = cc::Build::new(); 29 | nvcc.cuda(true); 30 | nvcc.flag("-g"); 31 | nvcc.flag("-O5"); 32 | nvcc.flag("-arch=sm_75"); 33 | nvcc.flag("-maxrregcount=255"); 34 | // nvcc.flag("-Xcompiler").flag("-Wno-unused-function"); 35 | // nvcc.flag("-Xcompiler").flag("-Wno-subobject-linkage"); 36 | nvcc.file("plonky2_gpu.cu").compile("plonky2_cuda"); 37 | 38 | println!("cargo:rustc-cfg=feature=\"cuda\""); 39 | println!("cargo:rerun-if-changed=cuda"); 40 | println!("cargo:rerun-if-env-changed=CXXFLAGS"); 41 | } else { 42 | println!("nvcc must be in the path. Consider adding /usr/local/cuda/bin."); 43 | // panic!(); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /cuda/gates-def.cuh: -------------------------------------------------------------------------------- 1 | #ifndef GATES_DEF_CUH 2 | #define GATES_DEF_CUH 3 | 4 | #include "def.cuh" 5 | 6 | struct Gate { 7 | __device__ inline 8 | virtual int num_constraints() const = 0; 9 | __device__ inline 10 | virtual void eval_unfiltered_base_packed( 11 | EvaluationVarsBasePacked vars, 12 | StridedConstraintConsumer yield_constr) {}; 13 | 14 | __device__ inline 15 | virtual void eval_unfiltered_base_one( 16 | EvaluationVarsBasePacked vars, 17 | StridedConstraintConsumer yield_constr) {}; 18 | 19 | __device__ inline 20 | virtual void eval_unfiltered_base_batch( 21 | EvaluationVarsBasePacked vars, 22 | GoldilocksField* constraints_batch, 23 | GoldilocksField* terms 24 | ) { 25 | 26 | eval_unfiltered_base_packed( 27 | vars, 28 | StridedConstraintConsumer{terms} 29 | ); 30 | } 31 | }; 32 | 33 | #define USE_VIRTUAL_FUN 0 34 | 35 | #if USE_VIRTUAL_FUN 36 | #define INHERIT_BASE : 
public Gate 37 | #define VIRTUAL virtual 38 | #define OVERRIDE override 39 | #else 40 | #define INHERIT_BASE 41 | #define VIRTUAL 42 | #define OVERRIDE 43 | 44 | #endif 45 | 46 | #include "ArithmeticGate.cuh" 47 | #include "BaseSumGate.cuh" 48 | 49 | #include "ComparisonGate.cuh" 50 | #include "ConstantGate.cuh" 51 | #include "NoopGate.cuh" 52 | #include "PoseidonGate.cuh" 53 | #include "PublicInputGate.cuh" 54 | #include "RandomAccessGate.cuh" 55 | #include "U32AddManyGate.cuh" 56 | #include "U32ArithmeticGate.cuh" 57 | #include "U32RangeCheckGate.cuh" 58 | #include "U32SubtractionGate.cuh" 59 | 60 | 61 | 62 | #endif 63 | 64 | -------------------------------------------------------------------------------- /cuda/src/lib.c: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2019-2022 Aleo Systems Inc. 2 | // This file is part of the snarkVM library. 3 | 4 | // The snarkVM library is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // The snarkVM library is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with the snarkVM library. If not, see . 
16 | -------------------------------------------------------------------------------- /ecdsa/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "plonky2_ecdsa" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [features] 7 | parallel = ["maybe_rayon/parallel", "plonky2/parallel"] 8 | 9 | [dependencies] 10 | anyhow = { version = "1.0.40", default-features = false } 11 | itertools = { version = "0.10.0", default-features = false } 12 | maybe_rayon = { path = "../maybe_rayon", default-features = false } 13 | num = { version = "0.4.0", default-features = false } 14 | plonky2 = { path = "../plonky2", default-features = false } 15 | plonky2_u32 = { path = "../u32", default-features = false } 16 | serde = { version = "1.0", default-features = false, features = ["derive"] } 17 | 18 | [dev-dependencies] 19 | rand = { version = "0.8.4", default-features = false, features = ["getrandom"] } 20 | -------------------------------------------------------------------------------- /ecdsa/src/curve/curve_multiplication.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | use core::ops::Mul; 3 | 4 | use plonky2::field::types::{Field, PrimeField}; 5 | 6 | use crate::curve::curve_types::{Curve, CurveScalar, ProjectivePoint}; 7 | 8 | const WINDOW_BITS: usize = 4; 9 | const BASE: usize = 1 << WINDOW_BITS; 10 | 11 | fn digits_per_scalar() -> usize { 12 | (C::ScalarField::BITS + WINDOW_BITS - 1) / WINDOW_BITS 13 | } 14 | 15 | /// Precomputed state used for scalar x ProjectivePoint multiplications, 16 | /// specific to a particular generator. 17 | #[derive(Clone)] 18 | pub struct MultiplicationPrecomputation { 19 | /// [(2^w)^i] g for each i < digits_per_scalar. 
20 | powers: Vec>, 21 | } 22 | 23 | impl ProjectivePoint { 24 | pub fn mul_precompute(&self) -> MultiplicationPrecomputation { 25 | let num_digits = digits_per_scalar::(); 26 | let mut powers = Vec::with_capacity(num_digits); 27 | powers.push(*self); 28 | for i in 1..num_digits { 29 | let mut power_i = powers[i - 1]; 30 | for _j in 0..WINDOW_BITS { 31 | power_i = power_i.double(); 32 | } 33 | powers.push(power_i); 34 | } 35 | 36 | MultiplicationPrecomputation { powers } 37 | } 38 | 39 | #[must_use] 40 | pub fn mul_with_precomputation( 41 | &self, 42 | scalar: C::ScalarField, 43 | precomputation: MultiplicationPrecomputation, 44 | ) -> Self { 45 | // Yao's method; see https://koclab.cs.ucsb.edu/teaching/ecc/eccPapers/Doche-ch09.pdf 46 | let precomputed_powers = precomputation.powers; 47 | 48 | let digits = to_digits::(&scalar); 49 | 50 | let mut y = ProjectivePoint::ZERO; 51 | let mut u = ProjectivePoint::ZERO; 52 | let mut all_summands = Vec::new(); 53 | for j in (1..BASE).rev() { 54 | let mut u_summands = Vec::new(); 55 | for (i, &digit) in digits.iter().enumerate() { 56 | if digit == j as u64 { 57 | u_summands.push(precomputed_powers[i]); 58 | } 59 | } 60 | all_summands.push(u_summands); 61 | } 62 | 63 | let all_sums: Vec> = all_summands 64 | .iter() 65 | .cloned() 66 | .map(|vec| vec.iter().fold(ProjectivePoint::ZERO, |a, &b| a + b)) 67 | .collect(); 68 | for i in 0..all_sums.len() { 69 | u = u + all_sums[i]; 70 | y = y + u; 71 | } 72 | y 73 | } 74 | } 75 | 76 | impl Mul> for CurveScalar { 77 | type Output = ProjectivePoint; 78 | 79 | fn mul(self, rhs: ProjectivePoint) -> Self::Output { 80 | let precomputation = rhs.mul_precompute(); 81 | rhs.mul_with_precomputation(self.0, precomputation) 82 | } 83 | } 84 | 85 | #[allow(clippy::assertions_on_constants)] 86 | fn to_digits(x: &C::ScalarField) -> Vec { 87 | debug_assert!( 88 | 64 % WINDOW_BITS == 0, 89 | "For simplicity, only power-of-two window sizes are handled for now" 90 | ); 91 | let digits_per_u64 = 64 / 
WINDOW_BITS; 92 | let mut digits = Vec::with_capacity(digits_per_scalar::()); 93 | for limb in x.to_canonical_biguint().to_u64_digits() { 94 | for j in 0..digits_per_u64 { 95 | digits.push((limb >> (j * WINDOW_BITS) as u64) % BASE as u64); 96 | } 97 | } 98 | 99 | digits 100 | } 101 | -------------------------------------------------------------------------------- /ecdsa/src/curve/ecdsa.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::types::{Field, Sample}; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | use crate::curve::curve_msm::msm_parallel; 5 | use crate::curve::curve_types::{base_to_scalar, AffinePoint, Curve, CurveScalar}; 6 | 7 | #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] 8 | pub struct ECDSASignature { 9 | pub r: C::ScalarField, 10 | pub s: C::ScalarField, 11 | } 12 | 13 | #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] 14 | pub struct ECDSASecretKey(pub C::ScalarField); 15 | 16 | impl ECDSASecretKey { 17 | pub fn to_public(&self) -> ECDSAPublicKey { 18 | ECDSAPublicKey((CurveScalar(self.0) * C::GENERATOR_PROJECTIVE).to_affine()) 19 | } 20 | } 21 | 22 | #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] 23 | pub struct ECDSAPublicKey(pub AffinePoint); 24 | 25 | pub fn sign_message(msg: C::ScalarField, sk: ECDSASecretKey) -> ECDSASignature { 26 | let (k, rr) = { 27 | let mut k = C::ScalarField::rand(); 28 | let mut rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); 29 | while rr.x == C::BaseField::ZERO { 30 | k = C::ScalarField::rand(); 31 | rr = (CurveScalar(k) * C::GENERATOR_PROJECTIVE).to_affine(); 32 | } 33 | (k, rr) 34 | }; 35 | let r = base_to_scalar::(rr.x); 36 | 37 | let s = k.inverse() * (msg + r * sk.0); 38 | 39 | ECDSASignature { r, s } 40 | } 41 | 42 | pub fn verify_message( 43 | msg: C::ScalarField, 44 | sig: ECDSASignature, 45 | pk: ECDSAPublicKey, 46 | ) -> bool { 47 | let ECDSASignature { 
r, s } = sig; 48 | 49 | assert!(pk.0.is_valid()); 50 | 51 | let c = s.inverse(); 52 | let u1 = msg * c; 53 | let u2 = r * c; 54 | 55 | let g = C::GENERATOR_PROJECTIVE; 56 | let w = 5; // Experimentally fastest 57 | let point_proj = msm_parallel(&[u1, u2], &[g, pk.0.to_projective()], w); 58 | let point = point_proj.to_affine(); 59 | 60 | let x = base_to_scalar::(point.x); 61 | r == x 62 | } 63 | 64 | #[cfg(test)] 65 | mod tests { 66 | use plonky2::field::secp256k1_scalar::Secp256K1Scalar; 67 | use plonky2::field::types::Sample; 68 | 69 | use crate::curve::ecdsa::{sign_message, verify_message, ECDSASecretKey}; 70 | use crate::curve::secp256k1::Secp256K1; 71 | 72 | #[test] 73 | fn test_ecdsa_native() { 74 | type C = Secp256K1; 75 | 76 | let msg = Secp256K1Scalar::rand(); 77 | let sk = ECDSASecretKey::(Secp256K1Scalar::rand()); 78 | let pk = sk.to_public(); 79 | 80 | let sig = sign_message(msg, sk); 81 | let result = verify_message(msg, sig, pk); 82 | assert!(result); 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /ecdsa/src/curve/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod curve_adds; 2 | pub mod curve_msm; 3 | pub mod curve_multiplication; 4 | pub mod curve_summation; 5 | pub mod curve_types; 6 | pub mod ecdsa; 7 | pub mod glv; 8 | pub mod secp256k1; 9 | -------------------------------------------------------------------------------- /ecdsa/src/curve/secp256k1.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::secp256k1_base::Secp256K1Base; 2 | use plonky2::field::secp256k1_scalar::Secp256K1Scalar; 3 | use plonky2::field::types::Field; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | use crate::curve::curve_types::{AffinePoint, Curve}; 7 | 8 | #[derive(Debug, Copy, Clone, Deserialize, Eq, Hash, PartialEq, Serialize)] 9 | pub struct Secp256K1; 10 | 11 | impl Curve for Secp256K1 { 12 | type BaseField = 
Secp256K1Base; 13 | type ScalarField = Secp256K1Scalar; 14 | 15 | const A: Secp256K1Base = Secp256K1Base::ZERO; 16 | const B: Secp256K1Base = Secp256K1Base([7, 0, 0, 0]); 17 | const GENERATOR_AFFINE: AffinePoint = AffinePoint { 18 | x: SECP256K1_GENERATOR_X, 19 | y: SECP256K1_GENERATOR_Y, 20 | zero: false, 21 | }; 22 | } 23 | 24 | // 55066263022277343669578718895168534326250603453777594175500187360389116729240 25 | const SECP256K1_GENERATOR_X: Secp256K1Base = Secp256K1Base([ 26 | 0x59F2815B16F81798, 27 | 0x029BFCDB2DCE28D9, 28 | 0x55A06295CE870B07, 29 | 0x79BE667EF9DCBBAC, 30 | ]); 31 | 32 | /// 32670510020758816978083085130507043184471273380659243275938904335757337482424 33 | const SECP256K1_GENERATOR_Y: Secp256K1Base = Secp256K1Base([ 34 | 0x9C47D08FFB10D4B8, 35 | 0xFD17B448A6855419, 36 | 0x5DA4FBFC0E1108A8, 37 | 0x483ADA7726A3C465, 38 | ]); 39 | 40 | #[cfg(test)] 41 | mod tests { 42 | use num::BigUint; 43 | use plonky2::field::secp256k1_scalar::Secp256K1Scalar; 44 | use plonky2::field::types::{Field, PrimeField}; 45 | 46 | use crate::curve::curve_types::{AffinePoint, Curve, ProjectivePoint}; 47 | use crate::curve::secp256k1::Secp256K1; 48 | 49 | #[test] 50 | fn test_generator() { 51 | let g = Secp256K1::GENERATOR_AFFINE; 52 | assert!(g.is_valid()); 53 | 54 | let neg_g = AffinePoint:: { 55 | x: g.x, 56 | y: -g.y, 57 | zero: g.zero, 58 | }; 59 | assert!(neg_g.is_valid()); 60 | } 61 | 62 | #[test] 63 | fn test_naive_multiplication() { 64 | let g = Secp256K1::GENERATOR_PROJECTIVE; 65 | let ten = Secp256K1Scalar::from_canonical_u64(10); 66 | let product = mul_naive(ten, g); 67 | let sum = g + g + g + g + g + g + g + g + g + g; 68 | assert_eq!(product, sum); 69 | } 70 | 71 | #[test] 72 | fn test_g1_multiplication() { 73 | let lhs = Secp256K1Scalar::from_noncanonical_biguint(BigUint::from_slice(&[ 74 | 1111, 2222, 3333, 4444, 5555, 6666, 7777, 8888, 75 | ])); 76 | assert_eq!( 77 | Secp256K1::convert(lhs) * Secp256K1::GENERATOR_PROJECTIVE, 78 | mul_naive(lhs, 
Secp256K1::GENERATOR_PROJECTIVE) 79 | ); 80 | } 81 | 82 | /// A simple, somewhat inefficient implementation of multiplication which is used as a reference 83 | /// for correctness. 84 | fn mul_naive( 85 | lhs: Secp256K1Scalar, 86 | rhs: ProjectivePoint, 87 | ) -> ProjectivePoint { 88 | let mut g = rhs; 89 | let mut sum = ProjectivePoint::ZERO; 90 | for limb in lhs.to_canonical_biguint().to_u64_digits().iter() { 91 | for j in 0..64 { 92 | if (limb >> j & 1u64) != 0u64 { 93 | sum = sum + g; 94 | } 95 | g = g.double(); 96 | } 97 | } 98 | sum 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /ecdsa/src/gadgets/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod biguint; 2 | pub mod curve; 3 | pub mod curve_fixed_base; 4 | pub mod curve_msm; 5 | pub mod curve_windowed_mul; 6 | pub mod ecdsa; 7 | pub mod glv; 8 | pub mod nonnative; 9 | pub mod split_nonnative; 10 | -------------------------------------------------------------------------------- /ecdsa/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::needless_range_loop)] 2 | #![cfg_attr(not(test), no_std)] 3 | 4 | extern crate alloc; 5 | 6 | pub mod curve; 7 | pub mod gadgets; 8 | -------------------------------------------------------------------------------- /evm/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "plonky2_evm" 3 | description = "Implementation of STARKs for the Ethereum Virtual Machine" 4 | version = "0.1.0" 5 | edition = "2021" 6 | 7 | [dependencies] 8 | anyhow = "1.0.40" 9 | blake2 = "0.10.5" 10 | env_logger = "0.10.0" 11 | eth_trie_utils = "0.4.0" 12 | ethereum-types = "0.14.0" 13 | hex = { version = "0.4.3", optional = true } 14 | hex-literal = "0.3.4" 15 | itertools = "0.10.3" 16 | keccak-hash = "0.10.0" 17 | log = "0.4.14" 18 | maybe_rayon = { path = "../maybe_rayon" 
} 19 | num = "0.4.0" 20 | once_cell = "1.13.0" 21 | pest = "2.1.3" 22 | pest_derive = "2.1.0" 23 | plonky2 = { path = "../plonky2", default-features = false, features = ["timing"] } 24 | plonky2_util = { path = "../util" } 25 | rand = "0.8.5" 26 | rand_chacha = "0.3.1" 27 | rlp = "0.5.1" 28 | rlp-derive = "0.1.0" 29 | serde = { version = "1.0.144", features = ["derive"] } 30 | static_assertions = "1.1.0" 31 | tiny-keccak = "2.0.2" 32 | 33 | [target.'cfg(not(target_env = "msvc"))'.dependencies] 34 | jemallocator = "0.5.0" 35 | 36 | [dev-dependencies] 37 | criterion = "0.4.0" 38 | hex = "0.4.3" 39 | ripemd = "0.1.3" 40 | sha2 = "0.10.6" 41 | 42 | [features] 43 | default = ["parallel"] 44 | asmtools = ["hex"] 45 | parallel = ["plonky2/parallel", "maybe_rayon/parallel"] 46 | 47 | [[bin]] 48 | name = "assemble" 49 | required-features = ["asmtools"] 50 | 51 | [[bench]] 52 | name = "stack_manipulation" 53 | harness = false 54 | -------------------------------------------------------------------------------- /evm/benches/stack_manipulation.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 2 | use plonky2_evm::cpu::kernel::assemble_to_bytes; 3 | 4 | fn criterion_benchmark(c: &mut Criterion) { 5 | rotl_group(c); 6 | rotr_group(c); 7 | insert_group(c); 8 | delete_group(c); 9 | replace_group(c); 10 | shuffle_group(c); 11 | misc_group(c); 12 | } 13 | 14 | fn rotl_group(c: &mut Criterion) { 15 | let mut group = c.benchmark_group("rotl"); 16 | group.sample_size(10); 17 | group.bench_function(BenchmarkId::from_parameter(8), |b| { 18 | b.iter(|| assemble("%stack (a, b, c, d, e, f, g, h) -> (b, c, d, e, f, g, h, a)")) 19 | }); 20 | } 21 | 22 | fn rotr_group(c: &mut Criterion) { 23 | let mut group = c.benchmark_group("rotr"); 24 | group.sample_size(10); 25 | group.bench_function(BenchmarkId::from_parameter(8), |b| { 26 | b.iter(|| assemble("%stack (a, b, c, d, e, f, g, h) 
-> (h, a, b, c, d, e, f, g)")) 27 | }); 28 | } 29 | 30 | fn insert_group(c: &mut Criterion) { 31 | let mut group = c.benchmark_group("insert"); 32 | group.sample_size(10); 33 | group.bench_function(BenchmarkId::from_parameter(8), |b| { 34 | b.iter(|| assemble("%stack (a, b, c, d, e, f, g, h) -> (a, b, c, d, 123, e, f, g, h)")) 35 | }); 36 | } 37 | 38 | fn delete_group(c: &mut Criterion) { 39 | let mut group = c.benchmark_group("delete"); 40 | group.sample_size(10); 41 | group.bench_function(BenchmarkId::from_parameter(8), |b| { 42 | b.iter(|| assemble("%stack (a, b, c, d, e, f, g, h) -> (a, b, c, e, f, g, h)")) 43 | }); 44 | } 45 | 46 | fn replace_group(c: &mut Criterion) { 47 | let mut group = c.benchmark_group("replace"); 48 | group.sample_size(10); 49 | group.bench_function(BenchmarkId::from_parameter(8), |b| { 50 | b.iter(|| assemble("%stack (a, b, c, d, e, f, g, h) -> (a, b, c, 5, e, f, g, h)")) 51 | }); 52 | } 53 | 54 | fn shuffle_group(c: &mut Criterion) { 55 | let mut group = c.benchmark_group("shuffle"); 56 | group.sample_size(10); 57 | group.bench_function(BenchmarkId::from_parameter(8), |b| { 58 | b.iter(|| assemble("%stack (a, b, c, d, e, f, g, h) -> (g, d, h, a, f, e, b, c)")) 59 | }); 60 | } 61 | 62 | fn misc_group(c: &mut Criterion) { 63 | let mut group = c.benchmark_group("misc"); 64 | group.sample_size(10); 65 | group.bench_function(BenchmarkId::from_parameter(8), |b| { 66 | b.iter(|| assemble("%stack (a, b, c, a, e, f, g, h) -> (g, 1, h, g, f, 3, b, b)")) 67 | }); 68 | } 69 | 70 | criterion_group!(benches, criterion_benchmark); 71 | criterion_main!(benches); 72 | 73 | fn assemble(code: &str) { 74 | assemble_to_bytes(&[code.into()]); 75 | } 76 | -------------------------------------------------------------------------------- /evm/src/bin/assemble.rs: -------------------------------------------------------------------------------- 1 | use std::{env, fs}; 2 | 3 | use hex::encode; 4 | use plonky2_evm::cpu::kernel::assemble_to_bytes; 5 | 6 | fn main() 
{ 7 | let mut args = env::args(); 8 | args.next(); 9 | let file_contents: Vec<_> = args.map(|path| fs::read_to_string(path).unwrap()).collect(); 10 | let assembled = assemble_to_bytes(&file_contents[..]); 11 | println!("{}", encode(assembled)); 12 | } 13 | -------------------------------------------------------------------------------- /evm/src/config.rs: -------------------------------------------------------------------------------- 1 | use plonky2::fri::reduction_strategies::FriReductionStrategy; 2 | use plonky2::fri::{FriConfig, FriParams}; 3 | 4 | pub struct StarkConfig { 5 | pub security_bits: usize, 6 | 7 | /// The number of challenge points to generate, for IOPs that have soundness errors of (roughly) 8 | /// `degree / |F|`. 9 | pub num_challenges: usize, 10 | 11 | pub fri_config: FriConfig, 12 | } 13 | 14 | impl StarkConfig { 15 | /// A typical configuration with a rate of 2, resulting in fast but large proofs. 16 | /// Targets ~100 bit conjectured security. 17 | pub fn standard_fast_config() -> Self { 18 | Self { 19 | security_bits: 100, 20 | num_challenges: 2, 21 | fri_config: FriConfig { 22 | rate_bits: 1, 23 | cap_height: 4, 24 | proof_of_work_bits: 16, 25 | reduction_strategy: FriReductionStrategy::ConstantArityBits(4, 5), 26 | num_query_rounds: 84, 27 | }, 28 | } 29 | } 30 | 31 | pub(crate) fn fri_params(&self, degree_bits: usize) -> FriParams { 32 | self.fri_config.fri_params(degree_bits, false) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /evm/src/cpu/columns/ops.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::{Borrow, BorrowMut}; 2 | use std::mem::{size_of, transmute}; 3 | use std::ops::{Deref, DerefMut}; 4 | 5 | use crate::util::{indices_arr, transmute_no_compile_time_size_checks}; 6 | 7 | #[repr(C)] 8 | #[derive(Clone, Copy, Eq, PartialEq, Debug)] 9 | pub struct OpsColumnsView { 10 | // TODO: combine ADD, MUL, SUB, DIV, MOD, ADDFP254, 
MULFP254, SUBFP254, LT, and GT into one flag 11 | pub add: T, 12 | pub mul: T, 13 | pub sub: T, 14 | pub div: T, 15 | pub mod_: T, 16 | // TODO: combine ADDMOD, MULMOD into one flag 17 | pub addmod: T, 18 | pub mulmod: T, 19 | pub addfp254: T, 20 | pub mulfp254: T, 21 | pub subfp254: T, 22 | pub lt: T, 23 | pub gt: T, 24 | pub eq: T, // Note: This column must be 0 when is_cpu_cycle = 0. 25 | pub iszero: T, // Note: This column must be 0 when is_cpu_cycle = 0. 26 | // TODO: combine AND, OR, and XOR into one flag 27 | pub and: T, 28 | pub or: T, 29 | pub xor: T, 30 | pub not: T, 31 | pub byte: T, 32 | // TODO: combine SHL and SHR into one flag 33 | pub shl: T, 34 | pub shr: T, 35 | pub keccak_general: T, 36 | pub prover_input: T, 37 | pub pop: T, 38 | // TODO: combine JUMP and JUMPI into one flag 39 | pub jump: T, // Note: This column must be 0 when is_cpu_cycle = 0. 40 | pub jumpi: T, // Note: This column must be 0 when is_cpu_cycle = 0. 41 | pub pc: T, 42 | pub gas: T, 43 | pub jumpdest: T, 44 | pub push: T, 45 | pub dup: T, 46 | pub swap: T, 47 | // TODO: combine GET_CONTEXT and SET_CONTEXT into one flag 48 | pub get_context: T, 49 | pub set_context: T, 50 | pub consume_gas: T, 51 | pub exit_kernel: T, 52 | // TODO: combine MLOAD_GENERAL and MSTORE_GENERAL into one flag 53 | pub mload_general: T, 54 | pub mstore_general: T, 55 | 56 | pub syscall: T, 57 | } 58 | 59 | // `u8` is guaranteed to have a `size_of` of 1. 
60 | pub const NUM_OPS_COLUMNS: usize = size_of::>(); 61 | 62 | impl From<[T; NUM_OPS_COLUMNS]> for OpsColumnsView { 63 | fn from(value: [T; NUM_OPS_COLUMNS]) -> Self { 64 | unsafe { transmute_no_compile_time_size_checks(value) } 65 | } 66 | } 67 | 68 | impl From> for [T; NUM_OPS_COLUMNS] { 69 | fn from(value: OpsColumnsView) -> Self { 70 | unsafe { transmute_no_compile_time_size_checks(value) } 71 | } 72 | } 73 | 74 | impl Borrow> for [T; NUM_OPS_COLUMNS] { 75 | fn borrow(&self) -> &OpsColumnsView { 76 | unsafe { transmute(self) } 77 | } 78 | } 79 | 80 | impl BorrowMut> for [T; NUM_OPS_COLUMNS] { 81 | fn borrow_mut(&mut self) -> &mut OpsColumnsView { 82 | unsafe { transmute(self) } 83 | } 84 | } 85 | 86 | impl Deref for OpsColumnsView { 87 | type Target = [T; NUM_OPS_COLUMNS]; 88 | fn deref(&self) -> &Self::Target { 89 | unsafe { transmute(self) } 90 | } 91 | } 92 | 93 | impl DerefMut for OpsColumnsView { 94 | fn deref_mut(&mut self) -> &mut Self::Target { 95 | unsafe { transmute(self) } 96 | } 97 | } 98 | 99 | const fn make_col_map() -> OpsColumnsView { 100 | let indices_arr = indices_arr::(); 101 | unsafe { transmute::<[usize; NUM_OPS_COLUMNS], OpsColumnsView>(indices_arr) } 102 | } 103 | 104 | pub const COL_MAP: OpsColumnsView = make_col_map(); 105 | -------------------------------------------------------------------------------- /evm/src/cpu/contextops.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::extension::Extendable; 2 | use plonky2::field::packed::PackedField; 3 | use plonky2::hash::hash_types::RichField; 4 | use plonky2::iop::ext_target::ExtensionTarget; 5 | use plonky2::plonk::circuit_builder::CircuitBuilder; 6 | 7 | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; 8 | use crate::cpu::columns::CpuColumnsView; 9 | use crate::cpu::membus::NUM_GP_CHANNELS; 10 | 11 | fn eval_packed_get( 12 | lv: &CpuColumnsView
<P>
, 13 | yield_constr: &mut ConstraintConsumer
<P>
, 14 | ) { 15 | let filter = lv.op.get_context; 16 | let push_channel = lv.mem_channels[NUM_GP_CHANNELS - 1]; 17 | yield_constr.constraint(filter * (push_channel.value[0] - lv.context)); 18 | for &limb in &push_channel.value[1..] { 19 | yield_constr.constraint(filter * limb); 20 | } 21 | } 22 | 23 | fn eval_ext_circuit_get, const D: usize>( 24 | builder: &mut CircuitBuilder, 25 | lv: &CpuColumnsView>, 26 | yield_constr: &mut RecursiveConstraintConsumer, 27 | ) { 28 | let filter = lv.op.get_context; 29 | let push_channel = lv.mem_channels[NUM_GP_CHANNELS - 1]; 30 | { 31 | let diff = builder.sub_extension(push_channel.value[0], lv.context); 32 | let constr = builder.mul_extension(filter, diff); 33 | yield_constr.constraint(builder, constr); 34 | } 35 | for &limb in &push_channel.value[1..] { 36 | let constr = builder.mul_extension(filter, limb); 37 | yield_constr.constraint(builder, constr); 38 | } 39 | } 40 | 41 | fn eval_packed_set( 42 | lv: &CpuColumnsView
<P>
, 43 | nv: &CpuColumnsView
<P>
, 44 | yield_constr: &mut ConstraintConsumer
<P>
, 45 | ) { 46 | let filter = lv.op.set_context; 47 | let pop_channel = lv.mem_channels[0]; 48 | yield_constr.constraint_transition(filter * (pop_channel.value[0] - nv.context)); 49 | } 50 | 51 | fn eval_ext_circuit_set, const D: usize>( 52 | builder: &mut CircuitBuilder, 53 | lv: &CpuColumnsView>, 54 | nv: &CpuColumnsView>, 55 | yield_constr: &mut RecursiveConstraintConsumer, 56 | ) { 57 | let filter = lv.op.set_context; 58 | let pop_channel = lv.mem_channels[0]; 59 | 60 | let diff = builder.sub_extension(pop_channel.value[0], nv.context); 61 | let constr = builder.mul_extension(filter, diff); 62 | yield_constr.constraint_transition(builder, constr); 63 | } 64 | 65 | pub fn eval_packed( 66 | lv: &CpuColumnsView
<P>
, 67 | nv: &CpuColumnsView
<P>
, 68 | yield_constr: &mut ConstraintConsumer
<P>
, 69 | ) { 70 | eval_packed_get(lv, yield_constr); 71 | eval_packed_set(lv, nv, yield_constr); 72 | } 73 | 74 | pub fn eval_ext_circuit, const D: usize>( 75 | builder: &mut CircuitBuilder, 76 | lv: &CpuColumnsView>, 77 | nv: &CpuColumnsView>, 78 | yield_constr: &mut RecursiveConstraintConsumer, 79 | ) { 80 | eval_ext_circuit_get(builder, lv, yield_constr); 81 | eval_ext_circuit_set(builder, lv, nv, yield_constr); 82 | } 83 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/ast.rs: -------------------------------------------------------------------------------- 1 | use ethereum_types::U256; 2 | 3 | use crate::generation::prover_input::ProverInputFn; 4 | 5 | #[derive(Debug)] 6 | pub(crate) struct File { 7 | pub(crate) body: Vec, 8 | } 9 | 10 | #[derive(Eq, PartialEq, Clone, Debug)] 11 | pub(crate) enum Item { 12 | /// Defines a new macro: name, params, body. 13 | MacroDef(String, Vec, Vec), 14 | /// Calls a macro: name, args. 15 | MacroCall(String, Vec), 16 | /// Repetition, like `%rep` in NASM. 17 | Repeat(U256, Vec), 18 | /// A directive to manipulate the stack according to a specified pattern. 19 | /// The first list gives names to items on the top of the stack. 20 | /// The second list specifies replacement items. 21 | /// Example: `(a, b, c) -> (c, 5, 0x20, @SOME_CONST, a)`. 22 | StackManipulation(Vec, Vec), 23 | /// Declares a global label. 24 | GlobalLabelDeclaration(String), 25 | /// Declares a label that is local to the current file. 26 | LocalLabelDeclaration(String), 27 | /// Declares a label that is local to the macro it's declared in. 28 | MacroLabelDeclaration(String), 29 | /// A `PUSH` operation. 30 | Push(PushTarget), 31 | /// A `ProverInput` operation. 32 | ProverInput(ProverInputFn), 33 | /// Any opcode besides a PUSH opcode. 34 | StandardOp(String), 35 | /// Literal hex data; should contain an even number of hex chars. 36 | Bytes(Vec), 37 | /// Creates a table of addresses from a list of labels. 
38 | Jumptable(Vec), 39 | } 40 | 41 | /// The left hand side of a %stack stack-manipulation macro. 42 | #[derive(Eq, PartialEq, Clone, Debug)] 43 | pub(crate) struct StackPlaceholder(pub String, pub usize); 44 | 45 | /// The right hand side of a %stack stack-manipulation macro. 46 | #[derive(Eq, PartialEq, Clone, Debug)] 47 | pub(crate) enum StackReplacement { 48 | Literal(U256), 49 | /// Can be either a named item or a label. 50 | Identifier(String), 51 | Label(String), 52 | MacroLabel(String), 53 | MacroVar(String), 54 | Constant(String), 55 | } 56 | 57 | impl From for StackReplacement { 58 | fn from(target: PushTarget) -> Self { 59 | match target { 60 | PushTarget::Literal(x) => Self::Literal(x), 61 | PushTarget::Label(l) => Self::Label(l), 62 | PushTarget::MacroLabel(l) => Self::MacroLabel(l), 63 | PushTarget::MacroVar(v) => Self::MacroVar(v), 64 | PushTarget::Constant(c) => Self::Constant(c), 65 | } 66 | } 67 | } 68 | 69 | /// The target of a `PUSH` operation. 70 | #[derive(Clone, Debug, Eq, PartialEq, Hash)] 71 | pub(crate) enum PushTarget { 72 | Literal(U256), 73 | Label(String), 74 | MacroLabel(String), 75 | MacroVar(String), 76 | Constant(String), 77 | } 78 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/constants/context_metadata.rs: -------------------------------------------------------------------------------- 1 | /// These metadata fields contain VM state specific to a particular context. 2 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)] 3 | pub(crate) enum ContextMetadata { 4 | /// The ID of the context which created this one. 5 | ParentContext = 0, 6 | /// The program counter to return to when we return to the parent context. 7 | ParentProgramCounter = 1, 8 | CalldataSize = 2, 9 | ReturndataSize = 3, 10 | /// The address of the account associated with this context. 11 | Address = 4, 12 | /// The size of the code under the account associated with this context. 
13 | /// While this information could be obtained from the state trie, it is best to cache it since 14 | /// the `CODESIZE` instruction is very cheap. 15 | CodeSize = 5, 16 | /// The address of the caller who spawned this context. 17 | Caller = 6, 18 | /// The value (in wei) deposited by the caller. 19 | CallValue = 7, 20 | /// Whether this context was created by `STATICCALL`, in which case state changes are 21 | /// prohibited. 22 | Static = 8, 23 | /// Pointer to the initial version of the state trie, at the creation of this context. Used when 24 | /// we need to revert a context. 25 | StateTrieCheckpointPointer = 9, 26 | /// Size of the active main memory. 27 | MSize = 10, 28 | } 29 | 30 | impl ContextMetadata { 31 | pub(crate) const COUNT: usize = 11; 32 | 33 | pub(crate) fn all() -> [Self; Self::COUNT] { 34 | [ 35 | Self::ParentContext, 36 | Self::ParentProgramCounter, 37 | Self::CalldataSize, 38 | Self::ReturndataSize, 39 | Self::Address, 40 | Self::CodeSize, 41 | Self::Caller, 42 | Self::CallValue, 43 | Self::Static, 44 | Self::StateTrieCheckpointPointer, 45 | Self::MSize, 46 | ] 47 | } 48 | 49 | /// The variable name that gets passed into kernel assembly code. 
50 | pub(crate) fn var_name(&self) -> &'static str { 51 | match self { 52 | ContextMetadata::ParentContext => "CTX_METADATA_PARENT_CONTEXT", 53 | ContextMetadata::ParentProgramCounter => "CTX_METADATA_PARENT_PC", 54 | ContextMetadata::CalldataSize => "CTX_METADATA_CALLDATA_SIZE", 55 | ContextMetadata::ReturndataSize => "CTX_METADATA_RETURNDATA_SIZE", 56 | ContextMetadata::Address => "CTX_METADATA_ADDRESS", 57 | ContextMetadata::CodeSize => "CTX_METADATA_CODE_SIZE", 58 | ContextMetadata::Caller => "CTX_METADATA_CALLER", 59 | ContextMetadata::CallValue => "CTX_METADATA_CALL_VALUE", 60 | ContextMetadata::Static => "CTX_METADATA_STATIC", 61 | ContextMetadata::StateTrieCheckpointPointer => "CTX_METADATA_STATE_TRIE_CHECKPOINT_PTR", 62 | ContextMetadata::MSize => "CTX_METADATA_MSIZE", 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/constants/trie_type.rs: -------------------------------------------------------------------------------- 1 | use eth_trie_utils::partial_trie::PartialTrie; 2 | 3 | pub(crate) enum PartialTrieType { 4 | Empty = 0, 5 | Hash = 1, 6 | Branch = 2, 7 | Extension = 3, 8 | Leaf = 4, 9 | } 10 | 11 | impl PartialTrieType { 12 | pub(crate) const COUNT: usize = 5; 13 | 14 | pub(crate) fn of(trie: &PartialTrie) -> Self { 15 | match trie { 16 | PartialTrie::Empty => Self::Empty, 17 | PartialTrie::Hash(_) => Self::Hash, 18 | PartialTrie::Branch { .. } => Self::Branch, 19 | PartialTrie::Extension { .. } => Self::Extension, 20 | PartialTrie::Leaf { .. } => Self::Leaf, 21 | } 22 | } 23 | 24 | pub(crate) fn all() -> [Self; Self::COUNT] { 25 | [ 26 | Self::Empty, 27 | Self::Hash, 28 | Self::Branch, 29 | Self::Extension, 30 | Self::Leaf, 31 | ] 32 | } 33 | 34 | /// The variable name that gets passed into kernel assembly code. 
35 | pub(crate) fn var_name(&self) -> &'static str { 36 | match self { 37 | Self::Empty => "MPT_NODE_EMPTY", 38 | Self::Hash => "MPT_NODE_HASH", 39 | Self::Branch => "MPT_NODE_BRANCH", 40 | Self::Extension => "MPT_NODE_EXTENSION", 41 | Self::Leaf => "MPT_NODE_LEAF", 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/constants/txn_fields.rs: -------------------------------------------------------------------------------- 1 | /// These are normalized transaction fields, i.e. not specific to any transaction type. 2 | #[allow(dead_code)] 3 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)] 4 | pub(crate) enum NormalizedTxnField { 5 | /// Whether a chain ID was present in the txn data. Type 0 transaction with v=27 or v=28 have 6 | /// no chain ID. This affects what fields get signed. 7 | ChainIdPresent = 0, 8 | ChainId = 1, 9 | Nonce = 2, 10 | MaxPriorityFeePerGas = 3, 11 | MaxFeePerGas = 4, 12 | GasLimit = 5, 13 | To = 6, 14 | Value = 7, 15 | /// The length of the data field. The data itself is stored in another segment. 16 | DataLen = 8, 17 | YParity = 9, 18 | R = 10, 19 | S = 11, 20 | Origin = 12, 21 | } 22 | 23 | impl NormalizedTxnField { 24 | pub(crate) const COUNT: usize = 13; 25 | 26 | pub(crate) fn all() -> [Self; Self::COUNT] { 27 | [ 28 | Self::ChainIdPresent, 29 | Self::ChainId, 30 | Self::Nonce, 31 | Self::MaxPriorityFeePerGas, 32 | Self::MaxFeePerGas, 33 | Self::GasLimit, 34 | Self::To, 35 | Self::Value, 36 | Self::DataLen, 37 | Self::YParity, 38 | Self::R, 39 | Self::S, 40 | Self::Origin, 41 | ] 42 | } 43 | 44 | /// The variable name that gets passed into kernel assembly code. 
45 | pub(crate) fn var_name(&self) -> &'static str { 46 | match self { 47 | NormalizedTxnField::ChainIdPresent => "TXN_FIELD_CHAIN_ID_PRESENT", 48 | NormalizedTxnField::ChainId => "TXN_FIELD_CHAIN_ID", 49 | NormalizedTxnField::Nonce => "TXN_FIELD_NONCE", 50 | NormalizedTxnField::MaxPriorityFeePerGas => "TXN_FIELD_MAX_PRIORITY_FEE_PER_GAS", 51 | NormalizedTxnField::MaxFeePerGas => "TXN_FIELD_MAX_FEE_PER_GAS", 52 | NormalizedTxnField::GasLimit => "TXN_FIELD_GAS_LIMIT", 53 | NormalizedTxnField::To => "TXN_FIELD_TO", 54 | NormalizedTxnField::Value => "TXN_FIELD_VALUE", 55 | NormalizedTxnField::DataLen => "TXN_FIELD_DATA_LEN", 56 | NormalizedTxnField::YParity => "TXN_FIELD_Y_PARITY", 57 | NormalizedTxnField::R => "TXN_FIELD_R", 58 | NormalizedTxnField::S => "TXN_FIELD_S", 59 | NormalizedTxnField::Origin => "TXN_FIELD_ORIGIN", 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/cost_estimator.rs: -------------------------------------------------------------------------------- 1 | use crate::cpu::kernel::assembler::BYTES_PER_OFFSET; 2 | use crate::cpu::kernel::ast::Item; 3 | use crate::cpu::kernel::ast::Item::*; 4 | use crate::cpu::kernel::ast::PushTarget::*; 5 | use crate::cpu::kernel::utils::u256_to_trimmed_be_bytes; 6 | 7 | pub(crate) fn is_code_improved(before: &[Item], after: &[Item]) -> bool { 8 | cost_estimate(after) < cost_estimate(before) 9 | } 10 | 11 | fn cost_estimate(code: &[Item]) -> u32 { 12 | code.iter().map(cost_estimate_item).sum() 13 | } 14 | 15 | fn cost_estimate_item(item: &Item) -> u32 { 16 | match item { 17 | MacroDef(_, _, _) => 0, 18 | GlobalLabelDeclaration(_) => 0, 19 | LocalLabelDeclaration(_) => 0, 20 | Push(Literal(n)) => cost_estimate_push(u256_to_trimmed_be_bytes(n).len()), 21 | Push(Label(_)) => cost_estimate_push(BYTES_PER_OFFSET as usize), 22 | ProverInput(_) => 1, 23 | StandardOp(op) => cost_estimate_standard_op(op.as_str()), 24 | _ => panic!("Unexpected item: 
/// Cost of a single standard (non-PUSH) opcode, in the abstract units used by
/// `is_code_improved` to compare candidate assembly snippets.
fn cost_estimate_standard_op(_op: &str) -> u32 {
    // For now we just treat any standard operation as having the same cost. This is pretty naive,
    // but should work fine with our current set of simple optimization rules.
    1
}

/// Cost of a PUSH whose immediate operand occupies `num_bytes` bytes; longer
/// immediates cost proportionally more, so shorter encodings are preferred.
fn cost_estimate_push(num_bytes: usize) -> u32 {
    // TODO: Once PUSH is actually implemented, check if this needs to be revised.
    num_bytes as u32
}
25 | pub fn assemble_to_bytes(files: &[String]) -> Vec { 26 | let parsed_files: Vec<_> = files.iter().map(|f| parse(f)).collect(); 27 | let kernel = assemble(parsed_files, evm_constants(), true); 28 | kernel.code 29 | } 30 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/stack/mod.rs: -------------------------------------------------------------------------------- 1 | mod permutations; 2 | pub mod stack_manipulation; 3 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/tests/core/create_addresses.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | 3 | use crate::cpu::kernel::aggregator::KERNEL; 4 | use crate::cpu::kernel::interpreter::Interpreter; 5 | 6 | #[test] 7 | fn test_get_create_address() -> Result<()> { 8 | let get_create_address = KERNEL.global_labels["get_create_address"]; 9 | 10 | // TODO: Replace with real data once we have a real implementation. 11 | let retaddr = 0xdeadbeefu32.into(); 12 | let nonce = 5.into(); 13 | let sender = 0.into(); 14 | let expected_addr = 123.into(); 15 | 16 | let initial_stack = vec![retaddr, nonce, sender]; 17 | let mut interpreter = Interpreter::new_with_kernel(get_create_address, initial_stack); 18 | interpreter.run()?; 19 | 20 | assert_eq!(interpreter.stack(), &[expected_addr]); 21 | 22 | Ok(()) 23 | } 24 | 25 | #[test] 26 | fn test_get_create2_address() -> Result<()> { 27 | let get_create2_address = KERNEL.global_labels["get_create2_address"]; 28 | 29 | // TODO: Replace with real data once we have a real implementation. 
30 | let retaddr = 0xdeadbeefu32.into(); 31 | let code_len = 0.into(); 32 | let code_offset = 0.into(); 33 | let code_segment = 0.into(); 34 | let code_context = 0.into(); 35 | let salt = 5.into(); 36 | let sender = 0.into(); 37 | let expected_addr = 123.into(); 38 | 39 | let initial_stack = vec![ 40 | retaddr, 41 | code_len, 42 | code_offset, 43 | code_segment, 44 | code_context, 45 | salt, 46 | sender, 47 | ]; 48 | let mut interpreter = Interpreter::new_with_kernel(get_create2_address, initial_stack); 49 | interpreter.run()?; 50 | 51 | assert_eq!(interpreter.stack(), &[expected_addr]); 52 | 53 | Ok(()) 54 | } 55 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/tests/core/intrinsic_gas.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | 3 | use crate::cpu::kernel::aggregator::KERNEL; 4 | use crate::cpu::kernel::constants::txn_fields::NormalizedTxnField; 5 | use crate::cpu::kernel::interpreter::Interpreter; 6 | 7 | const GAS_TX: u32 = 21_000; 8 | const GAS_TXCREATE: u32 = 32_000; 9 | 10 | #[test] 11 | fn test_intrinsic_gas() -> Result<()> { 12 | let intrinsic_gas = KERNEL.global_labels["intrinsic_gas"]; 13 | 14 | // Contract creation transaction. 15 | let initial_stack = vec![0xdeadbeefu32.into()]; 16 | let mut interpreter = Interpreter::new_with_kernel(intrinsic_gas, initial_stack.clone()); 17 | interpreter.run()?; 18 | assert_eq!(interpreter.stack(), vec![(GAS_TX + GAS_TXCREATE).into()]); 19 | 20 | // Message transaction. 
/// Check that `jumpdest_analysis` marks exactly the JUMPDEST bytes that are
/// real instructions, and not the JUMPDEST bytes that are immediate data of a
/// preceding PUSH.
#[test]
fn test_jumpdest_analysis() -> Result<()> {
    let jumpdest_analysis = KERNEL.global_labels["jumpdest_analysis"];
    const CONTEXT: usize = 3; // arbitrary

    let add = get_opcode("ADD");
    let push2 = get_push_opcode(2);
    let jumpdest = get_opcode("JUMPDEST");

    #[rustfmt::skip]
    let code: Vec<u8> = vec![
        add,
        jumpdest,
        push2,
        jumpdest, // part of PUSH2
        jumpdest, // part of PUSH2
        jumpdest,
        add,
        jumpdest,
    ];

    let expected_jumpdest_bits = vec![false, true, false, false, false, true, false, true];

    // Run the analysis over `code` installed in context CONTEXT.
    let initial_stack = vec![0xDEADBEEFu32.into(), code.len().into(), CONTEXT.into()];
    let mut interpreter = Interpreter::new_with_kernel(jumpdest_analysis, initial_stack);
    interpreter.set_code(CONTEXT, code);
    interpreter.run()?;
    assert_eq!(interpreter.stack(), vec![]);
    assert_eq!(
        interpreter.get_jumpdest_bits(CONTEXT),
        expected_jumpdest_bits
    );

    Ok(())
}
"0x55f77e8909b1f1c9531c4a309bb2d40388e9ed4b87830c8f90363c6b36255fb9", 32 | "0x1b", 33 | "0xd667c5a20fa899b253924099e10ae92998626718585b8171eb98de468bbebc", 34 | "0x58351f48ce34bf134ee611fb5bf255a5733f0029561d345a7d46bfa344b60ac0", 35 | "0x67f3c0Da351384838d7F7641AB0fCAcF853E1844", 36 | )?; 37 | test_valid_ecrecover( 38 | "0x55f77e8909b1f1c9531c4a309bb2d40388e9ed4b87830c8f90363c6b36255fb9", 39 | "0x1c", 40 | "0xd667c5a20fa899b253924099e10ae92998626718585b8171eb98de468bbebc", 41 | "0x58351f48ce34bf134ee611fb5bf255a5733f0029561d345a7d46bfa344b60ac0", 42 | "0xaA58436DeABb64982a386B2De1A8015AA28fCCc0", 43 | )?; 44 | test_valid_ecrecover( 45 | "0x0", 46 | "0x1c", 47 | "0x1", 48 | "0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", 49 | "0x3344c6f6eeCA588be132142DB0a32C71ABFAAe7B", 50 | )?; 51 | 52 | test_invalid_ecrecover( 53 | "0x0", 54 | "0x42", // v not in {27,28} 55 | "0x1", 56 | "0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", 57 | )?; 58 | test_invalid_ecrecover( 59 | "0x0", 60 | "0x42", 61 | "0xd667c5a20fa899b253924099e10ae92998626718585b8171eb98de468bbebc", 62 | "0x0", // s=0 63 | )?; 64 | test_invalid_ecrecover( 65 | "0x0", 66 | "0x42", 67 | "0x0", // r=0 68 | "0xd667c5a20fa899b253924099e10ae92998626718585b8171eb98de468bbebc", 69 | )?; 70 | test_invalid_ecrecover( 71 | "0x0", 72 | "0x1c", 73 | "0x3a18b21408d275dde53c0ea86f9c1982eca60193db0ce15008fa408d43024847", // r^3 + 7 isn't a square 74 | "0x5db9745f44089305b2f2c980276e7025a594828d878e6e36dd2abd34ca6b9e3d", 75 | )?; 76 | 77 | Ok(()) 78 | } 79 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/tests/exp.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use ethereum_types::U256; 3 | use rand::{thread_rng, Rng}; 4 | 5 | use crate::cpu::kernel::aggregator::KERNEL; 6 | use crate::cpu::kernel::interpreter::{run, run_interpreter}; 7 | 8 | #[test] 9 | fn test_exp() 
-> Result<()> { 10 | // Make sure we can parse and assemble the entire kernel. 11 | let exp = KERNEL.global_labels["exp"]; 12 | let mut rng = thread_rng(); 13 | let a = U256([0; 4].map(|_| rng.gen())); 14 | let b = U256([0; 4].map(|_| rng.gen())); 15 | 16 | // Random input 17 | let initial_stack = vec![0xDEADBEEFu32.into(), b, a]; 18 | let stack_with_kernel = run_interpreter(exp, initial_stack)?.stack().to_vec(); 19 | let initial_stack = vec![b, a]; 20 | let code = [0xa, 0x63, 0xde, 0xad, 0xbe, 0xef, 0x56]; // EXP, PUSH4 deadbeef, JUMP 21 | let stack_with_opcode = run(&code, 0, initial_stack, &KERNEL.prover_inputs)? 22 | .stack() 23 | .to_vec(); 24 | assert_eq!(stack_with_kernel, stack_with_opcode); 25 | 26 | // 0 base 27 | let initial_stack = vec![0xDEADBEEFu32.into(), b, U256::zero()]; 28 | let stack_with_kernel = run_interpreter(exp, initial_stack)?.stack().to_vec(); 29 | let initial_stack = vec![b, U256::zero()]; 30 | let code = [0xa, 0x63, 0xde, 0xad, 0xbe, 0xef, 0x56]; // EXP, PUSH4 deadbeef, JUMP 31 | let stack_with_opcode = run(&code, 0, initial_stack, &KERNEL.prover_inputs)? 32 | .stack() 33 | .to_vec(); 34 | assert_eq!(stack_with_kernel, stack_with_opcode); 35 | 36 | // 0 exponent 37 | let initial_stack = vec![0xDEADBEEFu32.into(), U256::zero(), a]; 38 | let stack_with_kernel = run_interpreter(exp, initial_stack)?.stack().to_vec(); 39 | let initial_stack = vec![U256::zero(), a]; 40 | let code = [0xa, 0x63, 0xde, 0xad, 0xbe, 0xef, 0x56]; // EXP, PUSH4 deadbeef, JUMP 41 | let stack_with_opcode = run(&code, 0, initial_stack, &KERNEL.prover_inputs)? 
42 | .stack() 43 | .to_vec(); 44 | assert_eq!(stack_with_kernel, stack_with_opcode); 45 | 46 | Ok(()) 47 | } 48 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod account_code; 2 | mod balance; 3 | mod core; 4 | mod curve_ops; 5 | mod ecrecover; 6 | mod exp; 7 | mod fields; 8 | mod hash; 9 | mod mpt; 10 | mod packing; 11 | mod ripemd; 12 | mod rlp; 13 | mod transaction_parsing; 14 | 15 | use std::str::FromStr; 16 | 17 | use anyhow::Result; 18 | use ethereum_types::U256; 19 | 20 | pub(crate) fn u256ify<'a>(hexes: impl IntoIterator) -> Result> { 21 | Ok(hexes 22 | .into_iter() 23 | .map(U256::from_str) 24 | .collect::, _>>()?) 25 | } 26 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/tests/mpt/hex_prefix.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | 3 | use crate::cpu::kernel::aggregator::KERNEL; 4 | use crate::cpu::kernel::interpreter::Interpreter; 5 | 6 | #[test] 7 | fn hex_prefix_even_nonterminated() -> Result<()> { 8 | let hex_prefix = KERNEL.global_labels["hex_prefix_rlp"]; 9 | 10 | let retdest = 0xDEADBEEFu32.into(); 11 | let terminated = 0.into(); 12 | let packed_nibbles = 0xABCDEF.into(); 13 | let num_nibbles = 6.into(); 14 | let rlp_pos = 0.into(); 15 | let initial_stack = vec![retdest, terminated, packed_nibbles, num_nibbles, rlp_pos]; 16 | let mut interpreter = Interpreter::new_with_kernel(hex_prefix, initial_stack); 17 | interpreter.run()?; 18 | assert_eq!(interpreter.stack(), vec![5.into()]); 19 | 20 | assert_eq!( 21 | interpreter.get_rlp_memory(), 22 | vec![ 23 | 0x80 + 4, // prefix 24 | 0, // neither flag is set 25 | 0xAB, 26 | 0xCD, 27 | 0xEF 28 | ] 29 | ); 30 | 31 | Ok(()) 32 | } 33 | 34 | #[test] 35 | fn hex_prefix_odd_terminated() -> Result<()> { 36 | let hex_prefix = 
KERNEL.global_labels["hex_prefix_rlp"]; 37 | 38 | let retdest = 0xDEADBEEFu32.into(); 39 | let terminated = 1.into(); 40 | let packed_nibbles = 0xABCDE.into(); 41 | let num_nibbles = 5.into(); 42 | let rlp_pos = 0.into(); 43 | let initial_stack = vec![retdest, terminated, packed_nibbles, num_nibbles, rlp_pos]; 44 | let mut interpreter = Interpreter::new_with_kernel(hex_prefix, initial_stack); 45 | interpreter.run()?; 46 | assert_eq!(interpreter.stack(), vec![4.into()]); 47 | 48 | assert_eq!( 49 | interpreter.get_rlp_memory(), 50 | vec![ 51 | 0x80 + 3, // prefix 52 | (2 + 1) * 16 + 0xA, 53 | 0xBC, 54 | 0xDE, 55 | ] 56 | ); 57 | 58 | Ok(()) 59 | } 60 | 61 | #[test] 62 | fn hex_prefix_odd_terminated_tiny() -> Result<()> { 63 | let hex_prefix = KERNEL.global_labels["hex_prefix_rlp"]; 64 | 65 | let retdest = 0xDEADBEEFu32.into(); 66 | let terminated = 1.into(); 67 | let packed_nibbles = 0xA.into(); 68 | let num_nibbles = 1.into(); 69 | let rlp_pos = 2.into(); 70 | let initial_stack = vec![retdest, terminated, packed_nibbles, num_nibbles, rlp_pos]; 71 | let mut interpreter = Interpreter::new_with_kernel(hex_prefix, initial_stack); 72 | interpreter.run()?; 73 | assert_eq!(interpreter.stack(), vec![3.into()]); 74 | 75 | assert_eq!( 76 | interpreter.get_rlp_memory(), 77 | vec![ 78 | // Since rlp_pos = 2, we skipped over the first two bytes. 79 | 0, 80 | 0, 81 | // No length prefix; this tiny string is its own RLP encoding. 
82 | (2 + 1) * 16 + 0xA, 83 | ] 84 | ); 85 | 86 | Ok(()) 87 | } 88 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/tests/mpt/mod.rs: -------------------------------------------------------------------------------- 1 | use eth_trie_utils::partial_trie::{Nibbles, PartialTrie}; 2 | use ethereum_types::{BigEndianHash, H256, U256}; 3 | 4 | use crate::generation::mpt::AccountRlp; 5 | 6 | mod hash; 7 | mod hex_prefix; 8 | mod insert; 9 | mod load; 10 | mod read; 11 | 12 | pub(crate) fn nibbles_64>(v: T) -> Nibbles { 13 | let packed = v.into(); 14 | Nibbles { count: 64, packed } 15 | } 16 | 17 | pub(crate) fn nibbles_count>(v: T, count: usize) -> Nibbles { 18 | let packed = v.into(); 19 | Nibbles { count, packed } 20 | } 21 | 22 | pub(crate) fn test_account_1() -> AccountRlp { 23 | AccountRlp { 24 | nonce: U256::from(1111), 25 | balance: U256::from(2222), 26 | storage_root: H256::from_uint(&U256::from(3333)), 27 | code_hash: H256::from_uint(&U256::from(4444)), 28 | } 29 | } 30 | 31 | pub(crate) fn test_account_1_rlp() -> Vec { 32 | rlp::encode(&test_account_1()).to_vec() 33 | } 34 | 35 | pub(crate) fn test_account_2() -> AccountRlp { 36 | AccountRlp { 37 | nonce: U256::from(5555), 38 | balance: U256::from(6666), 39 | storage_root: H256::from_uint(&U256::from(7777)), 40 | code_hash: H256::from_uint(&U256::from(8888)), 41 | } 42 | } 43 | 44 | pub(crate) fn test_account_2_rlp() -> Vec { 45 | rlp::encode(&test_account_2()).to_vec() 46 | } 47 | 48 | /// A `PartialTrie` where an extension node leads to a leaf node containing an account. 
49 | pub(crate) fn extension_to_leaf(value: Vec) -> PartialTrie { 50 | PartialTrie::Extension { 51 | nibbles: 0xABC_u64.into(), 52 | child: PartialTrie::Leaf { 53 | nibbles: Nibbles { 54 | count: 3, 55 | packed: 0xDEF.into(), 56 | }, 57 | value, 58 | } 59 | .into(), 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/tests/mpt/read.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use ethereum_types::BigEndianHash; 3 | 4 | use crate::cpu::kernel::aggregator::KERNEL; 5 | use crate::cpu::kernel::constants::global_metadata::GlobalMetadata; 6 | use crate::cpu::kernel::interpreter::Interpreter; 7 | use crate::cpu::kernel::tests::mpt::{extension_to_leaf, test_account_1, test_account_1_rlp}; 8 | use crate::generation::mpt::all_mpt_prover_inputs_reversed; 9 | use crate::generation::TrieInputs; 10 | 11 | #[test] 12 | fn mpt_read() -> Result<()> { 13 | let trie_inputs = TrieInputs { 14 | state_trie: extension_to_leaf(test_account_1_rlp()), 15 | transactions_trie: Default::default(), 16 | receipts_trie: Default::default(), 17 | storage_tries: vec![], 18 | }; 19 | 20 | let load_all_mpts = KERNEL.global_labels["load_all_mpts"]; 21 | let mpt_read = KERNEL.global_labels["mpt_read"]; 22 | 23 | let initial_stack = vec![0xdeadbeefu32.into()]; 24 | let mut interpreter = Interpreter::new_with_kernel(load_all_mpts, initial_stack); 25 | interpreter.generation_state.mpt_prover_inputs = all_mpt_prover_inputs_reversed(&trie_inputs); 26 | interpreter.run()?; 27 | assert_eq!(interpreter.stack(), vec![]); 28 | 29 | // Now, execute mpt_read on the state trie. 
/// `mload_packing` should read `len` bytes of RLP memory starting at `offset`
/// and leave them packed big-endian into a single stack word.
#[test]
fn test_mload_packing_1_byte() -> Result<()> {
    // Renamed from `mstore_unpacking`: this local holds the `mload_packing` label.
    let mload_packing = KERNEL.global_labels["mload_packing"];

    let retdest = 0xDEADBEEFu32.into();
    let len = 1.into();
    let offset = 2.into();
    let segment = (Segment::RlpRaw as u32).into();
    let context = 0.into();
    let initial_stack = vec![retdest, len, offset, segment, context];

    let mut interpreter = Interpreter::new_with_kernel(mload_packing, initial_stack);
    // RLP memory holds [0, 0, 0xAB]; reading 1 byte at offset 2 yields 0xAB.
    interpreter.set_rlp_memory(vec![0, 0, 0xAB]);

    interpreter.run()?;
    assert_eq!(interpreter.stack(), vec![0xAB.into()]);

    Ok(())
}
/// Packing a full 32 bytes of 0xFF should produce `U256::MAX`.
#[test]
fn test_mload_packing_32_bytes() -> Result<()> {
    // Renamed from `mstore_unpacking`: this local holds the `mload_packing` label.
    let mload_packing = KERNEL.global_labels["mload_packing"];

    let retdest = 0xDEADBEEFu32.into();
    let len = 32.into();
    let offset = 0.into();
    let segment = (Segment::RlpRaw as u32).into();
    let context = 0.into();
    let initial_stack = vec![retdest, len, offset, segment, context];

    let mut interpreter = Interpreter::new_with_kernel(mload_packing, initial_stack);
    interpreter.set_rlp_memory(vec![0xFF; 32]);

    interpreter.run()?;
    assert_eq!(interpreter.stack(), vec![U256::MAX]);

    Ok(())
}
/// Build the initial interpreter stack for `ripemd_stack`: the message length,
/// followed by one u32 per message byte, followed by the 0xdeadbeef retdest.
fn make_input(word: &str) -> Vec<u32> {
    let mut input: Vec<u32> = vec![word.len().try_into().unwrap()];
    // Widen each message byte to a u32 stack word.
    input.extend(word.as_bytes().iter().map(|&b| u32::from(b)));
    input.push(0xdeadbeef);
    input
}
/// `num_bytes` reports the minimum number of bytes needed to encode a value;
/// per the sibling tests, zero is defined to take one byte.
#[test]
fn test_num_bytes_0() -> Result<()> {
    let num_bytes = KERNEL.global_labels["num_bytes"];

    let retdest = 0xDEADBEEFu32.into();
    let x = 0.into();
    let initial_stack = vec![retdest, x];
    let mut interpreter = Interpreter::new_with_kernel(num_bytes, initial_stack);

    interpreter.run()?;
    // Zero still occupies a single byte.
    assert_eq!(interpreter.stack(), vec![1.into()]);
    Ok(())
}
-------------------------------------------------------------------------------- /evm/src/cpu/kernel/tests/transaction_parsing/mod.rs: -------------------------------------------------------------------------------- 1 | mod parse_type_0_txn; 2 | -------------------------------------------------------------------------------- /evm/src/cpu/kernel/tests/transaction_parsing/parse_type_0_txn.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use ethereum_types::U256; 3 | use hex_literal::hex; 4 | use NormalizedTxnField::*; 5 | 6 | use crate::cpu::kernel::aggregator::KERNEL; 7 | use crate::cpu::kernel::constants::txn_fields::NormalizedTxnField; 8 | use crate::cpu::kernel::interpreter::Interpreter; 9 | 10 | #[test] 11 | fn process_type_0_txn() -> Result<()> { 12 | let process_type_0_txn = KERNEL.global_labels["process_type_0_txn"]; 13 | let process_normalized_txn = KERNEL.global_labels["process_normalized_txn"]; 14 | 15 | let retaddr = 0xDEADBEEFu32.into(); 16 | let mut interpreter = Interpreter::new_with_kernel(process_type_0_txn, vec![retaddr]); 17 | 18 | // When we reach process_normalized_txn, we're done with parsing and normalizing. 19 | // Processing normalized transactions is outside the scope of this test. 
20 | interpreter.halt_offsets.push(process_normalized_txn); 21 | 22 | // Generated with py-evm: 23 | // import eth, eth_keys, eth_utils, rlp 24 | // genesis_params = { 'difficulty': eth.constants.GENESIS_DIFFICULTY } 25 | // chain = eth.chains.mainnet.MainnetChain.from_genesis(eth.db.atomic.AtomicDB(), genesis_params, {}) 26 | // unsigned_txn = chain.create_unsigned_transaction( 27 | // nonce=5, 28 | // gas_price=10, 29 | // gas=22_000, 30 | // to=eth.constants.ZERO_ADDRESS, 31 | // value=100, 32 | // data=b'\x42\x42', 33 | // ) 34 | // sk = eth_keys.keys.PrivateKey(eth_utils.decode_hex('4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318')) 35 | // signed_txn = unsigned_txn.as_signed_transaction(sk) 36 | // rlp.encode(signed_txn).hex() 37 | interpreter.set_rlp_memory(hex!("f861050a8255f0940000000000000000000000000000000000000000648242421ca07c5c61ed975ebd286f6b027b8c504842e50a47d318e1e801719dd744fe93e6c6a01e7b5119b57dd54e175ff2f055c91f3ab1b53eba0b2c184f347cdff0e745aca2").to_vec()); 38 | 39 | interpreter.run()?; 40 | 41 | assert_eq!(interpreter.get_txn_field(ChainIdPresent), 0.into()); 42 | assert_eq!(interpreter.get_txn_field(ChainId), 0.into()); 43 | assert_eq!(interpreter.get_txn_field(Nonce), 5.into()); 44 | assert_eq!(interpreter.get_txn_field(MaxPriorityFeePerGas), 10.into()); 45 | assert_eq!(interpreter.get_txn_field(MaxPriorityFeePerGas), 10.into()); 46 | assert_eq!(interpreter.get_txn_field(MaxFeePerGas), 10.into()); 47 | assert_eq!(interpreter.get_txn_field(To), 0.into()); 48 | assert_eq!(interpreter.get_txn_field(Value), 100.into()); 49 | assert_eq!(interpreter.get_txn_field(DataLen), 2.into()); 50 | assert_eq!(interpreter.get_txn_data(), &[0x42.into(), 0x42.into()]); 51 | assert_eq!(interpreter.get_txn_field(YParity), 1.into()); 52 | assert_eq!( 53 | interpreter.get_txn_field(R), 54 | U256::from_big_endian(&hex!( 55 | "7c5c61ed975ebd286f6b027b8c504842e50a47d318e1e801719dd744fe93e6c6" 56 | )) 57 | ); 58 | assert_eq!( 59 | 
/// Enumerate the length `W` windows of `vec`, and run `maybe_replace` on each one.
///
/// Whenever `maybe_replace` returns `Some(replacement)`, the given replacement will be applied,
/// and scanning backs up to the earliest window that could have been affected by the change,
/// so cascading rewrites are found.
pub(crate) fn replace_windows<const W: usize, T, F>(vec: &mut Vec<T>, maybe_replace: F)
where
    T: Clone + Debug,
    F: Fn([T; W]) -> Option<Vec<T>>,
{
    let mut start = 0;
    while start + W <= vec.len() {
        let range = start..start + W;
        // Clone the window out as a fixed-size array; `Debug` is needed for the unwrap.
        let window = vec[range.clone()].to_vec().try_into().unwrap();
        if let Some(replacement) = maybe_replace(window) {
            vec.splice(range, replacement);
            // Go back to the earliest window that changed.
            start = start.saturating_sub(W - 1);
        } else {
            start += 1;
        }
    }
}
54 | let mut vec = vec![9, 1, 6, 8, 15, 7, 9]; 55 | replace_windows(&mut vec, |[n]| { 56 | (2..n).find(|d| n % d == 0).map(|d| vec![d, n / d]) 57 | }); 58 | assert_eq!(vec, vec![3, 3, 1, 2, 3, 2, 2, 2, 3, 5, 7, 3, 3]); 59 | } 60 | 61 | #[test] 62 | fn literal_to_be_bytes() { 63 | assert_eq!(u256_to_trimmed_be_bytes(&0.into()), vec![0x00]); 64 | 65 | assert_eq!(u256_to_trimmed_be_bytes(&768.into()), vec![0x03, 0x00]); 66 | 67 | assert_eq!(u256_to_trimmed_be_bytes(&0xa1b2.into()), vec![0xa1, 0xb2]); 68 | 69 | assert_eq!(u256_to_trimmed_be_bytes(&0x1b2.into()), vec![0x1, 0xb2]); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /evm/src/cpu/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod bootstrap_kernel; 2 | pub(crate) mod columns; 3 | mod contextops; 4 | pub(crate) mod control_flow; 5 | pub mod cpu_stark; 6 | pub(crate) mod decode; 7 | mod dup_swap; 8 | mod jumps; 9 | pub mod kernel; 10 | pub(crate) mod membus; 11 | mod memio; 12 | mod modfp254; 13 | mod pc; 14 | mod shift; 15 | pub(crate) mod simple_logic; 16 | mod stack; 17 | pub(crate) mod stack_bounds; 18 | mod syscalls; 19 | -------------------------------------------------------------------------------- /evm/src/cpu/modfp254.rs: -------------------------------------------------------------------------------- 1 | use itertools::izip; 2 | use plonky2::field::extension::Extendable; 3 | use plonky2::field::packed::PackedField; 4 | use plonky2::field::types::Field; 5 | use plonky2::hash::hash_types::RichField; 6 | use plonky2::iop::ext_target::ExtensionTarget; 7 | 8 | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; 9 | use crate::cpu::columns::CpuColumnsView; 10 | 11 | // Python: 12 | // >>> P = 21888242871839275222246405745257275088696311157297823662689037894645226208583 13 | // >>> "[" + ", ".join(hex((P >> n) % 2**32) for n in range(0, 256, 32)) + "]" 14 | const 
P_LIMBS: [u32; 8] = [ 15 | 0xd87cfd47, 0x3c208c16, 0x6871ca8d, 0x97816a91, 0x8181585d, 0xb85045b6, 0xe131a029, 0x30644e72, 16 | ]; 17 | 18 | pub fn eval_packed( 19 | lv: &CpuColumnsView

, 20 | yield_constr: &mut ConstraintConsumer

, 21 | ) { 22 | let filter = lv.is_cpu_cycle * (lv.op.addfp254 + lv.op.mulfp254 + lv.op.subfp254); 23 | 24 | // We want to use all the same logic as the usual mod operations, but without needing to read 25 | // the modulus from the stack. We simply constrain `mem_channels[2]` to be our prime (that's 26 | // where the modulus goes in the generalized operations). 27 | let channel_val = lv.mem_channels[2].value; 28 | for (channel_limb, p_limb) in izip!(channel_val, P_LIMBS) { 29 | let p_limb = P::Scalar::from_canonical_u32(p_limb); 30 | yield_constr.constraint(filter * (channel_limb - p_limb)); 31 | } 32 | } 33 | 34 | pub fn eval_ext_circuit, const D: usize>( 35 | builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, 36 | lv: &CpuColumnsView>, 37 | yield_constr: &mut RecursiveConstraintConsumer, 38 | ) { 39 | let filter = { 40 | let flag_sum = builder.add_many_extension([lv.op.addfp254, lv.op.mulfp254, lv.op.subfp254]); 41 | builder.mul_extension(lv.is_cpu_cycle, flag_sum) 42 | }; 43 | 44 | // We want to use all the same logic as the usual mod operations, but without needing to read 45 | // the modulus from the stack. We simply constrain `mem_channels[2]` to be our prime (that's 46 | // where the modulus goes in the generalized operations). 
47 | let channel_val = lv.mem_channels[2].value; 48 | for (channel_limb, p_limb) in izip!(channel_val, P_LIMBS) { 49 | let p_limb = F::from_canonical_u32(p_limb); 50 | let constr = builder.arithmetic_extension(F::ONE, -p_limb, filter, channel_limb, filter); 51 | yield_constr.constraint(builder, constr); 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /evm/src/cpu/pc.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::extension::Extendable; 2 | use plonky2::field::packed::PackedField; 3 | use plonky2::hash::hash_types::RichField; 4 | use plonky2::iop::ext_target::ExtensionTarget; 5 | 6 | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; 7 | use crate::cpu::columns::CpuColumnsView; 8 | use crate::cpu::membus::NUM_GP_CHANNELS; 9 | 10 | pub fn eval_packed( 11 | lv: &CpuColumnsView

, 12 | yield_constr: &mut ConstraintConsumer

, 13 | ) { 14 | let filter = lv.op.pc; 15 | let push_value = lv.mem_channels[NUM_GP_CHANNELS - 1].value; 16 | yield_constr.constraint(filter * (push_value[0] - lv.program_counter)); 17 | for &limb in &push_value[1..] { 18 | yield_constr.constraint(filter * limb); 19 | } 20 | } 21 | 22 | pub fn eval_ext_circuit, const D: usize>( 23 | builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, 24 | lv: &CpuColumnsView>, 25 | yield_constr: &mut RecursiveConstraintConsumer, 26 | ) { 27 | let filter = lv.op.pc; 28 | let push_value = lv.mem_channels[NUM_GP_CHANNELS - 1].value; 29 | { 30 | let diff = builder.sub_extension(push_value[0], lv.program_counter); 31 | let constr = builder.mul_extension(filter, diff); 32 | yield_constr.constraint(builder, constr); 33 | } 34 | for &limb in &push_value[1..] { 35 | let constr = builder.mul_extension(filter, limb); 36 | yield_constr.constraint(builder, constr); 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /evm/src/cpu/simple_logic/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod eq_iszero; 2 | mod not; 3 | 4 | use plonky2::field::extension::Extendable; 5 | use plonky2::field::packed::PackedField; 6 | use plonky2::hash::hash_types::RichField; 7 | use plonky2::iop::ext_target::ExtensionTarget; 8 | 9 | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; 10 | use crate::cpu::columns::CpuColumnsView; 11 | 12 | pub fn eval_packed( 13 | lv: &CpuColumnsView

, 14 | yield_constr: &mut ConstraintConsumer

, 15 | ) { 16 | not::eval_packed(lv, yield_constr); 17 | eq_iszero::eval_packed(lv, yield_constr); 18 | } 19 | 20 | pub fn eval_ext_circuit, const D: usize>( 21 | builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, 22 | lv: &CpuColumnsView>, 23 | yield_constr: &mut RecursiveConstraintConsumer, 24 | ) { 25 | not::eval_ext_circuit(builder, lv, yield_constr); 26 | eq_iszero::eval_ext_circuit(builder, lv, yield_constr); 27 | } 28 | -------------------------------------------------------------------------------- /evm/src/cpu/simple_logic/not.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::extension::Extendable; 2 | use plonky2::field::packed::PackedField; 3 | use plonky2::field::types::Field; 4 | use plonky2::hash::hash_types::RichField; 5 | use plonky2::iop::ext_target::ExtensionTarget; 6 | 7 | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; 8 | use crate::cpu::columns::CpuColumnsView; 9 | use crate::cpu::membus::NUM_GP_CHANNELS; 10 | 11 | const LIMB_SIZE: usize = 32; 12 | const ALL_1_LIMB: u64 = (1 << LIMB_SIZE) - 1; 13 | 14 | pub fn eval_packed( 15 | lv: &CpuColumnsView

, 16 | yield_constr: &mut ConstraintConsumer

, 17 | ) { 18 | // This is simple: just do output = 0xffffffff - input. 19 | let input = lv.mem_channels[0].value; 20 | let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value; 21 | let cycle_filter = lv.is_cpu_cycle; 22 | let is_not_filter = lv.op.not; 23 | let filter = cycle_filter * is_not_filter; 24 | for (input_limb, output_limb) in input.into_iter().zip(output) { 25 | yield_constr.constraint( 26 | filter * (output_limb + input_limb - P::Scalar::from_canonical_u64(ALL_1_LIMB)), 27 | ); 28 | } 29 | } 30 | 31 | pub fn eval_ext_circuit, const D: usize>( 32 | builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder, 33 | lv: &CpuColumnsView>, 34 | yield_constr: &mut RecursiveConstraintConsumer, 35 | ) { 36 | let input = lv.mem_channels[0].value; 37 | let output = lv.mem_channels[NUM_GP_CHANNELS - 1].value; 38 | let cycle_filter = lv.is_cpu_cycle; 39 | let is_not_filter = lv.op.not; 40 | let filter = builder.mul_extension(cycle_filter, is_not_filter); 41 | for (input_limb, output_limb) in input.into_iter().zip(output) { 42 | let constr = builder.add_extension(output_limb, input_limb); 43 | let constr = builder.arithmetic_extension( 44 | F::ONE, 45 | -F::from_canonical_u64(ALL_1_LIMB), 46 | filter, 47 | constr, 48 | filter, 49 | ); 50 | yield_constr.constraint(builder, constr); 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /evm/src/generation/rlp.rs: -------------------------------------------------------------------------------- 1 | use ethereum_types::U256; 2 | 3 | pub(crate) fn all_rlp_prover_inputs_reversed(signed_txns: &[Vec]) -> Vec { 4 | let mut inputs = all_rlp_prover_inputs(signed_txns); 5 | inputs.reverse(); 6 | inputs 7 | } 8 | 9 | fn all_rlp_prover_inputs(signed_txns: &[Vec]) -> Vec { 10 | let mut prover_inputs = vec![]; 11 | for txn in signed_txns { 12 | prover_inputs.push(txn.len().into()); 13 | for &byte in txn { 14 | prover_inputs.push(byte.into()); 15 | } 16 | } 17 | prover_inputs 18 | } 19 
| -------------------------------------------------------------------------------- /evm/src/generation/state.rs: -------------------------------------------------------------------------------- 1 | use ethereum_types::U256; 2 | use plonky2::field::types::Field; 3 | 4 | use crate::generation::mpt::all_mpt_prover_inputs_reversed; 5 | use crate::generation::rlp::all_rlp_prover_inputs_reversed; 6 | use crate::generation::GenerationInputs; 7 | use crate::witness::memory::MemoryState; 8 | use crate::witness::state::RegistersState; 9 | use crate::witness::traces::{TraceCheckpoint, Traces}; 10 | 11 | pub(crate) struct GenerationStateCheckpoint { 12 | pub(crate) registers: RegistersState, 13 | pub(crate) traces: TraceCheckpoint, 14 | } 15 | 16 | #[derive(Debug)] 17 | pub(crate) struct GenerationState { 18 | pub(crate) inputs: GenerationInputs, 19 | pub(crate) registers: RegistersState, 20 | pub(crate) memory: MemoryState, 21 | pub(crate) traces: Traces, 22 | 23 | pub(crate) next_txn_index: usize, 24 | 25 | /// Prover inputs containing MPT data, in reverse order so that the next input can be obtained 26 | /// via `pop()`. 27 | pub(crate) mpt_prover_inputs: Vec, 28 | 29 | /// Prover inputs containing RLP data, in reverse order so that the next input can be obtained 30 | /// via `pop()`. 
31 | pub(crate) rlp_prover_inputs: Vec, 32 | } 33 | 34 | impl GenerationState { 35 | pub(crate) fn new(inputs: GenerationInputs, kernel_code: &[u8]) -> Self { 36 | let mpt_prover_inputs = all_mpt_prover_inputs_reversed(&inputs.tries); 37 | let rlp_prover_inputs = all_rlp_prover_inputs_reversed(&inputs.signed_txns); 38 | 39 | Self { 40 | inputs, 41 | registers: Default::default(), 42 | memory: MemoryState::new(kernel_code), 43 | traces: Traces::default(), 44 | next_txn_index: 0, 45 | mpt_prover_inputs, 46 | rlp_prover_inputs, 47 | } 48 | } 49 | 50 | pub fn checkpoint(&self) -> GenerationStateCheckpoint { 51 | GenerationStateCheckpoint { 52 | registers: self.registers, 53 | traces: self.traces.checkpoint(), 54 | } 55 | } 56 | 57 | pub fn rollback(&mut self, checkpoint: GenerationStateCheckpoint) { 58 | self.registers = checkpoint.registers; 59 | self.traces.rollback(checkpoint.traces); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /evm/src/keccak/logic.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::extension::Extendable; 2 | use plonky2::field::packed::PackedField; 3 | use plonky2::field::types::PrimeField64; 4 | use plonky2::hash::hash_types::RichField; 5 | use plonky2::iop::ext_target::ExtensionTarget; 6 | use plonky2::plonk::circuit_builder::CircuitBuilder; 7 | 8 | pub(crate) fn xor(xs: [F; N]) -> F { 9 | xs.into_iter().fold(F::ZERO, |acc, x| { 10 | debug_assert!(x.is_zero() || x.is_one()); 11 | F::from_canonical_u64(acc.to_canonical_u64() ^ x.to_canonical_u64()) 12 | }) 13 | } 14 | 15 | /// Computes the arithmetic generalization of `xor(x, y)`, i.e. `x + y - 2 x y`. 16 | pub(crate) fn xor_gen(x: P, y: P) -> P { 17 | x + y - x * y.doubles() 18 | } 19 | 20 | /// Computes the arithmetic generalization of `xor3(x, y, z)`. 
21 | pub(crate) fn xor3_gen(x: P, y: P, z: P) -> P { 22 | xor_gen(x, xor_gen(y, z)) 23 | } 24 | 25 | /// Computes the arithmetic generalization of `xor(x, y)`, i.e. `x + y - 2 x y`. 26 | pub(crate) fn xor_gen_circuit, const D: usize>( 27 | builder: &mut CircuitBuilder, 28 | x: ExtensionTarget, 29 | y: ExtensionTarget, 30 | ) -> ExtensionTarget { 31 | let sum = builder.add_extension(x, y); 32 | builder.arithmetic_extension(-F::TWO, F::ONE, x, y, sum) 33 | } 34 | 35 | /// Computes the arithmetic generalization of `xor(x, y)`, i.e. `x + y - 2 x y`. 36 | pub(crate) fn xor3_gen_circuit, const D: usize>( 37 | builder: &mut CircuitBuilder, 38 | x: ExtensionTarget, 39 | y: ExtensionTarget, 40 | z: ExtensionTarget, 41 | ) -> ExtensionTarget { 42 | let x_xor_y = xor_gen_circuit(builder, x, y); 43 | xor_gen_circuit(builder, x_xor_y, z) 44 | } 45 | 46 | pub(crate) fn andn(x: F, y: F) -> F { 47 | debug_assert!(x.is_zero() || x.is_one()); 48 | debug_assert!(y.is_zero() || y.is_one()); 49 | let x = x.to_canonical_u64(); 50 | let y = y.to_canonical_u64(); 51 | F::from_canonical_u64(!x & y) 52 | } 53 | 54 | pub(crate) fn andn_gen(x: P, y: P) -> P { 55 | (P::ONES - x) * y 56 | } 57 | 58 | pub(crate) fn andn_gen_circuit, const D: usize>( 59 | builder: &mut CircuitBuilder, 60 | x: ExtensionTarget, 61 | y: ExtensionTarget, 62 | ) -> ExtensionTarget { 63 | // (1 - x) y = -xy + y 64 | builder.arithmetic_extension(F::NEG_ONE, F::ONE, x, y, y) 65 | } 66 | -------------------------------------------------------------------------------- /evm/src/keccak/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod columns; 2 | pub mod constants; 3 | pub mod keccak_stark; 4 | pub mod logic; 5 | pub mod round_flags; 6 | -------------------------------------------------------------------------------- /evm/src/keccak/round_flags.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::extension::Extendable; 
2 | use plonky2::field::packed::PackedField; 3 | use plonky2::field::types::Field; 4 | use plonky2::hash::hash_types::RichField; 5 | use plonky2::plonk::circuit_builder::CircuitBuilder; 6 | 7 | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; 8 | use crate::keccak::columns::{reg_step, NUM_COLUMNS}; 9 | use crate::keccak::keccak_stark::NUM_ROUNDS; 10 | use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars}; 11 | 12 | pub(crate) fn eval_round_flags>( 13 | vars: StarkEvaluationVars, 14 | yield_constr: &mut ConstraintConsumer

, 15 | ) { 16 | // Initially, the first step flag should be 1 while the others should be 0. 17 | yield_constr.constraint_first_row(vars.local_values[reg_step(0)] - F::ONE); 18 | for i in 1..NUM_ROUNDS { 19 | yield_constr.constraint_first_row(vars.local_values[reg_step(i)]); 20 | } 21 | 22 | for i in 0..NUM_ROUNDS { 23 | let current_round_flag = vars.local_values[reg_step(i)]; 24 | let next_round_flag = vars.next_values[reg_step((i + 1) % NUM_ROUNDS)]; 25 | yield_constr.constraint_transition(next_round_flag - current_round_flag); 26 | } 27 | } 28 | 29 | pub(crate) fn eval_round_flags_recursively, const D: usize>( 30 | builder: &mut CircuitBuilder, 31 | vars: StarkEvaluationTargets, 32 | yield_constr: &mut RecursiveConstraintConsumer, 33 | ) { 34 | let one = builder.one_extension(); 35 | 36 | // Initially, the first step flag should be 1 while the others should be 0. 37 | let step_0_minus_1 = builder.sub_extension(vars.local_values[reg_step(0)], one); 38 | yield_constr.constraint_first_row(builder, step_0_minus_1); 39 | for i in 1..NUM_ROUNDS { 40 | yield_constr.constraint_first_row(builder, vars.local_values[reg_step(i)]); 41 | } 42 | 43 | for i in 0..NUM_ROUNDS { 44 | let current_round_flag = vars.local_values[reg_step(i)]; 45 | let next_round_flag = vars.next_values[reg_step((i + 1) % NUM_ROUNDS)]; 46 | let diff = builder.sub_extension(next_round_flag, current_round_flag); 47 | yield_constr.constraint_transition(builder, diff); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /evm/src/keccak_sponge/mod.rs: -------------------------------------------------------------------------------- 1 | //! The Keccak sponge STARK is used to hash a variable amount of data which is read from memory. 2 | //! It connects to the memory STARK to read input data, and to the Keccak-f STARK to evaluate the 3 | //! permutation at each absorption step. 
4 | 5 | pub mod columns; 6 | pub mod keccak_sponge_stark; 7 | -------------------------------------------------------------------------------- /evm/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(incomplete_features)] 2 | #![allow(clippy::needless_range_loop)] 3 | #![allow(clippy::too_many_arguments)] 4 | #![allow(clippy::type_complexity)] 5 | #![allow(clippy::field_reassign_with_default)] 6 | #![feature(let_chains)] 7 | #![feature(generic_const_exprs)] 8 | 9 | pub mod all_stark; 10 | pub mod arithmetic; 11 | pub mod config; 12 | pub mod constraint_consumer; 13 | pub mod cpu; 14 | pub mod cross_table_lookup; 15 | pub mod generation; 16 | mod get_challenges; 17 | pub mod keccak; 18 | pub mod keccak_sponge; 19 | pub mod logic; 20 | pub mod lookup; 21 | pub mod memory; 22 | pub mod permutation; 23 | pub mod proof; 24 | pub mod prover; 25 | pub mod recursive_verifier; 26 | pub mod stark; 27 | pub mod stark_testing; 28 | pub mod util; 29 | pub mod vanishing_poly; 30 | pub mod vars; 31 | pub mod verifier; 32 | pub mod witness; 33 | 34 | // Set up Jemalloc 35 | #[cfg(not(target_env = "msvc"))] 36 | use jemallocator::Jemalloc; 37 | 38 | #[cfg(not(target_env = "msvc"))] 39 | #[global_allocator] 40 | static GLOBAL: Jemalloc = Jemalloc; 41 | -------------------------------------------------------------------------------- /evm/src/memory/columns.rs: -------------------------------------------------------------------------------- 1 | //! Memory registers. 2 | 3 | use crate::memory::{NUM_CHANNELS, VALUE_LIMBS}; 4 | 5 | // Columns for memory operations, ordered by (addr, timestamp). 6 | /// 1 if this is an actual memory operation, or 0 if it's a padding row. 
7 | pub(crate) const FILTER: usize = 0; 8 | pub(crate) const TIMESTAMP: usize = FILTER + 1; 9 | pub(crate) const IS_READ: usize = TIMESTAMP + 1; 10 | pub(crate) const ADDR_CONTEXT: usize = IS_READ + 1; 11 | pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1; 12 | pub(crate) const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1; 13 | 14 | // Eight 32-bit limbs hold a total of 256 bits. 15 | // If a value represents an integer, it is little-endian encoded. 16 | const VALUE_START: usize = ADDR_VIRTUAL + 1; 17 | pub(crate) const fn value_limb(i: usize) -> usize { 18 | debug_assert!(i < VALUE_LIMBS); 19 | VALUE_START + i 20 | } 21 | 22 | // Flags to indicate whether this part of the address differs from the next row, 23 | // and the previous parts do not differ. 24 | // That is, e.g., `SEGMENT_FIRST_CHANGE` is `F::ONE` iff `ADDR_CONTEXT` is the same in this 25 | // row and the next, but `ADDR_SEGMENT` is not. 26 | pub(crate) const CONTEXT_FIRST_CHANGE: usize = VALUE_START + VALUE_LIMBS; 27 | pub(crate) const SEGMENT_FIRST_CHANGE: usize = CONTEXT_FIRST_CHANGE + 1; 28 | pub(crate) const VIRTUAL_FIRST_CHANGE: usize = SEGMENT_FIRST_CHANGE + 1; 29 | 30 | // We use a range check to enforce the ordering. 31 | pub(crate) const RANGE_CHECK: usize = VIRTUAL_FIRST_CHANGE + NUM_CHANNELS; 32 | // The counter column (used for the range check) starts from 0 and increments. 33 | pub(crate) const COUNTER: usize = RANGE_CHECK + 1; 34 | // Helper columns for the permutation argument used to enforce the range check. 
35 | pub(crate) const RANGE_CHECK_PERMUTED: usize = COUNTER + 1; 36 | pub(crate) const COUNTER_PERMUTED: usize = RANGE_CHECK_PERMUTED + 1; 37 | 38 | pub(crate) const NUM_COLUMNS: usize = COUNTER_PERMUTED + 1; 39 | -------------------------------------------------------------------------------- /evm/src/memory/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod columns; 2 | pub mod memory_stark; 3 | pub mod segments; 4 | 5 | // TODO: Move to CPU module, now that channels have been removed from the memory table. 6 | pub(crate) const NUM_CHANNELS: usize = crate::cpu::membus::NUM_CHANNELS; 7 | pub(crate) const VALUE_LIMBS: usize = 8; 8 | -------------------------------------------------------------------------------- /evm/src/vanishing_poly.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::extension::{Extendable, FieldExtension}; 2 | use plonky2::field::packed::PackedField; 3 | use plonky2::hash::hash_types::RichField; 4 | use plonky2::plonk::circuit_builder::CircuitBuilder; 5 | use plonky2::plonk::config::GenericConfig; 6 | 7 | use crate::config::StarkConfig; 8 | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer}; 9 | use crate::cross_table_lookup::{ 10 | eval_cross_table_lookup_checks, eval_cross_table_lookup_checks_circuit, CtlCheckVars, 11 | CtlCheckVarsTarget, 12 | }; 13 | use crate::permutation::{ 14 | eval_permutation_checks, eval_permutation_checks_circuit, PermutationCheckDataTarget, 15 | PermutationCheckVars, 16 | }; 17 | use crate::stark::Stark; 18 | use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars}; 19 | 20 | pub(crate) fn eval_vanishing_poly( 21 | stark: &S, 22 | config: &StarkConfig, 23 | vars: StarkEvaluationVars, 24 | permutation_vars: Option>, 25 | ctl_vars: &[CtlCheckVars], 26 | consumer: &mut ConstraintConsumer

, 27 | ) where 28 | F: RichField + Extendable, 29 | FE: FieldExtension, 30 | P: PackedField, 31 | C: GenericConfig, 32 | S: Stark, 33 | { 34 | stark.eval_packed_generic(vars, consumer); 35 | if let Some(permutation_vars) = permutation_vars { 36 | eval_permutation_checks::( 37 | stark, 38 | config, 39 | vars, 40 | permutation_vars, 41 | consumer, 42 | ); 43 | } 44 | eval_cross_table_lookup_checks::(vars, ctl_vars, consumer); 45 | } 46 | 47 | pub(crate) fn eval_vanishing_poly_circuit( 48 | builder: &mut CircuitBuilder, 49 | stark: &S, 50 | config: &StarkConfig, 51 | vars: StarkEvaluationTargets, 52 | permutation_data: Option>, 53 | ctl_vars: &[CtlCheckVarsTarget], 54 | consumer: &mut RecursiveConstraintConsumer, 55 | ) where 56 | F: RichField + Extendable, 57 | C: GenericConfig, 58 | S: Stark, 59 | [(); S::COLUMNS]:, 60 | { 61 | stark.eval_ext_circuit(builder, vars, consumer); 62 | if let Some(permutation_data) = permutation_data { 63 | eval_permutation_checks_circuit::( 64 | builder, 65 | stark, 66 | config, 67 | vars, 68 | permutation_data, 69 | consumer, 70 | ); 71 | } 72 | eval_cross_table_lookup_checks_circuit::(builder, vars, ctl_vars, consumer); 73 | } 74 | -------------------------------------------------------------------------------- /evm/src/vars.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::packed::PackedField; 2 | use plonky2::field::types::Field; 3 | use plonky2::iop::ext_target::ExtensionTarget; 4 | 5 | #[derive(Debug, Copy, Clone)] 6 | pub struct StarkEvaluationVars<'a, F, P, const COLUMNS: usize> 7 | where 8 | F: Field, 9 | P: PackedField, 10 | { 11 | pub local_values: &'a [P; COLUMNS], 12 | pub next_values: &'a [P; COLUMNS], 13 | } 14 | 15 | #[derive(Debug, Copy, Clone)] 16 | pub struct StarkEvaluationTargets<'a, const D: usize, const COLUMNS: usize> { 17 | pub local_values: &'a [ExtensionTarget; COLUMNS], 18 | pub next_values: &'a [ExtensionTarget; COLUMNS], 19 | } 20 | 
-------------------------------------------------------------------------------- /evm/src/witness/errors.rs: -------------------------------------------------------------------------------- 1 | #[allow(dead_code)] 2 | #[derive(Debug)] 3 | pub enum ProgramError { 4 | OutOfGas, 5 | InvalidOpcode, 6 | StackUnderflow, 7 | InvalidJumpDestination, 8 | InvalidJumpiDestination, 9 | StackOverflow, 10 | } 11 | -------------------------------------------------------------------------------- /evm/src/witness/mem_tx.rs: -------------------------------------------------------------------------------- 1 | use crate::witness::memory::{MemoryOp, MemoryOpKind, MemoryState}; 2 | 3 | pub fn apply_mem_ops(state: &mut MemoryState, mut ops: Vec) { 4 | ops.sort_unstable_by_key(|mem_op| mem_op.timestamp); 5 | 6 | for op in ops { 7 | let MemoryOp { address, op, .. } = op; 8 | if let MemoryOpKind::Write(val) = op { 9 | state.set(address, val); 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /evm/src/witness/mod.rs: -------------------------------------------------------------------------------- 1 | mod errors; 2 | pub(crate) mod memory; 3 | mod operation; 4 | pub(crate) mod state; 5 | pub(crate) mod traces; 6 | pub mod transition; 7 | pub(crate) mod util; 8 | -------------------------------------------------------------------------------- /evm/src/witness/state.rs: -------------------------------------------------------------------------------- 1 | use crate::cpu::kernel::aggregator::KERNEL; 2 | 3 | const KERNEL_CONTEXT: usize = 0; 4 | 5 | #[derive(Clone, Copy, Debug, Eq, PartialEq)] 6 | pub struct RegistersState { 7 | pub program_counter: usize, 8 | pub is_kernel: bool, 9 | pub stack_len: usize, 10 | pub context: usize, 11 | } 12 | 13 | impl RegistersState { 14 | pub(crate) fn effective_context(&self) -> usize { 15 | if self.is_kernel { 16 | KERNEL_CONTEXT 17 | } else { 18 | self.context 19 | } 20 | } 21 | } 22 | 23 | impl Default 
for RegistersState { 24 | fn default() -> Self { 25 | Self { 26 | program_counter: KERNEL.global_labels["main"], 27 | is_kernel: true, 28 | stack_len: 0, 29 | context: 0, 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /evm/tests/empty_txn_list.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::time::Duration; 3 | 4 | use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV}; 5 | use eth_trie_utils::partial_trie::PartialTrie; 6 | use plonky2::field::goldilocks_field::GoldilocksField; 7 | use plonky2::plonk::config::PoseidonGoldilocksConfig; 8 | use plonky2::util::timing::TimingTree; 9 | use plonky2_evm::all_stark::AllStark; 10 | use plonky2_evm::config::StarkConfig; 11 | use plonky2_evm::generation::{GenerationInputs, TrieInputs}; 12 | use plonky2_evm::proof::BlockMetadata; 13 | use plonky2_evm::prover::prove; 14 | use plonky2_evm::verifier::verify_proof; 15 | 16 | type F = GoldilocksField; 17 | const D: usize = 2; 18 | type C = PoseidonGoldilocksConfig; 19 | 20 | /// Execute the empty list of transactions, i.e. a no-op. 
21 | #[test] 22 | fn test_empty_txn_list() -> anyhow::Result<()> { 23 | init_logger(); 24 | 25 | let all_stark = AllStark::::default(); 26 | let config = StarkConfig::standard_fast_config(); 27 | 28 | let block_metadata = BlockMetadata::default(); 29 | 30 | let state_trie = PartialTrie::Empty; 31 | let transactions_trie = PartialTrie::Empty; 32 | let receipts_trie = PartialTrie::Empty; 33 | let storage_tries = vec![]; 34 | 35 | let state_trie_root = state_trie.calc_hash(); 36 | let txns_trie_root = transactions_trie.calc_hash(); 37 | let receipts_trie_root = receipts_trie.calc_hash(); 38 | 39 | let inputs = GenerationInputs { 40 | signed_txns: vec![], 41 | tries: TrieInputs { 42 | state_trie, 43 | transactions_trie, 44 | receipts_trie, 45 | storage_tries, 46 | }, 47 | contract_code: HashMap::new(), 48 | block_metadata, 49 | }; 50 | 51 | let mut timing = TimingTree::new("prove", log::Level::Debug); 52 | let proof = prove::(&all_stark, &config, inputs, &mut timing)?; 53 | timing.filter(Duration::from_millis(100)).print(); 54 | 55 | assert_eq!( 56 | proof.public_values.trie_roots_before.state_root, 57 | state_trie_root 58 | ); 59 | assert_eq!( 60 | proof.public_values.trie_roots_after.state_root, 61 | state_trie_root 62 | ); 63 | assert_eq!( 64 | proof.public_values.trie_roots_before.transactions_root, 65 | txns_trie_root 66 | ); 67 | assert_eq!( 68 | proof.public_values.trie_roots_after.transactions_root, 69 | txns_trie_root 70 | ); 71 | assert_eq!( 72 | proof.public_values.trie_roots_before.receipts_root, 73 | receipts_trie_root 74 | ); 75 | assert_eq!( 76 | proof.public_values.trie_roots_after.receipts_root, 77 | receipts_trie_root 78 | ); 79 | 80 | verify_proof(all_stark, proof, &config) 81 | } 82 | 83 | fn init_logger() { 84 | let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "info")); 85 | } 86 | -------------------------------------------------------------------------------- /field/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "plonky2_field" 3 | description = "Finite field arithmetic" 4 | version = "0.1.0" 5 | edition = "2021" 6 | 7 | [dependencies] 8 | log = "0.4.14" 9 | rustacuda = "0.1.3" 10 | rustacuda_core = "0.1.2" 11 | 12 | anyhow = { version = "1.0.40", default-features = false } 13 | itertools = { version = "0.10.0", default-features = false, features = ["use_alloc"] } 14 | num = { version = "0.4", default-features = false, features = ["alloc", "rand"] } 15 | plonky2_util = { path = "../util", default-features = false } 16 | rand = { version = "0.8.5", default-features = false, features = ["getrandom"] } 17 | serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } 18 | static_assertions = { version = "1.1.0", default-features = false } 19 | unroll = { version = "0.1.5", default-features = false } 20 | -------------------------------------------------------------------------------- /field/src/arch/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(target_arch = "x86_64")] 2 | pub mod x86_64; 3 | -------------------------------------------------------------------------------- /field/src/arch/x86_64/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all( 2 | target_feature = "avx2", 3 | not(all( 4 | target_feature = "avx512bw", 5 | target_feature = "avx512cd", 6 | target_feature = "avx512dq", 7 | target_feature = "avx512f", 8 | target_feature = "avx512vl" 9 | )) 10 | ))] 11 | pub mod avx2_goldilocks_field; 12 | 13 | #[cfg(all( 14 | target_feature = "avx512bw", 15 | target_feature = "avx512cd", 16 | target_feature = "avx512dq", 17 | target_feature = "avx512f", 18 | target_feature = "avx512vl" 19 | ))] 20 | pub mod avx512_goldilocks_field; 21 | -------------------------------------------------------------------------------- /field/src/batch_util.rs: 
-------------------------------------------------------------------------------- 1 | use crate::packable::Packable; 2 | use crate::packed::PackedField; 3 | use crate::types::Field; 4 | 5 | fn pack_with_leftovers_split_point(slice: &[P::Scalar]) -> usize { 6 | let n = slice.len(); 7 | let n_leftover = n % P::WIDTH; 8 | n - n_leftover 9 | } 10 | 11 | fn pack_slice_with_leftovers(slice: &[P::Scalar]) -> (&[P], &[P::Scalar]) { 12 | let split_point = pack_with_leftovers_split_point::

(slice); 13 | let (slice_packable, slice_leftovers) = slice.split_at(split_point); 14 | let slice_packed = P::pack_slice(slice_packable); 15 | (slice_packed, slice_leftovers) 16 | } 17 | 18 | fn pack_slice_with_leftovers_mut( 19 | slice: &mut [P::Scalar], 20 | ) -> (&mut [P], &mut [P::Scalar]) { 21 | let split_point = pack_with_leftovers_split_point::

(slice); 22 | let (slice_packable, slice_leftovers) = slice.split_at_mut(split_point); 23 | let slice_packed = P::pack_slice_mut(slice_packable); 24 | (slice_packed, slice_leftovers) 25 | } 26 | 27 | /// Elementwise inplace multiplication of two slices of field elements. 28 | /// Implementation be faster than the trivial for loop. 29 | pub fn batch_multiply_inplace(out: &mut [F], a: &[F]) { 30 | let n = out.len(); 31 | assert_eq!(n, a.len(), "both arrays must have the same length"); 32 | 33 | // Split out slice of vectors, leaving leftovers as scalars 34 | let (out_packed, out_leftovers) = 35 | pack_slice_with_leftovers_mut::<::Packing>(out); 36 | let (a_packed, a_leftovers) = pack_slice_with_leftovers::<::Packing>(a); 37 | 38 | // Multiply packed and the leftovers 39 | for (x_out, x_a) in out_packed.iter_mut().zip(a_packed) { 40 | *x_out *= *x_a; 41 | } 42 | for (x_out, x_a) in out_leftovers.iter_mut().zip(a_leftovers) { 43 | *x_out *= *x_a; 44 | } 45 | } 46 | 47 | /// Elementwise inplace addition of two slices of field elements. 48 | /// Implementation be faster than the trivial for loop. 
49 | pub fn batch_add_inplace(out: &mut [F], a: &[F]) { 50 | let n = out.len(); 51 | assert_eq!(n, a.len(), "both arrays must have the same length"); 52 | 53 | // Split out slice of vectors, leaving leftovers as scalars 54 | let (out_packed, out_leftovers) = 55 | pack_slice_with_leftovers_mut::<::Packing>(out); 56 | let (a_packed, a_leftovers) = pack_slice_with_leftovers::<::Packing>(a); 57 | 58 | // Add packed and the leftovers 59 | for (x_out, x_a) in out_packed.iter_mut().zip(a_packed) { 60 | *x_out += *x_a; 61 | } 62 | for (x_out, x_a) in out_leftovers.iter_mut().zip(a_leftovers) { 63 | *x_out += *x_a; 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /field/src/cosets.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use num::bigint::BigUint; 4 | 5 | use crate::types::Field; 6 | 7 | /// Finds a set of shifts that result in unique cosets for the multiplicative subgroup of size 8 | /// `2^subgroup_bits`. 9 | pub fn get_unique_coset_shifts(subgroup_size: usize, num_shifts: usize) -> Vec { 10 | // From Lagrange's theorem. 11 | let num_cosets = (F::order() - 1u32) / (subgroup_size as u32); 12 | assert!( 13 | BigUint::from(num_shifts) <= num_cosets, 14 | "The subgroup does not have enough distinct cosets" 15 | ); 16 | 17 | // Let g be a generator of the entire multiplicative group. Let n be the order of the subgroup. 18 | // The subgroup can be written as . We can use g^0, ..., g^(num_shifts - 1) as our 19 | // shifts, since g^i are distinct cosets provided i < |F*| / n, which we checked. 
20 | F::MULTIPLICATIVE_GROUP_GENERATOR 21 | .powers() 22 | .take(num_shifts) 23 | .collect() 24 | } 25 | 26 | #[cfg(test)] 27 | mod tests { 28 | use std::collections::HashSet; 29 | 30 | use crate::cosets::get_unique_coset_shifts; 31 | use crate::goldilocks_field::GoldilocksField; 32 | use crate::types::Field; 33 | 34 | #[test] 35 | fn distinct_cosets() { 36 | type F = GoldilocksField; 37 | const SUBGROUP_BITS: usize = 5; 38 | const NUM_SHIFTS: usize = 50; 39 | 40 | let generator = F::primitive_root_of_unity(SUBGROUP_BITS); 41 | let subgroup_size = 1 << SUBGROUP_BITS; 42 | 43 | let shifts = get_unique_coset_shifts::(subgroup_size, NUM_SHIFTS); 44 | 45 | let mut union = HashSet::new(); 46 | for shift in shifts { 47 | let coset = F::cyclic_subgroup_coset_known_order(generator, shift, subgroup_size); 48 | assert!( 49 | coset.into_iter().all(|x| union.insert(x)), 50 | "Duplicate element!" 51 | ); 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /field/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(incomplete_features)] 2 | #![allow(clippy::new_without_default)] 3 | #![allow(clippy::too_many_arguments)] 4 | #![allow(clippy::type_complexity)] 5 | #![allow(clippy::len_without_is_empty)] 6 | #![allow(clippy::needless_range_loop)] 7 | #![allow(clippy::return_self_not_must_use)] 8 | #![feature(generic_const_exprs)] 9 | #![feature(stdsimd)] 10 | #![feature(specialization)] 11 | #![cfg_attr(not(test), no_std)] 12 | 13 | extern crate alloc; 14 | 15 | mod inversion; 16 | 17 | pub(crate) mod arch; 18 | 19 | pub mod batch_util; 20 | pub mod cosets; 21 | pub mod extension; 22 | pub mod fft; 23 | pub mod goldilocks_extensions; 24 | pub mod goldilocks_field; 25 | pub mod interpolation; 26 | pub mod ops; 27 | pub mod packable; 28 | pub mod packed; 29 | pub mod polynomial; 30 | pub mod secp256k1_base; 31 | pub mod secp256k1_scalar; 32 | pub mod types; 33 | pub mod 
zero_poly_coset; 34 | 35 | #[cfg(test)] 36 | mod field_testing; 37 | 38 | #[cfg(test)] 39 | mod prime_field_testing; 40 | -------------------------------------------------------------------------------- /field/src/ops.rs: -------------------------------------------------------------------------------- 1 | use core::ops::Mul; 2 | 3 | pub trait Square { 4 | fn square(&self) -> Self; 5 | } 6 | 7 | impl + Copy> Square for F { 8 | default fn square(&self) -> Self { 9 | *self * *self 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /field/src/packable.rs: -------------------------------------------------------------------------------- 1 | use crate::packed::PackedField; 2 | use crate::types::Field; 3 | 4 | /// Points us to the default packing for a particular field. There may me multiple choices of 5 | /// PackedField for a particular Field (e.g. every Field is also a PackedField), but this is the 6 | /// recommended one. The recommended packing varies by target_arch and target_feature. 
7 | pub trait Packable: Field { 8 | type Packing: PackedField; 9 | } 10 | 11 | impl Packable for F { 12 | default type Packing = Self; 13 | } 14 | 15 | #[cfg(all( 16 | target_arch = "x86_64", 17 | target_feature = "avx2", 18 | not(all( 19 | target_feature = "avx512bw", 20 | target_feature = "avx512cd", 21 | target_feature = "avx512dq", 22 | target_feature = "avx512f", 23 | target_feature = "avx512vl" 24 | )) 25 | ))] 26 | impl Packable for crate::goldilocks_field::GoldilocksField { 27 | type Packing = crate::arch::x86_64::avx2_goldilocks_field::Avx2GoldilocksField; 28 | } 29 | 30 | #[cfg(all( 31 | target_arch = "x86_64", 32 | target_feature = "avx512bw", 33 | target_feature = "avx512cd", 34 | target_feature = "avx512dq", 35 | target_feature = "avx512f", 36 | target_feature = "avx512vl" 37 | ))] 38 | impl Packable for crate::goldilocks_field::GoldilocksField { 39 | type Packing = crate::arch::x86_64::avx512_goldilocks_field::Avx512GoldilocksField; 40 | } 41 | -------------------------------------------------------------------------------- /field/src/zero_poly_coset.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use crate::packed::PackedField; 4 | use crate::types::Field; 5 | 6 | /// Precomputations of the evaluation of `Z_H(X) = X^n - 1` on a coset `gK` with `H <= K`. 7 | pub struct ZeroPolyOnCoset { 8 | /// `n = |H|`. 9 | pub n: F, 10 | /// `rate = |K|/|H|`. 11 | pub rate: usize, 12 | /// Holds `g^n * (w^n)^i - 1 = g^n * v^i - 1` for `i in 0..rate`, with `w` a generator of `K` and `v` a 13 | /// `rate`-primitive root of unity. 14 | pub evals: Vec, 15 | /// Holds the multiplicative inverses of `evals`. 
16 | pub inverses: Vec, 17 | } 18 | 19 | impl ZeroPolyOnCoset { 20 | pub fn new(n_log: usize, rate_bits: usize) -> Self { 21 | let g_pow_n = F::coset_shift().exp_power_of_2(n_log); 22 | let evals = F::two_adic_subgroup(rate_bits) 23 | .into_iter() 24 | .map(|x| g_pow_n * x - F::ONE) 25 | .collect::>(); 26 | let inverses = F::batch_multiplicative_inverse(&evals); 27 | Self { 28 | n: F::from_canonical_usize(1 << n_log), 29 | rate: 1 << rate_bits, 30 | evals, 31 | inverses, 32 | } 33 | } 34 | 35 | /// Returns `Z_H(g * w^i)`. 36 | pub fn eval(&self, i: usize) -> F { 37 | self.evals[i % self.rate] 38 | } 39 | 40 | /// Returns `1 / Z_H(g * w^i)`. 41 | pub fn eval_inverse(&self, i: usize) -> F { 42 | self.inverses[i % self.rate] 43 | } 44 | 45 | /// Like `eval_inverse`, but for a range of indices starting with `i_start`. 46 | pub fn eval_inverse_packed>(&self, i_start: usize) -> P { 47 | let mut packed = P::ZEROS; 48 | packed 49 | .as_slice_mut() 50 | .iter_mut() 51 | .enumerate() 52 | .for_each(|(j, packed_j)| *packed_j = self.eval_inverse(i_start + j)); 53 | packed 54 | } 55 | 56 | /// Returns `L_0(x) = Z_H(x)/(n * (x - 1))` with `x = w^i`. 57 | pub fn eval_l_0(&self, i: usize, x: F) -> F { 58 | // Could also precompute the inverses using Montgomery. 
59 | self.eval(i) * (self.n * (x - F::ONE)).inverse() 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /insertion/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "plonky2_insertion" 3 | description = "Circuit implementation of list insertion" 4 | version = "0.1.0" 5 | edition = "2021" 6 | 7 | [dependencies] 8 | anyhow = { version = "1.0.40", default-features = false } 9 | plonky2 = { path = "../plonky2", default-features = false } 10 | 11 | [dev-dependencies] 12 | plonky2 = { path = "../plonky2" } 13 | 14 | -------------------------------------------------------------------------------- /insertion/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::new_without_default)] 2 | #![allow(clippy::too_many_arguments)] 3 | #![allow(clippy::type_complexity)] 4 | #![allow(clippy::len_without_is_empty)] 5 | #![allow(clippy::needless_range_loop)] 6 | #![allow(clippy::return_self_not_must_use)] 7 | #![no_std] 8 | 9 | extern crate alloc; 10 | 11 | pub mod insert_gadget; 12 | pub mod insertion_gate; 13 | -------------------------------------------------------------------------------- /maybe_rayon/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "maybe_rayon" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [features] 7 | parallel = ["rayon"] 8 | 9 | [dependencies] 10 | rayon = { version = "1.5.3", optional = true } 11 | -------------------------------------------------------------------------------- /plonky2/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "plonky2" 3 | description = "Recursive SNARKs based on PLONK and FRI" 4 | version = "0.1.0" 5 | authors = ["Polygon Zero "] 6 | readme = "README.md" 7 | repository = "https://github.com/mir-protocol/plonky2" 8 
| keywords = ["cryptography", "SNARK", "PLONK", "FRI"] 9 | categories = ["cryptography"] 10 | edition = "2021" 11 | default-run = "generate_constants" 12 | 13 | [features] 14 | default = ["gate_testing", "parallel", "rand_chacha", "std", "timing"] 15 | gate_testing = [] 16 | parallel = ["hashbrown/rayon", "maybe_rayon/parallel"] 17 | std = ["anyhow/std", "rand/std"] 18 | timing = ["std"] 19 | 20 | [dependencies] 21 | ahash = { version = "0.7.6", default-features = false, features = ["compile-time-rng"] } # NOTE: Be sure to keep this version the same as the dependency in `hashbrown`. 22 | anyhow = { version = "1.0.40", default-features = false } 23 | hashbrown = { version = "0.12.3", default-features = false, features = ["ahash", "serde"] } # NOTE: When upgrading, see `ahash` dependency. 24 | itertools = { version = "0.10.0", default-features = false } 25 | keccak-hash = { version = "0.8.0", default-features = false } 26 | log = { version = "0.4.14", default-features = false } 27 | maybe_rayon = { path = "../maybe_rayon", default-features = false } 28 | num = { version = "0.4", default-features = false, features = ["rand"] } 29 | plonky2_field = { path = "../field", default-features = false } 30 | plonky2_util = { path = "../util", default-features = false } 31 | rand = { version = "0.8.4", default-features = false } 32 | rand_chacha = { version = "0.3.1", optional = true, default-features = false } 33 | serde = { version = "1.0", default-features = false, features = ["derive"] } 34 | static_assertions = { version = "1.1.0", default-features = false } 35 | unroll = { version = "0.1.5", default-features = false } 36 | 37 | plonky2_cuda = {path = "../cuda"} 38 | rustacuda = "0.1.3" 39 | rustacuda_core = "0.1.2" 40 | 41 | [dev-dependencies] 42 | criterion = { version = "0.4.0", default-features = false } 43 | env_logger = { version = "0.9.0", default-features = false } 44 | num_cpus = { version = "1.14.0", default-features = false } 45 | plonky2 = { path = "." 
} 46 | rand = { version = "0.8.4", default-features = false, features = ["getrandom"] } 47 | rand_chacha = { version = "0.3.1", default-features = false } 48 | serde_cbor = { version = "0.11.2" } 49 | structopt = { version = "0.3.26", default-features = false } 50 | tynm = { version = "0.1.6", default-features = false } 51 | 52 | [target.'cfg(not(target_env = "msvc"))'.dev-dependencies] 53 | jemallocator = "0.5.0" 54 | 55 | [[bin]] 56 | name = "generate_constants" 57 | required-features = ["rand_chacha"] 58 | 59 | [[bench]] 60 | name = "field_arithmetic" 61 | harness = false 62 | 63 | [[bench]] 64 | name = "ffts" 65 | harness = false 66 | 67 | [[bench]] 68 | name = "hashing" 69 | harness = false 70 | 71 | [[bench]] 72 | name = "merkle" 73 | harness = false 74 | 75 | [[bench]] 76 | name = "transpose" 77 | harness = false 78 | 79 | [[bench]] 80 | name = "reverse_index_bits" 81 | harness = false 82 | -------------------------------------------------------------------------------- /plonky2/benches/allocator/mod.rs: -------------------------------------------------------------------------------- 1 | // Set up Jemalloc 2 | #[cfg(not(target_env = "msvc"))] 3 | use jemallocator::Jemalloc; 4 | 5 | #[cfg(not(target_env = "msvc"))] 6 | #[global_allocator] 7 | static GLOBAL: Jemalloc = Jemalloc; 8 | -------------------------------------------------------------------------------- /plonky2/benches/ffts.rs: -------------------------------------------------------------------------------- 1 | mod allocator; 2 | 3 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 4 | use plonky2::field::goldilocks_field::GoldilocksField; 5 | use plonky2::field::polynomial::PolynomialCoeffs; 6 | use plonky2::field::types::Field; 7 | use tynm::type_name; 8 | 9 | pub(crate) fn bench_ffts(c: &mut Criterion) { 10 | let mut group = c.benchmark_group(&format!("fft<{}>", type_name::())); 11 | 12 | for size_log in [13, 14, 15, 16] { 13 | let size = 1 << size_log; 14 | 
group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| { 15 | let coeffs = PolynomialCoeffs::new(F::rand_vec(size)); 16 | b.iter(|| coeffs.clone().fft_with_options(None, None)); 17 | }); 18 | } 19 | } 20 | 21 | pub(crate) fn bench_ldes(c: &mut Criterion) { 22 | const RATE_BITS: usize = 3; 23 | 24 | let mut group = c.benchmark_group(&format!("lde<{}>", type_name::())); 25 | 26 | for size_log in [13, 14, 15, 16] { 27 | let orig_size = 1 << (size_log - RATE_BITS); 28 | let lde_size = 1 << size_log; 29 | 30 | group.bench_with_input(BenchmarkId::from_parameter(lde_size), &lde_size, |b, _| { 31 | let coeffs = PolynomialCoeffs::new(F::rand_vec(orig_size)); 32 | b.iter(|| { 33 | let padded_coeffs = coeffs.lde(RATE_BITS); 34 | padded_coeffs.fft_with_options(Some(RATE_BITS), None) 35 | }); 36 | }); 37 | } 38 | } 39 | 40 | fn criterion_benchmark(c: &mut Criterion) { 41 | bench_ffts::(c); 42 | bench_ldes::(c); 43 | } 44 | 45 | criterion_group!(benches, criterion_benchmark); 46 | criterion_main!(benches); 47 | -------------------------------------------------------------------------------- /plonky2/benches/hashing.rs: -------------------------------------------------------------------------------- 1 | mod allocator; 2 | 3 | use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; 4 | use plonky2::field::goldilocks_field::GoldilocksField; 5 | use plonky2::field::types::Sample; 6 | use plonky2::hash::hash_types::{BytesHash, RichField}; 7 | use plonky2::hash::hashing::SPONGE_WIDTH; 8 | use plonky2::hash::keccak::KeccakHash; 9 | use plonky2::hash::poseidon::Poseidon; 10 | use plonky2::plonk::config::Hasher; 11 | use tynm::type_name; 12 | 13 | pub(crate) fn bench_keccak(c: &mut Criterion) { 14 | c.bench_function("keccak256", |b| { 15 | b.iter_batched( 16 | || (BytesHash::<32>::rand(), BytesHash::<32>::rand()), 17 | |(left, right)| as Hasher>::two_to_one(left, right), 18 | BatchSize::SmallInput, 19 | ) 20 | }); 21 | } 22 | 23 | pub(crate) fn 
bench_poseidon(c: &mut Criterion) { 24 | c.bench_function( 25 | &format!("poseidon<{}, {SPONGE_WIDTH}>", type_name::()), 26 | |b| { 27 | b.iter_batched( 28 | || F::rand_array::(), 29 | |state| F::poseidon(state), 30 | BatchSize::SmallInput, 31 | ) 32 | }, 33 | ); 34 | } 35 | 36 | fn criterion_benchmark(c: &mut Criterion) { 37 | bench_poseidon::(c); 38 | bench_keccak::(c); 39 | } 40 | 41 | criterion_group!(benches, criterion_benchmark); 42 | criterion_main!(benches); 43 | -------------------------------------------------------------------------------- /plonky2/benches/merkle.rs: -------------------------------------------------------------------------------- 1 | mod allocator; 2 | 3 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 4 | use plonky2::field::goldilocks_field::GoldilocksField; 5 | use plonky2::hash::hash_types::RichField; 6 | use plonky2::hash::keccak::KeccakHash; 7 | use plonky2::hash::merkle_tree::MerkleTree; 8 | use plonky2::hash::poseidon::PoseidonHash; 9 | use plonky2::plonk::config::Hasher; 10 | use tynm::type_name; 11 | 12 | const ELEMS_PER_LEAF: usize = 135; 13 | 14 | pub(crate) fn bench_merkle_tree>(c: &mut Criterion) { 15 | let mut group = c.benchmark_group(&format!( 16 | "merkle-tree<{}, {}>", 17 | type_name::(), 18 | type_name::() 19 | )); 20 | group.sample_size(10); 21 | 22 | for size_log in [13, 14, 15] { 23 | let size = 1 << size_log; 24 | group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| { 25 | let leaves = vec![F::rand_vec(ELEMS_PER_LEAF); size]; 26 | b.iter(|| MerkleTree::::new(leaves.clone(), 0)); 27 | }); 28 | } 29 | } 30 | 31 | fn criterion_benchmark(c: &mut Criterion) { 32 | bench_merkle_tree::(c); 33 | bench_merkle_tree::>(c); 34 | } 35 | 36 | criterion_group!(benches, criterion_benchmark); 37 | criterion_main!(benches); 38 | -------------------------------------------------------------------------------- /plonky2/benches/reverse_index_bits.rs: 
-------------------------------------------------------------------------------- 1 | mod allocator; 2 | 3 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 4 | use plonky2::field::goldilocks_field::GoldilocksField; 5 | use plonky2::field::types::Sample; 6 | use plonky2_util::{reverse_index_bits, reverse_index_bits_in_place}; 7 | 8 | type F = GoldilocksField; 9 | 10 | fn benchmark_in_place(c: &mut Criterion) { 11 | let mut group = c.benchmark_group("reverse-index-bits-in-place"); 12 | for width in [1 << 8, 1 << 16, 1 << 24] { 13 | group.bench_with_input(BenchmarkId::from_parameter(width), &width, |b, _| { 14 | let mut values = F::rand_vec(width); 15 | b.iter(|| reverse_index_bits_in_place(&mut values)); 16 | }); 17 | } 18 | } 19 | 20 | fn benchmark_out_of_place(c: &mut Criterion) { 21 | let mut group = c.benchmark_group("reverse-index-bits"); 22 | for width in [1 << 8, 1 << 16, 1 << 24] { 23 | group.bench_with_input(BenchmarkId::from_parameter(width), &width, |b, _| { 24 | let values = F::rand_vec(width); 25 | b.iter(|| reverse_index_bits(&values)); 26 | }); 27 | } 28 | } 29 | 30 | criterion_group!(benches_in_place, benchmark_in_place); 31 | criterion_group!(benches_out_of_place, benchmark_out_of_place); 32 | criterion_main!(benches_in_place, benches_out_of_place); 33 | -------------------------------------------------------------------------------- /plonky2/benches/transpose.rs: -------------------------------------------------------------------------------- 1 | mod allocator; 2 | 3 | use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; 4 | use plonky2::field::goldilocks_field::GoldilocksField; 5 | use plonky2::field::types::Sample; 6 | use plonky2::util::transpose; 7 | 8 | fn criterion_benchmark(c: &mut Criterion) { 9 | type F = GoldilocksField; 10 | 11 | // In practice, for the matrices we care about, each row is associated with a polynomial of 12 | // degree 2^13, and has been low-degree extended to a length of 
2^16. 13 | const WIDTH: usize = 1 << 16; 14 | 15 | let mut group = c.benchmark_group("transpose"); 16 | 17 | // We have matrices with various numbers of polynomials. For example, the witness matrix 18 | // involves 100+ polynomials. 19 | for height in [5, 50, 100, 150] { 20 | group.bench_with_input(BenchmarkId::from_parameter(height), &height, |b, _| { 21 | let matrix = (0..height).map(|_| F::rand_vec(WIDTH)).collect::>(); 22 | b.iter(|| transpose(&matrix)); 23 | }); 24 | } 25 | } 26 | 27 | criterion_group!(benches, criterion_benchmark); 28 | criterion_main!(benches); 29 | -------------------------------------------------------------------------------- /plonky2/examples/factorial.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use plonky2::field::types::Field; 3 | use plonky2::iop::witness::{PartialWitness, WitnessWrite}; 4 | use plonky2::plonk::circuit_builder::CircuitBuilder; 5 | use plonky2::plonk::circuit_data::CircuitConfig; 6 | use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; 7 | 8 | /// An example of using Plonky2 to prove a statement of the form 9 | /// "I know n * (n + 1) * ... * (n + 99)". 10 | /// When n == 1, this is proving knowledge of 100!. 11 | fn main() -> Result<()> { 12 | const D: usize = 2; 13 | type C = PoseidonGoldilocksConfig; 14 | type F = >::F; 15 | 16 | let config = CircuitConfig::standard_recursion_config(); 17 | let mut builder = CircuitBuilder::::new(config); 18 | 19 | // The arithmetic circuit. 20 | let initial = builder.add_virtual_target(); 21 | let mut cur_target = initial; 22 | for i in 2..101 { 23 | let i_target = builder.constant(F::from_canonical_u32(i)); 24 | cur_target = builder.mul(cur_target, i_target); 25 | } 26 | 27 | // Public inputs are the initial value (provided below) and the result (which is generated). 
28 | builder.register_public_input(initial); 29 | builder.register_public_input(cur_target); 30 | 31 | let mut pw = PartialWitness::new(); 32 | pw.set_target(initial, F::ONE); 33 | 34 | let data = builder.build::(); 35 | let proof = data.prove(pw)?; 36 | 37 | println!( 38 | "Factorial starting at {} is {}", 39 | proof.public_inputs[0], proof.public_inputs[1] 40 | ); 41 | 42 | data.verify(proof) 43 | } 44 | -------------------------------------------------------------------------------- /plonky2/examples/fibonacci.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use plonky2::field::types::Field; 3 | use plonky2::iop::witness::{PartialWitness, WitnessWrite}; 4 | use plonky2::plonk::circuit_builder::CircuitBuilder; 5 | use plonky2::plonk::circuit_data::CircuitConfig; 6 | use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; 7 | 8 | /// An example of using Plonky2 to prove a statement of the form 9 | /// "I know the 100th element of the Fibonacci sequence, starting with constants a and b." 10 | /// When a == 0 and b == 1, this is proving knowledge of the 100th (standard) Fibonacci number. 11 | fn main() -> Result<()> { 12 | const D: usize = 2; 13 | type C = PoseidonGoldilocksConfig; 14 | type F = >::F; 15 | 16 | let config = CircuitConfig::standard_recursion_config(); 17 | let mut builder = CircuitBuilder::::new(config); 18 | 19 | // The arithmetic circuit. 20 | let initial_a = builder.add_virtual_target(); 21 | let initial_b = builder.add_virtual_target(); 22 | let mut prev_target = initial_a; 23 | let mut cur_target = initial_b; 24 | for _ in 0..99 { 25 | let temp = builder.add(prev_target, cur_target); 26 | prev_target = cur_target; 27 | cur_target = temp; 28 | } 29 | 30 | // Public inputs are the two initial values (provided below) and the result (which is generated). 
31 | builder.register_public_input(initial_a); 32 | builder.register_public_input(initial_b); 33 | builder.register_public_input(cur_target); 34 | 35 | // Provide initial values. 36 | let mut pw = PartialWitness::new(); 37 | pw.set_target(initial_a, F::ZERO); 38 | pw.set_target(initial_b, F::ONE); 39 | 40 | let data = builder.build::(); 41 | let proof = data.prove(pw)?; 42 | 43 | println!( 44 | "100th Fibonacci number mod |F| (starting with {}, {}) is: {}", 45 | proof.public_inputs[0], proof.public_inputs[1], proof.public_inputs[2] 46 | ); 47 | 48 | data.verify(proof) 49 | } 50 | -------------------------------------------------------------------------------- /plonky2/examples/square_root.rs: -------------------------------------------------------------------------------- 1 | use core::marker::PhantomData; 2 | 3 | use anyhow::Result; 4 | use plonky2::field::types::{PrimeField, Sample}; 5 | use plonky2::hash::hash_types::RichField; 6 | use plonky2::iop::generator::{GeneratedValues, SimpleGenerator}; 7 | use plonky2::iop::target::Target; 8 | use plonky2::iop::witness::{PartialWitness, PartitionWitness, Witness, WitnessWrite}; 9 | use plonky2::plonk::circuit_builder::CircuitBuilder; 10 | use plonky2::plonk::circuit_data::CircuitConfig; 11 | use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; 12 | use plonky2_field::extension::Extendable; 13 | use plonky2_field::types::Field; 14 | 15 | /// A generator used by the prover to calculate the square root (`x`) of a given value 16 | /// (`x_squared`), outside of the circuit, in order to supply it as an additional public input. 
17 | #[derive(Debug)] 18 | struct SquareRootGenerator, const D: usize> { 19 | x: Target, 20 | x_squared: Target, 21 | _phantom: PhantomData, 22 | } 23 | 24 | impl, const D: usize> SimpleGenerator 25 | for SquareRootGenerator 26 | { 27 | fn dependencies(&self) -> Vec { 28 | vec![self.x_squared] 29 | } 30 | 31 | fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { 32 | let x_squared = witness.get_target(self.x_squared); 33 | let x = x_squared.sqrt().unwrap(); 34 | // let x = F::from_canonical_u32(2); 35 | println!("Square root: {x}"); 36 | 37 | out_buffer.set_target(self.x, x); 38 | } 39 | } 40 | 41 | /// An example of using Plonky2 to prove a statement of the form 42 | /// "I know the square root of this field element." 43 | fn main() -> Result<()> { 44 | const D: usize = 2; 45 | type C = PoseidonGoldilocksConfig; 46 | type F = >::F; 47 | 48 | let config = CircuitConfig::standard_recursion_config(); 49 | 50 | let mut builder = CircuitBuilder::::new(config); 51 | 52 | let x = builder.add_virtual_target(); 53 | let x_squared = builder.square(x); 54 | 55 | builder.register_public_input(x_squared); 56 | 57 | builder.add_simple_generator(SquareRootGenerator:: { 58 | x, 59 | x_squared, 60 | _phantom: PhantomData, 61 | }); 62 | 63 | // Randomly generate the value of x^2: any quadratic residue in the field works. 
64 | let x_squared_value = F::from_canonical_u32(4); 65 | // let x_squared_value = { 66 | // let mut val = F::rand(); 67 | // while !val.is_quadratic_residue() { 68 | // val = F::rand(); 69 | // } 70 | // val 71 | // }; 72 | 73 | let mut pw = PartialWitness::new(); 74 | pw.set_target(x_squared, x_squared_value); 75 | 76 | let data = builder.build::(); 77 | let proof = data.prove(pw.clone())?; 78 | 79 | let x_squared_actual = proof.public_inputs[0]; 80 | println!("Field element (square): {x_squared_actual}"); 81 | println!("pub: {:?}, degree: {}", proof.public_inputs, data.); 82 | let res = data.verify(proof); 83 | println!("{:?}", res); 84 | res 85 | } 86 | -------------------------------------------------------------------------------- /plonky2/src/bin/generate_constants.rs: -------------------------------------------------------------------------------- 1 | //! Generates random constants using ChaCha20, seeded with zero. 2 | 3 | #![allow(clippy::needless_range_loop)] 4 | 5 | use plonky2::field::goldilocks_field::GoldilocksField; 6 | use plonky2::field::types::Field64; 7 | use rand::{Rng, SeedableRng}; 8 | use rand_chacha::ChaCha8Rng; 9 | 10 | const SAMPLE_RANGE_END: u64 = GoldilocksField::ORDER; 11 | 12 | const N: usize = 12 * 30; // For Poseidon-12 13 | 14 | pub(crate) fn main() { 15 | let mut rng = ChaCha8Rng::seed_from_u64(0); 16 | let mut constants = [0u64; N]; 17 | for i in 0..N { 18 | constants[i] = rng.gen_range(0..SAMPLE_RANGE_END); 19 | } 20 | 21 | // Print the constants in the format we prefer in our code. 
22 | for chunk in constants.chunks(4) { 23 | for (i, c) in chunk.iter().enumerate() { 24 | print!("{c:#018x},"); 25 | if i != chunk.len() - 1 { 26 | print!(" "); 27 | } 28 | } 29 | println!(); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /plonky2/src/fri/mod.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use crate::fri::reduction_strategies::FriReductionStrategy; 4 | 5 | mod challenges; 6 | pub mod oracle; 7 | pub mod proof; 8 | pub mod prover; 9 | pub mod recursive_verifier; 10 | pub mod reduction_strategies; 11 | pub mod structure; 12 | mod validate_shape; 13 | pub mod verifier; 14 | pub mod witness_util; 15 | 16 | #[derive(Debug, Clone, Eq, PartialEq)] 17 | pub struct FriConfig { 18 | /// `rate = 2^{-rate_bits}`. 19 | pub rate_bits: usize, 20 | 21 | /// Height of Merkle tree caps. 22 | pub cap_height: usize, 23 | 24 | pub proof_of_work_bits: u32, 25 | 26 | pub reduction_strategy: FriReductionStrategy, 27 | 28 | /// Number of query rounds to perform. 29 | pub num_query_rounds: usize, 30 | } 31 | 32 | impl FriConfig { 33 | pub fn rate(&self) -> f64 { 34 | 1.0 / ((1 << self.rate_bits) as f64) 35 | } 36 | 37 | pub fn fri_params(&self, degree_bits: usize, hiding: bool) -> FriParams { 38 | let reduction_arity_bits = self.reduction_strategy.reduction_arity_bits( 39 | degree_bits, 40 | self.rate_bits, 41 | self.cap_height, 42 | self.num_query_rounds, 43 | ); 44 | FriParams { 45 | config: self.clone(), 46 | hiding, 47 | degree_bits, 48 | reduction_arity_bits, 49 | } 50 | } 51 | 52 | pub fn num_cap_elements(&self) -> usize { 53 | 1 << self.cap_height 54 | } 55 | } 56 | 57 | /// FRI parameters, including generated parameters which are specific to an instance size, in 58 | /// contrast to `FriConfig` which is user-specified and independent of instance size. 
59 | #[derive(Debug, Clone, Eq, PartialEq)] 60 | pub struct FriParams { 61 | /// User-specified FRI configuration. 62 | pub config: FriConfig, 63 | 64 | /// Whether to use a hiding variant of Merkle trees (where random salts are added to leaves). 65 | pub hiding: bool, 66 | 67 | /// The degree of the purported codeword, measured in bits. 68 | pub degree_bits: usize, 69 | 70 | /// The arity of each FRI reduction step, expressed as the log2 of the actual arity. 71 | /// For example, `[3, 2, 1]` would describe a FRI reduction tree with 8-to-1 reduction, then 72 | /// a 4-to-1 reduction, then a 2-to-1 reduction. After these reductions, the reduced polynomial 73 | /// is sent directly. 74 | pub reduction_arity_bits: Vec, 75 | } 76 | 77 | impl FriParams { 78 | pub fn total_arities(&self) -> usize { 79 | self.reduction_arity_bits.iter().sum() 80 | } 81 | 82 | pub(crate) fn max_arity_bits(&self) -> Option { 83 | self.reduction_arity_bits.iter().copied().max() 84 | } 85 | 86 | pub fn lde_bits(&self) -> usize { 87 | self.degree_bits + self.config.rate_bits 88 | } 89 | 90 | pub fn lde_size(&self) -> usize { 91 | 1 << self.lde_bits() 92 | } 93 | 94 | pub fn final_poly_bits(&self) -> usize { 95 | self.degree_bits - self.total_arities() 96 | } 97 | 98 | pub fn final_poly_len(&self) -> usize { 99 | 1 << self.final_poly_bits() 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /plonky2/src/fri/structure.rs: -------------------------------------------------------------------------------- 1 | //! Information about the structure of a FRI instance, in terms of the oracles and polynomials 2 | //! involved, and the points they are opened at. 3 | 4 | use alloc::vec::Vec; 5 | use core::ops::Range; 6 | 7 | use crate::field::extension::Extendable; 8 | use crate::hash::hash_types::RichField; 9 | use crate::iop::ext_target::ExtensionTarget; 10 | 11 | /// Describes an instance of a FRI-based batch opening. 
12 | pub struct FriInstanceInfo, const D: usize> { 13 | /// The oracles involved, not counting oracles created during the commit phase. 14 | pub oracles: Vec, 15 | /// Batches of openings, where each batch is associated with a particular point. 16 | pub batches: Vec>, 17 | } 18 | 19 | /// Describes an instance of a FRI-based batch opening. 20 | pub struct FriInstanceInfoTarget { 21 | /// The oracles involved, not counting oracles created during the commit phase. 22 | pub oracles: Vec, 23 | /// Batches of openings, where each batch is associated with a particular point. 24 | pub batches: Vec>, 25 | } 26 | 27 | #[derive(Copy, Clone)] 28 | pub struct FriOracleInfo { 29 | pub num_polys: usize, 30 | pub blinding: bool, 31 | } 32 | 33 | /// A batch of openings at a particular point. 34 | pub struct FriBatchInfo, const D: usize> { 35 | pub point: F::Extension, 36 | pub polynomials: Vec, 37 | } 38 | 39 | /// A batch of openings at a particular point. 40 | pub struct FriBatchInfoTarget { 41 | pub point: ExtensionTarget, 42 | pub polynomials: Vec, 43 | } 44 | 45 | #[derive(Copy, Clone, Debug)] 46 | pub struct FriPolynomialInfo { 47 | /// Index into `FriInstanceInfo`'s `oracles` list. 48 | pub oracle_index: usize, 49 | /// Index of the polynomial within the oracle. 50 | pub polynomial_index: usize, 51 | } 52 | 53 | impl FriPolynomialInfo { 54 | pub fn from_range( 55 | oracle_index: usize, 56 | polynomial_indices: Range, 57 | ) -> Vec { 58 | polynomial_indices 59 | .map(|polynomial_index| FriPolynomialInfo { 60 | oracle_index, 61 | polynomial_index, 62 | }) 63 | .collect() 64 | } 65 | } 66 | 67 | /// Opened values of each polynomial. 68 | pub struct FriOpenings, const D: usize> { 69 | pub batches: Vec>, 70 | } 71 | 72 | /// Opened values of each polynomial that's opened at a particular point. 73 | pub struct FriOpeningBatch, const D: usize> { 74 | pub values: Vec, 75 | } 76 | 77 | /// Opened values of each polynomial. 
78 | pub struct FriOpeningsTarget { 79 | pub batches: Vec>, 80 | } 81 | 82 | /// Opened values of each polynomial that's opened at a particular point. 83 | pub struct FriOpeningBatchTarget { 84 | pub values: Vec>, 85 | } 86 | -------------------------------------------------------------------------------- /plonky2/src/fri/validate_shape.rs: -------------------------------------------------------------------------------- 1 | use anyhow::ensure; 2 | 3 | use crate::field::extension::Extendable; 4 | use crate::fri::proof::{FriProof, FriQueryRound, FriQueryStep}; 5 | use crate::fri::structure::FriInstanceInfo; 6 | use crate::fri::FriParams; 7 | use crate::hash::hash_types::RichField; 8 | use crate::plonk::config::GenericConfig; 9 | use crate::plonk::plonk_common::salt_size; 10 | 11 | pub(crate) fn validate_fri_proof_shape( 12 | proof: &FriProof, 13 | instance: &FriInstanceInfo, 14 | params: &FriParams, 15 | ) -> anyhow::Result<()> 16 | where 17 | F: RichField + Extendable, 18 | C: GenericConfig, 19 | { 20 | let FriProof { 21 | commit_phase_merkle_caps, 22 | query_round_proofs, 23 | final_poly, 24 | pow_witness: _pow_witness, 25 | } = proof; 26 | 27 | let cap_height = params.config.cap_height; 28 | for cap in commit_phase_merkle_caps { 29 | ensure!(cap.height() == cap_height); 30 | } 31 | 32 | for query_round in query_round_proofs { 33 | let FriQueryRound { 34 | initial_trees_proof, 35 | steps, 36 | } = query_round; 37 | 38 | ensure!(initial_trees_proof.evals_proofs.len() == instance.oracles.len()); 39 | for ((leaf, merkle_proof), oracle) in initial_trees_proof 40 | .evals_proofs 41 | .iter() 42 | .zip(&instance.oracles) 43 | { 44 | ensure!(leaf.len() == oracle.num_polys + salt_size(oracle.blinding && params.hiding)); 45 | ensure!(merkle_proof.len() + cap_height == params.lde_bits()); 46 | } 47 | 48 | ensure!(steps.len() == params.reduction_arity_bits.len()); 49 | let mut codeword_len_bits = params.lde_bits(); 50 | for (step, arity_bits) in 
steps.iter().zip(¶ms.reduction_arity_bits) { 51 | let FriQueryStep { 52 | evals, 53 | merkle_proof, 54 | } = step; 55 | 56 | let arity = 1 << arity_bits; 57 | codeword_len_bits -= arity_bits; 58 | 59 | ensure!(evals.len() == arity); 60 | ensure!(merkle_proof.len() + cap_height == codeword_len_bits); 61 | } 62 | } 63 | 64 | ensure!(final_poly.len() == params.final_poly_len()); 65 | 66 | Ok(()) 67 | } 68 | -------------------------------------------------------------------------------- /plonky2/src/fri/witness_util.rs: -------------------------------------------------------------------------------- 1 | use itertools::Itertools; 2 | 3 | use crate::field::extension::Extendable; 4 | use crate::fri::proof::{FriProof, FriProofTarget}; 5 | use crate::hash::hash_types::RichField; 6 | use crate::iop::witness::WitnessWrite; 7 | use crate::plonk::config::AlgebraicHasher; 8 | 9 | /// Set the targets in a `FriProofTarget` to their corresponding values in a `FriProof`. 10 | pub fn set_fri_proof_target( 11 | witness: &mut W, 12 | fri_proof_target: &FriProofTarget, 13 | fri_proof: &FriProof, 14 | ) where 15 | F: RichField + Extendable, 16 | W: WitnessWrite + ?Sized, 17 | H: AlgebraicHasher, 18 | { 19 | witness.set_target(fri_proof_target.pow_witness, fri_proof.pow_witness); 20 | 21 | for (&t, &x) in fri_proof_target 22 | .final_poly 23 | .0 24 | .iter() 25 | .zip_eq(&fri_proof.final_poly.coeffs) 26 | { 27 | witness.set_extension_target(t, x); 28 | } 29 | 30 | for (t, x) in fri_proof_target 31 | .commit_phase_merkle_caps 32 | .iter() 33 | .zip_eq(&fri_proof.commit_phase_merkle_caps) 34 | { 35 | witness.set_cap_target(t, x); 36 | } 37 | 38 | for (qt, q) in fri_proof_target 39 | .query_round_proofs 40 | .iter() 41 | .zip_eq(&fri_proof.query_round_proofs) 42 | { 43 | for (at, a) in qt 44 | .initial_trees_proof 45 | .evals_proofs 46 | .iter() 47 | .zip_eq(&q.initial_trees_proof.evals_proofs) 48 | { 49 | for (&t, &x) in at.0.iter().zip_eq(&a.0) { 50 | witness.set_target(t, x); 51 | } 52 
| for (&t, &x) in at.1.siblings.iter().zip_eq(&a.1.siblings) { 53 | witness.set_hash_target(t, x); 54 | } 55 | } 56 | 57 | for (st, s) in qt.steps.iter().zip_eq(&q.steps) { 58 | for (&t, &x) in st.evals.iter().zip_eq(&s.evals) { 59 | witness.set_extension_target(t, x); 60 | } 61 | for (&t, &x) in st 62 | .merkle_proof 63 | .siblings 64 | .iter() 65 | .zip_eq(&s.merkle_proof.siblings) 66 | { 67 | witness.set_hash_target(t, x); 68 | } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /plonky2/src/gadgets/hash.rs: -------------------------------------------------------------------------------- 1 | use crate::field::extension::Extendable; 2 | use crate::hash::hash_types::{HashOutTarget, RichField}; 3 | use crate::hash::hashing::SPONGE_WIDTH; 4 | use crate::iop::target::{BoolTarget, Target}; 5 | use crate::plonk::circuit_builder::CircuitBuilder; 6 | use crate::plonk::config::AlgebraicHasher; 7 | 8 | impl, const D: usize> CircuitBuilder { 9 | pub fn permute>( 10 | &mut self, 11 | inputs: [Target; SPONGE_WIDTH], 12 | ) -> [Target; SPONGE_WIDTH] { 13 | // We don't want to swap any inputs, so set that wire to 0. 14 | let _false = self._false(); 15 | self.permute_swapped::(inputs, _false) 16 | } 17 | 18 | /// Conditionally swap two chunks of the inputs (useful in verifying Merkle proofs), then apply 19 | /// a cryptographic permutation. 
20 | pub(crate) fn permute_swapped>( 21 | &mut self, 22 | inputs: [Target; SPONGE_WIDTH], 23 | swap: BoolTarget, 24 | ) -> [Target; SPONGE_WIDTH] { 25 | H::permute_swapped(inputs, swap, self) 26 | } 27 | 28 | pub fn public_inputs_hash>( 29 | &mut self, 30 | inputs: Vec, 31 | ) -> HashOutTarget { 32 | H::public_inputs_hash(inputs, self) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /plonky2/src/gadgets/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod arithmetic; 2 | pub mod arithmetic_extension; 3 | pub mod hash; 4 | pub mod polynomial; 5 | pub mod random_access; 6 | pub mod range_check; 7 | pub mod select; 8 | pub mod split_base; 9 | pub(crate) mod split_join; 10 | -------------------------------------------------------------------------------- /plonky2/src/gadgets/polynomial.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | 3 | use crate::field::extension::Extendable; 4 | use crate::hash::hash_types::RichField; 5 | use crate::iop::ext_target::{ExtensionAlgebraTarget, ExtensionTarget}; 6 | use crate::iop::target::Target; 7 | use crate::plonk::circuit_builder::CircuitBuilder; 8 | use crate::util::reducing::ReducingFactorTarget; 9 | 10 | #[derive(Clone, Debug)] 11 | pub struct PolynomialCoeffsExtTarget(pub Vec>); 12 | 13 | impl PolynomialCoeffsExtTarget { 14 | pub fn len(&self) -> usize { 15 | self.0.len() 16 | } 17 | 18 | pub fn is_empty(&self) -> bool { 19 | self.len() == 0 20 | } 21 | 22 | pub fn eval_scalar>( 23 | &self, 24 | builder: &mut CircuitBuilder, 25 | point: Target, 26 | ) -> ExtensionTarget { 27 | let point = builder.convert_to_ext(point); 28 | let mut point = ReducingFactorTarget::new(point); 29 | point.reduce(&self.0, builder) 30 | } 31 | 32 | pub fn eval>( 33 | &self, 34 | builder: &mut CircuitBuilder, 35 | point: ExtensionTarget, 36 | ) -> ExtensionTarget { 37 | let mut point = 
ReducingFactorTarget::new(point); 38 | point.reduce(&self.0, builder) 39 | } 40 | } 41 | 42 | pub struct PolynomialCoeffsExtAlgebraTarget(pub Vec>); 43 | 44 | impl PolynomialCoeffsExtAlgebraTarget { 45 | pub fn eval_scalar( 46 | &self, 47 | builder: &mut CircuitBuilder, 48 | point: ExtensionTarget, 49 | ) -> ExtensionAlgebraTarget 50 | where 51 | F: RichField + Extendable, 52 | { 53 | let mut acc = builder.zero_ext_algebra(); 54 | for &c in self.0.iter().rev() { 55 | acc = builder.scalar_mul_add_ext_algebra(point, acc, c); 56 | } 57 | acc 58 | } 59 | 60 | pub fn eval( 61 | &self, 62 | builder: &mut CircuitBuilder, 63 | point: ExtensionAlgebraTarget, 64 | ) -> ExtensionAlgebraTarget 65 | where 66 | F: RichField + Extendable, 67 | { 68 | let mut acc = builder.zero_ext_algebra(); 69 | for &c in self.0.iter().rev() { 70 | acc = builder.mul_add_ext_algebra(point, acc, c); 71 | } 72 | acc 73 | } 74 | 75 | /// Evaluate the polynomial at a point given its powers. The first power is the point itself, not 1. 76 | pub fn eval_with_powers( 77 | &self, 78 | builder: &mut CircuitBuilder, 79 | powers: &[ExtensionAlgebraTarget], 80 | ) -> ExtensionAlgebraTarget 81 | where 82 | F: RichField + Extendable, 83 | { 84 | debug_assert_eq!(self.0.len(), powers.len() + 1); 85 | let acc = self.0[0]; 86 | self.0[1..] 
87 | .iter() 88 | .zip(powers) 89 | .fold(acc, |acc, (&x, &c)| builder.mul_add_ext_algebra(c, x, acc)) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /plonky2/src/gadgets/range_check.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec; 2 | use alloc::vec::Vec; 3 | 4 | use crate::field::extension::Extendable; 5 | use crate::hash::hash_types::RichField; 6 | use crate::iop::generator::{GeneratedValues, SimpleGenerator}; 7 | use crate::iop::target::{BoolTarget, Target}; 8 | use crate::iop::witness::{PartitionWitness, Witness, WitnessWrite}; 9 | use crate::plonk::circuit_builder::CircuitBuilder; 10 | 11 | impl, const D: usize> CircuitBuilder { 12 | /// Checks that `x < 2^n_log` using a `BaseSumGate`. 13 | pub fn range_check(&mut self, x: Target, n_log: usize) { 14 | self.split_le(x, n_log); 15 | } 16 | 17 | /// Returns the first `num_low_bits` little-endian bits of `x`. 18 | pub fn low_bits(&mut self, x: Target, num_low_bits: usize, num_bits: usize) -> Vec { 19 | let mut res = self.split_le(x, num_bits); 20 | res.truncate(num_low_bits); 21 | res 22 | } 23 | 24 | /// Returns `(a,b)` such that `x = a + 2^n_log * b` with `a < 2^n_log`. 25 | /// `x` is assumed to be range-checked for having `num_bits` bits. 
26 | pub fn split_low_high(&mut self, x: Target, n_log: usize, num_bits: usize) -> (Target, Target) { 27 | let low = self.add_virtual_target(); 28 | let high = self.add_virtual_target(); 29 | 30 | self.add_simple_generator(LowHighGenerator { 31 | integer: x, 32 | n_log, 33 | low, 34 | high, 35 | }); 36 | 37 | self.range_check(low, n_log); 38 | self.range_check(high, num_bits - n_log); 39 | 40 | let pow2 = self.constant(F::from_canonical_u64(1 << n_log)); 41 | let comp_x = self.mul_add(high, pow2, low); 42 | self.connect(x, comp_x); 43 | 44 | (low, high) 45 | } 46 | 47 | pub fn assert_bool(&mut self, b: BoolTarget) { 48 | let z = self.mul_sub(b.target, b.target, b.target); 49 | let zero = self.zero(); 50 | self.connect(z, zero); 51 | } 52 | } 53 | 54 | #[derive(Debug)] 55 | struct LowHighGenerator { 56 | integer: Target, 57 | n_log: usize, 58 | low: Target, 59 | high: Target, 60 | } 61 | 62 | impl SimpleGenerator for LowHighGenerator { 63 | fn dependencies(&self) -> Vec { 64 | vec![self.integer] 65 | } 66 | 67 | fn run_once(&self, witness: &PartitionWitness, out_buffer: &mut GeneratedValues) { 68 | let integer_value = witness.get_target(self.integer).to_canonical_u64(); 69 | let low = integer_value & ((1 << self.n_log) - 1); 70 | let high = integer_value >> self.n_log; 71 | 72 | out_buffer.set_target(self.low, F::from_canonical_u64(low)); 73 | out_buffer.set_target(self.high, F::from_canonical_u64(high)); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /plonky2/src/gadgets/select.rs: -------------------------------------------------------------------------------- 1 | use crate::field::extension::Extendable; 2 | use crate::hash::hash_types::RichField; 3 | use crate::iop::ext_target::ExtensionTarget; 4 | use crate::iop::target::{BoolTarget, Target}; 5 | use crate::plonk::circuit_builder::CircuitBuilder; 6 | 7 | impl, const D: usize> CircuitBuilder { 8 | /// Selects `x` or `y` based on `b`, i.e., this returns `if b { 
x } else { y }`. 9 | pub fn select_ext( 10 | &mut self, 11 | b: BoolTarget, 12 | x: ExtensionTarget, 13 | y: ExtensionTarget, 14 | ) -> ExtensionTarget { 15 | let b_ext = self.convert_to_ext(b.target); 16 | self.select_ext_generalized(b_ext, x, y) 17 | } 18 | 19 | /// Like `select_ext`, but accepts a condition input which does not necessarily have to be 20 | /// binary. In this case, it computes the arithmetic generalization of `if b { x } else { y }`, 21 | /// i.e. `bx - (by-y)`. 22 | pub fn select_ext_generalized( 23 | &mut self, 24 | b: ExtensionTarget, 25 | x: ExtensionTarget, 26 | y: ExtensionTarget, 27 | ) -> ExtensionTarget { 28 | let tmp = self.mul_sub_extension(b, y, y); 29 | self.mul_sub_extension(b, x, tmp) 30 | } 31 | 32 | /// See `select_ext`. 33 | pub fn select(&mut self, b: BoolTarget, x: Target, y: Target) -> Target { 34 | let tmp = self.mul_sub(b.target, y, y); 35 | self.mul_sub(b.target, x, tmp) 36 | } 37 | } 38 | 39 | #[cfg(test)] 40 | mod tests { 41 | use anyhow::Result; 42 | 43 | use crate::field::types::Sample; 44 | use crate::iop::witness::{PartialWitness, WitnessWrite}; 45 | use crate::plonk::circuit_builder::CircuitBuilder; 46 | use crate::plonk::circuit_data::CircuitConfig; 47 | use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; 48 | use crate::plonk::verifier::verify; 49 | 50 | #[test] 51 | fn test_select() -> Result<()> { 52 | const D: usize = 2; 53 | type C = PoseidonGoldilocksConfig; 54 | type F = >::F; 55 | type FF = >::FE; 56 | let config = CircuitConfig::standard_recursion_config(); 57 | let mut pw = PartialWitness::::new(); 58 | let mut builder = CircuitBuilder::::new(config); 59 | 60 | let (x, y) = (FF::rand(), FF::rand()); 61 | let xt = builder.add_virtual_extension_target(); 62 | let yt = builder.add_virtual_extension_target(); 63 | let truet = builder._true(); 64 | let falset = builder._false(); 65 | 66 | pw.set_extension_target(xt, x); 67 | pw.set_extension_target(yt, y); 68 | 69 | let should_be_x = 
builder.select_ext(truet, xt, yt); 70 | let should_be_y = builder.select_ext(falset, xt, yt); 71 | 72 | builder.connect_extension(should_be_x, xt); 73 | builder.connect_extension(should_be_y, yt); 74 | 75 | let data = builder.build::(); 76 | let proof = data.prove(pw)?; 77 | 78 | verify(proof, &data.verifier_only, &data.common) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /plonky2/src/gates/mod.rs: -------------------------------------------------------------------------------- 1 | // Gates have `new` methods that return `GateRef`s. 2 | #![allow(clippy::new_ret_no_self)] 3 | 4 | pub mod arithmetic_base; 5 | pub mod arithmetic_extension; 6 | pub mod base_sum; 7 | pub mod constant; 8 | pub mod exponentiation; 9 | pub mod gate; 10 | pub mod high_degree_interpolation; 11 | pub mod interpolation; 12 | pub mod low_degree_interpolation; 13 | pub mod multiplication_extension; 14 | pub mod noop; 15 | pub mod packed_util; 16 | pub mod poseidon; 17 | pub mod poseidon_mds; 18 | pub mod public_input; 19 | pub mod random_access; 20 | pub mod reducing; 21 | pub mod reducing_extension; 22 | pub(crate) mod selectors; 23 | pub mod util; 24 | 25 | // Can't use #[cfg(test)] here because it needs to be visible to other crates. 
26 | // See https://github.com/rust-lang/cargo/issues/8379 27 | #[cfg(any(feature = "gate_testing", test))] 28 | pub mod gate_testing; 29 | -------------------------------------------------------------------------------- /plonky2/src/gates/noop.rs: -------------------------------------------------------------------------------- 1 | use alloc::boxed::Box; 2 | use alloc::string::String; 3 | use alloc::vec::Vec; 4 | 5 | use crate::field::extension::Extendable; 6 | use crate::gates::gate::Gate; 7 | use crate::hash::hash_types::RichField; 8 | use crate::iop::ext_target::ExtensionTarget; 9 | use crate::iop::generator::WitnessGenerator; 10 | use crate::plonk::circuit_builder::CircuitBuilder; 11 | use crate::plonk::vars::{EvaluationTargets, EvaluationVars, EvaluationVarsBaseBatch}; 12 | 13 | /// A gate which does nothing. 14 | pub struct NoopGate; 15 | 16 | impl, const D: usize> Gate for NoopGate { 17 | fn id(&self) -> String { 18 | "NoopGate".into() 19 | } 20 | 21 | fn export_circom_verification_code(&self) -> String { 22 | todo!() 23 | } 24 | fn export_solidity_verification_code(&self) -> String { 25 | todo!() 26 | } 27 | 28 | fn eval_unfiltered(&self, _vars: EvaluationVars) -> Vec { 29 | Vec::new() 30 | } 31 | 32 | fn eval_unfiltered_base_batch(&self, _vars: EvaluationVarsBaseBatch) -> Vec { 33 | Vec::new() 34 | } 35 | 36 | fn eval_unfiltered_circuit( 37 | &self, 38 | _builder: &mut CircuitBuilder, 39 | _vars: EvaluationTargets, 40 | ) -> Vec> { 41 | Vec::new() 42 | } 43 | 44 | fn generators(&self, _row: usize, _local_constants: &[F]) -> Vec>> { 45 | Vec::new() 46 | } 47 | 48 | fn num_wires(&self) -> usize { 49 | 0 50 | } 51 | 52 | fn num_constants(&self) -> usize { 53 | 0 54 | } 55 | 56 | fn degree(&self) -> usize { 57 | 0 58 | } 59 | 60 | fn num_constraints(&self) -> usize { 61 | 0 62 | } 63 | } 64 | 65 | #[cfg(test)] 66 | mod tests { 67 | use crate::field::goldilocks_field::GoldilocksField; 68 | use crate::gates::gate_testing::{test_eval_fns, test_low_degree}; 69 | 
use crate::gates::noop::NoopGate; 70 | use crate::plonk::config::{GenericConfig, PoseidonGoldilocksConfig}; 71 | 72 | #[test] 73 | fn low_degree() { 74 | test_low_degree::(NoopGate) 75 | } 76 | 77 | #[test] 78 | fn eval_fns() -> anyhow::Result<()> { 79 | const D: usize = 2; 80 | type C = PoseidonGoldilocksConfig; 81 | type F = >::F; 82 | test_eval_fns::(NoopGate) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /plonky2/src/gates/packed_util.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec; 2 | use alloc::vec::Vec; 3 | 4 | use crate::field::extension::Extendable; 5 | use crate::field::packable::Packable; 6 | use crate::field::packed::PackedField; 7 | use crate::gates::gate::Gate; 8 | use crate::gates::util::StridedConstraintConsumer; 9 | use crate::hash::hash_types::RichField; 10 | use crate::plonk::vars::{EvaluationVarsBaseBatch, EvaluationVarsBasePacked}; 11 | 12 | pub trait PackedEvaluableBase, const D: usize>: Gate { 13 | fn eval_unfiltered_base_packed>( 14 | &self, 15 | vars_base: EvaluationVarsBasePacked

, 16 | yield_constr: StridedConstraintConsumer

, 17 | ); 18 | 19 | /// Evaluates entire batch of points. Returns a matrix of constraints. Constraint `j` for point 20 | /// `i` is at `index j * batch_size + i`. 21 | fn eval_unfiltered_base_batch_packed(&self, vars_batch: EvaluationVarsBaseBatch) -> Vec { 22 | let mut res = vec![F::ZERO; vars_batch.len() * self.num_constraints()]; 23 | let (vars_packed_iter, vars_leftovers_iter) = vars_batch.pack::<::Packing>(); 24 | let leftovers_start = vars_batch.len() - vars_leftovers_iter.len(); 25 | for (i, vars_packed) in vars_packed_iter.enumerate() { 26 | self.eval_unfiltered_base_packed( 27 | vars_packed, 28 | StridedConstraintConsumer::new( 29 | &mut res[..], 30 | vars_batch.len(), 31 | ::Packing::WIDTH * i, 32 | ), 33 | ); 34 | } 35 | for (i, vars_leftovers) in vars_leftovers_iter.enumerate() { 36 | self.eval_unfiltered_base_packed( 37 | vars_leftovers, 38 | StridedConstraintConsumer::new(&mut res[..], vars_batch.len(), leftovers_start + i), 39 | ); 40 | } 41 | res 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /plonky2/src/gates/util.rs: -------------------------------------------------------------------------------- 1 | use core::marker::PhantomData; 2 | 3 | use crate::field::packed::PackedField; 4 | 5 | /// Writes constraints yielded by a gate to a buffer, with a given stride. 6 | /// Permits us to abstract the underlying memory layout. In particular, we can make a matrix of 7 | /// constraints where every column is an evaluation point and every row is a constraint index, with 8 | /// the matrix stored in row-contiguous form. 9 | pub struct StridedConstraintConsumer<'a, P: PackedField> { 10 | // This is a particularly neat way of doing this, more so than a slice. We increase start by 11 | // stride at every step and terminate when it equals end. 
12 | start: *mut P::Scalar, 13 | end: *mut P::Scalar, 14 | stride: usize, 15 | _phantom: PhantomData<&'a mut [P::Scalar]>, 16 | } 17 | 18 | impl<'a, P: PackedField> StridedConstraintConsumer<'a, P> { 19 | pub fn new(buffer: &'a mut [P::Scalar], stride: usize, offset: usize) -> Self { 20 | assert!(stride >= P::WIDTH); 21 | assert!(offset < stride); 22 | assert_eq!(buffer.len() % stride, 0); 23 | let ptr_range = buffer.as_mut_ptr_range(); 24 | // `wrapping_add` is needed to avoid undefined behavior. Plain `add` causes UB if 'the ... 25 | // resulting pointer [is neither] in bounds or one byte past the end of the same allocated 26 | // object'; the UB results even if the pointer is not dereferenced. `end` will be more than 27 | // one byte past the buffer unless `offset` is 0. The same applies to `start` if the buffer 28 | // has length 0 and the offset is not 0. 29 | // We _could_ do pointer arithmetic without `wrapping_add`, but the logic would be 30 | // unnecessarily complicated. 31 | let start = ptr_range.start.wrapping_add(offset); 32 | let end = ptr_range.end.wrapping_add(offset); 33 | Self { 34 | start, 35 | end, 36 | stride, 37 | _phantom: PhantomData, 38 | } 39 | } 40 | 41 | /// Emit one constraint. 42 | pub fn one(&mut self, constraint: P) { 43 | if self.start != self.end { 44 | // # Safety 45 | // The checks in `new` guarantee that this points to valid space. 46 | unsafe { 47 | *self.start.cast() = constraint; 48 | } 49 | // See the comment in `new`. `wrapping_add` is needed to avoid UB if we've just 50 | // exhausted our buffer (and hence we're setting `self.start` to point past the end). 51 | self.start = self.start.wrapping_add(self.stride); 52 | } else { 53 | panic!("gate produced too many constraints"); 54 | } 55 | } 56 | 57 | /// Convenience method that calls `.one()` multiple times. 
58 | pub fn many>(&mut self, constraints: I) { 59 | constraints 60 | .into_iter() 61 | .for_each(|constraint| self.one(constraint)); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /plonky2/src/hash/arch/aarch64/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(target_feature = "neon")] 2 | pub(crate) mod poseidon_goldilocks_neon; 3 | -------------------------------------------------------------------------------- /plonky2/src/hash/arch/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(target_arch = "x86_64")] 2 | pub(crate) mod x86_64; 3 | 4 | #[cfg(target_arch = "aarch64")] 5 | pub(crate) mod aarch64; 6 | -------------------------------------------------------------------------------- /plonky2/src/hash/arch/x86_64/mod.rs: -------------------------------------------------------------------------------- 1 | // // Requires: 2 | // // - AVX2 3 | // // - BMI2 (for MULX and SHRX) 4 | // #[cfg(all(target_feature = "avx2", target_feature = "bmi2"))] 5 | // pub(crate) mod poseidon_goldilocks_avx2_bmi2; 6 | -------------------------------------------------------------------------------- /plonky2/src/hash/keccak.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec; 2 | use alloc::vec::Vec; 3 | use core::iter; 4 | use core::mem::size_of; 5 | 6 | use itertools::Itertools; 7 | use keccak_hash::keccak; 8 | 9 | use crate::hash::hash_types::{BytesHash, RichField}; 10 | use crate::hash::hashing::{PlonkyPermutation, SPONGE_WIDTH}; 11 | use crate::plonk::config::Hasher; 12 | use crate::util::serialization::Write; 13 | 14 | /// Keccak-256 pseudo-permutation (not necessarily one-to-one) used in the challenger. 15 | /// A state `input: [F; 12]` is sent to the field representation of `H(input) || H(H(input)) || H(H(H(input)))` 16 | /// where `H` is the Keccak-256 hash. 
17 | pub struct KeccakPermutation; 18 | impl PlonkyPermutation for KeccakPermutation { 19 | fn permute(input: [F; SPONGE_WIDTH]) -> [F; SPONGE_WIDTH] { 20 | let mut state = vec![0u8; SPONGE_WIDTH * size_of::()]; 21 | for i in 0..SPONGE_WIDTH { 22 | state[i * size_of::()..(i + 1) * size_of::()] 23 | .copy_from_slice(&input[i].to_canonical_u64().to_le_bytes()); 24 | } 25 | 26 | let hash_onion = iter::repeat_with(|| { 27 | let output = keccak(state.clone()).to_fixed_bytes(); 28 | state = output.to_vec(); 29 | output 30 | }); 31 | 32 | let hash_onion_u64s = hash_onion.flat_map(|output| { 33 | output 34 | .chunks_exact(size_of::()) 35 | .map(|word| u64::from_le_bytes(word.try_into().unwrap())) 36 | .collect_vec() 37 | }); 38 | 39 | // Parse field elements from u64 stream, using rejection sampling such that words that don't 40 | // fit in F are ignored. 41 | let hash_onion_elems = hash_onion_u64s 42 | .filter(|&word| word < F::ORDER) 43 | .map(F::from_canonical_u64); 44 | 45 | hash_onion_elems 46 | .take(SPONGE_WIDTH) 47 | .collect_vec() 48 | .try_into() 49 | .unwrap() 50 | } 51 | } 52 | 53 | /// Keccak-256 hash function. 
54 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 55 | pub struct KeccakHash; 56 | 57 | impl Hasher for KeccakHash { 58 | const HASH_SIZE: usize = N; 59 | type Hash = BytesHash; 60 | type Permutation = KeccakPermutation; 61 | 62 | fn hash_no_pad(input: &[F]) -> Self::Hash { 63 | let mut buffer = Vec::new(); 64 | buffer.write_field_vec(input).unwrap(); 65 | let mut arr = [0; N]; 66 | let hash_bytes = keccak(buffer).0; 67 | arr.copy_from_slice(&hash_bytes[..N]); 68 | BytesHash(arr) 69 | } 70 | 71 | fn hash_public_inputs(input: &[F]) -> Self::Hash { 72 | KeccakHash::hash_no_pad(input) 73 | } 74 | 75 | fn two_to_one(left: Self::Hash, right: Self::Hash) -> Self::Hash { 76 | let mut v = vec![0; N * 2]; 77 | v[0..N].copy_from_slice(&left.0); 78 | v[N..].copy_from_slice(&right.0); 79 | let mut arr = [0; N]; 80 | arr.copy_from_slice(&keccak(v).0[..N]); 81 | BytesHash(arr) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /plonky2/src/hash/mod.rs: -------------------------------------------------------------------------------- 1 | mod arch; 2 | pub mod hash_types; 3 | pub mod hashing; 4 | pub mod keccak; 5 | pub mod merkle_proofs; 6 | pub mod merkle_tree; 7 | pub mod path_compression; 8 | pub mod poseidon; 9 | pub mod poseidon_goldilocks; 10 | -------------------------------------------------------------------------------- /plonky2/src/hash/poseidon_crandall.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /plonky2/src/iop/mod.rs: -------------------------------------------------------------------------------- 1 | //! Logic common to multiple IOPs. 
2 | 3 | pub mod challenger; 4 | pub mod ext_target; 5 | pub mod generator; 6 | pub mod target; 7 | pub mod wire; 8 | pub mod witness; 9 | -------------------------------------------------------------------------------- /plonky2/src/iop/target.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | use core::ops::Range; 3 | 4 | use crate::iop::ext_target::ExtensionTarget; 5 | use crate::iop::wire::Wire; 6 | use crate::plonk::circuit_data::CircuitConfig; 7 | 8 | /// A location in the witness. 9 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] 10 | pub enum Target { 11 | Wire(Wire), 12 | /// A target that doesn't have any inherent location in the witness (but it can be copied to 13 | /// another target that does). This is useful for representing intermediate values in witness 14 | /// generation. 15 | VirtualTarget { 16 | index: usize, 17 | }, 18 | } 19 | 20 | impl Target { 21 | pub fn wire(row: usize, column: usize) -> Self { 22 | Self::Wire(Wire { row, column }) 23 | } 24 | 25 | pub fn is_routable(&self, config: &CircuitConfig) -> bool { 26 | match self { 27 | Target::Wire(wire) => wire.is_routable(config), 28 | Target::VirtualTarget { .. } => true, 29 | } 30 | } 31 | 32 | pub fn wires_from_range(row: usize, range: Range) -> Vec { 33 | range.map(|i| Self::wire(row, i)).collect() 34 | } 35 | 36 | pub fn index(&self, num_wires: usize, degree: usize) -> usize { 37 | match self { 38 | Target::Wire(Wire { row, column }) => row * num_wires + column, 39 | Target::VirtualTarget { index } => degree * num_wires + index, 40 | } 41 | } 42 | 43 | /// Conversion to an `ExtensionTarget`. 44 | pub fn to_ext_target(self, zero: Self) -> ExtensionTarget { 45 | let mut arr = [zero; D]; 46 | arr[0] = self; 47 | ExtensionTarget(arr) 48 | } 49 | } 50 | 51 | /// A `Target` which has already been constrained such that it can only be 0 or 1. 
52 | #[derive(Copy, Clone, Debug)] 53 | #[allow(clippy::manual_non_exhaustive)] 54 | pub struct BoolTarget { 55 | pub target: Target, 56 | /// This private field is here to force all instantiations to go through `new_unsafe`. 57 | _private: (), 58 | } 59 | 60 | impl BoolTarget { 61 | pub fn new_unsafe(target: Target) -> BoolTarget { 62 | BoolTarget { 63 | target, 64 | _private: (), 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /plonky2/src/iop/wire.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec::Vec; 2 | use core::ops::Range; 3 | 4 | use crate::plonk::circuit_data::CircuitConfig; 5 | 6 | /// Represents a wire in the circuit, seen as a `degree x num_wires` table. 7 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] 8 | pub struct Wire { 9 | /// Row index of the wire. 10 | pub row: usize, 11 | /// Column index of the wire. 12 | pub column: usize, 13 | } 14 | 15 | impl Wire { 16 | pub fn is_routable(&self, config: &CircuitConfig) -> bool { 17 | self.column < config.num_routed_wires 18 | } 19 | 20 | pub fn from_range(gate: usize, range: Range) -> Vec { 21 | range 22 | .map(|i| Wire { 23 | row: gate, 24 | column: i, 25 | }) 26 | .collect() 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /plonky2/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(inherent_associated_types)] 2 | #![feature(allocator_api)] 3 | #![allow(clippy::too_many_arguments)] 4 | #![allow(clippy::needless_range_loop)] 5 | #![cfg_attr(not(feature = "std"), no_std)] 6 | 7 | extern crate alloc; 8 | 9 | #[doc(inline)] 10 | pub use plonky2_field as field; 11 | 12 | pub mod fri; 13 | pub mod gadgets; 14 | pub mod gates; 15 | pub mod hash; 16 | pub mod iop; 17 | pub mod plonk; 18 | pub mod recursion; 19 | pub mod util; 20 | 
-------------------------------------------------------------------------------- /plonky2/src/plonk/copy_constraint.rs: -------------------------------------------------------------------------------- 1 | use alloc::string::String; 2 | 3 | use crate::iop::target::Target; 4 | 5 | /// A named copy constraint. 6 | pub struct CopyConstraint { 7 | pub pair: (Target, Target), 8 | pub name: String, 9 | } 10 | 11 | impl From<(Target, Target)> for CopyConstraint { 12 | fn from(pair: (Target, Target)) -> Self { 13 | Self { 14 | pair, 15 | name: String::new(), 16 | } 17 | } 18 | } 19 | 20 | impl CopyConstraint { 21 | pub fn new(pair: (Target, Target), name: String) -> Self { 22 | Self { pair, name } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /plonky2/src/plonk/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod circuit_builder; 2 | pub mod circuit_data; 3 | pub mod config; 4 | pub(crate) mod copy_constraint; 5 | mod get_challenges; 6 | pub(crate) mod permutation_argument; 7 | pub mod plonk_common; 8 | pub mod proof; 9 | pub mod prover; 10 | mod validate_shape; 11 | pub(crate) mod vanishing_poly; 12 | pub mod vars; 13 | pub mod verifier; 14 | -------------------------------------------------------------------------------- /plonky2/src/plonk/validate_shape.rs: -------------------------------------------------------------------------------- 1 | use anyhow::ensure; 2 | 3 | use crate::field::extension::Extendable; 4 | use crate::hash::hash_types::RichField; 5 | use crate::plonk::circuit_data::CommonCircuitData; 6 | use crate::plonk::config::GenericConfig; 7 | use crate::plonk::proof::{OpeningSet, Proof, ProofWithPublicInputs}; 8 | 9 | pub(crate) fn validate_proof_with_pis_shape( 10 | proof_with_pis: &ProofWithPublicInputs, 11 | common_data: &CommonCircuitData, 12 | ) -> anyhow::Result<()> 13 | where 14 | F: RichField + Extendable, 15 | C: GenericConfig, 16 | { 17 | let 
ProofWithPublicInputs { 18 | proof, 19 | public_inputs, 20 | } = proof_with_pis; 21 | validate_proof_shape(proof, common_data)?; 22 | ensure!( 23 | public_inputs.len() == common_data.num_public_inputs, 24 | "Number of public inputs doesn't match circuit data." 25 | ); 26 | Ok(()) 27 | } 28 | 29 | fn validate_proof_shape( 30 | proof: &Proof, 31 | common_data: &CommonCircuitData, 32 | ) -> anyhow::Result<()> 33 | where 34 | F: RichField + Extendable, 35 | C: GenericConfig, 36 | { 37 | let config = &common_data.config; 38 | let Proof { 39 | wires_cap, 40 | plonk_zs_partial_products_cap, 41 | quotient_polys_cap, 42 | openings, 43 | // The shape of the opening proof will be checked in the FRI verifier (see 44 | // validate_fri_proof_shape), so we ignore it here. 45 | opening_proof: _, 46 | } = proof; 47 | let OpeningSet { 48 | constants, 49 | plonk_sigmas, 50 | wires, 51 | plonk_zs, 52 | plonk_zs_next, 53 | partial_products, 54 | quotient_polys, 55 | } = openings; 56 | let cap_height = common_data.fri_params.config.cap_height; 57 | ensure!(wires_cap.height() == cap_height); 58 | ensure!(plonk_zs_partial_products_cap.height() == cap_height); 59 | ensure!(quotient_polys_cap.height() == cap_height); 60 | ensure!(constants.len() == common_data.num_constants); 61 | ensure!(plonk_sigmas.len() == config.num_routed_wires); 62 | ensure!(wires.len() == config.num_wires); 63 | ensure!(plonk_zs.len() == config.num_challenges); 64 | ensure!(plonk_zs_next.len() == config.num_challenges); 65 | ensure!(partial_products.len() == config.num_challenges * common_data.num_partial_products); 66 | ensure!(quotient_polys.len() == common_data.num_quotient_polys()); 67 | Ok(()) 68 | } 69 | -------------------------------------------------------------------------------- /plonky2/src/recursion/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod conditional_recursive_verifier; 2 | pub mod cyclic_recursion; 3 | pub mod dummy_circuit; 4 | pub mod 
recursive_verifier;
pub mod tree_recursion;
--------------------------------------------------------------------------------
/starky/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "starky"
description = "Implementation of STARKs"
version = "0.1.0"
edition = "2021"

[features]
default = ["parallel", "std", "timing"]
parallel = ["plonky2/parallel", "maybe_rayon/parallel"]
std = ["anyhow/std", "plonky2/std"]
timing = ["plonky2/timing"]

[dependencies]
anyhow = { version = "1.0.40", default-features = false }
itertools = { version = "0.10.0", default-features = false }
log = { version = "0.4.14", default-features = false }
maybe_rayon = { path = "../maybe_rayon", default-features = false }
plonky2 = { path = "../plonky2", default-features = false }

[dev-dependencies]
env_logger = { version = "0.9.0", default-features = false }
--------------------------------------------------------------------------------
/starky/src/config.rs:
--------------------------------------------------------------------------------
use plonky2::fri::reduction_strategies::FriReductionStrategy;
use plonky2::fri::{FriConfig, FriParams};

/// Parameters controlling the soundness/size trade-off of a STARK proof.
pub struct StarkConfig {
    pub security_bits: usize,

    /// The number of challenge points to generate, for IOPs that have soundness errors of (roughly)
    /// `degree / |F|`.
    pub num_challenges: usize,

    pub fri_config: FriConfig,
}

impl StarkConfig {
    /// A typical configuration with a rate of 2, resulting in fast but large proofs.
    /// Targets ~100 bit conjectured security.
    pub fn standard_fast_config() -> Self {
        Self {
            security_bits: 100,
            num_challenges: 2,
            fri_config: FriConfig {
                // Rate 2^-1: fast proving at the cost of proof size.
                rate_bits: 1,
                cap_height: 4,
                proof_of_work_bits: 16,
                reduction_strategy: FriReductionStrategy::ConstantArityBits(4, 5),
                num_query_rounds: 84,
            },
        }
    }

    /// FRI parameters for a trace of length `2^degree_bits` (no zero-knowledge blinding).
    pub(crate) fn fri_params(&self, degree_bits: usize) -> FriParams {
        self.fri_config.fri_params(degree_bits, false)
    }
}
--------------------------------------------------------------------------------
/starky/src/lib.rs:
--------------------------------------------------------------------------------
#![allow(incomplete_features)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
#![feature(generic_const_exprs)]
#![cfg_attr(not(feature = "std"), no_std)]

extern crate alloc;

mod get_challenges;

pub mod config;
pub mod constraint_consumer;
pub mod permutation;
pub mod proof;
pub mod prover;
pub mod recursive_verifier;
pub mod stark;
pub mod stark_testing;
pub mod util;
pub mod vanishing_poly;
pub mod vars;
pub mod verifier;

#[cfg(test)]
pub mod fibonacci_stark;
--------------------------------------------------------------------------------
/starky/src/util.rs:
--------------------------------------------------------------------------------
use alloc::vec::Vec;

use itertools::Itertools;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::util::transpose;

/// A helper function to transpose a row-wise trace and put it in the format that `prove` expects.
pub fn trace_rows_to_poly_values<F: Field, const COLUMNS: usize>(
    trace_rows: Vec<[F; COLUMNS]>,
) -> Vec<PolynomialValues<F>> {
    let trace_row_vecs = trace_rows.into_iter().map(|row| row.to_vec()).collect_vec();
    let trace_col_vecs: Vec<Vec<F>> = transpose(&trace_row_vecs);
    trace_col_vecs
        .into_iter()
        .map(|column| PolynomialValues::new(column))
        .collect()
}
--------------------------------------------------------------------------------
/starky/src/vanishing_poly.rs:
--------------------------------------------------------------------------------
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::GenericConfig;

use crate::config::StarkConfig;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::permutation::{
    eval_permutation_checks, eval_permutation_checks_circuit, PermutationCheckDataTarget,
    PermutationCheckVars,
};
use crate::stark::Stark;
use crate::vars::{StarkEvaluationTargets, StarkEvaluationVars};

/// Evaluates the STARK's vanishing polynomial — its constraints plus (when present)
/// the permutation-argument checks — accumulating everything into `consumer`.
pub(crate) fn eval_vanishing_poly<F, FE, P, C, S, const D: usize, const D2: usize>(
    stark: &S,
    config: &StarkConfig,
    vars: StarkEvaluationVars<FE, P, { S::COLUMNS }, { S::PUBLIC_INPUTS }>,
    permutation_data: Option<PermutationCheckVars<F, FE, P, D2>>,
    consumer: &mut ConstraintConsumer<P>,
) where
    F: RichField + Extendable<D>,
    FE: FieldExtension<D2, BaseField = F>,
    P: PackedField<Scalar = FE>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    [(); S::COLUMNS]:,
    [(); S::PUBLIC_INPUTS]:,
{
    stark.eval_packed_generic(vars, consumer);
    if let Some(permutation_data) = permutation_data {
        eval_permutation_checks::<F, FE, P, C, S, D, D2>(
            stark,
            config,
            vars,
            permutation_data,
            consumer,
        );
    }
}

/// Recursive (in-circuit) analogue of `eval_vanishing_poly`.
pub(crate) fn eval_vanishing_poly_circuit<F, C, S, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    stark: &S,
    config: &StarkConfig,
    vars: StarkEvaluationTargets<D, { S::COLUMNS }, { S::PUBLIC_INPUTS }>,
    permutation_data: Option<PermutationCheckDataTarget<D>>,
    consumer: &mut RecursiveConstraintConsumer<F, D>,
) where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    [(); S::COLUMNS]:,
    [(); S::PUBLIC_INPUTS]:,
{
    stark.eval_ext_circuit(builder, vars, consumer);
    if let Some(permutation_data) = permutation_data {
        eval_permutation_checks_circuit::<F, C, S, D>(
            builder,
            stark,
            config,
            vars,
            permutation_data,
            consumer,
        );
    }
}
--------------------------------------------------------------------------------
/starky/src/vars.rs:
--------------------------------------------------------------------------------
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::iop::ext_target::ExtensionTarget;

/// The values a STARK's constraints are evaluated over: the current row, the next row,
/// and the public inputs, in the native-field (packed) setting.
#[derive(Debug, Copy, Clone)]
pub struct StarkEvaluationVars<'a, F, P, const COLUMNS: usize, const PUBLIC_INPUTS: usize>
where
    F: Field,
    P: PackedField<Scalar = F>,
{
    pub local_values: &'a [P; COLUMNS],
    pub next_values: &'a [P; COLUMNS],
    pub public_inputs: &'a [P::Scalar; PUBLIC_INPUTS],
}

/// Recursive analogue of `StarkEvaluationVars`: the same data as circuit targets.
#[derive(Debug, Copy, Clone)]
pub struct StarkEvaluationTargets<
    'a,
    const D: usize,
    const COLUMNS: usize,
    const PUBLIC_INPUTS: usize,
> {
    pub local_values: &'a [ExtensionTarget<D>; COLUMNS],
    pub next_values: &'a
[ExtensionTarget<D>; COLUMNS],
    pub public_inputs: &'a [ExtensionTarget<D>; PUBLIC_INPUTS],
}
--------------------------------------------------------------------------------
/system_zero/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "system_zero"
description = "A VM whose execution can be verified with STARKs; designed for proving Ethereum transactions"
version = "0.1.0"
edition = "2021"

[dependencies]
anyhow = "1.0.40"
itertools = "0.10.0"
log = "0.4.14"
plonky2 = { path = "../plonky2" }
plonky2_util = { path = "../util" }
rand = "0.8.4"
rand_chacha = "0.3.1"
starky = { path = "../starky" }

[dev-dependencies]
criterion = "0.4.0"
env_logger = "0.10.0"

[[bench]]
name = "lookup_permuted_cols"
harness = false

[target.'cfg(not(target_env = "msvc"))'.dev-dependencies]
jemallocator = "0.5.0"
--------------------------------------------------------------------------------
/system_zero/benches/allocator/mod.rs:
--------------------------------------------------------------------------------
// Set up Jemalloc
#[cfg(not(target_env = "msvc"))]
use jemallocator::Jemalloc;

#[cfg(not(target_env = "msvc"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;
--------------------------------------------------------------------------------
/system_zero/benches/lookup_permuted_cols.rs:
--------------------------------------------------------------------------------
mod allocator;

use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use itertools::Itertools;
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::field::types::Field;
use rand::{thread_rng, Rng};
use system_zero::lookup::permuted_cols;

type F = GoldilocksField;

/// Benchmarks `permuted_cols` on pre-sorted tables of several power-of-two sizes.
fn criterion_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("lookup-permuted-cols");

    for size_log in [16, 17, 18] {
        let size = 1 << size_log;
        group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, _| {
            // We could benchmark a table of random values with
            //     let table = F::rand_vec(size);
            // But in practice we currently use tables that are pre-sorted, which makes
            // permuted_cols cheaper since it will sort the table.
            let table = (0..size).map(F::from_canonical_usize).collect_vec();
            let input = (0..size)
                .map(|_| table[thread_rng().gen_range(0..size)])
                .collect_vec();
            b.iter(|| permuted_cols(&input, &table));
        });
    }
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
--------------------------------------------------------------------------------
/system_zero/src/alu/addition.rs:
--------------------------------------------------------------------------------
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::plonk_common::reduce_with_powers_ext_circuit;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};

use crate::registers::alu::*;
use crate::registers::NUM_COLUMNS;

/// Fills in the three 16-bit output limbs of an addition row from its three inputs.
pub(crate) fn generate_addition<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
    let in_1 = values[COL_ADD_INPUT_0].to_canonical_u64();
    let in_2 = values[COL_ADD_INPUT_1].to_canonical_u64();
    let in_3 = values[COL_ADD_INPUT_2].to_canonical_u64();
    let output = in_1 + in_2 + in_3;

    values[COL_ADD_OUTPUT_0] = F::from_canonical_u16(output as u16);
    values[COL_ADD_OUTPUT_1] = F::from_canonical_u16((output >> 16) as u16);
    values[COL_ADD_OUTPUT_2] =
F::from_canonical_u16((output >> 32) as u16);
}

/// Constraint: the limb-recombined output equals the sum of the three inputs,
/// filtered by the `IS_ADD` flag.
pub(crate) fn eval_addition<F: Field, P: PackedField<Scalar = F>>(
    local_values: &[P; NUM_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let is_add = local_values[IS_ADD];
    let in_1 = local_values[COL_ADD_INPUT_0];
    let in_2 = local_values[COL_ADD_INPUT_1];
    let in_3 = local_values[COL_ADD_INPUT_2];
    let out_1 = local_values[COL_ADD_OUTPUT_0];
    let out_2 = local_values[COL_ADD_OUTPUT_1];
    let out_3 = local_values[COL_ADD_OUTPUT_2];

    let weight_2 = F::from_canonical_u64(1 << 16);
    let weight_3 = F::from_canonical_u64(1 << 32);
    // Note that this can't overflow. Since each output limb has been range checked as 16-bits,
    // this sum can be around 48 bits at most.
    let out = out_1 + out_2 * weight_2 + out_3 * weight_3;

    let computed_out = in_1 + in_2 + in_3;

    yield_constr.constraint(is_add * (out - computed_out));
}

/// Recursive (in-circuit) analogue of `eval_addition`.
pub(crate) fn eval_addition_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    local_values: &[ExtensionTarget<D>; NUM_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_add = local_values[IS_ADD];
    let in_1 = local_values[COL_ADD_INPUT_0];
    let in_2 = local_values[COL_ADD_INPUT_1];
    let in_3 = local_values[COL_ADD_INPUT_2];
    let out_1 = local_values[COL_ADD_OUTPUT_0];
    let out_2 = local_values[COL_ADD_OUTPUT_1];
    let out_3 = local_values[COL_ADD_OUTPUT_2];

    let limb_base = builder.constant(F::from_canonical_u64(1 << 16));
    // Note that this can't overflow. Since each output limb has been range checked as 16-bits,
    // this sum can be around 48 bits at most.
    let out = reduce_with_powers_ext_circuit(builder, &[out_1, out_2, out_3], limb_base);

    let computed_out = builder.add_many_extension([in_1, in_2, in_3]);

    let diff = builder.sub_extension(out, computed_out);
    let filtered_diff = builder.mul_extension(is_add, diff);
    yield_constr.constraint(builder, filtered_diff);
}
--------------------------------------------------------------------------------
/system_zero/src/alu/subtraction.rs:
--------------------------------------------------------------------------------
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use starky::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};

use crate::registers::alu::*;
use crate::registers::NUM_COLUMNS;

/// Fills in the two 16-bit output limbs and the borrow bit of a subtraction row.
pub(crate) fn generate_subtraction<F: PrimeField64>(values: &mut [F; NUM_COLUMNS]) {
    let in_1 = values[COL_SUB_INPUT_0].to_canonical_u64() as u32;
    let in_2 = values[COL_SUB_INPUT_1].to_canonical_u64() as u32;

    // in_1 - in_2 == diff - br*2^32
    let (diff, br) = in_1.overflowing_sub(in_2);

    values[COL_SUB_OUTPUT_0] = F::from_canonical_u16(diff as u16);
    values[COL_SUB_OUTPUT_1] = F::from_canonical_u16((diff >> 16) as u16);
    values[COL_SUB_OUTPUT_BORROW] = F::from_bool(br);
}

/// Constraint: `(br*2^32 + in_1) - in_2 == out_1 + out_2*2^16`, filtered by `IS_SUB`.
pub(crate) fn eval_subtraction<F: Field, P: PackedField<Scalar = F>>(
    local_values: &[P; NUM_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let is_sub = local_values[IS_SUB];
    let in_1 = local_values[COL_SUB_INPUT_0];
    let in_2 = local_values[COL_SUB_INPUT_1];
    let out_1 = local_values[COL_SUB_OUTPUT_0];
    let out_2 = local_values[COL_SUB_OUTPUT_1];
    let out_br = local_values[COL_SUB_OUTPUT_BORROW];

    let base = F::from_canonical_u64(1 << 16);
    let base_sqr = F::from_canonical_u64(1 << 32);

    let out_br = out_br * base_sqr;
    let lhs = (out_br + in_1) - in_2;
    let rhs = out_1 + out_2 * base;

    yield_constr.constraint(is_sub * (lhs - rhs));

    // We don't need to check that out_br is in {0, 1} because it's
    // checked by boolean::col_bit(0) in the ALU.
}

/// Recursive (in-circuit) analogue of `eval_subtraction`.
pub(crate) fn eval_subtraction_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    local_values: &[ExtensionTarget<D>; NUM_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_sub = local_values[IS_SUB];
    let in_1 = local_values[COL_SUB_INPUT_0];
    let in_2 = local_values[COL_SUB_INPUT_1];
    let out_1 = local_values[COL_SUB_OUTPUT_0];
    let out_2 = local_values[COL_SUB_OUTPUT_1];
    let out_br = local_values[COL_SUB_OUTPUT_BORROW];

    let base = builder.constant_extension(F::Extension::from_canonical_u64(1 << 16));
    let base_sqr = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32));

    // lhs = (out_br * 2^32 + in_1) - in_2
    // The borrow must be weighted by 2^32 to match `eval_subtraction`; the previous
    // version added it unscaled and left `base_sqr` unused behind a TODO.
    let lhs = builder.mul_add_extension(out_br, base_sqr, in_1);
    let lhs = builder.sub_extension(lhs, in_2);

    // rhs = out_1 + base * out_2
    let rhs = builder.mul_add_extension(out_2, base, out_1);

    // filtered_diff = is_sub * (lhs - rhs)
    let diff = builder.sub_extension(lhs, rhs);
    let filtered_diff = builder.mul_extension(is_sub, diff);

    yield_constr.constraint(builder, filtered_diff);
}
--------------------------------------------------------------------------------
/system_zero/src/lib.rs:
--------------------------------------------------------------------------------
#![feature(array_zip)]

mod alu;
mod core_registers;
pub mod lookup;
mod memory;
mod permutation_unit;
mod public_input_layout;
mod registers;
pub mod system_zero;
--------------------------------------------------------------------------------
/system_zero/src/memory.rs:
--------------------------------------------------------------------------------
/// Memory for an entire transaction: one entry per (possibly nested) contract call.
#[derive(Default)]
pub struct TransactionMemory {
    pub calls: Vec<ContractMemory>,
}

/// A virtual memory space specific to the current contract call.
pub struct ContractMemory {
    pub code: MemorySegment,
    pub main: MemorySegment,
    pub calldata: MemorySegment,
    pub returndata: MemorySegment,
}

pub struct MemorySegment {
    // NOTE(review): the element type was lost in extraction; bytes assumed — confirm upstream.
    pub content: Vec<u8>,
}
--------------------------------------------------------------------------------
/system_zero/src/public_input_layout.rs:
--------------------------------------------------------------------------------
/// The previous state root, before these transactions were executed.
const PI_OLD_STATE_ROOT: usize = 0;

/// The updated state root, after these transactions were executed.
const PI_NEW_STATE_ROOT: usize = PI_OLD_STATE_ROOT + 1;

pub(crate) const NUM_PUBLIC_INPUTS: usize = PI_NEW_STATE_ROOT + 1;
--------------------------------------------------------------------------------
/system_zero/src/registers/boolean.rs:
--------------------------------------------------------------------------------
//! Boolean unit. Contains columns whose values must be 0 or 1.

const NUM_BITS: usize = 128;

/// Column holding the `index`th boolean-constrained bit.
pub const fn col_bit(index: usize) -> usize {
    debug_assert!(index < NUM_BITS);
    super::START_BOOLEAN + index
}

pub(super) const END: usize = super::START_BOOLEAN + NUM_BITS;
--------------------------------------------------------------------------------
/system_zero/src/registers/core.rs:
--------------------------------------------------------------------------------
//! Core registers.

/// A cycle counter. Starts at 0; increments by 1.
pub(crate) const COL_CLOCK: usize = super::START_CORE;

/// A column which contains the values `[0, ... 2^16 - 1]`, potentially with duplicates. Used for
/// 16-bit range checks.
///
/// For ease of verification, we enforce that it must begin with 0 and end with `2^16 - 1`, and each
/// delta must be either 0 or 1.
pub(crate) const COL_RANGE_16: usize = COL_CLOCK + 1;

/// Pointer to the current instruction.
pub(crate) const COL_INSTRUCTION_PTR: usize = COL_RANGE_16 + 1;
/// Pointer to the base of the current call's stack frame.
pub(crate) const COL_FRAME_PTR: usize = COL_INSTRUCTION_PTR + 1;
/// Pointer to the tip of the current call's stack frame.
pub(crate) const COL_STACK_PTR: usize = COL_FRAME_PTR + 1;

pub(super) const END: usize = COL_STACK_PTR + 1;
--------------------------------------------------------------------------------
/system_zero/src/registers/logic.rs:
--------------------------------------------------------------------------------
//! Logic unit.

pub(super) const END: usize = super::START_LOGIC;
--------------------------------------------------------------------------------
/system_zero/src/registers/lookup.rs:
--------------------------------------------------------------------------------
//! Lookup unit.
//! See https://zcash.github.io/halo2/design/proving-system/lookup.html

const START_UNIT: usize = super::START_LOOKUP;

pub(crate) const NUM_LOOKUPS: usize =
    super::range_check_16::NUM_RANGE_CHECKS + super::range_check_degree::NUM_RANGE_CHECKS;

/// The input column of the `i`th lookup: first the 16-bit range checks, then the
/// degree range checks.
pub(crate) const fn col_input(i: usize) -> usize {
    if i < super::range_check_16::NUM_RANGE_CHECKS {
        super::range_check_16::col_rc_16_input(i)
    } else {
        super::range_check_degree::col_rc_degree_input(i - super::range_check_16::NUM_RANGE_CHECKS)
    }
}

/// This column contains a permutation of the input values.
pub(crate) const fn col_permuted_input(i: usize) -> usize {
    debug_assert!(i < NUM_LOOKUPS);
    START_UNIT + 2 * i
}

/// The table column of the `i`th lookup: the 16-bit range column or the clock column.
pub(crate) const fn col_table(i: usize) -> usize {
    if i < super::range_check_16::NUM_RANGE_CHECKS {
        super::core::COL_RANGE_16
    } else {
        super::core::COL_CLOCK
    }
}

/// This column contains a permutation of the table values.
pub(crate) const fn col_permuted_table(i: usize) -> usize {
    debug_assert!(i < NUM_LOOKUPS);
    START_UNIT + 2 * i + 1
}

pub(super) const END: usize = START_UNIT + NUM_LOOKUPS * 2;
--------------------------------------------------------------------------------
/system_zero/src/registers/memory.rs:
--------------------------------------------------------------------------------
//! Memory unit.

pub(super) const END: usize = super::START_MEMORY;
--------------------------------------------------------------------------------
/system_zero/src/registers/mod.rs:
--------------------------------------------------------------------------------
pub(crate) mod alu;
pub(crate) mod boolean;
pub(crate) mod core;
pub(crate) mod logic;
pub(crate) mod lookup;
pub(crate) mod memory;
pub(crate) mod permutation;
pub(crate) mod range_check_16;
pub(crate) mod range_check_degree;

// Units are laid out consecutively: each unit's END feeds the next unit's START.
const START_ALU: usize = 0;
const START_BOOLEAN: usize = alu::END;
const START_CORE: usize = boolean::END;
const START_LOGIC: usize = core::END;
const START_LOOKUP: usize = logic::END;
const START_MEMORY: usize = lookup::END;
const START_PERMUTATION: usize = memory::END;
const START_RANGE_CHECK_16: usize = permutation::END;
const START_RANGE_CHECK_DEGREE: usize = range_check_16::END;
pub(crate) const NUM_COLUMNS: usize = range_check_degree::END;
--------------------------------------------------------------------------------
/system_zero/src/registers/permutation.rs:
--------------------------------------------------------------------------------
//! Permutation unit.

use plonky2::hash::hashing::SPONGE_WIDTH;
use plonky2::hash::poseidon;

const START_FULL_FIRST: usize = super::START_PERMUTATION + SPONGE_WIDTH;

pub const fn col_full_first_mid_sbox(round: usize, i: usize) -> usize {
    debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_FULL_FIRST + 2 * round * SPONGE_WIDTH + i
}

pub const fn col_full_first_after_mds(round: usize, i: usize) -> usize {
    debug_assert!(round < poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_FULL_FIRST + (2 * round + 1) * SPONGE_WIDTH + i
}

const START_PARTIAL: usize =
    col_full_first_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, SPONGE_WIDTH - 1) + 1;

pub const fn col_partial_mid_sbox(round: usize) -> usize {
    debug_assert!(round < poseidon::N_PARTIAL_ROUNDS);
    START_PARTIAL + 2 * round
}

pub const fn col_partial_after_sbox(round: usize) -> usize {
    debug_assert!(round < poseidon::N_PARTIAL_ROUNDS);
    START_PARTIAL + 2 * round + 1
}

const START_FULL_SECOND: usize = col_partial_after_sbox(poseidon::N_PARTIAL_ROUNDS - 1) + 1;

pub const fn col_full_second_mid_sbox(round: usize, i: usize) -> usize {
    debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_FULL_SECOND + 2 * round * SPONGE_WIDTH + i
}

pub const fn col_full_second_after_mds(round: usize, i: usize) -> usize {
    debug_assert!(round <= poseidon::HALF_N_FULL_ROUNDS);
    debug_assert!(i < SPONGE_WIDTH);
    START_FULL_SECOND + (2 * round + 1) * SPONGE_WIDTH + i
}

/// Column holding the `i`th sponge input.
pub const fn col_input(i: usize) -> usize {
    debug_assert!(i < SPONGE_WIDTH);
    super::START_PERMUTATION + i
}

/// Column holding the `i`th sponge output (the last full round's post-MDS state).
pub const fn col_output(i: usize) -> usize {
    debug_assert!(i < SPONGE_WIDTH);
    col_full_second_after_mds(poseidon::HALF_N_FULL_ROUNDS - 1, i)
}

pub(super) const END: usize = col_output(SPONGE_WIDTH - 1) + 1;
--------------------------------------------------------------------------------
/system_zero/src/registers/range_check_16.rs:
--------------------------------------------------------------------------------
//! Range check unit which checks that values are in `[0, 2^16)`.

pub(super) const NUM_RANGE_CHECKS: usize = 6;

/// The input of the `i`th range check, i.e. the value being range checked.
pub(crate) const fn col_rc_16_input(i: usize) -> usize {
    debug_assert!(i < NUM_RANGE_CHECKS);
    super::START_RANGE_CHECK_16 + i
}

pub(super) const END: usize = super::START_RANGE_CHECK_16 + NUM_RANGE_CHECKS;
--------------------------------------------------------------------------------
/system_zero/src/registers/range_check_degree.rs:
--------------------------------------------------------------------------------
//! Range check unit which checks that values are in `[0, degree)`.

pub(crate) const NUM_RANGE_CHECKS: usize = 5;

/// The input of the `i`th range check, i.e. the value being range checked.
6 | pub(crate) const fn col_rc_degree_input(i: usize) -> usize { 7 | debug_assert!(i < NUM_RANGE_CHECKS); 8 | super::START_RANGE_CHECK_DEGREE + i 9 | } 10 | 11 | pub(super) const END: usize = super::START_RANGE_CHECK_DEGREE + NUM_RANGE_CHECKS; 12 | -------------------------------------------------------------------------------- /u32/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "plonky2_u32" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | anyhow = { version = "1.0.40", default-features = false } 8 | itertools = { version = "0.10.0", default-features = false } 9 | num = { version = "0.4", default-features = false } 10 | plonky2 = { path = "../plonky2", default-features = false } 11 | 12 | [dev-dependencies] 13 | plonky2 = { path = "../plonky2", default-features = false, features = ["gate_testing"] } 14 | rand = { version = "0.8.4", default-features = false, features = ["getrandom"] } 15 | -------------------------------------------------------------------------------- /u32/src/gadgets/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod arithmetic_u32; 2 | pub mod multiple_comparison; 3 | pub mod range_check; 4 | -------------------------------------------------------------------------------- /u32/src/gadgets/range_check.rs: -------------------------------------------------------------------------------- 1 | use alloc::vec; 2 | use alloc::vec::Vec; 3 | 4 | use plonky2::field::extension::Extendable; 5 | use plonky2::hash::hash_types::RichField; 6 | use plonky2::iop::target::Target; 7 | use plonky2::plonk::circuit_builder::CircuitBuilder; 8 | 9 | use crate::gadgets::arithmetic_u32::U32Target; 10 | use crate::gates::range_check_u32::U32RangeCheckGate; 11 | 12 | pub fn range_check_u32_circuit, const D: usize>( 13 | builder: &mut CircuitBuilder, 14 | vals: Vec, 15 | ) { 16 | let num_input_limbs = vals.len(); 17 | let gate = 
U32RangeCheckGate::::new(num_input_limbs); 18 | let row = builder.add_gate(gate, vec![]); 19 | 20 | for i in 0..num_input_limbs { 21 | builder.connect(Target::wire(row, gate.wire_ith_input_limb(i)), vals[i].0); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /u32/src/gates/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod add_many_u32; 2 | pub mod arithmetic_u32; 3 | pub mod comparison; 4 | pub mod range_check_u32; 5 | pub mod subtraction_u32; 6 | -------------------------------------------------------------------------------- /u32/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::needless_range_loop)] 2 | #![no_std] 3 | 4 | extern crate alloc; 5 | 6 | pub mod gadgets; 7 | pub mod gates; 8 | pub mod witness; 9 | -------------------------------------------------------------------------------- /u32/src/witness.rs: -------------------------------------------------------------------------------- 1 | use plonky2::field::types::{Field, PrimeField64}; 2 | use plonky2::iop::generator::GeneratedValues; 3 | use plonky2::iop::witness::{Witness, WitnessWrite}; 4 | 5 | use crate::gadgets::arithmetic_u32::U32Target; 6 | 7 | pub trait WitnessU32: Witness { 8 | fn set_u32_target(&mut self, target: U32Target, value: u32); 9 | fn get_u32_target(&self, target: U32Target) -> (u32, u32); 10 | } 11 | 12 | impl, F: PrimeField64> WitnessU32 for T { 13 | fn set_u32_target(&mut self, target: U32Target, value: u32) { 14 | self.set_target(target.0, F::from_canonical_u32(value)); 15 | } 16 | 17 | fn get_u32_target(&self, target: U32Target) -> (u32, u32) { 18 | let x_u64 = self.get_target(target.0).to_canonical_u64(); 19 | let low = x_u64 as u32; 20 | let high = (x_u64 >> 32) as u32; 21 | (low, high) 22 | } 23 | } 24 | 25 | pub trait GeneratedValuesU32 { 26 | fn set_u32_target(&mut self, target: U32Target, value: u32); 27 | } 28 | 
29 | impl GeneratedValuesU32 for GeneratedValues { 30 | fn set_u32_target(&mut self, target: U32Target, value: u32) { 31 | self.set_target(target.0, F::from_canonical_u32(value)) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /util/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "plonky2_util" 3 | description = "Utilities used by Plonky2" 4 | version = "0.1.0" 5 | edition = "2021" 6 | 7 | [dev-dependencies] 8 | rand = { version = "0.8.5", default-features = false, features = ["getrandom"] } 9 | -------------------------------------------------------------------------------- /waksman/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "plonky2_waksman" 3 | description = "A circuit implementation AS-Waksman networks, useful for checking permutations and sorting" 4 | version = "0.1.0" 5 | edition = "2021" 6 | 7 | [dependencies] 8 | anyhow = "1.0.40" 9 | array_tool = "1.0.3" 10 | bimap = "0.6.1" 11 | itertools = "0.10.0" 12 | "plonky2" = { path = "../plonky2" } 13 | "plonky2_field" = { path = "../field" } 14 | "plonky2_util" = { path = "../util" } 15 | rand = "0.8.4" 16 | -------------------------------------------------------------------------------- /waksman/src/bimap.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::hash::Hash; 3 | 4 | use bimap::BiMap; 5 | 6 | /// Given two lists which are permutations of one another, creates a BiMap which maps an index in 7 | /// one list to an index in the other list with the same associated value. 8 | /// 9 | /// If the lists contain duplicates, then multiple permutations with this property exist, and an 10 | /// arbitrary one of them will be returned. 
11 | pub fn bimap_from_lists(a: Vec, b: Vec) -> BiMap { 12 | assert_eq!(a.len(), b.len(), "Vectors differ in length"); 13 | 14 | let mut b_values_to_indices = HashMap::new(); 15 | for (i, value) in b.iter().enumerate() { 16 | b_values_to_indices 17 | .entry(value) 18 | .or_insert_with(Vec::new) 19 | .push(i); 20 | } 21 | 22 | let mut bimap = BiMap::new(); 23 | for (i, value) in a.iter().enumerate() { 24 | if let Some(j) = b_values_to_indices.get_mut(&value).and_then(Vec::pop) { 25 | bimap.insert(i, j); 26 | } else { 27 | panic!("Value in first list not found in second list"); 28 | } 29 | } 30 | 31 | bimap 32 | } 33 | 34 | #[cfg(test)] 35 | mod tests { 36 | use crate::bimap::bimap_from_lists; 37 | 38 | #[test] 39 | fn empty_lists() { 40 | let empty: Vec = Vec::new(); 41 | let bimap = bimap_from_lists(empty.clone(), empty); 42 | assert!(bimap.is_empty()); 43 | } 44 | 45 | #[test] 46 | fn without_duplicates() { 47 | let bimap = bimap_from_lists(vec!['a', 'b', 'c'], vec!['b', 'c', 'a']); 48 | assert_eq!(bimap.get_by_left(&0), Some(&2)); 49 | assert_eq!(bimap.get_by_left(&1), Some(&0)); 50 | assert_eq!(bimap.get_by_left(&2), Some(&1)); 51 | } 52 | 53 | #[test] 54 | fn with_duplicates() { 55 | let first = vec!['a', 'a', 'b']; 56 | let second = vec!['a', 'b', 'a']; 57 | let bimap = bimap_from_lists(first.clone(), second.clone()); 58 | for i in 0..3 { 59 | let j = *bimap.get_by_left(&i).unwrap(); 60 | assert_eq!(first[i], second[j]); 61 | } 62 | } 63 | 64 | #[test] 65 | #[should_panic] 66 | fn lengths_differ() { 67 | bimap_from_lists(vec!['a', 'a', 'b'], vec!['a', 'b']); 68 | } 69 | 70 | #[test] 71 | #[should_panic] 72 | fn not_a_permutation() { 73 | bimap_from_lists(vec!['a', 'a', 'b'], vec!['a', 'b', 'b']); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /waksman/src/gates/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod assert_le; 2 | pub mod switch; 3 | 
--------------------------------------------------------------------------------
/waksman/src/lib.rs:
--------------------------------------------------------------------------------
#![allow(clippy::new_without_default)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::type_complexity)]
#![allow(clippy::len_without_is_empty)]
#![allow(clippy::needless_range_loop)]
#![allow(clippy::return_self_not_must_use)]

pub mod bimap;
pub mod gates;
pub mod permutation;
pub mod sorting;
--------------------------------------------------------------------------------