├── .envrc ├── bin └── lurk ├── lurk-macros ├── .clippy.toml └── Cargo.toml ├── src ├── coroutine │ ├── mod.rs │ └── memoset │ │ └── multiset.rs ├── circuit │ ├── mod.rs │ ├── gadgets │ │ ├── mod.rs │ │ ├── case.rs │ │ ├── data.rs │ │ ├── hashes.rs │ │ └── circom │ │ │ └── mod.rs │ └── circuit_frame.rs ├── lem │ ├── coroutine │ │ └── mod.rs │ ├── tests │ │ ├── mod.rs │ │ ├── nivc_steps.rs │ │ └── stream.rs │ ├── var_map.rs │ ├── tag.rs │ └── pointers.rs ├── public_parameters │ ├── error.rs │ └── disk_cache.rs ├── main.rs ├── error.rs ├── parser.rs ├── lib.rs ├── cli │ ├── backend.rs │ ├── commitment.rs │ ├── paths.rs │ └── circom.rs ├── z_data │ ├── serde │ │ └── mod.rs │ ├── z_ptr.rs │ └── z_store.rs ├── uint.rs ├── proof │ └── tests │ │ ├── supernova_tests.rs │ │ └── stream.rs ├── dual_channel.rs ├── parser │ ├── position.rs │ └── base.rs ├── coprocessor │ ├── circom │ │ ├── error.rs │ │ └── README.md │ └── sha256.rs ├── syntax_macros.rs ├── package.rs └── syntax.rs ├── .gitmodules ├── .github ├── tables.toml ├── workflows │ ├── codecov.yml │ ├── docs.yml │ ├── gpu-ci.yml │ ├── bench-pr-comment.yml │ ├── ci.yml │ ├── bench-deploy.yml │ ├── gpu-bench-workflow-dispatch.yml │ ├── merge-tests.yml │ ├── gpu-bench-manual-comparative.yml │ └── nightly.yml ├── PERF_REGRESSION.md ├── dependabot.yml └── CODEOWNERS ├── benches ├── bench.env ├── common │ ├── mod.rs │ └── fib.rs ├── justfile ├── public_params.rs ├── trie_nivc.rs └── synthesis.rs ├── chain-server ├── build.rs ├── proto │ └── chain-server.proto ├── Cargo.toml ├── README.md └── src │ ├── lib.rs │ └── client.rs ├── .gitignore ├── rust-toolchain.toml ├── _typos.toml ├── demo ├── simple.lurk ├── functional-commitment.lurk ├── protocol.lurk ├── chained-functional-commitment.lurk └── vdf.lurk ├── rustfmt.toml ├── .clippy.toml ├── .config └── nextest.toml ├── tests ├── lurk-nivc-test.rs ├── lurk-files-tests.rs └── lurk-cli-tests.rs ├── lurk-metrics ├── Cargo.toml └── src │ ├── recorder.rs │ └── data.rs ├── foil └── 
Cargo.toml ├── LICENSE-MIT ├── flake.nix ├── .cargo └── config.toml ├── notes └── eval.md ├── examples ├── itcalc.rs ├── sha256_ivc.rs └── sha256_nivc.rs ├── flake.lock └── CODE_OF_CONDUCT.md /.envrc: -------------------------------------------------------------------------------- 1 | use flake 2 | -------------------------------------------------------------------------------- /bin/lurk: -------------------------------------------------------------------------------- 1 | cargo run --release -- $@ 2 | 3 | -------------------------------------------------------------------------------- /lurk-macros/.clippy.toml: -------------------------------------------------------------------------------- 1 | ../.clippy.toml -------------------------------------------------------------------------------- /src/coroutine/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod memoset; 2 | -------------------------------------------------------------------------------- /src/circuit/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub mod gadgets; 3 | mod circuit_frame; 4 | -------------------------------------------------------------------------------- /src/lem/coroutine/mod.rs: -------------------------------------------------------------------------------- 1 | mod eval; 2 | mod synthesis; 3 | pub mod toplevel; 4 | -------------------------------------------------------------------------------- /src/lem/tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod eval_tests; 2 | mod misc; 3 | mod nivc_steps; 4 | mod stream; 5 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "lurk-lib"] 2 | path = lurk-lib 3 | url = https://github.com/argumentcomputer/lurk-lib.git 4 | 
-------------------------------------------------------------------------------- /.github/tables.toml: -------------------------------------------------------------------------------- 1 | [table_comments] 2 | 3 | [top_comments] 4 | Overview = """ 5 | This benchmark report shows the Fibonacci GPU benchmark. 6 | """ -------------------------------------------------------------------------------- /benches/bench.env: -------------------------------------------------------------------------------- 1 | # Lurk config, used only in `justfile` by default 2 | LURK_PERF=fully-parallel 3 | LURK_RC=100,600 4 | LURK_BENCH_NOISE_THRESHOLD=0.05 5 | -------------------------------------------------------------------------------- /chain-server/build.rs: -------------------------------------------------------------------------------- 1 | fn main() -> Result<(), Box> { 2 | tonic_build::compile_protos("proto/chain-server.proto")?; 3 | Ok(()) 4 | } 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | result* 3 | /scratch 4 | /.direnv 5 | *.commit 6 | Cargo.lock 7 | 8 | # Configurations for VSCode 9 | .vscode/ 10 | 11 | # Configuration for Jetbrains 12 | .idea/ -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | # The default profile includes rustc, rust-std, cargo, rust-docs, rustfmt and clippy. 
3 | profile = "default" 4 | channel = "1.79" 5 | targets = [ "wasm32-unknown-unknown" ] 6 | 7 | -------------------------------------------------------------------------------- /src/circuit/gadgets/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub(crate) mod macros; 3 | 4 | pub(crate) mod case; 5 | pub mod circom; 6 | pub mod constraints; 7 | pub(crate) mod data; 8 | pub(crate) mod hashes; 9 | pub mod pointer; 10 | -------------------------------------------------------------------------------- /.github/workflows/codecov.yml: -------------------------------------------------------------------------------- 1 | name: Generate and deploy Codecov results 2 | on: 3 | schedule: 4 | - cron: '0 0 * * *' 5 | 6 | jobs: 7 | codecov-grcov: 8 | uses: argumentcomputer/ci-workflows/.github/workflows/codecov.yml@main 9 | -------------------------------------------------------------------------------- /_typos.toml: -------------------------------------------------------------------------------- 1 | [default] 2 | extend-ignore-identifiers-re = [ 3 | "[Aa]bomonation", 4 | "[Ff]o", 5 | "noo", 6 | # Ignores e.g. 
\"supernova_bn256_10_18748ce7ba3dd0e7560ec64983d6b01d84a6303880b3b0b24878133aa1b4a6bb\" 7 | "[\".*_.*_.*_.*\\\\\"]" 8 | ] 9 | -------------------------------------------------------------------------------- /demo/simple.lurk: -------------------------------------------------------------------------------- 1 | 123 2 | 3 | (+ 1 1) 4 | 5 | !(def square (lambda (x) (* x x))) 6 | 7 | (square 8) 8 | 9 | !(def make-adder 10 | (lambda (n) 11 | (lambda (x) 12 | (+ x n)))) 13 | 14 | !(def five-plus 15 | (make-adder 5)) 16 | 17 | (five-plus 3) 18 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" # Rust edition 2 | newline_style = "Unix" # never allow Windows' \n\r newlines 3 | 4 | use_field_init_shorthand = true # replace Foo { x: x } with Foo { x } 5 | use_try_shorthand = true # replace try! with ? 6 | -------------------------------------------------------------------------------- /.github/PERF_REGRESSION.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: ":rotating_light: Performance regression in #{{ env.PR_NUMBER }}" 3 | labels: P-Performance, automated issue 4 | --- 5 | Regression >= {{ env.NOISE_THRESHOLD }}% found during merge of: #{{ env.PR_NUMBER }} 6 | Commit: {{ env.GIT_SHA }} 7 | Triggered by: {{ env.WORKFLOW_URL }} -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Generate and deploy crate docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - "**.rs" 9 | - "Cargo.toml" 10 | - "Cargo.lock" 11 | workflow_dispatch: 12 | 13 | jobs: 14 | docs: 15 | uses: argumentcomputer/ci-workflows/.github/workflows/docs.yml@main -------------------------------------------------------------------------------- 
/src/public_parameters/error.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use thiserror::Error; 3 | 4 | #[non_exhaustive] 5 | #[derive(Error, Debug)] 6 | pub enum Error { 7 | #[error("IO error: {0}")] 8 | IO(#[from] io::Error), 9 | #[error("Cache error: {0}")] 10 | Cache(String), 11 | #[error("JSON error: {0}")] 12 | Json(#[from] serde_json::Error), 13 | } 14 | -------------------------------------------------------------------------------- /.clippy.toml: -------------------------------------------------------------------------------- 1 | type-complexity-threshold = 1200 2 | too-many-arguments-threshold = 20 3 | disallowed-methods = [ 4 | # we use strict naming for pasta fields 5 | { path = "pasta_curves::Fp", reason = "use pasta_curves::pallas::Base or pasta_curves::vesta::Scalar instead to communicate your intent" }, 6 | { path = "pasta_curves::Fq", reason = "use pasta_curves::pallas::Scalar or pasta_curves::vesta::Base instead to communicate your intent" }, 7 | ] 8 | allow-dbg-in-tests = true 9 | -------------------------------------------------------------------------------- /chain-server/proto/chain-server.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package chain_prover; 4 | 5 | service ChainProver { 6 | rpc Config (ConfigRequest) returns (ConfigResponse); 7 | rpc Chain (ChainRequest) returns (ChainResponse); 8 | } 9 | 10 | message ConfigRequest {} 11 | 12 | message ConfigResponse { 13 | bytes config_response_data = 1; 14 | } 15 | 16 | message ChainRequest { 17 | bytes chain_request_data = 1; 18 | } 19 | 20 | message ChainResponse { 21 | bytes chain_response_data = 1; 22 | } 23 | 24 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 
4 | directory: / 5 | pull-request-branch-name: 6 | separator: "-" 7 | schedule: 8 | interval: weekly 9 | groups: 10 | rust-dependencies: 11 | patterns: 12 | - "*" 13 | update-types: 14 | - "minor" 15 | - "patch" 16 | 17 | open-pull-requests-limit: 5 18 | 19 | - package-ecosystem: "github-actions" 20 | directory: "/" 21 | schedule: 22 | interval: "weekly" 23 | -------------------------------------------------------------------------------- /.github/workflows/gpu-ci.yml: -------------------------------------------------------------------------------- 1 | # Runs the test suite on a self-hosted GPU machine with CUDA enabled 2 | name: GPU tests 3 | 4 | on: 5 | pull_request: 6 | types: [opened, synchronize, reopened, ready_for_review] 7 | branches: [main] 8 | merge_group: 9 | 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | gpu-ci: 17 | name: Rust GPU tests 18 | uses: argumentcomputer/ci-workflows/.github/workflows/gpu-ci-cuda.yml@main 19 | -------------------------------------------------------------------------------- /.config/nextest.toml: -------------------------------------------------------------------------------- 1 | [profile.ci] 2 | # Print out output for failing tests as soon as they fail, and also at the end 3 | # of the run (for easy scrollability). 4 | failure-output = "immediate-final" 5 | # Show skipped tests in the CI output. 6 | status-level = "skip" 7 | # Do not cancel the test run on the first failure. 
8 | fail-fast = false 9 | # Mark tests as slow after 5mins, kill them after 20mins 10 | slow-timeout = { period = "300s", terminate-after = 4 } 11 | # Retry failed tests once, marked flaky if test then passes 12 | retries = 1 13 | -------------------------------------------------------------------------------- /tests/lurk-nivc-test.rs: -------------------------------------------------------------------------------- 1 | use assert_cmd::prelude::*; 2 | use std::process::Command; 3 | 4 | /// TODO: replace this test for more granular ones, specific for the NIVC 5 | /// pipeline steps 6 | #[test] 7 | #[ignore] 8 | fn test_sha256_nivc() { 9 | let mut cmd = Command::new("cargo"); 10 | cmd.args(["run", "--release", "--example", "sha256_nivc"]); 11 | cmd.assert().success(); 12 | } 13 | 14 | #[test] 15 | #[ignore] 16 | fn test_sha256_ivc() { 17 | let mut cmd = Command::new("cargo"); 18 | cmd.args(["run", "--release", "--example", "sha256_ivc"]); 19 | cmd.assert().success(); 20 | } 21 | -------------------------------------------------------------------------------- /benches/common/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod fib; 2 | 3 | use camino::Utf8PathBuf; 4 | use lurk::cli::paths::lurk_default_dir; 5 | use lurk::config::lurk_config; 6 | use once_cell::sync::Lazy; 7 | 8 | /// Edit this path to use a config file specific to benchmarking 9 | /// E.g. 
`Utf8PathBuf::from("/home//lurk-beta/lurk-bench.toml");` 10 | pub(crate) static BENCH_CONFIG_PATH: Lazy = 11 | Lazy::new(|| lurk_default_dir().join("lurk.toml")); 12 | 13 | /// Sets the config settings with the given file 14 | pub(crate) fn set_bench_config() { 15 | lurk_config(Some(&BENCH_CONFIG_PATH), None); 16 | } 17 | -------------------------------------------------------------------------------- /lurk-metrics/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lurk-metrics" 3 | version = "0.2.0" 4 | description = "Metrics Sink for lurk" 5 | edition.workspace = true 6 | repository.workspace = true 7 | authors.workspace = true 8 | homepage.workspace = true 9 | license.workspace = true 10 | rust-version.workspace = true 11 | 12 | [dependencies] 13 | metrics = { workspace = true } 14 | once_cell = { workspace = true } 15 | hdrhistogram = { version = "7.5.2", default-features = false } 16 | tracing = { workspace = true } 17 | 18 | [dev-dependencies] 19 | regex = { version = "1.9.4", features = ["unicode-case"] } 20 | tracing-test = { version = "0.2", features = ["no-env-filter"] } 21 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Global Owners: @porcuquine, @huitseeker 2 | * @argumentcomputer/lurk-noncircuit 3 | 4 | # CI maintenance 5 | /.circleci/** @argumentcomputer/lurk-ci 6 | /.config/** @argumentcomputer/lurk-ci 7 | /.github/** @argumentcomputer/lurk-ci 8 | 9 | # Benchmarks 10 | /benches/** @argumentcomputer/lurk-benches 11 | 12 | # CLI tools 13 | /src/cli/** @argumentcomputer/lurk-cli-tools 14 | 15 | # Circuit 16 | /src/circuit/** @argumentcomputer/lurk-circuit 17 | /src/lem/circuit.rs @argumentcomputer/lurk-circuit 18 | /src/coprocessor/** @argumentcomputer/lurk-circuit 19 | /src/coroutine/** @argumentcomputer/lurk-circuit 20 | 21 | # Evaluation 
model 22 | /src/lem/eval.rs @argumentcomputer/lurk-lem 23 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry}; 3 | use tracing_texray::TeXRayLayer; 4 | 5 | fn main() -> Result<()> { 6 | // this handle should be held until the end of the program, 7 | // do not replace by let _ = ... 8 | let _metrics_handle = lurk_metrics::MetricsSink::init(); 9 | 10 | let subscriber = Registry::default() 11 | .with(fmt::layer().pretty()) 12 | .with(EnvFilter::from_default_env()) 13 | // note: we don't `tracing_texray::examine` anywhere in lurkrs, so no spans are printed *yet* 14 | .with(TeXRayLayer::new()); 15 | tracing::subscriber::set_global_default(subscriber).unwrap(); 16 | 17 | println!( 18 | "commit: {} {}", 19 | env!("VERGEN_GIT_COMMIT_DATE"), 20 | env!("VERGEN_GIT_SHA") 21 | ); 22 | 23 | lurk::cli::parse_and_run() 24 | } 25 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use crate::store; 2 | 3 | use bellpepper_core::SynthesisError; 4 | use nova::errors::NovaError; 5 | use nova::supernova::error::SuperNovaError; 6 | use thiserror::Error; 7 | 8 | #[derive(Error, Debug)] 9 | pub enum ProofError { 10 | #[error("Nova error")] 11 | Nova(#[from] NovaError), 12 | #[error("SuperNova error")] 13 | SuperNova(#[from] SuperNovaError), 14 | #[error("Synthesis error: {0}")] 15 | Synthesis(#[from] SynthesisError), 16 | #[error("Reduction error: {0}")] 17 | Reduction(#[from] ReductionError), 18 | } 19 | 20 | impl From for ProofError { 21 | fn from(e: store::Error) -> Self { 22 | Self::Reduction(e.into()) 23 | } 24 | } 25 | 26 | #[derive(Error, Debug, Clone)] 27 | pub enum ReductionError { 28 | #[error("Miscellaneous error: {0}")] 29 | 
Misc(String), 30 | #[error("Lookup error: {0}")] 31 | Store(#[from] store::Error), 32 | } 33 | -------------------------------------------------------------------------------- /src/circuit/gadgets/case.rs: -------------------------------------------------------------------------------- 1 | use crate::field::LurkField; 2 | 3 | use bellpepper_core::num::AllocatedNum; 4 | use std::fmt::Debug; 5 | 6 | /// Initialized map entry for a fixed `key` with 7 | /// an allocated `value` computed at runtime. 8 | #[allow(dead_code)] 9 | pub(crate) struct CaseClause<'a, F: LurkField> { 10 | pub(crate) key: F, 11 | pub(crate) value: &'a AllocatedNum, 12 | } 13 | 14 | impl Debug for CaseClause<'_, F> { 15 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 16 | f.debug_struct("CaseClause") 17 | .field("key", &self.key) 18 | .field( 19 | "value", 20 | &format!( 21 | "AllocatedNum {{ value: {:?}, variable: {:?} }}", 22 | self.value.get_value(), 23 | self.value.get_variable() 24 | ), 25 | ) 26 | .finish() 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/parser.rs: -------------------------------------------------------------------------------- 1 | use thiserror; 2 | 3 | pub mod base; 4 | pub mod error; 5 | pub mod position; 6 | pub mod string; 7 | pub mod syntax; 8 | 9 | pub type Span<'a> = nom_locate::LocatedSpan<&'a str>; 10 | pub type ParseResult<'a, F, T> = nom::IResult, T, error::ParseError, F>>; 11 | 12 | // see https://github.com/sg16-unicode/sg16/issues/69 13 | pub static LURK_WHITESPACE: [char; 27] = [ 14 | '\u{0009}', '\u{000A}', '\u{000B}', '\u{000C}', '\u{000D}', '\u{0020}', '\u{0085}', '\u{200E}', 15 | '\u{200F}', '\u{2028}', '\u{2029}', '\u{20A0}', '\u{1680}', '\u{2000}', '\u{2001}', '\u{2002}', 16 | '\u{2003}', '\u{2004}', '\u{2005}', '\u{2006}', '\u{2007}', '\u{2008}', '\u{2009}', '\u{200A}', 17 | '\u{202F}', '\u{205F}', '\u{3000}', 18 | ]; 19 | 20 | #[derive(thiserror::Error, Debug, Clone)] 21 | 
pub enum Error { 22 | #[error("Empty input error")] 23 | NoInput, 24 | #[error("Syntax error: {0}")] 25 | Syntax(String), 26 | } 27 | -------------------------------------------------------------------------------- /chain-server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "chain-server" 3 | version = "0.1.0" 4 | authors.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | license.workspace = true 8 | repository.workspace = true 9 | rust-version.workspace = true 10 | 11 | [[bin]] 12 | name = "server" 13 | path = "src/server.rs" 14 | 15 | [[bin]] 16 | name = "client" 17 | path = "src/client.rs" 18 | 19 | [dependencies] 20 | abomonation = { workspace = true } 21 | anyhow = { workspace = true } 22 | camino = { workspace = true} 23 | clap = { workspace = true} 24 | ff = { workspace = true } 25 | lurk = { path = "../" } 26 | halo2curves = { version = "0.6.0", features = ["bits", "derive_serde"] } 27 | nova = { workspace = true } 28 | once_cell = {workspace = true } 29 | prost = "0.13" 30 | rustyline = "14.0" 31 | serde = { workspace = true } 32 | tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } 33 | tonic = "0.12" 34 | 35 | [build-dependencies] 36 | tonic-build = "0.12" 37 | -------------------------------------------------------------------------------- /demo/functional-commitment.lurk: -------------------------------------------------------------------------------- 1 | ;; Let's define a function: f(x) = 3x^2 + 9x + 2 2 | 3 | !(def f (lambda (x) (+ (* 3 (* x x)) (+ (* 9 x) 2)))) 4 | 5 | !(assert-eq (f 5) 122) 6 | 7 | ;; We can create a cryptographic commitment to f. 8 | 9 | !(commit f) 10 | 11 | ;; We open the functional commitment on input 5: Evaluate f(5). 12 | 13 | !(call 0x1a1975f9bc4e30d4c0e80482418a314c9d2a5bc0eea2770da310886f455825d1 5) 14 | 15 | ;; We can prove the functional-commitment opening. 
16 | 17 | !(prove) 18 | 19 | ;; We can inspect the input/output expressions of the proof. 20 | 21 | !(inspect "supernova_bn256_10_15c837e5040ac70c00030c228b61fde2c164d930ba6ea396353b3cfcaa16609d") 22 | 23 | ;; Or the full proof claim 24 | 25 | !(inspect-full "supernova_bn256_10_15c837e5040ac70c00030c228b61fde2c164d930ba6ea396353b3cfcaa16609d") 26 | 27 | ;; Finally, and most importantly, we can verify the proof. 28 | 29 | !(verify "supernova_bn256_10_15c837e5040ac70c00030c228b61fde2c164d930ba6ea396353b3cfcaa16609d") 30 | -------------------------------------------------------------------------------- /.github/workflows/bench-pr-comment.yml: -------------------------------------------------------------------------------- 1 | # Creates a PR benchmark comment with a comparison to main 2 | name: Benchmark pull requests 3 | on: 4 | issue_comment: 5 | types: [created] 6 | 7 | concurrency: 8 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 9 | cancel-in-progress: true 10 | 11 | jobs: 12 | benchmark: 13 | name: Comparative PR benchmark comment 14 | if: 15 | github.event.issue.pull_request 16 | && github.event.issue.state == 'open' 17 | && (contains(github.event.comment.body, '!benchmark') || contains(github.event.comment.body, '!gpu-benchmark')) 18 | && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') 19 | uses: argumentcomputer/ci-workflows/.github/workflows/bench-pr-comment.yml@main 20 | with: 21 | default-runner: "self-hosted,gpu-bench" 22 | default-benches: "fibonacci" 23 | default-env: "LURK_BENCH_OUTPUT=pr-comment LURK_RC=100,600" 24 | -------------------------------------------------------------------------------- /foil/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "foil" 3 | version = "0.1.0" 4 | edition = "2021" 5 | authors = ["porcuquine "] 6 | description = "Flat Optimization Intermediate Language" 7 | 
repository = "https://github.com/argumentcomputer/lurk-beta" 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | bellpepper = { workspace = true } 13 | bellpepper-core = { workspace = true } 14 | ff = { workspace = true } 15 | indexmap = { version = "2.2.3", features = ["rayon"] } 16 | generic-array = "1.1.0" 17 | lurk = { path = "../" } 18 | lurk-macros = { path = "../lurk-macros" } 19 | tracing = { workspace = true } 20 | neptune = { workspace = true, features = ["arity2","arity4","arity8","arity16","pasta","bls"] } 21 | once_cell = { workspace = true } 22 | pasta_curves = { workspace = true, features = ["repr-c", "serde"] } 23 | 24 | [dev-dependencies] 25 | env_logger = "*" 26 | test-log = "0.2.12" 27 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../README.md")] 2 | #![deny(unreachable_pub)] 3 | 4 | #[macro_use] 5 | pub mod circuit; 6 | pub mod cli; 7 | pub mod config; 8 | pub mod coprocessor; 9 | pub mod coroutine; 10 | pub mod dual_channel; 11 | pub mod error; 12 | pub mod field; 13 | mod hash; 14 | pub mod lang; 15 | pub mod lem; 16 | mod num; 17 | mod package; 18 | pub mod parser; 19 | pub mod proof; 20 | pub mod public_parameters; 21 | pub mod state; 22 | pub mod symbol; 23 | mod syntax; 24 | mod syntax_macros; 25 | pub mod tag; 26 | mod uint; 27 | pub mod z_data; 28 | pub use num::Num; 29 | pub use symbol::Symbol; 30 | pub use uint::UInt; 31 | 32 | pub use z_data::{z_cont, z_expr, z_ptr, z_store}; 33 | 34 | mod store { 35 | #[derive(thiserror::Error, Debug, Clone)] 36 | pub struct Error(pub String); 37 | 38 | impl std::fmt::Display for Error { 39 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 40 | write!(f, "StoreError: {}", self.0) 41 | } 42 | } 43 | } 44 | 
-------------------------------------------------------------------------------- /lurk-macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lurk-macros" 3 | version = "0.2.0" 4 | description = "Custom derives for `lurk`" 5 | edition.workspace = true 6 | repository.workspace = true 7 | authors.workspace = true 8 | homepage.workspace = true 9 | license.workspace = true 10 | rust-version.workspace = true 11 | 12 | [lib] 13 | proc-macro = true 14 | 15 | [dependencies] 16 | proc-macro2 = "1.0.66" 17 | quote = "1.0.31" 18 | syn = { version = "1.0.109", features = ["derive", "extra-traits", "full"] } 19 | 20 | [dev-dependencies] 21 | anyhow.workspace = true 22 | bincode = { workspace = true } 23 | lurk_crate = { path = "../", package = "lurk" } 24 | pasta_curves = { workspace = true, features = ["repr-c", "serde"] } 25 | proptest = { workspace = true } 26 | proptest-derive = { workspace = true } 27 | serde = { workspace = true, features = ["derive"] } 28 | 29 | # `cargo udeps` seems unable to detect dev-dependency usage in a proc macro crate 30 | [package.metadata.cargo-udeps.ignore] 31 | development = ["anyhow", "bincode", "lurk_crate", "pasta_curves", "proptest", "proptest-derive", "serde"] 32 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Argument Computer Corporation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 
12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /demo/protocol.lurk: -------------------------------------------------------------------------------- 1 | ;; A protocol that requires knowledge of a hash that opens to a pair such that 2 | ;; its components add up to 30. If the proof is accepted, further require that 3 | ;; the first component of the pair be greater than 10. 4 | !(defprotocol my-protocol (hash pair) 5 | (let ((list6 (lambda (a b c d e f) (cons a (cons b (cons c (cons d (cons e (cons f nil)))))))) 6 | (mk-open-expr (lambda (hash) (cons 'open (cons hash nil))))) 7 | (cons 8 | (if (= (+ (car pair) (cdr pair)) 30) 9 | (list6 (mk-open-expr hash) (empty-env) :outermost pair (empty-env) :terminal) 10 | nil) 11 | (lambda () (> (car pair) 10)))) 12 | :backend "supernova" 13 | :rc 10 14 | :descr "demo protocol") 15 | 16 | ;; This is the prover's pair, whose hash is 17 | ;; 0x237fe43a25f3830ab6ac86451b93e74e8ef6ef1e8735a3f53478b7fe76b1a466 18 | (commit '(13 . 17)) 19 | 20 | ;; Let's prove it and write the proof to the file protocol-proof 21 | !(prove-protocol my-protocol 22 | "protocol-proof" 23 | 0x237fe43a25f3830ab6ac86451b93e74e8ef6ef1e8735a3f53478b7fe76b1a466 24 | '(13 . 
17)) 25 | 26 | ;; Now it can be verified 27 | !(verify-protocol my-protocol "protocol-proof") 28 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | inputs = { 3 | nixpkgs.url = "nixpkgs/nixos-unstable"; 4 | flake-utils.url = "github:numtide/flake-utils"; 5 | naersk = { 6 | url = "github:nix-community/naersk"; 7 | inputs.nixpkgs.follows = "nixpkgs"; 8 | }; 9 | fenix = { 10 | url = "github:nix-community/fenix"; 11 | inputs.nixpkgs.follows = "nixpkgs"; 12 | }; 13 | }; 14 | 15 | outputs = { self, nixpkgs, flake-utils, naersk, fenix }: 16 | flake-utils.lib.eachDefaultSystem (system: 17 | let 18 | pkgs = (import nixpkgs) { 19 | inherit system; 20 | }; 21 | 22 | toolchain = with fenix.packages.${system}; fromToolchainFile { 23 | file = ./rust-toolchain.toml; # alternatively, dir = ./.; 24 | sha256 = "sha256-e4mlaJehWBymYxJGgnbuCObVlqMlQSilZ8FljG9zPHY="; 25 | }; 26 | 27 | in rec { 28 | defaultPackage = (naersk.lib.${system}.override { 29 | # For `nix build` & `nix run`: 30 | cargo = toolchain; 31 | rustc = toolchain; 32 | }).buildPackage { 33 | src = ./.; 34 | }; 35 | 36 | # For `nix develop` or `direnv allow`: 37 | devShell = pkgs.mkShell { 38 | buildInputs = with pkgs; [ 39 | pkg-config 40 | openssl 41 | ocl-icd 42 | toolchain 43 | rust-analyzer 44 | clang 45 | ]; 46 | }; 47 | } 48 | ); 49 | } 50 | -------------------------------------------------------------------------------- /src/cli/backend.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Result}; 2 | use clap::ValueEnum; 3 | use serde::Deserialize; 4 | 5 | use crate::field::LanguageField; 6 | 7 | #[derive(Clone, Default, Debug, Deserialize, ValueEnum, PartialEq, Eq)] 8 | #[clap(rename_all = "lowercase")] 9 | pub(crate) enum Backend { 10 | Nova, 11 | #[default] 12 | SuperNova, 13 | } 14 | 15 | impl std::fmt::Display for 
Backend { 16 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 17 | match self { 18 | Self::Nova => write!(f, "nova"), 19 | Self::SuperNova => write!(f, "supernova"), 20 | } 21 | } 22 | } 23 | 24 | impl Backend { 25 | fn compatible_fields(&self) -> Vec { 26 | use LanguageField::{Pallas, BN256}; 27 | match self { 28 | Self::Nova | Self::SuperNova => vec![BN256, Pallas], 29 | } 30 | } 31 | 32 | pub(crate) fn validate_field(&self, field: &LanguageField) -> Result<()> { 33 | let compatible_fields = self.compatible_fields(); 34 | if !compatible_fields.contains(field) { 35 | bail!( 36 | "Backend {self} is incompatible with field {field}. Compatible fields are:\n {}", 37 | compatible_fields 38 | .iter() 39 | .map(|f| f.to_string()) 40 | .collect::>() 41 | .join(", ") 42 | ) 43 | } 44 | Ok(()) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /benches/justfile: -------------------------------------------------------------------------------- 1 | # Install with `cargo install just` 2 | # Usage: `just ` 3 | set dotenv-load 4 | set dotenv-filename := "bench.env" 5 | 6 | commit := `git rev-parse HEAD` 7 | 8 | # Run CPU benchmarks 9 | bench +benches: 10 | #!/bin/sh 11 | printenv PATH 12 | if [ '{{benches}}' != '' ]; then 13 | for bench in {{benches}}; do 14 | cargo criterion --bench $bench 15 | done 16 | else 17 | echo "Invalid input, enter at least one non-empty string" 18 | fi 19 | 20 | # Run CUDA benchmarks on GPU 21 | gpu-bench +benches: 22 | #!/bin/sh 23 | # The `compute`/`sm` number corresponds to the Nvidia GPU architecture 24 | # In this case, the self-hosted machine uses the Ampere architecture, but we want this to be configurable 25 | # See https://arnon.dk/matching-sm-architectures-arch-and-gencode-for-various-nvidia-cards/ 26 | export CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | sed 's/\.//g') 27 | export EC_GPU_CUDA_NVCC_ARGS="--fatbin --gpu-architecture=sm_$CUDA_ARCH 
use std::collections::HashMap;
use std::default::Default;
use std::hash::Hash;

/// A multiset (bag): records how many times each element has been added.
///
/// `cardinality` is the total number of insertions (counting repeats), while
/// `len` is the number of *distinct* elements.
#[derive(PartialEq, Eq, Debug, Default, Clone)]
pub(crate) struct MultiSet<T: Hash + Eq> {
    // Per-element multiplicities.
    counts: HashMap<T, usize>,
    // Running sum of all multiplicities.
    total: usize,
}

impl<T: Hash + Eq> MultiSet<T> {
    /// Creates an empty multiset.
    pub(crate) fn new() -> Self {
        Self {
            counts: HashMap::new(),
            total: 0,
        }
    }

    /// Records one occurrence of `element`.
    pub(crate) fn add(&mut self, element: T) {
        *self.counts.entry(element).or_default() += 1;
        self.total += 1;
    }

    /// Returns the multiplicity of `element`, or `None` if it was never added.
    pub(crate) fn get(&self, element: &T) -> Option<usize> {
        self.counts.get(element).copied()
    }

    /// Total number of insertions, counting repeats.
    #[allow(dead_code)]
    pub(crate) fn cardinality(&self) -> usize {
        self.total
    }

    /// Number of distinct elements.
    #[allow(dead_code)]
    pub(crate) fn len(&self) -> usize {
        self.counts.len()
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_multiset() {
        let mut m = MultiSet::<usize>::new();
        let mut expected_cardinality = 0;
        let n = 5;

        for i in 1..n {
            (0..i).for_each(|_| m.add(i));
            expected_cardinality += i;
            assert_eq!(i, m.len());
            assert_eq!(expected_cardinality, m.cardinality());
            assert_eq!(Some(i), m.get(&i));
            assert_eq!(None, m.get(&(i + n)));
        }
    }
}
-------------------------------------------------------------------------------- 1 | [alias] 2 | # Collection of project wide clippy lints. This is done via an alias because 3 | # clippy doesn't currently allow for specifiying project-wide lints in a 4 | # configuration file. This is a similar workaround to the ones presented here: 5 | # 6 | xclippy = [ 7 | "clippy", "--workspace", "--all-targets", "--", 8 | "-Wclippy::all", 9 | "-Wclippy::cast_lossless", 10 | "-Wclippy::checked_conversions", 11 | "-Wclippy::dbg_macro", 12 | "-Wclippy::disallowed_methods", 13 | "-Wclippy::derive_partial_eq_without_eq", 14 | "-Wclippy::enum_glob_use", 15 | "-Wclippy::filter_map_next", 16 | "-Wclippy::flat_map_option", 17 | "-Wclippy::from_iter_instead_of_collect", 18 | "-Wclippy::implicit_clone", 19 | "-Wclippy::inefficient_to_string", 20 | "-Wclippy::large_stack_arrays", 21 | "-Wclippy::large_types_passed_by_value", 22 | "-Wclippy::macro_use_imports", 23 | "-Wclippy::manual_assert", 24 | "-Wclippy::manual_ok_or", 25 | "-Wclippy::map_err_ignore", 26 | "-Wclippy::map_flatten", 27 | "-Wclippy::map_unwrap_or", 28 | "-Wclippy::match_same_arms", 29 | "-Wclippy::match_wild_err_arm", 30 | "-Wclippy::needless_borrow", 31 | "-Wclippy::needless_continue", 32 | "-Wclippy::needless_for_each", 33 | "-Wclippy::needless_pass_by_value", 34 | "-Wclippy::option_option", 35 | "-Wclippy::same_functions_in_if_condition", 36 | "-Wclippy::single_match_else", 37 | "-Wclippy::trait_duplication_in_bounds", 38 | "-Wclippy::unnecessary_wraps", 39 | "-Wclippy::unnested_or_patterns", 40 | "-Wnonstandard_style", 41 | "-Wrust_2018_idioms", 42 | "-Wtrivial_numeric_casts", 43 | "-Wunused_lifetimes", 44 | "-Wunreachable_pub" 45 | ] 46 | -------------------------------------------------------------------------------- /src/cli/commitment.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Result}; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | use crate::{ 5 | 
field::LurkField, 6 | lem::{ 7 | pointers::{Ptr, ZPtr}, 8 | store::Store, 9 | }, 10 | }; 11 | 12 | use super::{ 13 | field_data::{dump, HasFieldModulus}, 14 | paths::commitment_path, 15 | zstore::ZStore, 16 | }; 17 | 18 | /// Holds data for commitments. 19 | /// 20 | /// **Warning**: holds private data. The `ZStore` contains the secret used to 21 | /// hide the original payload. 22 | #[derive(Serialize, Deserialize)] 23 | pub(crate) struct Commitment { 24 | pub(crate) hash: F, 25 | pub(crate) z_store: ZStore, 26 | } 27 | 28 | impl HasFieldModulus for Commitment { 29 | fn field_modulus() -> String { 30 | F::MODULUS.to_owned() 31 | } 32 | } 33 | 34 | impl Commitment { 35 | pub(crate) fn new(secret: Option, payload: Ptr, store: &Store) -> Self { 36 | let secret = secret.unwrap_or(F::NON_HIDING_COMMITMENT_SECRET); 37 | let (hash, z_payload) = store.hide_and_return_z_payload(secret, payload); 38 | let mut z_store = ZStore::::default(); 39 | z_store.populate_with_simple(&payload, store); 40 | z_store.add_comm(hash, secret, z_payload); 41 | Self { hash, z_store } 42 | } 43 | 44 | #[inline] 45 | pub(crate) fn open(&self) -> Result<&(F, ZPtr)> { 46 | self.z_store 47 | .open(self.hash) 48 | .ok_or_else(|| anyhow!("Couldn't open commitment")) 49 | } 50 | } 51 | 52 | impl Commitment { 53 | #[inline] 54 | pub(crate) fn persist(self) -> Result<()> { 55 | let hash_str = &self.hash.hex_digits(); 56 | dump(self, &commitment_path(hash_str)) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/circuit/gadgets/data.rs: -------------------------------------------------------------------------------- 1 | use bellpepper_core::{boolean::Boolean, num::AllocatedNum, ConstraintSystem}; 2 | 3 | use crate::field::LurkField; 4 | use crate::tag::{ContTag, ExprTag, Op1, Op2, Tag}; 5 | 6 | pub(crate) fn allocate_constant>( 7 | cs: &mut CS, 8 | val: F, 9 | ) -> AllocatedNum { 10 | let allocated = AllocatedNum::::alloc_infallible(cs.namespace(|| 
"allocate"), || val); 11 | 12 | // allocated * 1 = val 13 | cs.enforce( 14 | || "enforce constant", 15 | |lc| lc + allocated.get_variable(), 16 | |lc| lc + CS::one(), 17 | |_| Boolean::Constant(true).lc(CS::one(), val), 18 | ); 19 | 20 | allocated 21 | } 22 | 23 | impl ExprTag { 24 | pub fn allocate_constant>( 25 | &self, 26 | cs: &mut CS, 27 | ) -> AllocatedNum { 28 | allocate_constant(ns!(cs, format!("{self:?} tag")), self.to_field()) 29 | } 30 | } 31 | 32 | impl ContTag { 33 | pub fn allocate_constant>( 34 | &self, 35 | cs: &mut CS, 36 | ) -> AllocatedNum { 37 | allocate_constant( 38 | ns!(cs, format!("{self:?} base continuation tag")), 39 | self.to_field(), 40 | ) 41 | } 42 | } 43 | 44 | impl Op1 { 45 | pub fn allocate_constant>( 46 | &self, 47 | cs: &mut CS, 48 | ) -> AllocatedNum { 49 | allocate_constant(ns!(cs, format!("{self:?} tag")), self.to_field()) 50 | } 51 | } 52 | 53 | impl Op2 { 54 | pub fn allocate_constant>( 55 | &self, 56 | cs: &mut CS, 57 | ) -> AllocatedNum { 58 | allocate_constant(ns!(cs, format!("{self:?} tag")), self.to_field()) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /tests/lurk-files-tests.rs: -------------------------------------------------------------------------------- 1 | use std::process::Command; 2 | 3 | use assert_cmd::prelude::{CommandCargoExt, OutputAssertExt}; 4 | use camino::Utf8PathBuf; 5 | use rayon::prelude::{IntoParallelIterator, ParallelIterator}; 6 | 7 | #[inline] 8 | fn lurk_cmd() -> Command { 9 | Command::cargo_bin("lurk").unwrap() 10 | } 11 | 12 | #[test] 13 | #[ignore] 14 | fn test_lurk_lib() { 15 | const LURK_LIB_EXAMPLES_DIR: &str = "lurk-lib/example"; 16 | 17 | assert!( 18 | Utf8PathBuf::from(LURK_LIB_EXAMPLES_DIR).exists(), 19 | "The lurk-lib example directory does not exist. 
\ 20 | Please update the submodule by running the following commands:\n\ 21 | git submodule init\n\ 22 | git submodule update" 23 | ); 24 | 25 | let lurk_lib_examples = [ 26 | "test.lurk", 27 | "micro-tests.lurk", 28 | "meta-tests.lurk", 29 | "meta-letrec-tests.lurk", 30 | "fibonacci-tests.lurk", 31 | "tests/spec.lurk", 32 | "tests/eval.lurk", 33 | "tests/begin.lurk", 34 | "tests/auto-curry.lurk", 35 | ]; 36 | 37 | lurk_lib_examples.into_par_iter().for_each(|f| { 38 | let mut cmd = lurk_cmd(); 39 | cmd.current_dir(LURK_LIB_EXAMPLES_DIR); 40 | cmd.arg(f); 41 | cmd.assert().success(); 42 | }); 43 | } 44 | 45 | #[test] 46 | #[ignore] 47 | fn test_demo() { 48 | // proving involved! 49 | let demo_examples = [ 50 | "demo/simple.lurk", 51 | "demo/functional-commitment.lurk", 52 | "demo/chained-functional-commitment.lurk", 53 | "demo/bank.lurk", 54 | "demo/vdf.lurk", 55 | "demo/protocol.lurk", 56 | ]; 57 | 58 | demo_examples.into_par_iter().for_each(|f| { 59 | let mut cmd = lurk_cmd(); 60 | cmd.arg(f); 61 | cmd.assert().success(); 62 | }); 63 | } 64 | -------------------------------------------------------------------------------- /demo/chained-functional-commitment.lurk: -------------------------------------------------------------------------------- 1 | ;; First, we define a stateful function that adds its input to an internal counter, initialized to 0. 2 | 3 | ;; The function returns a new counter value and a commitment to a replacement function wrapping the new counter. 4 | 5 | !(commit (letrec ((add (lambda (counter x) 6 | (let ((counter (+ counter x))) 7 | (cons counter (commit (add counter))))))) 8 | (add 0))) 9 | 10 | ;; We chain a next commitment by applying the committed function to a value of 9. 11 | 12 | !(chain 0x2b444b40b27bac0dff8416c0f3c708a505a636d86ba66bdbe86497c515afb651 9) 13 | 14 | ;; The new counter value is 9, and the function returns a new functional commitment. 15 | 16 | ;; This new commitment is now the head of the chain. 
;; Next, we create a proof of this transition.
13 | #[derive(Clone, Debug)] 14 | pub struct VarMap(FxHashMap); 15 | 16 | impl Default for VarMap { 17 | fn default() -> VarMap { 18 | VarMap(FxHashMap::default()) 19 | } 20 | } 21 | 22 | impl VarMap { 23 | /// Creates an empty `VarMap` 24 | #[inline] 25 | pub(crate) fn new() -> VarMap { 26 | VarMap(FxHashMap::default()) 27 | } 28 | 29 | /// Inserts new data into a `VarMap` 30 | pub(crate) fn insert(&mut self, var: Var, v: V) -> Option { 31 | match self.0.entry(var) { 32 | Entry::Vacant(vacant_entry) => { 33 | vacant_entry.insert(v); 34 | None 35 | } 36 | Entry::Occupied(mut o) => { 37 | let v = o.insert(v); 38 | info!("Variable {} has been overwritten", o.key()); 39 | Some(v) 40 | } 41 | } 42 | } 43 | 44 | /// Retrieves data from a `VarMap`. Errors if there's no data for the `Var` 45 | pub(crate) fn get(&self, var: &Var) -> Result<&V> { 46 | match self.0.get(var) { 47 | Some(v) => Ok(v), 48 | None => bail!("Data for variable {var} not found"), 49 | } 50 | } 51 | } 52 | 53 | impl VarMap { 54 | #[inline] 55 | pub(crate) fn get_cloned(&self, var: &Var) -> Result { 56 | self.get(var).cloned() 57 | } 58 | 59 | pub(crate) fn get_many_cloned(&self, args: &[Var]) -> Result> { 60 | args.iter().map(|arg| self.get_cloned(arg)).collect() 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/z_data/serde/mod.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | mod de; 4 | mod ser; 5 | 6 | pub use de::from_z_data; 7 | pub use ser::to_z_data; 8 | 9 | #[derive(Error, Debug)] 10 | pub enum SerdeError { 11 | #[error("Function error")] 12 | Function(String), 13 | #[error("Type error")] 14 | Type(String), 15 | } 16 | 17 | impl serde::ser::Error for SerdeError { 18 | fn custom(msg: T) -> Self { 19 | Self::Function(msg.to_string()) 20 | } 21 | } 22 | 23 | impl serde::de::Error for SerdeError { 24 | fn custom(msg: T) -> Self { 25 | Self::Function(msg.to_string()) 26 | } 
27 | } 28 | 29 | #[cfg(test)] 30 | mod tests { 31 | use crate::field::FWrap; 32 | use crate::z_data::{from_z_data, to_z_data}; 33 | use halo2curves::bn256::Fr as Scalar; 34 | use proptest::prelude::*; 35 | use serde::{Deserialize, Serialize}; 36 | use std::collections::BTreeMap; 37 | 38 | fn test_roundtrip(zd: &T) 39 | where 40 | T: Serialize + for<'de> Deserialize<'de> + PartialEq + std::fmt::Debug, 41 | { 42 | assert_eq!(*zd, from_z_data(&to_z_data(zd).unwrap()).unwrap()); 43 | } 44 | 45 | #[test] 46 | fn serde_simple_roundtrip() { 47 | test_roundtrip(&(1u8, 2u8)); 48 | test_roundtrip(&(1u32, 2u64)); 49 | test_roundtrip(&String::from("Hello world")); 50 | test_roundtrip(&['a', 'b', 'c']); 51 | test_roundtrip(&[0u8, 1u8, 2u8]); 52 | test_roundtrip(&[String::from("Hello"), String::from("World")]); 53 | test_roundtrip(&BTreeMap::from([ 54 | (String::from("Hello"), 0u8), 55 | (String::from("World"), 1u8), 56 | ])); 57 | let f = FWrap(Scalar::one()); 58 | let ser = to_z_data(f).unwrap(); 59 | assert_eq!(f, from_z_data(&ser).unwrap()); 60 | } 61 | 62 | proptest! 
{ 63 | #[test] 64 | fn ser_err_isize(x in any::()) { 65 | assert!(to_z_data(x).is_err()); 66 | } 67 | 68 | #[test] 69 | fn ser_err_f32(x in any::()) { 70 | assert!(to_z_data(x).is_err()); 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI tests 2 | 3 | on: 4 | merge_group: 5 | pull_request: 6 | types: [opened, synchronize, reopened, ready_for_review] 7 | branches: [main] 8 | workflow_dispatch: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | linux: 16 | runs-on: warp-ubuntu-2404-x64-16x 17 | steps: 18 | - uses: actions/checkout@v4 19 | with: 20 | repository: argumentcomputer/ci-workflows 21 | - uses: ./.github/actions/ci-env 22 | - uses: ./.github/actions/install-deps 23 | with: 24 | packages: 'pkg-config libssl-dev protobuf-compiler libprotobuf-dev' 25 | - uses: actions/checkout@v4 26 | with: 27 | submodules: recursive 28 | - uses: dtolnay/rust-toolchain@stable 29 | - uses: taiki-e/install-action@nextest 30 | - uses: Swatinem/rust-cache@v2 31 | - name: Linux Tests 32 | run: | 33 | cargo nextest run --profile ci --workspace --cargo-profile dev-ci 34 | - name: Linux Gadget Tests w/o debug assertions 35 | run: | 36 | cargo nextest run --profile ci --workspace --cargo-profile dev-no-assertions -E 'test(circuit::gadgets)' 37 | 38 | # Rustfmt, clippy, doctests 39 | code-quality: 40 | uses: argumentcomputer/ci-workflows/.github/workflows/lints.yml@main 41 | with: 42 | packages: 'protobuf-compiler libprotobuf-dev' 43 | 44 | # Checks `cargo build --target wasm32-unknown-unknown` 45 | wasm: 46 | uses: argumentcomputer/ci-workflows/.github/workflows/wasm.yml@main 47 | 48 | # Checks MSRV specified by `rust-version` in `Cargo.toml` 49 | msrv: 50 | uses: 
argumentcomputer/ci-workflows/.github/workflows/msrv.yml@main 51 | with: 52 | packages: 'protobuf-compiler libprotobuf-dev' 53 | 54 | # Check documentation links aren't broken 55 | link-checker: 56 | uses: argumentcomputer/ci-workflows/.github/workflows/links-check.yml@main 57 | with: 58 | fail-fast: true 59 | 60 | # Lint dependencies for licensing and auditing issues as per https://github.com/argumentcomputer/lurk-beta/blob/main/deny.toml 61 | licenses-audits: 62 | uses: argumentcomputer/ci-workflows/.github/workflows/licenses-audits.yml@main 63 | -------------------------------------------------------------------------------- /notes/eval.md: -------------------------------------------------------------------------------- 1 | Eval Spec (`eval.rs`) - (WIP) 2 | --------------------- 3 | 4 | _For a high-level overview of the reduction step, see [Reduction Notes](reduction-notes.md)._ 5 | 6 | ### Evaluator 7 | The Evaluator consists of an expression, an environment, a store and an iteration limit. 8 | 9 | ### `eval` 10 | The eval function evaluates the expression in the environment for a number of reduction steps, growing the store and stopping (at most) at the iteration limit. 11 | 12 | ### environment 13 | The environment provides bindings between variables and values in lexical scope. 14 | 15 | ### store 16 | The store keeps track of all objects created. This is where `cons` becomes `hash-cons`. 17 | The store is mutable. 18 | 19 | ### IO 20 | Inputs and outputs (IO) consist of an expression, an environment, and a continuation, all represented as pointers to the store. 21 | 22 | ### continuation 23 | The continuation represents the rest of the computation. 24 | Continuations are defunctionalized, so there is one continuation per possible rest of computation. 25 | Initially, the continuation is outermost. 26 | 27 | ### frame 28 | A frame consists of an input, an output, a sequence index and a witness. 
29 | 30 | ### witness 31 | The witness consists of output expression, environment and continuation, and extended closure, and a continuation continuation (which is redundant, but only present if the control was tagged with continuation). 32 | The witness remembers results that can be used in proofs. 33 | 34 | ### reduction 35 | In one reduction step, a frame steps to the next frame, threading the store. 36 | 37 | The reduction rules specify the semantics of taking a step. 38 | A step takes an input and a store to an output and a witness. 39 | 40 | To simplify the translation to circuits, the implementation of reduction wraps the output in a `Control`, tagging it with `Return`, `MakeThunk`, or `ApplyContinuation`. 41 | 42 | In reduction: 43 | - The tag `Return` is used in most cases, in particular where a new continuation is formed. 44 | - The tag `MakeThunk` is used for unary and binary operations. When a thunk is not used, this allows for some optimizations with respect to tail and outermost continuations. 45 | - The tag `ApplyContinuation` is used to return immediately with the input continuation. 46 | 47 | To understand the detailed semantics, we defer to the case analyses in `reduce_with_witness` and `apply_continuation`. 48 | -------------------------------------------------------------------------------- /src/uint.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(target_arch = "wasm32"))] 2 | use lurk_macros::serde_test; 3 | #[cfg(not(target_arch = "wasm32"))] 4 | use proptest_derive::Arbitrary; 5 | use serde::{Deserialize, Serialize}; 6 | use std::{ 7 | fmt::Display, 8 | ops::{Add, Div, Mul, Rem, Sub}, 9 | }; 10 | 11 | /// Unsigned fixed-width integer type for Lurk. 
12 | #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Serialize, Deserialize)] 13 | #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] 14 | #[cfg_attr(not(target_arch = "wasm32"), serde_test)] 15 | pub enum UInt { 16 | U64(u64), 17 | } 18 | 19 | impl UInt { 20 | pub fn is_zero(&self) -> bool { 21 | match self { 22 | UInt::U64(n) => *n == 0, 23 | } 24 | } 25 | } 26 | 27 | impl From for UInt { 28 | fn from(n: u64) -> Self { 29 | Self::U64(n) 30 | } 31 | } 32 | 33 | impl From for u64 { 34 | fn from(u: UInt) -> u64 { 35 | match u { 36 | UInt::U64(n) => n, 37 | } 38 | } 39 | } 40 | 41 | impl Display for UInt { 42 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 43 | match self { 44 | UInt::U64(n) => write!(f, "{n}"), 45 | } 46 | } 47 | } 48 | 49 | impl Add for UInt { 50 | type Output = Self; 51 | fn add(self, other: Self) -> Self { 52 | match (self, other) { 53 | (UInt::U64(a), UInt::U64(b)) => UInt::U64(a.wrapping_add(b)), 54 | } 55 | } 56 | } 57 | 58 | impl Sub for UInt { 59 | type Output = Self; 60 | fn sub(self, other: Self) -> Self { 61 | match (self, other) { 62 | (UInt::U64(a), UInt::U64(b)) => UInt::U64(a.wrapping_sub(b)), 63 | } 64 | } 65 | } 66 | impl Div for UInt { 67 | type Output = Self; 68 | fn div(self, other: Self) -> Self { 69 | match (self, other) { 70 | (UInt::U64(a), UInt::U64(b)) => UInt::U64(a / b), 71 | } 72 | } 73 | } 74 | impl Mul for UInt { 75 | type Output = Self; 76 | fn mul(self, other: Self) -> Self { 77 | match (self, other) { 78 | (UInt::U64(a), UInt::U64(b)) => UInt::U64(a.wrapping_mul(b)), 79 | } 80 | } 81 | } 82 | impl Rem for UInt { 83 | type Output = Self; 84 | fn rem(self, other: Self) -> Self { 85 | match (self, other) { 86 | (UInt::U64(a), UInt::U64(b)) => UInt::U64(a % b), 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/lem/tag.rs: -------------------------------------------------------------------------------- 1 | use 
anyhow::{bail, Result}; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | use crate::{ 5 | field::LurkField, 6 | tag::{ContTag, ExprTag, Op1, Op2, Tag as TagTrait}, 7 | }; 8 | 9 | /// The LEM `Tag` is a wrapper around other types that are used as tags 10 | #[derive(Copy, Debug, PartialEq, Clone, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] 11 | pub enum Tag { 12 | Expr(ExprTag), 13 | Cont(ContTag), 14 | Op1(Op1), 15 | Op2(Op2), 16 | } 17 | 18 | impl TryFrom for Tag { 19 | type Error = anyhow::Error; 20 | 21 | fn try_from(val: u16) -> Result { 22 | if let Ok(tag) = ExprTag::try_from(val) { 23 | Ok(Tag::Expr(tag)) 24 | } else if let Ok(tag) = ContTag::try_from(val) { 25 | Ok(Tag::Cont(tag)) 26 | } else if let Ok(tag) = Op1::try_from(val) { 27 | Ok(Tag::Op1(tag)) 28 | } else if let Ok(tag) = Op2::try_from(val) { 29 | Ok(Tag::Op2(tag)) 30 | } else { 31 | bail!("Invalid u16 for Tag: {val}") 32 | } 33 | } 34 | } 35 | 36 | impl From for u16 { 37 | fn from(val: Tag) -> Self { 38 | match val { 39 | Tag::Expr(tag) => tag.into(), 40 | Tag::Cont(tag) => tag.into(), 41 | Tag::Op1(tag) => tag.into(), 42 | Tag::Op2(tag) => tag.into(), 43 | } 44 | } 45 | } 46 | 47 | impl TagTrait for Tag { 48 | fn from_field(f: &F) -> Option { 49 | Self::try_from(f.to_u16()?).ok() 50 | } 51 | 52 | fn to_field(&self) -> F { 53 | Tag::to_field(self) 54 | } 55 | } 56 | 57 | impl Tag { 58 | #[inline] 59 | pub fn to_field(&self) -> F { 60 | match self { 61 | Self::Expr(tag) => tag.to_field(), 62 | Self::Cont(tag) => tag.to_field(), 63 | Self::Op1(tag) => tag.to_field(), 64 | Self::Op2(tag) => tag.to_field(), 65 | } 66 | } 67 | } 68 | 69 | impl std::fmt::Display for Tag { 70 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 71 | use Tag::{Cont, Expr, Op1, Op2}; 72 | match self { 73 | Expr(tag) => write!(f, "expr.{}", tag), 74 | Cont(tag) => write!(f, "cont.{}", tag), 75 | Op1(tag) => write!(f, "op1.{}", tag), 76 | Op2(tag) => write!(f, "op2.{}", tag), 77 | } 78 | } 79 | } 80 | 
-------------------------------------------------------------------------------- /benches/public_params.rs: -------------------------------------------------------------------------------- 1 | use criterion::{black_box, criterion_group, criterion_main, Criterion, SamplingMode}; 2 | use lurk::{lang::Lang, proof::nova}; 3 | use std::sync::Arc; 4 | use std::time::Duration; 5 | 6 | const DEFAULT_REDUCTION_COUNT: usize = 10; 7 | 8 | /// To run these benchmarks, do `cargo criterion public_params_benchmark`. 9 | /// For flamegraphs, run: 10 | /// ```cargo criterion public_params_benchmark --features flamegraph -- --profile-time ``` 11 | fn public_params_benchmark(c: &mut Criterion) { 12 | let mut group = c.benchmark_group("public_params_benchmark"); 13 | group.sampling_mode(SamplingMode::Flat); 14 | let lang = Lang::::new(); 15 | let lang_rc = Arc::new(lang); 16 | 17 | let reduction_count = DEFAULT_REDUCTION_COUNT; 18 | 19 | group.bench_function("public_params_nova", |b| { 20 | b.iter(|| { 21 | let result = nova::public_params(reduction_count, lang_rc.clone()); 22 | black_box(result) 23 | }) 24 | }); 25 | } 26 | 27 | cfg_if::cfg_if! { 28 | if #[cfg(feature = "flamegraph")] { 29 | // In order to collect a flamegraph, you need to indicate a profile time, see 30 | // https://github.com/tikv/pprof-rs#integrate-with-criterion 31 | // Example usage : 32 | // cargo criterion --bench public_params --features flamegraph -- --profile-time 5 33 | // Warning: it is not recommended to run this on an M1 Mac, as making pprof work well there is hard. 34 | criterion_group! { 35 | name = benches; 36 | config = Criterion::default() 37 | .with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); 38 | targets = public_params_benchmark 39 | } 40 | } else { 41 | criterion_group! 
{ 42 | name = benches; 43 | config = Criterion::default() 44 | .measurement_time(Duration::from_secs(120)) 45 | .sample_size(10); 46 | targets = public_params_benchmark 47 | } 48 | } 49 | } 50 | 51 | // To run these benchmarks, first download `criterion` with `cargo install cargo install cargo-criterion`. 52 | // Then `cargo criterion --bench public_params`. The results are located in `target/criterion/data/`. 53 | // For flamegraphs, run `cargo criterion --bench public_params --features flamegraph -- --profile-time `. 54 | // The results are located in `target/criterion/profile/`. 55 | criterion_main!(benches); 56 | -------------------------------------------------------------------------------- /.github/workflows/bench-deploy.yml: -------------------------------------------------------------------------------- 1 | name: GPU benchmark on `main` 2 | on: 3 | push: 4 | branches: 5 | - main 6 | 7 | jobs: 8 | # TODO: Account for different `justfile` and `bench.env` files 9 | # One option is to upload them to gh-pages for qualitative comparison 10 | # TODO: Fall back to a default if `justfile`/`bench.env` not present 11 | benchmark: 12 | name: Bench and deploy 13 | runs-on: [self-hosted, gpu-bench, gh-pages] 14 | steps: 15 | # Install deps 16 | - uses: actions/checkout@v4 17 | with: 18 | repository: argumentcomputer/ci-workflows 19 | - uses: ./.github/actions/gpu-setup 20 | with: 21 | gpu-framework: 'cuda' 22 | - uses: ./.github/actions/ci-env 23 | - uses: actions/checkout@v4 24 | - uses: dtolnay/rust-toolchain@stable 25 | - uses: Swatinem/rust-cache@v2 26 | - uses: taiki-e/install-action@v2 27 | with: 28 | tool: just@1.22.0 29 | # Run benchmarks and deploy 30 | - name: Get old benchmarks 31 | uses: actions/checkout@v4 32 | with: 33 | ref: gh-pages 34 | path: gh-pages 35 | - run: mkdir -p target; cp -r gh-pages/benchmarks/criterion target; 36 | - name: Install criterion 37 | run: cargo install cargo-criterion 38 | - name: Run benchmarks 39 | run: | 40 | just 
gpu-bench-ci fibonacci 41 | cp fibonacci-${{ github.sha }}.json .. 42 | working-directory: ${{ github.workspace }}/benches 43 | # TODO: Prettify labels for easier viewing 44 | # Compress the benchmark file and metadata for later analysis 45 | - name: Compress artifacts 46 | run: | 47 | echo $LABELS > labels.md 48 | tar -cvzf fibonacci-${{ github.sha }}.tar.gz Cargo.lock fibonacci-${{ github.sha }}.json labels.md 49 | working-directory: ${{ github.workspace }} 50 | - name: Deploy latest benchmark report 51 | uses: peaceiris/actions-gh-pages@v4 52 | with: 53 | github_token: ${{ secrets.GITHUB_TOKEN }} 54 | publish_dir: ./target/criterion 55 | destination_dir: benchmarks/criterion 56 | - name: Copy benchmark json to history 57 | run: mkdir history; cp fibonacci-${{ github.sha }}.tar.gz history/ 58 | - name: Deploy benchmark history 59 | uses: peaceiris/actions-gh-pages@v4 60 | with: 61 | github_token: ${{ secrets.GITHUB_TOKEN }} 62 | publish_dir: history/ 63 | destination_dir: benchmarks/history 64 | keep_files: true -------------------------------------------------------------------------------- /src/proof/tests/supernova_tests.rs: -------------------------------------------------------------------------------- 1 | use halo2curves::bn256::Fr; 2 | use std::sync::Arc; 3 | 4 | use crate::{ 5 | dual_channel::dummy_terminal, 6 | lang::Lang, 7 | lem::{ 8 | eval::{evaluate, make_cprocs_funcs_from_lang, make_eval_step_from_config, EvalConfig}, 9 | store::Store, 10 | }, 11 | proof::{supernova::SuperNovaProver, RecursiveSNARKTrait}, 12 | public_parameters::{instance::Instance, supernova_public_params}, 13 | state::user_sym, 14 | }; 15 | 16 | #[test] 17 | fn test_nil_nil_lang() { 18 | use crate::coprocessor::test::NilNil; 19 | let mut lang = Lang::>::new(); 20 | lang.add_coprocessor(user_sym("nil-nil"), NilNil::new()); 21 | 22 | let eval_config = EvalConfig::new_nivc(&lang); 23 | let lurk_step = make_eval_step_from_config(&eval_config); 24 | let cprocs = 
make_cprocs_funcs_from_lang(&lang); 25 | 26 | let store = Arc::new(Store::default()); 27 | let expr = store.read_with_default_state("(nil-nil)").unwrap(); 28 | let frames = evaluate( 29 | Some((&lurk_step, &cprocs, &lang)), 30 | expr, 31 | &store, 32 | 50, 33 | &dummy_terminal(), 34 | ) 35 | .unwrap(); 36 | 37 | // iteration 1: main circuit sets up a call to the coprocessor 38 | // iteration 2: coprocessor does its job 39 | // iteration 3: main circuit sets termination to terminal 40 | assert_eq!(frames.len(), 3); 41 | 42 | let first_frame = frames.first().unwrap(); 43 | let last_frame = frames.last().unwrap(); 44 | let output = &last_frame.output; 45 | 46 | // the result is the (nil . nil) pair 47 | let nil = store.intern_nil(); 48 | assert!(store.ptr_eq(&output[0], &store.cons(nil, nil))); 49 | 50 | // computation must end with the terminal continuation 51 | assert!(store.ptr_eq(&output[2], &store.cont_terminal())); 52 | 53 | let supernova_prover = SuperNovaProver::new(5, Arc::new(lang)); 54 | let instance = Instance::new_supernova(&supernova_prover, true); 55 | let pp = supernova_public_params(&instance).unwrap(); 56 | 57 | let (proof, ..) 
= supernova_prover 58 | .prove_from_frames(&pp, &frames, &store, None) 59 | .unwrap(); 60 | 61 | let input_scalar = store.to_scalar_vector(&first_frame.input); 62 | let output_scalar = store.to_scalar_vector(output); 63 | 64 | // uncompressed proof verifies 65 | assert!(proof.verify(&pp, &input_scalar, &output_scalar).unwrap()); 66 | 67 | // compressed proof verifies 68 | let proof = proof.compress(&pp).unwrap(); 69 | assert!(proof.verify(&pp, &input_scalar, &output_scalar).unwrap()); 70 | } 71 | -------------------------------------------------------------------------------- /src/circuit/gadgets/hashes.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; 4 | 5 | use neptune::circuit2::poseidon_hash_allocated as poseidon_hash; 6 | use neptune::circuit2_witness::poseidon_hash_allocated_witness; 7 | 8 | use crate::field::{FWrap, LurkField}; 9 | use crate::hash::HashConst; 10 | 11 | pub(crate) type WitnessBlock = Vec; 12 | pub(crate) type Digest = F; 13 | 14 | type HashCircuitWitnessCache = HashMap>, (Vec, F)>; 15 | 16 | impl<'a, F: LurkField> HashConst<'a, F> { 17 | pub(crate) fn hash>( 18 | &self, 19 | cs: &mut CS, 20 | preimage: Vec>, 21 | hash_circuit_witness_cache: Option<&mut HashCircuitWitnessCache>, 22 | ) -> Result, SynthesisError> { 23 | let witness_block = if cs.is_witness_generator() { 24 | hash_circuit_witness_cache.map(|cache| { 25 | let key = preimage 26 | .iter() 27 | .map(|allocated| FWrap(allocated.get_value().unwrap())) 28 | .collect::>(); 29 | 30 | let cached = cache.get(&key).unwrap(); 31 | cached 32 | }) 33 | } else { 34 | None 35 | }; 36 | 37 | self.hash_with_witness(cs, preimage, witness_block) 38 | } 39 | 40 | fn hash_with_witness>( 41 | &self, 42 | cs: &mut CS, 43 | preimage: Vec>, 44 | circuit_witness: Option<&(WitnessBlock, Digest)>, 45 | ) -> Result, SynthesisError> { 46 | macro_rules! 
hash { 47 | ($c:ident) => { 48 | if cs.is_witness_generator() { 49 | if let Some((aux_buf, res)) = circuit_witness { 50 | cs.extend_aux(aux_buf); 51 | 52 | AllocatedNum::alloc(cs, || Ok(*res)) 53 | } else { 54 | // We have no cache, just allocate the witness. 55 | poseidon_hash_allocated_witness(cs, &preimage, $c) 56 | } 57 | } else { 58 | // CS is not a witness generator, just hash. 59 | poseidon_hash(cs, preimage, $c) 60 | } 61 | }; 62 | } 63 | match self { 64 | HashConst::A3(c) => hash!(c), 65 | HashConst::A4(c) => hash!(c), 66 | HashConst::A6(c) => hash!(c), 67 | HashConst::A8(c) => hash!(c), 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/circuit/circuit_frame.rs: -------------------------------------------------------------------------------- 1 | use bellpepper::util_cs::Comparable; 2 | 3 | use crate::field::LurkField; 4 | 5 | /// Prints out the full CS for debugging purposes 6 | #[allow(dead_code)] 7 | pub(crate) fn print_cs>(this: &C) -> String { 8 | let mut out = String::new(); 9 | out += &format!("num_inputs: {}\n", this.num_inputs()); 10 | out += &format!("num_constraints: {}\n", this.num_constraints()); 11 | out += "\ninputs:\n"; 12 | for (i, input) in this.inputs().iter().enumerate() { 13 | out += &format!("{i}: {input}\n"); 14 | } 15 | out += "\nconstraints:\n"; 16 | for (i, cs) in this.constraints().iter().enumerate() { 17 | out += &format!( 18 | "{}: {}:\n {:?}\n {:?}\n {:?}\n", 19 | i, 20 | cs.3, 21 | cs.0.iter().collect::>(), 22 | cs.1.iter().collect::>(), 23 | cs.2.iter().collect::>() 24 | ); 25 | } 26 | 27 | out 28 | } 29 | 30 | #[cfg(test)] 31 | mod tests { 32 | use super::*; 33 | use crate::circuit::gadgets::constraints::implies_pack; 34 | use crate::circuit::gadgets::constraints::popcount_equal; 35 | use bellpepper_core::boolean::Boolean; 36 | use bellpepper_core::num::AllocatedNum; 37 | use bellpepper_core::test_cs::TestConstraintSystem; 38 | use 
bellpepper_core::ConstraintSystem; 39 | 40 | use halo2curves::bn256::Fr; 41 | 42 | #[test] 43 | fn test_enforce_popcount() { 44 | let mut cs = TestConstraintSystem::::new(); 45 | 46 | for x in 0..128 { 47 | let alloc_a = AllocatedNum::alloc(ns!(cs, x.to_string()), || Ok(Fr::from(x))).unwrap(); 48 | let bits = alloc_a.to_bits_le(ns!(cs, format!("bits_{x}"))).unwrap(); 49 | let popcount_result = 50 | AllocatedNum::alloc(ns!(cs, format!("alloc popcount {x}")), || { 51 | Ok(Fr::from(u64::from(x.count_ones()))) 52 | }) 53 | .unwrap(); 54 | 55 | popcount_equal( 56 | ns!(cs, format!("popcount {x}")), 57 | &bits, 58 | popcount_result.get_variable(), 59 | ); 60 | } 61 | 62 | assert!(cs.is_satisfied()); 63 | } 64 | 65 | #[test] 66 | fn test_enforce_pack() { 67 | let mut cs = TestConstraintSystem::::new(); 68 | let a_num = AllocatedNum::alloc_infallible(ns!(cs, "a num"), || Fr::from_u64(42)); 69 | let bits = a_num.to_bits_le(ns!(cs, "bits")).unwrap(); 70 | implies_pack(&mut cs, &Boolean::Constant(true), &bits, &a_num); 71 | assert!(cs.is_satisfied()); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /tests/lurk-cli-tests.rs: -------------------------------------------------------------------------------- 1 | use assert_cmd::prelude::*; 2 | use camino::Utf8Path; 3 | use std::fs::File; 4 | use std::io::prelude::*; 5 | use std::process::Command; 6 | use tempfile::Builder; 7 | 8 | fn lurk_cmd() -> Command { 9 | Command::cargo_bin("lurk").unwrap() 10 | } 11 | 12 | #[test] 13 | fn test_help_subcommand() { 14 | let mut cmd = lurk_cmd(); 15 | 16 | cmd.arg("help"); 17 | cmd.assert().success(); 18 | } 19 | 20 | #[test] 21 | fn test_help_flag_command() { 22 | let mut cmd = lurk_cmd(); 23 | 24 | cmd.arg("--help"); 25 | cmd.assert().success(); 26 | } 27 | 28 | #[test] 29 | fn test_repl_command() { 30 | let mut cmd = lurk_cmd(); 31 | 32 | cmd.arg("repl"); 33 | cmd.assert().success(); 34 | } 35 | 36 | #[test] 37 | fn test_bad_command() { 38 
| let tmp_dir = Builder::new().prefix("tmp").tempdir().unwrap(); 39 | let bad_file = tmp_dir.path().join("uiop"); 40 | 41 | let mut cmd = lurk_cmd(); 42 | cmd.arg(bad_file.to_str().unwrap()); 43 | cmd.assert().failure(); 44 | } 45 | 46 | // TODO: Use a snapshot test for the proof ID and/or test the REPL process 47 | #[test] 48 | fn test_prove_and_verify() { 49 | let tmp_dir = Builder::new().prefix("tmp").tempdir().unwrap(); 50 | let tmp_dir = Utf8Path::from_path(tmp_dir.path()).unwrap(); 51 | let public_param_dir = tmp_dir.join("public_params"); 52 | let proof_dir = tmp_dir.join("proofs"); 53 | let commit_dir = tmp_dir.join("commits"); 54 | let lurk_file = tmp_dir.join("prove_verify.lurk"); 55 | 56 | let mut file = File::create(lurk_file.clone()).unwrap(); 57 | file.write_all(b"!(prove (+ 1 1))\n").unwrap(); 58 | file.write_all(b"!(verify \"supernova_bn256_10_18748ce7ba3dd0e7560ec64983d6b01d84a6303880b3b0b24878133aa1b4a6bb\")\n").unwrap(); 59 | 60 | let mut cmd = lurk_cmd(); 61 | cmd.env("LURK_PERF", "fully-parallel"); 62 | cmd.arg("load"); 63 | cmd.arg(lurk_file.into_string()); 64 | cmd.arg("--public-params-dir"); 65 | cmd.arg(public_param_dir); 66 | cmd.arg("--proofs-dir"); 67 | cmd.arg(proof_dir); 68 | cmd.arg("--commits-dir"); 69 | cmd.arg(commit_dir); 70 | 71 | cmd.assert().success(); 72 | } 73 | 74 | #[test] 75 | fn test_repl_panic() { 76 | let tmp_dir = Builder::new().prefix("tmp").tempdir().unwrap(); 77 | let tmp_dir = Utf8Path::from_path(tmp_dir.path()).unwrap(); 78 | let lurk_file = tmp_dir.join("panic.lurk"); 79 | 80 | let mut file = File::create(lurk_file.clone()).unwrap(); 81 | // `x` is not bound 82 | file.write_all(b"x\n").unwrap(); 83 | 84 | let mut cmd = lurk_cmd(); 85 | cmd.arg("load"); 86 | cmd.arg(lurk_file.into_string()); 87 | cmd.assert().failure(); 88 | } 89 | -------------------------------------------------------------------------------- /chain-server/README.md: 
-------------------------------------------------------------------------------- 1 | # chain-server 2 | 3 | A server for proving chained calls built on top of Lurk. 4 | 5 | > [!IMPORTANT] 6 | > This crate depends on [`tonic`](https://docs.rs/tonic/latest/tonic/), which uses [Protocol Buffers](https://protobuf.dev/) to specify the server/client communication. 7 | > If `protoc` is installed and this crate has trouble finding it, you can set the `PROTOC` environment variable with the specific path to your installed `protoc` binary. 8 | > Otherwise, you can download it from https://github.com/protocolbuffers/protobuf/releases. 9 | > On Debian, try `apt-get install protobuf-compiler`. 10 | 11 | ### Initiating the server with a functional commitment 12 | 13 | Go to Lurk's REPL and persist the commitment to the chaining function: 14 | 15 | ```lisp 16 | !(commit 17 | (letrec 18 | ((sum (lambda (xs acc) 19 | (if (eq xs nil) acc (sum (cdr xs) (+ acc (car xs)))))) 20 | (add (lambda (counter xs) 21 | (let ((counter (+ counter (sum xs 0)))) 22 | (cons counter (commit (add counter))))))) 23 | (add 0))) 24 | ``` 25 | 26 | Initiate the server with 27 | 28 | ``` 29 | $ cargo run --release --bin server --\ 30 | init 0x1653706ed881366619d2334fc2175c28e1a74b8ae6a8b9cc230026371e187bd6 --comm --port 50051 31 | ``` 32 | 33 | Where `0x1653706ed881366619d2334fc2175c28e1a74b8ae6a8b9cc230026371e187bd6` is the hash of the functional commitment.
34 | 35 | ### Initiating the server with a regular function as the callable state 36 | 37 | Go to Lurk's REPL and persist a the chaining function as Lurk data: 38 | 39 | ```lisp 40 | !(dump-data 41 | (letrec 42 | ((sum (lambda (xs acc) 43 | (if (eq xs nil) acc (sum (cdr xs) (+ acc (car xs)))))) 44 | (add (lambda (counter xs) 45 | (let ((counter (+ counter (sum xs 0)))) 46 | (cons counter (add counter)))))) 47 | (add 0)) 48 | "my-function") 49 | ``` 50 | 51 | Initiate the server with 52 | 53 | ``` 54 | $ cargo run --release --bin server -- init my-function --port 50051 55 | ``` 56 | 57 | ### Initiating the demo client 58 | 59 | Once the server is alive, run 60 | 61 | ``` 62 | cargo run --release --bin client -- 50051 63 | ``` 64 | 65 | Here is the output of a short interaction with it: 66 | 67 | ``` 68 | > '(2 5) 69 | ( (quote (2 5))) 70 | ↳ (7 . ) ✓ 71 | > '(13 17) 72 | ( (quote (13 17))) 73 | ↳ (37 . ) ✓ 74 | ``` 75 | -------------------------------------------------------------------------------- /src/cli/paths.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use camino::{Utf8Path, Utf8PathBuf}; 3 | 4 | use std::fs; 5 | 6 | use crate::cli::config::cli_config; 7 | use crate::public_parameters::disk_cache::public_params_dir; 8 | 9 | #[cfg(not(target_arch = "wasm32"))] 10 | fn home_dir() -> Utf8PathBuf { 11 | Utf8PathBuf::from_path_buf(home::home_dir().expect("missing home directory")) 12 | .expect("path contains invalid Unicode") 13 | } 14 | 15 | #[cfg(not(target_arch = "wasm32"))] 16 | pub fn lurk_default_dir() -> Utf8PathBuf { 17 | home_dir().join(Utf8Path::new(".lurk")) 18 | } 19 | 20 | #[cfg(target_arch = "wasm32")] 21 | pub fn lurk_default_dir() -> Utf8PathBuf { 22 | Utf8PathBuf::from(".lurk") 23 | } 24 | 25 | pub(crate) fn proofs_default_dir() -> Utf8PathBuf { 26 | lurk_default_dir().join("proofs") 27 | } 28 | 29 | pub(crate) fn commits_default_dir() -> Utf8PathBuf { 30 | 
lurk_default_dir().join("commits") 31 | } 32 | 33 | pub(crate) fn circom_default_dir() -> Utf8PathBuf { 34 | lurk_default_dir().join("circom") 35 | } 36 | 37 | pub(crate) fn proofs_dir() -> &'static Utf8PathBuf { 38 | &cli_config(None, None).proofs_dir 39 | } 40 | 41 | pub(crate) fn commits_dir() -> &'static Utf8PathBuf { 42 | &cli_config(None, None).commits_dir 43 | } 44 | 45 | pub(crate) fn circom_dir() -> &'static Utf8PathBuf { 46 | &cli_config(None, None).circom_dir 47 | } 48 | 49 | fn lurk_leaf_dirs() -> [&'static Utf8PathBuf; 4] { 50 | [ 51 | proofs_dir(), 52 | commits_dir(), 53 | public_params_dir(), 54 | circom_dir(), 55 | ] 56 | } 57 | 58 | // Creates dirs for public params, proofs, commits, and circom 59 | // NOTE: call this function after `cli_config()` or `lurk_config()` if non-default 60 | // config settings are desired, as it will initialize them if unset 61 | pub(crate) fn create_lurk_dirs() -> Result<()> { 62 | for dir in lurk_leaf_dirs() { 63 | fs::create_dir_all(dir)?; 64 | } 65 | Ok(()) 66 | } 67 | 68 | // Not currently configurable 69 | pub(crate) fn repl_history() -> Utf8PathBuf { 70 | lurk_default_dir().join(Utf8Path::new("repl-history")) 71 | } 72 | 73 | pub(crate) fn commitment_path(name: &str) -> Utf8PathBuf { 74 | commits_dir().join(Utf8Path::new(&format!("{name}.commit"))) 75 | } 76 | 77 | pub(crate) fn proof_path(name: &str) -> Utf8PathBuf { 78 | proofs_dir() 79 | .join(Utf8Path::new(name)) 80 | .with_extension("proof") 81 | } 82 | 83 | pub(crate) fn proof_meta_path(name: &str) -> Utf8PathBuf { 84 | proofs_dir() 85 | .join(Utf8Path::new(name)) 86 | .with_extension("meta") 87 | } 88 | 89 | pub(crate) fn circom_binary_path() -> Utf8PathBuf { 90 | circom_dir().join("circom") 91 | } 92 | -------------------------------------------------------------------------------- /src/public_parameters/disk_cache.rs: -------------------------------------------------------------------------------- 1 | use std::fs::create_dir_all; 2 | use 
std::io::{BufReader, BufWriter, Read}; 3 | use std::marker::PhantomData; 4 | 5 | use abomonation::{encode, Abomonation}; 6 | use camino::{Utf8Path, Utf8PathBuf}; 7 | 8 | use crate::config::lurk_config; 9 | use crate::coprocessor::Coprocessor; 10 | use crate::proof::nova::{CurveCycleEquipped, PublicParams}; 11 | use crate::public_parameters::error::Error; 12 | 13 | use super::instance::Instance; 14 | 15 | /// Returns the public parameter disk cache directory, which has 16 | /// either been configured or defaults to `$HOME/.lurk/public_params` 17 | pub(crate) fn public_params_dir() -> &'static Utf8PathBuf { 18 | &lurk_config(None, None).public_params_dir 19 | } 20 | 21 | pub(crate) struct DiskCache 22 | where 23 | F: CurveCycleEquipped, 24 | C: Coprocessor, 25 | { 26 | dir: Utf8PathBuf, 27 | _t: PhantomData<(F, C)>, 28 | } 29 | 30 | impl> DiskCache { 31 | pub(crate) fn new(disk_cache_path: &Utf8Path) -> Result { 32 | create_dir_all(disk_cache_path)?; 33 | 34 | Ok(Self { 35 | dir: disk_cache_path.to_owned(), 36 | _t: Default::default(), 37 | }) 38 | } 39 | 40 | pub(crate) fn read(&self, instance: &Instance) -> Result, Error> { 41 | let file = instance.open(&self.dir)?; 42 | let reader = BufReader::new(file); 43 | bincode::deserialize_from(reader) 44 | .map_err(|e| Error::Cache(format!("Public param cache deserialization error: {}", e))) 45 | } 46 | 47 | pub(crate) fn read_bytes( 48 | &self, 49 | instance: &Instance, 50 | byte_sink: &mut Vec, 51 | ) -> Result<(), Error> { 52 | let file = instance.open(&self.dir)?; 53 | let mut reader = BufReader::new(file); 54 | reader.read_to_end(byte_sink)?; 55 | Ok(()) 56 | } 57 | 58 | pub(crate) fn write( 59 | &self, 60 | instance: &Instance, 61 | data: &PublicParams, 62 | ) -> Result<(), Error> { 63 | let file = instance.create(&self.dir)?; 64 | let writer = BufWriter::new(&file); 65 | bincode::serialize_into(writer, data) 66 | .map_err(|e| Error::Cache(format!("Public param cache serialization error: {}", e))) 67 | } 68 | 69 | 
pub(crate) fn write_abomonated( 70 | &self, 71 | instance: &Instance, 72 | data: &V, 73 | ) -> Result<(), Error> { 74 | let mut file = instance.create(&self.dir)?; 75 | unsafe { encode(data, &mut file).expect("failed to encode") }; 76 | Ok(()) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/dual_channel.rs: -------------------------------------------------------------------------------- 1 | #![deny(missing_docs)] 2 | 3 | //! This module implements `ChannelTerminal`, meant to be used in pairs of its 4 | //! instances with crossed `Sender`s and `Receiver` from `mpsc::channel`. This 5 | //! crossing is performed in `pair_terminals`. The idea is that one terminal can 6 | //! send/receive messages to/from the other. 7 | 8 | use anyhow::{anyhow, Result}; 9 | use std::sync::mpsc::{channel, Iter, Receiver, Sender}; 10 | 11 | /// Holds a `Sender` and a `Receiver` which are not expected to be paired with 12 | /// each other 13 | pub struct ChannelTerminal { 14 | sender: Sender, 15 | receiver: Receiver, 16 | } 17 | 18 | impl ChannelTerminal { 19 | /// Sends a message through its inner `Sender` 20 | #[inline] 21 | pub fn send(&self, t: T) -> Result<()> { 22 | self.sender.send(t).map_err(|e| anyhow!(e.to_string())) 23 | } 24 | 25 | /// Receives a message through its inner `Receiver`, blocking the current thread 26 | #[inline] 27 | #[allow(dead_code)] 28 | pub fn recv(&self) -> Result { 29 | self.receiver.recv().map_err(|e| anyhow!(e.to_string())) 30 | } 31 | 32 | /// Collects all the messages received so far and materializes them in a 33 | /// vector without blocking the current thread 34 | #[inline] 35 | pub fn collect(&self) -> Vec { 36 | self.receiver.try_iter().collect() 37 | } 38 | 39 | #[inline] 40 | /// Returns a thread-blocking iterator for the received messages 41 | pub fn iter(&self) -> Iter<'_, T> { 42 | self.receiver.iter() 43 | } 44 | } 45 | 46 | /// Creates a pair of `ChannelTerminal` with crossed senders 
/// Cost model for a program: a fixed setup cost plus a per-loop cost.
#[derive(Debug, Clone, Copy)]
struct Prog {
    setup_iterations: usize,
    loop_iterations: usize,
}

/// Real Lurk iterations needed to run `prog` for `n` loop turns.
fn real_iterations(prog: Prog, n: usize) -> usize {
    prog.loop_iterations * n + prog.setup_iterations
}

/// Integer division of `n` by `m`, rounded towards positive infinity.
fn ceiling(n: usize, m: usize) -> usize {
    let quotient = n / m;
    if n % m == 0 {
        quotient
    } else {
        quotient + 1
    }
}

/// Like `Option`, but with a third state that renders as an empty table cell.
enum Opt<T> {
    Some(T),
    None,
    Empty,
}

impl<T: core::fmt::Display> core::fmt::Display for Opt<T> {
    fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        match self {
            Opt::Some(x) => x.fmt(fmt),
            Opt::None => "-".fmt(fmt),
            Opt::Empty => "".fmt(fmt),
        }
    }
}

/// Total iterations actually proved once `real_iterations` is padded up to a
/// whole number of `rc`-sized steps; `Opt::None` when the computation is
/// smaller than a single `rc` chunk.
fn total_iterations(real_iterations: usize, rc: usize) -> Opt<usize> {
    if real_iterations < rc {
        return Opt::None;
    }
    Opt::Some(ceiling(real_iterations, rc) * rc)
}

/// Padded iteration count for `prog` run over `n` loop turns at a given `rc`.
fn rc_total_iterations(prog: Prog, n: usize, rc: usize) -> Opt<usize> {
    total_iterations(real_iterations(prog, n), rc)
}

/// One table row: `n`, an empty spacer cell, then one entry per `rc`.
fn analyze_rcs(prog: Prog, n: usize, rcs: &[usize]) -> Vec<Opt<usize>> {
    let mut row = Vec::with_capacity(rcs.len() + 2);
    row.push(Opt::Some(n));
    row.push(Opt::Empty);
    for &rc in rcs {
        row.push(rc_total_iterations(prog, n, rc));
    }
    row
}

/// One table row per `n`, each built by `analyze_rcs`.
fn analyze_ncs_rcs(prog: Prog, ns: &[usize], rcs: &[usize]) -> Vec<Vec<Opt<usize>>> {
    ns.iter().map(|&n| analyze_rcs(prog, n, rcs)).collect()
}
63 | fn main() { 64 | let args = std::env::args().collect::>(); 65 | 66 | let setup_iterations: usize = args[1].parse().unwrap(); 67 | let loop_iterations: usize = args[2].parse().unwrap(); 68 | let ns = [10, 20, 30, 40, 50, 60, 100, 200]; 69 | let rcs = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000]; 70 | 71 | let prog = Prog { 72 | setup_iterations, 73 | loop_iterations, 74 | }; 75 | let analysis = analyze_ncs_rcs(prog, &ns, &rcs); 76 | let mut table = AsciiTable::default(); 77 | 78 | table.column(0).set_header("n"); 79 | table.column(1).set_header("rc"); 80 | for (i, rc) in rcs.into_iter().enumerate() { 81 | table.column(i + 2).set_header(rc.to_string()); 82 | } 83 | 84 | println!("\nSetup iterations: {setup_iterations}; Iterations per loop: {loop_iterations}."); 85 | table.print(analysis); 86 | } 87 | -------------------------------------------------------------------------------- /src/parser/position.rs: -------------------------------------------------------------------------------- 1 | use crate::parser::Span; 2 | #[cfg(not(target_arch = "wasm32"))] 3 | use proptest::prelude::*; 4 | 5 | /// Source code position of an expression in a file 6 | #[derive(Clone, Copy, Debug)] 7 | pub enum Pos { 8 | No, 9 | Pos { 10 | from_offset: usize, 11 | from_line: usize, 12 | from_column: usize, 13 | upto_offset: usize, 14 | upto_line: usize, 15 | upto_column: usize, 16 | }, 17 | } 18 | 19 | // This is so we can easily use derive(PartialEq) on datatypes like `Syntax` which contain `Pos`, 20 | // since the source position an AST node comes from doesn't effect its equality 21 | impl PartialEq for Pos { 22 | fn eq(&self, _other: &Self) -> bool { 23 | true 24 | } 25 | } 26 | 27 | impl Eq for Pos {} 28 | 29 | #[cfg(not(target_arch = "wasm32"))] 30 | impl Arbitrary for Pos { 31 | type Parameters = (); 32 | type Strategy = BoxedStrategy; 33 | 34 | fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { 35 | any::<()>().prop_map(|_| Pos::No).boxed() 36 | } 37 | } 38 | 39 
| impl Pos { 40 | /// Use the range information in a Position to pretty-print that range within 41 | /// a string 42 | pub fn range( 43 | input: &str, 44 | from_line: usize, 45 | from_column: usize, 46 | upto_line: usize, 47 | upto_column: usize, 48 | ) -> String { 49 | let mut res = String::new(); 50 | let gutter = format!("{upto_line}").len(); 51 | let pad = format!("{from_line: >gutter$}").len() + 3 + from_column; 52 | res.push_str(&format!("{}▼\n", " ".to_owned().repeat(pad))); 53 | for (line_number, line) in input.lines().enumerate() { 54 | if ((line_number + 1) >= from_line) && ((line_number + 1) <= upto_line) { 55 | res.push_str(&format!( 56 | "{: >gutter$} | {}\n", 57 | line_number + 1, 58 | line, 59 | gutter = gutter 60 | )); 61 | } 62 | } 63 | let pad = format!("{upto_line: >gutter$}").len() + 3 + upto_column; 64 | res.push_str(&format!("{}▲", " ".to_owned().repeat(pad))); 65 | res 66 | } 67 | 68 | /// Construct a position from the difference of two Spans 69 | pub fn from_upto(from: Span<'_>, upto: Span<'_>) -> Self { 70 | Self::Pos { 71 | from_offset: from.location_offset(), 72 | from_line: from.location_line() as usize, 73 | from_column: from.get_utf8_column(), 74 | upto_offset: (upto.location_offset()), 75 | upto_line: upto.location_line() as usize, 76 | upto_column: upto.get_utf8_column(), 77 | } 78 | } 79 | 80 | /// Retrieves the `from_offset` attribute, if present 81 | pub fn get_from_offset(&self) -> Option { 82 | match self { 83 | Self::No => None, 84 | Self::Pos { from_offset, .. 
} => Some(*from_offset), 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /.github/workflows/gpu-bench-workflow-dispatch.yml: -------------------------------------------------------------------------------- 1 | # Run GPU benchmark on a local branch when manually triggered 2 | name: Manual GPU benchmarks 3 | 4 | on: 5 | workflow_dispatch: 6 | 7 | concurrency: 8 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 9 | cancel-in-progress: true 10 | 11 | jobs: 12 | gpu-benchmark: 13 | name: Manual GPU bench 14 | runs-on: [self-hosted, gpu-bench] 15 | steps: 16 | - uses: actions/checkout@v4 17 | with: 18 | repository: argumentcomputer/ci-workflows 19 | - uses: ./.github/actions/gpu-setup 20 | with: 21 | gpu-framework: 'cuda' 22 | - uses: ./.github/actions/ci-env 23 | - uses: actions/checkout@v4 24 | # Install dependencies 25 | - uses: dtolnay/rust-toolchain@stable 26 | - uses: Swatinem/rust-cache@v2 27 | - uses: taiki-e/install-action@v2 28 | with: 29 | tool: just@1.22 30 | - name: Install criterion 31 | run: | 32 | cargo install cargo-criterion 33 | cargo install criterion-table 34 | - name: Set bench output format type 35 | run: echo "LURK_BENCH_OUTPUT=commit-comment" | tee -a $GITHUB_ENV 36 | - name: Run GPU bench on branch 37 | run: | 38 | just gpu-bench-ci fibonacci 39 | cp fibonacci-${{ github.sha }}.json .. 40 | working-directory: ${{ github.workspace }}/benches 41 | - name: copy the benchmark template and prepare it with data 42 | run: | 43 | cp .github/tables.toml . 
44 | # Get GPU name 45 | GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader,nounits | tail -n1) 46 | # Get CPU model 47 | CPU_MODEL=$(grep '^model name' /proc/cpuinfo | head -1 | awk -F ': ' '{ print $2 }') 48 | # Get vCPU count 49 | NUM_VCPUS=$(nproc --all) 50 | # Get total RAM in GB 51 | TOTAL_RAM=$(grep MemTotal /proc/meminfo | awk '{$2=$2/(1024^2); print $2, "GB RAM";}') 52 | WORKFLOW_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" 53 | 54 | # Use conditionals to ensure that only non-empty variables are inserted 55 | [[ ! -z "$GPU_NAME" ]] && sed -i "/^\"\"\"$/i $GPU_NAME" tables.toml 56 | [[ ! -z "$CPU_MODEL" ]] && sed -i "/^\"\"\"$/i $CPU_MODEL" tables.toml 57 | [[ ! -z "$NUM_VCPUS" ]] && sed -i "/^\"\"\"$/i $NUM_VCPUS vCPUs" tables.toml 58 | [[ ! -z "$TOTAL_RAM" ]] && sed -i "/^\"\"\"$/i $TOTAL_RAM" tables.toml 59 | sed -i "/^\"\"\"$/i Workflow run: $WORKFLOW_URL" tables.toml 60 | working-directory: ${{ github.workspace }} 61 | # Create a `criterion-table` and write in commit comment 62 | - name: Run `criterion-table` 63 | run: cat fibonacci-${{ github.sha }}.json | criterion-table > BENCHMARKS.md 64 | working-directory: ${{ github.workspace }} 65 | - name: Write bench on commit comment 66 | uses: peter-evans/commit-comment@v3 67 | with: 68 | body-path: BENCHMARKS.md 69 | 70 | -------------------------------------------------------------------------------- /lurk-metrics/src/recorder.rs: -------------------------------------------------------------------------------- 1 | use metrics::{ 2 | Counter, CounterFn, Gauge, GaugeFn, Histogram, HistogramFn, Key, KeyName, Metadata, Recorder, 3 | SharedString, Unit, 4 | }; 5 | 6 | use crate::data::MetricType; 7 | use crate::ThreadMetricsSinkHandle; 8 | use std::sync::Arc; 9 | 10 | /// An implementation of the [metrics::Recorder] trait that emits metrics to a thread-local metrics 11 | /// sink. 
12 | pub(crate) struct MetricsRecorder; 13 | 14 | impl Recorder for MetricsRecorder { 15 | fn describe_counter(&self, _key: KeyName, _unit: Option, _description: SharedString) {} 16 | 17 | fn describe_gauge(&self, _key: KeyName, _unit: Option, _description: SharedString) {} 18 | 19 | fn describe_histogram(&self, _key: KeyName, _unit: Option, _description: SharedString) {} 20 | 21 | fn register_counter(&self, key: &Key, _metadata: &Metadata<'_>) -> Counter { 22 | Counter::from_arc(Arc::new(CounterImpl(key.clone()))) 23 | } 24 | 25 | fn register_gauge(&self, key: &Key, _metadata: &Metadata<'_>) -> Gauge { 26 | Gauge::from_arc(Arc::new(GaugeImpl(key.clone()))) 27 | } 28 | 29 | fn register_histogram(&self, key: &Key, _metadata: &Metadata<'_>) -> Histogram { 30 | Histogram::from_arc(Arc::new(HistogramImpl(key.clone()))) 31 | } 32 | } 33 | 34 | struct CounterImpl(Key); 35 | 36 | impl CounterFn for CounterImpl { 37 | fn increment(&self, value: u64) { 38 | ThreadMetricsSinkHandle::with(|handle| handle.increment_counter(&self.0, value)) 39 | } 40 | 41 | fn absolute(&self, _value: u64) { 42 | panic!("absolute counter values are not supported"); 43 | } 44 | } 45 | 46 | struct GaugeImpl(Key); 47 | 48 | impl GaugeFn for GaugeImpl { 49 | fn increment(&self, _value: f64) { 50 | panic!("increment gauge values are not support") 51 | } 52 | 53 | fn decrement(&self, _value: f64) { 54 | panic!("decrement gauge values are not support") 55 | } 56 | 57 | fn set(&self, value: f64) { 58 | ThreadMetricsSinkHandle::with(|handle| handle.set_gauge(&self.0, value)) 59 | } 60 | } 61 | 62 | struct HistogramImpl(Key); 63 | 64 | impl HistogramFn for HistogramImpl { 65 | fn record(&self, value: f64) { 66 | ThreadMetricsSinkHandle::with(|handle| handle.increment_histogram(&self.0, value as u64)); 67 | } 68 | } 69 | 70 | impl ThreadMetricsSinkHandle { 71 | fn increment_counter(&self, key: &Key, value: u64) { 72 | self.inner 73 | .lock() 74 | .unwrap() 75 | .metrics 76 | .get_mut(MetricType::Counter, 
key) 77 | .increment(value); 78 | } 79 | 80 | fn increment_histogram(&self, key: &Key, value: u64) { 81 | self.inner 82 | .lock() 83 | .unwrap() 84 | .metrics 85 | .get_mut(MetricType::Histogram, key) 86 | .increment(value); 87 | } 88 | 89 | fn set_gauge(&self, key: &Key, value: f64) { 90 | self.inner 91 | .lock() 92 | .unwrap() 93 | .metrics 94 | .get_mut(MetricType::Gauge, key) 95 | .set(value); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/coprocessor/circom/error.rs: -------------------------------------------------------------------------------- 1 | use crate::circuit::gadgets::circom::CircomGadgetReference; 2 | use thiserror::Error; 3 | 4 | /// Enum related to error happening while dealing with the Circom Coprocessor. 5 | #[derive(Error, Debug)] 6 | pub enum CircomCoprocessorError { 7 | /// Error if we could not find the specified gadget either locally or on Github. 8 | #[error(" 9 | {prelude}: no circom gadget with reference `{reference}` found locally or on Github. Make sure that the reference 10 | is properly formatted as `/`. 11 | 12 | If you want to setup a new circom gadget `{reference}`, place your circom files in a designated folder and 13 | create a file called `{name}.circom`. The circom binary expects `<{name}_FOLDER>/{name}.circom` 14 | as the input file; in this file you must declare your circom main component. 15 | 16 | Then run `lurk coprocessor --reference {reference} <{name}_FOLDER>` to instantiate a new gadget with reference `{reference}`.")] 17 | GadgetNotFound { 18 | prelude: String, 19 | reference: CircomGadgetReference, 20 | name: String, 21 | }, 22 | /// Error if we could not find the specified gadget locally and no release version is specified 23 | /// for Github while we have checked that the desired gadget exists. 24 | #[error(" 25 | {prelude}: no circom gadget with reference `{reference}` found locally. 
A repository with the given reference 26 | was found: 27 | 28 | https://github.com/{reference} 29 | 30 | However, the gadget has no version specified. Please provide a correct release tag and retry.")] 31 | MissingGadgetVersion { prelude: String, reference: String }, 32 | /// Error if we wanted to initiate a remote HTTP call but encountered an error. 33 | #[error( 34 | " 35 | {prelude}: no circom gadget with reference `{reference}` found locally. We tried to look for it on 36 | Github but we encountered an error: 37 | 38 | {source} 39 | 40 | Please retry." 41 | )] 42 | RemoteCallFailure { 43 | prelude: String, 44 | reference: CircomGadgetReference, 45 | #[source] 46 | source: Box, 47 | }, 48 | /// Error if we try to create the directories for the gadget on the file system but failed. 49 | #[error( 50 | " 51 | {prelude}: we tried to create the necessary assets for the gadget on the file system but we encountered 52 | an error: 53 | 54 | {source} 55 | 56 | Please retry." 57 | )] 58 | AssetCreationFailure { 59 | prelude: String, 60 | reference: CircomGadgetReference, 61 | #[source] 62 | source: Box, 63 | }, 64 | /// Error when we got a satic file from Github but could not process it. 65 | #[error(" 66 | {prelude}: no circom gadget with reference `{reference}` found locally. 
We tried to download the resource 67 | at {asset_url}, but encountered an error: 68 | 69 | {source} 70 | 71 | Please make sure that the resource corresponds to a valid r1cs or wasm file and retry.")] 72 | PayloadProcessingError { 73 | prelude: String, 74 | reference: CircomGadgetReference, 75 | #[source] 76 | source: Box, 77 | asset_url: String, 78 | }, 79 | } 80 | -------------------------------------------------------------------------------- /.github/workflows/merge-tests.yml: -------------------------------------------------------------------------------- 1 | # Run final tests only when attempting to merge, shown as skipped status checks beforehand 2 | name: Merge group tests 3 | 4 | on: 5 | pull_request: 6 | types: [opened, synchronize, reopened, ready_for_review] 7 | branches: [main] 8 | merge_group: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | linux-ignored: 16 | if: github.event_name != 'pull_request' || github.event.action == 'enqueued' 17 | runs-on: warp-ubuntu-2404-x64-16x 18 | env: 19 | RUSTFLAGS: -D warnings 20 | steps: 21 | - uses: actions/checkout@v4 22 | with: 23 | repository: argumentcomputer/ci-workflows 24 | - uses: ./.github/actions/install-deps 25 | with: 26 | packages: 'pkg-config libssl-dev protobuf-compiler libprotobuf-dev' 27 | - uses: actions/checkout@v4 28 | with: 29 | submodules: recursive 30 | - uses: dtolnay/rust-toolchain@stable 31 | - uses: taiki-e/install-action@nextest 32 | - uses: Swatinem/rust-cache@v2 33 | - name: Linux Tests 34 | run: | 35 | cargo nextest run --profile ci --workspace --cargo-profile dev-ci --run-ignored ignored-only -E 'all() - test(test_demo)' 36 | 37 | linux-arm: 38 | if: github.event_name != 'pull_request' || github.event.action == 'enqueued' 39 | runs-on: warp-ubuntu-latest-arm64-16x 40 | env: 41 | RUSTFLAGS: -D warnings 42 | steps: 43 | - uses: actions/checkout@v4 44 | with: 45 | repository: 
argumentcomputer/ci-workflows 46 | - uses: ./.github/actions/install-deps 47 | with: 48 | packages: 'pkg-config libssl-dev protobuf-compiler libprotobuf-dev' 49 | - uses: actions/checkout@v4 50 | with: 51 | submodules: recursive 52 | - uses: dtolnay/rust-toolchain@stable 53 | - uses: taiki-e/install-action@nextest 54 | - uses: Swatinem/rust-cache@v2 55 | - name: Linux Tests 56 | run: | 57 | cargo nextest run --profile ci --workspace --cargo-profile dev-ci 58 | - name: Linux Gadget Tests w/o debug assertions 59 | run: | 60 | cargo nextest run --profile ci --workspace --cargo-profile dev-no-assertions -E 'test(circuit::gadgets)' 61 | 62 | mac-m1: 63 | if: github.event_name != 'pull_request' || github.event.action == 'enqueued' 64 | runs-on: macos-latest-xlarge 65 | steps: 66 | - uses: actions/checkout@v4 67 | with: 68 | repository: argumentcomputer/ci-workflows 69 | - name: Install dependencies 70 | run: | 71 | brew install protobuf 72 | protoc --version 73 | - uses: actions/checkout@v4 74 | with: 75 | submodules: recursive 76 | - uses: dtolnay/rust-toolchain@stable 77 | - uses: taiki-e/install-action@nextest 78 | - uses: Swatinem/rust-cache@v2 79 | - name: Linux Tests 80 | run: | 81 | cargo nextest run --profile ci --workspace --cargo-profile dev-ci 82 | - name: Linux Gadget Tests w/o debug assertions 83 | run: | 84 | cargo nextest run --profile ci --workspace --cargo-profile dev-no-assertions -E 'test(circuit::gadgets)' 85 | -------------------------------------------------------------------------------- /src/syntax_macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | macro_rules! num { 3 | ($f:ty, $i:literal) => { 4 | $crate::syntax::Syntax::<$f>::Num(Pos::No, ($i).into()) 5 | }; 6 | ($i:literal) => { 7 | $crate::syntax::Syntax::Num(Pos::No, ($i).into()) 8 | }; 9 | ($i:expr) => { 10 | $crate::syntax::Syntax::Num(Pos::No, $i) 11 | }; 12 | } 13 | 14 | #[macro_export] 15 | macro_rules! 
uint { 16 | ($f:ty, $i:literal) => { 17 | $crate::syntax::Syntax::<$f>::UInt(Pos::No, $crate::uint::UInt::U64($i)) 18 | }; 19 | ($i:literal) => { 20 | $crate::syntax::Syntax::UInt(Pos::No, $crate::uint::UInt::U64($i)) 21 | }; 22 | } 23 | 24 | #[macro_export] 25 | macro_rules! str { 26 | ($f:ty, $i:literal) => { 27 | $crate::syntax::Syntax::<$f>::String(Pos::No, $i.to_string()) 28 | }; 29 | ($i:literal) => { 30 | $crate::syntax::Syntax::String(Pos::No, $i.to_string()) 31 | }; 32 | } 33 | 34 | #[macro_export] 35 | macro_rules! char { 36 | ($f:ty, $i:literal) => { 37 | $crate::syntax::Syntax::<$f>::Char(Pos::No, $i as char) 38 | }; 39 | ($i:literal) => { 40 | $crate::syntax::Syntax::Char(Pos::No, $i as char) 41 | }; 42 | } 43 | 44 | #[macro_export] 45 | macro_rules! symbol { 46 | ( [$( $x:expr ),*] ) => { 47 | { 48 | let temp_vec = vec![ $( $x.to_string() ),* ]; 49 | $crate::syntax::Syntax::Symbol(Pos::No, $crate::symbol::Symbol::sym_from_vec(temp_vec).into()) 50 | } 51 | }; 52 | ( $f:ty, [$( $x:expr ),*] ) => { 53 | { 54 | let temp_vec = vec![ $( $x.to_owned() ),* ]; 55 | $crate::syntax::Syntax::<$f>::Symbol(Pos::No, $crate::symbol::Symbol::sym_from_vec(temp_vec).into()) 56 | } 57 | }; 58 | } 59 | 60 | #[macro_export] 61 | macro_rules! keyword { 62 | ( [$( $x:expr ),*] ) => { 63 | { 64 | let temp_vec = vec![ $( $x.to_string() ),* ]; 65 | $crate::syntax::Syntax::Symbol(Pos::No, $crate::symbol::Symbol::key_from_vec(temp_vec).into()) 66 | } 67 | }; 68 | ( $f:ty, [$( $x:expr ),*] ) => { 69 | { 70 | let temp_vec = vec![ $( $x.to_owned() ),* ]; 71 | $crate::syntax::Syntax::<$f>::Symbol(Pos::No, $crate::symbol::Symbol::key_from_vec(temp_vec).into()) 72 | } 73 | }; 74 | } 75 | 76 | #[macro_export] 77 | macro_rules!
list { 78 | ([$( $x:expr ),*], $end:expr ) => { 79 | { 80 | let temp_vec = vec![ $( $x ),* ]; 81 | $crate::syntax::Syntax::Improper(Pos::No, temp_vec, Box::new($end)) 82 | } 83 | }; 84 | ([$( $x:expr ),*] ) => { 85 | { 86 | let temp_vec = vec![ $( $x ),* ]; 87 | $crate::syntax::Syntax::List(Pos::No, temp_vec) 88 | } 89 | }; 90 | ($f:ty, [$( $x:expr ),*], $end:expr ) => { 91 | { 92 | let temp_vec = vec![ $( $x ),* ]; 93 | $crate::syntax::Syntax::<$f>::Improper(Pos::No, temp_vec, Box::new($end)) 94 | } 95 | }; 96 | ($f:ty, [$( $x:expr ),*] ) => { 97 | { 98 | let temp_vec = vec![ $( $x ),* ]; 99 | $crate::syntax::Syntax::<$f>::List(Pos::No, temp_vec) 100 | } 101 | }; 102 | } 103 | -------------------------------------------------------------------------------- /src/package.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Result}; 2 | use std::{ 3 | collections::{HashMap, HashSet}, 4 | sync::Arc, 5 | }; 6 | 7 | use crate::Symbol; 8 | 9 | pub(crate) type SymbolRef = Arc; 10 | 11 | #[derive(Debug)] 12 | pub struct Package { 13 | name: SymbolRef, 14 | symbols: HashMap, 15 | names: HashMap, 16 | local: HashSet, 17 | } 18 | 19 | impl Package { 20 | #[inline] 21 | pub fn new(name: SymbolRef) -> Self { 22 | Self { 23 | name, 24 | symbols: Default::default(), 25 | names: Default::default(), 26 | local: Default::default(), 27 | } 28 | } 29 | 30 | #[inline] 31 | pub const fn name(&self) -> &SymbolRef { 32 | &self.name 33 | } 34 | 35 | #[inline] 36 | pub fn resolve(&self, symbol_name: &str) -> Option<&SymbolRef> { 37 | self.symbols.get(symbol_name) 38 | } 39 | 40 | /// Given a symbol name, returns the corresponding symbol if it's accessible 41 | /// in the package. If it's not, make it so by creating a new symbol prefixed 42 | /// by the package's name. 
43 | pub fn intern>(&mut self, symbol_name: T) -> SymbolRef { 44 | let symbol_name = symbol_name.into(); 45 | self.symbols 46 | .entry(symbol_name) 47 | .or_insert_with_key(|symbol_name| { 48 | let symbol: SymbolRef = self.name.direct_child(symbol_name).into(); 49 | self.names.insert(symbol.clone(), symbol_name.clone()); 50 | self.local.insert(symbol.clone()); 51 | symbol 52 | }) 53 | .clone() 54 | } 55 | 56 | /// Tries to import a list of symbols so they become accessible in the package. 57 | /// If some symbol can't be imported due to an error (i.e. it conflicts with 58 | /// another accessible symbol), none of the symbols are effectively imported. 59 | /// In other words, importing is an atomic operation. 60 | pub fn import(&mut self, symbols: &[SymbolRef]) -> Result<()> { 61 | let mut symbols_names = Vec::with_capacity(symbols.len()); 62 | // first we look for potential errors 63 | for symbol in symbols { 64 | let symbol_name = symbol.name()?; 65 | // check conflicts with accessible symbols 66 | if let Some(symbol_resolved) = self.resolve(symbol_name) { 67 | if symbol != symbol_resolved { 68 | bail!("{symbol} conflicts with {symbol_resolved}, which is already accessible") 69 | } 70 | } 71 | // memoize the symbols' names for efficiency 72 | symbols_names.push(symbol_name); 73 | } 74 | // now we finally import as an atomic operation 75 | for (symbol, symbol_name) in symbols.iter().zip(symbols_names) { 76 | self.symbols.insert(symbol_name.to_string(), symbol.clone()); 77 | self.names.insert(symbol.clone(), symbol_name.to_string()); 78 | } 79 | Ok(()) 80 | } 81 | 82 | /// Import the local symbols of another package 83 | pub fn use_package(&mut self, package: &Package) -> Result<()> { 84 | self.import(&package.local.iter().cloned().collect::>()) 85 | } 86 | 87 | pub fn fmt_to_string(&self, symbol: &SymbolRef) -> String { 88 | match self.names.get(symbol) { 89 | None => symbol.fmt_to_string(), 90 | Some(name) => Symbol::fmt_path_component_to_string(name), 91 | } 92 | 
} 93 | } 94 | -------------------------------------------------------------------------------- /src/proof/tests/stream.rs: -------------------------------------------------------------------------------- 1 | use expect_test::{expect, Expect}; 2 | use halo2curves::bn256::Fr; 3 | use rayon::iter::{IntoParallelIterator, ParallelIterator}; 4 | use std::sync::Arc; 5 | 6 | use crate::{ 7 | dual_channel::{dummy_terminal, pair_terminals}, 8 | lang::{Coproc, Lang}, 9 | lem::{ 10 | eval::{evaluate_simple, resume_stream, start_stream}, 11 | pointers::Ptr, 12 | store::Store, 13 | }, 14 | proof::{supernova::SuperNovaProver, RecursiveSNARKTrait}, 15 | public_parameters::{instance::Instance, supernova_public_params}, 16 | }; 17 | 18 | const LIMIT: usize = 200; 19 | 20 | fn get_callable(callable_str: &str, store: &Store) -> Ptr { 21 | let callable = store.read_with_default_state(callable_str).unwrap(); 22 | let (io, _) = 23 | evaluate_simple::>(None, callable, store, LIMIT, &dummy_terminal()).unwrap(); 24 | io[0] 25 | } 26 | 27 | #[inline] 28 | fn expect_eq(computed: usize, expected: &Expect) { 29 | expected.assert_eq(&computed.to_string()); 30 | } 31 | 32 | #[test] 33 | fn test_continued_proof() { 34 | let callable_str = "(letrec ((add (lambda (counter x) 35 | (let ((counter (+ counter x))) 36 | (cons counter (add counter)))))) 37 | (add 0))"; 38 | let store = Arc::new(Store::::default()); 39 | let callable = get_callable(callable_str, &store); 40 | let expected_iterations = &expect!["14"]; 41 | 42 | let lang = Arc::new(Lang::>::new()); 43 | 44 | [1, 3, 5].into_par_iter().for_each(|rc| { 45 | let prover = SuperNovaProver::new(rc, lang.clone()); 46 | let instance = Instance::new_supernova(&prover, true); 47 | let pp = supernova_public_params(&instance).unwrap(); 48 | 49 | let (t1, t2) = pair_terminals(); 50 | t2.send(store.num_u64(123)).unwrap(); 51 | let frames = start_stream::>(None, callable, &store, LIMIT, &t1).unwrap(); 52 | 53 | // this input will be used to construct the 
public input of every proof 54 | let z0 = store.to_scalar_vector(&frames.first().unwrap().input); 55 | 56 | expect_eq(frames.len(), expected_iterations); 57 | let output = &frames.last().unwrap().output; 58 | let (result, _) = store.fetch_cons(&output[0]).unwrap(); 59 | assert_eq!(result, &store.num_u64(123)); 60 | 61 | let (proof, ..) = prover 62 | .prove_from_frames(&pp, &frames, &store, None) 63 | .unwrap(); 64 | 65 | proof 66 | .verify(&pp, &z0, &store.to_scalar_vector(output)) 67 | .unwrap(); 68 | 69 | let base_snark = proof.get_recursive(); 70 | assert!(base_snark.is_some()); 71 | 72 | // into the next stream cycle 73 | t2.send(store.intern_nil()).unwrap(); // send nil to skip stuttering 74 | t2.send(store.num_u64(321)).unwrap(); 75 | let frames = 76 | resume_stream::>(None, output.clone(), &store, LIMIT, &t1).unwrap(); 77 | 78 | expect_eq(frames.len(), expected_iterations); 79 | let output = &frames.last().unwrap().output; 80 | let (result, _) = store.fetch_cons(&output[0]).unwrap(); 81 | assert_eq!(result, &store.num_u64(444)); 82 | 83 | let (proof, ..) 
= prover 84 | .prove_from_frames(&pp, &frames, &store, base_snark) 85 | .unwrap(); 86 | 87 | let zi = store.to_scalar_vector(output); 88 | proof.verify(&pp, &z0, &zi).unwrap(); 89 | 90 | let proof = proof.compress(&pp).unwrap(); 91 | proof.verify(&pp, &z0, &zi).unwrap(); 92 | }); 93 | } 94 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "fenix": { 4 | "inputs": { 5 | "nixpkgs": [ 6 | "nixpkgs" 7 | ], 8 | "rust-analyzer-src": "rust-analyzer-src" 9 | }, 10 | "locked": { 11 | "lastModified": 1707978135, 12 | "narHash": "sha256-Xje6vjTcVUfPg3+X4PUSlgDxA/MSqzmtjOTW47NRwwM=", 13 | "owner": "nix-community", 14 | "repo": "fenix", 15 | "rev": "09ef6ec17141904ca28ddd62f2697f63c2aaa799", 16 | "type": "github" 17 | }, 18 | "original": { 19 | "owner": "nix-community", 20 | "repo": "fenix", 21 | "type": "github" 22 | } 23 | }, 24 | "flake-utils": { 25 | "inputs": { 26 | "systems": "systems" 27 | }, 28 | "locked": { 29 | "lastModified": 1705309234, 30 | "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", 31 | "owner": "numtide", 32 | "repo": "flake-utils", 33 | "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", 34 | "type": "github" 35 | }, 36 | "original": { 37 | "owner": "numtide", 38 | "repo": "flake-utils", 39 | "type": "github" 40 | } 41 | }, 42 | "naersk": { 43 | "inputs": { 44 | "nixpkgs": [ 45 | "nixpkgs" 46 | ] 47 | }, 48 | "locked": { 49 | "lastModified": 1698420672, 50 | "narHash": "sha256-/TdeHMPRjjdJub7p7+w55vyABrsJlt5QkznPYy55vKA=", 51 | "owner": "nix-community", 52 | "repo": "naersk", 53 | "rev": "aeb58d5e8faead8980a807c840232697982d47b9", 54 | "type": "github" 55 | }, 56 | "original": { 57 | "owner": "nix-community", 58 | "repo": "naersk", 59 | "type": "github" 60 | } 61 | }, 62 | "nixpkgs": { 63 | "locked": { 64 | "lastModified": 1707956935, 65 | "narHash": 
"sha256-ZL2TrjVsiFNKOYwYQozpbvQSwvtV/3Me7Zwhmdsfyu4=", 66 | "owner": "NixOS", 67 | "repo": "nixpkgs", 68 | "rev": "a4d4fe8c5002202493e87ec8dbc91335ff55552c", 69 | "type": "github" 70 | }, 71 | "original": { 72 | "id": "nixpkgs", 73 | "ref": "nixos-unstable", 74 | "type": "indirect" 75 | } 76 | }, 77 | "root": { 78 | "inputs": { 79 | "fenix": "fenix", 80 | "flake-utils": "flake-utils", 81 | "naersk": "naersk", 82 | "nixpkgs": "nixpkgs" 83 | } 84 | }, 85 | "rust-analyzer-src": { 86 | "flake": false, 87 | "locked": { 88 | "lastModified": 1707925466, 89 | "narHash": "sha256-2xxcezb4tvssbVCU69DnTDSMB2lqwEp63JNQt8zuzcs=", 90 | "owner": "rust-lang", 91 | "repo": "rust-analyzer", 92 | "rev": "dba59970bcccfb3c6fc16ea0d0d79da875f22316", 93 | "type": "github" 94 | }, 95 | "original": { 96 | "owner": "rust-lang", 97 | "ref": "nightly", 98 | "repo": "rust-analyzer", 99 | "type": "github" 100 | } 101 | }, 102 | "systems": { 103 | "locked": { 104 | "lastModified": 1681028828, 105 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 106 | "owner": "nix-systems", 107 | "repo": "default", 108 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 109 | "type": "github" 110 | }, 111 | "original": { 112 | "owner": "nix-systems", 113 | "repo": "default", 114 | "type": "github" 115 | } 116 | } 117 | }, 118 | "root": "root", 119 | "version": 7 120 | } 121 | -------------------------------------------------------------------------------- /src/coprocessor/circom/README.md: -------------------------------------------------------------------------------- 1 | # Circom Coprocessor 2 | 3 | The [Circom](https://github.com/iden3/circom) Coprocessor is a special Coprocessor that is included in our library. It 4 | allows users to use Circom-based circuits in a folding step, alongside the Lurk reduction process. 
This feature 5 | pushes forward the flexibility of our stack and opens it up to a myriad of developers and researchers already familiar 6 | with this particular programming language. 7 | 8 | ## Overview 9 | 10 | There are two key components to have in mind when thinking about the Circom Coprocessor: 11 | - `CircomCoprocessor`: The shim that exposes a Circom gadget as a Lurk Coprocessor. It leverages [`circom-scotia`](https://github.com/argumentcomputer/circom-scotia) 12 | to compile Circom-based circuits to [Bellpepper](https://github.com/argumentcomputer/bellpepper), thus making them usable in our stack. 13 | - `CircomGadget`: The Circom gadget proper. Refers to the r1cs and wasm files compiled from the targeted 14 | circom circuit that will be used in our proving flow. Those compiled files can live either locally or in a Github release. 15 | 16 | ## Circom Gadgets 17 | 18 | A `CircomGadget` is an interface that allows our `CircomCoprocessor` to prepare everything needed to use `circom-scotia`. 19 | It has 3 main purposes: 20 | 1. Redirect to Circom assets: The compiled r1cs and wasm files from Circom can currently live either on the local file 21 | system or on a remote Github release. We will detail later on how this works. 22 | 2. Input conversion: A defined way to take a list of Lurk input pointers and turn them into a Circom input. We do not 23 | enforce the shapes of either the Lurk end or the Circom end, so users should take care to define what shape they expect. 24 | 3. Evaluation logic: A defined way *Lurk* should evaluate what this gadget does. This is then the implementation used in 25 | the `Coprocessor` trait. 26 | 27 | ### Compiled Circom files location 28 | 29 | The r1cs and wasm files can live either directly on the local file system or on a remote Github 30 | repository. 31 | 32 | For local import of Circom circuits the Lurk CLI can be used.
In short, it will directly compile the pointed Circom circuit 33 | and make the files available to the `CircomCoprocessor` if they are correctly referenced by its inner `CircomGadget`. 34 | 35 | For remote gadgets, the `CircomCoprocessor` will use the _reference_ of the `CircomGadget` to search a corresponding 36 | Github repository. In this case, there are a few constraints that needs to be followed: 37 | 1. _reference_ format: **must** be formatted as `/` as would a Github repository 38 | 2. Compiled Circom files available in a release: the compiled circom files, r1cs and wasm, **must** be made available in an release 39 | in the repository. To help fulfill this constraint, we provide [a template of a Gadget repository](https://github.com/argumentcomputer/template-circom-gadget). 40 | 3. Compiled Circom files names: The name of the compiled circom files **must** be the same as the repository (e.g.: `argumentcomputer/keccak` -> `keccak.wasm` & `keccak.r1cs`). 41 | This effectively limits the number of circuit available per repository to one. 42 | 4. _version_ specification: As we are looking for file in a specific release the `CircomGadget` **must** be specified with 43 | a correct release version. 44 | If all these constraints are passed, the `CircomCoprocessor` will import the circuit in the local file system to use it 45 | later on. 46 | 47 | In the computation flow of the `CircomCoprocessor`, it will by default look first in the local file system to check if the 48 | specified gadget already exists. This allows us to skip any cumbersome interaction with a remote host. 49 | -------------------------------------------------------------------------------- /demo/vdf.lurk: -------------------------------------------------------------------------------- 1 | ;; Hat tip to JP Aumasson. 2 | !(defrec fastexp (lambda (b e) 3 | (if (= e 0) 1 4 | (if (< (/ e 2) 0) ; is e odd? 
5 | (* b (fastexp (* b b) (/ (- e 1) 2))) 6 | (fastexp (* b b) (/ e 2)))))) 7 | 8 | (fastexp 2 5) 9 | 10 | ;; (4p - 3) / 5 11 | !(def r 17510594297471420177797124596205820070838691520332827474958563349260646796493) 12 | 13 | !(def fifth-root (lambda (n) (fastexp n r))) 14 | 15 | !(def fifth (lambda (n) (fastexp n 5))) 16 | 17 | (fifth-root 42) 18 | 19 | (fifth 0x2a7b0ae43e98616d25287ce004700ac747ed8f5e95761ee4869ac0a851b3d3ee) 20 | 21 | !(def round (lambda (state) 22 | (let ((x (car state)) 23 | (y (car (cdr state))) 24 | (i (car (cdr (cdr state))))) 25 | (cons (fifth-root (+ x y)) 26 | (cons (+ x i) 27 | (cons (+ i 1) nil)))))) 28 | 29 | !(def inverse-round (lambda (state) 30 | (let ((x (car state)) 31 | (y (car (cdr state))) 32 | (i (car (cdr (cdr state)))) 33 | (new-i (- i 1)) 34 | (new-x (- y new-i)) 35 | (new-y (- (fifth x) new-x))) 36 | (cons new-x (cons new-y (cons new-i nil)))))) 37 | 38 | !(defrec minroot (lambda (state rounds) 39 | (if (= rounds 0) 40 | state 41 | (minroot (round state) (- rounds 1))))) 42 | 43 | !(defrec minroot-inverse (lambda (state rounds) 44 | (if (= rounds 0) 45 | state 46 | (minroot-inverse (inverse-round state) (- rounds 1))))) 47 | 48 | (minroot '(123 456 1) 10) 49 | 50 | (minroot-inverse '(0x1896bbc5df5819f436df3f86f6c707bdee372c1046bfa70196cf0f358497acc2 0x0ba0c60958b05f0ca1153cab833cbc315f7f10c4e1b9630ec8829cea907cb688 11) 10) 51 | 52 | !(prove) 53 | 54 | !(verify "supernova_bn256_10_2b96123c7e47229622beeef7080c005e9d18ed0cfd2c0b7f06e1d9f1cfcf83a8") 55 | 56 | !(def timelock-encrypt (lambda (secret-key plaintext rounds) 57 | (let ((ciphertext (+ secret-key plaintext)) 58 | (timelocked-key-state (minroot-inverse (cons secret-key '(0 1)) rounds))) 59 | (cons timelocked-key-state ciphertext)))) 60 | 61 | !(def timelock-decrypt (lambda (timelocked-key-state ciphertext rounds) 62 | (let ((secret-key (car (minroot timelocked-key-state rounds))) 63 | (plaintext (- ciphertext secret-key))) 64 | plaintext))) 65 | 66 | ; 
(timelock-encrypt (num (commit )) 10000) 67 | 68 | ; [2370068 iterations] => ((0x2b7a3b8ddd37f5729671b40f14ea588eb74e0474516503cae76114c80c3e68b3 0x39766ed0c1d5a61b0a0b5146585f01ea78bac01860ce0f8653bb098d42efcce3 0x40000000000000000000000000000000224698fc0994a8dd8c46eb20ffffd8f2) . 0x0fbc16c244caeec63f5e0316c9b36ad5eba0b1c10f7ecf5d681a911e9dfa74d0) 69 | 70 | ; (timelock-decrypt ;; timelocked key state 71 | ; '(0x2b7a3b8ddd37f5729671b40f14ea588eb74e0474516503cae76114c80c3e68b3 72 | ; 0x39766ed0c1d5a61b0a0b5146585f01ea78bac01860ce0f8653bb098d42efcce3 73 | ; 0x40000000000000000000000000000000224698fc0994a8dd8c46eb20ffffd8f2) 74 | ; ;; ciphertext 75 | ; 0x0fbc16c244caeec63f5e0316c9b36ad5eba0b1c10f7ecf5d681a911e9dfa74d0 76 | ; ;; rounds 77 | ; 10000) 78 | 79 | ;; [97420052 iterations] => 80 | -------------------------------------------------------------------------------- /src/lem/tests/nivc_steps.rs: -------------------------------------------------------------------------------- 1 | use halo2curves::bn256::Fr; 2 | 3 | use crate::{ 4 | coprocessor::test::DumbCoprocessor, 5 | dual_channel::dummy_terminal, 6 | lang::Lang, 7 | lem::{ 8 | eval::{evaluate, make_cprocs_funcs_from_lang, make_eval_step_from_config, EvalConfig}, 9 | store::Store, 10 | Tag, 11 | }, 12 | state::user_sym, 13 | tag::{ContTag, ExprTag}, 14 | }; 15 | 16 | #[test] 17 | fn test_nivc_steps() { 18 | let mut lang = Lang::>::new(); 19 | let dumb = DumbCoprocessor::new(); 20 | let name = user_sym("cproc-dumb"); 21 | 22 | let store = Store::::default(); 23 | lang.add_coprocessor(name, dumb); 24 | 25 | let lurk_step = make_eval_step_from_config(&EvalConfig::new_nivc(&lang)); 26 | let cprocs = make_cprocs_funcs_from_lang(&lang); 27 | 28 | // this `Lang` only has one coprocessor, so we should get one `Func` 29 | assert_eq!(cprocs.len(), 1); 30 | let cproc = &cprocs[0]; 31 | 32 | // 9^2 + 8 = 89 33 | let expr = store.read_with_default_state("(cproc-dumb 9 8)").unwrap(); 34 | 35 | let frames = evaluate( 36 | 
Some((&lurk_step, &cprocs, &lang)), 37 | expr, 38 | &store, 39 | 10, 40 | &dummy_terminal(), 41 | ) 42 | .unwrap(); 43 | 44 | // Iteration 1: evaluate first argument 45 | // Iteration 2: evaluate second argument 46 | // Iteration 3: the list of arguments is empty, so set up coprocessor call 47 | // Iteration 4: reduce with coprocessor 48 | // Iteration 5: outermost -> terminal 49 | assert_eq!(frames.len(), 5); 50 | 51 | // this computation terminates 52 | assert!(matches!( 53 | frames[4].output[2].tag(), 54 | Tag::Cont(ContTag::Terminal) 55 | )); 56 | 57 | let dt = &dummy_terminal(); 58 | 59 | // `cproc` can't reduce the first input, which is meant for `lurk_step` 60 | let first_input = &frames[0].input; 61 | assert!(cproc 62 | .call_simple(first_input, &store, &lang, 0, dt) 63 | .is_err()); 64 | 65 | // the fourth frame is the one reduced by the coprocessor 66 | let cproc_frame = &frames[3]; 67 | assert_eq!(cproc_frame.pc, 1); 68 | let mut cproc_input = cproc_frame.input.clone(); 69 | assert!(matches!(cproc_input[0].tag(), Tag::Expr(ExprTag::Cproc))); 70 | 71 | // `lurk_step` stutters on the cproc input 72 | let output = &lurk_step 73 | .call_simple(&cproc_input, &store, &lang, 0, dt) 74 | .unwrap() 75 | .output; 76 | assert_eq!(&cproc_input, output); 77 | 78 | // `cproc` *can* reduce the cproc input 79 | let output = &cproc 80 | .call_simple(&cproc_input, &store, &lang, 1, dt) 81 | .unwrap() 82 | .output; 83 | assert_ne!(&cproc_input, output); 84 | assert_eq!(output, &cproc_frame.output); 85 | 86 | // now, we set up a coprocessor call just like the previous one, except that 87 | // the coprocessor name is wrong 88 | let cont = cproc_input.pop().unwrap(); 89 | let env = cproc_input.pop().unwrap(); 90 | let expr = cproc_input.pop().unwrap(); 91 | 92 | let idx = expr.get_tuple2_idx().unwrap(); 93 | let [_, args] = store.expect_tuple2(idx); 94 | let new_name = user_sym("cproc-dumb-not"); 95 | let new_expr = store.intern_tuple2( 96 | [store.intern_symbol(&new_name), 
*args], 97 | Tag::Expr(ExprTag::Cproc), 98 | None, 99 | ); 100 | 101 | // `cproc` can't reduce the altered cproc input (with the wrong name) 102 | let cproc_input = vec![new_expr, env, cont]; 103 | assert!(cproc 104 | .call_simple(&cproc_input, &store, &lang, 0, dt) 105 | .is_err()); 106 | } 107 | -------------------------------------------------------------------------------- /benches/trie_nivc.rs: -------------------------------------------------------------------------------- 1 | use criterion::{ 2 | black_box, criterion_group, criterion_main, measurement, BenchmarkGroup, BenchmarkId, 3 | Criterion, SamplingMode, 4 | }; 5 | use halo2curves::bn256::Fr; 6 | use std::{sync::Arc, time::Duration}; 7 | 8 | use lurk::{ 9 | coprocessor::trie::{install, TrieCoproc}, 10 | dual_channel::dummy_terminal, 11 | lang::Lang, 12 | lem::{ 13 | eval::{evaluate, make_cprocs_funcs_from_lang, make_eval_step_from_config, EvalConfig}, 14 | interpreter::Frame, 15 | store::Store, 16 | }, 17 | proof::supernova::{public_params, SuperNovaProver}, 18 | state::State, 19 | }; 20 | 21 | const CODE: &str = " 22 | (let ((fib (letrec ((next (lambda (a b n target) 23 | (if (eq n target) 24 | a 25 | (next b 26 | (+ a b) 27 | (+ 1 n) 28 | target)))) 29 | (fib (next 0 1 0))) 30 | fib)) 31 | (fib-trie (.lurk.trie.new)) 32 | (fib-trie (.lurk.trie.insert fib-trie 40 (fib 40))) 33 | (fib-trie (.lurk.trie.insert fib-trie 50 (fib 50)))) 34 | (+ (num (.lurk.trie.lookup fib-trie 40)) (num (.lurk.trie.lookup fib-trie 50))))"; 35 | 36 | fn prove( 37 | name: &str, 38 | reduction_count: usize, 39 | lang: &Arc>>, 40 | store: &Arc>, 41 | frames: &[Frame], 42 | c: &mut BenchmarkGroup<'_, M>, 43 | ) { 44 | c.bench_with_input( 45 | BenchmarkId::new(name.to_string(), reduction_count), 46 | &reduction_count, 47 | |b, reduction_count| { 48 | let rc = *reduction_count; 49 | let prover = SuperNovaProver::new(rc, lang.clone()); 50 | let pp = public_params(rc, lang.clone()); 51 | b.iter(|| { 52 | let (proof, ..) 
= prover.prove_from_frames(&pp, frames, store, None).unwrap(); 53 | let _ = black_box(proof); 54 | }) 55 | }, 56 | ); 57 | } 58 | 59 | fn trie_nivc(c: &mut Criterion) { 60 | let batch_sizes = [5, 10, 100, 200]; 61 | let mut group: BenchmarkGroup<'_, _> = c.benchmark_group("trie-nivc"); 62 | group.sampling_mode(SamplingMode::Flat); // This can take a *while* 63 | group.sample_size(10); 64 | 65 | let state = State::init_lurk_state().rccell(); 66 | let mut lang = Lang::new(); 67 | install(&state, &mut lang); 68 | let lang = Arc::new(lang); 69 | 70 | let store = Arc::new(Store::::default()); 71 | let expr = store.read(state, CODE).unwrap(); 72 | 73 | let lurk_step = make_eval_step_from_config(&EvalConfig::new_nivc(&lang)); 74 | let cprocs = make_cprocs_funcs_from_lang(&lang); 75 | let frames = evaluate( 76 | Some((&lurk_step, &cprocs, &lang)), 77 | expr, 78 | &store, 79 | 1_000_000, 80 | &dummy_terminal(), 81 | ) 82 | .unwrap(); 83 | 84 | assert_eq!(frames.last().unwrap().output[0], store.num_u64(12688603180)); 85 | 86 | for size in batch_sizes { 87 | prove("rc", size, &lang, &store, &frames, &mut group); 88 | } 89 | } 90 | 91 | cfg_if::cfg_if! { 92 | if #[cfg(feature = "flamegraph")] { 93 | criterion_group! { 94 | name = benches; 95 | config = Criterion::default() 96 | .measurement_time(Duration::from_secs(120)) 97 | .sample_size(10) 98 | .with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); 99 | targets = 100 | trie_nivc, 101 | } 102 | } else { 103 | criterion_group! 
{ 104 | name = benches; 105 | config = Criterion::default() 106 | .measurement_time(Duration::from_secs(120)) 107 | .sample_size(10); 108 | targets = 109 | trie_nivc, 110 | } 111 | } 112 | } 113 | 114 | criterion_main!(benches); 115 | -------------------------------------------------------------------------------- /benches/synthesis.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use bellpepper::util_cs::witness_cs::WitnessCS; 4 | use bellpepper_core::{Circuit, ConstraintSystem}; 5 | use criterion::{ 6 | black_box, criterion_group, criterion_main, measurement, BatchSize, BenchmarkGroup, 7 | BenchmarkId, Criterion, SamplingMode, 8 | }; 9 | use halo2curves::bn256::Fr as Bn; 10 | 11 | use lurk::{ 12 | dual_channel::dummy_terminal, 13 | field::LurkField, 14 | lang::{Coproc, Lang}, 15 | lem::{eval::evaluate, multiframe::MultiFrame, pointers::Ptr, store::Store}, 16 | proof::supernova::FoldingConfig, 17 | state::{State, StateRcCell}, 18 | }; 19 | 20 | fn fib(store: &Store, state: StateRcCell, a: u64) -> Ptr { 21 | let program = format!( 22 | r#" 23 | (let ((fib (lambda (target) 24 | (letrec ((next (lambda (a b target) 25 | (if (= 0 target) 26 | a 27 | (next b 28 | (+ a b) 29 | (- target 1)))))) 30 | (next 0 1 target))))) 31 | (fib {a})) 32 | "# 33 | ); 34 | 35 | store.read(state, &program).unwrap() 36 | } 37 | 38 | fn synthesize( 39 | name: &str, 40 | reduction_count: usize, 41 | c: &mut BenchmarkGroup<'_, M>, 42 | ) { 43 | let limit = 1_000_000; 44 | let lang_rc = Arc::new(Lang::::new()); 45 | let state = State::init_lurk_state().rccell(); 46 | 47 | c.bench_with_input( 48 | BenchmarkId::new(name.to_string(), reduction_count), 49 | &reduction_count, 50 | |b, reduction_count| { 51 | let store = Arc::new(Store::default()); 52 | let fib_n = (reduction_count / 3) as u64; // Heuristic, since one fib is 35 iterations. 
53 | let ptr = fib::(&store, state.clone(), black_box(fib_n)); 54 | let frames = 55 | evaluate::>(None, ptr, &store, limit, &dummy_terminal()).unwrap(); 56 | 57 | let folding_config = 58 | Arc::new(FoldingConfig::new_ivc(lang_rc.clone(), *reduction_count)); 59 | 60 | let multiframe = MultiFrame::from_frames(&frames, &store, &folding_config)[0].clone(); 61 | 62 | b.iter_batched( 63 | || (multiframe.clone()), // avoid cloning the frames in the benchmark 64 | |multiframe| { 65 | let mut cs = WitnessCS::new(); 66 | let result = multiframe.synthesize(&mut cs); 67 | let _ = black_box(result); 68 | }, 69 | BatchSize::LargeInput, 70 | ) 71 | }, 72 | ); 73 | } 74 | 75 | fn fibonacci_synthesize(c: &mut Criterion) { 76 | let batch_sizes = [5, 10, 100, 200]; 77 | let mut group: BenchmarkGroup<'_, _> = c.benchmark_group("synthesis"); 78 | group.sampling_mode(SamplingMode::Flat); // This can take a *while* 79 | group.sample_size(10); 80 | 81 | for size in batch_sizes.iter() { 82 | synthesize("Synthesis-rc", *size, &mut group); 83 | } 84 | } 85 | 86 | cfg_if::cfg_if! { 87 | if #[cfg(feature = "flamegraph")] { 88 | criterion_group! { 89 | name = benches; 90 | config = Criterion::default() 91 | .measurement_time(Duration::from_secs(120)) 92 | .sample_size(10) 93 | .with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); 94 | targets = 95 | fibonacci_synthesize 96 | } 97 | } else { 98 | criterion_group! 
{ 99 | name = benches; 100 | config = Criterion::default() 101 | .measurement_time(Duration::from_secs(120)) 102 | .sample_size(10); 103 | targets = 104 | fibonacci_synthesize, 105 | } 106 | } 107 | } 108 | 109 | criterion_main!(benches); 110 | -------------------------------------------------------------------------------- /.github/workflows/gpu-bench-manual-comparative.yml: -------------------------------------------------------------------------------- 1 | # Run GPU benchmark on `main` and a local branch when manually triggered on the latter 2 | # Currently `main` benchmarks are uncached 3 | name: Manual GPU benchmarks compared to main 4 | 5 | on: 6 | workflow_dispatch: 7 | 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | gpu-benchmark: 14 | name: Manual GPU bench comparison 15 | runs-on: [self-hosted, gpu-bench] 16 | steps: 17 | - uses: actions/checkout@v4 18 | with: 19 | repository: argumentcomputer/ci-workflows 20 | - uses: ./.github/actions/gpu-setup 21 | with: 22 | gpu-framework: 'cuda' 23 | - uses: ./.github/actions/ci-env 24 | - uses: actions/checkout@v4 25 | - uses: actions/checkout@v4 26 | with: 27 | ref: main 28 | path: main 29 | # Install dependencies 30 | - uses: dtolnay/rust-toolchain@stable 31 | - uses: Swatinem/rust-cache@v2 32 | - uses: taiki-e/install-action@v2 33 | with: 34 | tool: just@1.22 35 | - name: Install criterion 36 | run: | 37 | cargo install cargo-criterion 38 | cargo install criterion-table 39 | - name: Set bench output format type 40 | run: | 41 | echo "LURK_BENCH_OUTPUT=commit-comment" | tee -a $GITHUB_ENV 42 | echo "BASE_COMMIT=$(git rev-parse HEAD)" | tee -a $GITHUB_ENV 43 | echo "GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader,nounits | tail -n1)" | tee -a $GITHUB_ENV 44 | echo "GPU_ID=$(echo ${{ env.GPU_NAME }} | awk '{ print $NF }')" | tee -a $GITHUB_ENV 45 | working-directory: ${{ github.workspace }}/main 46 | - 
name: Run GPU bench on base branch 47 | run: | 48 | # Copy justfile & env to main, overwriting existing config with that of PR branch 49 | cp ../benches/justfile ../benches/bench.env . 50 | just gpu-bench-ci fibonacci 51 | # Copy bench output to PR branch 52 | cp fibonacci-${{ env.BASE_COMMIT }}.json .. 53 | working-directory: ${{ github.workspace }}/main 54 | - name: Run GPU bench on PR branch 55 | run: | 56 | just gpu-bench-ci fibonacci 57 | cp fibonacci-${{ github.sha }}.json .. 58 | working-directory: ${{ github.workspace }}/benches 59 | - name: copy the benchmark template and prepare it with data 60 | run: | 61 | cp .github/tables.toml . 62 | # Get GPU name 63 | GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader,nounits | tail -n1) 64 | # Get CPU model 65 | CPU_MODEL=$(grep '^model name' /proc/cpuinfo | head -1 | awk -F ': ' '{ print $2 }') 66 | # Get vCPU count 67 | NUM_VCPUS=$(nproc --all) 68 | # Get total RAM in GB 69 | TOTAL_RAM=$(grep MemTotal /proc/meminfo | awk '{$2=$2/(1024^2); print $2, "GB RAM";}') 70 | WORKFLOW_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" 71 | 72 | # Use conditionals to ensure that only non-empty variables are inserted 73 | [[ ! -z "$GPU_NAME" ]] && sed -i "/^\"\"\"$/i $GPU_NAME" tables.toml 74 | [[ ! -z "$CPU_MODEL" ]] && sed -i "/^\"\"\"$/i $CPU_MODEL" tables.toml 75 | [[ ! -z "$NUM_VCPUS" ]] && sed -i "/^\"\"\"$/i $NUM_VCPUS vCPUs" tables.toml 76 | [[ ! 
-z "$TOTAL_RAM" ]] && sed -i "/^\"\"\"$/i $TOTAL_RAM" tables.toml 77 | sed -i "/^\"\"\"$/i Workflow run: $WORKFLOW_URL" tables.toml 78 | working-directory: ${{ github.workspace }} 79 | # Create a `criterion-table` and write in commit comment 80 | - name: Run `criterion-table` 81 | run: cat fibonacci-${{ env.BASE_COMMIT }}.json fibonacci-${{ github.sha }}.json | criterion-table > BENCHMARKS.md 82 | working-directory: ${{ github.workspace }} 83 | - name: Write bench on commit comment 84 | uses: peter-evans/commit-comment@v3 85 | with: 86 | body-path: BENCHMARKS.md 87 | 88 | -------------------------------------------------------------------------------- /src/lem/tests/stream.rs: -------------------------------------------------------------------------------- 1 | use expect_test::{expect, Expect}; 2 | use halo2curves::bn256::Fr; 3 | 4 | use crate::{ 5 | dual_channel::{dummy_terminal, pair_terminals}, 6 | lang::Coproc, 7 | lem::{ 8 | eval::{evaluate_simple, resume_stream_simple, start_stream_simple}, 9 | pointers::Ptr, 10 | store::Store, 11 | }, 12 | }; 13 | 14 | const LIMIT: usize = 200; 15 | 16 | fn get_callable(callable_str: &str, store: &Store) -> Ptr { 17 | let callable = store.read_with_default_state(callable_str).unwrap(); 18 | let (io, _) = 19 | evaluate_simple::>(None, callable, store, LIMIT, &dummy_terminal()).unwrap(); 20 | io[0] 21 | } 22 | 23 | #[inline] 24 | fn expect_eq(computed: usize, expected: &Expect) { 25 | expected.assert_eq(&computed.to_string()); 26 | } 27 | 28 | fn assert_start_stream( 29 | callable: Ptr, 30 | arg: Ptr, 31 | store: &Store, 32 | expected_result: &Ptr, 33 | expected_iterations: &Expect, 34 | ) -> Vec { 35 | let (t1, t2) = pair_terminals(); 36 | t2.send(arg).unwrap(); 37 | let (output, iterations) = 38 | start_stream_simple::>(None, callable, store, LIMIT, &t1).unwrap(); 39 | let (result, _) = store.fetch_cons(&output[0]).unwrap(); 40 | assert_eq!(result, expected_result); 41 | assert_eq!(output[1], store.intern_empty_env()); 42 | 
expect_eq(iterations, expected_iterations); 43 | output 44 | } 45 | 46 | fn assert_resume_stream( 47 | input: Vec, 48 | arg: Ptr, 49 | store: &Store, 50 | expected_result: &Ptr, 51 | expected_iterations: &Expect, 52 | ) -> Vec { 53 | let (t1, t2) = pair_terminals(); 54 | t2.send(store.intern_nil()).unwrap(); // send nil to skip stuttering 55 | t2.send(arg).unwrap(); 56 | let (output, iterations) = 57 | resume_stream_simple::>(None, input, store, LIMIT, &t1).unwrap(); 58 | let (result, _) = store.fetch_cons(&output[0]).unwrap(); 59 | assert_eq!(result, expected_result); 60 | assert_eq!(output[1], store.intern_empty_env()); 61 | expect_eq(iterations, expected_iterations); 62 | output 63 | } 64 | 65 | #[test] 66 | fn test_comm_callable() { 67 | let callable_str = "(commit (letrec ((add (lambda (counter x) 68 | (let ((counter (+ counter x))) 69 | (cons counter (commit (add counter))))))) 70 | (add 0)))"; 71 | let store = Store::::default(); 72 | let callable = get_callable(callable_str, &store); 73 | let expected_iterations = &expect!["16"]; 74 | 75 | let output = assert_start_stream( 76 | callable, 77 | store.num_u64(123), 78 | &store, 79 | &store.num_u64(123), 80 | expected_iterations, 81 | ); 82 | let output = assert_resume_stream( 83 | output, 84 | store.num_u64(321), 85 | &store, 86 | &store.num_u64(444), 87 | expected_iterations, 88 | ); 89 | assert_resume_stream( 90 | output, 91 | store.num_u64(111), 92 | &store, 93 | &store.num_u64(555), 94 | expected_iterations, 95 | ); 96 | } 97 | 98 | #[test] 99 | fn test_fun_callable() { 100 | let callable_str = "(letrec ((add (lambda (counter x) 101 | (let ((counter (+ counter x))) 102 | (cons counter (add counter)))))) 103 | (add 0))"; 104 | let store = Store::::default(); 105 | let callable = get_callable(callable_str, &store); 106 | let expected_iterations = &expect!["14"]; 107 | 108 | let output = assert_start_stream( 109 | callable, 110 | store.num_u64(123), 111 | &store, 112 | &store.num_u64(123), 113 | 
expected_iterations, 114 | ); 115 | let output = assert_resume_stream( 116 | output, 117 | store.num_u64(321), 118 | &store, 119 | &store.num_u64(444), 120 | expected_iterations, 121 | ); 122 | assert_resume_stream( 123 | output, 124 | store.num_u64(111), 125 | &store, 126 | &store.num_u64(555), 127 | expected_iterations, 128 | ); 129 | } 130 | -------------------------------------------------------------------------------- /benches/common/fib.rs: -------------------------------------------------------------------------------- 1 | // Without this, code is considered dead unless used in all benchmark targets 2 | #![allow(dead_code)] 3 | 4 | use lurk::{ 5 | dual_channel::dummy_terminal, 6 | field::LurkField, 7 | lang::{Coproc, Lang}, 8 | lem::{ 9 | eval::{eval_step, evaluate_simple}, 10 | pointers::Ptr, 11 | store::Store, 12 | }, 13 | state::user_sym, 14 | }; 15 | 16 | pub(crate) fn fib_expr(store: &Store) -> Ptr { 17 | let program = r#" 18 | (letrec ((next (lambda (a b) (next b (+ a b)))) 19 | (fib (next 0 1))) 20 | (fib)) 21 | "#; 22 | 23 | store.read_with_default_state(program).unwrap() 24 | } 25 | 26 | const LIN_COEF: usize = 7; 27 | const ANG_COEF: usize = 7; 28 | 29 | // The env output in the `fib_frame`th frame of the above, infinite Fibonacci computation contains a binding of the 30 | // nth Fibonacci number to `a`. 31 | pub(crate) fn fib_frame(n: usize) -> usize { 32 | LIN_COEF + ANG_COEF * n 33 | } 34 | 35 | // Set the limit so the last step will be filled exactly, since Lurk currently only pads terminal/error continuations. 36 | pub(crate) fn fib_limit(n: usize, rc: usize) -> usize { 37 | let frame = fib_frame(n); 38 | rc * (frame / rc + usize::from(frame % rc != 0)) 39 | } 40 | 41 | fn lurk_fib(store: &Store, n: usize) -> &Ptr { 42 | let frame_idx = fib_frame(n); 43 | let limit = frame_idx; 44 | let fib_expr = fib_expr(store); 45 | 46 | let (output, ..) 
= 47 | evaluate_simple::>(None, fib_expr, store, limit, &dummy_terminal()).unwrap(); 48 | 49 | let target_env = &output[1]; 50 | 51 | // The result is the value of the second binding (of `a`), in the target env. 52 | // See relevant excerpt of execution trace below: 53 | // 54 | // INFO lurk::lem::eval: Frame: 7 55 | // Expr: (.lurk.user.next .lurk.user.b (+ .lurk.user.a .lurk.user.b)) 56 | // Env: ((.lurk.user.b . 1) (.lurk.user.a . 0) ((.lurk.user.next . ))) 57 | // Cont: LetRec{ var: .lurk.user.fib, 58 | // saved_env: (((.lurk.user.next . ))), 59 | // body: (.lurk.user.fib), continuation: Outermost } 60 | 61 | let [_, _, rest_bindings] = store.pop_binding(target_env).unwrap(); 62 | let [_, val, _] = store.pop_binding(rest_bindings).unwrap(); 63 | val 64 | } 65 | 66 | // Returns the linear and angular coefficients for the iteration count of fib 67 | fn compute_coeffs(store: &Store) -> (usize, usize) { 68 | let mut input = vec![ 69 | fib_expr(store), 70 | store.intern_empty_env(), 71 | store.cont_outermost(), 72 | ]; 73 | let lang: Lang = Lang::new(); 74 | let mut coef_lin = 0; 75 | let coef_ang; 76 | let step_func = eval_step(); 77 | let mut iteration = 0; 78 | loop { 79 | if let Some((elts, _)) = store.fetch_list(&input[0]) { 80 | if store.fetch_symbol(&elts[0]) == Some(user_sym("next")) 81 | && store.fetch_symbol(&elts[1]) == Some(user_sym("b")) 82 | { 83 | if coef_lin == 0 { 84 | // first occurrence of `(next b ...)` 85 | coef_lin = iteration; 86 | } else { 87 | // second occurrence of `(next b ...)` 88 | coef_ang = iteration - coef_lin; 89 | break; 90 | } 91 | } 92 | } 93 | let frame = step_func 94 | .call_simple(&input, store, &lang, 0, &dummy_terminal()) 95 | .unwrap(); 96 | input.clone_from(&frame.output); 97 | iteration += 1; 98 | } 99 | (coef_lin, coef_ang) 100 | } 101 | 102 | pub(crate) fn test_coeffs() { 103 | let store = Store::::default(); 104 | assert_eq!(compute_coeffs(&store), (LIN_COEF, ANG_COEF)); 105 | } 106 | 107 | pub(crate) fn 
test_fib_io_matches() { 108 | let store = Store::::default(); 109 | let fib_9 = store.num_u64(34); 110 | let fib_10 = store.num_u64(55); 111 | let fib_11 = store.num_u64(89); 112 | assert_eq!(&fib_9, lurk_fib(&store, 9)); 113 | assert_eq!(&fib_10, lurk_fib(&store, 10)); 114 | assert_eq!(&fib_11, lurk_fib(&store, 11)); 115 | } 116 | -------------------------------------------------------------------------------- /src/syntax.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use crate::field::LurkField; 4 | use crate::num::Num; 5 | use crate::package::SymbolRef; 6 | use crate::parser::position::Pos; 7 | use crate::uint::UInt; 8 | 9 | #[cfg(not(target_arch = "wasm32"))] 10 | use proptest::prelude::*; 11 | 12 | /// Lurk's syntax for parsing 13 | #[derive(Clone, Debug, PartialEq, Eq)] 14 | pub enum Syntax { 15 | /// An element of the finite field `F` 16 | Num(Pos, Num), 17 | /// A u64 integer: 1u64, 0xffu64 18 | UInt(Pos, UInt), 19 | /// A hierarchical symbol: foo, foo.bar.baz or keyword :foo 20 | Symbol(Pos, SymbolRef), 21 | /// A string literal: "foobar", "foo\nbar" 22 | String(Pos, String), 23 | /// A character literal: 'A', 'λ' 24 | Char(Pos, char), 25 | /// A quoted expression: 'a, '(1 2) 26 | Quote(Pos, Box>), 27 | /// A nil-terminated cons-list of expressions: (1 2 3) 28 | List(Pos, Vec>), 29 | /// An improper cons-list of expressions: (1 2 . 3) 30 | Improper(Pos, Vec>, Box>), 31 | } 32 | 33 | impl Syntax { 34 | /// Retrieves the `Pos` attribute 35 | pub fn get_pos(&self) -> &Pos { 36 | match self { 37 | Self::Num(pos, _) 38 | | Self::UInt(pos, _) 39 | | Self::Symbol(pos, _) 40 | | Self::String(pos, _) 41 | | Self::Char(pos, _) 42 | | Self::Quote(pos, _) 43 | | Self::List(pos, _) 44 | | Self::Improper(pos, ..) 
=> pos, 45 | } 46 | } 47 | } 48 | 49 | #[cfg(not(target_arch = "wasm32"))] 50 | impl Arbitrary for Syntax { 51 | type Parameters = (); 52 | type Strategy = BoxedStrategy; 53 | 54 | fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { 55 | use crate::Symbol; 56 | let leaf = prop_oneof![ 57 | any::>().prop_map(|x| Syntax::Num(Pos::No, x)), 58 | any::().prop_map(|x| Syntax::UInt(Pos::No, x)), 59 | any::().prop_map(|x| Syntax::Symbol(Pos::No, x.into())), 60 | any::().prop_map(|x| Syntax::String(Pos::No, x)), 61 | any::().prop_map(|x| Syntax::Char(Pos::No, x)) 62 | ]; 63 | leaf.prop_recursive(8, 256, 10, |inner| { 64 | prop_oneof![ 65 | inner 66 | .clone() 67 | .prop_map(|x| Syntax::Quote(Pos::No, Box::new(x))), 68 | prop::collection::vec(inner.clone(), 0..10).prop_map(|x| Syntax::List(Pos::No, x)), 69 | prop::collection::vec(inner, 2..12).prop_map(|mut xs| { 70 | let x = xs.pop().unwrap(); 71 | Syntax::Improper(Pos::No, xs, Box::new(x)) 72 | }) 73 | ] 74 | }) 75 | .boxed() 76 | } 77 | } 78 | 79 | impl fmt::Display for Syntax { 80 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 81 | match self { 82 | Self::Num(_, x) => write!(f, "{x}"), 83 | Self::UInt(_, x) => write!(f, "{x}u64"), 84 | Self::Symbol(_, x) => write!(f, "{x}"), 85 | Self::String(_, x) => write!(f, "\"{}\"", x.escape_default()), 86 | Self::Char(_, x) => { 87 | if *x == '(' || *x == ')' { 88 | write!(f, "'\\{x}'") 89 | } else { 90 | write!(f, "'{}'", x.escape_default()) 91 | } 92 | } 93 | Self::Quote(_, x) => write!(f, "'{x}"), 94 | Self::List(_, xs) => { 95 | let mut iter = xs.iter().peekable(); 96 | write!(f, "(")?; 97 | while let Some(x) = iter.next() { 98 | match iter.peek() { 99 | Some(_) => write!(f, "{x} ")?, 100 | None => write!(f, "{x}")?, 101 | } 102 | } 103 | write!(f, ")") 104 | } 105 | Self::Improper(_, xs, end) => { 106 | let mut iter = xs.iter().peekable(); 107 | write!(f, "(")?; 108 | while let Some(x) = iter.next() { 109 | match iter.peek() { 110 | Some(_) => 
write!(f, "{x} ")?, 111 | None => write!(f, "{} . {}", x, *end)?, 112 | } 113 | } 114 | write!(f, ")") 115 | } 116 | } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /.github/workflows/nightly.yml: -------------------------------------------------------------------------------- 1 | # Nightly sanity checks 2 | name: nightly 3 | 4 | on: 5 | workflow_dispatch: {} 6 | # Once per day at 00:00 UTC 7 | schedule: 8 | - cron: "0 0 * * *" 9 | 10 | jobs: 11 | linux_exhaustive: 12 | runs-on: warp-ubuntu-2404-x64-32x 13 | steps: 14 | - uses: actions/checkout@v4 15 | with: 16 | repository: argumentcomputer/ci-workflows 17 | - uses: ./.github/actions/ci-env 18 | - uses: actions/checkout@v4 19 | with: 20 | submodules: recursive 21 | - name: Install dependencies 22 | run: sudo apt-get install -y pkg-config libssl-dev 23 | - uses: dtolnay/rust-toolchain@stable 24 | - uses: taiki-e/install-action@nextest 25 | - uses: Swatinem/rust-cache@v2 26 | - name: Linux Tests 27 | id: tests 28 | run: | 29 | cargo nextest run --profile ci --workspace --cargo-profile dev-ci --run-ignored all 30 | continue-on-error: true 31 | - name: Benches build successfully 32 | id: benches 33 | run: | 34 | cargo bench --no-run --profile dev-ci 35 | continue-on-error: true 36 | - name: Linux Doc Tests 37 | id: doctests 38 | run: | 39 | cargo test --doc --workspace --profile dev-ci 40 | continue-on-error: true 41 | 42 | - name: Gather status in a single variable 43 | if: steps.tests.outcome == 'success' && steps.benches.outcome == 'success' && steps.doctests.outcome == 'success' 44 | run: echo "status=true" >> $GITHUB_ENV 45 | 46 | - name: Debug 47 | run: | 48 | echo ${{ steps.tests.outcome }} 49 | echo ${{ steps.benches.outcome }} 50 | echo ${{ steps.doctests.outcome }} 51 | echo ${{ env.status }} 52 | 53 | - name: Amend MESSAGE for tests 54 | if: steps.tests.outcome != 'success' 55 | run: echo "MESSAGE=${{ env.MESSAGE }} Exhaustive test run failed in 
https://github.com/argumentcomputer/lurk-beta/actions/runs/${{ github.run_id }}" >> $GITHUB_ENV 56 | 57 | - name: Amend MESSAGE for benches 58 | if: steps.benches.outcome != 'success' 59 | run: echo "MESSAGE=${{ env.MESSAGE }} Bench compilation failed in https://github.com/argumentcomputer/lurk-beta/actions/runs/${{ github.run_id }}" >> $GITHUB_ENV 60 | 61 | - name: Amend MESSAGE for doctests 62 | if: steps.doctests.outcome != 'success' 63 | run: echo "MESSAGE=${{ env.MESSAGE }} Doc test run failed in https://github.com/argumentcomputer/lurk-beta/actions/runs/${{ github.run_id }}" >> $GITHUB_ENV 64 | 65 | - name: Find the last report issue open 66 | id: last_issue 67 | uses: micalevisk/last-issue-action@v2 68 | with: 69 | state: open 70 | labels: | 71 | nightly 72 | automated issue 73 | env: 74 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 75 | 76 | - name: Close last report open issue 77 | if: env.status == 'true' && steps.last_issue.outputs.has-found == 'true' 78 | uses: peter-evans/close-issue@v3 79 | with: 80 | issue-number: ${{ steps.last_issue.outputs.issue-number }} 81 | comment: "All nightly tests succeeded" 82 | 83 | - name: Update last report open issue 84 | if: env.status != 'true' && steps.last_issue.outputs.has-found == 'true' 85 | uses: peter-evans/create-or-update-comment@v4 86 | with: 87 | issue-number: ${{ steps.last_issue.outputs.issue-number }} 88 | body: ${{ env.MESSAGE }} 89 | edit-mode: replace 90 | 91 | - name: Create file for issue 92 | if: env.status != 'true' && steps.last_issue.outputs.has-found == 'false' 93 | run: echo "${{ env.MESSAGE }}" > ./_body.md 94 | 95 | - name: Create issue from report 96 | if: env.status != 'true' && steps.last_issue.outputs.has-found == 'false' 97 | uses: peter-evans/create-issue-from-file@v5 98 | with: 99 | title: Nightly run failed 100 | content-filepath: ./_body.md 101 | labels: | 102 | nightly 103 | automated issue 104 | 105 | unused-dependencies: 106 | uses: 
argumentcomputer/ci-workflows/.github/workflows/unused-deps.yml@main 107 | with: 108 | packages: 'protobuf-compiler libprotobuf-dev' 109 | 110 | rust-version-check: 111 | uses: argumentcomputer/ci-workflows/.github/workflows/rust-version-check.yml@main 112 | 113 | typos: 114 | uses: argumentcomputer/ci-workflows/.github/workflows/typos.yml@main 115 | -------------------------------------------------------------------------------- /src/circuit/gadgets/circom/mod.rs: -------------------------------------------------------------------------------- 1 | //! # Usage of circom coprocessors. 2 | //! 3 | //! See `examples/keccak.rs` for a quick example of how to declare a circom coprocessor. 4 | 5 | use crate::cli::paths::circom_dir; 6 | use crate::coprocessor::circom::error::CircomCoprocessorError; 7 | use crate::{ 8 | field::LurkField, 9 | lem::{pointers::Ptr, store::Store}, 10 | }; 11 | use anyhow::{bail, Result}; 12 | use bellpepper_core::{ConstraintSystem, SynthesisError}; 13 | use camino::Utf8PathBuf; 14 | use circom_scotia::r1cs::CircomInput; 15 | use std::fmt::{Debug, Display, Formatter}; 16 | 17 | use super::pointer::AllocatedPtr; 18 | 19 | /// An interface to declare a new type of Circom gadget. 20 | /// It requires 3 things: 21 | /// 1. The user defined [`CircomGadgetReference`] of the gadget. This _must_ have a format of /. 22 | /// The reference _must_ either exist into the file system (loaded via the CLI with 23 | /// `lurk coprocessor --name `) or be a valid gadget repository following 24 | /// our standard layout. 25 | /// 2. The desired release of the gadget to use. This is only relevant when dealing with remote gadget, 26 | /// not for gadget only existing on the file system. 27 | /// 3. A defined way to take a list of Lurk input pointers and turn them into a Circom input. We do not enforce the shapes 28 | /// of either the Lurk end or the Circom end, so users should take care to define what shape they expect. 29 | /// 4. 
A defined way *Lurk* should evaluate what this gadget does. This is then the implementation used in the 30 | /// `Coprocessor` trait. 31 | pub trait CircomGadget: Send + Sync + Clone { 32 | fn reference(&self) -> &CircomGadgetReference; 33 | 34 | fn version(&self) -> Option<&str> { 35 | None 36 | } 37 | 38 | fn into_circom_input>( 39 | self, 40 | cs: &mut CS, 41 | g: &crate::lem::circuit::GlobalAllocator, 42 | s: &Store, 43 | not_dummy: &bellpepper_core::boolean::Boolean, 44 | input: &[AllocatedPtr], 45 | ) -> Result>, SynthesisError>; 46 | 47 | fn evaluate_simple(&self, s: &Store, args: &[Ptr]) -> Ptr; 48 | 49 | fn arity(&self) -> usize; 50 | } 51 | 52 | #[derive(Clone, Default)] 53 | pub struct CircomGadgetReference { 54 | author: String, 55 | name: String, 56 | } 57 | 58 | impl CircomGadgetReference { 59 | pub fn new(reference: &str) -> Result { 60 | let reference_split: Vec<&str> = reference.split('/').collect(); 61 | if reference_split.len() != 2 62 | || reference_split[0].is_empty() 63 | || reference_split[1].is_empty() 64 | { 65 | bail!("Expected a reference of format \"/\", got \"{reference}\""); 66 | } 67 | 68 | Ok(Self { 69 | author: reference_split[0].parse()?, 70 | name: reference_split[1].parse()?, 71 | }) 72 | } 73 | 74 | pub fn identifier(&self) -> String { 75 | format!("{}/{}", self.author, self.name) 76 | } 77 | 78 | pub fn author(&self) -> &str { 79 | &self.author 80 | } 81 | 82 | pub fn name(&self) -> &str { 83 | &self.name 84 | } 85 | 86 | pub fn confirm_or_create_local( 87 | &self, 88 | create_if_missing: bool, 89 | ) -> Result, CircomCoprocessorError> { 90 | let gadget_dir = circom_dir().join(self.identifier()); 91 | 92 | if !gadget_dir.exists() && !create_if_missing { 93 | return Ok(None); 94 | } else if !gadget_dir.exists() && create_if_missing { 95 | std::fs::create_dir_all(&gadget_dir).map_err(|err| { 96 | CircomCoprocessorError::AssetCreationFailure { 97 | prelude: String::from("error"), 98 | reference: self.clone(), 99 | source: 
err.into(), 100 | } 101 | })?; 102 | 103 | return Ok(None); 104 | } 105 | 106 | Ok(Some(gadget_dir)) 107 | } 108 | } 109 | 110 | impl Display for CircomGadgetReference { 111 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 112 | write!(f, "{}/{}", self.author, &self.name) 113 | } 114 | } 115 | 116 | impl Debug for CircomGadgetReference { 117 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 118 | f.debug_struct("CircomGadgetReference") 119 | .field("author", &self.author) 120 | .field("name", &self.name) 121 | .finish() 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/coprocessor/sha256.rs: -------------------------------------------------------------------------------- 1 | use bellpepper::gadgets::{multipack::pack_bits, sha256::sha256}; 2 | use bellpepper_core::{boolean::Boolean, ConstraintSystem, SynthesisError}; 3 | use lurk_macros::Coproc; 4 | use serde::{Deserialize, Serialize}; 5 | use sha2::{Digest, Sha256}; 6 | use std::marker::PhantomData; 7 | 8 | use crate::{ 9 | self as lurk, 10 | circuit::gadgets::pointer::AllocatedPtr, 11 | field::LurkField, 12 | lem::{ 13 | pointers::{Ptr, ZPtr}, 14 | store::Store, 15 | }, 16 | tag::{ExprTag, Tag}, 17 | }; 18 | 19 | use super::{CoCircuit, Coprocessor}; 20 | 21 | #[derive(Clone, Debug, Serialize, Deserialize)] 22 | pub struct Sha256Coprocessor { 23 | n: usize, 24 | pub(crate) _p: PhantomData, 25 | } 26 | 27 | fn synthesize_sha256>( 28 | cs: &mut CS, 29 | ptrs: &[AllocatedPtr], 30 | ) -> Result, SynthesisError> { 31 | let zero = Boolean::constant(false); 32 | 33 | let mut bits = vec![]; 34 | 35 | let pad_to_next_len_multiple_of_8 = |bits: &mut Vec<_>| { 36 | bits.resize((bits.len() + 7) / 8 * 8, zero.clone()); 37 | }; 38 | 39 | for ptr in ptrs { 40 | let tag_bits = ptr.tag().to_bits_le_strict(ns!(cs, "preimage_tag_bits"))?; 41 | let hash_bits = ptr 42 | .hash() 43 | .to_bits_le_strict(ns!(cs, "preimage_hash_bits"))?; 44 | 45 | 
bits.extend(tag_bits); 46 | pad_to_next_len_multiple_of_8(&mut bits); 47 | bits.extend(hash_bits); 48 | pad_to_next_len_multiple_of_8(&mut bits); 49 | } 50 | 51 | bits.reverse(); 52 | 53 | let mut digest_bits = sha256(cs.namespace(|| "digest_bits"), &bits)?; 54 | 55 | digest_bits.reverse(); 56 | 57 | // Fine to lose the last <1 bit of precision. 58 | let digest_scalar = pack_bits(cs.namespace(|| "digest_scalar"), &digest_bits)?; 59 | AllocatedPtr::alloc_tag( 60 | ns!(cs, "output_expr"), 61 | ExprTag::Num.to_field(), 62 | digest_scalar, 63 | ) 64 | } 65 | 66 | fn compute_sha256(n: usize, z_ptrs: &[ZPtr]) -> F { 67 | let mut hasher = Sha256::new(); 68 | 69 | let mut input = vec![0u8; 64 * n]; 70 | 71 | for (i, z_ptr) in z_ptrs.iter().enumerate() { 72 | let tag_zptr: F = z_ptr.tag().to_field(); 73 | let hash_zptr = z_ptr.hash(); 74 | input[(64 * i)..(64 * i + 32)].copy_from_slice(&tag_zptr.to_bytes()); 75 | input[(64 * i + 32)..(64 * (i + 1))].copy_from_slice(&hash_zptr.to_bytes()); 76 | } 77 | 78 | input.reverse(); 79 | 80 | hasher.update(input); 81 | let mut bytes = hasher.finalize(); 82 | 83 | // The pack_bits gadget used by the synthesize_sha256 function 84 | // sets the n most significant bits of the hash output to zero, 85 | // where n is 256 minus the field's capacity. We do the same 86 | // here to match the output. 
87 | discard_bits::(&mut bytes); 88 | bytes.reverse(); 89 | F::from_bytes(&bytes).unwrap() 90 | } 91 | 92 | impl CoCircuit for Sha256Coprocessor { 93 | fn arity(&self) -> usize { 94 | self.n 95 | } 96 | 97 | #[inline] 98 | fn synthesize_simple>( 99 | &self, 100 | cs: &mut CS, 101 | _g: &lurk::lem::circuit::GlobalAllocator, 102 | _s: &lurk::lem::store::Store, 103 | _not_dummy: &Boolean, 104 | args: &[AllocatedPtr], 105 | ) -> Result, SynthesisError> { 106 | synthesize_sha256(cs, args) 107 | } 108 | } 109 | 110 | impl Coprocessor for Sha256Coprocessor { 111 | fn has_circuit(&self) -> bool { 112 | true 113 | } 114 | 115 | fn evaluate_simple(&self, s: &Store, args: &[Ptr]) -> Ptr { 116 | let z_ptrs = args.iter().map(|ptr| s.hash_ptr(ptr)).collect::>(); 117 | s.num(compute_sha256(self.n, &z_ptrs)) 118 | } 119 | } 120 | 121 | impl Sha256Coprocessor { 122 | pub fn new(n: usize) -> Self { 123 | Self { 124 | n, 125 | _p: Default::default(), 126 | } 127 | } 128 | } 129 | 130 | // Retains the Scalar::CAPACITY last bits of a big-endian input 131 | fn discard_bits(bytes: &mut [u8]) { 132 | let bits_to_zero = 256 - Scalar::CAPACITY as usize; 133 | let full_bytes_to_zero = bits_to_zero / 8; 134 | let partial_bits_to_zero = bits_to_zero % 8; 135 | 136 | bytes[..full_bytes_to_zero].iter_mut().for_each(|b| *b = 0); 137 | 138 | if partial_bits_to_zero > 0 { 139 | let mask = 0xFF >> partial_bits_to_zero; 140 | bytes[full_bytes_to_zero] &= mask; 141 | } 142 | } 143 | 144 | #[derive(Clone, Debug, Coproc, Serialize, Deserialize)] 145 | pub enum Sha256Coproc { 146 | SC(Sha256Coprocessor), 147 | } 148 | -------------------------------------------------------------------------------- /chain-server/src/lib.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use nova::supernova::snark::CompressedSNARK; 3 | use serde::{Deserialize, Serialize}; 4 | use std::collections::HashMap; 5 | 6 | use lurk::{ 7 | 
cli::{field_data::HasFieldModulus, zstore::ZDag}, 8 | field::LurkField, 9 | lem::{ 10 | pointers::{Ptr, ZPtr}, 11 | store::Store, 12 | }, 13 | proof::{ 14 | nova::{CurveCycleEquipped, E1}, 15 | supernova::{SS1, SS2}, 16 | }, 17 | }; 18 | 19 | #[derive(Serialize, Deserialize)] 20 | pub struct ConfigResponseData { 21 | rc: usize, 22 | callable: ZPtr, 23 | stream_init_callable: Option>, 24 | z_dag: ZDag, 25 | } 26 | 27 | impl ConfigResponseData { 28 | pub fn new( 29 | rc: usize, 30 | callable: &Ptr, 31 | stream_init_callable: Option<&Ptr>, 32 | store: &Store, 33 | ) -> Self { 34 | let mut z_dag = ZDag::default(); 35 | let cache = &mut HashMap::default(); 36 | let callable = z_dag.populate_with(callable, store, cache); 37 | let stream_init_callable = 38 | stream_init_callable.map(|x| z_dag.populate_with(x, store, cache)); 39 | Self { 40 | rc, 41 | callable, 42 | stream_init_callable, 43 | z_dag, 44 | } 45 | } 46 | 47 | pub fn interned(&self, store: &Store) -> Result<(Ptr, Option)> { 48 | let cache = &mut HashMap::default(); 49 | let callable = self.z_dag.populate_store(&self.callable, store, cache)?; 50 | let stream_init_callable = if let Some(z_ptr) = &self.stream_init_callable { 51 | Some(self.z_dag.populate_store(z_ptr, store, cache)?) 
52 | } else { 53 | None 54 | }; 55 | Ok((callable, stream_init_callable)) 56 | } 57 | 58 | #[inline] 59 | pub fn get_rc(&self) -> usize { 60 | self.rc 61 | } 62 | } 63 | 64 | impl HasFieldModulus for ConfigResponseData { 65 | fn field_modulus() -> String { 66 | F::MODULUS.to_string() 67 | } 68 | } 69 | 70 | #[derive(Serialize, Deserialize)] 71 | pub struct ChainRequestData { 72 | callable: ZPtr, 73 | argument: ZPtr, 74 | z_dag: ZDag, 75 | } 76 | 77 | impl ChainRequestData { 78 | pub fn new(callable: &Ptr, argument: &Ptr, store: &Store) -> Self { 79 | let cache = &mut HashMap::default(); 80 | let mut z_dag = ZDag::default(); 81 | let callable = z_dag.populate_with(callable, store, cache); 82 | let argument = z_dag.populate_with(argument, store, cache); 83 | Self { 84 | callable, 85 | argument, 86 | z_dag, 87 | } 88 | } 89 | 90 | pub fn interned(&self, store: &Store) -> Result<(Ptr, Ptr)> { 91 | let cache = &mut HashMap::default(); 92 | let callable = self.z_dag.populate_store(&self.callable, store, cache)?; 93 | let argument = self.z_dag.populate_store(&self.argument, store, cache)?; 94 | Ok((callable, argument)) 95 | } 96 | } 97 | 98 | impl HasFieldModulus for ChainRequestData { 99 | fn field_modulus() -> String { 100 | F::MODULUS.to_string() 101 | } 102 | } 103 | 104 | #[derive(Serialize, Deserialize)] 105 | pub struct ChainResponseData { 106 | result: ZPtr, 107 | next_callable: ZPtr, 108 | z_dag: ZDag, 109 | proof: CompressedSNARK, SS1, SS2>, 110 | } 111 | 112 | impl ChainResponseData { 113 | pub fn new( 114 | result: &Ptr, 115 | next_callable: &Ptr, 116 | store: &Store, 117 | proof: CompressedSNARK, SS1, SS2>, 118 | ) -> Self { 119 | let cache = &mut HashMap::default(); 120 | let mut z_dag = ZDag::default(); 121 | let result = z_dag.populate_with(result, store, cache); 122 | let next_callable = z_dag.populate_with(next_callable, store, cache); 123 | Self { 124 | result, 125 | next_callable, 126 | z_dag, 127 | proof, 128 | } 129 | } 130 | 131 | pub fn 
interned(&self, store: &Store) -> Result<(Ptr, Ptr)> { 132 | let cache = &mut HashMap::default(); 133 | let result = self.z_dag.populate_store(&self.result, store, cache)?; 134 | let next_callable = self 135 | .z_dag 136 | .populate_store(&self.next_callable, store, cache)?; 137 | Ok((result, next_callable)) 138 | } 139 | 140 | #[inline] 141 | pub fn get_proof(&self) -> &CompressedSNARK, SS1, SS2> { 142 | &self.proof 143 | } 144 | } 145 | 146 | impl HasFieldModulus for ChainResponseData { 147 | fn field_modulus() -> String { 148 | F::MODULUS.to_string() 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /chain-server/src/client.rs: -------------------------------------------------------------------------------- 1 | //! A demo client for illustrative purposes 2 | 3 | use halo2curves::bn256::Fr; 4 | use nova::supernova::snark::CompressedSNARK; 5 | use rustyline::{error::ReadlineError, DefaultEditor}; 6 | use std::{ 7 | env, 8 | io::{stdout, Write}, 9 | sync::Arc, 10 | }; 11 | use tonic::Request; 12 | 13 | use lurk::{ 14 | cli::field_data::{de, ser}, 15 | lang::{Coproc, Lang}, 16 | lem::store::Store, 17 | proof::{ 18 | nova::{Dual, E1}, 19 | supernova::{PublicParams, SS1, SS2}, 20 | }, 21 | public_parameters::{ 22 | instance::{Instance, Kind}, 23 | supernova_public_params, 24 | }, 25 | }; 26 | 27 | pub mod chain_prover { 28 | #![allow(unreachable_pub)] 29 | #![allow(clippy::derive_partial_eq_without_eq)] 30 | tonic::include_proto!("chain_prover"); 31 | } 32 | 33 | use chain_prover::{ 34 | chain_prover_client::ChainProverClient, ChainRequest, ChainResponse, ConfigRequest, 35 | ConfigResponse, 36 | }; 37 | 38 | use chain_server::{ChainRequestData, ChainResponseData, ConfigResponseData}; 39 | 40 | fn verify( 41 | proof: &CompressedSNARK, SS1, SS2>, 42 | pp: &PublicParams, 43 | z0_primary: &[Fr], 44 | zi_primary: &[Fr], 45 | ) -> Result> { 46 | let z0_secondary = vec![Dual::::zero()]; 47 | let zi_secondary = 
&z0_secondary; 48 | let (zi_primary_verified, zi_secondary_verified) = 49 | proof.verify(&pp.pp, pp.vk(), z0_primary, &z0_secondary)?; 50 | Ok(zi_primary == zi_primary_verified && zi_secondary == &zi_secondary_verified) 51 | } 52 | 53 | #[tokio::main] 54 | async fn main() -> Result<(), Box> { 55 | let port = env::args().collect::>()[1].parse::()?; 56 | let mut client = ChainProverClient::connect(format!("http://127.0.0.1:{port}")).await?; 57 | 58 | let ConfigResponse { 59 | config_response_data, 60 | } = client 61 | .config(Request::new(ConfigRequest {})) 62 | .await? 63 | .into_inner(); 64 | let config_response_data = de::>(&config_response_data)?; 65 | let rc = config_response_data.get_rc(); 66 | 67 | let store = Store::::default(); 68 | 69 | let (mut callable, stream_init_callable) = config_response_data.interned(&store)?; 70 | 71 | let empty_env = store.intern_empty_env(); 72 | 73 | let (cont_in, cont_out) = if stream_init_callable.is_some() { 74 | (store.cont_stream_start(), store.cont_stream_pause()) 75 | } else { 76 | (store.cont_outermost(), store.cont_terminal()) 77 | }; 78 | 79 | let instance = Instance::new( 80 | rc, 81 | Arc::new(Lang::>::new()), 82 | true, 83 | Kind::SuperNovaAuxParams, 84 | ); 85 | let pp = supernova_public_params(&instance)?; 86 | 87 | let mut editor = DefaultEditor::new()?; 88 | let mut stdout = stdout(); 89 | loop { 90 | match editor.readline("> ") { 91 | Ok(input) => { 92 | let argument = store.read_with_default_state(&input)?; 93 | let chain_request_data = ser(ChainRequestData::new(&callable, &argument, &store))?; 94 | let request = Request::new(ChainRequest { chain_request_data }); 95 | 96 | let ChainResponse { 97 | chain_response_data, 98 | } = client.chain(request).await?.into_inner(); 99 | let chain_response_data = de::>(&chain_response_data)?; 100 | let (result, next_callable) = chain_response_data.interned(&store)?; 101 | let proof = chain_response_data.get_proof(); 102 | 103 | let expr_in = 104 | 
stream_init_callable.unwrap_or_else(|| store.list([callable, argument])); 105 | 106 | let expr_out = store.cons(result, next_callable); 107 | 108 | print!( 109 | "{}\n↳ {}", 110 | expr_in.fmt_to_string_simple(&store), 111 | expr_out.fmt_to_string_simple(&store) 112 | ); 113 | stdout.flush()?; 114 | 115 | let public_inputs = store.to_scalar_vector(&[expr_in, empty_env, cont_in]); 116 | let public_outputs = store.to_scalar_vector(&[expr_out, empty_env, cont_out]); 117 | if verify(proof, &pp, &public_inputs, &public_outputs)? { 118 | println!(" ✓"); 119 | } else { 120 | println!(" ✗\nServer's proof didn't verify!"); 121 | } 122 | 123 | callable = next_callable 124 | } 125 | Err(ReadlineError::Interrupted | ReadlineError::Eof) => { 126 | println!("Exiting..."); 127 | break; 128 | } 129 | Err(e) => { 130 | eprintln!("Read line error: {e}"); 131 | break; 132 | } 133 | } 134 | } 135 | Ok(()) 136 | } 137 | -------------------------------------------------------------------------------- /examples/sha256_ivc.rs: -------------------------------------------------------------------------------- 1 | use halo2curves::bn256::Fr as Bn; 2 | use std::{sync::Arc, time::Instant}; 3 | use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry}; 4 | use tracing_texray::TeXRayLayer; 5 | 6 | use lurk::{ 7 | coprocessor::sha256::{Sha256Coproc, Sha256Coprocessor}, 8 | dual_channel::dummy_terminal, 9 | field::LurkField, 10 | lang::Lang, 11 | lem::{pointers::Ptr, store::Store}, 12 | proof::{nova::NovaProver, Prover, RecursiveSNARKTrait}, 13 | public_parameters::{ 14 | instance::{Instance, Kind}, 15 | public_params, 16 | }, 17 | state::user_sym, 18 | }; 19 | 20 | const REDUCTION_COUNT: usize = 10; 21 | 22 | fn sha256_ivc(store: &Store, n: usize, input: &[usize]) -> Ptr { 23 | assert_eq!(n, input.len()); 24 | let input = input 25 | .iter() 26 | .map(|i| i.to_string()) 27 | .collect::>() 28 | .join(" "); 29 | let input = format!("'({input})"); 30 | let program = format!( 31 | r#" 32 | (letrec 
((encode-1 (lambda (term) 33 | (let ((type (car term)) 34 | (value (cdr term))) 35 | (if (eq 'sha256 type) 36 | (eval (cons 'sha256_ivc_{n} value)) 37 | (if (eq 'lurk type) 38 | (commit value) 39 | (if (eq 'id type) 40 | value)))))) 41 | (encode (lambda (input) 42 | (if input 43 | (cons 44 | (encode-1 (car input)) 45 | (encode (cdr input))))))) 46 | (encode '((sha256 . {input})))) 47 | "# 48 | ); 49 | 50 | store.read_with_default_state(&program).unwrap() 51 | } 52 | 53 | /// Run the example in this file with 54 | /// `cargo run --release --example sha256_ivc ` 55 | /// where `n` is the needed arity 56 | fn main() { 57 | let subscriber = Registry::default() 58 | .with(fmt::layer().pretty()) 59 | .with(EnvFilter::from_default_env()) 60 | .with(TeXRayLayer::new()); 61 | tracing::subscriber::set_global_default(subscriber).unwrap(); 62 | 63 | let args = std::env::args().collect::>(); 64 | let n = args.get(1).unwrap_or(&"1".into()).parse().unwrap(); 65 | 66 | let store = Arc::new(Store::default()); 67 | let cproc_sym = user_sym(&format!("sha256_ivc_{n}")); 68 | 69 | let call = sha256_ivc(&*store, n, &(0..n).collect::>()); 70 | 71 | let mut lang = Lang::>::new(); 72 | lang.add_coprocessor(cproc_sym, Sha256Coprocessor::new(n)); 73 | let lang_rc = Arc::new(lang.clone()); 74 | 75 | let nova_prover = NovaProver::>::new(REDUCTION_COUNT, lang_rc.clone()); 76 | 77 | println!("Setting up public parameters (rc = {REDUCTION_COUNT})..."); 78 | 79 | let pp_start = Instant::now(); 80 | let instance = Instance::new(REDUCTION_COUNT, lang_rc, true, Kind::NovaPublicParams); 81 | // see the documentation on `public_params` 82 | let pp = public_params(&instance).unwrap(); 83 | let pp_end = pp_start.elapsed(); 84 | println!("Public parameters took {:?}", pp_end); 85 | 86 | println!("Beginning proof step..."); 87 | let proof_start = Instant::now(); 88 | let (proof, z0, zi, _num_steps) = tracing_texray::examine(tracing::info_span!("bang!")) 89 | .in_scope(|| { 90 | nova_prover 91 | 
.evaluate_and_prove( 92 | &pp, 93 | call, 94 | store.intern_empty_env(), 95 | &store, 96 | 10000, 97 | &dummy_terminal(), 98 | ) 99 | .unwrap() 100 | }); 101 | let proof_end = proof_start.elapsed(); 102 | 103 | println!("Proofs took {:?}", proof_end); 104 | 105 | println!("Verifying proof..."); 106 | 107 | let verify_start = Instant::now(); 108 | assert!(proof.verify(&pp, &z0, &zi).unwrap()); 109 | let verify_end = verify_start.elapsed(); 110 | 111 | println!("Verify took {:?}", verify_end); 112 | 113 | println!("Compressing proof.."); 114 | let compress_start = Instant::now(); 115 | let compressed_proof = proof.compress(&pp).unwrap(); 116 | let compress_end = compress_start.elapsed(); 117 | 118 | println!("Compression took {:?}", compress_end); 119 | 120 | let buf = bincode::serialize(&compressed_proof).unwrap(); 121 | println!("proof size : {:}B", buf.len()); 122 | 123 | let compressed_verify_start = Instant::now(); 124 | let res = compressed_proof.verify(&pp, &z0, &zi).unwrap(); 125 | let compressed_verify_end = compressed_verify_start.elapsed(); 126 | 127 | println!("Final verification took {:?}", compressed_verify_end); 128 | 129 | if res { 130 | println!( 131 | "Congratulations! You proved, verified, compressed, and verified (again!) 
an IVC SHA256 hash calculation in {:?} time!", 132 | verify_end + proof_end + verify_end + compress_end 133 | ); 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /src/parser/base.rs: -------------------------------------------------------------------------------- 1 | use std::{borrow::ToOwned, string::String, vec::Vec}; 2 | 3 | use crate::field::LurkField; 4 | use base_x; 5 | use nom::{ 6 | branch::alt, 7 | bytes::complete::{tag, take_till}, 8 | character::complete::satisfy, 9 | combinator::value, 10 | error::context, 11 | InputTakeAtPosition, 12 | }; 13 | 14 | use crate::parser::{ 15 | error::{map_parse_err, ParseError, ParseErrorKind}, 16 | ParseResult, Span, 17 | }; 18 | 19 | #[derive(PartialEq, Eq, Clone, Copy, Debug)] 20 | pub enum LitBase { 21 | Bin, 22 | Oct, 23 | Dec, 24 | Hex, 25 | } 26 | 27 | impl Default for LitBase { 28 | fn default() -> Self { 29 | Self::Hex 30 | } 31 | } 32 | 33 | impl LitBase { 34 | pub fn parse_code(i: Span<'_>) -> ParseResult<'_, F, Self> { 35 | alt(( 36 | value(Self::Bin, tag("b")), 37 | value(Self::Oct, tag("o")), 38 | value(Self::Dec, tag("d")), 39 | value(Self::Hex, tag("x")), 40 | ))(i) 41 | } 42 | 43 | /// Get the code corresponding to the base algorithm. 
44 | pub fn code(&self) -> char { 45 | match self { 46 | Self::Bin => 'b', 47 | Self::Oct => 'o', 48 | Self::Dec => 'd', 49 | Self::Hex => 'x', 50 | } 51 | } 52 | 53 | pub fn base_digits(&self) -> &str { 54 | match self { 55 | Self::Bin => "01", 56 | Self::Oct => "01234567", 57 | Self::Dec => "0123456789", 58 | Self::Hex => "0123456789abcdef", 59 | } 60 | } 61 | 62 | pub fn radix(&self) -> u32 { 63 | match self { 64 | Self::Bin => 2, 65 | Self::Oct => 8, 66 | Self::Dec => 10, 67 | Self::Hex => 16, 68 | } 69 | } 70 | 71 | pub fn is_digit(&self, x: char) -> bool { 72 | let x = x.to_ascii_lowercase(); 73 | self.base_digits().chars().any(|y| x == y) 74 | } 75 | 76 | pub fn encode>(&self, input: I) -> String { 77 | base_x::encode(self.base_digits(), input.as_ref()) 78 | } 79 | 80 | pub fn decode<'a, F: LurkField>(&self, input: Span<'a>) -> ParseResult<'a, F, Vec> { 81 | let (i, o) = input.split_at_position_complete(|x| !self.is_digit(x))?; 82 | match base_x::decode(self.base_digits(), o.fragment()) { 83 | Ok(bytes) => Ok((i, bytes)), 84 | Err(_) => Err(nom::Err::Error(ParseError::new( 85 | i, 86 | ParseErrorKind::InvalidBaseEncoding(*self), 87 | ))), 88 | } 89 | } 90 | } 91 | 92 | macro_rules! 
define_parse_digits { 93 | ($name:ident, $base:ident, $digit_str:expr, $digits_str:expr, $map_fn:expr) => { 94 | pub fn $name() -> impl Fn(Span<'_>) -> ParseResult<'_, F, String> { 95 | move |from: Span<'_>| { 96 | let (i, d) = context($digit_str, satisfy(|x| LitBase::$base.is_digit(x)))(from)?; 97 | let (i, ds) = context( 98 | $digits_str, 99 | take_till(|x| !(LitBase::$base.is_digit(x) || x == '_')), 100 | )(i)?; 101 | let ds: String = core::iter::once(d) 102 | .chain((*ds.fragment()).to_owned().chars()) 103 | .filter(|x| *x != '_') 104 | .map($map_fn) 105 | .collect(); 106 | Ok((i, ds)) 107 | } 108 | } 109 | }; 110 | } 111 | 112 | define_parse_digits!( 113 | parse_bin_digits, 114 | Bin, 115 | "binary digit", 116 | "binary digits", 117 | |x| x 118 | ); 119 | define_parse_digits!(parse_oct_digits, Oct, "octal digit", "octal digits", |x| x); 120 | define_parse_digits!( 121 | parse_dec_digits, 122 | Dec, 123 | "decimal digit", 124 | "decimal digits", 125 | |x| x 126 | ); 127 | define_parse_digits!( 128 | parse_hex_digits, 129 | Hex, 130 | "hexadecimal digit", 131 | "hexadecimal digits", 132 | |x| x.to_ascii_lowercase() 133 | ); 134 | 135 | pub fn parse_litbase_code() -> impl Fn(Span<'_>) -> ParseResult<'_, F, LitBase> { 136 | move |from: Span<'_>| { 137 | map_parse_err( 138 | alt(( 139 | value(LitBase::Bin, tag("b")), 140 | value(LitBase::Oct, tag("o")), 141 | value(LitBase::Dec, tag("d")), 142 | value(LitBase::Hex, tag("x")), 143 | ))(from), 144 | |_| ParseError::new(from, ParseErrorKind::UnknownBaseCode), 145 | ) 146 | } 147 | } 148 | 149 | #[allow(clippy::type_complexity)] 150 | pub fn parse_litbase_digits( 151 | base: LitBase, 152 | ) -> Box) -> ParseResult<'_, F, String>> { 153 | Box::new(move |from: Span<'_>| match base { 154 | LitBase::Bin => parse_bin_digits()(from), 155 | LitBase::Oct => parse_oct_digits()(from), 156 | LitBase::Dec => parse_dec_digits()(from), 157 | LitBase::Hex => parse_hex_digits()(from), 158 | }) 159 | } 160 | 
-------------------------------------------------------------------------------- /examples/sha256_nivc.rs: -------------------------------------------------------------------------------- 1 | use halo2curves::bn256::Fr as Bn; 2 | use std::{sync::Arc, time::Instant}; 3 | use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry}; 4 | use tracing_texray::TeXRayLayer; 5 | 6 | use lurk::{ 7 | coprocessor::sha256::{Sha256Coproc, Sha256Coprocessor}, 8 | dual_channel::dummy_terminal, 9 | field::LurkField, 10 | lang::Lang, 11 | lem::{ 12 | eval::{evaluate, make_cprocs_funcs_from_lang, make_eval_step_from_config, EvalConfig}, 13 | pointers::Ptr, 14 | store::Store, 15 | }, 16 | proof::{supernova::SuperNovaProver, RecursiveSNARKTrait}, 17 | public_parameters::{ 18 | instance::{Instance, Kind}, 19 | supernova_public_params, 20 | }, 21 | state::user_sym, 22 | }; 23 | 24 | const REDUCTION_COUNT: usize = 10; 25 | 26 | fn sha256_nivc(store: &Store, n: usize, input: &[usize]) -> Ptr { 27 | assert_eq!(n, input.len()); 28 | let input = input 29 | .iter() 30 | .map(|i| i.to_string()) 31 | .collect::>() 32 | .join(" "); 33 | let input = format!("({})", input); 34 | let program = format!( 35 | r#" 36 | (letrec ((encode-1 (lambda (term) 37 | (let ((type (car term)) 38 | (value (cdr term))) 39 | (if (eq 'sha256 type) 40 | (eval (cons 'sha256_nivc_{n} value)) 41 | (if (eq 'lurk type) 42 | (commit value) 43 | (if (eq 'id type) 44 | value)))))) 45 | (encode (lambda (input) 46 | (if input 47 | (cons 48 | (encode-1 (car input)) 49 | (encode (cdr input))))))) 50 | (encode '((sha256 . 
{input})))) 51 | "# 52 | ); 53 | 54 | store.read_with_default_state(&program).unwrap() 55 | } 56 | 57 | /// Run the example in this file with 58 | /// `cargo run --release --example sha256_nivc ` 59 | /// where `n` is the needed arity (default is 1) 60 | fn main() { 61 | let subscriber = Registry::default() 62 | .with(fmt::layer().pretty()) 63 | .with(EnvFilter::from_default_env()) 64 | .with(TeXRayLayer::new()); 65 | tracing::subscriber::set_global_default(subscriber).unwrap(); 66 | 67 | let args = std::env::args().collect::>(); 68 | let n = args.get(1).unwrap_or(&"1".into()).parse().unwrap(); 69 | 70 | let store = Arc::new(Store::default()); 71 | let cproc_sym = user_sym(&format!("sha256_nivc_{n}")); 72 | 73 | let call = sha256_nivc(&*store, n, &(0..n).collect::>()); 74 | 75 | let mut lang = Lang::>::new(); 76 | lang.add_coprocessor(cproc_sym, Sha256Coprocessor::new(n)); 77 | let lang_rc = Arc::new(lang.clone()); 78 | 79 | let lurk_step = make_eval_step_from_config(&EvalConfig::new_nivc(&lang)); 80 | let cprocs = make_cprocs_funcs_from_lang(&lang); 81 | let frames = evaluate( 82 | Some((&lurk_step, &cprocs, &lang)), 83 | call, 84 | &*store, 85 | 1000, 86 | &dummy_terminal(), 87 | ) 88 | .unwrap(); 89 | 90 | let supernova_prover = 91 | SuperNovaProver::>::new(REDUCTION_COUNT, lang_rc.clone()); 92 | 93 | println!("Setting up running claim parameters (rc = {REDUCTION_COUNT})..."); 94 | let pp_start = Instant::now(); 95 | 96 | let instance_primary = Instance::new(REDUCTION_COUNT, lang_rc, true, Kind::SuperNovaAuxParams); 97 | let pp = supernova_public_params(&instance_primary).unwrap(); 98 | 99 | let pp_end = pp_start.elapsed(); 100 | println!("Running claim parameters took {:?}", pp_end); 101 | 102 | println!("Beginning proof step..."); 103 | let proof_start = Instant::now(); 104 | let (proof, z0, zi, _num_steps) = tracing_texray::examine(tracing::info_span!("bang!")) 105 | .in_scope(|| { 106 | supernova_prover 107 | .prove_from_frames(&pp, &frames, &store, None) 
108 | .unwrap() 109 | }); 110 | let proof_end = proof_start.elapsed(); 111 | 112 | println!("Proofs took {:?}", proof_end); 113 | 114 | println!("Verifying proof..."); 115 | 116 | let verify_start = Instant::now(); 117 | assert!(proof.verify(&pp, &z0, &zi).unwrap()); 118 | let verify_end = verify_start.elapsed(); 119 | 120 | println!("Verify took {:?}", verify_end); 121 | 122 | println!("Compressing proof.."); 123 | let compress_start = Instant::now(); 124 | let compressed_proof = proof.compress(&pp).unwrap(); 125 | let compress_end = compress_start.elapsed(); 126 | 127 | println!("Compression took {:?}", compress_end); 128 | 129 | let buf = bincode::serialize(&compressed_proof).unwrap(); 130 | println!("proof size : {:}B", buf.len()); 131 | 132 | let compressed_verify_start = Instant::now(); 133 | let res = compressed_proof.verify(&pp, &z0, &zi).unwrap(); 134 | let compressed_verify_end = compressed_verify_start.elapsed(); 135 | 136 | println!("Final verification took {:?}", compressed_verify_end); 137 | 138 | if res { 139 | println!( 140 | "Congratulations! You proved, verified, compressed, and verified (again!) 
an NIVC SHA256 hash calculation in {:?} time!", 141 | verify_end + proof_end + verify_end + compress_end 142 | ); 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/z_data/z_ptr.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use base32ct::{Base32Unpadded, Encoding}; 3 | #[cfg(not(target_arch = "wasm32"))] 4 | use lurk_macros::serde_test; 5 | #[cfg(not(target_arch = "wasm32"))] 6 | use proptest_derive::Arbitrary; 7 | use serde::{Deserialize, Serialize}; 8 | use std::fmt; 9 | use std::fmt::{Display, Formatter}; 10 | use std::hash::Hash; 11 | 12 | #[cfg(not(target_arch = "wasm32"))] 13 | use proptest::prelude::*; 14 | 15 | #[cfg(not(target_arch = "wasm32"))] 16 | use crate::field::FWrap; 17 | use crate::field::LurkField; 18 | use crate::tag::{ContTag, ExprTag, Tag}; 19 | 20 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] 21 | #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] 22 | #[cfg_attr( 23 | not(target_arch = "wasm32"), 24 | serde_test( 25 | types(ExprTag, halo2curves::bn256::Fr), 26 | types(ContTag, halo2curves::bn256::Fr), 27 | zdata(true) 28 | ) 29 | )] 30 | // Note: the trait bound E: Tag is not necessary in the struct, but it makes the proptest strategy more efficient. 31 | /// A struct representing a scalar pointer with a tag and a value. 32 | /// 33 | /// The `ZPtr` struct is used to store a tagged scalar pointer, where `E` is its tag, and `F` the field for its values. 34 | /// It has two important aliases, `ZExprPtr` and `ZContPtr`, which are used respectively with `ExprTag` and `ContTag`, 35 | /// i.e. the type of expressions and the type of continuations. 
pub struct ZPtr<E: Tag, F: LurkField>(
    pub E,
    #[cfg_attr(
        not(target_arch = "wasm32"),
        proptest(strategy = "any::<FWrap<F>>().prop_map(|x| x.0)")
    )]
    pub F,
);

impl<E: Tag, F: LurkField> Display for ZPtr<E, F> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        let tag = self.0;
        let trimmed_f = self.1.trimmed_hex_digits();
        write!(f, "(ptr->{tag}, {trimmed_f})",)
    }
}

impl<E: Tag, F: LurkField> PartialOrd for ZPtr<E, F> {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl<E: Tag, F: LurkField> Ord for ZPtr<E, F> {
    // Order lexicographically by (tag bytes, value bytes) so ZPtrs can key BTreeMaps.
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        (
            self.0.to_field_bytes::<F>().as_ref(),
            self.1.to_repr().as_ref(),
        )
            .cmp(&(
                other.0.to_field_bytes::<F>().as_ref(),
                other.1.to_repr().as_ref(),
            ))
    }
}

#[allow(clippy::derived_hash_with_manual_eq)]
impl<E: Tag, F: LurkField> Hash for ZPtr<E, F> {
    // Hash the same (tag bytes, value bytes) representation used by `Ord`,
    // keeping Hash consistent with the derived PartialEq.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.0.to_field_bytes::<F>().as_ref().hash(state);
        self.1.to_repr().as_ref().hash(state);
    }
}

impl<E: Tag, F: LurkField> ZPtr<E, F> {
    /// Creates a ZPtr from a tag and a value
    pub fn from_parts(tag: E, value: F) -> Self {
        ZPtr(tag, value)
    }

    /// Returns the tag
    pub fn tag(&self) -> &E {
        &self.0
    }

    /// Returns the tag in field representation
    pub fn tag_field(&self) -> F {
        self.0.to_field::<F>()
    }

    /// Returns the value
    pub fn value(&self) -> &F {
        &self.1
    }

    /// Returns the (tag, value) pair with the tag lifted into the field.
    pub fn parts(&self) -> (F, F) {
        (self.tag_field(), self.1)
    }

    // TODO: Create a permanent format for ZPtr strings/ZIDs
    /// Converts the ZPtr to a base32-encoded string, as `<tag>z<value>`.
    pub fn to_base32(&self) -> String {
        let tag: u16 = self.0.into();
        let tag_b32 = Base32Unpadded::encode_string(&tag.to_le_bytes());
        let val_b32 = Base32Unpadded::encode_string(self.1.to_repr().as_ref());
        format!("{tag_b32}z{val_b32}")
    }

    /// Converts a base32-encoded string to a ZPtr
    pub
fn from_base32(zptr: &str) -> Result { 115 | let tag_bytes = Base32Unpadded::decode_vec(&zptr[0..4]) 116 | .map_err(|e| anyhow!(format!("Failed to decode base32: {}", e)))?; 117 | let val_bytes = Base32Unpadded::decode_vec(&zptr[5..]) 118 | .map_err(|e| anyhow!(format!("Failed to decode base32: {}", e)))?; 119 | let tag = E::try_from(u16::from_le_bytes(tag_bytes[..2].try_into().unwrap())) 120 | .map_err(|e| anyhow!(format!("Failed to decode tag: {}", e)))?; 121 | let val = F::from_bytes(&val_bytes).ok_or_else(|| anyhow!("Failed to decode field"))?; 122 | Ok(Self::from_parts(tag, val)) 123 | } 124 | } 125 | 126 | /// Alias for an expression pointer 127 | pub type ZExprPtr = ZPtr; 128 | 129 | /// Alias for a continuation pointer 130 | pub type ZContPtr = ZPtr; 131 | 132 | #[cfg(test)] 133 | mod tests { 134 | use super::*; 135 | use halo2curves::bn256::Fr as Scalar; 136 | 137 | proptest! { 138 | #[test] 139 | fn prop_base32_z_expr_ptr(x in any::>()) { 140 | assert_eq!(x, ZPtr::from_base32(&x.to_base32()).unwrap()); 141 | } 142 | } 143 | 144 | #[test] 145 | fn unit_base32_z_expr_ptr() { 146 | let zptr = ZExprPtr::from_parts(ExprTag::Nil, Scalar::zero()); 147 | assert_eq!(zptr, ZPtr::from_base32(&zptr.to_base32()).unwrap()); 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /src/z_data/z_store.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(target_arch = "wasm32"))] 2 | use lurk_macros::serde_test; 3 | #[cfg(not(target_arch = "wasm32"))] 4 | use proptest::prelude::*; 5 | #[cfg(not(target_arch = "wasm32"))] 6 | use proptest_derive::Arbitrary; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use std::collections::BTreeMap; 10 | 11 | use crate::hash::PoseidonCache; 12 | use crate::symbol::Symbol; 13 | use crate::tag::ExprTag; 14 | use crate::uint::UInt; 15 | use crate::z_cont::ZCont; 16 | use crate::z_expr::ZExpr; 17 | use crate::z_ptr::ZContPtr; 18 | use 
crate::z_ptr::ZExprPtr; 19 | use crate::z_ptr::ZPtr; 20 | 21 | use crate::field::LurkField; 22 | 23 | #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Default)] 24 | #[cfg_attr(not(target_arch = "wasm32"), derive(Arbitrary))] 25 | #[cfg_attr(not(target_arch = "wasm32"), proptest(no_bound))] 26 | #[cfg_attr( 27 | not(target_arch = "wasm32"), 28 | serde_test(types(halo2curves::bn256::Fr), zdata(true)) 29 | )] 30 | /// A `ZStore` is a content-addressed, serializable representation of a Lurk store 31 | /// 32 | /// Whereas a `Store` contains caches of each type of Lurk data, a `ZStore` 33 | /// contains a generic map of pointers to expressions and a map of pointers to 34 | /// continuations that can each be retrieved by traversing their `ZPtr` DAG 35 | pub struct ZStore { 36 | pub expr_map: BTreeMap, Option>>, 37 | pub cont_map: BTreeMap, Option>>, 38 | } 39 | 40 | impl ZStore { 41 | /// Creates a new, empty `ZStore` 42 | pub fn new() -> Self { 43 | ZStore { 44 | expr_map: BTreeMap::new(), 45 | cont_map: BTreeMap::new(), 46 | } 47 | } 48 | 49 | /// Returns the `ZExpr` immediately corresponding to the `ZExprPtr`, where "immediate" means 50 | /// that the `ZExprPtr`'s field element contains the literal value associated with the tag, 51 | /// so we can return the value without needing to retrieve it from the ZStore. 52 | /// 53 | /// E.g. in `ZExprPtr { ExprTag::U64, F::zero() }`, the `F::zero()` is the field representation 54 | /// of the number 0, displayed as `0x000000<...>`. Because we know the value's type is `ExprTag::U64`, 55 | /// we can infer that this pointer refers to a `ZExpr::UInt(UInt::U64(0u64)))` and return it. 
56 | pub fn immediate_z_expr(ptr: &ZExprPtr) -> Option> { 57 | match ptr { 58 | ZPtr(ExprTag::U64, val) => { 59 | let x = F::to_u64(val)?; 60 | Some(ZExpr::UInt(UInt::U64(x))) 61 | } 62 | ZPtr(ExprTag::Char, val) => { 63 | let x = F::to_char(val)?; 64 | Some(ZExpr::Char(x)) 65 | } 66 | ZPtr(ExprTag::Num, val) => Some(ZExpr::Num(*val)), 67 | ZPtr(ExprTag::Str, val) if *val == F::ZERO => Some(ZExpr::EmptyStr), 68 | ZPtr(ExprTag::Sym, val) if *val == F::ZERO => Some(ZExpr::RootSym), 69 | ZPtr(ExprTag::Key, val) if *val == F::ZERO => Some(ZExpr::RootSym), 70 | _ => None, 71 | } 72 | } 73 | 74 | /// Returns the owned `ZExpr` corresponding to `ptr` if the former exists 75 | pub fn get_expr(&self, ptr: &ZExprPtr) -> Option> { 76 | ZStore::immediate_z_expr(ptr).or_else(|| self.expr_map.get(ptr).cloned()?) 77 | } 78 | 79 | /// If the entry is not present, or the pointer is immediate, returns `None`, 80 | /// otherwise updates the value in the `ZStore` and returns the old value. 81 | pub fn insert_z_expr( 82 | &mut self, 83 | ptr: &ZExprPtr, 84 | expr: Option>, 85 | ) -> Option>> { 86 | if ZStore::immediate_z_expr(ptr).is_some() { 87 | None 88 | } else { 89 | self.expr_map.insert(*ptr, expr) 90 | } 91 | } 92 | 93 | /// Returns the owned `ZCont` corresponding to `ptr` if the former exists 94 | pub fn get_cont(&self, ptr: &ZContPtr) -> Option> { 95 | self.cont_map.get(ptr).cloned()? 
96 | } 97 | 98 | /// Stores a null symbol in the `ZStore` and returns the resulting pointer 99 | pub fn nil_z_ptr(&mut self, poseidon_cache: &PoseidonCache) -> ZExprPtr { 100 | let z_ptr = self 101 | .put_symbol(&crate::state::lurk_sym("nil"), poseidon_cache) 102 | .0; 103 | ZPtr(ExprTag::Nil, z_ptr.1) 104 | } 105 | 106 | /// Stores a string in the `ZStore` and returns the resulting pointer and `ZExpr` 107 | pub fn put_string( 108 | &mut self, 109 | string: &str, 110 | poseidon_cache: &PoseidonCache, 111 | ) -> (ZExprPtr, ZExpr) { 112 | let mut expr = ZExpr::EmptyStr; 113 | let mut ptr = expr.z_ptr(poseidon_cache); 114 | for c in string.chars().rev() { 115 | expr = ZExpr::Str(ZPtr(ExprTag::Char, F::from_char(c)), ptr); 116 | ptr = expr.z_ptr(poseidon_cache); 117 | } 118 | self.insert_z_expr(&ptr, Some(expr.clone())); 119 | (ptr, expr) 120 | } 121 | 122 | /// Stores a symbol in the `ZStore` and returns the resulting pointer and `ZExpr` 123 | pub fn put_symbol( 124 | &mut self, 125 | sym: &Symbol, 126 | poseidon_cache: &PoseidonCache, 127 | ) -> (ZExprPtr, ZExpr) { 128 | let mut expr = ZExpr::RootSym; 129 | let mut ptr = expr.z_ptr(poseidon_cache); 130 | for s in sym.path() { 131 | let (str_ptr, _) = self.put_string(s, poseidon_cache); 132 | expr = ZExpr::Sym(str_ptr, ptr); 133 | ptr = expr.z_ptr(poseidon_cache); 134 | } 135 | self.insert_z_expr(&ptr, Some(expr.clone())); 136 | (ptr, expr) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /lurk-metrics/src/data.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fmt::{self, Display, Formatter}; 3 | 4 | use metrics::Key; 5 | use tracing::info; 6 | 7 | pub const METRICS_TARGET_NAME: &str = "lurk::metrics"; 8 | 9 | /// A map of metrics data 10 | #[derive(Debug, Default)] 11 | pub(crate) struct Metrics(HashMap); 12 | 13 | impl Metrics { 14 | /// Get a mutable reference to a metric, 
creating it if it doesn't already exist in the map 15 | pub(crate) fn get_mut(&mut self, typ: MetricType, key: &Key) -> &mut Metric { 16 | if !self.0.contains_key(key) { 17 | self.0.insert(key.clone(), Metric::new(typ)); 18 | } 19 | self.0.get_mut(key).unwrap() 20 | } 21 | 22 | /// Aggregate another [Metrics] into this one 23 | pub(crate) fn aggregate(&mut self, other: Metrics) { 24 | for (key, data) in other.0 { 25 | match self.0.get_mut(&key) { 26 | Some(me) => { 27 | me.aggregate(data); 28 | } 29 | None => { 30 | self.0.insert(key, data); 31 | } 32 | } 33 | } 34 | } 35 | 36 | /// Emit this [Metrics] object 37 | pub(crate) fn emit(self) { 38 | let mut keys = self.0.keys().collect::>(); 39 | keys.sort(); 40 | for key in keys { 41 | let metric = self.0.get(key).unwrap(); 42 | let labels = if key.labels().len() == 0 { 43 | String::new() 44 | } else { 45 | format!( 46 | "[{}]", 47 | key.labels() 48 | .map(|label| format!("{}={}", label.key(), label.value())) 49 | .collect::>() 50 | .join(",") 51 | ) 52 | }; 53 | info!( 54 | target: METRICS_TARGET_NAME, 55 | "{}{}: {}", 56 | key.name(), 57 | labels, 58 | metric, 59 | ); 60 | } 61 | } 62 | 63 | #[cfg(test)] 64 | pub(crate) fn iter(&self) -> impl Iterator { 65 | self.0.iter() 66 | } 67 | } 68 | 69 | #[derive(Debug, Clone, Copy)] 70 | pub(crate) enum MetricType { 71 | Counter, 72 | Gauge, 73 | Histogram, 74 | } 75 | 76 | #[derive(Debug)] 77 | pub(crate) enum Metric { 78 | Counter(ValueAndCount), 79 | Gauge(ValueAndCount), 80 | // Fixed scaling configuration for histograms, tuned for 81 | // microsecond-scale latency timers. It saturates at 60 seconds. 
82 | Histogram(hdrhistogram::Histogram), 83 | } 84 | 85 | impl Metric { 86 | fn new(typ: MetricType) -> Self { 87 | match typ { 88 | MetricType::Counter => Metric::Counter(Default::default()), 89 | MetricType::Gauge => Metric::Gauge(Default::default()), 90 | MetricType::Histogram => Metric::Histogram( 91 | hdrhistogram::Histogram::new_with_bounds(1, 60 * 1000 * 1000, 2).unwrap(), 92 | ), 93 | } 94 | } 95 | 96 | pub(crate) fn increment(&mut self, value: u64) { 97 | match self { 98 | Metric::Counter(inner) => { 99 | inner.sum += value; 100 | inner.n += 1; 101 | } 102 | Metric::Gauge(_inner) => { 103 | panic!("increment gauge values are not supported"); 104 | } 105 | Metric::Histogram(inner) => { 106 | inner.saturating_record(value); 107 | } 108 | } 109 | } 110 | 111 | pub(crate) fn set(&mut self, value: f64) { 112 | match self { 113 | Metric::Counter(_inner) => panic!("set counter values are not supported"), 114 | Metric::Gauge(inner) => { 115 | inner.sum = value; 116 | inner.n = 1; 117 | } 118 | Metric::Histogram(_inner) => panic!("set histogram values are not supported"), 119 | } 120 | } 121 | 122 | fn aggregate(&mut self, other: Metric) { 123 | match (self, other) { 124 | (Metric::Counter(me), Metric::Counter(other)) => { 125 | me.sum += other.sum; 126 | me.n += other.n; 127 | } 128 | (Metric::Gauge(me), Metric::Gauge(other)) => { 129 | me.sum += other.sum; 130 | me.n += other.n; 131 | } 132 | (Metric::Histogram(me), Metric::Histogram(other)) => { 133 | me.add(other).unwrap(); 134 | } 135 | _ => debug_assert!(false, "can't aggregate different types"), 136 | } 137 | } 138 | } 139 | 140 | impl Display for Metric { 141 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 142 | match self { 143 | Metric::Counter(inner) => { 144 | if inner.sum == inner.n { 145 | f.write_fmt(format_args!("{}", inner.sum)) 146 | } else { 147 | f.write_fmt(format_args!("{} (n={})", inner.sum, inner.n)) 148 | } 149 | } 150 | Metric::Gauge(inner) => f.write_fmt(format_args!("{} (n={})", 
inner.sum, inner.n)), 151 | Metric::Histogram(inner) => f.write_fmt(format_args!( 152 | "n={}: min={} p10={} p50={} avg={:.2} p90={} p99={} p99.9={} max={}", 153 | inner.len(), 154 | inner.min(), 155 | inner.value_at_quantile(0.1), 156 | inner.value_at_quantile(0.5), 157 | inner.mean(), 158 | inner.value_at_quantile(0.9), 159 | inner.value_at_quantile(0.99), 160 | inner.value_at_quantile(0.999), 161 | inner.max(), 162 | )), 163 | } 164 | } 165 | } 166 | 167 | #[derive(Debug, Default)] 168 | pub(crate) struct ValueAndCount { 169 | pub(crate) sum: T, 170 | pub(crate) n: u64, 171 | } 172 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | 2 | # Contributor Covenant Code of Conduct 3 | 4 | ## Our Pledge 5 | 6 | We as members, contributors, and leaders pledge to make participation in our 7 | community a harassment-free experience for everyone, regardless of age, body 8 | size, visible or invisible disability, ethnicity, sex characteristics, gender 9 | identity and expression, level of experience, education, socio-economic status, 10 | nationality, personal appearance, race, caste, color, religion, or sexual 11 | identity and orientation. 12 | 13 | We pledge to act and interact in ways that contribute to an open, welcoming, 14 | diverse, inclusive, and healthy community. 
15 | 16 | ## Our Standards 17 | 18 | Examples of behavior that contributes to a positive environment for our 19 | community include: 20 | 21 | * Demonstrating empathy and kindness toward other people 22 | * Being respectful of differing opinions, viewpoints, and experiences 23 | * Giving and gracefully accepting constructive feedback 24 | * Accepting responsibility and apologizing to those affected by our mistakes, 25 | and learning from the experience 26 | * Focusing on what is best not just for us as individuals, but for the overall 27 | community 28 | 29 | Examples of unacceptable behavior include: 30 | 31 | * The use of sexualized language or imagery, and sexual attention or advances of 32 | any kind 33 | * Trolling, insulting or derogatory comments, and personal or political attacks 34 | * Public or private harassment 35 | * Publishing others' private information, such as a physical or email address, 36 | without their explicit permission 37 | * Other conduct which could reasonably be considered inappropriate in a 38 | professional setting 39 | 40 | ## Enforcement Responsibilities 41 | 42 | Community leaders are responsible for clarifying and enforcing our standards of 43 | acceptable behavior and will take appropriate and fair corrective action in 44 | response to any behavior that they deem inappropriate, threatening, offensive, 45 | or harmful. 46 | 47 | Community leaders have the right and responsibility to remove, edit, or reject 48 | comments, commits, code, wiki edits, issues, and other contributions that are 49 | not aligned to this Code of Conduct, and will communicate reasons for moderation 50 | decisions when appropriate. 51 | 52 | ## Scope 53 | 54 | This Code of Conduct applies within all community spaces, and also applies when 55 | an individual is officially representing the community in public spaces. 
56 | Examples of representing our community include using an official e-mail address, 57 | posting via an official social media account, or acting as an appointed 58 | representative at an online or offline event. 59 | 60 | ## Enforcement 61 | 62 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 63 | reported to the community leaders responsible for enforcement at community@argument.xyz. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series of 86 | actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or permanent 93 | ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 
99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within the 113 | community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.1, available at 119 | [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. 120 | 121 | Community Impact Guidelines were inspired by 122 | [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. 123 | 124 | For answers to common questions about this code of conduct, see the FAQ at 125 | [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at 126 | [https://www.contributor-covenant.org/translations][translations]. 
127 | 128 | [homepage]: https://www.contributor-covenant.org 129 | [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html 130 | [Mozilla CoC]: https://github.com/mozilla/diversity 131 | [FAQ]: https://www.contributor-covenant.org/faq 132 | [translations]: https://www.contributor-covenant.org/translations 133 | -------------------------------------------------------------------------------- /src/lem/pointers.rs: -------------------------------------------------------------------------------- 1 | use match_opt::match_opt; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | use crate::{ 5 | field::{FWrap, LurkField}, 6 | tag::{ 7 | ExprTag::{Cons, Fun, Nil, Num, Str, Sym}, 8 | Tag as TagTrait, 9 | }, 10 | }; 11 | 12 | use super::Tag; 13 | 14 | /// An ergonomic pair type for tagged pointer semantics 15 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] 16 | pub struct GPtr { 17 | pub tag: T, 18 | pub val: V, 19 | } 20 | 21 | impl GPtr { 22 | #[inline] 23 | pub fn new(tag: T, val: V) -> Self { 24 | Self { tag, val } 25 | } 26 | 27 | #[inline] 28 | pub fn tag(&self) -> &T { 29 | &self.tag 30 | } 31 | 32 | #[inline] 33 | pub fn val(&self) -> &V { 34 | &self.val 35 | } 36 | 37 | #[inline] 38 | pub fn parts(&self) -> (&T, &V) { 39 | let Self { tag, val } = self; 40 | (tag, val) 41 | } 42 | 43 | #[inline] 44 | pub fn into_parts(self) -> (T, V) { 45 | let Self { tag, val } = self; 46 | (tag, val) 47 | } 48 | } 49 | 50 | /// Encoding for pointer children that are stored in index-based data structures 51 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] 52 | pub enum IVal { 53 | /// Holds the index of leaf data 54 | Atom(usize), 55 | /// Holds the index of two children 56 | Tuple2(usize), 57 | /// Holds the index of three children 58 | Tuple3(usize), 59 | /// Holds the index of four children 60 | Tuple4(usize), 61 | /// Similar to `Tuple3`, but ignores the tags of the first and third children 62 | /// for 
content-addressing 63 | Compact(usize), 64 | } 65 | 66 | impl IVal { 67 | #[inline] 68 | pub fn is_atom(&self) -> bool { 69 | matches!(self, IVal::Atom(_)) 70 | } 71 | 72 | #[inline] 73 | pub fn is_compound(&self) -> bool { 74 | !self.is_atom() 75 | } 76 | 77 | #[inline] 78 | pub fn get_atom_idx(&self) -> Option { 79 | match_opt!(self, IVal::Atom(idx) => *idx) 80 | } 81 | 82 | #[inline] 83 | pub fn get_tuple2_idx(&self) -> Option { 84 | match_opt!(self, IVal::Tuple2(idx) => *idx) 85 | } 86 | 87 | #[inline] 88 | pub fn get_tuple3_idx(&self) -> Option { 89 | match_opt!(self, IVal::Tuple3(idx) => *idx) 90 | } 91 | 92 | #[inline] 93 | pub fn get_tuple4_idx(&self) -> Option { 94 | match_opt!(self, IVal::Tuple4(idx) => *idx) 95 | } 96 | 97 | #[inline] 98 | pub fn get_compact_idx(&self) -> Option { 99 | match_opt!(self, IVal::Compact(idx) => *idx) 100 | } 101 | } 102 | 103 | /// A `GPtr` that is generic on the `tag` type and uses `IVal` as the `val` type 104 | pub type IPtr = GPtr; 105 | 106 | /// Specialization of `IPtr` that uses LEM tags 107 | pub type Ptr = IPtr; 108 | 109 | impl Ptr { 110 | #[inline] 111 | pub fn has_tag(&self, tag: &Tag) -> bool { 112 | self.tag() == tag 113 | } 114 | 115 | #[inline] 116 | pub fn has_tag_in(&self, tags: &[Tag]) -> bool { 117 | tags.contains(self.tag()) 118 | } 119 | 120 | #[inline] 121 | pub fn is_sym(&self) -> bool { 122 | self.has_tag(&Tag::Expr(Sym)) 123 | } 124 | 125 | #[inline] 126 | pub fn is_num(&self) -> bool { 127 | self.has_tag(&Tag::Expr(Num)) 128 | } 129 | 130 | #[inline] 131 | pub fn is_str(&self) -> bool { 132 | self.has_tag(&Tag::Expr(Str)) 133 | } 134 | 135 | #[inline] 136 | pub fn is_fun(&self) -> bool { 137 | self.has_tag(&Tag::Expr(Fun)) 138 | } 139 | 140 | #[inline] 141 | pub fn is_nil(&self) -> bool { 142 | self.has_tag(&Tag::Expr(Nil)) 143 | } 144 | 145 | #[inline] 146 | pub fn is_cons(&self) -> bool { 147 | self.has_tag(&Tag::Expr(Cons)) 148 | } 149 | 150 | #[inline] 151 | pub fn is_list(&self) -> bool { 152 | 
self.has_tag_in(&[Tag::Expr(Cons), Tag::Expr(Nil)]) 153 | } 154 | 155 | #[inline] 156 | pub fn cast(self, tag: Tag) -> Self { 157 | Ptr { tag, val: self.val } 158 | } 159 | 160 | #[inline] 161 | pub fn get_atom_idx(&self) -> Option { 162 | self.val().get_atom_idx() 163 | } 164 | 165 | #[inline] 166 | pub fn get_tuple2_idx(&self) -> Option { 167 | self.val().get_tuple2_idx() 168 | } 169 | 170 | #[inline] 171 | pub fn get_tuple3_idx(&self) -> Option { 172 | self.val().get_tuple3_idx() 173 | } 174 | 175 | #[inline] 176 | pub fn get_tuple4_idx(&self) -> Option { 177 | self.val().get_tuple4_idx() 178 | } 179 | 180 | #[inline] 181 | pub fn atom(tag: Tag, idx: usize) -> Ptr { 182 | Ptr { 183 | tag, 184 | val: IVal::Atom(idx), 185 | } 186 | } 187 | } 188 | 189 | /// A `ZPtr` is the content-addressed representation of a `Ptr` that is used to 190 | /// uniquely identify arbitrary DAGs with raw field elements (modulo unlikely 191 | /// hash collisions). 192 | /// 193 | /// In principle, `ZPtr`s could be used in place of `Ptr`, but it is important to 194 | /// note that content-addressing can be expensive, especially in the context of 195 | /// interpretation, because of the Poseidon hashes. That's why we operate on `Ptr`s 196 | /// when interpreting LEMs and delay the need for `ZPtr`s as much as possible. 
pub type ZPtr<F> = GPtr<Tag, FWrap<F>>;

impl<F: LurkField> ZPtr<F> {
    /// A placeholder `ZPtr`: the `Nil` tag paired with the zero field element.
    #[inline]
    pub fn dummy() -> Self {
        GPtr::new(Tag::Expr(Nil), FWrap(F::ZERO))
    }

    /// Borrows the underlying hash (the field element in the value slot).
    #[inline]
    pub fn hash(&self) -> &F {
        self.val().get()
    }

    /// Builds a `ZPtr` from a tag and a raw field element.
    #[inline]
    pub fn from_parts(tag: Tag, hash: F) -> Self {
        Self::new(tag, FWrap(hash))
    }
}

impl<F: LurkField, T: TagTrait> GPtr<T, FWrap<F>> {
    /// Converts the tag to its field-element representation.
    #[inline]
    pub fn tag_field(&self) -> F {
        self.tag().to_field()
    }
}
--------------------------------------------------------------------------------
/src/cli/circom.rs:
--------------------------------------------------------------------------------
use std::{fs, path::Path, process::Command};

#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;

use ansi_term::Colour::{Green, Red};
use anyhow::{anyhow, bail, Result};
use camino::Utf8PathBuf;

use crate::cli::paths::{circom_binary_path, circom_dir};

const CIRCOM_VERSION: &str = "2.1.6";

/// Downloading is not supported on wasm targets; always errors.
#[cfg(target_arch = "wasm32")]
fn download_circom_binary(_path: impl AsRef<Path>) -> Result<Command> {
    bail!("wasm does not support downloading")
}

/// Downloads the pinned circom release binary for the current OS to `path`
/// and returns a `Command` pointing at it.
#[cfg(not(target_arch = "wasm32"))]
fn download_circom_binary(path: impl AsRef<Path>) -> Result<Command> {
    use std::io::Write;

    let url = match std::env::consts::OS {
        "linux" => format!("https://github.com/iden3/circom/releases/download/v{CIRCOM_VERSION}/circom-linux-amd64"),
        "macos" => format!("https://github.com/iden3/circom/releases/download/v{CIRCOM_VERSION}/circom-macos-amd64"),
        "windows" => {
            format!("https://github.com/iden3/circom/releases/download/v{CIRCOM_VERSION}/circom-windows-amd64.exe")
        }
        os => {
            bail!("Unsupported OS: {os}.
Unable to automatically download the necessary circom binary, please manually download Circom v{CIRCOM_VERSION} to `.lurk/circom/circom`"); 31 | } 32 | }; 33 | 34 | let response = reqwest::blocking::get(url)?.bytes()?; 35 | let mut out = fs::File::create(path.as_ref())?; 36 | out.write_all(&response)?; 37 | 38 | #[cfg(unix)] 39 | fs::set_permissions(path.as_ref(), fs::Permissions::from_mode(0o755))?; 40 | 41 | Ok(Command::new(path.as_ref().as_os_str())) 42 | } 43 | 44 | /// We try to find the circom binary at `/circom`, 45 | /// where `` can be configured via the config file, 46 | /// a environment variable, or through a CLI argument, in that order. 47 | /// 48 | /// We *do not* consider the case where the user already has some 49 | /// `circom` binary available in their `$PATH`. The user will have two 50 | /// possibly conflicting circom binaries floating around. However, things 51 | /// should be kept separate as Lurk will never touch the user binary 52 | /// and the user should never manually call the Lurk Circom binary. 53 | /// 54 | /// Whatever path is chosen, we then test if the `circom` binary 55 | /// exists. If it does, we return the path. Otherwise we download 56 | /// the binary to the location and return the path. 57 | fn get_circom_binary() -> Result { 58 | let circom_path = circom_binary_path(); 59 | 60 | let output = Command::new(&circom_path).arg("--version").output(); 61 | 62 | let success = match output { 63 | Ok(output) => { 64 | // TODO: in future add back checksum check? 65 | output.status.success() 66 | && String::from_utf8_lossy(&output.stdout).contains(CIRCOM_VERSION) 67 | } 68 | Err(_) => false, 69 | }; 70 | 71 | if success { 72 | Ok(Command::new(circom_path)) 73 | } else { 74 | download_circom_binary(circom_path) 75 | } 76 | } 77 | 78 | /// This method will compile a designated Circom circuit and store the generated static files in our 79 | /// lurk folder. 
80 | pub(crate) fn create_circom_gadget(circom_folder: &Utf8PathBuf, reference: &str) -> Result<()> { 81 | let circom_gadget = circom_dir().join(reference); 82 | 83 | // We expect a format / for the name. 84 | let reference_split: Vec<&str> = reference.split('/').collect(); 85 | if reference_split.len() != 2 || reference_split[0].is_empty() || reference_split[1].is_empty() 86 | { 87 | bail!("Expected a reference of format \"/\", got \"{reference}\""); 88 | } 89 | 90 | let circom_file = circom_folder 91 | .join(reference_split[1]) 92 | .with_extension("circom"); 93 | 94 | // TODO: support for other fields 95 | let default_field = "vesta"; 96 | let field = if let Ok(lurk_field) = std::env::var("LURK_FIELD") { 97 | // FG: The prime is actually the reverse of the field in $LURK_FIELD, 98 | // because circom and lurk have different semantics about which field should be specified 99 | // (circom wants the base field and lurk the scalar field). 100 | match lurk_field.as_str() { 101 | "PALLAS" => "vesta", 102 | "VESTA" => "pallas", 103 | _ => bail!("Unsupported field: {lurk_field}"), 104 | } 105 | } else { 106 | default_field 107 | }; 108 | 109 | println!("Running circom binary to generate r1cs and witness files to {circom_gadget:?}"); 110 | fs::create_dir_all(&circom_gadget) 111 | .map_err(|err| anyhow!("Couldn't create folder for static files: {err}"))?; 112 | let output = get_circom_binary()? 
113 | .args(&[ 114 | circom_file, 115 | "--r1cs".into(), 116 | "--wasm".into(), 117 | "--output".into(), 118 | circom_gadget.clone(), 119 | "--prime".into(), 120 | field.into(), 121 | ]) 122 | .output() 123 | .expect("circom failed"); 124 | 125 | if !output.status.success() { 126 | eprintln!( 127 | "{} Please check that your input files are correct,", 128 | Red.bold().paint("Circom failed.") 129 | ); 130 | eprintln!(" and refer to the circom stderr output for further information:\n"); 131 | bail!("{}", String::from_utf8_lossy(&output.stderr)); 132 | } 133 | 134 | // Get out _js/.wasm and .r1cs and put them in ///*. 135 | fs::copy( 136 | circom_gadget.join(format!( 137 | "{}_js/{}.wasm", 138 | &reference_split[1], &reference_split[1] 139 | )), 140 | circom_gadget.join(format!("{}.wasm", &reference_split[1])), 141 | ) 142 | .map_err(|err| { 143 | anyhow!( 144 | "Couldn't move compilation artifacts to Lurk folder: {}", 145 | err 146 | ) 147 | })?; 148 | fs::remove_dir_all(circom_gadget.join(format!("{}_js", &reference_split[1]))) 149 | .map_err(|err| anyhow!("Couldn't clean up temporary artifacts: {err}"))?; 150 | 151 | println!("{}", Green.bold().paint("Circom success")); 152 | Ok(()) 153 | } 154 | --------------------------------------------------------------------------------