├── .gitignore ├── .python-version ├── Cargo.toml ├── README.md ├── bfv-gkr ├── Cargo.toml └── src │ ├── constants │ ├── mod.rs │ ├── sk_enc_constants_1024_1x27_65537.rs │ ├── sk_enc_constants_16384_8x54_65537.rs │ ├── sk_enc_constants_2048_1x52_65537.rs │ ├── sk_enc_constants_32768_16x59_65537.rs │ ├── sk_enc_constants_4096_2x55_65537.rs │ └── sk_enc_constants_8192_4x55_65537.rs │ ├── data │ ├── bn254 │ │ ├── sk_enc_1024_1x27_65537.json │ │ ├── sk_enc_16384_8x54_65537.json │ │ ├── sk_enc_2048_1x52_65537.json │ │ ├── sk_enc_4096_2x55_65537.json │ │ └── sk_enc_8192_4x55_65537.json │ ├── data.zip │ └── goldilocks │ │ ├── sk_enc_1024_1x27_65537.json │ │ ├── sk_enc_16384_8x54_65537.json │ │ ├── sk_enc_2048_1x52_65537.json │ │ ├── sk_enc_32768_16x59_65537.json │ │ ├── sk_enc_4096_2x55_65537.json │ │ └── sk_enc_8192_4x55_65537.json │ ├── lib.rs │ ├── poly.rs │ ├── sk_encryption_circuit.rs │ ├── test.rs │ └── transcript.rs ├── lasso ├── Cargo.toml └── src │ ├── lasso.rs │ ├── lib.rs │ ├── memory_checking │ ├── mod.rs │ ├── prover.rs │ └── verifier.rs │ ├── table.rs │ └── table │ └── range.rs └── scripts ├── .gitignore ├── circuit_sk.py ├── requirements.txt └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | Cargo.lock 9 | 10 | # These are backup files generated by rustfmt 11 | **/*.rs.bk 12 | 13 | # MSVC Windows builds of rustc generate these, which store debugging information 14 | *.pdb 15 | 16 | # RustRover 17 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 18 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 19 | # and can be added to the global 
gitignore or merged into this file. For a more nuclear 20 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 21 | #.idea/ 22 | 23 | **/data/bn254/sk_enc_32768_16x59_65537.json 24 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.12.0 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["bfv-gkr", "lasso"] 3 | 4 | [workspace.dependencies] 5 | rand = "0.8.5" 6 | serde = { version = "=1.0", default-features = false, features = ["derive"] } 7 | serde_json = { version = "1.0", default-features = false, features = ["std"] } 8 | itertools = "0.11" 9 | prettytable = "0.10.0" 10 | gkr = { git = "https://github.com/han0110/gkr" } 11 | rayon = "1.8.0" 12 | digest = "0.10.1" 13 | typenum = "1.12.0" 14 | tracing = "0.1.40" 15 | tracing-subscriber = { version = "0.3.17", features = ["std", "env-filter"] } 16 | tracing-forest = { version = "0.1.6", features = ["ansi", "smallvec"] } 17 | plonkish_backend = { git = "https://github.com/han0110/plonkish" } 18 | strum = "0.26.0" 19 | strum_macros = "0.26.4" 20 | enum_dispatch = "0.3.12" 21 | ark-std = { version = "0.4.0" } 22 | fixedbitset = "0.5" 23 | paste = "1.0" 24 | 25 | # Dev dependencies 26 | serial_test = "3.1" 27 | criterion = "0.5.1" 28 | goldilocks = { git = "https://github.com/han0110/goldilocks", branch = "feature/qe_op_b" } 29 | halo2_curves = { version = "0.7.0", package = "halo2curves", features = [ 30 | "derive_serde", 31 | ] } 32 | blake3 = { version = "1", features = ["traits-preview"] } 33 | 34 | [profile.dev] 35 | opt-level = 3 36 | debug = 2 # change to 0 or 2 for more or less debug info 37 | overflow-checks = true 38 | incremental = true 39 | 40 | # Local "release" mode, more 
optimized than dev but faster to compile than release 41 | [profile.local] 42 | inherits = "dev" 43 | opt-level = 3 44 | # Set this to 1 or 2 to get more useful backtraces 45 | debug = 1 46 | debug-assertions = true 47 | panic = 'unwind' 48 | # better recompile times 49 | incremental = true 50 | lto = "thin" 51 | codegen-units = 16 52 | 53 | [profile.release] 54 | opt-level = 3 55 | debug = false 56 | debug-assertions = false 57 | lto = "fat" 58 | # `codegen-units = 1` can lead to WORSE performance - always bench to find best profile for your machine! 59 | # codegen-units = 1 60 | panic = "abort" 61 | incremental = false 62 | 63 | [patch."https://github.com/han0110/gkr"] 64 | gkr = { git = "https://github.com/nulltea/gkr-lasso" } 65 | # gkr = { path = "../examples/gkr/gkr" } 66 | 67 | [patch."https://github.com/han0110/goldilocks"] 68 | goldilocks = { git = "https://github.com/nulltea/goldilocks", branch = "to_canonical_repr" } 69 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hyper-Greco: Verifiable FHE with GKR 2 | 3 | GKR prover for [BFV](https://www.inferati.com/blog/fhe-schemes-bfv) Secret Key Encryption based on [Greco](https://github.com/privacy-scaling-explorations/greco) scheme. 4 | 5 | > [!WARNING] 6 | > This is a research project and hasn't been audited. Don't use it in production. 7 | 8 | ## Approach 9 | 10 | - This implementation uses linear-time GKR ([Libra](https://eprint.iacr.org/2019/317)) proof system. 11 | - FHE input uni polys generated via [circuit_sk.py](https://github.com/nulltea/gkreco/blob/1019a9a0a9a174785232cc8e2a21e04861d30ed2/scripts/circuit_sk.py) are converted to multilinear polys on a boolean domain.
12 | - Polynomial multiplication in the circuit is computed as `evaluation (FFT) -> dot product -> interpolation (IFFT)` 13 | - see 1-D convolution in [zkCNN paper](https://eprint.iacr.org/2021/673.pdf#page=9) 14 | - Range checks for all input polys are batched and proved via a single Lasso node. 15 | - No intermediate commitments; prover can commit to private inputs according to application needs outside this scheme. 16 | 17 | ### Lasso vs LogUp-GKR 18 | 19 | LogUp was a first choice for range checks. It works really well for smaller tables (e.g. `S_BOUND` and `E_BOUND`) but cannot be naively applied for larger ones. Notably, the `R2_BOUNDS` table is too large, `2**55+`, to be materialized. The [halo2-lib range chip](https://github.com/axiom-crypto/halo2-lib/blob/community-edition/halo2-base/src/gates/range/mod.rs) approach could've been used to remedy this (decomposition and lookup combination), but then Lasso is arguably a better realization of this technique. 20 | 21 | Another downside of using LogUp is committing to lookup tables and multiplicities. Lasso avoids this when lookup tables are structured, which is the case for range checks. 22 | 23 | Finally, while multiple lookup input columns can be batched for a single lookup table in LogUp, batching multiple lookup types (range bounds in this case) is impossible with LogUp, afaik. On the contrary, one can batch many lookup types and verify them via a single primary (collation) sum check and two grand product argument GKRs. The approach is inspired by Jolt's [instructions lookups](https://jolt.a16zcrypto.com/how/instruction_lookups.html). 24 | 25 | I should note that since `S_BOUND` and `E_BOUND` are small and each only requires a single subtable/dimension, when batching together with other larger-table lookups (which require multiple dimensions per subtable) -- there is a somewhat redundant overhead compared to, say, running `S_BOUND` and `E_BOUND` as LogUp nodes.
However, when accounting for witness generation and commitment/opening time also needed for batched LogUp, Lasso is still more practical. One approach that has not yet been explored is to have two Lasso nodes: one for small tables and one for large ones. 26 | 27 | A version with LogUp checks and LogUp IOP can be found in this [commit](https://github.com/nulltea/gkreco/blob/67eccb9f57a4291a929e38503b8d246d7d7dc8a1/src/sk_encryption_circuit.rs). 28 | 29 | ## Results 30 | 31 | Benchmarks run on M1 Macbook Pro with 10 cores and 32GB of RAM. 32 | 33 | The parameters have been chosen targeting 128-bit security level for different values of n. For more information on parameters choice, please check [Homomorphic Encryption Standard](https://homomorphicencryption.org/wp-content/uploads/2018/11/HomomorphicEncryptionStandardv1.1.pdf). 34 | 35 | Field/extension field: `Goldilocks, GoldilocksExt2` 36 | 37 | | $n$ | $\log q_i$ | $k$ | Witness Gen | Proof Gen | Proof Verify | 38 | | :----: | :--------: | :-: | :---------: | :-------: | :----------: | 39 | | 1024 | 27 | 1 | 7.23 ms | 103 ms | 10.9ms | 40 | | 2048 | 52 | 1 | 11.9 ms | 159 ms | 9.84ms | 41 | | 4096 | 55 | 2 | 24.73 ms | 265 ms | 10.8ms | 42 | | 8192 | 55 | 4 | 81.5 ms | 588 ms | 20.9ms | 43 | | 16384 | 54 | 8 | 310 ms | 1.51 s | 84.9ms | 44 | | 32768 | 59 | 16 | 1.04s | 5.06 s | 107.9ms | 45 | 46 | 47 | Field/extension field: `BN254, BN254` 48 | 49 | | $n$ | $\log q_i$ | $k$ | Witness Gen | Proof Gen | Proof Verify | 50 | | :----: | :--------: | :-: | :---------: | :-------: | :----------: | 51 | | 1024 | 27 | 1 | 39.0 ms | 236 ms | 22.0 ms | 52 | | 2048 | 52 | 1 | 77.8 ms | 308 ms | 10.1 ms | 53 | | 4096 | 55 | 2 | 232.2 ms | 575 ms | 16.3 ms | 54 | | 8192 | 55 | 4 | 845 ms | 1.65 s | 36.0ms | 55 | | 16384 | 54 | 8 | 3.55 s | 4.87 s | 166 ms | 56 | | 32768 | 59 | 16 | 12.2 s | 28.8 s | 529 ms | 57 | 58 | 59 | For comparison, see original [Greco
benchmarks](https://github.com/privacy-scaling-explorations/greco?tab=readme-ov-file#results) (proved via Halo2 on M2 Macbook Pro with 12 cores and 32GB of RAM) 60 | 61 | ### Run yourself 62 | ```bash 63 | cargo test -r test_sk_enc_valid -- --nocapture 64 | ``` 65 | 66 | ## Profiling charts 67 | 68 | ```mermaid 69 | %%{ 70 | init: { 71 | 'theme': 'dark', 72 | 'themeVariables': { 73 | 'primaryColor': '#FFEC3D', 74 | 'primaryTextColor': '#fff', 75 | 'pie1': '#F5004F', 76 | 'pie2': '#FFEC3D', 77 | 'pie3': '#88D66C' 78 | } 79 | } 80 | }%% 81 | pie showData title GKR Prove (Goldilocks) - 1.88s 82 | "Lasso" : 1.430 83 | "Poly mult (FFT/IFFT)" : 0.291 84 | "Other operations" : 0.128 85 | ``` 86 | 87 | ```mermaid 88 | %%{ 89 | init: { 90 | 'theme': 'dark', 91 | 'themeVariables': { 92 | 'primaryColor': '#FFEC3D', 93 | 'primaryTextColor': '#fff', 94 | 'pie1': '#F5004F', 95 | 'pie2': '#FFEC3D', 96 | 'pie3': '#88D66C' 97 | } 98 | } 99 | }%% 100 | pie showData title GKR Verify (Goldilocks) - 79.7ms 101 | "Lasso" : 39.5 102 | "Poly mult (FFT/IFFT)" : 5.1 103 | "Other operations" : 35.1 104 | ``` 105 | 106 | ## Known issues & limitations 107 | - GKR library used is not zero knowledge, thus may leak some sensitive information 108 | - Memory checking in Lasso uses challenge values sampled from the base field (not the extension field), which isn't secure enough when proving over the Goldilocks field 109 | - Number of ciphertexts ($k$) must be a power of two 110 | 111 | ## Acknowledgements 112 | - [privacy-scaling-explorations/greco](https://github.com/privacy-scaling-explorations/greco) 113 | - [han0110/gkr](https://github.com/han0110/gkr) 114 | - [DoHoonKim8/halo2-lasso](https://github.com/DoHoonKim8/halo2-lasso) 115 | - [a16z/jolt](https://github.com/a16z/jolt) 116 | -------------------------------------------------------------------------------- /bfv-gkr/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bfv-gkr" 
3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | rand.workspace = true 8 | serde.workspace = true 9 | serde_json.workspace = true 10 | itertools.workspace = true 11 | prettytable.workspace = true 12 | gkr.workspace = true 13 | rayon= {workspace = true } 14 | digest.workspace = true 15 | typenum.workspace = true 16 | tracing.workspace = true 17 | tracing-subscriber.workspace = true 18 | tracing-forest.workspace = true 19 | plonkish_backend.workspace = true 20 | strum.workspace = true 21 | strum_macros.workspace = true 22 | enum_dispatch.workspace = true 23 | ark-std.workspace = true 24 | fixedbitset.workspace = true 25 | paste.workspace = true 26 | lasso-gkr = { path = "../lasso" } 27 | 28 | [dev-dependencies] 29 | paste.workspace = true 30 | serial_test.workspace = true 31 | gkr = { workspace = true, features = ["dev"] } 32 | criterion.workspace = true 33 | goldilocks.workspace = true 34 | halo2_curves.workspace = true 35 | blake3.workspace = true 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /bfv-gkr/src/constants/mod.rs: -------------------------------------------------------------------------------- 1 | mod sk_enc_constants_1024_1x27_65537; 2 | mod sk_enc_constants_2048_1x52_65537; 3 | mod sk_enc_constants_4096_2x55_65537; 4 | mod sk_enc_constants_8192_4x55_65537; 5 | mod sk_enc_constants_16384_8x54_65537; 6 | mod sk_enc_constants_32768_16x59_65537; 7 | 8 | pub use sk_enc_constants_1024_1x27_65537::SkEnc1024_1x27_65537; 9 | pub use sk_enc_constants_16384_8x54_65537::SkEnc16384_8x54_65537; 10 | pub use sk_enc_constants_2048_1x52_65537::SkEnc2048_1x52_65537; 11 | pub use sk_enc_constants_4096_2x55_65537::SkEnc4096_2x55_65537; 12 | pub use sk_enc_constants_8192_4x55_65537::SkEnc8192_4x55_65537; 13 | pub use sk_enc_constants_32768_16x59_65537::SkEnc32768_16x59_65537; 14 | 15 | 16 | pub trait BfvSkEncryptConstans { 17 | /// `N` is the degree of the cyclotomic polynomial defining the ring 
`Rq = Zq[X]/(X^N + 1)`. 18 | const N: usize; 19 | /// `N_LOG2` is the logarithm of `N`. 20 | const N_LOG2: usize = Self::N.ilog2() as usize; 21 | /// The coefficients of the polynomial `e` should exist in the interval `[-E_BOUND, E_BOUND]` where `E_BOUND` is the upper bound of the gaussian distribution with 𝜎 = 3.2 22 | const E_BOUND: u64; 23 | /// The coefficients of `k1` should exist in the interval `[-K1_BOUND, K1_BOUND]` where `K1_BOUND` is equal to `(t-1)/2` 24 | const K1_BOUND: u64; 25 | /// The coefficients of the polynomial `s` should exist in the interval `[-S_BOUND, S_BOUND]`. 26 | const S_BOUND: u64; 27 | /// The coefficients of the polynomials `r1is` should exist in the interval `[-R1_BOUND[i], R1_BOUND[i]]` where `R1_BOUND[i]` is equal to `(qi-1)/2` 28 | const R1_BOUNDS: [u64; K]; 29 | /// The coefficients of the polynomials `r2is` should exist in the interval `[-R2_BOUND[i], R2_BOUND[i]]` where `R2_BOUND[i]` is equal to $\frac{(N+2) \cdot \frac{q_i - 1}{2} + B + \frac{t - 1}{2} \cdot |K_{0,i}|}{q_i}$ 30 | const R2_BOUNDS: [u64; K]; 31 | /// List of scalars `qis` such that `qis[i]` is the modulus of the i-th CRT basis of `q` (ciphertext space modulus) 32 | const QIS: [&str; K]; 33 | /// List of scalars `k0is` such that `k0i[i]` is equal to the negative of the multiplicative inverses of t mod qi. 
34 | const K0IS: [&str; K]; 35 | } 36 | -------------------------------------------------------------------------------- /bfv-gkr/src/constants/sk_enc_constants_1024_1x27_65537.rs: -------------------------------------------------------------------------------- 1 | use super::BfvSkEncryptConstans; 2 | 3 | pub struct SkEnc1024_1x27_65537; 4 | 5 | impl BfvSkEncryptConstans<1> for SkEnc1024_1x27_65537 { 6 | const N: usize = 1024; 7 | const E_BOUND: u64 = 19; 8 | const S_BOUND: u64 = 1; 9 | const R1_BOUNDS: [u64; 1] = [1246]; 10 | const R2_BOUNDS: [u64; 1] = [41319090]; 11 | const K1_BOUND: u64 = 32768; 12 | const QIS: [&str; 1] = ["82638181"]; 13 | const K0IS: [&str; 1] = ["1849798"]; 14 | } 15 | -------------------------------------------------------------------------------- /bfv-gkr/src/constants/sk_enc_constants_16384_8x54_65537.rs: -------------------------------------------------------------------------------- 1 | use super::BfvSkEncryptConstans; 2 | 3 | pub struct SkEnc16384_8x54_65537; 4 | 5 | impl BfvSkEncryptConstans<8> for SkEnc16384_8x54_65537 { 6 | const N: usize = 16384; 7 | const E_BOUND: u64 = 19; 8 | const S_BOUND: u64 = 1; 9 | const R1_BOUNDS: [u64; 8] = [16113, 24818, 37593, 34517, 12912, 32948, 38917, 29910]; 10 | const R2_BOUNDS: [u64; 8] = [ 11 | 6452516132120967, 12 | 6452516132120968, 13 | 6452516132120969, 14 | 6452516132120970, 15 | 6452516132120973, 16 | 6452516132120975, 17 | 6452516132120976, 18 | 6452516132120978, 19 | ]; 20 | const K1_BOUND: u64 = 32768; 21 | const QIS: [&'static str; 8] = [ 22 | "12905032264241935", 23 | "12905032264241937", 24 | "12905032264241939", 25 | "12905032264241941", 26 | "12905032264241947", 27 | "12905032264241951", 28 | "12905032264241953", 29 | "12905032264241957", 30 | ]; 31 | const K0IS: [&'static str; 8] = [ 32 | "3119285534855982", 33 | "6547723161734179", 34 | "11578631950954274", 35 | "10367425251572977", 36 | "1858653883183236", 37 | "9749317979689080", 38 | "12100252264181577", 39 | 
"8552879692347062", 40 | ]; 41 | } 42 | -------------------------------------------------------------------------------- /bfv-gkr/src/constants/sk_enc_constants_2048_1x52_65537.rs: -------------------------------------------------------------------------------- 1 | use super::BfvSkEncryptConstans; 2 | 3 | pub struct SkEnc2048_1x52_65537; 4 | 5 | impl BfvSkEncryptConstans<1> for SkEnc2048_1x52_65537 { 6 | const N: usize = 2048; 7 | const E_BOUND: u64 = 19; 8 | const S_BOUND: u64 = 1; 9 | const R1_BOUNDS: [u64; 1] = [29882]; 10 | const R2_BOUNDS: [u64; 1] = [1434875766359798]; 11 | const K1_BOUND: u64 = 32768; 12 | const QIS: [&'static str; 1] = ["2869751532719597"]; 13 | const K0IS: [&'static str; 1] = ["2527239722765942"]; 14 | } 15 | -------------------------------------------------------------------------------- /bfv-gkr/src/constants/sk_enc_constants_32768_16x59_65537.rs: -------------------------------------------------------------------------------- 1 | use super::BfvSkEncryptConstans; 2 | 3 | pub struct SkEnc32768_16x59_65537; 4 | 5 | impl BfvSkEncryptConstans<16> for SkEnc32768_16x59_65537 { 6 | const N: usize = 32768; 7 | const E_BOUND: u64 = 19; 8 | const S_BOUND: u64 = 1; 9 | const R1_BOUNDS: [u64; 16] = [17449, 44739, 23337, 23971, 38527, 41231, 47178, 25438, 29323, 21130, 36215, 41226, 47080, 20341, 18613, 40562]; 10 | const R2_BOUNDS: [u64; 16] = [238750987231488128, 238750987231488128, 238750987231488128, 238750987231488128, 238750987231488128, 238750987231488128, 238750987231488128, 238750987231488128, 238750987231488160, 238750987231488160, 238750987231488160, 238750987231488160, 238750987231488160, 238750987231488160, 238750987231488160, 238750987231488192]; 11 | const K1_BOUND: u64 = 32768; 12 | const QIS: [&'static str; 16] = ["477501974462976263", "477501974462976265", "477501974462976267", "477501974462976269", "477501974462976271", "477501974462976277", "477501974462976283", "477501974462976289", "477501974462976293", "477501974462976299", 
"477501974462976301", "477501974462976307", "477501974462976311", "477501974462976313", "477501974462976317", "477501974462976361"]; 13 | const K0IS: [&'static str; 16] = ["15519160254606397", "413181248299753132", "101318987089463173", "110550337344198528", "322660099471945682", "362070023329770101", "448729597070750619", "131927434145614106", "188539582116643080", "69158624007851611", "288969678337063080", "361989877431741640", "447294256896967800", "57654044645399264", "32480946673725505", "352321367733214595"]; 14 | } 15 | -------------------------------------------------------------------------------- /bfv-gkr/src/constants/sk_enc_constants_4096_2x55_65537.rs: -------------------------------------------------------------------------------- 1 | use super::BfvSkEncryptConstans; 2 | 3 | pub struct SkEnc4096_2x55_65537; 4 | 5 | impl BfvSkEncryptConstans<2> for SkEnc4096_2x55_65537 { 6 | const N: usize = 4096; 7 | const E_BOUND: u64 = 19; 8 | const S_BOUND: u64 = 1; 9 | const R1_BOUNDS: [u64; 2] = [25966, 19503]; 10 | const R2_BOUNDS: [u64; 2] = [13712101976447600, 13712101976447600]; 11 | const K1_BOUND: u64 = 32768; 12 | const QIS: [&'static str; 2] = ["27424203952895201", "27424203952895203"]; 13 | const K0IS: [&'static str; 2] = ["20017153978526555", "14608220699689817"]; 14 | } 15 | -------------------------------------------------------------------------------- /bfv-gkr/src/constants/sk_enc_constants_8192_4x55_65537.rs: -------------------------------------------------------------------------------- 1 | use super::BfvSkEncryptConstans; 2 | 3 | pub struct SkEnc8192_4x55_65537; 4 | 5 | impl BfvSkEncryptConstans<4> for SkEnc8192_4x55_65537 { 6 | const N: usize = 8192; 7 | const E_BOUND: u64 = 19; 8 | const S_BOUND: u64 = 1; 9 | const R1_BOUNDS: [u64; 4] = [28014, 21551, 24676, 29416]; 10 | const R2_BOUNDS: [u64; 4] = [ 11 | 13712101976447600, 12 | 13712101976447600, 13 | 13712101976447602, 14 | 13712101976447604, 15 | ]; 16 | const K1_BOUND: u64 = 32768; 17 | 
const QIS: [&'static str; 4] = [ 18 | "27424203952895201", 19 | "27424203952895203", 20 | "27424203952895205", 21 | "27424203952895207", 22 | ]; 23 | const K0IS: [&'static str; 4] = [ 24 | "20017153978526555", 25 | "14608220699689817", 26 | "17223556688605927", 27 | "21190079862835656", 28 | ]; 29 | } 30 | -------------------------------------------------------------------------------- /bfv-gkr/src/data/data.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nulltea/hyper-greco/344eedb1df9574c57f15bfdf09382f041f7f3773/bfv-gkr/src/data/data.zip -------------------------------------------------------------------------------- /bfv-gkr/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_snake_case)] 2 | #![allow(clippy::needless_range_loop)] 3 | #![feature(generic_arg_infer)] 4 | 5 | pub mod constants; 6 | pub mod poly; 7 | pub mod sk_encryption_circuit; 8 | pub mod transcript; 9 | 10 | #[cfg(test)] 11 | pub mod test; 12 | -------------------------------------------------------------------------------- /bfv-gkr/src/poly.rs: -------------------------------------------------------------------------------- 1 | use gkr::ff_ext::ff::PrimeField; 2 | use itertools::Itertools; 3 | 4 | /// Struct to store the coefficients of a polynomial as Field Elements 5 | /// The coefficients are stored starting from the highest degree term 6 | #[derive(Clone, Debug)] 7 | pub struct Poly { 8 | pub coefficients: Vec, 9 | } 10 | 11 | impl Poly { 12 | pub fn new(coefficients: Vec) -> Self { 13 | let coefficients = coefficients 14 | .iter() 15 | .map(|coeff| F::from_str_vartime(coeff).unwrap()) 16 | .collect(); 17 | Poly { coefficients } 18 | } 19 | 20 | pub fn new_padded(coefficients: Vec, log2_size: usize) -> Self { 21 | let mut coefficients = coefficients 22 | .iter() 23 | .map(|coeff| F::from_str_vartime(coeff).unwrap()) 24 | .collect_vec(); 25 | 
coefficients.resize(1 << log2_size, F::ZERO); 26 | 27 | Poly { coefficients } 28 | } 29 | 30 | pub fn new_shifted(coefficients: Vec, size: usize) -> Self { 31 | let coefficients = coefficients 32 | .iter() 33 | .map(|coeff| F::from_str_vartime(coeff).unwrap()) 34 | .collect_vec(); 35 | let padding_size = size.saturating_sub(coefficients.len()); 36 | 37 | let mut shifted = vec![F::ZERO; padding_size]; 38 | shifted.extend(coefficients); 39 | shifted.resize(size.next_power_of_two(), F::ZERO); 40 | 41 | Poly { 42 | coefficients: shifted, 43 | } 44 | } 45 | 46 | pub fn to_vec(&self) -> Vec { 47 | self.coefficients.clone() 48 | } 49 | } 50 | 51 | impl AsRef<[F]> for Poly { 52 | fn as_ref(&self) -> &[F] { 53 | &self.coefficients 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /bfv-gkr/src/sk_encryption_circuit.rs: -------------------------------------------------------------------------------- 1 | use crate::constants::BfvSkEncryptConstans; 2 | use crate::{transcript::Keccak256Transcript, poly::Poly}; 3 | use gkr::izip_eq; 4 | use gkr::{ 5 | chain_par, 6 | circuit::{ 7 | connect, 8 | node::{EvalClaim, FftNode, InputNode, VanillaGate, VanillaNode}, 9 | Circuit, NodeId, 10 | }, 11 | ff_ext::ff::PrimeField, 12 | poly::{box_dense_poly, BoxMultilinearPoly}, 13 | transcript::Transcript, 14 | util::{arithmetic::ExtensionField, Itertools}, 15 | verify_gkr, 16 | }; 17 | use itertools::chain; 18 | use lasso_gkr::{table::range::RangeLookup, LassoNode, LassoPreprocessing}; 19 | use plonkish_backend::pcs::PolynomialCommitmentScheme; 20 | use plonkish_backend::poly::multilinear::MultilinearPolynomial; 21 | use plonkish_backend::util::hash::{Keccak256, Output}; 22 | use rand::RngCore; 23 | use rayon::iter::ParallelIterator; 24 | use serde::Deserialize; 25 | use std::cmp::min; 26 | use std::iter; 27 | use tracing::info_span; 28 | 29 | const LIMB_BITS: usize = 16; 30 | const C: usize = 4; 31 | const M: usize = 1 << LIMB_BITS; 32 | 33 | 
pub type ProverKey< 34 | F, 35 | E, 36 | // Pcs: PolynomialCommitmentScheme< 37 | // F, 38 | // Polynomial = MultilinearPolynomial, 39 | // CommitmentChunk = Output, 40 | // >, 41 | > = LassoPreprocessing; 42 | 43 | pub type VerifierKey< 44 | F, 45 | E, 46 | // Pcs: PolynomialCommitmentScheme< 47 | // F, 48 | // Polynomial = MultilinearPolynomial, 49 | // CommitmentChunk = Output, 50 | // >, 51 | > = LassoPreprocessing; 52 | 53 | /// `BfvSkEncryptionCircuit` is a circuit that checks the correct formation of a ciphertext resulting from BFV secret key encryption 54 | /// All the polynomials coefficients and scalars are normalized to be in the range `[0, p)` where p is the modulus of the prime field of the circuit 55 | /// 56 | /// # Parameters: 57 | /// * `s`: secret polynomial, sampled from ternary distribution. 58 | /// * `e`: error polynomial, sampled from discrete Gaussian distribution. 59 | /// * `k1`: scaled message polynomial. 60 | /// * `r2is`: list of r2i polynomials for each i-th CRT basis . 61 | /// * `r1is`: list of r1i polynomials for each CRT i-th CRT basis. 62 | /// * `ais`: list of ai polynomials for each CRT i-th CRT basis. 63 | /// * `ct0is`: list of ct0i (first component of the ciphertext cti) polynomials for each CRT i-th CRT basis. 
64 | #[derive(Deserialize, Clone)] 65 | pub struct BfvSkEncryptArgs { 66 | s: Vec, 67 | e: Vec, 68 | k1: Vec, 69 | r2is: Vec>, 70 | r1is: Vec>, 71 | ais: Vec>, 72 | ct0is: Vec>, 73 | } 74 | 75 | pub struct BfvEncryptBlock, const K: usize> { 76 | num_reps: usize, 77 | _marker: std::marker::PhantomData, 78 | } 79 | 80 | impl, const K: usize> BfvEncryptBlock { 81 | pub const fn log2_size(&self) -> usize { 82 | Params::N_LOG2 + 1 83 | } 84 | 85 | // single block 86 | pub fn configure>( 87 | &self, 88 | circuit: &mut Circuit, 89 | s: NodeId, 90 | e: NodeId, 91 | k1: NodeId, 92 | preprocessing: LassoPreprocessing, 93 | ) -> NodeId { 94 | let poly_log2_size = Params::N_LOG2; 95 | let log2_size = self.log2_size(); 96 | 97 | let es = { 98 | let gates = (0..self.num_reps) 99 | .flat_map(|_| (0..(1usize << log2_size)).map(move |j| VanillaGate::relay((0, j)))) 100 | .collect_vec(); 101 | 102 | circuit.insert(VanillaNode::new(1, log2_size, gates.clone(), 1)) 103 | }; 104 | 105 | let k1kis = { 106 | let gates = (0..self.num_reps) 107 | .flat_map(|i| { 108 | (0..(1usize << log2_size)).map(move |j| { 109 | relay_mul_const((0, j), F::from_str_vartime(Params::K0IS[i]).unwrap()) 110 | }) 111 | }) 112 | .collect_vec(); 113 | 114 | circuit.insert(VanillaNode::new(1, log2_size, gates.clone(), 1)) 115 | }; 116 | 117 | connect!(circuit { 118 | es <- e; 119 | k1kis <- k1; 120 | }); 121 | 122 | let ais = iter::repeat_with(|| circuit.insert(InputNode::new(log2_size, 1))) 123 | .take(self.num_reps) 124 | .collect_vec(); 125 | 126 | let r1is = iter::repeat_with(|| circuit.insert(InputNode::new(log2_size, 1))) 127 | .take(self.num_reps) 128 | .collect_vec(); 129 | 130 | let r1iqis = { 131 | let r1i_size = 1usize << log2_size; 132 | let gates = (0..self.num_reps) 133 | .flat_map(|i| { 134 | (0..r1i_size).map(move |j| { 135 | relay_mul_const((i, j), F::from_str_vartime(Params::QIS[i]).unwrap()) 136 | }) 137 | }) 138 | .collect_vec(); 139 | 140 | circuit.insert(VanillaNode::new(self.num_reps, 
log2_size, gates.clone(), 1)) 141 | }; 142 | 143 | r1is.iter() 144 | .take(self.num_reps) 145 | .for_each(|&r1i| circuit.connect(r1i, r1iqis)); 146 | 147 | let r2is = circuit.insert(InputNode::new(poly_log2_size, self.num_reps)); 148 | 149 | let r2is_log2_sise = self.log2_size_with_num_reps(poly_log2_size); 150 | let r2is_chunks = (0..1 << r2is_log2_sise) 151 | .chunks(1 << log2_size) 152 | .into_iter() 153 | .map(|chunk| { 154 | let mut gates = chunk.map(move |j| VanillaGate::relay((0, j))).collect_vec(); 155 | gates.resize(1 << log2_size, VanillaGate::constant(F::ZERO)); 156 | 157 | let node = circuit.insert(VanillaNode::new(1, r2is_log2_sise, gates, 1)); 158 | circuit.connect(r2is, node); 159 | node 160 | }) 161 | .collect_vec(); 162 | 163 | let lasso_inputs_batched = { 164 | let gates = chain![ 165 | Params::R1_BOUNDS.iter().take(self.num_reps).copied(), 166 | iter::repeat(Params::R2_BOUNDS[0]).take(r2is_chunks.len()), 167 | vec![Params::S_BOUND, Params::E_BOUND, Params::K1_BOUND], 168 | ] 169 | .enumerate() 170 | .flat_map(|(i, bound)| { 171 | (0..(1usize << log2_size)).map(move |j| relay_add_const((i, j), F::from(bound))) 172 | }) 173 | .collect_vec(); 174 | 175 | circuit.insert(VanillaNode::new( 176 | r2is_chunks.len() + self.num_reps + 3, 177 | log2_size, 178 | gates, 179 | 1, 180 | )) 181 | }; 182 | let lasso_ranges = { 183 | let r2i_log2_size = if self.num_reps == 1 { 184 | log2_size // since zero-padded to log2_size in r2is_chunks 185 | } else { 186 | poly_log2_size 187 | }; 188 | let lookups = chain![ 189 | Params::R1_BOUNDS 190 | .iter() 191 | .take(self.num_reps) 192 | .flat_map(|&bound| iter::repeat(RangeLookup::id_for(bound * 2 + 1)) 193 | .take(1 << log2_size)), 194 | Params::R2_BOUNDS 195 | .iter() 196 | .take(self.num_reps) 197 | .flat_map(|&bound| iter::repeat(RangeLookup::id_for(bound * 2 + 1)) 198 | .take(1 << r2i_log2_size)), 199 | iter::repeat(RangeLookup::id_for(Params::S_BOUND * 2 + 1)).take(1 << log2_size), 200 | 
iter::repeat(RangeLookup::id_for(Params::E_BOUND * 2 + 1)).take(1 << log2_size), 201 | iter::repeat(RangeLookup::id_for(Params::K1_BOUND * 2 + 1)).take(1 << log2_size), 202 | ] 203 | .collect_vec(); 204 | let num_vars = lookups.len().next_power_of_two().ilog2() as usize; 205 | circuit.insert(LassoNode::::new( 206 | preprocessing, 207 | num_vars, 208 | lookups, 209 | )) 210 | }; 211 | r1is.iter() 212 | .take(self.num_reps) 213 | .for_each(|&r1i| circuit.connect(r1i, lasso_inputs_batched)); 214 | 215 | r2is_chunks 216 | .iter() 217 | .for_each(|&r2i| circuit.connect(r2i, lasso_inputs_batched)); 218 | 219 | connect!(circuit { 220 | lasso_inputs_batched <- s, e, k1; 221 | lasso_ranges <- lasso_inputs_batched; 222 | }); 223 | 224 | let s_eval = circuit.insert(FftNode::forward(log2_size)); 225 | circuit.connect(s, s_eval); 226 | 227 | let s_eval_copy = circuit.insert(VanillaNode::new( 228 | 1, 229 | log2_size, 230 | (0..1usize << log2_size) 231 | .map(|i| VanillaGate::relay((0, i))) 232 | .collect_vec(), 233 | 1, 234 | )); 235 | circuit.connect(s_eval, s_eval_copy); 236 | 237 | let sai_par = { 238 | let gates = (0..self.num_reps) 239 | .flat_map(|i| (0..(1usize << log2_size)).map(move |j| VanillaGate::relay((i, j)))) 240 | .collect_vec(); 241 | 242 | circuit.insert(VanillaNode::new(self.num_reps, log2_size, gates.clone(), 1)) 243 | }; 244 | 245 | for &ai in ais.iter().take(self.num_reps) { 246 | let gates = (0..1usize << log2_size) 247 | .map(|i| VanillaGate::mul((0, i), (1, i))) 248 | .collect_vec(); 249 | let ai_eval = circuit.insert(FftNode::forward(log2_size)); 250 | let sai_eval = circuit.insert(VanillaNode::new(2, log2_size, gates, 1)); 251 | let sai = circuit.insert(FftNode::inverse(log2_size)); 252 | 253 | connect!(circuit { 254 | ai_eval <- ai; 255 | sai_eval <- s_eval_copy, ai_eval; 256 | sai <- sai_eval; 257 | }); 258 | 259 | circuit.connect(sai, sai_par); 260 | } 261 | 262 | let r2i_cyclo = { 263 | let r2i_size = (1usize << poly_log2_size) - 1; 264 | let 
gates = chain![ 265 | (0..r2i_size).map(|i| VanillaGate::relay((0, i))), 266 | [VanillaGate::constant(F::ZERO)], 267 | (0..r2i_size).map(|i| VanillaGate::relay((0, i))), 268 | [VanillaGate::constant(F::ZERO)] 269 | ] 270 | .collect_vec(); 271 | 272 | circuit.insert(VanillaNode::new( 273 | 1, 274 | poly_log2_size, 275 | gates.clone(), 276 | self.num_reps, 277 | )) 278 | }; 279 | 280 | let sum = { 281 | let gates = (0..1usize << log2_size) 282 | .map(|i| VanillaGate::sum(vec![(0, i), (1, i), (2, i), (3, i), (4, i)])) 283 | .collect(); 284 | circuit.insert(VanillaNode::new(5, log2_size, gates, self.num_reps)) 285 | }; 286 | 287 | connect!(circuit { 288 | r2i_cyclo <- r2is; 289 | sum <- sai_par, es, k1kis, r1iqis, r2i_cyclo; 290 | }); 291 | 292 | k1 293 | } 294 | 295 | fn log2_size_with_num_reps(&self, poly_log2_size: usize) -> usize { 296 | poly_log2_size + self.num_reps.ilog2() as usize 297 | } 298 | } 299 | 300 | pub struct BfvEncrypt, const K: usize> { 301 | block: BfvEncryptBlock, 302 | } 303 | 304 | impl, const K: usize> BfvEncrypt { 305 | pub fn new(num_reps: usize) -> Self { 306 | Self { 307 | block: BfvEncryptBlock { 308 | num_reps, 309 | _marker: std::marker::PhantomData, 310 | }, 311 | } 312 | } 313 | 314 | pub const fn log2_size(&self) -> usize { 315 | Params::N_LOG2 + 1 316 | } 317 | 318 | #[allow(clippy::type_complexity)] 319 | pub fn setup< 320 | F: PrimeField, 321 | E: ExtensionField, 322 | Pcs: PolynomialCommitmentScheme>, 323 | >( 324 | &self, 325 | _rng: impl RngCore + Clone, 326 | ) -> (ProverKey, VerifierKey) { 327 | let mut lasso_preprocessing = LassoPreprocessing::::preprocess::(chain![ 328 | [ 329 | RangeLookup::new_boxed(Params::S_BOUND * 2 + 1), 330 | RangeLookup::new_boxed(Params::E_BOUND * 2 + 1), 331 | RangeLookup::new_boxed(Params::K1_BOUND * 2 + 1) 332 | ], 333 | Params::R1_BOUNDS 334 | .iter() 335 | .take(self.block.num_reps) 336 | .map(|&bound| RangeLookup::new_boxed(bound * 2 + 1)), 337 | Params::R2_BOUNDS 338 | .iter() 339 | 
.take(self.block.num_reps) 340 | .map(|&bound| RangeLookup::new_boxed(bound * 2 + 1)) 341 | ]); 342 | 343 | let lasso_verifier = lasso_preprocessing.to_verifier_preprocessing(); 344 | 345 | let pk = lasso_preprocessing; 346 | let vk = lasso_verifier; 347 | 348 | (pk, vk) 349 | } 350 | 351 | pub fn configure>( 352 | &self, 353 | circuit: &mut Circuit, 354 | preprocessing: LassoPreprocessing, 355 | ) -> NodeId { 356 | let log2_size = self.log2_size(); 357 | 358 | let s = circuit.insert(InputNode::new(log2_size, 1)); 359 | let e = circuit.insert(InputNode::new(log2_size, 1)); 360 | let k1 = circuit.insert(InputNode::new(log2_size, 1)); 361 | 362 | self.block.configure(circuit, s, e, k1, preprocessing) 363 | } 364 | 365 | pub fn get_inputs>( 366 | &self, 367 | args: &BfvSkEncryptArgs, 368 | ) -> ( 369 | Vec>, 370 | BoxMultilinearPoly<'static, F, E>, 371 | ) { 372 | let log2_size = self.log2_size(); 373 | 374 | let s = Poly::::new_padded(args.s.clone(), log2_size); 375 | let e = Poly::::new_shifted(args.e.clone(), (1 << log2_size) - 1); 376 | let k1 = Poly::::new_shifted(args.k1.clone(), (1 << log2_size) - 1); 377 | 378 | let mut r2is = vec![]; 379 | let mut r1is = vec![]; 380 | let mut ais = vec![]; 381 | let mut ct0is = vec![]; 382 | 383 | for z in 0..min(args.ct0is.len(), self.block.num_reps) { 384 | let r2i = Poly::::new(args.r2is[z].clone()); 385 | r2is.push(r2i.to_vec()); 386 | 387 | let r1i = Poly::::new_padded(args.r1is[z].clone(), log2_size); 388 | r1is.push(r1i.to_vec()); 389 | 390 | let ai = Poly::::new_padded(args.ais[z].clone(), log2_size); 391 | ais.push(ai.to_vec()); 392 | 393 | let ct0i = Poly::::new_shifted(args.ct0is[z].clone(), 1 << log2_size); 394 | let mut ct0i = ct0i.as_ref()[1..].to_vec(); 395 | ct0i.push(F::ZERO); 396 | ct0is.extend(ct0i); 397 | } 398 | 399 | let r2is = r2is 400 | .into_iter() 401 | .take(self.block.num_reps) 402 | .flat_map(|mut r2i| { 403 | r2i.push(F::ZERO); 404 | r2i 405 | }) 406 | .collect_vec(); 407 | 408 | let inputs = 
chain_par![[s.to_vec(), e.to_vec(), k1.to_vec()], ais, r1is, [r2is],] 409 | .map(box_dense_poly) 410 | .collect(); 411 | 412 | let output = box_dense_poly(ct0is); 413 | 414 | (inputs, output) 415 | } 416 | 417 | pub fn prove< 418 | F: PrimeField, 419 | E: ExtensionField, 420 | Pcs: PolynomialCommitmentScheme< 421 | F, 422 | Polynomial = MultilinearPolynomial, 423 | CommitmentChunk = Output, 424 | >, 425 | >( 426 | &self, 427 | args: &BfvSkEncryptArgs, 428 | pk: ProverKey, 429 | ) -> Vec { 430 | let preprocessing = pk; 431 | let mut transcript = Keccak256Transcript::>::default(); 432 | 433 | let circuit = info_span!("init circuit").in_scope(|| { 434 | let mut circuit = Circuit::::default(); 435 | self.configure(&mut circuit, preprocessing); 436 | circuit 437 | }); 438 | 439 | let (values, output_claims) = info_span!("wintess gen").in_scope(|| { 440 | let (inputs, ctis_poly) = info_span!("parse inputs").in_scope(|| self.get_inputs(args)); 441 | 442 | let values = info_span!("eval circuit").in_scope(|| circuit.evaluate(inputs)); 443 | 444 | let ct0is_claim = info_span!("eval output").in_scope(|| { 445 | let point = transcript.squeeze_challenges(self.ct0is_log2_size()); 446 | let value = ctis_poly.evaluate(&point); 447 | EvalClaim::new(point.clone(), value) 448 | }); 449 | 450 | let output_claims = vec![EvalClaim::new(vec![], E::ZERO), ct0is_claim]; 451 | 452 | (values, output_claims) 453 | }); 454 | 455 | let _claims = info_span!("GKR prove") 456 | .in_scope(|| gkr::prove_gkr(&circuit, &values, &output_claims, &mut transcript)) 457 | .unwrap(); 458 | 459 | transcript.into_proof() 460 | } 461 | 462 | pub fn verify< 463 | F: PrimeField, 464 | E: ExtensionField, 465 | Pcs: PolynomialCommitmentScheme< 466 | F, 467 | Polynomial = MultilinearPolynomial, 468 | CommitmentChunk = Output, 469 | >, 470 | >( 471 | &self, 472 | vk: VerifierKey, 473 | inputs: Vec>, 474 | ct0is: Vec>, 475 | proof: &[u8], 476 | ) { 477 | let preprocessing = vk; 478 | let mut transcript = 
Keccak256Transcript::from_proof(proof); 479 | 480 | let output_claims = info_span!("eval output claim").in_scope(|| { 481 | let ct0is_claim = { 482 | let point = transcript.squeeze_challenges(self.ct0is_log2_size()); 483 | let ct0is = box_dense_poly( 484 | ct0is 485 | .into_iter() 486 | .take(self.block.num_reps) 487 | .flat_map(|ct0i| { 488 | let ct0i = Poly::::new_shifted(ct0i, 1 << self.log2_size()); 489 | let mut ct0i = ct0i.as_ref()[1..].to_vec(); 490 | ct0i.push(F::ZERO); 491 | ct0i 492 | }) 493 | .collect_vec(), 494 | ); 495 | let value = ct0is.evaluate(&point); 496 | 497 | EvalClaim::new(point, value) 498 | }; 499 | 500 | vec![EvalClaim::new(vec![], E::ZERO), ct0is_claim] 501 | }); 502 | 503 | let circuit = info_span!("init circuit").in_scope(|| { 504 | let mut circuit = Circuit::::default(); 505 | self.configure(&mut circuit, preprocessing); 506 | circuit 507 | }); 508 | 509 | let input_claims = info_span!("GKR verify") 510 | .in_scope(|| verify_gkr(&circuit, &output_claims, &mut transcript).unwrap()); 511 | 512 | izip_eq!(inputs, input_claims).for_each(|(input, claims)| { 513 | claims 514 | .iter() 515 | .for_each(|claim| assert_eq!(input.evaluate(claim.point()), claim.value())) 516 | }); 517 | } 518 | 519 | fn ct0is_log2_size(&self) -> usize { 520 | assert!(self.block.num_reps.is_power_of_two()); 521 | self.log2_size() + self.block.num_reps.next_power_of_two().ilog2() as usize 522 | } 523 | } 524 | 525 | fn relay_mul_const(w: (usize, usize), c: F) -> VanillaGate { 526 | VanillaGate::new(None, vec![(Some(c), w)], Vec::new()) 527 | } 528 | 529 | fn relay_add_const(w: (usize, usize), c: F) -> VanillaGate { 530 | VanillaGate::new(Some(c), vec![(None, w)], Vec::new()) 531 | } 532 | 533 | #[cfg(test)] 534 | mod test { 535 | use crate::generate_sk_enc_test; 536 | 537 | use super::*; 538 | use gkr::util::dev::seeded_std_rng; 539 | use goldilocks::{Goldilocks, GoldilocksExt2}; 540 | use halo2_curves::bn256::Fr; 541 | 542 | use paste::paste; 543 | use 
plonkish_backend::{pcs::multilinear::MultilinearBrakedown, util::code::BrakedownSpec6}; 544 | use std::{fs::File, io::Read}; 545 | use tracing::info_span; 546 | use tracing_forest::ForestLayer; 547 | use tracing_subscriber::{layer::SubscriberExt, EnvFilter, Registry}; 548 | 549 | pub type Brakedown = 550 | MultilinearBrakedown; 551 | 552 | // Goldilocks prime tests 553 | 554 | generate_sk_enc_test!( 555 | "goldilocks", 556 | Goldilocks, 557 | GoldilocksExt2, 558 | Brakedown, 559 | 1024, 560 | 1, 561 | 27 562 | ); 563 | 564 | generate_sk_enc_test!( 565 | "goldilocks", 566 | Goldilocks, 567 | GoldilocksExt2, 568 | Brakedown, 569 | 2048, 570 | 1, 571 | 52 572 | ); 573 | 574 | generate_sk_enc_test!( 575 | "goldilocks", 576 | Goldilocks, 577 | GoldilocksExt2, 578 | Brakedown, 579 | 4096, 580 | 2, 581 | 55 582 | ); 583 | 584 | generate_sk_enc_test!( 585 | "goldilocks", 586 | Goldilocks, 587 | GoldilocksExt2, 588 | Brakedown, 589 | 8192, 590 | 4, 591 | 55 592 | ); 593 | 594 | generate_sk_enc_test!( 595 | "goldilocks", 596 | Goldilocks, 597 | GoldilocksExt2, 598 | Brakedown, 599 | 16384, 600 | 8, 601 | 54 602 | ); 603 | 604 | generate_sk_enc_test!( 605 | "goldilocks", 606 | Goldilocks, 607 | GoldilocksExt2, 608 | Brakedown, 609 | 32768, 610 | 16, 611 | 59 612 | ); 613 | 614 | // Bn254 prime tests 615 | 616 | generate_sk_enc_test!("bn254", Fr, Fr, Brakedown, 1024, 1, 27); 617 | 618 | generate_sk_enc_test!("bn254", Fr, Fr, Brakedown, 2048, 1, 52); 619 | 620 | generate_sk_enc_test!("bn254", Fr, Fr, Brakedown, 4096, 2, 55); 621 | 622 | generate_sk_enc_test!("bn254", Fr, Fr, Brakedown, 8192, 4, 55); 623 | 624 | generate_sk_enc_test!("bn254", Fr, Fr, Brakedown, 16384, 8, 54); 625 | 626 | generate_sk_enc_test!("bn254", Fr, Fr, Brakedown, 32768, 16, 59); 627 | } 628 | -------------------------------------------------------------------------------- /bfv-gkr/src/test.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | 
macro_rules! generate_sk_enc_test { 3 | ($prime_name:expr, $F:ty, $E:ty, $Pcs:ty, $N:expr, $K:expr, $K_BITSIZE:expr) => { 4 | paste! { 5 | #[test] 6 | #[serial_test::serial] 7 | pub fn []() { 8 | type Params = $crate::constants:: []; 9 | let env_filter = EnvFilter::builder() 10 | .with_default_directive(tracing::Level::INFO.into()) 11 | .from_env_lossy(); 12 | 13 | let subscriber = Registry::default() 14 | .with(env_filter) 15 | .with(ForestLayer::default()); 16 | 17 | let _ = tracing::subscriber::set_global_default(subscriber); 18 | 19 | let rng = seeded_std_rng(); 20 | 21 | let file_path = format!( 22 | "src/data/{}/sk_enc_{}_{}x{}_65537.json", 23 | $prime_name, 24 | Params::N, 25 | $K, 26 | $K_BITSIZE, 27 | ); 28 | let mut file = File::open(&file_path).expect("Failed to open file"); 29 | let mut data = String::new(); 30 | file.read_to_string(&mut data).expect("Failed to read file"); 31 | let bfv = BfvEncrypt::::new($K); 32 | let args = 33 | serde_json::from_str::(&data).expect("Failed to parse JSON"); 34 | 35 | let (pk, vk) = 36 | info_span!("setup").in_scope(|| bfv.setup::<$F, $E, $Pcs>(rng.clone())); 37 | let proof = info_span!("FHE_enc prove") 38 | .in_scope(|| bfv.prove::<$F, $E, $Pcs>(&args, pk)); 39 | 40 | let (inputs, _) = 41 | info_span!("parse inputs").in_scope(|| bfv.get_inputs(&args)); 42 | 43 | info_span!("FHE_enc verify") 44 | .in_scope(|| bfv.verify::<$F, $E, $Pcs>(vk, inputs, args.ct0is, &proof)); 45 | } 46 | } 47 | }; 48 | } 49 | -------------------------------------------------------------------------------- /bfv-gkr/src/transcript.rs: -------------------------------------------------------------------------------- 1 | use gkr::{ 2 | util::{ 3 | arithmetic::{ExtensionField, PrimeField}, 4 | RngCore, SeedableRng, StdRng, 5 | }, 6 | Error, 7 | }; 8 | use itertools::Itertools; 9 | use plonkish_backend::util::{ 10 | arithmetic::fe_mod_from_le_bytes, 11 | hash::{Hash, Keccak256, Output, Update}, 12 | transcript::{ 13 | FieldTranscript, 
FieldTranscriptRead, FieldTranscriptWrite, Transcript, TranscriptRead, 14 | TranscriptWrite, 15 | }, 16 | }; 17 | use std::{ 18 | fmt::Debug, 19 | io::{self}, 20 | iter, 21 | }; 22 | 23 | pub type StdRngTranscript = RngTranscript; 24 | 25 | #[derive(Debug)] 26 | pub struct RngTranscript { 27 | stream: S, 28 | rng: P, 29 | } 30 | 31 | impl

RngTranscript, P> { 32 | pub fn into_proof(self) -> Vec { 33 | self.stream 34 | } 35 | } 36 | 37 | impl<'a> RngTranscript<&'a [u8], StdRng> { 38 | pub fn from_proof(proof: &'a [u8]) -> Self { 39 | Self::new(proof) 40 | } 41 | } 42 | 43 | impl RngTranscript { 44 | pub fn new(stream: S) -> Self { 45 | Self { 46 | stream, 47 | rng: StdRng::seed_from_u64(0), 48 | } 49 | } 50 | } 51 | 52 | impl Default for RngTranscript, StdRng> { 53 | fn default() -> Self { 54 | Self::new(Vec::new()) 55 | } 56 | } 57 | 58 | impl, S: Debug, P: Debug + RngCore> 59 | gkr::transcript::Transcript for RngTranscript 60 | { 61 | fn squeeze_challenge(&mut self) -> E { 62 | let bases = iter::repeat_with(|| F::random(&mut self.rng)) 63 | .take(E::DEGREE) 64 | .collect_vec(); 65 | E::from_bases(&bases) 66 | } 67 | 68 | fn common_felt(&mut self, _: &F) {} 69 | } 70 | 71 | impl, R: Debug + io::Read, P: Debug + RngCore> 72 | gkr::transcript::TranscriptRead for RngTranscript 73 | { 74 | fn read_felt(&mut self) -> Result { 75 | let mut repr = ::Repr::default(); 76 | self.stream 77 | .read_exact(repr.as_mut()) 78 | .map_err(|err| Error::Transcript(err.kind(), err.to_string()))?; 79 | repr.as_mut().reverse(); 80 | let felt = F::from_repr_vartime(repr).ok_or_else(err_invalid_felt)?; 81 | Ok(felt) 82 | } 83 | 84 | fn read_felt_ext(&mut self) -> Result { 85 | let bases = iter::repeat_with(|| gkr::transcript::TranscriptRead::::read_felt(self)) 86 | .take(E::DEGREE) 87 | .try_collect::<_, Vec<_>, _>()?; 88 | Ok(E::from_bases(&bases)) 89 | } 90 | } 91 | 92 | impl, W: Debug + io::Write, P: Debug + RngCore> 93 | gkr::transcript::TranscriptWrite for RngTranscript 94 | { 95 | fn write_felt(&mut self, felt: &F) -> Result<(), Error> { 96 | let mut repr = felt.to_repr(); 97 | repr.as_mut().reverse(); 98 | self.stream 99 | .write_all(repr.as_ref()) 100 | .map_err(|err| Error::Transcript(err.kind(), err.to_string())) 101 | } 102 | 103 | fn write_felt_ext(&mut self, felt: &E) -> Result<(), Error> { 104 | felt.as_bases() 
105 | .iter() 106 | .try_for_each(|base| gkr::transcript::TranscriptWrite::::write_felt(self, base)) 107 | } 108 | } 109 | 110 | fn err_invalid_felt() -> Error { 111 | Error::Transcript( 112 | io::ErrorKind::Other, 113 | "Invalid field element read from stream".to_string(), 114 | ) 115 | } 116 | 117 | pub type Keccak256Transcript = FiatShamirTranscript; 118 | 119 | #[derive(Debug, Default)] 120 | pub struct FiatShamirTranscript { 121 | state: H, 122 | stream: S, 123 | } 124 | 125 | impl Keccak256Transcript> { 126 | pub fn into_proof(self) -> Vec { 127 | self.stream 128 | } 129 | } 130 | 131 | impl<'a> Keccak256Transcript<&'a [u8]> { 132 | pub fn from_proof(proof: &'a [u8]) -> Keccak256Transcript> { 133 | Keccak256Transcript::new(io::Cursor::new(proof)) 134 | } 135 | } 136 | 137 | impl Keccak256Transcript { 138 | pub fn new(stream: S) -> Self { 139 | Self { 140 | stream, 141 | state: Keccak256::default(), 142 | } 143 | } 144 | } 145 | 146 | impl, S: Debug> gkr::transcript::Transcript 147 | for Keccak256Transcript 148 | { 149 | fn squeeze_challenge(&mut self) -> E { 150 | E::from_bases(&>::squeeze_challenges( 151 | self, 152 | E::DEGREE, 153 | )) 154 | } 155 | 156 | fn common_felt(&mut self, _: &F) {} 157 | } 158 | 159 | impl, S: Debug + io::Read> gkr::transcript::TranscriptRead 160 | for Keccak256Transcript 161 | { 162 | fn read_felt(&mut self) -> Result { 163 | let mut repr = ::Repr::default(); 164 | self.stream 165 | .read_exact(repr.as_mut()) 166 | .map_err(|err| Error::Transcript(err.kind(), err.to_string()))?; 167 | repr.as_mut().reverse(); 168 | let felt = F::from_repr_vartime(repr).ok_or_else(err_invalid_felt)?; 169 | Ok(felt) 170 | } 171 | 172 | fn read_felt_ext(&mut self) -> Result { 173 | let bases = iter::repeat_with(|| gkr::transcript::TranscriptRead::::read_felt(self)) 174 | .take(E::DEGREE) 175 | .try_collect::<_, Vec<_>, _>()?; 176 | Ok(E::from_bases(&bases)) 177 | } 178 | } 179 | 180 | impl, W: Debug + io::Write> 181 | 
gkr::transcript::TranscriptWrite for Keccak256Transcript 182 | { 183 | fn write_felt(&mut self, felt: &F) -> Result<(), Error> { 184 | let mut repr = felt.to_repr(); 185 | repr.as_mut().reverse(); 186 | self.stream 187 | .write_all(repr.as_ref()) 188 | .map_err(|err| Error::Transcript(err.kind(), err.to_string())) 189 | } 190 | 191 | fn write_felt_ext(&mut self, felt: &E) -> Result<(), Error> { 192 | felt.as_bases() 193 | .iter() 194 | .try_for_each(|base| gkr::transcript::TranscriptWrite::::write_felt(self, base)) 195 | } 196 | } 197 | 198 | impl FieldTranscript for FiatShamirTranscript { 199 | fn squeeze_challenge(&mut self) -> F { 200 | let hash = self.state.finalize_fixed_reset(); 201 | self.state.update(&hash); 202 | fe_mod_from_le_bytes(hash) 203 | } 204 | 205 | fn common_field_element(&mut self, fe: &F) -> Result<(), plonkish_backend::Error> { 206 | self.state.update_field_element(fe); 207 | Ok(()) 208 | } 209 | } 210 | 211 | impl FieldTranscriptRead for FiatShamirTranscript { 212 | fn read_field_element(&mut self) -> Result { 213 | let mut repr = ::Repr::default(); 214 | self.stream 215 | .read_exact(repr.as_mut()) 216 | .map_err(|err| plonkish_backend::Error::Transcript(err.kind(), err.to_string()))?; 217 | repr.as_mut().reverse(); 218 | let fe = F::from_repr_vartime(repr).ok_or_else(|| { 219 | plonkish_backend::Error::Transcript( 220 | io::ErrorKind::Other, 221 | "Invalid field element encoding in proof".to_string(), 222 | ) 223 | })?; 224 | self.common_field_element(&fe)?; 225 | Ok(fe) 226 | } 227 | } 228 | 229 | impl FieldTranscriptWrite for FiatShamirTranscript { 230 | fn write_field_element(&mut self, fe: &F) -> Result<(), plonkish_backend::Error> { 231 | self.common_field_element(fe)?; 232 | let mut repr = fe.to_repr(); 233 | repr.as_mut().reverse(); 234 | self.stream 235 | .write_all(repr.as_ref()) 236 | .map_err(|err| plonkish_backend::Error::Transcript(err.kind(), err.to_string())) 237 | } 238 | } 239 | 240 | impl Transcript, F> for 
Keccak256Transcript { 241 | fn common_commitment( 242 | &mut self, 243 | comm: &Output, 244 | ) -> Result<(), plonkish_backend::Error> { 245 | self.state.update(comm); 246 | Ok(()) 247 | } 248 | } 249 | 250 | impl TranscriptRead, F> for Keccak256Transcript { 251 | fn read_commitment(&mut self) -> Result, plonkish_backend::Error> { 252 | let mut hash = Output::::default(); 253 | self.stream 254 | .read_exact(hash.as_mut()) 255 | .map_err(|err| plonkish_backend::Error::Transcript(err.kind(), err.to_string()))?; 256 | Ok(hash) 257 | } 258 | } 259 | 260 | impl TranscriptWrite, F> for Keccak256Transcript { 261 | fn write_commitment( 262 | &mut self, 263 | hash: &Output, 264 | ) -> Result<(), plonkish_backend::Error> { 265 | self.stream 266 | .write_all(hash) 267 | .map_err(|err| plonkish_backend::Error::Transcript(err.kind(), err.to_string()))?; 268 | Ok(()) 269 | } 270 | } 271 | -------------------------------------------------------------------------------- /lasso/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lasso-gkr" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | rand.workspace = true 8 | serde.workspace = true 9 | serde_json.workspace = true 10 | itertools.workspace = true 11 | prettytable.workspace = true 12 | gkr.workspace = true 13 | rayon= {workspace = true } 14 | digest.workspace = true 15 | typenum.workspace = true 16 | tracing.workspace = true 17 | tracing-subscriber.workspace = true 18 | tracing-forest.workspace = true 19 | plonkish_backend.workspace = true 20 | strum.workspace = true 21 | strum_macros.workspace = true 22 | enum_dispatch.workspace = true 23 | ark-std.workspace = true 24 | fixedbitset.workspace = true 25 | paste.workspace = true 26 | 27 | [features] 28 | sanity-check = [] 29 | 30 | [dev-dependencies] 31 | gkr = { workspace = true, features = ["dev"] } 32 | goldilocks.workspace = true 33 | 
-------------------------------------------------------------------------------- /lasso/src/lasso.rs: -------------------------------------------------------------------------------- 1 | use crate::memory_checking::{Chunk, Memory, MemoryCheckingProver}; 2 | use crate::table::LookupId; 3 | pub use crate::table::{LassoSubtable, LookupType, SubtableId, SubtableIndices}; 4 | use ark_std::log2; 5 | use gkr::{ 6 | circuit::node::{CombinedEvalClaim, EvalClaim, Node}, 7 | ff_ext::ff::PrimeField, 8 | izip_par, 9 | poly::{box_dense_poly, BoxMultilinearPoly, DensePolynomial, MultilinearPoly}, 10 | sum_check::{generic::Generic, prove_sum_check, verify_sum_check, SumCheckPoly}, 11 | transcript::{TranscriptRead, TranscriptWrite}, 12 | util::{ 13 | arithmetic::{ExtensionField, Field}, 14 | chain, 15 | expression::Expression, 16 | izip, Itertools, 17 | }, 18 | Error, 19 | }; 20 | use plonkish_backend::{ 21 | poly::multilinear::MultilinearPolynomial, util::arithmetic::usize_from_bits_le, 22 | }; 23 | use rayon::prelude::*; 24 | use std::{ 25 | collections::{BTreeMap, HashMap}, 26 | iter, 27 | sync::Arc, 28 | }; 29 | use tracing::info_span; 30 | 31 | #[derive(Debug)] 32 | pub struct LassoNode { 33 | num_vars: usize, 34 | preprocessing: LassoPreprocessing, 35 | lookups: Vec, 36 | } 37 | 38 | impl, const C: usize, const M: usize> Node 39 | for LassoNode 40 | { 41 | fn is_input(&self) -> bool { 42 | false 43 | } 44 | 45 | fn log2_input_size(&self) -> usize { 46 | self.num_vars.max(log2(M) as usize) 47 | } 48 | 49 | fn log2_output_size(&self) -> usize { 50 | 0 51 | } 52 | 53 | fn evaluate(&self, _: Vec<&BoxMultilinearPoly>) -> BoxMultilinearPoly<'static, F, E> { 54 | box_dense_poly([F::ZERO]) 55 | } 56 | 57 | #[tracing::instrument(skip_all, name = "LassoNode::prove_claim_reduction")] 58 | fn prove_claim_reduction( 59 | &self, 60 | _: CombinedEvalClaim, 61 | inputs: Vec<&BoxMultilinearPoly>, 62 | transcript: &mut dyn TranscriptWrite, 63 | ) -> Result>>, Error> { 64 | let polys = 
self.polynomialize(inputs[0]); 65 | let mock_lookup = self.preprocessing.lookups.values().next().unwrap(); 66 | 67 | let LassoPolynomials { 68 | dims, 69 | read_cts: read_ts_polys, 70 | final_cts: final_cts_polys, 71 | e_polys, 72 | lookup_outputs, 73 | lookup_flag_polys, 74 | lookup_flag_bitvectors: _, 75 | } = polys; 76 | 77 | assert!(inputs[0].to_dense() == lookup_outputs.to_dense()); 78 | 79 | let num_vars = lookup_outputs.num_vars(); 80 | assert_eq!(num_vars, self.num_vars); 81 | 82 | assert_eq!(final_cts_polys[0].num_vars(), log2(M) as usize); 83 | 84 | // should this be passed from CombinedEvalClaim? 85 | let r = transcript.squeeze_challenges(num_vars); 86 | 87 | let res = self.prove_collation_sum_check( 88 | &lookup_outputs, 89 | mock_lookup, 90 | &e_polys, 91 | &lookup_flag_polys, 92 | &r, 93 | num_vars, 94 | transcript, 95 | )?; 96 | 97 | let lookup_output_eval_claim = res.into_iter().take(1).collect_vec(); 98 | 99 | let [gamma, tau] = transcript.squeeze_challenges(2).try_into().unwrap(); 100 | 101 | // memory_checking 102 | Self::prove_memory_checking( 103 | &self.preprocessing, 104 | &dims, 105 | &read_ts_polys, 106 | &final_cts_polys, 107 | &e_polys, 108 | &gamma, 109 | &tau, 110 | transcript, 111 | )?; 112 | 113 | Ok(lookup_output_eval_claim) 114 | } 115 | 116 | #[tracing::instrument(skip_all, name = "LassoNode::verify_claim_reduction")] 117 | fn verify_claim_reduction( 118 | &self, 119 | _: CombinedEvalClaim, 120 | transcript: &mut dyn TranscriptRead, 121 | ) -> Result>>, Error> { 122 | let lookup = self.preprocessing.lookups.values().next().unwrap(); 123 | let num_vars = self.num_vars; 124 | let r = transcript.squeeze_challenges(num_vars); 125 | 126 | let g = self.collation_sum_check_function(lookup, num_vars); 127 | let claimed_sum = transcript.read_felt_ext()?; 128 | 129 | let _ = info_span!("LassoNode::verify_collation_sum_check") 130 | .in_scope(|| verify_sum_check(&g, claimed_sum, transcript))?; 131 | 132 | // // Round n+1 133 | let [gamma, 
tau] = transcript.squeeze_challenges(2).try_into().unwrap(); 134 | 135 | // // memory checking 136 | Self::verify_memory_checking(&self.preprocessing, num_vars, &gamma, &tau, transcript)?; 137 | 138 | Ok(chain![iter::once(vec![EvalClaim::new(r.to_vec(), claimed_sum)])].collect_vec()) 139 | } 140 | } 141 | 142 | impl, const C: usize, const M: usize> LassoNode { 143 | pub fn new( 144 | // table: Box>, 145 | preprocessing: LassoPreprocessing, 146 | num_vars: usize, 147 | lookups: Vec, 148 | ) -> Self { 149 | Self { 150 | num_vars, 151 | preprocessing, 152 | lookups, 153 | } 154 | } 155 | 156 | #[tracing::instrument(skip_all, name = "LassoNode::polynomialize")] 157 | pub fn polynomialize<'a>( 158 | &self, 159 | inputs: &BoxMultilinearPoly, 160 | ) -> LassoPolynomials<'a, F, E> { 161 | let num_reads = inputs.len().next_power_of_two(); 162 | 163 | // subtable_lookup_indices : [[usize; num_rows]; num_chunks] 164 | let subtable_lookup_indices = self.subtable_lookup_indices(inputs); 165 | 166 | let lookup_inputs = izip!(0..inputs.len(), self.lookups.clone()) 167 | .map(|(i, lookup_id)| (i, self.preprocessing.lookup_id_to_index[&lookup_id])) 168 | .collect_vec(); 169 | 170 | let polys: Vec<_> = (0..self.preprocessing.num_memories) 171 | .into_par_iter() 172 | .map(|memory_index| { 173 | let dim_index = self.preprocessing.memory_to_dimension_index[memory_index]; 174 | let subtable_index = self.preprocessing.memory_to_subtable_index[memory_index]; 175 | let access_sequence = &subtable_lookup_indices[dim_index]; 176 | 177 | let mut final_cts_i = vec![0usize; M]; 178 | let mut read_cts_i = vec![0usize; num_reads]; 179 | let mut subtable_lookups = vec![F::ZERO; num_reads]; 180 | 181 | for (j, lookup) in &lookup_inputs { 182 | let memories_used = &self.preprocessing.lookup_to_memory_indices[*lookup]; 183 | if memories_used.contains(&memory_index) { 184 | let memory_address = access_sequence[*j]; 185 | debug_assert!(memory_address < M); 186 | 187 | let counter = 
final_cts_i[memory_address]; 188 | read_cts_i[*j] = counter; 189 | final_cts_i[memory_address] = counter + 1; 190 | subtable_lookups[*j] = self 191 | .preprocessing 192 | .materialized_subtables 193 | .as_ref() 194 | .expect("subtables not materialized")[subtable_index][memory_address]; 195 | } 196 | } 197 | 198 | ( 199 | DensePolynomial::from_usize::(&read_cts_i), 200 | DensePolynomial::from_usize::(&final_cts_i), 201 | box_dense_poly(subtable_lookups), 202 | ) 203 | }) 204 | .collect(); 205 | 206 | // Vec<(DensePolynomial, DensePolynomial, DensePolynomial)> -> (Vec>, Vec>, Vec>) 207 | let (read_cts, final_cts, e_polys) = polys.into_iter().fold( 208 | (Vec::new(), Vec::new(), Vec::new()), 209 | |(mut read_acc, mut final_acc, mut e_acc), (read, f, e)| { 210 | read_acc.push(read); 211 | final_acc.push(f); 212 | e_acc.push(e); 213 | (read_acc, final_acc, e_acc) 214 | }, 215 | ); 216 | let dims: Vec<_> = subtable_lookup_indices 217 | .into_par_iter() 218 | .take(C) 219 | .map(|mut access_sequence| { 220 | access_sequence.resize(access_sequence.len().next_power_of_two(), 0); 221 | DensePolynomial::from_usize(&access_sequence) 222 | }) 223 | .collect(); 224 | 225 | let mut lookup_flag_bitvectors: Vec> = 226 | vec![vec![0u64; num_reads]; self.preprocessing.lookups.len()]; 227 | 228 | for (j, lookup_idx) in lookup_inputs.into_iter() { 229 | lookup_flag_bitvectors[lookup_idx][j] = 1; 230 | } 231 | 232 | let lookup_flag_polys: Vec<_> = lookup_flag_bitvectors 233 | .par_iter() 234 | .map(|flag_bitvector| DensePolynomial::from_u64(flag_bitvector)) 235 | .collect(); 236 | 237 | let mut lookup_outputs = self.compute_lookup_outputs(inputs); 238 | lookup_outputs.resize(num_reads, F::ZERO); 239 | let lookup_outputs = box_dense_poly(lookup_outputs); 240 | 241 | LassoPolynomials { 242 | dims, 243 | read_cts, 244 | final_cts, 245 | lookup_flag_polys, 246 | lookup_flag_bitvectors, 247 | e_polys, 248 | lookup_outputs, 249 | } 250 | } 251 | 252 | #[allow(clippy::too_many_arguments)] 253 
| #[tracing::instrument(skip_all, name = "LassoNode::prove_collation_sum_check")] 254 | pub fn prove_collation_sum_check( 255 | &self, 256 | lookup_output_poly: &BoxMultilinearPoly, 257 | lookup: &Box>, 258 | e_polys: &[BoxMultilinearPoly], 259 | flag_polys: &[BoxMultilinearPoly], 260 | r: &[E], 261 | num_vars: usize, 262 | transcript: &mut dyn TranscriptWrite, 263 | ) -> Result>>, Error> { 264 | let claimed_sum = self.sum_check_claim(r, e_polys, flag_polys); 265 | if cfg!(feature = "sanity-check") { 266 | assert_eq!(claimed_sum, lookup_output_poly.evaluate(r)); 267 | } 268 | 269 | transcript.write_felt_ext(&claimed_sum)?; 270 | 271 | let g = self.collation_sum_check_function(lookup, num_vars); 272 | 273 | let polys = e_polys 274 | .iter() 275 | .map(|e_poly| SumCheckPoly::Base::<_, _, _, BoxMultilinearPoly>(e_poly)) 276 | .collect_vec(); 277 | 278 | let (_claim, r_x_prime, e_polys_evals) = 279 | prove_sum_check(&g, claimed_sum, polys, transcript)?; 280 | 281 | Ok(chain![ 282 | iter::once(vec![EvalClaim::new(r.to_vec(), claimed_sum)]), 283 | e_polys_evals 284 | .into_iter() 285 | .map(|e| vec![EvalClaim::new(r_x_prime.clone(), e)]) 286 | ] 287 | .collect_vec()) 288 | } 289 | 290 | #[allow(clippy::too_many_arguments)] 291 | #[tracing::instrument(skip_all, name = "LassoNode::prove_memory_checking")] 292 | fn prove_memory_checking<'a>( 293 | preprocessing: &'a LassoPreprocessing, 294 | dims: &'a [BoxMultilinearPoly<'a, F, E>], 295 | read_ts_polys: &'a [BoxMultilinearPoly<'a, F, E>], 296 | final_cts_polys: &'a [BoxMultilinearPoly<'a, F, E>], 297 | e_polys: &'a [BoxMultilinearPoly<'a, F, E>], 298 | gamma: &E, 299 | tau: &E, 300 | transcript: &mut dyn TranscriptWrite, 301 | ) -> Result<(), Error> { 302 | // key: chunk index, value: chunk 303 | let mut chunk_map: HashMap> = HashMap::new(); 304 | 305 | let num_memories = preprocessing.num_memories; 306 | let memories = (0..num_memories).map(|memory_index| { 307 | let subtable_poly = &preprocessing 308 | 
.materialized_subtables 309 | .as_ref() 310 | .expect("subtables not materialized") 311 | [preprocessing.memory_to_subtable_index[memory_index]]; 312 | Memory::::new(subtable_poly, &e_polys[memory_index]) 313 | }); 314 | memories.enumerate().for_each(|(memory_index, memory)| { 315 | let chunk_index = preprocessing.memory_to_dimension_index[memory_index]; 316 | if let std::collections::hash_map::Entry::Vacant(e) = chunk_map.entry(chunk_index) { 317 | let dim = &dims[chunk_index]; 318 | let read_ts_poly = &read_ts_polys[chunk_index]; 319 | let final_cts_poly = &final_cts_polys[chunk_index]; 320 | e.insert(Chunk::new( 321 | chunk_index, 322 | dim, 323 | read_ts_poly, 324 | final_cts_poly, 325 | memory, 326 | )); 327 | } else { 328 | chunk_map.entry(chunk_index).and_modify(|chunk| { 329 | chunk.add_memory(memory); 330 | }); 331 | } 332 | }); 333 | 334 | let mut chunks = chunk_map.into_iter().collect_vec(); 335 | chunks.sort_by_key(|(chunk_index, _)| *chunk_index); 336 | let chunks = chunks.into_iter().map(|(_, chunk)| chunk).collect_vec(); 337 | 338 | MemoryCheckingProver::new(chunks, tau, gamma).prove(transcript) 339 | } 340 | 341 | #[tracing::instrument(skip_all, name = "LassoNode::verify_memory_checking")] 342 | fn verify_memory_checking( 343 | preprocessing: &LassoPreprocessing, 344 | num_vars: usize, 345 | gamma: &E, 346 | tau: &E, 347 | transcript: &mut dyn TranscriptRead, 348 | ) -> Result<(), Error> { 349 | let num_memories = preprocessing.num_memories; 350 | let mut chunk_map: HashMap> = 351 | HashMap::new(); 352 | (0..num_memories).for_each(|memory_index| { 353 | let chunk_index = preprocessing.memory_to_dimension_index[memory_index]; 354 | let subtable_poly = &preprocessing.subtables_by_idx.as_ref().unwrap() 355 | [preprocessing.memory_to_subtable_index[memory_index]]; 356 | let memory = 357 | crate::memory_checking::verifier::Memory::new(memory_index, subtable_poly.clone()); 358 | 359 | if let std::collections::hash_map::Entry::Vacant(e) = 
chunk_map.entry(chunk_index) { 360 | e.insert(crate::memory_checking::verifier::Chunk::new( 361 | chunk_index, 362 | log2(M) as usize, 363 | memory, 364 | )); 365 | } else { 366 | chunk_map.entry(chunk_index).and_modify(|chunk| { 367 | chunk.add_memory(memory); 368 | }); 369 | } 370 | }); 371 | 372 | let mut chunks = chunk_map.into_iter().collect_vec(); 373 | chunks.sort_by_key(|(chunk_index, _)| *chunk_index); 374 | let chunks = chunks.into_iter().map(|(_, chunk)| chunk).collect_vec(); 375 | 376 | let mem_check = crate::memory_checking::verifier::MemoryCheckingVerifier::new(chunks); 377 | mem_check.verify(num_vars, gamma, tau, transcript) 378 | } 379 | 380 | #[tracing::instrument(skip_all, name = "LassoNode::subtable_lookup_indices")] 381 | fn subtable_lookup_indices(&self, inputs: &BoxMultilinearPoly) -> Vec> { 382 | let num_rows: usize = inputs.len(); 383 | let num_chunks = C; 384 | 385 | let indices: Vec<_> = izip_par!((0..num_rows), &self.lookups) 386 | .map(|(i, lookup_id)| { 387 | let lookup = &self.preprocessing.lookups[lookup_id]; 388 | let mut index_bits = fe_to_bits_le(inputs[i]); 389 | index_bits.truncate(lookup.chunk_bits(M).iter().sum()); 390 | if cfg!(feature = "sanity-check") { 391 | assert_eq!( 392 | usize_from_bits_le(&fe_to_bits_le(inputs[i])), 393 | usize_from_bits_le(&index_bits), 394 | "index {i} out of range", 395 | ); 396 | } 397 | let mut chunked_index = iter::repeat(0).take(num_chunks).collect_vec(); 398 | let chunked_index_bits = lookup.subtable_indices(index_bits, M.ilog2() as usize); 399 | chunked_index 400 | .iter_mut() 401 | .zip(chunked_index_bits) 402 | .map(|(chunked_index, index_bits)| { 403 | *chunked_index = usize_from_bits_le(&index_bits); 404 | }) 405 | .collect_vec(); 406 | chunked_index 407 | }) 408 | .collect(); 409 | 410 | let lookup_indices = (0..num_chunks) 411 | .map(|i| indices.iter().map(|indices| indices[i]).collect_vec()) 412 | .collect_vec(); 413 | lookup_indices 414 | } 415 | 416 | fn compute_lookup_outputs(&self, 
inputs: &BoxMultilinearPoly) -> Vec { 417 | izip_par!(inputs.as_dense().unwrap(), &self.lookups) 418 | .map(|(i, lookup_id)| self.preprocessing.lookups[lookup_id].output(i)) 419 | .collect() 420 | } 421 | 422 | pub fn sum_check_claim( 423 | &self, 424 | r: &[E], // claim: CombinedEvalClaim, 425 | e_polys: &[BoxMultilinearPoly], 426 | flag_polys: &[BoxMultilinearPoly], 427 | ) -> E { 428 | let num_memories = self.preprocessing.num_memories; 429 | assert_eq!(e_polys.len(), num_memories); 430 | let num_vars = e_polys[0].num_vars(); 431 | let bh_size = 1 << num_vars; 432 | let eq = MultilinearPolynomial::eq_xy(r); 433 | // \sum_{k \in \{0, 1\}^{\log m}} (\tilde{eq}(r, k) * g(E_1(k), ..., E_{\alpha}(k))) 434 | let claim = (0..bh_size) 435 | .into_par_iter() 436 | .map(|k| { 437 | eq[k] 438 | * izip!(flag_polys, self.preprocessing.lookups.values()) 439 | .enumerate() 440 | .map(|(lookup_idx, (flag_poly, lookup))| { 441 | let operands: Vec<_> = self.preprocessing.lookup_to_memory_indices 442 | [lookup_idx] 443 | .par_iter() 444 | .map(|memory_index| e_polys[*memory_index][k]) 445 | .collect(); 446 | 447 | flag_poly[k] * lookup.combine_lookups(&operands, C, M) 448 | }) 449 | .sum::() 450 | }) 451 | .sum(); 452 | 453 | claim 454 | } 455 | 456 | // (\tilde{eq}(r, k) * g(E_1(k), ..., E_{\alpha}(k))) 457 | pub fn collation_sum_check_function( 458 | &self, 459 | lookup: &Box>, 460 | num_vars: usize, 461 | ) -> Generic { 462 | let num_memories = self.preprocessing.num_memories; 463 | // TODO: expression with flag polys 464 | let exprs = lookup.combine_lookup_expressions( 465 | (0..num_memories) 466 | .map(|idx| Expression::poly(idx)) 467 | .collect_vec(), 468 | C, 469 | M, 470 | ); 471 | 472 | let eq_r_x = &Expression::poly(0); 473 | 474 | Generic::new(num_vars, &(eq_r_x * exprs)) 475 | } 476 | } 477 | 478 | /// All polynomials associated with Jolt instruction lookups. 
479 | pub struct LassoPolynomials<'a, F: PrimeField, E: ExtensionField> { 480 | /// `C` sized vector of `DensePolynomials` whose evaluations correspond to 481 | /// indices at which the memories will be evaluated. Each `DensePolynomial` has size 482 | /// `m` (# lookups). 483 | pub dims: Vec>, 484 | 485 | /// `NUM_MEMORIES` sized vector of `DensePolynomials` whose evaluations correspond to 486 | /// read access counts to the memory. Each `DensePolynomial` has size `m` (# lookups). 487 | pub read_cts: Vec>, 488 | 489 | /// `NUM_MEMORIES` sized vector of `DensePolynomials` whose evaluations correspond to 490 | /// final access counts to the memory. Each `DensePolynomial` has size M, AKA subtable size. 491 | pub final_cts: Vec>, 492 | 493 | /// `NUM_MEMORIES` sized vector of `DensePolynomials` whose evaluations correspond to 494 | /// the evaluation of memory accessed at each step of the CPU. Each `DensePolynomial` has 495 | /// size `m` (# lookups). 496 | pub e_polys: Vec>, 497 | 498 | /// Polynomial encodings for flag polynomials for each instruction. 499 | /// If using a single instruction this will be empty. 500 | /// NUM_INSTRUCTIONS sized, each polynomial of length 'm' (# lookups). 501 | /// 502 | /// Stored independently for use in sumcheck, combined into single DensePolynomial for commitment. 503 | pub lookup_flag_polys: Vec>, 504 | 505 | /// Instruction flag polynomials as bitvectors, kept in this struct for more efficient 506 | /// construction of the memory flag polynomials in `read_write_grand_product`. 507 | pub lookup_flag_bitvectors: Vec>, 508 | /// The lookup output for each instruction of the execution trace. 
509 | pub lookup_outputs: BoxMultilinearPoly<'a, F, E>, 510 | } 511 | 512 | #[derive(Debug)] 513 | pub struct LassoPreprocessing { 514 | subtable_to_memory_indices: Vec>, 515 | lookup_to_memory_indices: Vec>, 516 | memory_to_subtable_index: Vec, 517 | memory_to_dimension_index: Vec, 518 | materialized_subtables: Option>>, 519 | subtables_by_idx: Option>>>, 520 | lookups: Arc>>>, 521 | lookup_id_to_index: HashMap, 522 | num_memories: usize, 523 | } 524 | 525 | impl> LassoPreprocessing { 526 | #[tracing::instrument(skip_all, name = "LassoNode::preprocess")] 527 | pub fn preprocess( 528 | lookups: impl IntoIterator>>, 529 | ) -> Self { 530 | let lookups = BTreeMap::from_iter( 531 | lookups 532 | .into_iter() 533 | .map(|lookup| (lookup.lookup_id(), lookup)), 534 | ); 535 | 536 | let lookup_id_to_index: HashMap<_, _> = HashMap::from_iter( 537 | lookups 538 | .keys() 539 | .enumerate() 540 | .map(|(i, lookup_id)| (lookup_id.clone(), i)), 541 | ); 542 | 543 | let subtables = lookups 544 | .values() 545 | .flat_map(|lookup| { 546 | lookup 547 | .subtables(C, M) 548 | .into_iter() 549 | .map(|(subtable, _)| subtable) 550 | }) 551 | .unique_by(|subtable| subtable.subtable_id()) 552 | .collect_vec(); 553 | 554 | // Build a mapping from subtable type => chunk indices that access that subtable type 555 | let mut subtable_indices: Vec = 556 | vec![SubtableIndices::with_capacity(C); subtables.len()]; 557 | let mut subtables_by_idx = vec![None; subtables.len()]; 558 | let mut subtable_id_to_index = HashMap::with_capacity(subtables.len()); 559 | for (_, lookup) in &lookups { 560 | for (subtable, indices) in lookup.subtables(C, M).into_iter() { 561 | let subtable_idx = subtable_id_to_index 562 | .entry(subtable.subtable_id()) 563 | .or_insert_with(|| { 564 | subtables 565 | .iter() 566 | .position(|s| s.subtable_id() == subtable.subtable_id()) 567 | .expect("Subtable not found") 568 | }); 569 | subtables_by_idx[*subtable_idx].get_or_insert(subtable); 570 | 
subtable_indices[*subtable_idx].union_with(&indices); 571 | } 572 | } 573 | 574 | let mut subtable_to_memory_indices = Vec::with_capacity(subtables.len()); 575 | let mut memory_to_subtable_index = vec![]; 576 | let mut memory_to_dimension_index = vec![]; 577 | 578 | let mut memory_index = 0; 579 | for (subtable_index, dimension_indices) in subtable_indices.iter().enumerate() { 580 | subtable_to_memory_indices 581 | .push((memory_index..memory_index + dimension_indices.len()).collect_vec()); 582 | memory_to_subtable_index.extend(vec![subtable_index; dimension_indices.len()]); 583 | memory_to_dimension_index.extend(dimension_indices.iter()); 584 | memory_index += dimension_indices.len(); 585 | } 586 | let num_memories = memory_index; 587 | 588 | // instruction is a type of lookup 589 | // assume all instreuctions are the same first 590 | let mut lookup_to_memory_indices = vec![vec![]; lookups.len()]; 591 | for (lookup_index, lookup_type) in lookups.values().enumerate() { 592 | for (subtable, dimension_indices) in lookup_type.subtables(C, M) { 593 | let memory_indices: Vec<_> = subtable_to_memory_indices 594 | [subtable_id_to_index[&subtable.subtable_id()]] 595 | .iter() 596 | .filter(|memory_index| { 597 | dimension_indices.contains(memory_to_dimension_index[**memory_index]) 598 | }) 599 | .collect(); 600 | lookup_to_memory_indices[lookup_index].extend(memory_indices); 601 | } 602 | } 603 | 604 | let materialized_subtables = Some( 605 | Self::materialize_subtables::(&subtables) 606 | .into_iter() 607 | .map(box_dense_poly) 608 | .collect_vec(), 609 | ); 610 | 611 | Self { 612 | num_memories, 613 | materialized_subtables, 614 | subtable_to_memory_indices, 615 | memory_to_subtable_index, 616 | memory_to_dimension_index, 617 | lookup_to_memory_indices, 618 | subtables_by_idx: Some( 619 | subtables_by_idx 620 | .into_iter() 621 | .map(|s| s.unwrap()) 622 | .collect_vec(), 623 | ), 624 | lookup_id_to_index, 625 | lookups: Arc::new(lookups), 626 | } 627 | } 628 | 629 | fn 
materialize_subtables( 630 | subtables: &[Box>], 631 | ) -> Vec> { 632 | let mut s = Vec::with_capacity(subtables.len()); 633 | for subtable in subtables.iter() { 634 | s.push(subtable.materialize(M)); 635 | } 636 | s 637 | } 638 | 639 | pub fn to_verifier_preprocessing(&mut self) -> LassoPreprocessing { 640 | LassoPreprocessing { 641 | subtable_to_memory_indices: self.subtable_to_memory_indices.clone(), 642 | lookup_to_memory_indices: self.lookup_to_memory_indices.clone(), 643 | memory_to_subtable_index: self.memory_to_subtable_index.clone(), 644 | memory_to_dimension_index: self.memory_to_dimension_index.clone(), 645 | materialized_subtables: None, 646 | subtables_by_idx: self.subtables_by_idx.take(), 647 | num_memories: self.num_memories, 648 | lookup_id_to_index: self.lookup_id_to_index.clone(), 649 | lookups: self.lookups.clone(), 650 | } 651 | } 652 | } 653 | 654 | pub fn fe_to_bits_le(fe: F) -> Vec { 655 | let repr = fe.to_repr(); 656 | let bytes = repr.as_ref(); 657 | bytes 658 | .iter() 659 | .flat_map(|byte| { 660 | let value = u8::from_le(*byte); 661 | let mut bits = vec![]; 662 | for i in 0..8 { 663 | let mask = 1 << i; 664 | bits.push(value & mask > 0); 665 | } 666 | bits 667 | }) 668 | .collect_vec() 669 | } 670 | -------------------------------------------------------------------------------- /lasso/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_snake_case)] 2 | #![allow(clippy::needless_range_loop)] 3 | #![feature(generic_arg_infer)] 4 | 5 | pub mod memory_checking; 6 | pub mod table; 7 | pub mod lasso; 8 | 9 | pub use lasso::{LassoNode, LassoPreprocessing}; 10 | -------------------------------------------------------------------------------- /lasso/src/memory_checking/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod prover; 2 | pub mod verifier; 3 | 4 | use gkr::{ 5 | ff_ext::{ff::PrimeField, ExtensionField}, 6 | 
poly::BoxMultilinearPoly,
};
use itertools::{chain, Itertools};
pub use prover::MemoryCheckingProver;

// NOTE(review): generic parameter lists were stripped from this region by the
// dump's extraction; the `<'a, F, E>` instantiations below are reconstructed
// from the visible bounds — confirm against the repository.

/// The four grand-product inputs of offline memory checking for one memory.
/// The multiset identity checked downstream is
///   init * write == read * final_read.
#[derive(Debug)]
struct MemoryGKR<'a, F: PrimeField, E: ExtensionField<F>> {
    init: BoxMultilinearPoly<'a, F, E>,
    read: BoxMultilinearPoly<'a, F, E>,
    write: BoxMultilinearPoly<'a, F, E>,
    final_read: BoxMultilinearPoly<'a, F, E>,
}

impl<'a, F: PrimeField, E: ExtensionField<F>> MemoryGKR<'a, F, E> {
    pub fn new(
        init: BoxMultilinearPoly<'a, F, E>,
        read: BoxMultilinearPoly<'a, F, E>,
        write: BoxMultilinearPoly<'a, F, E>,
        final_read: BoxMultilinearPoly<'a, F, E>,
    ) -> Self {
        Self {
            init,
            read,
            write,
            final_read,
        }
    }
}

/// A dimension of the Lasso decomposition: one `dim`/`read_ts`/`final_cts`
/// triple shared by every memory (subtable) indexed through this chunk.
#[derive(Clone, Debug)]
pub struct Chunk<'a, F: PrimeField, E: ExtensionField<F>> {
    pub(super) chunk_index: usize,
    pub(super) dim: &'a BoxMultilinearPoly<'a, F, E>,
    pub(super) read_ts_poly: &'a BoxMultilinearPoly<'a, F, E>,
    pub(super) final_cts_poly: &'a BoxMultilinearPoly<'a, F, E>,
    pub(super) memories: Vec<Memory<'a, F, E>>,
}

impl<'a, F: PrimeField, E: ExtensionField<F>> Chunk<'a, F, E> {
    pub(crate) fn new(
        chunk_index: usize,
        dim: &'a BoxMultilinearPoly<'a, F, E>,
        read_ts_poly: &'a BoxMultilinearPoly<'a, F, E>,
        final_cts_poly: &'a BoxMultilinearPoly<'a, F, E>,
        memory: Memory<'a, F, E>,
    ) -> Self {
        // sanity check: one read timestamp per lookup index
        assert_eq!(dim.num_vars(), read_ts_poly.num_vars());

        Self {
            chunk_index,
            dim,
            read_ts_poly,
            final_cts_poly,
            memories: vec![memory],
        }
    }

    pub fn chunk_index(&self) -> usize {
        self.chunk_index
    }

    /// log2 of the subtable size (address space of this chunk).
    pub fn chunk_bits(&self) -> usize {
        self.final_cts_poly.num_vars()
    }

    /// Number of lookups (reads) performed through this chunk.
    pub fn num_reads(&self) -> usize {
        1 << self.dim.num_vars()
    }

    /// The chunk-level polynomials in the fixed order dim, read_ts, final_cts.
    pub fn chunk_polys(&self) -> impl Iterator<Item = &BoxMultilinearPoly<'a, F, E>> {
        chain!([self.dim, self.read_ts_poly, self.final_cts_poly])
    }

    /// Openings at the two grand-product points: `x` for the read side
    /// (dim, read_ts), `y` for the init/final side (final_cts).
    pub fn chunk_poly_evals(&self, x: &[E], y: &[E]) -> Vec<E> {
        vec![
            self.dim.evaluate(x),
            self.read_ts_poly.evaluate(x),
            self.final_cts_poly.evaluate(y),
        ]
    }

    pub fn e_poly_evals(&self, x: &[E]) -> Vec<E> {
        self.memories
            .iter()
            .map(|memory| memory.e_poly.evaluate(x))
            .collect_vec()
    }

    pub(super) fn memories(&self) -> impl Iterator<Item = &Memory<'a, F, E>> {
        self.memories.iter()
    }

    pub(super) fn add_memory(&mut self, memory: Memory<'a, F, E>) {
        // sanity check: the new memory must share this chunk's dimensions
        assert_eq!(self.chunk_bits(), memory.subtable_poly.num_vars());
        assert_eq!(self.num_reads(), 1 << memory.e_poly.num_vars());

        self.memories.push(memory);
    }
}

/// One memory: a materialized subtable plus the E polynomial of the values
/// actually read from it.
#[derive(Clone, Debug)]
pub(super) struct Memory<'a, F: PrimeField, E: ExtensionField<F>> {
    subtable_poly: &'a BoxMultilinearPoly<'a, F, E>,
    pub(crate) e_poly: &'a BoxMultilinearPoly<'a, F, E>,
}

impl<'a, F: PrimeField, E: ExtensionField<F>> Memory<'a, F, E> {
    pub(crate) fn new(
        subtable_poly: &'a BoxMultilinearPoly<'a, F, E>,
        e_poly: &'a BoxMultilinearPoly<'a, F, E>,
    ) -> Self {
        Self {
            subtable_poly,
            e_poly,
        }
    }

    /// The memory's polynomials in the fixed order subtable, e.
    pub fn polys(&'a self) -> impl Iterator<Item = &'a BoxMultilinearPoly<'a, F, E>> {
        chain!([self.subtable_poly, self.e_poly])
    }
}

// -----------------------------------------------------------------------------
// /lasso/src/memory_checking/prover.rs
// -----------------------------------------------------------------------------
use itertools::{chain, izip, Itertools};
use plonkish_backend::util::parallel::{num_threads, parallelize_iter};
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
use std::{array, iter};

use gkr::{
    ff_ext::{
        ff::{Field, PrimeField},
        ExtensionField,
    },
    poly::{box_dense_poly,
BoxMultilinearPoly},
    sum_check::{generic::Generic, prove_sum_check, SumCheckPoly},
    transcript::TranscriptWrite,
    util::{
        arithmetic::{div_ceil, inner_product, powers},
        expression::Expression,
    },
    Error,
};

use super::{Chunk, MemoryGKR};

// NOTE(review): generic parameter lists were stripped from this region by the
// dump's extraction; the `<'a, F, E>` instantiations below are reconstructed
// from the visible bounds — confirm against the repository.

pub struct MemoryCheckingProver<'a, F: PrimeField, E: ExtensionField<F>> {
    /// chunks with the same bits size
    chunks: Vec<Chunk<'a, F, E>>,
    /// GKR initial polynomials for each memory
    memories: Vec<MemoryGKR<'a, F, E>>,
}

impl<'a, F: PrimeField + Field, E: ExtensionField<F>> MemoryCheckingProver<'a, F, E> {
    // T_1[dim_1(x)], ..., T_k[dim_1(x)],
    // ...
    // T_{\alpha-k+1}[dim_c(x)], ..., T_{\alpha}[dim_c(x)]
    pub fn new(chunks: Vec<Chunk<'a, F, E>>, tau: &E, gamma: &E) -> Self {
        // warning: this cast is insecure
        // TODO: need to rewrite memory checking to work over the extension field
        let tau = tau.as_bases()[0];
        let gamma = gamma.as_bases()[0];

        let num_reads = chunks[0].num_reads();
        let memory_size = 1 << chunks[0].chunk_bits();

        // Fingerprint of an (address, value, timestamp) tuple under (gamma, tau).
        let hash = |a: &F, v: &F, t: &F| -> F { *a + *v * gamma + *t * gamma.square() - tau };

        let memories_gkr: Vec<MemoryGKR<F, E>> = (0..chunks.len())
            .into_par_iter()
            .flat_map(|i| {
                let chunk = &chunks[i];
                let chunk_polys = chunk.chunk_polys().collect_vec();
                let (dim, read_ts_poly, final_cts_poly) =
                    (chunk_polys[0], chunk_polys[1], chunk_polys[2]);
                chunk
                    .memories()
                    .map(|memory| {
                        let memory_polys = memory.polys().collect_vec();
                        let (subtable_poly, e_poly) = (memory_polys[0], memory_polys[1]);
                        let mut init = vec![];
                        let mut read = vec![];
                        let mut write = vec![];
                        let mut final_read = vec![];
                        // init/final walk the address space; timestamps 0 and final_cts.
                        (0..memory_size).for_each(|i| {
                            init.push(hash(&F::from(i as u64), &subtable_poly[i], &F::ZERO));
                            final_read.push(hash(
                                &F::from(i as u64),
                                &subtable_poly[i],
                                &final_cts_poly[i],
                            ));
                        });
                        // read/write walk the trace; write bumps the timestamp by one.
                        (0..num_reads).for_each(|i| {
                            read.push(hash(&dim[i], &e_poly[i], &read_ts_poly[i]));
                            write.push(hash(&dim[i], &e_poly[i], &(read_ts_poly[i] + F::ONE)));
                        });
                        MemoryGKR::new(
                            box_dense_poly(init),
                            box_dense_poly(read),
                            box_dense_poly(write),
                            box_dense_poly(final_read),
                        )
                    })
                    .collect_vec()
            })
            .collect();

        Self {
            chunks,
            memories: memories_gkr,
        }
    }

    pub fn inits(&self) -> impl Iterator<Item = &BoxMultilinearPoly<F, E>> {
        self.memories.iter().map(|memory| &memory.init)
    }

    pub fn reads(&self) -> impl Iterator<Item = &BoxMultilinearPoly<F, E>> {
        self.memories.iter().map(|memory| &memory.read)
    }

    pub fn writes(&self) -> impl Iterator<Item = &BoxMultilinearPoly<F, E>> {
        self.memories.iter().map(|memory| &memory.write)
    }

    pub fn final_reads(&self) -> impl Iterator<Item = &BoxMultilinearPoly<F, E>> {
        self.memories.iter().map(|memory| &memory.final_read)
    }

    pub fn iter(
        &self,
    ) -> impl Iterator<
        Item = (
            &BoxMultilinearPoly<F, E>,
            &BoxMultilinearPoly<F, E>,
            &BoxMultilinearPoly<F, E>,
            &BoxMultilinearPoly<F, E>,
        ),
    > {
        self.memories.iter().map(|memory| {
            (
                &memory.init,
                &memory.read,
                &memory.write,
                &memory.final_read,
            )
        })
    }

    /// Products of each leaf layer, grouped read/write/init/final_read, with a
    /// debug check of the multiset identity init*write == read*final_read.
    pub fn claimed_v_0s(&self) -> impl IntoIterator<Item = Vec<Option<F>>> {
        let (claimed_read_0s, claimed_write_0s, claimed_init_0s, claimed_final_read_0s) = self
            .iter()
            .map(|(init, read, write, final_read)| {
                let claimed_init_0 = init.to_dense().iter().product();
                let claimed_read_0 = read.to_dense().iter().product();
                let claimed_write_0 = write.to_dense().iter().product();
                let claimed_final_read_0 = final_read.to_dense().iter().product();

                // sanity check
                debug_assert_eq!(
                    claimed_init_0 * claimed_write_0,
                    claimed_read_0 * claimed_final_read_0,
                    "Multiset hashes don't match",
                );
                (
                    Some(claimed_read_0),
                    Some(claimed_write_0),
                    Some(claimed_init_0),
                    Some(claimed_final_read_0),
                )
            })
            .multiunzip::<(Vec<_>, Vec<_>, Vec<_>, Vec<_>)>();
        chain!([
            claimed_read_0s,
            claimed_write_0s,
            claimed_init_0s,
            claimed_final_read_0s
        ])
    }

    /// Runs the two batched grand products (read/write at point `x`,
    /// init/final_read at point `y`) and writes per-chunk openings.
    pub fn prove(&mut self, transcript: &mut dyn TranscriptWrite<F, E>) -> Result<(), Error> {
        let num_batching = self.memories.len() * 2;

        let (_, x) = Self::prove_grand_product(
            num_batching,
            chain!(self.reads(), self.writes()),
            transcript,
        )?;

        let (_, y) = Self::prove_grand_product(
            num_batching,
            chain!(self.inits(), self.final_reads()),
            transcript,
        )?;

        self.chunks.iter().for_each(|chunk| {
            let chunk_poly_evals = chunk.chunk_poly_evals(&x, &y);
            let e_poly_xs = chunk.e_poly_evals(&x);
            transcript.write_felt_exts(&chunk_poly_evals).unwrap();
            transcript.write_felt_exts(&e_poly_xs).unwrap();
        });

        Ok(())
    }

    /// Batched GKR grand product: builds the product tree bottom-up, then
    /// proves it top-down, one sum-check per layer.
    fn prove_grand_product<'b>(
        num_batching: usize,
        vs: impl Iterator<Item = &'b BoxMultilinearPoly<'a, F, E>>,
        transcript: &mut dyn TranscriptWrite<F, E>,
    ) -> Result<(Vec<E>, Vec<E>), Error>
    where
        'a: 'b,
    {
        let bottom_layers = vs.map(Layer::bottom).collect_vec();
        let layers = iter::successors(bottom_layers.into(), |layers| {
            (layers[0].num_vars() > 0).then(|| layers.iter().map(Layer::up).collect())
        })
        .collect_vec();

        let claimed_v_0s = {
            let v_0s = chain![layers.last().unwrap()]
                .map(|layer| {
                    let [v_l, v_r] = layer.polys().map(|poly| poly[0]);
                    (v_l * v_r).into()
                })
                .collect_vec();

            let mut hash_to_transcript = |claimed: Vec<Option<E>>, computed: Vec<_>| {
                izip!(claimed, computed)
                    .map(|(claimed, computed)| match claimed {
                        Some(claimed) => {
                            if cfg!(feature = "sanity-check") {
                                assert_eq!(claimed, computed)
                            }
                            transcript.common_felts(computed.as_bases());
                            Ok(computed)
                        }
                        None => transcript.write_felt_ext(&computed).map(|_| computed),
                    })
                    .try_collect::<_, Vec<_>, _>()
            };

            hash_to_transcript(iter::repeat(None).take(num_batching).collect_vec(), v_0s)?
        };

        layers
            .iter()
            .rev()
            .try_fold((claimed_v_0s, Vec::new()), |result, layers| {
                let (claimed_v_ys, _y) = result;

                let num_vars = layers[0].num_vars();
                let polys = layers.iter().flat_map(|layer| layer.polys()).collect_vec();

                let (mut x, evals) = if num_vars == 0 {
                    (
                        vec![],
                        polys.into_iter().map(|poly| poly[0].into()).collect_vec(),
                    )
                } else {
                    let gamma = transcript.squeeze_challenge();

                    let g = Self::sum_check_function(num_vars, num_batching, gamma);

                    let (_, x, evals) = {
                        let claim = Self::sum_check_claim(&claimed_v_ys, gamma);
                        let polys = polys
                            .into_iter()
                            .map(|e_poly| {
                                SumCheckPoly::Base::<_, _, _, BoxMultilinearPoly<E, E>>(e_poly)
                            })
                            .collect_vec();

                        prove_sum_check(&g, claim, polys, transcript)?
                    };

                    (x, evals)
                };

                transcript.write_felt_exts(&evals)?;

                let mu = transcript.squeeze_challenge();

                let v_xs = Self::layer_down_claim(&evals, mu);
                x.push(mu);

                Ok((v_xs, x))
            })
    }

    /// Batched layer expression: sum_i gamma^i * v_l_i(x) * v_r_i(x).
    pub fn sum_check_function(num_vars: usize, num_batching: usize, gamma: E) -> Generic<F, E> {
        let exprs = &(0..2 * num_batching)
            .map(|idx| Expression::poly(idx))
            .tuples()
            .map(|(ref v_l, ref v_r)| v_l * v_r)
            .collect_vec();
        let expr = Expression::distribute_powers(exprs, gamma);

        let eq_r_x = Expression::poly(0);

        Generic::new(num_vars, &(eq_r_x * expr))
    }

    /// Random-linear-combination of per-memory claims by powers of gamma.
    pub fn sum_check_claim(claimed_v_ys: &[E], gamma: E) -> E {
        inner_product(
            claimed_v_ys.to_vec(),
            powers(gamma).take(claimed_v_ys.len()).collect_vec(),
        )
    }

    /// Reduces the (v_l, v_r) opening pair to the next-layer claim
    /// v(mu) = v_l + mu * (v_r - v_l).
    pub fn layer_down_claim(evals: &[E], mu: E) -> Vec<E> {
        evals
            .iter()
            .tuples()
            .map(|(&v_l, &v_r)| v_l + mu * (v_r - v_l))
            .collect_vec()
    }
}

/// One product-tree layer split into left/right halves.
struct Layer<'a, F, E> {
    v_l: BoxMultilinearPoly<'a, F, E>,
    v_r: BoxMultilinearPoly<'a, F, E>,
}

impl<'a, F: PrimeField, E: ExtensionField<F>> From<[Vec<F>; 2]> for Layer<'a, F, E> {
    fn from(values: [Vec<F>; 2]) -> Self {
        let [v_l, v_r] = values.map(box_dense_poly);
        Self { v_l, v_r }
    }
}

impl<'a, F: PrimeField, E: ExtensionField<F>> Layer<'a, F, E> {
    fn bottom<'b>(v: &BoxMultilinearPoly<'a, F, E>) -> Layer<'b, F, E> {
        // Fixed: materialize the dense evaluations once instead of calling
        // `to_dense()` three times.
        let dense = v.to_dense();
        let mid = dense.len() >> 1;
        [&dense[..mid], &dense[mid..]]
            .map(ToOwned::to_owned)
            .into()
    }

    fn num_vars(&self) -> usize {
        self.v_l.num_vars()
    }

    fn polys(&self) -> [&BoxMultilinearPoly<'a, F, E>; 2] {
        [&self.v_l, &self.v_r]
    }

    fn poly_chunks(&self, chunk_size: usize) -> impl Iterator<Item = (&[F], &[F])> {
        let [v_l, v_r] = self
            .polys()
            .map(|poly| poly.as_dense().unwrap().chunks(chunk_size));
        izip!(v_l, v_r)
    }

    /// Computes the parent layer: element-wise v_l * v_r, parallelized.
    fn up<'b>(&self) -> Layer<'b, F, E> {
        assert!(self.num_vars() != 0);

        let len = 1 << self.num_vars();
        let chunk_size = div_ceil(len, num_threads()).next_power_of_two();

        let mut outputs: [_; 2] = array::from_fn(|_| vec![F::ZERO; len >> 1]);
        let (v_up_l, v_up_r) = outputs.split_at_mut(1);

        parallelize_iter(
            izip!(
                chain![v_up_l, v_up_r].flat_map(|v_up| v_up.chunks_mut(chunk_size)),
                self.poly_chunks(chunk_size),
            ),
            |(v_up, (v_l, v_r))| {
                izip!(v_up, v_l, v_r).for_each(|(v_up, v_l, v_r)| {
                    *v_up = *v_l * *v_r;
                })
            },
        );

        outputs.into()
    }
}

// -----------------------------------------------------------------------------
// /lasso/src/memory_checking/verifier.rs
// -----------------------------------------------------------------------------
use gkr::{
    ff_ext::{
        ff::{Field, PrimeField},
        ExtensionField,
    },
    sum_check::verify_sum_check,
    transcript::TranscriptRead,
    util::arithmetic::inner_product,
    Error,
};
use itertools::{izip, Itertools};
use std::{iter, marker::PhantomData};

use crate::lasso::LassoSubtable;

use super::MemoryCheckingProver;

// NOTE(review): generic parameter lists in this file were also stripped by the
// dump's extraction and are reconstructed — confirm against the repository.

#[derive(Debug)]
pub struct Chunk<F: PrimeField, E: ExtensionField<F>, const M: usize> {
    chunk_index: usize,
    chunk_bits: usize,
    pub(crate) memory: Vec<Memory<F, E>>,
}

impl<F: PrimeField, E: ExtensionField<F>, const M: usize> Chunk<F, E, M> {
    /// Positions of this chunk's dim / read_ts / final_cts openings inside the
    /// flattened commitment layout (`offset` skips preceding polynomials).
    pub fn chunk_polys_index(&self, offset: usize, num_chunks: usize) -> Vec<usize> {
        let dim_poly_index = offset + 1 + self.chunk_index;
        let read_ts_poly_index = offset + 1 + num_chunks + self.chunk_index;
        let final_cts_poly_index = offset + 1 + 2 * num_chunks + self.chunk_index;
        vec![dim_poly_index, read_ts_poly_index, final_cts_poly_index]
    }

    pub fn new(chunk_index: usize, chunk_bits: usize,
memory: Memory<F, E>) -> Self {
        // NOTE(review): generics reconstructed from a dump that stripped all
        // `<...>` parameter lists — confirm against the repository.
        Self {
            chunk_index,
            chunk_bits,
            memory: vec![memory],
        }
    }

    pub fn num_memories(&self) -> usize {
        self.memory.len()
    }

    pub fn chunk_bits(&self) -> usize {
        self.chunk_bits
    }

    pub fn add_memory(&mut self, memory: Memory<F, E>) {
        self.memory.push(memory);
    }

    pub fn memory_indices(&self) -> Vec<usize> {
        self.memory
            .iter()
            .map(|memory| memory.memory_index)
            .collect_vec()
    }

    /// Reads this chunk's openings from the transcript and checks every
    /// memory's four fingerprints against the grand-product outputs:
    /// read/write at point `x`, init/final_read at point `y` (where the
    /// address is the identity polynomial and the value is the subtable MLE).
    #[allow(clippy::too_many_arguments)]
    pub fn verify_memories(
        &self,
        read_xs: &[E],
        write_xs: &[E],
        init_ys: &[E],
        final_read_ys: &[E],
        y: &[E],
        hash: impl Fn(&E, &E, &E) -> E,
        transcript: &mut dyn TranscriptRead<F, E>,
    ) -> Result<(), Error> {
        let [dim_x, read_ts_poly_x, final_cts_poly_y] =
            transcript.read_felt_exts(3)?.try_into().unwrap();
        let e_poly_xs = transcript.read_felt_exts(self.num_memories())?;
        // MLE of the identity (address) polynomial at y: sum_i 2^i * y_i.
        let id_poly_y = inner_product(
            iter::successors(Some(E::ONE), |power_of_two| Some(power_of_two.double()))
                .take(y.len()),
            y.to_vec(),
        );
        self.memory.iter().enumerate().for_each(|(i, memory)| {
            assert_eq!(read_xs[i], hash(&dim_x, &e_poly_xs[i], &read_ts_poly_x));
            assert_eq!(
                write_xs[i],
                hash(&dim_x, &e_poly_xs[i], &(read_ts_poly_x + E::ONE))
            );
            let subtable_poly_y = memory.subtable.evaluate_mle(y, M);

            assert_eq!(init_ys[i], hash(&id_poly_y, &subtable_poly_y, &E::ZERO));
            assert_eq!(
                final_read_ys[i],
                hash(&id_poly_y, &subtable_poly_y, &final_cts_poly_y)
            );
        });
        Ok(())
    }
}

#[derive(Debug)]
pub struct Memory<F: PrimeField, E: ExtensionField<F>> {
    memory_index: usize,
    subtable: Box<dyn LassoSubtable<F, E>>,
}

impl<F: PrimeField, E: ExtensionField<F>> Memory<F, E> {
    pub fn new(memory_index: usize, subtable: Box<dyn LassoSubtable<F, E>>) -> Self {
        Self {
            memory_index,
            subtable,
        }
    }
}

#[derive(Debug)]
pub struct MemoryCheckingVerifier<F: PrimeField, E: ExtensionField<F>, const M: usize> {
    /// chunks with the same bits size
    chunks: Vec<Chunk<F, E, M>>,
    _marker: PhantomData<F>,
    _marker_e: PhantomData<E>,
}

impl<F: PrimeField, E: ExtensionField<F>, const M: usize> MemoryCheckingVerifier<F, E, M> {
    pub fn new(chunks: Vec<Chunk<F, E, M>>) -> Self {
        Self {
            chunks,
            _marker: PhantomData,
            _marker_e: PhantomData,
        }
    }

    /// Verifies both batched grand products and every chunk's openings.
    pub fn verify(
        &self,
        num_reads: usize,
        gamma: &E,
        tau: &E,
        transcript: &mut dyn TranscriptRead<F, E>,
    ) -> Result<(), Error> {
        // warning: this cast is insecure
        // TODO: need to rewrite memory checking to work over the extension field
        let tau = tau.as_bases()[0];
        let gamma = gamma.as_bases()[0];

        let num_memories: usize = self.chunks.iter().map(|chunk| chunk.num_memories()).sum();
        let memory_bits = self.chunks[0].chunk_bits();
        let (read_write_xs, x) = Self::verify_grand_product(
            num_reads,
            iter::repeat(None).take(2 * num_memories),
            transcript,
        )?;
        let (read_xs, write_xs) = read_write_xs.split_at(num_memories);

        let (init_final_read_ys, y) = Self::verify_grand_product(
            memory_bits,
            iter::repeat(None).take(2 * num_memories),
            transcript,
        )?;
        let (init_ys, final_read_ys) = init_final_read_ys.split_at(num_memories);

        let hash = |a: &E, v: &E, t: &E| -> E { *a + *v * gamma + *t * gamma.square() - tau };
        let mut offset = 0;

        // Fixed: the original discarded `verify_memories`'s Result with
        // `let _ =`, silently swallowing transcript errors; propagate them.
        let _ = x; // x is consumed implicitly via the transcript-read openings
        for chunk in &self.chunks {
            let num_memories = chunk.num_memories();
            chunk.verify_memories(
                &read_xs[offset..offset + num_memories],
                &write_xs[offset..offset + num_memories],
                &init_ys[offset..offset + num_memories],
                &final_read_ys[offset..offset + num_memories],
                &y,
                hash,
                transcript,
            )?;
            offset += num_memories;
        }

        Ok(())
    }

    /// Verifier side of the batched GKR grand product: one sum-check per
    /// layer, walking from the root claims down to the leaves.
    fn verify_grand_product(
        num_vars: usize,
        claimed_v_0s: impl IntoIterator<Item = Option<E>>,
        transcript: &mut dyn TranscriptRead<F, E>,
    ) -> Result<(Vec<E>, Vec<E>), Error> {
        let claimed_v_0s = claimed_v_0s.into_iter().collect_vec();
        let num_batching = claimed_v_0s.len();

        assert!(num_batching != 0);
        let claimed_v_0s = {
            claimed_v_0s
                .into_iter()
                .map(|claimed| match claimed {
                    Some(claimed) => {
                        transcript.common_felts(claimed.as_bases());
                        Ok(claimed)
                    }
                    None => transcript.read_felt_ext(),
                })
                .try_collect::<_, Vec<_>, _>()?
        };

        (0..num_vars).try_fold((claimed_v_0s, Vec::new()), |result, num_vars| {
            let (claimed_v_ys, _) = result;

            let (mut x, evals) = if num_vars == 0 {
                let evals = transcript.read_felt_exts(2 * num_batching)?;
                for (claimed_v, (&v_l, &v_r)) in izip!(claimed_v_ys, evals.iter().tuples()) {
                    if claimed_v != v_l * v_r {
                        return Err(Error::InvalidSumCheck(
                            "unmatched sum check output".to_string(),
                        ));
                    }
                }

                (Vec::new(), evals)
            } else {
                let gamma = transcript.squeeze_challenge();
                let g = MemoryCheckingProver::sum_check_function(num_vars, num_batching, gamma);

                let (_x_eval, x) = {
                    let claim = MemoryCheckingProver::sum_check_claim(&claimed_v_ys, gamma);
                    verify_sum_check(&g, claim, transcript)?
                };

                let evals = transcript.read_felt_exts(2 * num_batching)?;

                (x, evals)
            };

            let mu = transcript.squeeze_challenge();

            let v_xs = MemoryCheckingProver::layer_down_claim(&evals, mu);
            x.push(mu);

            Ok((v_xs, x))
        })
    }
}

// -----------------------------------------------------------------------------
// /lasso/src/table.rs
// -----------------------------------------------------------------------------
use fixedbitset::FixedBitSet;
use gkr::{
    ff_ext::{ff::PrimeField, ExtensionField},
    poly::MultilinearPolyTerms,
    util::expression::Expression,
};
use std::fmt::Debug;
use std::ops::Range;

pub mod range;
// for some reason #[enum_dispatch] needs this

pub type SubtableId = String;
pub type LookupId = String;

pub trait LassoSubtable<F: PrimeField, E: ExtensionField<F>>:
    'static + Sync + Debug + SubtableClone<F, E>
{
    /// Returns the TypeId of this subtable.
    /// The `Jolt` trait has associated enum types `InstructionSet` and `Subtables`.
    /// This function is used to resolve the many-to-many mapping between `InstructionSet` variants
    /// and `Subtables` variants,
    fn subtable_id(&self) -> SubtableId;

    /// Fully materializes a subtable of size `M`, represented as a Vec of length `M`.
    fn materialize(&self, M: usize) -> Vec<F>;

    fn evaluate_mle(&self, point: &[E], M: usize) -> E;

    /// Expression to evaluate the multilinear extension polynomial for this subtable at the given `point`,
    /// interpreted to be of size log_2(M), where M is the size of the subtable.
    fn evaluate_mle_expr(&self, log2_M: usize) -> MultilinearPolyTerms<F>;
}

pub trait LookupType<F: PrimeField, E: ExtensionField<F>>:
    'static + Send + Sync + Debug + LookupClone<F, E>
{
    /// Returns the identifier of this lookup type.
39 | fn lookup_id(&self) -> LookupId; 40 | 41 | /// The `g` function that computes T[r] = g(T_1[r_1], ..., T_k[r_1], T_{k+1}[r_2], ..., T_{\alpha}[r_c]) 42 | fn combine_lookups(&self, operands: &[F], C: usize, M: usize) -> F; 43 | 44 | fn combine_lookup_expressions( 45 | &self, 46 | expressions: Vec>, 47 | C: usize, 48 | M: usize, 49 | ) -> Expression; 50 | 51 | /// Returns a Vec of the unique subtable types used by this instruction. For some instructions, 52 | /// e.g. SLL, the list of subtables depends on the dimension `C`. 53 | fn subtables(&self, C: usize, M: usize) 54 | -> Vec<(Box>, SubtableIndices)>; 55 | 56 | // fn to_indices(&self, value: &F) -> Vec; 57 | 58 | fn output(&self, index: &F) -> F; 59 | 60 | fn chunk_bits(&self, M: usize) -> Vec; 61 | 62 | /// Returns the indices of each subtable lookups 63 | /// The length of `index_bits` is same as actual bit length of table index 64 | fn subtable_indices(&self, index_bits: Vec, log_M: usize) -> Vec>; 65 | 66 | // fn num_memories(&self) -> usize; 67 | } 68 | 69 | pub trait LookupClone { 70 | fn clone_box(&self) -> Box>; 71 | } 72 | 73 | impl> LookupClone for T 74 | where 75 | T: LookupType + Clone + 'static, 76 | { 77 | fn clone_box(&self) -> Box> { 78 | Box::new(self.clone()) 79 | } 80 | } 81 | 82 | impl Clone for Box> { 83 | fn clone(&self) -> Self { 84 | self.clone_box() 85 | } 86 | } 87 | 88 | pub trait SubtableClone { 89 | fn clone_box(&self) -> Box>; 90 | } 91 | 92 | impl> SubtableClone for T 93 | where 94 | T: LassoSubtable + Clone + 'static, 95 | { 96 | fn clone_box(&self) -> Box> { 97 | Box::new(self.clone()) 98 | } 99 | } 100 | 101 | impl Clone for Box> { 102 | fn clone(&self) -> Self { 103 | self.clone_box() 104 | } 105 | } 106 | 107 | #[derive(Clone)] 108 | pub struct SubtableIndices { 109 | bitset: FixedBitSet, 110 | } 111 | 112 | impl SubtableIndices { 113 | pub fn with_capacity(capacity: usize) -> Self { 114 | Self { 115 | bitset: FixedBitSet::with_capacity(capacity), 116 | } 117 | } 118 | 119 
| pub fn union_with(&mut self, other: &Self) { 120 | self.bitset.union_with(&other.bitset); 121 | } 122 | 123 | pub fn iter(&self) -> impl Iterator + '_ { 124 | self.bitset.ones() 125 | } 126 | 127 | #[allow(clippy::len_without_is_empty)] 128 | pub fn len(&self) -> usize { 129 | self.bitset.count_ones(..) 130 | } 131 | 132 | pub fn contains(&self, index: usize) -> bool { 133 | self.bitset.contains(index) 134 | } 135 | } 136 | 137 | impl From for SubtableIndices { 138 | fn from(index: usize) -> Self { 139 | let mut bitset = FixedBitSet::new(); 140 | bitset.grow_and_insert(index); 141 | Self { bitset } 142 | } 143 | } 144 | 145 | impl From> for SubtableIndices { 146 | fn from(range: Range) -> Self { 147 | let bitset = FixedBitSet::from_iter(range); 148 | Self { bitset } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /lasso/src/table/range.rs: -------------------------------------------------------------------------------- 1 | use gkr::{ 2 | ff_ext::{ff::PrimeField, ExtensionField}, 3 | poly::{MultilinearPolyTerms, PolyExpr}, 4 | util::{arithmetic::inner_product, expression::Expression}, 5 | }; 6 | use itertools::Itertools; 7 | use std::{iter, marker::PhantomData}; 8 | 9 | use super::{LassoSubtable, LookupType, SubtableIndices}; 10 | 11 | #[derive(Clone, Debug, Default)] 12 | pub struct FullLimbSubtable(PhantomData<(F, E)>); 13 | 14 | impl> LassoSubtable for FullLimbSubtable { 15 | fn materialize(&self, M: usize) -> Vec { 16 | (0..M).map(|x| F::from(x as u64)).collect_vec() 17 | } 18 | 19 | fn evaluate_mle(&self, point: &[E], _: usize) -> E { 20 | let b = point.len(); 21 | let mut result = E::ZERO; 22 | for i in 0..b { 23 | result += point[i] * F::from(1u64 << (i)); 24 | } 25 | result 26 | } 27 | 28 | fn evaluate_mle_expr(&self, log2_M: usize) -> MultilinearPolyTerms { 29 | let limb_init = PolyExpr::Var(0); 30 | let mut limb_terms = vec![limb_init]; 31 | (1..log2_M).for_each(|i| { 32 | let coeff = 
PolyExpr::Pow(Box::new(PolyExpr::Const(F::from(2))), i as u32); 33 | let x = PolyExpr::Var(i); 34 | let term = PolyExpr::Prod(vec![coeff, x]); 35 | limb_terms.push(term); 36 | }); 37 | MultilinearPolyTerms::new(log2_M, PolyExpr::Sum(limb_terms)) 38 | } 39 | 40 | fn subtable_id(&self) -> super::SubtableId { 41 | "full".to_string() 42 | } 43 | } 44 | 45 | impl FullLimbSubtable { 46 | pub fn new() -> Self { 47 | Self(PhantomData) 48 | } 49 | } 50 | 51 | #[derive(Clone, Debug, Default)] 52 | pub struct BoundSubtable { 53 | bound: u64, 54 | _marker: PhantomData<(F, E)>, 55 | } 56 | 57 | impl> LassoSubtable for BoundSubtable { 58 | fn materialize(&self, M: usize) -> Vec { 59 | let bound_bits = self.bound.ilog2() as usize; 60 | let reminder = 1 << (bound_bits % M.ilog2() as usize); 61 | let cutoff = (reminder + self.bound % M as u64) as usize; 62 | 63 | (0..M) 64 | .map(|i| { 65 | if i < cutoff { 66 | F::from(i as u64) 67 | } else { 68 | F::ZERO 69 | } 70 | }) 71 | .collect() 72 | } 73 | 74 | fn evaluate_mle(&self, point: &[E], M: usize) -> E { 75 | let log2_M = M.ilog2() as usize; 76 | let b = point.len(); 77 | 78 | let bound_bits = self.bound.ilog2() as usize; 79 | let reminder = 1 << (bound_bits % log2_M); 80 | let cutoff = reminder + self.bound % (1 << log2_M); 81 | let cutoff_log2 = cutoff.ilog2() as usize; 82 | 83 | let g_base = 1 << cutoff_log2; 84 | let num_extra = cutoff - g_base; 85 | 86 | let mut result = E::ZERO; 87 | for i in 0..b { 88 | if i < cutoff_log2 { 89 | result += point[i] * F::from(1u64 << (i)); 90 | } else { 91 | let mut g_value = E::ZERO; 92 | 93 | if i == cutoff_log2 { 94 | for k in 0..num_extra { 95 | let mut term: E = F::from(g_base + k).into(); 96 | for j in 0..cutoff_log2 { 97 | if (k & (1 << j)) != 0 { 98 | term *= point[j]; 99 | } else { 100 | term *= E::ONE - point[j]; 101 | } 102 | } 103 | g_value += term; 104 | } 105 | } 106 | 107 | result = (E::ONE - point[i]) * result + point[i] * g_value 108 | } 109 | } 110 | 111 | result 112 | } 113 
| 114 | fn evaluate_mle_expr(&self, log2_M: usize) -> MultilinearPolyTerms { 115 | let M = 1 << log2_M; 116 | let bound_bits = self.bound.ilog2() as usize; 117 | let reminder = 1 << (bound_bits % log2_M); 118 | let cutoff = reminder + self.bound % M; 119 | let cutoff_log2 = cutoff.ilog2() as usize; 120 | 121 | let rem_init = PolyExpr::Var(0); 122 | let mut terms = vec![rem_init]; 123 | (1..cutoff_log2).for_each(|i| { 124 | let coeff = PolyExpr::Pow(Box::new(PolyExpr::Const(F::from(2))), i as u32); 125 | let x = PolyExpr::Var(i); 126 | let term = PolyExpr::Prod(vec![coeff, x]); 127 | terms.push(term); 128 | }); 129 | 130 | let mut result = PolyExpr::Sum(terms); 131 | 132 | let g_base = 1 << cutoff_log2; 133 | let num_extra = cutoff - g_base; 134 | 135 | (cutoff_log2..log2_M).for_each(|i| { 136 | if num_extra > 0 && i == cutoff_log2 { 137 | let mut g_value = PolyExpr::ZERO; 138 | for k in 0..num_extra { 139 | let mut term = PolyExpr::u64(g_base + k); 140 | for j in 0..cutoff_log2 { 141 | if (k & (1 << j)) != 0 { 142 | term = PolyExpr::mul(term, PolyExpr::Var(j)); 143 | } else { 144 | let t = PolyExpr::sub(PolyExpr::Const(F::ONE), PolyExpr::Var(j)); 145 | term = PolyExpr::mul(term, t); 146 | } 147 | } 148 | g_value = PolyExpr::add(term, g_value); 149 | } 150 | let x = PolyExpr::Var(i); 151 | let left = PolyExpr::mul(PolyExpr::sub(PolyExpr::ONE, x.clone()), result.clone()); 152 | let right = PolyExpr::mul(x, g_value); 153 | result = PolyExpr::add(left, right); 154 | } else { 155 | let term = PolyExpr::sub(PolyExpr::ONE, PolyExpr::Var(i)); 156 | result = PolyExpr::mul(result.clone(), term); 157 | } 158 | }); 159 | 160 | MultilinearPolyTerms::new(log2_M, result) 161 | } 162 | 163 | fn subtable_id(&self) -> super::SubtableId { 164 | format!("bound_{}", self.bound) 165 | } 166 | } 167 | 168 | impl BoundSubtable { 169 | pub fn new(bound: u64) -> Self { 170 | Self { 171 | bound, 172 | _marker: PhantomData, 173 | } 174 | } 175 | } 176 | 177 | #[derive(Clone, Debug, Default, 
Copy)] 178 | pub struct RangeLookup { 179 | bound: u64, 180 | _marker: PhantomData<(F, E)>, 181 | } 182 | 183 | impl> LookupType for RangeLookup { 184 | fn combine_lookups(&self, operands: &[F], _: usize, M: usize) -> F { 185 | let weight = F::from(M as u64); 186 | inner_product( 187 | operands, 188 | iter::successors(Some(F::ONE), |power_of_weight| { 189 | Some(*power_of_weight * weight) 190 | }) 191 | .take(operands.len()) 192 | .collect_vec() 193 | .iter(), 194 | ) 195 | } 196 | 197 | fn combine_lookup_expressions( 198 | &self, 199 | expressions: Vec>, 200 | _C: usize, 201 | M: usize, 202 | ) -> Expression { 203 | Expression::distribute_powers(expressions, F::from(M as u64).into()) 204 | } 205 | 206 | // SubtableIndices map subtable to memories 207 | fn subtables( 208 | &self, 209 | _C: usize, 210 | M: usize, 211 | ) -> Vec<(Box>, SubtableIndices)> { 212 | let full = Box::new(FullLimbSubtable::::new()); 213 | let log_M = M.ilog2() as usize; 214 | let bound_bits = self.bound.ilog2() as usize; 215 | let num_chunks = bound_bits / log_M; 216 | let rem = Box::new(BoundSubtable::::new(self.bound)); 217 | 218 | if self.bound % M as u64 == 0 { 219 | vec![(full, SubtableIndices::from(0..num_chunks))] 220 | } else if self.bound < M as u64 { 221 | vec![(rem, SubtableIndices::from(0))] 222 | } else { 223 | vec![ 224 | (full, SubtableIndices::from(0..num_chunks)), 225 | (rem, SubtableIndices::from(num_chunks)), 226 | ] 227 | } 228 | } 229 | 230 | fn output(&self, index: &F) -> F { 231 | *index 232 | } 233 | 234 | fn chunk_bits(&self, M: usize) -> Vec { 235 | let log2_M = M.ilog2() as usize; 236 | let bound_bits = self.bound.ilog2() as usize; 237 | 238 | let remainder_bits = if self.bound % M as u64 != 0 { 239 | let reminder = 1 << (bound_bits % log2_M); 240 | let cutoff = reminder + self.bound % M as u64; 241 | let cutoff_log2 = cutoff.ilog2() as usize; 242 | vec![cutoff_log2] 243 | } else { 244 | vec![] 245 | }; 246 | iter::repeat(log2_M) 247 | .take(bound_bits / log2_M) 
248 | .chain(remainder_bits) 249 | .collect_vec() 250 | } 251 | 252 | fn subtable_indices(&self, index_bits: Vec, log_M: usize) -> Vec> { 253 | index_bits.chunks(log_M).map(Vec::from).collect_vec() 254 | } 255 | 256 | fn lookup_id(&self) -> super::LookupId { 257 | format!("range_{}", self.bound) 258 | } 259 | } 260 | 261 | impl> RangeLookup { 262 | pub fn new_boxed(bound: u64) -> Box> { 263 | Box::new(Self { 264 | bound, 265 | _marker: PhantomData, 266 | }) 267 | } 268 | } 269 | 270 | impl RangeLookup<(), ()> { 271 | pub fn id_for(bound: u64) -> super::LookupId { 272 | format!("range_{}", bound) 273 | } 274 | } 275 | 276 | #[cfg(test)] 277 | mod test { 278 | use super::*; 279 | use gkr::ff_ext::ff::Field; 280 | use gkr::util::dev::std_rng; 281 | use gkr::{poly::box_dense_poly, util::dev::seeded_std_rng}; 282 | use goldilocks::Goldilocks; 283 | use rand::RngCore; 284 | 285 | use crate::lasso::LassoSubtable; 286 | 287 | use super::BoundSubtable; 288 | 289 | type F = Goldilocks; 290 | const LOG2_M: usize = 16; 291 | const M: usize = 1 << LOG2_M; 292 | 293 | #[test] 294 | fn full_subtable_mle_eval_correct() { 295 | let s = FullLimbSubtable::::new(); 296 | 297 | let poly = box_dense_poly::(s.materialize(M)); 298 | let num_vars = poly.num_vars(); 299 | let r = (0..num_vars) 300 | .map(|_| F::random(seeded_std_rng())) 301 | .collect::>(); 302 | 303 | let full_poly_eval = poly.evaluate(&r); 304 | let func_eval = s.evaluate_mle(&r, M); 305 | let term_eval = s.evaluate_mle_expr(LOG2_M).evaluate(&r); 306 | 307 | assert_eq!(full_poly_eval, func_eval); 308 | assert_eq!(full_poly_eval, term_eval); 309 | } 310 | 311 | #[test] 312 | fn bound_subtable_mle_eval_correct() { 313 | const BOUND: u64 = (1 << 55) + 55; 314 | 315 | let s = BoundSubtable::::new(BOUND); 316 | let evals = s.materialize(M); 317 | 318 | let poly = box_dense_poly::(evals); 319 | let num_vars = poly.num_vars(); 320 | let rng = &mut std_rng(); 321 | let r = (0..num_vars) 322 | .map(|_| F::from(rng.next_u64())) 323 
| .collect::>(); 324 | 325 | let full_poly_eval = poly.evaluate(&r); 326 | let func_eval = s.evaluate_mle(&r, M); 327 | let term_eval = s.evaluate_mle_expr(LOG2_M).evaluate(&r); 328 | 329 | assert_eq!(full_poly_eval, func_eval); 330 | assert_eq!(full_poly_eval, term_eval); 331 | } 332 | } 333 | -------------------------------------------------------------------------------- /scripts/.gitignore: -------------------------------------------------------------------------------- 1 | # Scripts for internal use 2 | scripts 3 | /error_log.txt 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | cover/ 57 | 58 | # Translations 59 | *.mo 60 | *.pot 61 | 62 | # Django stuff: 63 | *.log 64 | local_settings.py 65 | db.sqlite3 66 | db.sqlite3-journal 67 | 68 | # Flask stuff: 69 | instance/ 70 | .webassets-cache 71 | 72 | # Scrapy stuff: 73 | .scrapy 74 | 75 | # Sphinx documentation 76 | docs/_build/ 77 | 78 | # PyBuilder 79 | .pybuilder/ 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | 85 | # IPython 86 | profile_default/ 87 | ipython_config.py 88 | 89 | # pyenv 90 | # For a library or package, you might want to ignore these files since the code is 91 | # intended to run in multiple environments; otherwise, check them in: 92 | # .python-version 93 | 94 | # pipenv 95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 98 | # install all needed dependencies. 99 | #Pipfile.lock 100 | 101 | # poetry 102 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 103 | # This is especially recommended for binary packages to ensure reproducibility, and is more 104 | # commonly ignored for libraries. 105 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 106 | #poetry.lock 107 | 108 | # pdm 109 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
110 | #pdm.lock 111 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 112 | # in version control. 113 | # https://pdm.fming.dev/#use-with-ide 114 | .pdm.toml 115 | 116 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 117 | __pypackages__/ 118 | 119 | # Celery stuff 120 | celerybeat-schedule 121 | celerybeat.pid 122 | 123 | # SageMath parsed files 124 | *.sage.py 125 | 126 | # Environments 127 | .env 128 | .venv 129 | env/ 130 | venv/ 131 | ENV/ 132 | env.bak/ 133 | venv.bak/ 134 | 135 | # Spyder project settings 136 | .spyderproject 137 | .spyproject 138 | 139 | # Rope project settings 140 | .ropeproject 141 | 142 | # mkdocs documentation 143 | /site 144 | 145 | # mypy 146 | .mypy_cache/ 147 | .dmypy.json 148 | dmypy.json 149 | 150 | # Pyre type checker 151 | .pyre/ 152 | 153 | # pytype static type analyzer 154 | .pytype/ 155 | 156 | # Cython debug symbols 157 | cython_debug/ 158 | 159 | # PyCharm 160 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 161 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 162 | # and can be added to the global gitignore or merged into this file. For a more nuclear 163 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
164 | #.idea/ 165 | -------------------------------------------------------------------------------- /scripts/circuit_sk.py: -------------------------------------------------------------------------------- 1 | import os 2 | from bfv.crt import CRTModuli 3 | from bfv.bfv import BFVCrt 4 | from bfv.discrete_gauss import DiscreteGaussian 5 | from bfv.polynomial import Polynomial, poly_div 6 | from random import randint 7 | import copy 8 | from utils import assign_to_circuit, count_advice_cells_needed_for_poly_range_check, print_advice_cells_info 9 | import argparse 10 | import json 11 | 12 | def main(args): 13 | 14 | ''' 15 | ENCRYPTION PHASE - performed outside the circuit. 16 | ''' 17 | 18 | n = args.n 19 | qis = args.qis 20 | qis = json.loads(qis) 21 | t = args.t 22 | 23 | crt_moduli = CRTModuli(qis) 24 | sigma = 3.2 25 | discrete_gaussian = DiscreteGaussian(sigma) 26 | bfv_crt = BFVCrt(crt_moduli, n, t, discrete_gaussian) 27 | 28 | # Perform encryption of m in each CRT basis 29 | s = bfv_crt.SecretKeyGen() 30 | e = bfv_crt.bfv_q.rlwe.SampleFromErrorDistribution() 31 | ais = [] 32 | for i in range(len(crt_moduli.qis)): 33 | ais.append(bfv_crt.bfv_qis[i].rlwe.Rq.sample_polynomial()) 34 | 35 | m = bfv_crt.bfv_q.rlwe.Rt.sample_polynomial() 36 | 37 | ctis = bfv_crt.SecretKeyEncrypt(s, ais, e, m) 38 | 39 | # Sanity check for valid decryption 40 | message_prime = bfv_crt.Decrypt(s, ctis) 41 | 42 | assert m == message_prime 43 | 44 | # k1 = [QM]t namely the scaled message polynomial 45 | k1 = m.scalar_mul(crt_moduli.q) 46 | k1.reduce_coefficients_by_modulus(t) 47 | 48 | # `p` is the modulus of the prime field of the circuit 49 | p = 18446744069414584321 50 | # p = 21888242871839275222246405745257275088548364400416034343698204186575808495617 51 | 52 | # `r2is` are the polynomials r2i for each i-th CRT basis. 53 | r2is = [] 54 | 55 | # `r1is` are the polynomials r1i for each i-th CRT basis. 
56 | r1is = [] 57 | 58 | # `k0is` are the negative multiplicative inverses of t modulo each qi. 59 | k0is = [] 60 | 61 | # `ct0is` are the polynomials ct0i for each CRT basis. 62 | ct0is = [] 63 | 64 | # `ct0is_hat` are the polynomials ct0i_hat for each CRT basis. 65 | ct0is_hat = [] 66 | 67 | ''' 68 | SETUP PHASE - performed outside the circuit 69 | For each CRT basis, we need to compute the polynomials r1i and r2i (check this doc for more details: https://hackmd.io/@gaussian/r1W98Kqqa) 70 | ''' 71 | 72 | cyclo = [1] + [0] * (n - 1) + [1] 73 | cyclo = Polynomial(cyclo) 74 | 75 | for i, cti in enumerate(ctis): 76 | 77 | ct0i = cti[0] 78 | ai = cti[1].scalar_mul(-1) 79 | 80 | k0i = pow(-t, -1, qis[i]) 81 | 82 | # k0i = -t^{-1} namely the multiplicative inverse of t modulo qi 83 | ct0i_hat = ai * s + e + k1.scalar_mul(k0i) 84 | assert(len(ct0i_hat.coefficients) - 1 == 2 * n - 2) 85 | 86 | # ai * s + e + k0i * k1 = ct0i mod Rqi 87 | # assert that ct0i_hat = ct0i mod Rqi 88 | ct0i_hat_clone = copy.deepcopy(ct0i_hat) 89 | # mod Rqi means that we need to: 90 | # - reduce the coefficients of ct0i_hat_clone by the cyclotomic polynomial 91 | # - reduce the coefficients of ct0i_hat_clone by the modulus 92 | ct0i_hat_clone.reduce_coefficients_by_cyclo(cyclo.coefficients) 93 | ct0i_hat_clone.reduce_coefficients_by_modulus(qis[i]) 94 | assert ct0i_hat_clone == ct0i 95 | 96 | # Calculate r2i 97 | # divide ct0i - ct0i_hat by the cyclotomic polynomial over Zqi to get r2i 98 | num = ct0i + ct0i_hat.scalar_mul(-1) 99 | # reduce the coefficients of num by the modulus qi 100 | num.reduce_coefficients_by_modulus(qis[i]) 101 | (quotient, rem) = poly_div(num.coefficients, cyclo.coefficients) 102 | # assert that the remainder is zero 103 | assert rem == [] 104 | r2i = Polynomial(quotient) 105 | # assert that the degree of r2i is equal to n - 2 106 | assert len(r2i.coefficients) - 1 == n - 2 107 | 108 | # Assert that ct0i - ct0i_hat = r2i * cyclo mod Zqi 109 | lhs = ct0i + 
ct0i_hat.scalar_mul(-1) 110 | rhs = r2i * cyclo 111 | 112 | # reduce the coefficients of lhs by the modulus qi 113 | lhs.reduce_coefficients_by_modulus(qis[i]) 114 | assert lhs == rhs 115 | 116 | # Calculate r1i 117 | # divide ct0i - ct0i_hat - r2i * cyclo by the modulus qi to get r1i 118 | num = ct0i + ct0i_hat.scalar_mul(-1) + (r2i * cyclo).scalar_mul(-1) 119 | (quotient, rem) = poly_div(num.coefficients, [qis[i]]) 120 | # assert that the remainder is zero 121 | assert rem == [] 122 | r1i = Polynomial(quotient) 123 | # assert that the degree of r1i is 2n - 2 124 | assert len(r1i.coefficients) - 1 == 2 * n - 2 125 | 126 | # Assert that ct0i = ct0i_hat + r1i * qi + r2i * cyclo mod Zp 127 | lhs = ct0i 128 | rhs = ct0i_hat + (r1i.scalar_mul(qis[i])) + (r2i * cyclo) 129 | 130 | # remove the leading zeroes from rhs until the length of rhs.coefficients is equal to n 131 | while len(rhs.coefficients) > n and rhs.coefficients[0] == 0: 132 | rhs.coefficients.pop(0) 133 | 134 | assert lhs == rhs 135 | 136 | r2is.append(r2i) 137 | r1is.append(r1i) 138 | k0is.append(k0i) 139 | ct0is.append(ct0i) 140 | ct0is_hat.append(ct0i_hat) 141 | 142 | # `r1_bounds` are the bounds for the coefficients of r1i for each CRT basis 143 | r1_bounds = [] 144 | 145 | # `r2_bounds` are the bounds for the coefficients of r2i for each CRT basis 146 | r2_bounds = [] 147 | 148 | # initiate counters for the number of advice cells needed for each constraint phase 149 | phase_0_assignment_advice_cell_count = 0 150 | phase_1_range_check_advice_cell_count = 0 151 | phase_1_eval_at_gamma_constraint_advice_cell_count = 0 152 | phase_1_encryption_constraint_advice_cell_count = 0 153 | 154 | ''' 155 | CIRCUIT - PHASE 0 - ASSIGNMENT 156 | ''' 157 | 158 | # Every assigned value must be an element of the field Zp. 
Negative coefficients `-z` are assigned as `p - z` 159 | s_assigned = assign_to_circuit(s, p) 160 | e_assigned = assign_to_circuit(e, p) 161 | k1_assigned = assign_to_circuit(k1, p) 162 | 163 | phase_0_assignment_advice_cell_count += len(s_assigned.coefficients) 164 | phase_0_assignment_advice_cell_count += len(e_assigned.coefficients) 165 | phase_0_assignment_advice_cell_count += len(k1_assigned.coefficients) 166 | 167 | r1is_assigned = [] 168 | r2is_assigned = [] 169 | ais_assigned = [] 170 | ct0is_assigned = [] 171 | 172 | for i in range(len(ctis)): 173 | r1i_assigned = assign_to_circuit(r1is[i], p) 174 | r2i_assigned = assign_to_circuit(r2is[i], p) 175 | r1is_assigned.append(r1i_assigned) 176 | r2is_assigned.append(r2i_assigned) 177 | 178 | phase_0_assignment_advice_cell_count += len(r1i_assigned.coefficients) 179 | phase_0_assignment_advice_cell_count += len(r2i_assigned.coefficients) 180 | 181 | ai_assigned = assign_to_circuit(ais[i], p) 182 | ct0i_assigned = assign_to_circuit(ct0is[i], p) 183 | ais_assigned.append(ai_assigned) 184 | ct0is_assigned.append(ct0i_assigned) 185 | 186 | phase_0_assignment_advice_cell_count += len(ai_assigned.coefficients) 187 | phase_0_assignment_advice_cell_count += len(ct0i_assigned.coefficients) 188 | 189 | # For the sake of simplicity, we generate a random challenge here 190 | gamma = randint(0, 1000) 191 | 192 | ''' 193 | CIRCUIT - PHASE 1 194 | ''' 195 | 196 | # Every assigned value must be an element of the field Zp. 
Negative coefficients `-z` are assigned as `p - z` 197 | # ais_at_gamma_assigned = [] 198 | # ct0is_at_gamma_assigned = [] 199 | qi_constants = [] 200 | k0i_constants = [] 201 | 202 | for i in range(len(ctis)): 203 | # ai_at_gamma = ais[i].evaluate(gamma) 204 | # ai_at_gamma_assigned = assign_to_circuit(Polynomial([ai_at_gamma]), p).coefficients[0] 205 | # ais_at_gamma_assigned.append(ai_at_gamma_assigned) 206 | 207 | # phase_1_assignment_advice_cell_count += 1 208 | 209 | # ct0i_at_gamma = ctis[i][0].evaluate(gamma) 210 | # ct0i_at_gamma_assigned = assign_to_circuit(Polynomial([ct0i_at_gamma]), p).coefficients[0] 211 | # ct0is_at_gamma_assigned.append(ct0i_at_gamma_assigned) 212 | 213 | # phase_1_assignment_advice_cell_count += 1 214 | 215 | qi_constants.append(qis[i]) 216 | 217 | k0i_constant = assign_to_circuit(Polynomial([k0is[i]]), p).coefficients[0] 218 | k0i_constants.append(k0i_constant) 219 | 220 | ''' 221 | CIRCUIT - PHASE 1 - RANGE CHECK 222 | ''' 223 | 224 | lookup_bits = 8 225 | 226 | s_bound = 1 227 | # constraint. The coefficients of s should be in the range [-1, 0, 1] 228 | assert all(coeff >= -s_bound and coeff <= s_bound for coeff in s.coefficients) 229 | # After the circuit assignement, the coefficients of s_assigned must be in [0, 1, p - 1] 230 | assert all(coeff in range(0, s_bound+1) or coeff in range(p - s_bound, p) for coeff in s_assigned.coefficients) 231 | # To perform a range check with a smaller lookup table, we shift the coefficients of s_assigned to be in [0, 1, 2] (the shift operation is constrained inside the circuit) 232 | s_shifted = Polynomial([(coeff + 1) % p for coeff in s_assigned.coefficients]) 233 | assert all(coeff >= 0 and coeff <= 2*s_bound for coeff in s_shifted.coefficients) 234 | 235 | phase_1_range_check_advice_cell_count += count_advice_cells_needed_for_poly_range_check(s_assigned, 2*s_bound + 1, lookup_bits) 236 | 237 | # constraint. 
The coefficients of e should be in the range [-B, B] where B is the upper bound of the discrete Gaussian distribution 238 | b = int(discrete_gaussian.z_upper) 239 | assert all(coeff >= -b and coeff <= b for coeff in e.coefficients) 240 | # After the circuit assignement, the coefficients of e_assigned must be in [0, B] or [p - B, p - 1] 241 | assert all(coeff in range(0, b+1) or coeff in range(p - b, p) for coeff in e_assigned.coefficients) 242 | # To perform a range check with a smaller lookup table, we shift the coefficients of e_assigned to be in [0, 2B] (the shift operation is constrained inside the circuit) 243 | e_shifted = Polynomial([(coeff + b) % p for coeff in e_assigned.coefficients]) 244 | assert all(coeff >= 0 and coeff <= 2*b for coeff in e_shifted.coefficients) 245 | 246 | phase_1_range_check_advice_cell_count += count_advice_cells_needed_for_poly_range_check(e_assigned, 2*b + 1, lookup_bits) 247 | 248 | # constraint. The coefficients of k1 should be in the range [-(t-1)/2, (t-1)/2] 249 | k1_bound = int((t - 1) / 2) 250 | assert all(coeff >= -k1_bound and coeff <= k1_bound for coeff in k1.coefficients) 251 | # After the circuit assignement, the coefficients of k1_assigned must be in [0, k1_bound] or [p - k1_bound, p - 1] 252 | assert all(coeff in range(0, int(k1_bound) + 1) or coeff in range(p - int(k1_bound), p) for coeff in k1_assigned.coefficients) 253 | # To perform a range check with a smaller lookup table, we shift the coefficients of k1_assigned to be in [0, 2*k1_bound] (the shift operation is constrained inside the circuit) 254 | k1_shifted = Polynomial([(coeff + int(k1_bound)) % p for coeff in k1_assigned.coefficients]) 255 | assert all(coeff >= 0 and coeff <= 2*k1_bound for coeff in k1_shifted.coefficients) 256 | 257 | phase_1_range_check_advice_cell_count += count_advice_cells_needed_for_poly_range_check(k1_assigned, 2*k1_bound + 1, lookup_bits) 258 | 259 | s_at_gamma_assigned = s_assigned.evaluate(gamma) 260 | 
phase_1_eval_at_gamma_constraint_advice_cell_count += len(s_assigned.coefficients) * 2 - 1 261 | 262 | e_at_gamma_assigned = e_assigned.evaluate(gamma) 263 | phase_1_eval_at_gamma_constraint_advice_cell_count += len(e_assigned.coefficients) * 2 - 1 264 | 265 | k1_at_gamma_assigned = k1_assigned.evaluate(gamma) 266 | phase_1_eval_at_gamma_constraint_advice_cell_count += len(k1_assigned.coefficients) * 2 - 1 267 | 268 | cyclo_at_gamma = cyclo.evaluate(gamma) 269 | cyclo_at_gamma_assigned = assign_to_circuit(Polynomial([cyclo_at_gamma]), p).coefficients[0] 270 | n_bits_N = n.bit_length() 271 | 272 | # This corresponds to `load_rlc_cache` 273 | phase_1_eval_at_gamma_constraint_advice_cell_count += (n_bits_N - 1) * 4 274 | # This corresponds to `add` 275 | phase_1_eval_at_gamma_constraint_advice_cell_count += 4 276 | 277 | for i in range(len(ctis)): 278 | # sanity check. The coefficients of ct0i should be in the range [-(qi-1)/2, (qi-1)/2] 279 | bound = int((qis[i] - 1) / 2) 280 | assert all(coeff >= -bound and coeff <= bound for coeff in ct0is[i].coefficients) 281 | 282 | # sanity check. The coefficients of ai should be in the range [-(qi-1)/2, (qi-1)/2] 283 | bound = int((qis[i] - 1) / 2) 284 | assert all(coeff >= -bound and coeff <= bound for coeff in ais[i].coefficients) 285 | 286 | # sanity check. The coefficients of ai * s should be in the range $[-N \cdot \frac{q_i - 1}{2}, N \cdot \frac{q_i - 1}{2}]$ 287 | bound = int((qis[i] - 1) / 2) * n 288 | res = ais[i] * s 289 | assert all(coeff >= -bound and coeff <= bound for coeff in res.coefficients) 290 | 291 | # sanity check. The coefficients of ai * s + e should be in the range $- (N \cdot \frac{q_i - 1}{2} + B), N \cdot \frac{q_i - 1}{2} + B]$ 292 | bound = int((qis[i] - 1) / 2) * n + b 293 | res = ais[i] * s + e 294 | assert all(coeff >= -bound and coeff <= bound for coeff in res.coefficients) 295 | 296 | # constraint. 
The coefficients of r2i should be in the range [-(qi-1)/2, (qi-1)/2] 297 | r2i_bound = int((qis[i] - 1) / 2) 298 | r2_bounds.append(r2i_bound) 299 | assert all(coeff >= -r2i_bound and coeff <= r2i_bound for coeff in r2is[i].coefficients) 300 | # After the circuit assignement, the coefficients of r2i_assigned must be in [0, r2i_bound] or [p - r2i_bound, p - 1] 301 | assert all(coeff in range(0, int(r2i_bound) + 1) or coeff in range(p - int(r2i_bound), p) for coeff in r2is_assigned[i].coefficients) 302 | # To perform a range check with a smaller lookup table, we shift the coefficients of r2i_assigned to be in [0, 2*r2i_bound] (the shift operation is constrained inside the circuit) 303 | r2i_shifted = Polynomial([(coeff + int(r2i_bound)) % p for coeff in r2is_assigned[i].coefficients]) 304 | assert all(coeff >= 0 and coeff <= 2*r2i_bound for coeff in r2i_shifted.coefficients) 305 | 306 | phase_1_range_check_advice_cell_count += count_advice_cells_needed_for_poly_range_check(r2is_assigned[i], 2*r2i_bound + 1, lookup_bits) 307 | 308 | # sanity check. The coefficients of r2i * cyclo should be in the range [-(qi-1)/2, (qi-1)/2] 309 | bound = int((qis[i] - 1) / 2) 310 | res = r2is[i] * cyclo 311 | assert all(coeff >= -bound and coeff <= bound for coeff in res.coefficients) 312 | 313 | # sanity check. The coefficients of k1 * k0i should be in the range $[-\frac{t - 1}{2} \cdot |K_{0,i}|, \frac{t - 1}{2} \cdot |K_{0,i}|]$ 314 | bound = int((t - 1) / 2) * abs(k0is[i]) 315 | res = k1.scalar_mul(k0is[i]) 316 | assert all(coeff >= -bound and coeff <= bound for coeff in res.coefficients) 317 | 318 | # sanity check. 
The coefficients of ct0i_hat (ai * s + e + k1 * k0i) should be in the range $[- (N \cdot \frac{q_i - 1}{2} + B +\frac{t - 1}{2} \cdot |K_{0,i}|), N \cdot \frac{q_i - 1}{2} + B + \frac{t - 1}{2} \cdot |K_{0,i}|]$ 319 | bound = int((qis[i] - 1) / 2) * n + b + int((t - 1) / 2) * abs(k0is[i]) 320 | assert all(coeff >= -bound and coeff <= bound for coeff in ct0is_hat[i].coefficients) 321 | 322 | # sanity check. The coefficients of ct0i - ct0i_hat should be in the range $ [- ((N+1) \cdot \frac{q_i - 1}{2} + B +\frac{t - 1}{2} \cdot |K_{0,i}|), (N+1) \cdot \frac{q_i - 1}{2} + B + \frac{t - 1}{2} \cdot |K_{0,i}|]$ 323 | bound = int((qis[i] - 1) / 2) * (n + 1) + b + int((t - 1) / 2) * abs(k0is[i]) 324 | sub = ct0is[i] + (ct0is_hat[i].scalar_mul(-1)) 325 | assert all(coeff >= -bound and coeff <= bound for coeff in sub.coefficients) 326 | 327 | # sanity check. The coefficients of ct0i - ct0i_hat - r2i * cyclo should be in the range $[- ((N+2) \cdot \frac{q_i - 1}{2} + B +\frac{t - 1}{2} \cdot |K_{0,i}|), (N+2) \cdot \frac{q_i - 1}{2} + B + \frac{t - 1}{2} \cdot |K_{0,i}|]$ 328 | bound = ((qis[i] - 1) / 2) * (n + 2) + b + ((t - 1) / 2) * abs(k0is[i]) 329 | sub = ct0is[i] + (ct0is_hat[i].scalar_mul(-1)) + (r2is[i] * cyclo).scalar_mul(-1) 330 | assert all(coeff >= -bound and coeff <= bound for coeff in sub.coefficients) 331 | 332 | # constraint. 
The coefficients of (ct0i - ct0i_hat - r2i * cyclo) / qi = r1i should be in the range $[\frac{- ((N+2) \cdot \frac{q_i - 1}{2} + B +\frac{t - 1}{2} \cdot |K_{0,i}|)}{q_i}, \frac{(N+2) \cdot \frac{q_i - 1}{2} + B + \frac{t - 1}{2} \cdot |K_{0,i}|}{q_i}]$ 333 | r1i_bound = (int((qis[i] - 1) / 2) * (n + 2) + b + int((t - 1) / 2) * abs(k0is[i])) / qis[i] 334 | # round bound to the nearest integer 335 | r1i_bound = int(r1i_bound) 336 | r1_bounds.append(r1i_bound) 337 | assert all(coeff >= -r1i_bound and coeff <= r1i_bound for coeff in r1is[i].coefficients) 338 | # After the circuit assignement, the coefficients of r1i_assigned must be in [0, r1i_bound] or [p - r1i_bound, p - 1] 339 | assert all(coeff in range(0, int(r1i_bound) + 1) or coeff in range(p - int(r1i_bound), p) for coeff in r1is_assigned[i].coefficients) 340 | # To perform a range check with a smaller lookup table, we shift the coefficients of r1i_assigned to be in [0, 2*r1i_bound] (the shift operation is constrained inside the circuit) 341 | r1i_shifted = Polynomial([(coeff + int(r1i_bound)) % p for coeff in r1is_assigned[i].coefficients]) 342 | assert all(coeff >= 0 and coeff <= 2*r1i_bound for coeff in r1i_shifted.coefficients) 343 | 344 | phase_1_range_check_advice_cell_count += count_advice_cells_needed_for_poly_range_check(r1is_assigned[i], 2*r1i_bound + 1, lookup_bits) 345 | 346 | ''' 347 | CIRCUIT - PHASE 1 - EVALUATION AT GAMMA CONSTRAINT 348 | ''' 349 | 350 | r1i_gamma_assigned = r1is_assigned[i].evaluate(gamma) 351 | r2i_gamma_assigned = r2is_assigned[i].evaluate(gamma) 352 | 353 | phase_1_eval_at_gamma_constraint_advice_cell_count += len(r1is_assigned[i].coefficients) * 2 - 1 354 | phase_1_eval_at_gamma_constraint_advice_cell_count += len(r2is_assigned[i].coefficients) * 2 - 1 355 | 356 | ai_gamma_assigned = ais_assigned[i].evaluate(gamma) 357 | ct0i_gamma_assigned = ct0is_assigned[i].evaluate(gamma) 358 | 359 | phase_1_eval_at_gamma_constraint_advice_cell_count += 
len(ais_assigned[i].coefficients) * 2 - 1 360 | phase_1_eval_at_gamma_constraint_advice_cell_count += len(ct0is_assigned[i].coefficients) * 2 - 1 361 | 362 | ''' 363 | CIRCUIT - PHASE 1 - CORRECT ENCRYPTION CONSTRAINT 364 | ''' 365 | 366 | lhs = ct0i_gamma_assigned 367 | rhs = (ai_gamma_assigned * s_at_gamma_assigned + e_at_gamma_assigned + (k1_at_gamma_assigned * k0i_constants[i]) + (r1i_gamma_assigned * qi_constants[i]) + (r2i_gamma_assigned * cyclo_at_gamma_assigned)) 368 | phase_1_encryption_constraint_advice_cell_count += 16 369 | 370 | assert lhs % p == rhs % p 371 | 372 | ''' 373 | VERIFICATION PHASE 374 | ''' 375 | 376 | cyclo_at_gamma = cyclo.evaluate(gamma) 377 | cyclo_at_gamma_assigned_expected = assign_to_circuit(Polynomial([cyclo_at_gamma]), p).coefficients[0] 378 | assert cyclo_at_gamma_assigned == cyclo_at_gamma_assigned_expected 379 | 380 | ai_gamma_assigned_expected = ais_assigned[i].evaluate(gamma) 381 | assert ai_gamma_assigned == ai_gamma_assigned_expected 382 | 383 | ct0i_gamma_assigned_expected = ct0is_assigned[i].evaluate(gamma) 384 | assert ct0i_gamma_assigned == ct0i_gamma_assigned_expected 385 | 386 | assert qis[i] == qi_constants[i] 387 | 388 | k0i_assigned_expected = assign_to_circuit(Polynomial([k0is[i]]), p).coefficients[0] 389 | assert k0i_constants[i] == k0i_assigned_expected 390 | 391 | total_advice_cell_count = phase_0_assignment_advice_cell_count + phase_1_range_check_advice_cell_count + phase_1_eval_at_gamma_constraint_advice_cell_count + phase_1_encryption_constraint_advice_cell_count 392 | 393 | print_advice_cells_info(total_advice_cell_count, phase_0_assignment_advice_cell_count, phase_1_range_check_advice_cell_count, phase_1_eval_at_gamma_constraint_advice_cell_count, phase_1_encryption_constraint_advice_cell_count) 394 | # ais and ct0is need to be parsed such that their coefficients are in the range [0, p - 1] 395 | # we don't call them assigned because they are never assigned to the circuit 396 | ais_in_p = 
[assign_to_circuit(ai, p) for ai in ais] 397 | ct0is_in_p = [assign_to_circuit(ct0i, p) for ct0i in ct0is] 398 | 399 | # Parse the inputs into a JSON format such this can be used as input for the (real) circuit 400 | json_input = { 401 | "s": [str(coef) for coef in s_assigned.coefficients], 402 | "e": [str(coef) for coef in e_assigned.coefficients], 403 | "k1": [str(coef) for coef in k1_assigned.coefficients], 404 | "r2is": [[str(coef) for coef in r2i.coefficients] for r2i in r2is_assigned], 405 | "r1is": [[str(coef) for coef in r1i.coefficients] for r1i in r1is_assigned], 406 | "ais": [[str(coef) for coef in ai_in_p.coefficients] for ai_in_p in ais_in_p], 407 | "ct0is": [[str(coef) for coef in ct0i_in_p.coefficients] for ct0i_in_p in ct0is_in_p], 408 | } 409 | 410 | # Calculate the bit size of the largest qi in qis for the filename 411 | qis_bitsize = max(qis).bit_length() 412 | qis_len = len(qis) 413 | 414 | # Construct the dynamic filename 415 | filename = f"sk_enc_{args.n}_{qis_len}x{qis_bitsize}_{args.t}.json" 416 | 417 | output_path = os.path.join("bfv-gkr", "src", "data", filename) 418 | 419 | with open(output_path, 'w') as f: 420 | json.dump(json_input, f) 421 | 422 | output_path = os.path.join("bfv-gkr", "src", "constants", f"sk_enc_constants_{args.n}_{qis_len}x{qis_bitsize}_{args.t}.rs") 423 | 424 | type_name = f"SkEnc{args.n}_{qis_len}x{qis_bitsize}_{args.t}" 425 | with open(output_path, 'w') as f: 426 | f.write(f"use super::BfvSkEncryptConstans;\n\n") 427 | f.write(f"pub struct {type_name};\n\n") 428 | f.write(f"impl BfvSkEncryptConstans<{qis_len}> for {type_name}" + " {\n") 429 | f.write(f" const N: usize = {n};\n") 430 | f.write(f" const E_BOUND: u64 = {b};\n") 431 | f.write(f" const S_BOUND: u64 = {1};\n") 432 | f.write(f" const R1_BOUNDS: [u64; {len(r1_bounds)}] = [{', '.join(map(str, r1_bounds))}];\n") 433 | f.write(f" const R2_BOUNDS: [u64; {len(r2_bounds)}] = [{', '.join(map(str, r2_bounds))}];\n") 434 | f.write(f" const K1_BOUND: u64 = 
{k1_bound};\n") 435 | qis_str = ', '.join(f'"{q}"' for q in qi_constants) 436 | f.write(f" const QIS: [&'static str; {len(qi_constants)}] = [{qis_str}];\n") 437 | k0is_str = ', '.join(f'"{k0i}"' for k0i in k0i_constants) 438 | f.write(f" const K0IS: [&'static str; {len(k0i_constants)}] = [{k0is_str}];\n") 439 | f.write("}\n") 440 | 441 | 442 | 443 | if __name__ == "__main__": 444 | parser = argparse.ArgumentParser( 445 | description="Generate rust constants and json inputs for BFV zk proof of secret key encryption circuit" 446 | ) 447 | parser.add_argument( 448 | "-n", type=int, required=True, help="Degree of f(x), must be a power of 2." 449 | ) 450 | parser.add_argument( 451 | "-qis", type=str, required=True, help="List of qis such that qis[i] is the modulus of the i-th CRT basis of the modulus q of the ciphertext space." 452 | ) 453 | parser.add_argument( 454 | "-t", type=int, required=True, help="Modulus t of the plaintext space." 455 | ) 456 | 457 | args = parser.parse_args() 458 | main(args) 459 | -------------------------------------------------------------------------------- /scripts/requirements.txt: -------------------------------------------------------------------------------- 1 | bfv @ git+https://github.com/nulltea/bfv-py.git@ad4785805824b8171c7588e97835cd9e1f6a2f0e 2 | -------------------------------------------------------------------------------- /scripts/utils.py: -------------------------------------------------------------------------------- 1 | from bfv.polynomial import Polynomial 2 | import math 3 | 4 | def assign_to_circuit(poly: Polynomial, p: int) -> Polynomial: 5 | ''' 6 | This function takes a polynomial and returns its coefficients in the field Zp 7 | `poly` is the polynomial to be assigned to the circuit 8 | `p` is the field modulus 9 | ''' 10 | assigned_coefficients = [] 11 | for coeff in poly.coefficients: 12 | if coeff < 0: 13 | coeff = coeff % p 14 | if coeff > p: 15 | coeff = coeff % p 16 | assigned_coefficients.append(coeff) 17 | 
18 | return Polynomial(assigned_coefficients) 19 | 20 | def count_advice_cells_needed_for_poly_range_check(poly: Polynomial, bound: int, lookup_bits: int) -> int: 21 | ''' 22 | This function takes a polynomial and a bound and returns the number of advice cells needed for a range check 23 | `poly` is the polynomial to be checked 24 | `bound` is the upper bound for the range check 25 | `lookup_bits` is the number of bits used for the lookup table 26 | ''' 27 | 28 | count = 0 29 | 30 | # 4 advice cells for each coefficient needed for the shift addition operation 31 | count += 4 * len(poly.coefficients) 32 | 33 | # further advice cells for range check inside `check_less_than_safe` 34 | bound_bits = bound.bit_length() 35 | range_bits = math.ceil(bound_bits / lookup_bits) * lookup_bits 36 | num_limbs = math.ceil(range_bits / lookup_bits) 37 | 38 | if num_limbs > 1: 39 | # 1 + (3 * (num_limbs - 1)) advice cells 40 | count += (1 + (3 * (num_limbs - 1))) * len(poly.coefficients) 41 | else: 42 | # count is not updated if num_limbs is 1 43 | pass 44 | 45 | # 7 advice cells required for the `check_less_than` constraint inside `check_less_than_safe` 46 | count += 7 * len(poly.coefficients) 47 | 48 | # the check_less_than_advice_cells constraint also performs a range check on the check_cell in range_bits 49 | if num_limbs > 1: 50 | # 1 + (3 * (num_limbs - 1)) advice cells for the check_less_than_advice_cells constraint 51 | count += (1 + (3 * (num_limbs - 1))) * len(poly.coefficients) 52 | else: 53 | # count is not updated if num_limbs is 1 54 | pass 55 | 56 | return count 57 | 58 | def print_advice_cells_info(total_advice_cell_count, phase_0_count, phase_1_range_check_count, phase_1_eval_at_gamma_count, phase_1_encryption_constraint_count): 59 | print("Halo2 Circuit Profile:") 60 | print(f"Total Advice Cells Needed: {total_advice_cell_count}") 61 | 62 | print("\nPhase 0 - Assignment:") 63 | print(f" - Count: {phase_0_count}, Percentage: {(phase_0_count / 
total_advice_cell_count) * 100:.2f}%") 64 | 65 | print("\nPhase 1 - Range Check:") 66 | print(f" - Count: {phase_1_range_check_count}, Percentage: {(phase_1_range_check_count / total_advice_cell_count) * 100:.2f}%") 67 | 68 | print("\nPhase 1 - Evaluation at Gamma Constraint:") 69 | print(f" - Count: {phase_1_eval_at_gamma_count}, Percentage: {(phase_1_eval_at_gamma_count / total_advice_cell_count) * 100:.2f}%") 70 | 71 | print("\nPhase 1 - Correct Encryption Constraint:") 72 | print(f" - Count: {phase_1_encryption_constraint_count}, Percentage: {(phase_1_encryption_constraint_count / total_advice_cell_count) * 100:.2f}%") 73 | --------------------------------------------------------------------------------