├── .gitignore
├── .vscode
│   └── settings.json
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── Makefile
├── README.md
├── docs
│   ├── WEEK-1.md
│   ├── WEEK-2.md
│   ├── WEEK-3.md
│   ├── WEEK-4.md
│   ├── WEEK-5.md
│   ├── WEEK-6.md
│   ├── WEEK-7.md
│   └── WEEK-8.md
├── exercises
│   ├── interview
│   │   ├── Cargo.toml
│   │   ├── README.md
│   │   └── src
│   │       └── lib.rs
│   ├── ntt
│   │   ├── Cargo.toml
│   │   ├── README.md
│   │   └── src
│   │       ├── lib.rs
│   │       ├── main.rs
│   │       └── ntt.rs
│   ├── rsa
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── lib.rs
│   ├── shamir-secret-share
│   │   ├── Cargo.toml
│   │   ├── README.md
│   │   └── src
│   │       └── lib.rs
│   ├── sumcheck
│   │   ├── Cargo.toml
│   │   ├── README.md
│   │   ├── examples
│   │   │   ├── adjacency_matrix.csv
│   │   │   └── graph_triangles.rs
│   │   └── src
│   │       ├── lib.rs
│   │       ├── main.rs
│   │       ├── sumcheck.rs
│   │       └── utils.rs
│   └── vault-of-loki
│       ├── Cargo.toml
│       ├── README.md
│       ├── src
│       │   └── main.rs
│       └── srs.bin
└── snarks
    ├── babysnark
    │   ├── Cargo.toml
    │   ├── README.md
    │   └── src
    │       └── lib.rs
    └── stark101
        ├── Cargo.toml
        ├── README.md
        ├── proof.json
        └── src
            ├── field.rs
            ├── fri.rs
            ├── lib.rs
            ├── main.rs
            ├── program.rs
            └── proof.rs

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
/target

--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
{
  "rust-analyzer.cargo.features": "all",
  "github.copilot.enable": {
    "markdown": true
  },
  // https://raw.githubusercontent.com/PKief/vscode-material-icon-theme/main/images/folderIcons.png
  "material-icon-theme.folders.associations": {
    "curves": "Animation",
    "fields": "Hook",
    "polynomials": "Include",
    "numbers": "Delta",
    "cryptosystems": "Secure",
    "challenges": "Console",
    "exercises": "Coverage",
    "snarks": "Moon",
    "rsa": "Delta",
    "ntt": "Animation",
    "shamir-secret-share": "Shared",
    "sumcheck": "Import",
    "vault-of-loki": "Private",
    "interview": "Custom",
    "babysnark": "Moon",
    "plonk": "Stack",
    "stark101": "Template"
  },
  // https://raw.githubusercontent.com/PKief/vscode-material-icon-theme/main/images/fileIcons.png
  "material-icon-theme.files.associations": {}
}

--------------------------------------------------------------------------------
/Cargo.lock:
--------------------------------------------------------------------------------
1 | # This file is automatically @generated by Cargo.
2 | # It is not intended for manual editing.
3 | version = 3 4 | 5 | [[package]] 6 | name = "aho-corasick" 7 | version = "1.1.3" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" 10 | dependencies = [ 11 | "memchr", 12 | ] 13 | 14 | [[package]] 15 | name = "anstream" 16 | version = "0.6.14" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" 19 | dependencies = [ 20 | "anstyle", 21 | "anstyle-parse", 22 | "anstyle-query", 23 | "anstyle-wincon", 24 | "colorchoice", 25 | "is_terminal_polyfill", 26 | "utf8parse", 27 | ] 28 | 29 | [[package]] 30 | name = "anstyle" 31 | version = "1.0.7" 32 | source = "registry+https://github.com/rust-lang/crates.io-index" 33 | checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" 34 | 35 | [[package]] 36 | name = "anstyle-parse" 37 | version = "0.2.4" 38 | source = "registry+https://github.com/rust-lang/crates.io-index" 39 | checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" 40 | dependencies = [ 41 | "utf8parse", 42 | ] 43 | 44 | [[package]] 45 | name = "anstyle-query" 46 | version = "1.1.0" 47 | source = "registry+https://github.com/rust-lang/crates.io-index" 48 | checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" 49 | dependencies = [ 50 | "windows-sys", 51 | ] 52 | 53 | [[package]] 54 | name = "anstyle-wincon" 55 | version = "3.0.3" 56 | source = "registry+https://github.com/rust-lang/crates.io-index" 57 | checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" 58 | dependencies = [ 59 | "anstyle", 60 | "windows-sys", 61 | ] 62 | 63 | [[package]] 64 | name = "autocfg" 65 | version = "1.3.0" 66 | source = "registry+https://github.com/rust-lang/crates.io-index" 67 | checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" 68 | 69 | [[package]] 70 | name = "baby-snark" 71 | version = "0.7.0" 72 | source = "git+https://github.com/lambdaclass/lambdaworks.git?branch=main#e465d7c791dce405c0c630f6afd44b12f277ab0a" 73 | dependencies = [ 74 | "lambdaworks-crypto 0.7.0", 75 | "lambdaworks-math 0.7.0", 76 | "rand", 77 | "rand_chacha", 78 | ] 79 | 80 | [[package]] 81 | name = "babysnark" 82 | version = "0.1.0" 83 | dependencies = [ 84 | "baby-snark", 85 | "lambdaworks-crypto 0.9.0", 86 | "lambdaworks-math 0.9.0", 87 | ] 88 | 89 | [[package]] 90 | name = "block-buffer" 91 | version = "0.10.4" 92 | source = "registry+https://github.com/rust-lang/crates.io-index" 93 | checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" 94 | dependencies = [ 95 | "generic-array", 96 | ] 97 | 98 | [[package]] 99 | name = "cfg-if" 100 | version = "1.0.0" 101 | source = "registry+https://github.com/rust-lang/crates.io-index" 102 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 103 | 104 | [[package]] 105 | name = "colorchoice" 106 | version = "1.0.1" 107 | source = "registry+https://github.com/rust-lang/crates.io-index" 108 | checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" 109 | 110 | [[package]] 111 | name = "cpufeatures" 112 | version = "0.2.12" 113 | source = "registry+https://github.com/rust-lang/crates.io-index" 114 | checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" 115 | dependencies = [ 116 | "libc", 117 | ] 118 | 119 | [[package]] 120 | name = "crossbeam-deque" 121 | version = "0.8.5" 122 | source 
= "registry+https://github.com/rust-lang/crates.io-index" 123 | checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" 124 | dependencies = [ 125 | "crossbeam-epoch", 126 | "crossbeam-utils", 127 | ] 128 | 129 | [[package]] 130 | name = "crossbeam-epoch" 131 | version = "0.9.18" 132 | source = "registry+https://github.com/rust-lang/crates.io-index" 133 | checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" 134 | dependencies = [ 135 | "crossbeam-utils", 136 | ] 137 | 138 | [[package]] 139 | name = "crossbeam-utils" 140 | version = "0.8.19" 141 | source = "registry+https://github.com/rust-lang/crates.io-index" 142 | checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" 143 | 144 | [[package]] 145 | name = "crypto-common" 146 | version = "0.1.6" 147 | source = "registry+https://github.com/rust-lang/crates.io-index" 148 | checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" 149 | dependencies = [ 150 | "generic-array", 151 | "typenum", 152 | ] 153 | 154 | [[package]] 155 | name = "csv" 156 | version = "1.3.0" 157 | source = "registry+https://github.com/rust-lang/crates.io-index" 158 | checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" 159 | dependencies = [ 160 | "csv-core", 161 | "itoa", 162 | "ryu", 163 | "serde", 164 | ] 165 | 166 | [[package]] 167 | name = "csv-core" 168 | version = "0.1.11" 169 | source = "registry+https://github.com/rust-lang/crates.io-index" 170 | checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" 171 | dependencies = [ 172 | "memchr", 173 | ] 174 | 175 | [[package]] 176 | name = "digest" 177 | version = "0.10.7" 178 | source = "registry+https://github.com/rust-lang/crates.io-index" 179 | checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" 180 | dependencies = [ 181 | "block-buffer", 182 | "crypto-common", 183 | ] 184 | 185 | [[package]] 186 | name = "either" 187 | version = "1.10.0" 188 | source = "registry+https://github.com/rust-lang/crates.io-index" 189 | checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" 190 | 191 | [[package]] 192 | name = "env_filter" 193 | version = "0.1.0" 194 | source = "registry+https://github.com/rust-lang/crates.io-index" 195 | checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" 196 | dependencies = [ 197 | "log", 198 | "regex", 199 | ] 200 | 201 | [[package]] 202 | name = "env_logger" 203 | version = "0.11.3" 204 | source = "registry+https://github.com/rust-lang/crates.io-index" 205 | checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" 206 | dependencies = [ 207 | "anstream", 208 | "anstyle", 209 | "env_filter", 210 | "humantime", 211 | "log", 212 | ] 213 | 214 | [[package]] 215 | name = "generic-array" 216 | version = "0.14.7" 217 | source = "registry+https://github.com/rust-lang/crates.io-index" 218 | checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" 219 | dependencies = [ 220 | "typenum", 221 | "version_check", 222 | ] 223 | 224 | [[package]] 225 | name = "getrandom" 226 | version = "0.2.14" 227 | source = "registry+https://github.com/rust-lang/crates.io-index" 228 | checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" 229 | dependencies = [ 230 | "cfg-if", 231 | "libc", 232 | "wasi", 233 | ] 234 | 235 | [[package]] 236 | name = "hex" 237 | version = "0.4.3" 238 | source = "registry+https://github.com/rust-lang/crates.io-index" 239 
| checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" 240 | 241 | [[package]] 242 | name = "humantime" 243 | version = "2.1.0" 244 | source = "registry+https://github.com/rust-lang/crates.io-index" 245 | checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" 246 | 247 | [[package]] 248 | name = "interview" 249 | version = "0.1.0" 250 | dependencies = [ 251 | "lambdaworks-math 0.9.0", 252 | ] 253 | 254 | [[package]] 255 | name = "is_terminal_polyfill" 256 | version = "1.70.0" 257 | source = "registry+https://github.com/rust-lang/crates.io-index" 258 | checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" 259 | 260 | [[package]] 261 | name = "itoa" 262 | version = "1.0.10" 263 | source = "registry+https://github.com/rust-lang/crates.io-index" 264 | checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" 265 | 266 | [[package]] 267 | name = "keccak" 268 | version = "0.1.5" 269 | source = "registry+https://github.com/rust-lang/crates.io-index" 270 | checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" 271 | dependencies = [ 272 | "cpufeatures", 273 | ] 274 | 275 | [[package]] 276 | name = "lambdaworks-crypto" 277 | version = "0.7.0" 278 | source = "git+https://github.com/lambdaclass/lambdaworks.git?branch=main#e465d7c791dce405c0c630f6afd44b12f277ab0a" 279 | dependencies = [ 280 | "lambdaworks-math 0.7.0", 281 | "sha2", 282 | "sha3", 283 | ] 284 | 285 | [[package]] 286 | name = "lambdaworks-crypto" 287 | version = "0.9.0" 288 | source = "registry+https://github.com/rust-lang/crates.io-index" 289 | checksum = "719a902cc588fd601d2f9530e3de96def04d335297c3527a8cded239d4fa4c65" 290 | dependencies = [ 291 | "lambdaworks-math 0.9.0", 292 | "serde", 293 | "sha2", 294 | "sha3", 295 | ] 296 | 297 | [[package]] 298 | name = "lambdaworks-math" 299 | version = "0.7.0" 300 | source = "git+https://github.com/lambdaclass/lambdaworks.git?branch=main#e465d7c791dce405c0c630f6afd44b12f277ab0a" 301 | 302 | [[package]] 303 | name = "lambdaworks-math" 304 | version = "0.9.0" 305 | source = "registry+https://github.com/rust-lang/crates.io-index" 306 | checksum = "030a60407d8b7bfef27f591acd5c570a7729d81f99531e992f6d61189b89e72d" 307 | dependencies = [ 308 | "rayon", 309 | "serde", 310 | "serde_json", 311 | ] 312 | 313 | [[package]] 314 | name = "libc" 315 | version = "0.2.153" 316 | source = "registry+https://github.com/rust-lang/crates.io-index" 317 | checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" 318 | 319 | [[package]] 320 | name = "log" 321 | version = "0.4.21" 322 | source = "registry+https://github.com/rust-lang/crates.io-index" 323 | checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" 324 | 325 | [[package]] 326 | name = "memchr" 327 | version = "2.7.4" 328 | source = "registry+https://github.com/rust-lang/crates.io-index" 329 | checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" 330 | 331 | [[package]] 332 | name = "ntt" 333 | version = "0.1.0" 334 | dependencies = [ 335 | "env_logger", 336 | "lambdaworks-math 0.9.0", 337 | "log", 338 | ] 339 | 340 | [[package]] 341 | name = "num-bigint" 342 | version = "0.4.5" 343 | source = "registry+https://github.com/rust-lang/crates.io-index" 344 | checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" 345 | dependencies = [ 346 | "num-integer", 347 | "num-traits", 348 | ] 349 | 350 | [[package]] 351 | name = "num-integer" 352 | version = "0.1.46" 
353 | source = "registry+https://github.com/rust-lang/crates.io-index" 354 | checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" 355 | dependencies = [ 356 | "num-traits", 357 | ] 358 | 359 | [[package]] 360 | name = "num-traits" 361 | version = "0.2.19" 362 | source = "registry+https://github.com/rust-lang/crates.io-index" 363 | checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" 364 | dependencies = [ 365 | "autocfg", 366 | ] 367 | 368 | [[package]] 369 | name = "ppv-lite86" 370 | version = "0.2.17" 371 | source = "registry+https://github.com/rust-lang/crates.io-index" 372 | checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" 373 | 374 | [[package]] 375 | name = "proc-macro2" 376 | version = "1.0.79" 377 | source = "registry+https://github.com/rust-lang/crates.io-index" 378 | checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" 379 | dependencies = [ 380 | "unicode-ident", 381 | ] 382 | 383 | [[package]] 384 | name = "quote" 385 | version = "1.0.35" 386 | source = "registry+https://github.com/rust-lang/crates.io-index" 387 | checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" 388 | dependencies = [ 389 | "proc-macro2", 390 | ] 391 | 392 | [[package]] 393 | name = "rand" 394 | version = "0.8.5" 395 | source = "registry+https://github.com/rust-lang/crates.io-index" 396 | checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" 397 | dependencies = [ 398 | "libc", 399 | "rand_chacha", 400 | "rand_core", 401 | ] 402 | 403 | [[package]] 404 | name = "rand_chacha" 405 | version = "0.3.1" 406 | source = "registry+https://github.com/rust-lang/crates.io-index" 407 | checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" 408 | dependencies = [ 409 | "ppv-lite86", 410 | "rand_core", 411 | ] 412 | 413 | [[package]] 414 | name = "rand_core" 415 | version = "0.6.4" 416 | source = "registry+https://github.com/rust-lang/crates.io-index" 417 | checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" 418 | dependencies = [ 419 | "getrandom", 420 | ] 421 | 422 | [[package]] 423 | name = "rayon" 424 | version = "1.9.0" 425 | source = "registry+https://github.com/rust-lang/crates.io-index" 426 | checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" 427 | dependencies = [ 428 | "either", 429 | "rayon-core", 430 | ] 431 | 432 | [[package]] 433 | name = "rayon-core" 434 | version = "1.12.1" 435 | source = "registry+https://github.com/rust-lang/crates.io-index" 436 | checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" 437 | dependencies = [ 438 | "crossbeam-deque", 439 | "crossbeam-utils", 440 | ] 441 | 442 | [[package]] 443 | name = "regex" 444 | version = "1.10.5" 445 | source = "registry+https://github.com/rust-lang/crates.io-index" 446 | checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" 447 | dependencies = [ 448 | "aho-corasick", 449 | "memchr", 450 | "regex-automata", 451 | "regex-syntax", 452 | ] 453 | 454 | [[package]] 455 | name = "regex-automata" 456 | version = "0.4.7" 457 | source = "registry+https://github.com/rust-lang/crates.io-index" 458 | checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" 459 | dependencies = [ 460 | "aho-corasick", 461 | "memchr", 462 | "regex-syntax", 463 | ] 464 | 465 | [[package]] 466 | name = "regex-syntax" 467 | version = "0.8.4" 468 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 469 | checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" 470 | 471 | [[package]] 472 | name = "rsa" 473 | version = "0.1.0" 474 | dependencies = [ 475 | "num-bigint", 476 | "num-traits", 477 | ] 478 | 479 | [[package]] 480 | name = "ryu" 481 | version = "1.0.17" 482 | source = "registry+https://github.com/rust-lang/crates.io-index" 483 | checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" 484 | 485 | [[package]] 486 | name = "serde" 487 | version = "1.0.197" 488 | source = "registry+https://github.com/rust-lang/crates.io-index" 489 | checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" 490 | dependencies = [ 491 | "serde_derive", 492 | ] 493 | 494 | [[package]] 495 | name = "serde_derive" 496 | version = "1.0.197" 497 | source = "registry+https://github.com/rust-lang/crates.io-index" 498 | checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" 499 | dependencies = [ 500 | "proc-macro2", 501 | "quote", 502 | "syn", 503 | ] 504 | 505 | [[package]] 506 | name = "serde_json" 507 | version = "1.0.114" 508 | source = "registry+https://github.com/rust-lang/crates.io-index" 509 | checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" 510 | dependencies = [ 511 | "itoa", 512 | "ryu", 513 | "serde", 514 | ] 515 | 516 | [[package]] 517 | name = "sha2" 518 | version = "0.10.8" 519 | source = "registry+https://github.com/rust-lang/crates.io-index" 520 | checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" 521 | dependencies = [ 522 | "cfg-if", 523 | "cpufeatures", 524 | "digest", 525 | ] 526 | 527 | [[package]] 528 | name = "sha3" 529 | version = "0.10.8" 530 | source = "registry+https://github.com/rust-lang/crates.io-index" 531 | checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" 532 | dependencies = [ 533 | "digest", 534 | "keccak", 535 | ] 536 | 537 | [[package]] 538 | name = "shamir-secret-share" 539 | version = "0.1.0" 540 | dependencies = [ 541 | "lambdaworks-crypto 0.9.0", 542 | "lambdaworks-math 0.9.0", 543 | "rand", 544 | ] 545 | 546 | [[package]] 547 | name = "stark101" 548 | version = "0.1.0" 549 | dependencies = [ 550 | "env_logger", 551 | "hex", 552 | "lambdaworks-crypto 0.9.0", 553 | "lambdaworks-math 0.9.0", 554 | "log", 555 | "rand", 556 | "serde", 557 | "serde_json", 558 | ] 559 | 560 | [[package]] 561 | name = "sumcheck" 562 | version = "0.1.0" 563 | dependencies = [ 564 | "csv", 565 | "env_logger", 566 | "lambdaworks-crypto 0.9.0", 567 | "lambdaworks-math 0.9.0", 568 | "log", 569 | "rand", 570 | ] 571 | 572 | [[package]] 573 | name = "syn" 574 | version = "2.0.52" 575 | source = "registry+https://github.com/rust-lang/crates.io-index" 576 | checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" 577 | dependencies = [ 578 | "proc-macro2", 579 | "quote", 580 | "unicode-ident", 581 | ] 582 | 583 | [[package]] 584 | name = "typenum" 585 | version = "1.17.0" 586 | source = "registry+https://github.com/rust-lang/crates.io-index" 587 | checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" 588 | 589 | [[package]] 590 | name = "unicode-ident" 591 | version = "1.0.12" 592 | source = "registry+https://github.com/rust-lang/crates.io-index" 593 | checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" 594 | 595 | [[package]] 596 | name = "utf8parse" 597 | version = "0.2.2" 598 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 599 | checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" 600 | 601 | [[package]] 602 | name = "vault-of-loki" 603 | version = "0.1.0" 604 | dependencies = [ 605 | "lambdaworks-crypto 0.9.0", 606 | "lambdaworks-math 0.9.0", 607 | "rand", 608 | ] 609 | 610 | [[package]] 611 | name = "version_check" 612 | version = "0.9.4" 613 | source = "registry+https://github.com/rust-lang/crates.io-index" 614 | checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" 615 | 616 | [[package]] 617 | name = "wasi" 618 | version = "0.11.0+wasi-snapshot-preview1" 619 | source = "registry+https://github.com/rust-lang/crates.io-index" 620 | checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" 621 | 622 | [[package]] 623 | name = "windows-sys" 624 | version = "0.52.0" 625 | source = "registry+https://github.com/rust-lang/crates.io-index" 626 | checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" 627 | dependencies = [ 628 | "windows-targets", 629 | ] 630 | 631 | [[package]] 632 | name = "windows-targets" 633 | version = "0.52.5" 634 | source = "registry+https://github.com/rust-lang/crates.io-index" 635 | checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" 636 | dependencies = [ 637 | "windows_aarch64_gnullvm", 638 | "windows_aarch64_msvc", 639 | "windows_i686_gnu", 640 | "windows_i686_gnullvm", 641 | "windows_i686_msvc", 642 | "windows_x86_64_gnu", 643 | "windows_x86_64_gnullvm", 644 | "windows_x86_64_msvc", 645 | ] 646 | 647 | [[package]] 648 | name = "windows_aarch64_gnullvm" 649 | version = "0.52.5" 650 | source = "registry+https://github.com/rust-lang/crates.io-index" 651 | checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" 652 | 653 | [[package]] 654 | name = "windows_aarch64_msvc" 655 | version = "0.52.5" 656 | source = "registry+https://github.com/rust-lang/crates.io-index" 657 | checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" 658 | 659 | [[package]] 660 | name = "windows_i686_gnu" 661 | version = "0.52.5" 662 | source = "registry+https://github.com/rust-lang/crates.io-index" 663 | checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" 664 | 665 | [[package]] 666 | name = "windows_i686_gnullvm" 667 | version = "0.52.5" 668 | source = "registry+https://github.com/rust-lang/crates.io-index" 669 | checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" 670 | 671 | [[package]] 672 | name = "windows_i686_msvc" 673 | version = "0.52.5" 674 | source = "registry+https://github.com/rust-lang/crates.io-index" 675 | checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" 676 | 677 | [[package]] 678 | name = "windows_x86_64_gnu" 679 | version = "0.52.5" 680 | source = "registry+https://github.com/rust-lang/crates.io-index" 681 | checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" 682 | 683 | [[package]] 684 | name = "windows_x86_64_gnullvm" 685 | version = "0.52.5" 686 | source = "registry+https://github.com/rust-lang/crates.io-index" 687 | checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" 688 | 689 | [[package]] 690 | name = "windows_x86_64_msvc" 691 | version = "0.52.5" 692 | source = "registry+https://github.com/rust-lang/crates.io-index" 693 | checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" 694 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[workspace]
resolver = "2"
members = [
  "exercises/vault-of-loki",
  "exercises/interview",
  "exercises/rsa",
  "exercises/ntt",
  "exercises/shamir-secret-share",
  "exercises/sumcheck",
  "snarks/babysnark",
  "snarks/stark101",
  # "snarks/plonk", # todo hopefully
]

[workspace.package]
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[workspace.dependencies]
lambdaworks-crypto = "0.9.0"
lambdaworks-math = { version = "0.9.0", features = [
  "lambdaworks-serde-string",
  "lambdaworks-serde-binary",
] }

log = "*"
env_logger = "*"
hex = "*"
rand = "0.8.5"
rand_chacha = "0.3.1"
serde = "*"
serde_json = "*"

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Erhan Tezcan

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
.PHONY: stark101
stark101:
	@cargo run --release --bin stark101

.PHONY: sumcheck
sumcheck:
	@cargo run --release --bin sumcheck

.PHONY: ntt
ntt:
	@cargo run --release --bin ntt

.PHONY: babysnark
babysnark:
	@cargo test -p babysnark

.PHONY: rsa
rsa:
	@cargo test -p rsa

.PHONY: shamir
shamir:
	@cargo test -p shamir-secret-share

.PHONY: vault-of-loki
vault-of-loki:
	@cargo run --release --bin vault-of-loki

.PHONY: interview
interview:
	@cargo test -p interview

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Sparkling Water Bootcamp 0b10

This repository is an umbrella repository for my notes during [Sparkling Water Bootcamp in Cryptography 0b10](https://github.com/lambdaclass/lambdaworks/blob/bootcamp0b10/bootcamp/sparkling_water_0b10.md).
The lecture notes are given below:

- [Week 1](./docs/WEEK-1.md): Arithmetic, Abstract Algebra, Polynomials, RSA, FFT.
- [Week 2](./docs/WEEK-2.md): Elliptic Curves, Commitments & Hashing, Pairings.
- [Week 3](./docs/WEEK-3.md): SNARKs, KZG, BabySNARK.
- [Week 4](./docs/WEEK-4.md): STARKs, FRI.
- [Week 5](./docs/WEEK-5.md): PlonK.
- [Week 6](./docs/WEEK-6.md) (_skipped due to conference_)
- [Week 7](./docs/WEEK-7.md): MLE, Sumcheck, Binius, Brakedown.
- [Week 8](./docs/WEEK-8.md): Plookup, zkVMs.

Exercises are given below:

- [Interview](./exercises/interview/README.md): bootcamp interview questions & answers.
- [RSA](./exercises/rsa/): a very basic implementation of the **RSA cryptosystem**.
- [Shamir](./exercises/shamir-secret-share/README.md): a basic **Shamir's Secret Sharing** implementation.
- [NTT](./exercises/ntt/README.md): a very basic fast radix-2 **Number Theoretic Transform** implementation.
- [Vault of Loki](./exercises/vault-of-loki/README.md): the **KZG** fake-proof challenge by LambdaClass.
- [BabySnark](./snarks/babysnark/): small example circuits using **BabySnark**.
- [Stark101](./snarks/stark101/README.md): the **Stark101** prover, based on its blog posts.
- [Sumcheck](./exercises/sumcheck/README.md): an implementation of the **Sumcheck protocol** for multilinear polynomials.

You can run any of these via the Makefile:

```sh
make interview
make rsa
make shamir
make ntt
make vault-of-loki
make stark101
make sumcheck
```

--------------------------------------------------------------------------------
/docs/WEEK-2.md:
--------------------------------------------------------------------------------
> # Week 2
>
> We talked about Elliptic Curves, mostly the Short Weierstrass curve and its operations. We also talked about commitments and how to commit to a polynomial using an elliptic curve. We also talked about Pairings and how they can be used to prove evaluations of a committed polynomial.

# Elliptic Curves: Short Weierstrass

There are several forms of elliptic curve definitions. We will describe the most notable one, that is the Short Weierstrass form. The elliptic curve is defined by the set of pairs $(x, y) \in \mathbb{F}_p \times \mathbb{F}_p$ that satisfy the following curve equation:

$$
y^2 = x^3 + ax + b
$$

where $4a^3 + 27b^2 \ne 0$. Notice that this means we are looking for $x$ where $x^3 + ax + b$ is a square (also denoted as **Quadratic Residue**). The constraint $4a^3 + 27b^2 \ne 0$ says that the discriminant of $x^3 + ax + b$ is non-zero, i.e. the cubic has no repeated roots; a repeated root would make the curve singular and break the group law described below.

## Point Addition

Elliptic curves form a group under the operation of point addition, that is $+:E\times E \to E$. The identity element is the point at infinity $\mathcal{O}$, and we kind of add this as an _extra_ point.

**Addition / Chord Rule**: To add two points $P, Q$ you take a straight line through the two points, which will intersect the curve at a third point $R$. The sum $P + Q$ is the reflection of $R$ over the x-axis. Given $P = (x_1, y_1)$ and $Q = (x_2, y_2)$, the sum $P + Q = (x_3, y_3)$ is given by:

$$
\begin{align*}
s &= \frac{y_2 - y_1}{x_2 - x_1} \\
x_3 &= s^2 - x_1 - x_2 \\
y_3 &= s(x_1 - x_3) - y_1 \\
\end{align*}
$$

**Doubling / Tangent Rule**: There is the case where $P = Q$, in which case we take the tangent line at $P$ and find the intersection point $R$.
Given $P = (x_1, y_1)$, the double $2P = (x_3, y_3)$ is given by the same formula, but with a different slope $s$:

$$
\begin{align*}
s &= \frac{3x_1^2 + a}{2y_1} \\
x_3 &= s^2 - 2x_1 \\
y_3 &= s(x_1 - x_3) - y_1 \\
\end{align*}
$$

> Notice that the formula is a bit different when $P = Q$ because the slope is different. **Twisted Edwards** curves have a simpler formula for such a case: both the chord and tangent rules are the same!

See for a nice animation of this operation.

### Point Inversion

Given a point $P = (x, y)$ the inverse is given by $-P = (x, -y)$, that is the reflection over the x-axis. As such, $P - P = \mathcal{O}$.

### Scalar Multiplication

We can add a point to itself multiple times; this is called scalar multiplication. Given a point $P$ and a scalar $a$, we can compute $aP$ by adding $P$ to itself $a$ times. We use the efficient "double-and-add" algorithm for this (a small sketch is given below). Notice that this is called "square-and-multiply" in multiplicative notation. For example, $5P = 2(2P) + P$, which uses two "doubles" and one addition.

## Number of Points

It is quite important to know the number of points $r$ on the curve over a finite field $\mathbb{F}_p$. The number of points has a bound given by [Hasse's theorem](https://en.wikipedia.org/wiki/Hasse%27s_theorem_on_elliptic_curves):

$$
|r - (p + 1)| \le 2\sqrt{p}
$$

This means that the number of points $r$ is close to the size of the field $p$, following the inequality below:

$$
p + 1 - 2\sqrt{p} \le r \le p + 1 + 2\sqrt{p}
$$

It is generally not easy to find the number of points on the curve. In the best case, we would like the number of points on the curve to be some large prime number. However, we are still okay with large numbers that have some large prime factor.

> Sometimes you have "families of curves" and there you may have a formula to calculate the number of points. See for example the BN254 curve. There, the number of points can be simply computed from a given parameter, which is much more efficient than using a more complicated algorithm such as [Schoof's Algorithm](https://en.wikipedia.org/wiki/Schoof%27s_algorithm) to find the number of points.

### Curves for Recursion

Pasta curves are quite interesting, namely the two curves Pallas and Vesta. Both curves are defined over the equation $y^2 = x^3 + 5$.

- The Pallas curve is defined over the base field $\mathbb{F}_p$, and has $r$ points.
- The Vesta curve is defined over the base field $\mathbb{F}_r$, and has $p$ points.

[Mina Protocol](https://o1-labs.github.io/proof-systems/specs/pasta.html) uses these curves for efficient verification! Similarly, the [Nova folding scheme](https://github.com/microsoft/Nova) uses these curves for efficient verification.

## Generator Point

In a prime order group, we would like to find a generator element $g \in E$ such that:

$$
\{0g, g, 2g, 3g, \ldots, (r-1)g\} = E
$$

In groups with non-prime order but with a large prime factor, we instead go for a generator point $g$ that generates the large prime order subgroup, not the entire group!

So, to make sure we have a safe generator point, we need to make sure that:

- The generator is actually on the curve.
- The generator generates the large prime order subgroup, meaning that its order is equal to the large prime factor!
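To tie these subsections together, here is a minimal self-contained sketch (plain Rust, not lambdaworks) over the toy curve $y^2 = x^3 + 2x + 2$ over $\mathbb{F}_{17}$, a common textbook example. It implements the chord and tangent rules, double-and-add scalar multiplication, and finds the order of a point; the curve, point and prime are illustrative choices only.

```rust
// Toy Short Weierstrass arithmetic over F_17 (curve: y^2 = x^3 + 2x + 2).
// Points are Option<(i64, i64)>; None is the point at infinity O.
const P: i64 = 17;
const A: i64 = 2; // curve coefficient a, used by the tangent rule

fn modp(v: i64) -> i64 {
    ((v % P) + P) % P
}

// Multiplicative inverse via Fermat: x^(P-2) mod P, valid since P is prime.
fn inv(x: i64) -> i64 {
    let (mut base, mut exp, mut acc) = (modp(x), P - 2, 1);
    while exp > 0 {
        if exp & 1 == 1 {
            acc = modp(acc * base);
        }
        base = modp(base * base);
        exp >>= 1;
    }
    acc
}

type Point = Option<(i64, i64)>;

// Chord rule for P != Q, tangent rule for P == Q, as in the formulas above.
fn add(p: Point, q: Point) -> Point {
    let (x1, y1) = match p { None => return q, Some(v) => v };
    let (x2, y2) = match q { None => return p, Some(v) => v };
    if x1 == x2 && modp(y1 + y2) == 0 {
        return None; // P + (-P) = O
    }
    let s = if p == q {
        modp(modp(3 * x1 * x1 + A) * inv(2 * y1)) // tangent slope
    } else {
        modp(modp(y2 - y1) * inv(x2 - x1)) // chord slope
    };
    let x3 = modp(s * s - x1 - x2);
    let y3 = modp(s * (x1 - x3) - y1);
    Some((x3, y3))
}

// Double-and-add: scans the bits of k from least significant upwards.
fn mul(mut k: u64, mut base: Point) -> Point {
    let mut acc: Point = None;
    while k > 0 {
        if k & 1 == 1 {
            acc = add(acc, base);
        }
        base = add(base, base);
        k >>= 1;
    }
    acc
}

fn main() {
    let g: Point = Some((5, 1)); // on the curve: 1^2 = 5^3 + 2*5 + 2 mod 17
    // Find the order of g by repeated addition.
    let (mut acc, mut order) = (g, 1u64);
    while acc.is_some() {
        acc = add(acc, g);
        order += 1;
    }
    println!("order of (5, 1) = {order}"); // prints 19, a prime
    assert_eq!(mul(order, g), None); // order * G = O
    assert_eq!(mul(5, g), add(add(mul(2, g), mul(2, g)), g)); // 5G = 2(2G) + G
}
```

Since the order of $(5, 1)$ comes out prime, this point is a safe generator in the sense of the checklist above.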
> How many generators are there in a cyclic group of order $n$? There are $\phi(n)$ generators, where $\phi$ is Euler's totient function. Conveniently, if the order $n$ is prime, then you have $n-1$ generators: all elements except the identity!
>
> If the order is not prime but has a large prime factor, with some small co-factors, you can do something called "co-factor clearing" to get a generator of the large prime order subgroup.

### Pohlig-Hellman Attack

What happens if we pick a generator $g'$ that generates the entire curve instead? This means that its order is $n = r \times h$ where $r$ is some prime (not necessarily the largest) and $h$ is a cofactor.

$g'$ has order $n$, and $h \times g'$ has order $r$ (i.e. cofactor clearing). Given a public key $Q = d \times g'$, one can compute $h \times Q = d \times (h \times g')$ and solve the discrete logarithm in the subgroup of small order $r$, which is much easier; this reveals $d$ modulo $r$, i.e. a part of the secret key.

Using the small subgroups, you can find the secret key $d$ modulo many small prime factors $r_1, r_2, \ldots, r_k$ of $n$, and then use the **Chinese Remainder Theorem** to combine these into $d$ modulo $r_1 r_2 \cdots r_k$.

> This attack was used in several Capture-the-Flag events, such as ZKHACK or the Lambda-Ingonyama ZK-CTF. In these challenges, there was either a faulty generator that's in the wrong subgroup, or something that leaked information about the discrete log, enabling the Chinese Remainder Theorem to take place in the attack.

## Point Representations

When we store points on the curve, we usually store them in the **Affine** form. This is the form $(x, y)$ where $x, y \in \mathbb{F}_p$. However, this is not the most efficient way to store points.

Consider an addition like $P+Q+R+S+\ldots$, where we compute a slope $s$ in each addition. This slope has a division, so we need to compute the multiplicative inverse of a field element, e.g. using the Extended Euclidean Algorithm.

As an alternative, we can use the **Projective** form (homogeneous projective coordinates) to store points. This is the form $(X, Y, Z)$ where $X, Y, Z \in \mathbb{F}_p$. The point $(x, y)$ is represented as $(x, y, 1)$, and the point at infinity corresponds to $Z = 0$. To go from projective to affine, you can simply do $(X : Y : Z) \to (X/Z, Y/Z)$.

In projective coordinates, you can add points without doing field inversions. The formulas are a bit more complex, but they are more efficient.

There is also the **Jacobian** form, which is a bit more efficient than projective. Here too the point $(x, y)$ is represented as $(x, y, 1)$, but to go from Jacobian to affine you do $(X : Y : Z) \to (X/Z^2, Y/Z^3)$.

> There are many more representations, each with different levels of efficiency. You can see different point representations for Short Weierstrass at .

> A point can be stored efficiently as well. For example, a curve point is given by the pair $(x, y)$, but you can store only $x$ if you want to, because $y$ can be derived from $x$ by taking the square root of the right-hand side of the curve equation. A single extra bit to indicate which of the two square roots to take is enough to store the point.

# Elliptic Curve Cryptography

The public key in Elliptic Curve Cryptography is derived using scalar multiplication. Given a private key $d$ and a base point $G$, the public key is $Q = dG$.
This is based on the assumption that the discrete logarithm problem is hard to solve, i.e. given $Q = dG$ and $G$, it is hard to find $d$.

The best algorithms to solve the discrete logarithm problem are **Pollard's Rho** and **Baby-Step Giant-Step**. They run in time $\mathcal{O}(\sqrt{r})$ where $r$ is the number of points on the curve. For this reason, the level of security is given by the number of bits in $\sqrt{r}$. For example, $r \approx 2^{256}$ gives a security level of 128 bits.

> **BN254** was initially thought to have 128 bits of security, but it was later subject to more clever attacks that reduced the security level to ~100 bits. (See )

> In many cases $a = 0$ is picked in the curve, which simplifies the formulas as $y^2 = x^3 + b$ and makes operations a bit more efficient. Some examples are: [Secp256k1](https://neuromancer.sk/std/secg/secp256k1), [BN254](https://neuromancer.sk/std/bn/bn254), [BLS12-381](https://neuromancer.sk/std/bls/BLS12-381).

## Diffie-Hellman Key Exchange

The Diffie-Hellman key exchange is a protocol that allows two parties to agree on a shared secret key over an insecure channel. The protocol is based on the hardness of the discrete logarithm problem.

Alice and Bob would like to agree on a key, but first they have to "exchange" this key securely. They do this by exchanging public keys and then computing the shared secret key.

1. Alice and Bob agree on a curve $E$ over a **base field** $\mathbb{F}_p$ and a generator point $G$. This curve has $r$ points, meaning that its **scalar field** is $\mathbb{F}_r$.

1. Alice picks a private key $a \in \mathbb{F}_r$ and computes the public key $A = aG$. Send this to Bob.

1. Bob picks a private key $b \in \mathbb{F}_r$ and computes the public key $B = bG$. Send this to Alice.

1. Alice computes the shared secret key $S = aB = a(bG)$.

1. Bob computes the shared secret key $S = bA = b(aG)$.

1. Et voilà, the shared secret key is the same because $aB = bA = abG$. No one can break this because it is hard to find the discrete log!

1. Now, they can derive the symmetric key they like using a key derivation function (KDF) on the secret $(ab)G$.

This is all good, but it is not _authenticated_. This means that an attacker could intercept the public keys and replace them with their own. This is called a **Man in the Middle** attack.

## Digital Signatures

ECDSA, Schnorr signatures and BLS signatures are all defined using an elliptic curve.

### Schnorr Signatures

One of the simplest examples of signatures is the Schnorr signature. Consider a group $G$ of prime order $q$ with a generator $g$ and a hash function $H : \{0, 1\}^* \to \mathbb{Z}_q$. The algorithms are below:

- **Key Generation**: The private key is a randomly picked $x \in \mathbb{Z}_q$ and the public key is $y = g^{-x}$.

- **Signing**: To sign a message $m$, the signer picks a random $k \in \mathbb{Z}_q$ and computes $r = g^k$ and $e = H(r || m)$. Then, the signer computes $s = k + xe$ and the signature is $(s, e)$.

- **Verification**: To verify the signature $(s, e)$ on message $m$, the verifier computes $r_v = g^s y^e$ and $e_v = H(r_v || m)$. The signature is valid if $e = e_v$.
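To see these three algorithms end to end, here is a minimal sketch over a toy group: the subgroup of prime order $q = 11$ generated by $g = 2$ inside $\mathbb{Z}_{23}^*$, with Rust's (non-cryptographic!) `DefaultHasher` standing in for $H$. All parameters are illustrative only.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy Schnorr signature: g = 2 generates the subgroup of prime order q = 11
// inside Z_23^* (2^11 = 2048 = 89 * 23 + 1).
const P: u64 = 23;
const Q: u64 = 11;
const G: u64 = 2;

fn modpow(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1;
    base %= P;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

// H(r || m) reduced into Z_q; DefaultHasher is a stand-in, not a real hash.
fn h(r: u64, msg: &str) -> u64 {
    let mut hasher = DefaultHasher::new();
    (r, msg).hash(&mut hasher);
    hasher.finish() % Q
}

fn main() {
    // Key generation: y = g^(-x) = g^(q - x), since g has order q.
    let x = 7; // private key (fixed here; random in practice)
    let y = modpow(G, Q - x);
    let msg = "hello schnorr";

    // Signing: r = g^k, e = H(r || m), s = k + x * e mod q.
    let k = 5; // nonce (must be fresh and random in practice!)
    let r = modpow(G, k);
    let e = h(r, msg);
    let s = (k + x * e) % Q;

    // Verification: r_v = g^s * y^e = g^(k + xe - xe) = g^k = r.
    let r_v = modpow(G, s) * modpow(y, e) % P;
    assert_eq!(h(r_v, msg), e);
    println!("signature (s = {s}, e = {e}) verifies");
}
```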
## Implementation

LambdaWorks has quite a lot of implementations of elliptic curves. See . They use the projective form for efficiency within their operations, and they allow conversion to affine form if needed.

# Commitments

Commitments are a way to **commit** to a value without revealing it; think of it like having a piece of data and putting the data inside an envelope. This is useful in many cryptographic protocols. A cryptographic commitment scheme has two important properties:

- **Hiding**: The commitment should hide the value $m$; one cannot know what is committed just by looking at the commitment.
- **Binding**: The commitment should bind the value $m$ to the commitment $C$.

## Hash Functions

A cryptographic hash function is a **one-way function**: it is hard to invert! This means that given $h \gets H(m)$ it is hard to find $m$ just by looking at $h$, so $m \gets H^{-1}(h)$ is infeasible.

> SHA-2 is based on the Merkle-Damgard construction, which uses a compression function. The Merkle-Damgard construction has a length-extension attack.

> SHA-3 is based on the Sponge construction, which has an "absorb" step and a "squeeze" step. It begins by absorbing the input, and then squeezing the sponge results in bits of the hash.

A hash function can be used within a commitment scheme.

### Merkle Trees

A [Merkle Tree](https://en.wikipedia.org/wiki/Merkle_tree) is a method of committing to a vector of values. Consider $\vec{a} = (a_0, a_1, \ldots, a_{n-1})$ where $n=2^m$; we can commit to this vector by creating a tree of hashes. The leaves of the tree are the values $a_i$, and the internal nodes are the hashes of their children.

We can use any cryptographic hash function within our Merkle Tree, but most people use SHA-2, SHA-3, Blake2, or Blake3; these are mostly based on bitwise operations. Within the zero-knowledge space, people use more "circuit-friendly" hashes such as Poseidon, Monolith, and Rescue; these are mostly based on algebraic operations.

When we create a binary tree of hashes, we can commit to a vector of values by revealing the root of the tree. This is a commitment to the entire vector, also denoted as the **Merkle Root**.

In particular, we will use Merkle Trees as a way of committing to polynomials! Consider a polynomial with coefficients $(a_0, a_1, \ldots, a_{n-1})$: we can commit to this polynomial by creating a Merkle Tree from this list of values, treated as a vector. Using this, we will actually be able to build a **polynomial commitment scheme**. In particular, we would like to prove **evaluations** of a committed polynomial.
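As a toy illustration of committing to a vector via a Merkle root, here is a small sketch; Rust's `DefaultHasher` is *not* a cryptographic hash and merely stands in for SHA-2/Poseidon here.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in hash; a real commitment would use SHA-2, SHA-3, Poseidon, etc.
fn hash_one<T: Hash>(value: T) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

// Commit to 2^m values by hashing pairwise up to the Merkle root.
fn merkle_root(values: &[u64]) -> u64 {
    assert!(values.len().is_power_of_two());
    let mut layer: Vec<u64> = values.iter().map(hash_one).collect(); // leaves
    while layer.len() > 1 {
        layer = layer
            .chunks(2)
            .map(|pair| hash_one((pair[0], pair[1]))) // parent = H(left, right)
            .collect();
    }
    layer[0]
}

fn main() {
    // Commit to the coefficients (a0, a1, a2, a3) of a polynomial.
    let coeffs = [3u64, 1, 4, 1];
    let root = merkle_root(&coeffs);
    println!("Merkle root (commitment): {root:x}");
    // Changing any coefficient changes the root, which is what binding requires.
    assert_ne!(root, merkle_root(&[3u64, 1, 4, 2]));
}
```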
## Using Elliptic Curves for Commitments

Now, we look at a commitment scheme known as the [KZG (Kate-Zaverucha-Goldberg)](https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf) commitment scheme. The main idea of KZG is to evaluate a polynomial $P(x)$ at a secret point $s$ (also shown as tau: $\tau$). This will be hiding and binding, meaning that the evaluation is hidden and the polynomial is bound to the commitment.

Consider an elliptic curve $E$ with prime order. One way of committing to a polynomial $P$ would be to evaluate the polynomial at a point $s$ to obtain $P(s)$, and then commit to the evaluation using a generator $g$ by doing $P(s)g$. The resulting commitment is just a point on the curve.

If you were the one who received the commitment, you would have to solve discrete-log to find out the polynomial, but that is hard. This is a **hiding** commitment scheme. However, this is not binding: you could simply pick the constant polynomial $Q(x) = P(s)$. Is there a way to commit without knowing $s$? Yes! Imagine a set of points like:

$$
\{s^0g, s^1g, s^2g, s^3g, \ldots, s^{n-1}g\}
$$

This is basically a set of points $\{P_0, P_1, \ldots, P_{n-1}\}$. We refer to this as a **Structured Reference String** (SRS). Now, you can do the following:

$$
P(s)g = \sum a_iP_i = a_0P_0 + a_1P_1 + a_2P_2 + \ldots + a_{n-1}P_{n-1}
$$

So, there is no need to know what $s$ is to evaluate the polynomial at that point! Notice that given any $P_i = s^ig$, you can't find $s$ thanks to discrete-log. This operation is called a **Multi-Scalar Multiplication** (MSM) and is the main bottleneck within zk-SNARKs. One of the most efficient algorithms for this is **Pippenger's Algorithm**. This commitment is also a particular case of the **Pedersen Commitment**.

> In one CTF, the trick was to look at the SRS and see that the points were repeating from some point on! There, $s$ belonged to a small order subgroup.

Thanks to this new method, we now have a commitment scheme that is both hiding and binding. We have computed $P_s = P(s)g$ without knowing $s$, and we can't change the polynomial without knowing $s$ to break the binding property.

> The [MOV Attack](https://www.dima.unige.it/~morafe/MaterialeCTC/p80-menezes.pdf) and [Cheon's Attack](https://iacr.org/archive/eurocrypt2006/40040001/40040001.pdf) are attacks on the discrete log problem in the context of pairing-based cryptography.
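Here is a toy version of the SRS commitment idea above, written multiplicatively in $\mathbb{Z}_{101}^*$ so that the SRS point $s^i g$ becomes the group element $g^{s^i} \bmod p$, and the MSM $\sum a_i P_i$ becomes a product of powers. The group, secret and polynomial are illustrative only; a real KZG setup uses an elliptic curve group.

```rust
// "Commit without knowing s": the prover only ever sees the SRS g^(s^i).
const P: u64 = 101; // toy group Z_101^*
const G: u64 = 2;

fn modpow(mut base: u64, mut exp: u64) -> u64 {
    let mut acc = 1;
    base %= P;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % P;
        }
        base = base * base % P;
        exp >>= 1;
    }
    acc
}

fn main() {
    // Trusted setup: someone computes powers of s "in the exponent", then forgets s.
    let s: u64 = 7; // the toxic waste
    let srs: Vec<u64> = (0u32..4).map(|i| modpow(G, s.pow(i))).collect();

    // Commit to P(x) = 3 + 2x + x^2 using only the SRS, never touching s:
    // commitment = prod_i (g^(s^i))^(a_i) = g^(P(s)).
    let coeffs = [3u64, 2, 1, 0];
    let commitment = coeffs
        .iter()
        .zip(&srs)
        .fold(1u64, |acc, (a, p_i)| acc * modpow(*p_i, *a) % P);

    // Sanity check, only possible here because this sketch still "knows" s.
    let p_at_s = 3 + 2 * s + s * s; // P(7) = 66
    assert_eq!(commitment, modpow(G, p_at_s));
    println!("commitment = g^P(s) = {commitment}");
}
```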
## Pairings

So imagine I have a commitment $\text{commit}(P) = P_s = P(s)g$ (computed with MSM). Now, I want to show you that $P(z) = v$ for some $z$. What I will do is make use of the property:

$$
P(x) - v = (x-z)Q(x)
$$

I will send you $z$ and the value $v$, along with an evaluation proof: a commitment $\text{commit}(Q) = Q_s$. We will need a **Pairing** for this part! A pairing is a bilinear map $e : G_1 \times G_2 \to G_T$ that takes two inputs and returns a new element. The pairing that we will use (a type-3 pairing) has the property:

$$
e(ag_1, bg_2) = e(g_1, g_2)^{ab}
$$

This is a bilinear map, meaning that it is linear in both arguments. This is a very useful property for zero-knowledge proofs. A bilinear pairing has the following properties:

- $e(g_1, g_2) \ne 1$ (non-degenerate)
- $e(g_1 + g_3, g_2) = e(g_1, g_2) e(g_3, g_2)$

Now, notice that:

$$
e(P_s, g_2) = e(P(s)g_1, g_2) = e(g_1, g_2)^{P(s)}
$$

The non-degeneracy is helpful here because $e(g_1, g_2) \ne 1$, so we have some non-identity element that we are raising to some power. Keep in mind that we need a different SRS for the second group $g_2$ as well. There, instead of $s^ig_1$, we have $s^ig_2$ for the points.

> See also "[A taxonomy of pairing-friendly elliptic curves](https://eprint.iacr.org/2006/372)".

### Pairing-Based Polynomial Evaluation Proof

> See [this blog](https://blog.lambdaclass.com/mina-to-ethereum-bridge/) for info on KZG.

As described above with Multi-Scalar Multiplication (MSM), we have a commitment $P_s = P(s)g$ and we want to prove that $P(z) = v$ for some $z$. We will use the pairing to do this.

If $P(z) = v$, it follows immediately that $P(z) - v = 0$. This means that there exists a polynomial $Q(x)$ such that $P(x) - v = (x-z)Q(x)$. In other words, $P(x) - v$ should be divisible by $(x-z)$. This is the most important idea here.

We can commit to this polynomial $Q$ as $Q_s = Q(s)g$ using MSM as before. Since we only have the commitments, but not the polynomials, we need a different way to check these evaluations! We will use two pairings to do this:

$$
e(P_s - vg_1, g_2) = e((P(s) - v)g_1, g_2) = e(g_1, g_2)^{P(s) - v}
$$

$$
e(Q_s, sg_2 - zg_2) = e(Q(s)g_1, (s-z)g_2) = e(g_1, g_2)^{Q(s)(s-z)}
$$

Since both $g_1, g_2$ are not the point at infinity, and the pairing is non-degenerate, each result is some non-identity element of $G_T$. We can compare these two pairings and check if they are equal. So, the pairing allows us to ensure $P(x) - v = (x-z)Q(x)$. Checking this at a single random point suffices thanks to the Schwartz-Zippel Lemma.

> How did we get the $s$ within $(s-z)$ if we don't know $s$? That's because the pairing makes use of $sg_2$, which is the second element in the SRS of the respective set of points.

## Batching

KZG commitments are additively homomorphic!

$$
\text{Commit}(\alpha p(x) + \beta q(x)) = \alpha \text{Commit}(p(x)) + \beta \text{Commit}(q(x))
$$

This is useful for batching, where you can commit to multiple polynomials at once. The [Halo](https://eprint.iacr.org/2019/1021) protocol made use of this trick.

Say that we have $k$ evaluations of $k$ polynomials: $P_i(x)$ is evaluated at $z$ (a point chosen by the verifier) to obtain $v_i$ for $i = 1..k$. We can have the verifier choose $k$ random coefficients $\alpha_i$ for $i = 1..k$ and then compute the linear combination:

$$
P(x) = \sum_{i=1}^k \alpha_i P_i(x)
$$

When you evaluate this polynomial at $z$, you get:

$$
P(z) = \sum_{i=1}^k \alpha_i P_i(z) = \sum_{i=1}^k \alpha_i v_i
$$

Finally, we will do the division trick over this final polynomial:

$$
Q(x) = \frac{P(x) - P(z)}{x-z}
$$

We will commit to all the polynomials $\text{Commit}(P_i)$ along with evaluations $v_i$, and to this final polynomial $\text{Commit}(Q)$.

The verifier will check the linear combination:

$$
\text{Commit}(P) = \sum_{i=1}^k \alpha_i \text{Commit}(P_i)
$$

They will compute the evaluation $v' = \sum_{i=1}^k \alpha_i v_i$ and then check the division trick with the pairing:

$$
e(\text{Commit}(P) - v'g_1, g_2) = e(\text{Commit}(Q), sg_2 - zg_2)
$$

## Cheating

Suppose that I want to show you that $1 + 1 = 3$; in other words, that $P(x) = 1 + x$ evaluates to $3$ at $x=1$. That is, for $z=1$ I want to have $v=3$. We want to show:

$$
P(s) - 3 = (s-1)Q(s)
$$

If we had access to $s$ (the toxic waste) we could construct the fake proof:

$$
(P(s)-3)(s-1)^{-1} = Q(s)
$$
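The "attack" is just one field division. Here is a tiny sketch over a toy scalar field $\mathbb{F}_{101}$; the prime and the leaked $s$ are made up for illustration.

```rust
// Forging a KZG-style opening when the toxic waste s leaks, over the toy
// scalar field F_101: claim P(1) = 3 for P(x) = 1 + x (really P(1) = 2).
const R: i64 = 101;

fn modr(v: i64) -> i64 {
    ((v % R) + R) % R
}

// Fermat inverse: x^(R-2) mod R, valid since R is prime.
fn inv(x: i64) -> i64 {
    let (mut base, mut exp, mut acc) = (modr(x), R - 2, 1);
    while exp > 0 {
        if exp & 1 == 1 {
            acc = modr(acc * base);
        }
        base = modr(base * base);
        exp >>= 1;
    }
    acc
}

fn main() {
    let s = 42; // leaked toxic waste (illustrative)
    let (z, v) = (1, 3); // the false claim P(1) = 3
    let p_at_s = modr(1 + s); // P(x) = 1 + x evaluated at s

    // Forged quotient evaluation: Q(s) = (P(s) - v) * (s - z)^(-1).
    let q_at_s = modr((p_at_s - v) * inv(s - z));

    // The verifier's pairing check compares exactly these two exponents,
    // so the forged Q(s) makes the false claim pass.
    assert_eq!(modr(p_at_s - v), modr(q_at_s * (s - z)));
    println!("forged opening accepted: P(1) = 3 \"verified\"");
}
```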
--------------------------------------------------------------------------------
/docs/WEEK-3.md:
--------------------------------------------------------------------------------
> # Week 3
>
> We talked about SNARKs, BabySNARK (do do doo) in particular. We further went into field extensions.

# SNARKs

A SNARK is a "Succinct Non-interactive Argument of Knowledge". Note that an "argument" is different from a "proof".

Consider a language $\mathcal{L}$. Given an **instance** (public input) $x \in \mathcal{L}$, a SNARK is an argument of knowledge that a prover knows a **witness** (secret input) $w$ such that $R(x, w) = 1$ where $R$ is a relation.

We usually formalize this relation using an arithmetic circuit $C$ that takes $x$ and $w$ as input and outputs 1 if $R(x, w) = 1$. Here is an example circuit for $x_1 \times x_2 + 2 = 0$:

```mermaid
graph LR
  x1[x1]; x2[x2]
  m((x)); a((+))
  x1 & x2 --> m
  2 & m --> a
  a --> 0
```

What we want from a SNARK is that it should have:

- **fast computation**: something like $\mathcal{O}(n \log n)$ or $\mathcal{O}(\sqrt{n})$ (e.g. Binius).
- **low communication**: sending the entire witness $w$ would be the naive way, but we would like to be much more efficient than that.

## Problems in NP Class

A problem is in the NP class if, given a solution, we can verify it in polynomial time. For example, the problem of finding a Hamiltonian cycle in a graph is in the NP class.

**NP-complete** problems are such problems that any NP problem can be reduced to an NP-complete problem. Some examples of NP-complete problems are:

- Boolean circuit satisfiability
- Arithmetic circuit satisfiability
- Three-coloring problem

In the context of SNARKs, the idea is the following reduction:

$$
\text{Problem} \to \text{NP-complete Problem} \to \text{SNARK}
$$

# BabySnark

BabySNARK is a minimal implementation that has the SNARK "magic", and is fairly digestible for a beginner to understand. It is originally based on [this](https://eprint.iacr.org/2014/718.pdf) paper, and the repository can be found at [initc3/babySNARK](https://github.com/initc3/babySNARK/).

## Square Span Problems

A square span program takes a vector $z$ and a matrix $U$. If $z$ is a valid assignment, then:

$$
(U \cdot z) \cdot (U \cdot z) = 1
$$

In short (where the square is taken component-wise):

$$
(U \cdot z)^2 = 1
$$

We can describe boolean circuits using square span programs! The vector $z$ will have a constant term 1, the instance terms $x_0, x_1, \ldots, x_n$ and the witness terms $w_0, w_1, \ldots, w_m$. So:

$$
z = (1, x_0, x_1, \ldots, x_n, w_0, w_1, \ldots, w_m)
$$

For convenience, we usually write this as $z = (1, x, w)$.

Example: Consider the XOR operation $a \oplus b = c$.

- $a$ must be a bit, so $a(1 - a) = 0$, which we can write as a Square Span: $(2a - 1)^2 = 1$
- $b$ must be a bit, so $b(1 - b) = 0$, which we can write as a Square Span: $(2b - 1)^2 = 1$
- $c$ must be a bit, so $c(1 - c) = 0$, which we can write as a Square Span: $(2c - 1)^2 = 1$
- $c = a + b - 2ab$ corresponds to the XOR operation, but this is hard to write as a Square Span. Instead, notice that $a+b+c-1 \in \{-1, 1\}$ exactly when $c = a \oplus b$, so we can actually write the Square Span: $(a+b+c-1)^2 = 1$.
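Before assembling these constraints into a matrix, here is a quick sanity check of the four Square Spans; it exhaustively verifies that they hold exactly for valid XOR assignments:

```rust
// Exhaustive sanity check of the XOR Square Span constraints above:
// for bits a, b with c = a XOR b, every constraint value is +1 or -1,
// so its square is 1; an invalid c fails the last constraint.
fn main() {
    for a in 0i64..2 {
        for b in 0i64..2 {
            let c = a ^ b; // valid witness
            let constraints = [
                2 * a - 1,     // a is a bit
                2 * b - 1,     // b is a bit
                2 * c - 1,     // c is a bit
                a + b + c - 1, // c = a XOR b
            ];
            assert!(constraints.iter().all(|v| v * v == 1));
        }
    }
    // A wrong witness must fail: a = b = 1 with c = 1, although 1 XOR 1 = 0.
    let (a, b, c) = (1i64, 1, 1);
    assert_ne!((a + b + c - 1) * (a + b + c - 1), 1);
    println!("square span XOR constraints hold");
}
```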
So we have the system of equations:

$$
\begin{align*}
(2a - 1)^2 &= 1 \\
(2b - 1)^2 &= 1 \\
(2c - 1)^2 &= 1 \\
(a + b + c - 1)^2 &= 1
\end{align*}
$$

Now let's write this as a matrix equation:

```math
\begin{pmatrix}
-1 & 2 & 0 & 0 \\
-1 & 0 & 2 & 0 \\
-1 & 0 & 0 & 2 \\
-1 & 1 & 1 & 1
\end{pmatrix}
\begin{pmatrix}
1 \\ a \\ b \\ c
\end{pmatrix}
=
\begin{pmatrix}
1 \\ 1 \\ 1 \\ 1
\end{pmatrix}
```

Square Span problems are "modular" in the sense that we can "connect" them together to form a larger Square Span problem. For example, continuing from the XOR above, if we wanted to compute another XOR $c \oplus d = e$, we could just add more rows to the matrix:

```math
\begin{pmatrix}
-1 & 2 & 0 & 0 & 0 & 0 \\
-1 & 0 & 2 & 0 & 0 & 0 \\
-1 & 0 & 0 & 2 & 0 & 0 \\
-1 & 1 & 1 & 1 & 0 & 0 \\
-1 & 0 & 0 & 2 & 0 & 0 \\
-1 & 0 & 0 & 0 & 2 & 0 \\
-1 & 0 & 0 & 0 & 0 & 2 \\
-1 & 0 & 0 & 1 & 1 & 1
\end{pmatrix}
\begin{pmatrix}
1 \\ a \\ b \\ c \\ d \\ e
\end{pmatrix}
=
\begin{pmatrix}
1 \\ 1 \\ 1 \\ 1 \\ 1 \\ 1 \\ 1 \\ 1
\end{pmatrix}
```

Notice that the first 4 rows are the same as in the previous matrix, and the last rows are for the new XOR operation. Further notice that the 3rd and 5th rows are the same; stating the same constraint twice gains us nothing, so we could optimize the constraints there if we wanted to.

## Programs to Problems

What we often have is that people write "gadgets", such as boolean gates, field operations and such. Then, these "modular" gadgets can be connected together to form a larger problem. A higher-level tool takes in a program that describes the problem and converts it into a SNARK-friendly format.

- Above, we wrote a problem in the **Square Span** format. Square Span is not really efficient, but the formats mentioned below are quite useful; these are known as "SNARK-friendly" formats.
- In **Groth16**, you write the program in the **R1CS** (Rank-1 Constraint System) format.
- In **Plonk**, you write the problem in the **Plonkish** format.
- In zkVMs, you often write the problem in the **AIR** (Algebraic Intermediate Representation) format.

Some tools that convert a problem into a SNARK-friendly format are:

- [Circom](https://iden3.io/circom)
- [Noir](https://noir-lang.org/)
- [Cairo](https://www.cairo-lang.org/)

## Zero-Knowledge Property

So, to recap Square Span: we have $(U \cdot z)^2 = 1$, which we can write as a system of equations:

$$
(\sum_j u_{ij} z_j)^2 - 1 = 0
$$

I want to show that I know $w$ such that the equation above holds for $z = (1, x, w)$. There is an important property about SNARKs though, the **Zero-Knowledge Property**: we don't want to reveal anything about the witness $w$.

We can make use of univariate polynomials here. Consider polynomials $u_j(x)$ such that:

$$
(\sum_j u_{j}(x) z_j)^2 - 1 = p(x)
$$

We will view the columns of $U$ as evaluations of these polynomials at different points.
Consider the matrix:

$$
U =
\begin{pmatrix}
a_{00} & a_{01} & a_{02} \\
a_{10} & a_{11} & a_{12} \\
a_{20} & a_{21} & a_{22}
\end{pmatrix}
$$

So for example at $j=0$ we have the evaluations $a_{00}, a_{10}, a_{20}$. For our evaluation points, we can pick points $\{g^0, g^1, g^2\}$ generated by a generator $g$. In fact, we can use roots of unity for $g$. This means that $u_j(g^i) = a_{ij}$. Now, our equation becomes:

$$
\left(\sum_j u_{j}(g^i) z_j\right)^2 - 1 = p(g^i)
$$

The trick about our result polynomial $p(x)$ is that, given our evaluation domain $D$:

$$
(U \cdot z)^2 - 1 = 0 \iff p(x) = 0 \;\; \forall x \in D
$$

If this is indeed true, then $p(x) = Z(x) \cdot h(x)$; in other words, $p(x)$ is divisible by the vanishing polynomial $Z(x)$ of the domain $D$. The vanishing polynomial is simply:

$$
Z(x) = (x-x_0)(x-x_1)\ldots(x-x_{k-1}) = \prod_{x_i \in D} (x - x_i)
$$

We have chosen our points carefully, i.e. they form a subgroup of order $k$ (such as roots of unity), and this has a really nice property:

$$
Z(x) = (x-g^0)(x-g^1)\ldots(x-g^{k-1}) = x^{k} - 1
$$

Also remember that we can use FFT to evaluate polynomials really efficiently, and with this closed form we can evaluate the vanishing polynomial efficiently as well!

As the last step, a random point $s$ is picked, and the following equality is checked:

$$
p(s) = Z(s) \cdot h(s)
$$

In other words, $r(x) = 0$ at some random point $s$ where:

$$
p(x) - Z(x) \cdot h(x) = r(x)
$$

> [!WARNING]
>
> This part may be incomplete.

## Trusted Setup

Within our protocol we make several MSMs (multi-scalar multiplications). For these, we need a trusted setup specific to our circuit. We will have two "keys":

**Verifying Key** is used to verify a proof; it has:

1. $u_0(s)g_1, u_1(s)g_1, \ldots, u_n(s)g_1$ which are $n+1$ points as $P_0, P_1, \ldots, P_n$. Thanks to these points, we can compute $V_u(s)g_1 = \sum z_k P_k$ with an MSM, where the $z_k$ are the public values.
2. $u_0(s)g_2, u_1(s)g_2, \ldots, u_n(s)g_2$ which are $n+1$ points, but using the generator $g_2$ instead.
3. $Z(s) g_2$
4. a constant $e(g_1, g_2)^{-1}$
5. $\beta \gamma g_1$
6. $\gamma g_2$

**Proving Key** is used to generate a proof; it has:

1. $g_1, s g_1, s^2 g_1, \ldots, s^n g_1$ which are $n+1$ points as $Q_0, Q_1, \ldots, Q_n$. Thanks to these points, we can compute $h(s)g_1 = \sum h_k Q_k$ with an MSM, where the $h_k$ are the coefficients of $h$.
2. $u_{n+1}(s)g_1, u_{n+2}(s)g_1, \ldots, u_{n+m}(s)g_1$ which are $m$ points
3. $u_{n+1}(s)g_2, u_{n+2}(s)g_2, \ldots, u_{n+m}(s)g_2$ which are $m$ points, but using the generator $g_2$ instead.
4. $\beta u_{n+1}(s)g_1, \beta u_{n+2}(s)g_1, \ldots, \beta u_{n+m}(s)g_1$ which are $m$ points similar to step 2, but multiplied with the constant $\beta$ as well.

The secret values here are $s, \beta$ and $\gamma$. No one should know these, other than the trusted party that performed the setup.

## Proving Phase

1. Compute the commitments $\boxed{V_w}_1 = V_w(s)g_1$ and $\boxed{V_w}_2 = V_w(s)g_2$ and $\boxed{B_w}_1 = B_w(s)g_1 = \beta V_w(s)g_1$

2. Compute $p(x) / Z(x) = h(x)$. This part can be done efficiently using **FFT**.
   We could get evaluations of $p$ and $Z$ on some random domain, divide the evaluations, and then interpolate the result to obtain $h$. This is faster than long division of polynomials.

3. Commit to the result $\boxed{h}_1 = h(s)g_1$.

4. Output proof $\pi = (\boxed{h}_1, \boxed{V_w}_1, \boxed{V_w}_2, \boxed{B_w}_1)$.

The prover used 4 MSMs, and outputs just 4 curve elements. Not only is this much more efficient than sending the entire witness, but the proof size is constant as well! It does not matter how large your circuit is.

## Verification Phase

1. Parse the proof to obtain $\pi \to (\boxed{h}_1, \boxed{V_w}_1, \boxed{V_w}_2, \boxed{B_w}_1)$.

2. Check the pairing $e(\boxed{V_w}_1, g_2) = e(g_1, \boxed{V_w}_2)$ to ensure that the prover has used the same input for both commitments.

3. Check $e(\boxed{B_w}_1, \gamma g_2) = e(\beta \gamma g_1, \boxed{V_w}_2)$ to ensure that the prover did not cheat (thanks to $\beta$)

4. Compute $V_u(s)g_1 = \boxed{V_u}_1$ and $V_u(s)g_2 = \boxed{V_u}_2$ from the public inputs.

5. Check $e(\boxed{V_u}_1 + \boxed{V_w}_1, \boxed{V_u}_2 + \boxed{V_w}_2) e(g_1, g_2)^{-1} = e(\boxed{h}_1, \boxed{Z}_2)$ where $\boxed{Z}_2 = Z(s)g_2$.

If all checks pass, the proof is valid!

## Implementation

BabySNARK is implemented in LambdaWorks; see the LambdaWorks repository.

# Field Extensions

Recall that we had fields defined over integers modulo some prime $p$, denoted as $\mathbb{F}_p$. The elements of this field obeyed the addition and multiplication laws modulo $p$. We can go beyond that.

## Over Real Numbers & Complex Numbers

Consider the ring $\mathbb{R}[x]$ of polynomials over the real numbers, with elements $a_0 + a_1x + a_2x^2 + \ldots + a_nx^n$. Then, consider an irreducible polynomial such as $I(x) = x^2 + 1$. We can define a field extension $\mathbb{R}[x] / I(x)$ whose elements are of the form $a + bx$ where $a, b \in \mathbb{R}$.

What happens in this example is that, whenever the degree of a polynomial in $\mathbb{R}[x]$ is greater than or equal to that of $I(x)$, we divide the polynomials and keep the remainder, just like we do in modular arithmetic. So, every polynomial in $\mathbb{R}[x]$ can be written as $a + bx$ where $a, b \in \mathbb{R}$.

When working with polynomials, we can define addition and multiplication laws as well. For the example above:

- $(a_0 + a_1x) + (b_0 + b_1x) = (a_0 + b_0) + (a_1 + b_1)x$
- $(a_0 + a_1x) \cdot (b_0 + b_1x) = a_0b_0 + (a_0b_1 + a_1b_0)x + a_1b_1x^2$

Now, the multiplication above has a term $x^2$ which is **not** in our field extension. We can reduce this term using the irreducible polynomial $I(x)$. We will simply divide the result by $I(x)$ and look at the remainder.

> In this case, $a_0b_0 + (a_0b_1 + a_1b_0)x + a_1b_1x^2 \bmod{x^2 + 1}$ results in $(a_0b_0 - a_1b_1) + (a_0b_1 + a_1b_0)x$. This is actually equivalent to Complex number multiplication! In other words, $x^2$ became $-1$.

An irreducible polynomial cannot be factored into smaller polynomials; for a quadratic like ours, such a factorization would imply that there is a root. In other words, $I(x) = (x - a)Q(x)$ would mean that $a$ is a root of $I(x)$, and $x^2 + 1$ has no real roots.

Another interesting fact about field extensions is that we still have the notion of a multiplicative inverse! That is, for $p(x)$ we can find a polynomial $q(x)$ such that $p(x)q(x) \equiv 1 \pmod{I(x)}$. A small sketch of this degree-2 arithmetic is given below.
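As a quick illustration, here is a minimal sketch (plain Rust, our own example, not from the course repo) of arithmetic in $\mathbb{R}[x] / (x^2 + 1)$, representing $a + bx$ as a pair $(a, b)$; reducing $x^2$ to $-1$ recovers exactly the complex-number rules described above:

```rs
#[derive(Clone, Copy, Debug, PartialEq)]
struct Ext2 {
    a: f64, // constant coefficient
    b: f64, // coefficient of x
}

impl Ext2 {
    fn add(self, rhs: Ext2) -> Ext2 {
        Ext2 { a: self.a + rhs.a, b: self.b + rhs.b }
    }
    fn mul(self, rhs: Ext2) -> Ext2 {
        // (a0 + a1 x)(b0 + b1 x) = a0 b0 + (a0 b1 + a1 b0) x + a1 b1 x^2,
        // and x^2 = -1 (mod x^2 + 1):
        Ext2 {
            a: self.a * rhs.a - self.b * rhs.b,
            b: self.a * rhs.b + self.b * rhs.a,
        }
    }
    fn inv(self) -> Ext2 {
        // multiplicative inverse via "conjugate over norm",
        // defined whenever (a, b) != (0, 0)
        let norm = self.a * self.a + self.b * self.b;
        Ext2 { a: self.a / norm, b: -self.b / norm }
    }
}

fn main() {
    let p = Ext2 { a: 1.0, b: 2.0 };
    let q = p.inv();
    println!("p + q = {:?}", p.add(q));
    println!("p * q = {:?}", p.mul(q)); // ~ Ext2 { a: 1.0, b: 0.0 }
}
```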
> In this example, we have used $I(x) = x^2 + 1$. We call our field extension a "degree-2 extension", or a "quadratic extension". Since its elements are pairs $(a, b)$ with $a, b \in \mathbb{R}$, we can view the field extension as an "$\mathbb{R}$-vector space of dimension 2".

A question one may ask here: why $x^2 + 1$ and not something like $x^2 + 3$? Well, it's because both field extensions would be **isomorphic**, so they are essentially the same thing. Using $x^2 + 1$ is just a convention, and it is more efficient to reduce by.

> There are some more efficient multiplication methods in field extensions, such as [Karatsuba](https://en.wikipedia.org/wiki/Karatsuba_algorithm) and [Toom-Cook](https://en.wikipedia.org/wiki/Toom%E2%80%93Cook_multiplication). The funny thing is, you can even use FFT if you really want: the method with the "best asymptotic complexity" for multiplication requires a 1729-dimensional FFT! This result comes from the line of work around the [Schönhage-Strassen](https://en.wikipedia.org/wiki/Sch%C3%B6nhage%E2%80%93Strassen_algorithm) algorithm. It is not practical in real-life usage, making it a [_Galactic algorithm_](https://en.wikipedia.org/wiki/Galactic_algorithm).

## Over Finite Fields: Binary Field

Consider the finite field $\mathbb{F}_2 = \{0, 1\}$. The addition and multiplication laws are defined as:

- $0 + 0 = 0$, $0 + 1 = 1$, $1 + 0 = 1$, $1 + 1 = 0$. This is just our ordinary XOR operation.
- $0 \times 0 = 0$, $0 \times 1 = 0$, $1 \times 0 = 0$, $1 \times 1 = 1$. This is just our ordinary AND operation.

> So things are pretty simple as we can see, and quite efficient as we can use bitwise operations.

A binary field extension of degree $m$ is shown as $\mathbb{F}_{2^m}$ (or alternatively $GF(2^m)$, a "Galois Field", named after Galois). Let's pick an irreducible polynomial. $I(x) = x^2 + 1$ is not irreducible in a binary field! Simply, $I(1) = 1 + 1 = 0$. So, we can actually write it as $I(x) = (x+1)(x+1)$.

Instead, we can pick $I(x) = x^2 + x + 1$, which is irreducible. We can define a field extension $\mathbb{F}\_2[x] / I(x)$ where we have elements of the form $a + bx$ with $a, b \in \mathbb{F}\_2$. Notice that we can look at the coefficients as bit-strings, i.e. the elements of $\mathbb{F}\_{2^2}$:

```rs
0 + 0*x = (0,0) = 00
0 + 1*x = (0,1) = 01
1 + 0*x = (1,0) = 10
1 + 1*x = (1,1) = 11
```

Let's look at the multiplications of these elements:

```rs
*  00 10 01 11
00 00 00 00 00
10 00 10 01 11
01 00 01 11 10
11 00 11 10 01
```

> This is the type of multiplication that Vitalik did in his post.

We can have much higher-degree extensions as well; we just need an irreducible polynomial. For example, $I(x) = x^8 + x^4 + x^3 + x + 1$ is an irreducible polynomial over $\mathbb{F}\_2$, and it yields the extension $\mathbb{F}_{2^8}$.

### Towering Fields

Suppose you have the extension $\mathbb{F}\_{2^2}$ and you want to extend it further. You can pick an irreducible polynomial $I(y)$ over $\mathbb{F}\_{2^2}$ and define a field extension $\mathbb{F}\_{2^2}[y] / I(y)$. This is called "towering", and it results in the extension $\mathbb{F}_{2^{2^2}}$.

The elements of this field extension would be $a_0 + a_1y$ where $a_0, a_1 \in \mathbb{F}\_{2^2}$.
We can open this up further to see that the elements are $(a_{00} + a_{01}x) + (a_{10} + a_{11}x)y$ where $a_{00}, a_{01}, a_{10}, a_{11} \in \mathbb{F}\_2$.

Suppose that you want to build a degree-12 extension over $\mathbb{F}_p$. You have two alternatives:

1. **Find an irreducible polynomial** over $\mathbb{F}_p$ of degree 12.

   > For example, in the BN254 elliptic curve we have $I(x) = x^{12} - 18x^6 + 82$ with which we can build the extension.

2. **Build extension towers** to obtain the desired degree.

   > For example, given $\mathbb{F}\_p$ how can we obtain an extension of degree 12?
   >
   > 1. $\mathbb{F}\_p \to \mathbb{F}_{p^2}$ using $I(x) = x^2 + 1$.
   > 2. $\mathbb{F}\_{p^2} \to \mathbb{F}_{p^6}$ using $I(y) = y^3 - (9 + x)$.
   > 3. $\mathbb{F}\_{p^6} \to \mathbb{F}_{p^{12}}$ using $I(z) = z^2 - y$.

Using any of these methods, the resulting extensions will be isomorphic!

> Binius works over binary field extensions, and they use more tricks than the ones above to build their extension tower.

## Pairings & Extensions

Recall that we talked about type-3 pairings $e : G_1 \times G_2 \to G_T$ where all these groups had order $r$. For example, in the BN254 elliptic curve $r$ is the number of points on the curve. $G_1$ is the group itself, but we want $G_2$ to be another group of order $r$. So, we must somehow build a field extension to obtain a subgroup of order $r$.

The trick for finding these extensions comes from the **embedding degree** $k$, which is the smallest $k$ such that $p^k - 1$ is divisible by $r$.

> For example, in BN254 $k = 12$. This means that we can build a degree-12 extension over $\mathbb{F}_p$ and obtain a subgroup of order $r$, because the multiplicative group of $\mathbb{F}_{p^{12}}$ has order $p^{12} - 1$. In fact:
>
> - $G_T$ in the pairing for BN254 is a subgroup of $\mathbb{F}_{p^{12}}$.
> - $G_1$ will be points on the curve $(x, y) \in \mathbb{F}_p \times \mathbb{F}_p$, essentially using the group itself.
> - $G_2$ will be points on the curve $(x, y) \in \mathbb{F}_{p^2} \times \mathbb{F}_{p^2}$.

In a pairing operation, we have two parts:

- **[Miller Loop](https://crypto.stanford.edu/miller/)**: takes in a pair of points $(P_1 \in G_1, P_2 \in G_2)$ and outputs an element of $\mathbb{F}_{p^{k}}$.
- **[Cofactor Clearing](https://loup-vaillant.fr/tutorials/cofactor)**: takes the result of the Miller loop and maps it to the subgroup of order $r$ by exponentiating it to $(p^{k} - 1) / r$; in the pairing setting this step is also known as the final exponentiation.

The cost of the exponentiation (i.e. cofactor clearing) is fixed, but the Miller Loop has a variable cost.

> In STARKs, we may work with small fields (such as Goldilocks or BabyBear), but take our "samples" from larger extensions of these fields to achieve cryptographic security.

--------------------------------------------------------------------------------
/docs/WEEK-4.md:
--------------------------------------------------------------------------------

> # Week 4
>
> We talked about FRI, STARK.

# FRI

**Fast Reed-Solomon Interactive Oracle Proof of Proximity** (FRI) is a commitment scheme that is based on hash functions and proximity tests. You show that a given function is "close" to a "low-degree" polynomial.

FRI can be simply shown as:

1. Receive random $\beta$
2. Apply the **FRI Operator**
3. Commit
4. Go to step 1 until you reach a constant polynomial (i.e. degree $<1$)
5. Send all commitments & the final constant polynomial

The goal of the FRI Operator is to go from "prove that a function (over a domain of size $N$) is close to a polynomial of degree $< k$" to "prove that a function (over a domain of size $N/2$) is close to a polynomial of degree $< k/2$", halving the problem at each step.

Recall Reed-Solomon codes: encoding a polynomial with $k+1$ coefficients amounts to evaluating it over a domain of $n > k$ points:

$$
(a_0, a_1, \ldots, a_k) \to (b_0, b_1, \ldots, b_k, \ldots, b_n)
$$

The distance between two codewords is the number of points where they differ. The Reed-Solomon code has distance $d = n-k+1$.

## Commit Phase

In FRI, we will first choose a domain:

$$
D_0 := \{h\omega^k : k \in \mathbb{Z}_n\}
$$

Here, $\omega$ is a primitive $n$-th root of unity, and $h$ is called the _offset_. The offset is for convenience, and it is not necessary to have it.

Using this domain, we will compute $P(x_k)$ for $x_k \in D_0$, which is essentially producing the Reed-Solomon encoding of $P$. This gives us $n$ evaluations $P(h), P(h\omega), \ldots, P(h\omega^{n-1})$. We can use FFT for this, since the domain is built over roots of unity. This evaluation step is the most computationally expensive part of FRI.

Using these $n$ evaluations, we will create a **Merkle Tree** and compute the **Merkle Root**. This root is the **commitment**. Evaluating the polynomial over this (larger) domain to build the tree & root is the **Low-Degree Extension** (LDE).

Notice that the verifier can query an evaluation from $D_0$; all the prover has to do is provide the Merkle path so that the verifier can recompute the root.

Now, how do we know that the committed values are the evaluations of a polynomial, and not some other garbage? We will use a **proximity test** for this. However, proximity tests over a large domain can be expensive. So, we will use a **low-degree test** instead.

Again, consider the polynomial $P$ with its $n$ evaluations over the domain $D_0$, its Merkle Tree $\text{Tree}_0$ and Merkle Root $\text{root}_0$ as defined above. Let $P = P_0$ be our starting polynomial. We will now compute smaller polynomials by splitting the polynomial into even and odd parts, just like radix-2 FFT:

$$
P_0(x) = P_{0e}(x^2) + xP_{0o}(x^2)
$$

If $P_0$ has $n$ coefficients, then $P_{0e}$ and $P_{0o}$ will have $n/2$ coefficients each. Now, we _ask the verifier_ for a random coefficient $\beta_0$, and compute our next polynomial $P_1$ as:

$$
P_1(y = x^2) = P_{0e}(y) + \beta_0 P_{0o}(y)
$$

Now, we compute the domain for this polynomial $D_1 = \{x_k^2 : x_k \in D_0\}$ and its evaluations $P_1(x_k^2)$. Remember that if $\omega$ is a primitive $n$-th root of unity, then $\omega^2$ is a primitive $n/2$-th root of unity.

Finally, we will compute the evaluations of $P_1$ over $D_1$, create a Merkle Tree $\text{Tree}_1$ and compute the Merkle Root $\text{root}_1$. We can continue reducing the degree of the polynomial just like before:

$$
P_1(y) = P_{1e}(y^2) + yP_{1o}(y^2)
$$

Verifier gives us $\beta_1$ and we compute $P_2$:

$$
P_2(z = y^2) = P_{1e}(z) + \beta_1 P_{1o}(z)
$$

As we can see, this is a recursive process, and we continue until after $\log{n}$ steps we end up with a constant polynomial (i.e. of degree 0), along with a series of Merkle Trees and Merkle Roots. One folding step is sketched in code right below.
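Here is a minimal sketch of a single folding step (plain Rust, our own example, not from the course repo), faking field elements as integers modulo a tiny prime for readability; a real implementation would use a proper field type (e.g. from lambdaworks):

```rs
const P: u64 = 97; // a toy prime modulus

/// Fold the coefficients of P_i into those of P_{i+1}:
/// P_i(x) = P_e(x^2) + x * P_o(x^2)  =>  P_{i+1}(y) = P_e(y) + beta * P_o(y).
/// Even-indexed coefficients belong to P_e, odd-indexed ones to P_o.
fn fold(coeffs: &[u64], beta: u64) -> Vec<u64> {
    coeffs
        .chunks(2)
        .map(|pair| {
            let even = pair[0];
            let odd = if pair.len() == 2 { pair[1] } else { 0 };
            (even + beta * odd) % P
        })
        .collect()
}

fn main() {
    // 8 coefficients fold to 4, then 2, then 1 (the final constant)
    let mut p = vec![1, 2, 3, 4, 5, 6, 7, 8];
    for beta in [3u64, 5, 7] {
        p = fold(&p, beta); // beta plays the role of the verifier's randomness
    }
    assert_eq!(p.len(), 1);
    println!("constant polynomial: {}", p[0]);
}
```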
This process of splitting the polynomial in two, asking for a random coefficient, and computing a half-degree polynomial from that is called **random-folding**.

```mermaid
sequenceDiagram
actor P as Prover
actor V as Verifier

note over P: P_0(x)
P ->> V: root_0
V ->> P: beta_0

note over P: P_1(y=x^2)
P ->> V: root_1
V ->> P: beta_1

note over P, V: and so on...
```

### Fiat-Shamir Transform

Instead of asking the verifier for randomness, we can use the **Fiat-Shamir transform** to generate the randomness without any interaction with the verifier. To do this, a transcript of all prover steps is kept within the protocol, and a hash of this transcript is used to generate the randomness, under the random-oracle model.

You can see this within the code as well.

### Blow-up Factor

The number of coefficients in our polynomial is $n$, but our domain $D$ can be of larger size. The ratio $|D|/n$ is called the blow-up factor.

If you have a larger blow-up factor, you can make fewer queries for the same level of security. This means that the proof size is smaller. However, the computations will be more expensive and the memory usage will be higher, due to storing the Merkle Trees.

## Query Phase

In the query phase, the verifier will ask for evaluations of the polynomial at some random points. The prover will provide the evaluations and the Merkle paths to the verifier. The verifier can then compute the Merkle roots and verify that the evaluations are correct.

Recall that $P_i(x^2)$ is written as a linear combination of $P_{(i-1)e}(x^2)$ and $P_{(i-1)o}(x^2)$. With a quick observation, we can see that both the even and odd polynomials can be represented as evaluations of the original polynomial:

$$
\begin{align*}
P_{0e}(x^2) &= \frac{P_0(x) + P_0(-x)}{2} \\
P_{0o}(x^2) &= \frac{P_0(x) - P_0(-x)}{2x}
\end{align*}
$$

So, we can imagine how an evaluation "folds" with a pictorial example such as below:

```mermaid
graph TD
subgraph evaluations of P_2
direction TB
20[" "]; 21[" "];
end

subgraph evaluations of P_1
direction TB
10[" "]; 11[" "]; 12[" "]; 13[" "];
end

subgraph evaluations of P_0
direction TB
00[" "]; 01[" "]; 02[" "]; 03[" "]; 04[" "]; 05[" "]; 06[" "]; 07[" "];
end

03["P0(x)"] & 07["P0(-x)"] --> 10
10["P1(x^2)"] & 12["P1(-x^2)"] --> 21
21["P2(x^4)"]
```

As shown above, we have 2 evaluations for each step. In the end, the resulting evaluation is that of the constant polynomial. In $\log{n}$ steps, we have reduced the polynomial to a constant value, which is the **commitment** itself, meaning that the polynomial is close to a low-degree polynomial; otherwise, we wouldn't be able to reduce it to a constant polynomial in that many steps.

For each evaluation, the prover provides an authentication path (the path of the respective Merkle Tree for that evaluation) which has $\log{n}$ elements. The total size is therefore on the order of $\log^2{n}$.

Now that we have made sure of the degree of the polynomial, we must also make sure that it evaluates to some value at a point that the verifier picked, i.e. $P(z) = v$. We can do this by using an idea similar to what we have done in KZG.
If $P(z) = v$, then $P(z) - v = 0$ and $P(x) - v$ is divisible by $(x-z)$. We can use this to compute the quotient polynomial $Q(x)$:

$$
\frac{P(x) - v}{x - z} = Q(x)
$$

Now, all we have to do is apply FRI to this quotient polynomial instead, and we can be sure that the polynomial is close to a low-degree polynomial & that it evaluates to $v$ at $z$.

## Batch Proofs

If you have a series of polynomials $p_0, p_1, \ldots, p_k$ and some random coefficients $a_0, a_1, \ldots, a_k$, you can use FRI to commit to all of them by committing to their linear combination instead:

$$
P(x) = \sum_{i=0}^k a_i p_i(x)
$$

# STARK

In STARKs, instead of using Square Span programs based on $(U\cdot z)^2 = 1$, we will use a different type of representation called **Algebraic Intermediate Representation** (AIR). There are two components of AIR:

- Execution Trace
- Set of Polynomial Constraints

An execution trace can be thought of as a table with some columns, where every column denotes a register (as in a CPU register) and one column denotes the "clock". In this table, to ensure correctness you have to:

- Check that **initial** values are correct
- Check that jumps between each step (i.e. **transitions**) are correct
- Check that **final** values are correct
- Check that variables are consistent / well-formed, e.g. some register only holds boolean values

There are several types of constraints:

- Boundary constraints
- Consistency constraints
- Transition constraints

These constraints will have steps where they apply, along with the registers that they apply to.

- $r_1(t=0)=5$ means that register 1 at time 0 should be 5
- $r_3(t)(1 - r_3(t)) = 0$ means that register 3 is a boolean (bit) for all steps

Transitions are given as a multivariate polynomial $P(x, y)$ where $x$ is the current step and $y$ is the next step. For example, if a register is to be squared every step, this can be captured as $P(x, y) = y - x^2$, which makes all correct steps roots of this polynomial.

## Simple Example

Consider the table with columns $t, x$ with constraints:

- $x_0 = 2$
- $x_{i+1} = x_i^2$

You are given the execution trace (i.e. witness):

| $t$      | $x$       |
| -------- | --------- |
| 0        | 2         |
| 1        | 4         |
| 2        | 16        |
| $\ldots$ | $\ldots$  |
| $n$      | $2^{2^n}$ |

Our set of polynomial constraints is:

- Initial: $x(t = 0) = 2$
- Transitions: $x_{i+1} = x_i^2$ captured by $P(x, y) = y - x^2$
- Final: $x(t=n) = 2^{2^n}$

## STARK Protocol: Prover

We will now describe the non-interactive protocol (i.e. using the Fiat-Shamir transform):

1. Start with a transcript containing all the public inputs

2. Suppose the number of steps ($n+1$ in this case) is a power of two; even if it's not, you can add dummy rows to ensure this. Choose an **interpolation domain** $D_i = \{g^0, g^1, \ldots, g^n\}$ where $g$ is a primitive $(n+1)$-th root of unity generating this group of power-of-two order. Then, view $x_i$ as the evaluations of $t(x)$ over $D_i$, as in $t(g^i) = x_i$. Interpolate this polynomial (using FFT) to find $t(x)$, which represents the trace.

3. Commit to the trace polynomial $t(x)$.
   Choose a **low-degree extension domain** $D_0$ (such that $|D_0| > |D_i|$ and $|D_0| = 2^b|D_i|$).

   > Here, $D_0 = \{h\omega, h\omega^2, h\omega^3, \ldots\}$ where $h$ is the offset and $\omega$ is a primitive root of unity. Note that $h$ by itself is not in this set.

4. Append the Merkle root of $t$ to the transcript. Once the root is appended, we are committed to this transcript. Now, we are ready to ask the verifier for some random challenge.

5. Compose the polynomial $t(x)$ with the constraints. Notice that by multiplying $x$ with $g$ in the polynomial, you can reach later steps, i.e. $t(gx)$ gives the evaluation of the trace one step forward.

$$
P(t(x), t(gx)) = t(gx) - t(x)^2 = C(x)
$$

6. Sample random values (i.e. hash your transcript to obtain randomness) to create a linear combination of polynomials (i.e. Batch FRI), obtaining the **Composition Polynomial**:

$$
CP(x) = \alpha\frac{C_t(x)}{Z(x)} + \beta \frac{t(x)-2}{x-1} + \delta \frac{t(x) - 2^{2^n}}{x - g^n}
$$

> Here, $Z(x)$ is the vanishing polynomial over the domain $D_i$ except the final element:
>
> $$
> Z(x) = \prod_{i=0}^{n-1} (x - g^i) = \prod_{i=0}^{n} (x - g^i)(x - g^n)^{-1} = \frac{x^{n+1} - 1}{x - g^n}
> $$
>
> The reduction of the full product to $x^{n+1} - 1$ is due to the fact that $g$ is a primitive $(n+1)$-th root of unity, so $\{g^0, g^1, \ldots, g^n\}$ are exactly the roots of $x^{n+1} - 1$.

7. Commit to $CP(x)$ using $D_0$, where you do a low-degree extension to obtain a Merkle root.

8. Append this root to the transcript.

9. Sample an out-of-domain point $z$ that is neither within $D_i$ nor $D_0$.

10. Compute $CP(z)$, $t(z)$ and $t(gz)$; this is also known as the "Mask". Here, the verifier will check that indeed $CP(z)$ can be obtained from $t(z)$ and $t(gz)$ as shown in step 6 above. Using an out-of-domain sample here makes things more secure, and makes cheating harder for a malicious prover.

> You can skip this step (along with step 9) if you want, but you will have to do more consistency checks otherwise.

11. Append $CP(z)$, $t(z)$ and $t(gz)$ to the transcript.

12. Sample random $\delta, \epsilon, \zeta$ for the next step.

13. Compute $P_0$, which we call **DEEP ALI** (Algebraic Linking Identity). This is done to ensure that the evaluations related to point $z$ belong to the actual polynomials $CP$ and $t$.

$$
P_0(x) = \delta \frac{CP(x) - CP(z)}{x-z} + \epsilon \frac{t(x) - t(z)}{x-z} + \zeta \frac{t(x) - t(gz)}{x-gz}
$$

14. Apply FRI to $P_0(x)$ (i.e. the Commitment and Query phases of FRI).

The resulting proof has some evaluations along with the authentication paths (Merkle Tree paths) and roots, and the out-of-domain evaluations.

> You will have one polynomial for each column in the table. In our example above we have only one column, for $x$.

## STARK Protocol: Verifier

The verifier is a bit simpler than the prover, though there is still plenty of work:

1. **Replay all the challenges**. Start with the transcript of public inputs, append the root of $t(x)$, sample $\alpha, \beta, \delta$, append the root of $CP(x)$, sample $z$, append $CP(z), t(z)$ and $t(gz)$, sample $\delta, \epsilon, \zeta$ and resample all FRI randomness & queries.

2. Check that $CP(z)$ is correctly linked with $t(z), t(gz)$.
   To do this, first compute $C_t(z)$, then find the linear combination using $\alpha, \beta, \delta$ and see if it equals $CP(z)$ as given by the prover:

$$
\frac{C_t(z)}{Z(z)} = \frac{t(gz) - t(z)^2}{Z(z)}
$$

$$
CP(z) \stackrel{?}{=} \alpha\frac{C_t(z)}{Z(z)} + \beta \frac{t(z)-2}{z-1} + \delta \frac{t(z) - 2^{2^n}}{z - g^n}
$$

3. **Check FRI**. This means checking all the authentication paths and the folding steps, as well as checking the correctness of the DEEP ALI polynomial. For the latter, simply recompute:

$$
\delta \frac{CP(x) - CP(z)}{x-z} + \epsilon \frac{t(x) - t(z)}{x-z} + \zeta \frac{t(x) - t(gz)}{x-gz}
$$

If all these checks pass, the proof is correct.

## Implementation

Of course, LambdaWorks has an implementation.

> They also have a "grinding" implementation, which is a proof-of-work that makes proving a bit more costly for more security.

## See Also

- [ETHStark](https://eprint.iacr.org/2021/582.pdf)

--------------------------------------------------------------------------------
/docs/WEEK-5.md:
--------------------------------------------------------------------------------

> # Week 5
>
> We talked about PLONK.

# Plonk

Plonk is a zk-SNARK that is based on polynomial commitment schemes. It is a universal and scalable zk-SNARK that can be used for any NP language: a proof system that allows a prover to convince a verifier that a statement is true without revealing anything about the witness. With Plonk, you make use of **arithmetization** and a **polynomial commitment scheme** (PCS). For the PCS you have several options:

| PCS | Used in                    | Notes                                                    |
| --- | -------------------------- | -------------------------------------------------------- |
| KZG | "Vanilla" Plonk            | Trusted Setup, small proof size                          |
| IPA | Halo2 / Kimchi             | Efficient recursion                                      |
| FRI | Boojum / Plonky2 / Plonky3 | No trusted setup, large proof size, can use small fields |

IPA stands for Inner-Product Argument, and we can think of it as some kind of Pedersen commitment.

In FRI, the ability to use small fields enables us to implement faster provers, which is nice.

> **Proof Recursion / Aggregation**: We should make a small note on this, now that we mention "Efficient recursion" in IPA. The idea here is that you can create a proof that you have verified a proof. In particular, you can use a different proof system to do this, i.e. use Groth16 to prove that you have a valid STARK proof, and verify this final proof on Ethereum. This also allows for proof aggregation, where you can aggregate multiple proofs into a single proof.

## **Recall**: R1CS

Before Plonk, the most widely used system was R1CS, where we work with the equation:

$$
(Az) \cdot (Bz) = Cz
$$

Here, $A, B, C$ are sparse matrices, i.e. they contain mostly zeros; a one-constraint example is sketched below.
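As a tiny sketch (our own example, not from the Plonk paper), the single constraint $x_3 = x_1 \cdot x_2$ over $z = (1, x_1, x_2, x_3)$ needs one row in each matrix:

```math
A = \begin{pmatrix} 0 & 1 & 0 & 0 \end{pmatrix}, \quad
B = \begin{pmatrix} 0 & 0 & 1 & 0 \end{pmatrix}, \quad
C = \begin{pmatrix} 0 & 0 & 0 & 1 \end{pmatrix}
```

so that $(Az) \cdot (Bz) = x_1 x_2$ and $Cz = x_3$, and the R1CS equation enforces exactly $x_1 x_2 = x_3$.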
## Plonk Gates

Imagine that you have a gate with 2 inputs $a, b$ and an output $c$, such as:

```mermaid
graph LR
g((" "))
a & b --> g
g --> c
```

Consider the table $T$:

| $a$      | $b$      | $c$      |
| -------- | -------- | -------- |
| $a_1$    | $b_1$    | $c_1$    |
| $a_2$    | $b_2$    | $c_2$    |
| $\ldots$ | $\ldots$ | $\ldots$ |
| $a_n$    | $b_n$    | $c_n$    |

Now, consider some selectors $q_i$ and define the equation:

$$
q_{L_i}a_i + q_{R_i}b_i + q_{M_i}a_ib_i + q_{O_i}c_i + q_{C_i} = 0
$$

This equation above is a "Basic Gate", and it allows you to capture certain computations.
If $q_L=1, q_R=1$ and $q_O =-1$ and all other selectors are 0, then you get an addition gate:

$$
a + b - c = 0
$$

If $q_M=1$ and $q_O =-1$ and all other selectors are 0, then you get a multiplication gate:

$$
ab - c = 0
$$

With more clever selectors, you can implement **custom gates**. For example, you can introduce a term with a selector like $q_Na^5$ so that when $q_N=1$ and $q_O=-1$ you get the constraint:

$$
a^5 - c = 0
$$

This can help with some hard computations like elliptic curve additions, Poseidon hashes and foreign-field arithmetic. Moreover, custom gates allow one to use **lookup gates**. With a lookup gate, you can optimize a computation that is hard to arithmetize (e.g. SHA2 or AES) by simply storing a table of its inputs and outputs and then using that table to look up the correct result.

### Gates To Polynomials

Consider the basic gate again:

$$
q_{L_i}a_i + q_{R_i}b_i + q_{M_i}a_ib_i + q_{O_i}c_i + q_{C_i} = 0
$$

for $i = 0, 1, \ldots, n-1$. Now, we will encode these equations as relationships between polynomials. We will define polynomials $a(x), b(x), c(x)$ for the wires and $q_L(x), q_R(x), q_M(x), q_O(x), q_C(x)$ for the selectors.

Similar to STARK from the week before, we will pick a domain of size $n$ such as $D_i = \{g^0, g^1, \ldots, g^{n-1}\}$. We will interpolate the polynomials so that:

- $a(g^i) = a_i$
- $b(g^i) = b_i$
- $c(g^i) = c_i$

> We don't need to re-interpolate the selector polynomials per execution, because they are circuit-dependent: you can find them once you have written the circuit, independent of the inputs; they are "execution independent".

Now, define the polynomial $P(x)$ that is:

$$
P(x) = q_L(x)a(x) + q_R(x)b(x) + q_M(x)a(x)b(x) + q_O(x)c(x) + q_C(x)
$$

If the constraints are valid, then it must be that $P(x) = 0$ for all $x \in D_i$. This would therefore mean that $P(x)$ is the vanishing polynomial $Z(x)$ times some quotient polynomial $Q(x)$, that is:

$$
P(x) = Z(x)Q(x)
$$

> Recall that the vanishing polynomial is the product of roots over the domain, and when using roots of unity it has a really nice form, shown below:
>
> $$
> Z(x) = \prod_{x_i \in D_i} (x - x_i) = x^n - 1
> $$

## Public Inputs

For public inputs, we can add "dummy" gates where the input is $a_i$ and only $q_{L, i}=1$, while all other selectors are 0. The constraint is that $a_i = p_i$ where $p_i$ is the public input.
Again we are working over the basic gate equation, but the resulting polynomial is the public inputs polynomial:

$$
PI(x) = q_L(x)a(x) + q_R(x)b(x) + q_M(x)a(x)b(x) + q_O(x)c(x) + q_C(x)
$$

For the dummy gates this results in the constraint:

$$
a(g^i) = p_i = PI(g^i)
$$

The public input polynomial beyond the dummy gates should be equal to 0, i.e. $PI(g^i) = 0$. By splitting the gates like this, and using the selectors, we can take the public input polynomial $PI(x)$ out of the circuit polynomial $P(x)$ itself.

Note that the prover and verifier can compute this polynomial on their own, and they can use Lagrange basis polynomials for this:

$$
PI(x) = \sum_{i=0}^{n-1} p_i \cdot L_i(x)
$$

where $L_i(x)$ is the Lagrange basis polynomial such that $L_i(x_i) = 1$ and $L_i(x_j) = 0$ for $j \neq i$.

## Wiring between Gates

The gate equations above capture the constraints within a gate alone, but not the connections between gates!

```mermaid
graph LR
g1((" ")); g2((" "))
a1 & b1 --> g1
g1 --"c1"--> g2
b2 --> g2
g2 --> c2
```

In the diagram above, for example, $c_1$ of the first gate must be equal to $a_2$ of the second gate. Plonk has a notion of **copy constraints** to tackle this problem of "wiring" the gates correctly.

> The idea here is similar to "Memory Check" in CairoVM.

### Permutation Argument

Fix some value $i$, and consider two polynomials $Z(x)$ and $Z^*(x)$. The claim is that $L_i(a)(Z(a) - Z^*(a)) = 0$ for all $a \in D_i$ if and only if $Z(g^i) = Z^*(g^i)$.

Consider $f, g$ of degree less than $d$, and a permutation $\sigma : [n] \to [n]$. We say $g = \sigma(f)$ if for every $i \in [n]$ we have $g(\omega^i) = f(\omega^{\sigma(i)})$.

For example, the permutation $\sigma := \{1, 2, 3, 4\} \to \{3, 4, 2, 1\}$ implies the equalities:

- $g(\omega^1) = f(\omega^3)$
- $g(\omega^2) = f(\omega^4)$
- $g(\omega^3) = f(\omega^2)$
- $g(\omega^4) = f(\omega^1)$

Verifier samples $\beta, \delta \in \mathbb{F}$ and sends them to the prover. Then,

$$
f'(\omega^i) = f(\omega^i) + \beta \cdot i + \delta
$$

$$
g'(\omega^i) = g(\omega^i) + \beta \cdot \sigma(i) + \delta
$$

Then, the polynomial $Z(x)$ is computed as $Z(\omega) = 1$ and, for all $i = 2, \ldots, n$:

$$
Z(\omega^i) = \prod_{1 \leq j < i} \frac{f'(\omega^j)}{g'(\omega^j)} = Z(\omega^{i-1})\frac{f'(\omega^{i-1})}{g'(\omega^{i-1})}
$$

With this, the verifier can check for every $a \in \{\omega^0, \omega^1, \omega^2, \ldots\}$ that:

1. $L_1(a)(Z(a) - 1) = 0$
2. $Z(a)f'(a) = Z(\omega a)g'(a)$

> The permutation trick here is explained in more detail in the Plonk paper.

## Prover

The prover (having access to witness values $w_i$ for $i \in \{0, 1, \ldots, 3n\}$) does the following:

### Round 1: Blinding

Sample blinding factors $b_1, b_2, \ldots, b_9$, and compute:

$$
a(x) = \sum a_i L_i(x) + (b_1x + b_2)Z(x)
$$

Here $Z$ is the vanishing polynomial. Notice how $Z(x)$ evaluates to zero when the input is some $g^i$, but is non-zero elsewhere, so the term $(b_1x + b_2)$ adds some extra randomness to evaluations outside the domain.
$$
b(x) = \sum b_i L_i(x) + (b_3x + b_4)Z(x)
$$

$$
c(x) = \sum c_i L_i(x) + (b_5x + b_6)Z(x)
$$

The prover now commits to these polynomials to obtain $\boxed{a(x)}_1, \boxed{b(x)}_1, \boxed{c(x)}_1$. We will use the final 3 blindings in the next step.

### Round 2: Permutation

Now we will do the permutation argument. First, we sample $\beta, \gamma$ (from the transcript, as if provided by the verifier). Then, we build the permutation polynomial $z(x)$. Recall that if there is a value $v_i$ at index $i$, then there is a value $v_i'$ at index $\sigma(i)$ (permuted). Following the Plonk paper, the grand-product polynomial is:

$$
z(x) = L_1(x) + \sum_{i=1}^{n-1}\left(L_{i+1}(x)\prod_{j=1}^{i} \frac{(a_j + \beta \omega^j + \gamma)(b_j + \beta k_1 \omega^j + \gamma)(c_j + \beta k_2 \omega^j + \gamma)}{(a_j + \beta S_{\sigma_1}(\omega^j) + \gamma)(b_j + \beta S_{\sigma_2}(\omega^j) + \gamma)(c_j + \beta S_{\sigma_3}(\omega^j) + \gamma)}\right)
$$

where $k_1, k_2$ are coset shifts (see the Plonk paper for the exact construction). An additional blinding term $(b_7x^2 + b_8x + b_9)Z(x)$ is added to $z(x)$ as well.

### Round 3: Computing Quotients

In short: combine the gate constraints, the public inputs and the permutation checks on $z(x)$ into one big polynomial, divide it by the vanishing polynomial $Z(x)$ to obtain the quotient, and commit to the (split) quotient polynomial; the details are in the Plonk paper.

### Round 4: Evaluation

We must convince the verifier that all these constraints hold, and show that the polynomials are correct. We will evaluate the polynomials at some random point $\zeta$ (from the transcript) and send the evaluations to the verifier:

- $\bar{a} = a(\zeta)$
- $\bar{b} = b(\zeta)$
- $\bar{c} = c(\zeta)$
- $\bar{s_{\sigma_1}} = s_{\sigma_1}(\zeta)$
- $\bar{s_{\sigma_2}} = s_{\sigma_2}(\zeta)$
- $\bar{z}_g = z(\zeta g)$

The prover sends $(\bar{a}, \bar{b}, \bar{c}, \bar{s_{\sigma_1}}, \bar{s_{\sigma_2}}, \bar{z}_g)$ to the verifier.

### Round 5: Proving Evaluations

Sample a value $v$ from the transcript. Compute the linearization polynomial, which ties the committed polynomials to the evaluations from Round 4, and open all the evaluations with a batched commitment opening; again, the details are in the Plonk paper.

> During the random linear combination, instead of a random sample per point, you can use a single random sample and its consecutive powers, which is slightly less secure but reduces communication.

--------------------------------------------------------------------------------
/docs/WEEK-6.md:
--------------------------------------------------------------------------------

> # Week 6
>
> Skipped due to conference.

--------------------------------------------------------------------------------
/docs/WEEK-7.md:
--------------------------------------------------------------------------------

> # Week 7
>
> Multivariate polynomials, Multi-linear Extensions & Sumcheck, Binius.

# Multivariate Polynomials

A multivariate polynomial is a polynomial with more than one variable. For example, the polynomial $x^2 + 2xy + y^2$ is a multivariate polynomial in the variables $x$ and $y$.

So far within the bootcamp we have worked with univariate polynomials, where we did interpolation with FFT and we made use of quotient polynomials by dividing some polynomial by a vanishing polynomial over some domain.

The degree of a multivariate polynomial is the highest sum of the exponents of the variables in any term of the polynomial. For example, the degree of the polynomial $x^2 + 2xy + y^2$ is 2.

The Schwartz-Zippel lemma that we have made use of so far works here as well:
14 | 15 | $$ 16 | \Pr[P(r) = 0 : r \gets \mathbb{F}] \leq \frac{d \cdot n}{|\mathbb{F}|} 17 | $$ 18 | 19 | ## Multi-linear Extension (MLE) 20 | 21 | For a given function $f : S \to \mathbb{F}_p$ we can obtain a multilinear extension $\tilde{f}$ such that for the given $(x_0, x_1, \ldots, x_{n-1})$ we have: 22 | 23 | $$ 24 | f(x_0, x_1, \ldots, x_{n-1}) = \tilde{f}(x_0, x_1, \ldots, x_{n-1}) 25 | $$ 26 | 27 | where $v$ is a binary decomposition of $v$. 28 | 29 | We can think of this like interpolation we had done for univariate polynomials. 30 | 31 | Multi-linear extension is **unique**, just like the univariate interpolations! 32 | 33 | ### Lagrange Basis over Boolean Hypercube 34 | 35 | The set $S = \{0, 1\}^n$ is sometimes known as the **Boolean hypercube**. 36 | 37 | We can talk about Lagrange basis polynomials in the multivariate case as well. Consider $f(x) = v$. We can create a Lagrange basis that is 1 when $x = r$ and 0 otherwise: 38 | 39 | $$ 40 | L_r(x) = \prod_{i=0}^{n-1} (x_ir_i + (1 - x_i)(1 - r_i)) 41 | $$ 42 | 43 | where $r_i$ are the bits from the binary decomposition of $r$. The idea here is simple, the bits that we expect to be set or unset make up the terms where if $r_i$ is set we have $x_i$, otherwise we have $1 - x_i$. With many such basis functions, we can construct the MLE: 44 | 45 | $$ 46 | \tilde{f}(x) = \sum_{v}^{2^n-1} f(v) \cdot L_v(x) 47 | $$ 48 | 49 | > Consider the Lagrange basis for 3-bit $r$ inputs: 50 | > 51 | > - $L_0(x) = L_{000}(x_0, x_1, x_2) = (1-x_0)(1-x_1)(1-x_2)$ 52 | > - $L_1(x) = L_{001}(x_0, x_1, x_2) = x_0(1-x_1)(1-x_2)$ 53 | > - $L_2(x) = L_{010}(x_0, x_1, x_2) = (1-x_0)x_1(1-x_2)$ 54 | > - $L_3(x) = L_{011}(x_0, x_1, x_2) = x_0x_1(1-x_2)$ 55 | > - $L_4(x) = L_{100}(x_0, x_1, x_2) = (1-x_0)(1-x_1)x_2$ 56 | > - $L_5(x) = L_{101}(x_0, x_1, x_2) = x_0(1-x_1)x_2$ 57 | > - $L_6(x) = L_{110}(x_0, x_1, x_2) = (1-x_0)x_1x_2$ 58 | > - $L_7(x) = L_{111}(x_0, x_1, x_2) = x_0x_1x_2$ 59 | 60 | ### Evaluating at a Random Point 61 | 62 | In the multivariate case, we do not make use of FFT; we instead use a clever algorithm around the Lagrange basis construction. 63 | 64 | > See the "Proofs, Arguments, and Zero-Knowledge" book by Justin Thaler to see the efficient method, chapters 3 & 4. 65 | 66 | We can evaluate the multilinear extension at a random point $r = (r_0, r_1, \ldots, r_n)$ efficiently to get the value of the function at that point: 67 | 68 | $$ 69 | \tilde{f}(r_0, r_1, \ldots, r_n) = \sum_{v} f(v) \cdot L_v(r_0, r_1, \ldots, r_n) 70 | $$ 71 | 72 | # Sum-Check Protocol 73 | 74 | Sum-check protocol is an important protocol in the context of MLE-based proof systems. It is efficient, and has low communication costs. Consider a $\nu$-variate polynomial $g(x_1, x_2, \ldots, x_\nu)$ of degree $d$ over $\mathbb{F}$, and a set $S = \{0, 1\}^\nu$; the Sum-check proves that $H$ is the result of the sum below: 75 | 76 | $$ 77 | \sum_{x_1\in\{0, 1\}}\sum_{x_2\in\{0, 1\}}\ldots\sum_{x_\nu\in\{0, 1\}}g(x_1, x_2, \ldots, x_\nu) = H 78 | $$ 79 | 80 | We want to reduce the amount of work that the verifier has to do to check this result. In the naive version, the verifier would have to check $2^\nu$ values. The sum-check protocol reduces this to $O(\nu)$, along with a cost to evaluate $g$ at a random point. 81 | 82 | ## Protocol 83 | 84 | > We will describe the interactive one, but one can use Fiat-Shamir to make this non-interactive. 85 | 86 | 1. The prover sends the verifier the value $c_1$ which is claimed to be equal to $H$. 87 | 88 | 2. 
2. The prover sends $g_1(x_1)$ (a univariate polynomial of degree at most $d$) which is claimed to equal:

$$
\sum_{y_2\in\{0, 1\}}\sum_{y_3\in\{0, 1\}}\ldots\sum_{y_\nu\in\{0, 1\}}g(x_1, y_2, y_3, \ldots, y_\nu)
$$

> Basically, the sum is taken over all variables except the first one.

The verifier checks that $g_1(0) + g_1(1) = c_1$, which essentially translates to the original summation above, and also makes sure that $g_1$ is a polynomial of degree at most $d$.

> The verifier can check the degree by checking the number of coefficients, since the polynomial is sent in the clear.

3. The verifier chooses a random $r_1$ and sends it to the prover. We must now make sure that $g_1(r_1)$ is the correct value:

$$
\sum_{y_2\in\{0, 1\}}\sum_{y_3\in\{0, 1\}}\ldots\sum_{y_\nu\in\{0, 1\}}g(r_1, y_2, y_3, \ldots, y_\nu)
$$

Well, the expression above is just like the sum-check protocol, but instead of $\nu$ variables, we have $\nu - 1$ variables with $x_1 = r_1$ fixed. We can repeat the process above to prove this result.

4. The prover sends $g_2(x_2)$ which is claimed to equal:

$$
g_2(x_2) = \sum_{y_3\in\{0, 1\}}\sum_{y_4\in\{0, 1\}}\ldots\sum_{y_\nu\in\{0, 1\}}g(r_1, x_2, y_3, \ldots, y_\nu)
$$

The verifier checks that $g_2(0) + g_2(1) = g_1(r_1)$, and confirms the degree.

5. The verifier chooses a random $r_2$ and sends it to the prover. We must now make sure that $g_2(r_2)$ is the correct value.

**and so on...**

Towards the end, the prover sends $g_\nu(x_\nu)$, and the verifier checks that $g_\nu(0) + g_\nu(1) = g_{\nu-1}(r_{\nu-1})$, along with the degree check.

At the final step, the verifier picks a random $r_\nu$ and verifies that:

$$
g_\nu(r_\nu) = g(r_1, r_2, \ldots, r_\nu)
$$

and if this is true, it **accepts**. This final evaluation of $g$ is the random point evaluation we talked about earlier.

> There is also something called the Zero-Check Protocol.

## Proof Systems & PCS based on MLE & Sum-Check

- **HyperPlonk** adapts the Plonk protocol to multivariate polynomials.

- **Spartan** is a proof system based on R1CS.

- **Brakedown** is a polynomial commitment scheme based on multivariate polynomials.

- **Binius** is a really efficient proof system.

## See also

- [Semiotic AI blog post](https://semiotic.ai/articles/sumcheck-tutorial/)
- [Matteo notes](https://publish.obsidian.md/matteo/3.+Permanent+notes/Sum-Check+Protocol)
- [0xSage SumCheck implementation](https://github.com/0xSage/thaler)
- [Punwai SumCheck implementation](https://github.com/punwai/sumcheck)
- [Ingonyama SumCheck implementation](https://github.com/ingonyama-zk/super-sumcheck)
- [Arkworks SumCheck implementation](https://github.com/arkworks-rs/sumcheck/blob/master/src/ml_sumcheck/mod.rs#L18)
- [Proofs, Arguments, and Zero-Knowledge by Justin Thaler](https://people.cs.georgetown.edu/jthaler/ProofsArgsAndZK.pdf) Chapters 3 & 4

# Binius & Brakedown

> [Binius](https://eprint.iacr.org/2023/1784) is a modified [Brakedown](https://eprint.iacr.org/2021/1043)-type commitment. We will not go into the details of **Binius**, but instead we will describe **Brakedown** and **Binary Fields**.

Brakedown is a hash-based commitment scheme, which we will now go over.
Let's consider a polynomial $P(x)$ of degree 15, with 16 coefficients $(a_0, a_1, \ldots, a_{15})$. Evaluating this polynomial at $x = z$ translates to:

$$
P(z) = a_0 + a_1z + a_2z^2 + \ldots + a_{15}z^{15}
$$

This actually translates to a "vector \* matrix \* vector" multiplication:

$$
P(z) = v^t \cdot M \cdot w
$$

such that:

$$
v = \begin{bmatrix} 1 \\ z^4 \\ z^8 \\ z^{12} \end{bmatrix},
M = \begin{bmatrix}
a_0 & a_1 & a_2 & a_3 \\
a_4 & a_5 & a_6 & a_7 \\
a_8 & a_9 & a_{10} & a_{11} \\
a_{12} & a_{13} & a_{14} & a_{15}
\end{bmatrix},
w = \begin{bmatrix} 1 \\ z \\ z^2 \\ z^3 \end{bmatrix}
$$

Now, a similar idea applies to multilinear extensions as well. Recall that the Lagrange basis is given by:

$$
\{L_k^{(r)}\} = \textcircled{x}_{i=0}^n(1-r_i, r_i)
$$

Here the circled $x$ means a tensor product.

$$
\tilde{f}(r) = [ \textcircled{x}_{i=l_1}^n(1-r_i, r_i)]^t \cdot M \cdot \textcircled{x}_{i=0}^{l_1-1}(1-r_i, r_i)
$$

What we have to do is organize the coefficients within the MLE $\tilde{f}(x_0, x_1, \ldots, x_n)$ into a matrix $M$, as shown below:

$$
M = \begin{bmatrix}
\text{row}_0 \\
\text{row}_1 \\
\ldots \\
\text{row}_{n-l_1} \\
\end{bmatrix}
$$

Create a new matrix $U$ where each row of $U$ is the encoding $\text{Enc}$ (using some linear code) of the corresponding row of $M$:

$$
U = \begin{bmatrix}
\text{Enc}(\text{row}_0) \\
\text{Enc}(\text{row}_1) \\
\ldots \\
\text{Enc}(\text{row}_{n-l_1}) \\
\end{bmatrix}
$$

Then, we create a Merkle tree using the columns of $U$ as defined above.

The prover wants to show that $\tilde{f}(r) = v$ (the Eval procedure of the PCS). The prover sends $L$ to the verifier, as defined below:

$$
L = [ \textcircled{x}_{i=l_1}^n(1-r_i, r_i)]^t \cdot M
$$

which is simply a linear combination of the rows of $M$.

1. The verifier can check the evaluation by completing the rest of the operation using the right-hand-side tensor product:

$$
L \cdot \textcircled{x}_{i=0}^{l_1-1}(1-r_i, r_i) = v = \tilde{f}(r)
$$

Doing this requires around $2^{l_1}$ operations, and we usually have $l_1 \approx n/2$.

2. The verifier then checks the encoding of $L$:

$$
\text{Enc}(L) = [ \textcircled{x}_{i=l_1}^n(1-r_i, r_i)]^t \cdot U
$$

Instead of checking the entire encoding, we can do this statistically via columns:

$$
\text{Column}(\text{Enc}(L))_k = [ \textcircled{x}_{i=l_1}^n(1-r_i, r_i)]^t \cdot \text{Column}(U)_k
$$

The prover responds with the required columns and their authentication paths from the Merkle tree to answer the verifier's queries during this step.

Verification time & proof size are both $\mathcal{O}(\sqrt{K})$, meaning a sub-linear verifier and proof size. Asymptotically, this is worse than $\mathcal{O}(\log^2(n))$, but in practice it is still efficient. The prover runs in linear time $\mathcal{O}(n)$.

> When Reed-Solomon encoding is used within **Brakedown**, we call it **Shockwave**.

## Brakedown using Binary Fields

Binius is achieved by using Brakedown over Binary Fields.
A binary field is a field with characteristic 2, and is denoted by $\mathbb{F}_{2^m}$. The characteristic is the number of times you have to add 1 to itself to get 0. In binary fields, this is 2.

The simplest binary field is $\mathbb{F}_2$, which is just $\{0, 1\}$ with addition and multiplication that can be represented using XOR and AND respectively. Polynomials over this field have coefficients that are either 1 or 0.

We would like to have irreducible polynomials of a given degree to create extension fields. For example, to create a quadratic extension we need an irreducible polynomial of degree 2. The polynomial $x^2 + x + 1$ is irreducible over $\mathbb{F}_2$. Now, we can consider the polynomials $\mathbb{F}_2[x] / (1 + x + x^2)$, which form a field with 4 elements of the form $a + bx$ with $a, b \in \mathbb{F}_2$.

### Representation with Bits

Since we are working with "bit" coefficients, it is often useful to represent the polynomials as binary numbers. For example, the polynomial $x^3 + x + 1$ can be represented as 1011 in binary.

- Even in this notation, **addition** can be done using XOR.
- However, multiplication is not that straightforward because the degree may change. One can precompute a multiplication table to do this efficiently.

### Higher Extensions

How can we go to higher extensions? There are two methods:

- **Direct Extension**: Simply find a polynomial of higher degree that is irreducible.
  For example, $\mathbb{F}_2[x] / (1 + x + x^3 + x^4 + x^8)$ is a degree-8 extension used within AES, and its coefficients are 0 and 1 only.
- **Towers of Extensions**: Find an irreducible polynomial within your extension field, and extend over that, again and again.
  - Start with $\mathbb{F}_2 \to \mathbb{F}_{2^2} = \mathbb{F}_2[x_0] / (1 +x_0 + x_0^2)$ with some degree-2 polynomial in the binary field.
  - Then, extend to $\mathbb{F}_{2^2} \to \mathbb{F}_{2^4} = \mathbb{F}_{2^2}[x_1] / (1 + x_1x_0 + x_1^2)$ with some degree-2 polynomial in the previously extended field.
  - Then, extend to $\mathbb{F}_{2^4} \to \mathbb{F}_{2^8} = \mathbb{F}_{2^4}[x_2] / (1 + x_2x_1 + x_2^2)$ with some degree-2 polynomial in the previously extended field.
  - and so on...

Notice that an element in $\mathbb{F}_{2^8}$ can be represented as:

$$
a_0 + a_1x_2 = (a_{00} + a_{01}x_1) + (a_{10} + a_{11}x_1)x_2
$$

and these look very similar to the multilinear extensions we have been talking about.

--------------------------------------------------------------------------------
/docs/WEEK-8.md:
--------------------------------------------------------------------------------

> # Week 8
>
> Plookup & zkVMs.

# Lookup Argument

Remember the gate constraint:

$$
q_La + q_Rb + q_Mab + q_Oc + q_C + PI = 0
$$

From this, we had interpolated polynomials like $q_L(x), q_R(x), q_M(x), q_O(x), q_C(x)$ as well as $a(x), b(x)$ and $c(x)$. We also had the ability to capture more complex computations with **custom gates**.

Consider the XOR example over `u16` (16-bit unsigned) integers, i.e. $a \oplus b = c$. The constraints here are that $a, b$ must be in range, and $c$ should be the result of the XOR operation (which is quite complex to express in a finite field).

For this, we can instead compute everything beforehand and create a **lookup table**.
For example, consider a table $T$ with three columns such that $T_0$ has 16-bit values for $a$, $T_1$ has 16-bit values for $b$ and $T_2$ has 16-bit values for $c$. What we want is to show that, at the lookup steps, the trace values $a(x), b(x), c(x)$ have a corresponding "row" in this table.

We do that using a **lookup gate**. We will describe a selector for this lookup gate, $q_{LU}(x)$. Let's dive in.

## Compressing the Columns

Sample a random $\zeta$ from the verifier, and compress the columns of the table via a random linear combination:

$$
t_i = T_{0i} + \zeta T_{1i} + \zeta^2 T_{2i}
$$

We can also define a linear combination for the trace columns:

$$
f_i = a_i + \zeta b_i + \zeta^2 c_i
$$

The problem reduces to showing that $\{f_i\} \subset \{t_i\}$, i.e. every value $f_i$ shall appear among the values $t_i$. It is okay if $f_i$ has some duplicate values, or if some values of $t_i$ never appear among the $f_i$.

Here is the idea: if for two sets of values $\{a_i\}, \{b_i\}$ the following holds:

$$
\prod(x + a_i) = \prod(x + b_i)
$$

then $\{a_i\}$ is a permutation of $\{b_i\}$. This is because the polynomial on the left has roots at the $-a_i$, and the polynomial on the right has roots at the $-b_i$; if the polynomials are equal, then their roots must be equal. There is a problem with **multiplicities** though: what if the same value has been used many times?

> [!NOTE]
>
> There is an [alternative method](https://eprint.iacr.org/2022/1530.pdf) using the derivative of this as well:
>
> $$
> \sum \frac{1}{x + a_i} = \sum \frac{m_j}{x + b_j}
> $$

We will now work with randomized differences. Let $s = (f, t)$, sorted by $t$. Then, compute:

- $\Delta s_i = s_i + \delta s_{i+1}$
- $\Delta t_i = t_i + \delta t_{i+1}$

## Protocol

1. Compress the columns of $T$ to obtain $t_i$ as:

$$
t_i = T_{0i} + \zeta T_{1i} + \zeta^2 T_{2i}
$$

> Here, $\zeta$ is provided by the verifier.

2. Construct the "query wire", which is just the values $f_i$. There are two cases though: one when we are doing a lookup, and one when we are not.

$$
f_i =
\begin{cases}
a_i + \zeta b_i + \zeta^2 c_i & \text{if lookup} \\
T_{0,n} + \zeta T_{1,n} + \zeta^2 T_{2,n} & \text{if not lookup}
\end{cases}
$$

> The "not lookup" case simply fills in some dummy values that are included within the table anyway. In the example above, we simply picked the last ($n$-th) row of the table.

3. Create the sorted vector $s$ by combining the vectors $f, t$:

- $f = (f_0, f_1, \ldots, f_n)$
- $t = (t_0, t_1, \ldots, t_n)$
- $s = (f, t)$ concatenated & sorted with respect to $t$

> An example: $t = (1, 5, 7, 2, 4, 8)$ and $f = (1, 4, 4, 1, 8, 7)$.
>
> When we build $s$ just by concatenating these two vectors, we get $s = (1, 4, 4, 1, 8, 7, 1, 5, 7, 2, 4, 8)$.
>
> After sorting with respect to $t$, we get $s = (1, 1, 1, 5, 7, 7, 2, 4, 4, 4, 8, 8)$.
>
> This helps when we are computing the differences: when two adjacent values in the sorted $s$ are equal, the difference $\Delta s_i = s_i + \delta s_{i+1}$ becomes a factor like $(1 + \delta)s_i$.
Compute deltas: 95 | 96 | $$ 97 | \Delta t_i = \begin{cases} 98 | t_i + \delta t_{i+1} & \text{if } i \in \{0, 1, \ldots, n-1\} \\ 99 | t_i + \delta t_0 & \text{if } i = n \text{ to wrap around} 100 | \end{cases} 101 | $$ 102 | 103 | $$ 104 | \Delta s_i = \begin{cases} 105 | s_i + \delta s_{i+1} & \text{if } i \in \{0, 1, \ldots, n-1\} \\ 106 | s_i + \delta s_0 & \text{if } i = n \text{ to wrap around} 107 | \end{cases} 108 | $$ 109 | 110 | Note that $s_i$ is twice the length of $t_i$. For that reason, we will split $\Delta s_i$ into two parts and compute $\Delta s_i$ for both of them: 111 | 112 | - $h_1 = (s_0, s_1, \ldots, s_n)$ 113 | - $h_2 = (s_{n+1}, s_{n+2}, \ldots, s_{2n+1})$ 114 | 115 | Now, let $\omega$ be a primitive root of unity of order $n$ (as we used to interpolate the trace polynomial), we will create the following polynomial: 116 | 117 | $$ 118 | Z(\omega x) = Z(x) \cdot \frac{ 119 | (1+\delta)(\epsilon + f(x))(\epsilon(1 + \delta) + t(x) + \delta t(\omega x)) 120 | }{ 121 | (\epsilon(1 + \delta) + h_1(x) + \delta h_1(\omega x))(\epsilon(1 + \delta) + h_2(x) + \delta h_2(\omega x)) 122 | } 123 | $$ 124 | 125 | This is similar to the permutation argument we had seen in PlonK. What it proves is that $(f, t)$ is a permutation of the sorted $s$. 126 | 127 | $$ 128 | Z(\omega x) = Z(x) \cdot \frac{ 129 | (1+\delta)(\epsilon + f(x))(\epsilon(1 + \delta) + \Delta t(x)) 130 | }{ 131 | (\epsilon(1 + \delta) + \Delta h_1 (x))(\epsilon(1 + \delta) + \Delta h_2(x)) 132 | } 133 | $$ 134 | 135 | > Another way to view this polynomial is to see that: 136 | > 137 | > $$ 138 | > Z_{i+1} = Z_i \cdot \frac{ 139 | > (1+\delta)(\epsilon + f_i)(\epsilon(1 + \delta) + \Delta t_i) 140 | > }{ 141 | > (\epsilon(1 + \delta) + \Delta h_{1,i})(\epsilon(1 + \delta) + \Delta h_{2,i}) 142 | > } 143 | > $$ 144 | 145 | We can split $s$ into $h_1, h_2$ in an even-odd order, and rewrite the equation above as below, changing one instance of $h_1$ with $h_2$: 146 | 147 | $$ 148 | Z(\omega x) = Z(x) \cdot \frac{ 149 | (1+\delta)(\epsilon + f(x))(\epsilon(1 + \delta) + t(x) + \delta t(\omega x)) 150 | }{ 151 | (\epsilon(1 + \delta) + h_1(x) + \delta h_2(\omega x))(\epsilon(1 + \delta) + h_1(x) + \delta h_2(\omega x)) 152 | } 153 | $$ 154 | 155 | Let's revisit our polynomials then: 156 | 157 | - **Selector Polynomials**: The lookup selector $q_{LU}(x)$ is designed such that $q_{KU}(\omega^k)$ is 1 if the $k$-th gate is a lookup gate, or 0 if it is not. 158 | 159 | - **Permutation Polynomials**: $S_{\sigma_0}(x), S_{\sigma_1}(x), S_{\sigma_2}(x)$ along with $S_{ID_0}(x) = x, S_{ID_2}(x) = k_1x$, polys x, k_1, and k_2x (out of domain ks) 160 | 161 | - **Table Polynomials**: $T_0(x), T_1(x), T_2(x)$ will simply interpolate the columns of the table. 162 | 163 | Finally, show that: 164 | 165 | 1. Gate constraints hold. 166 | 2. Lookup constraints hold: $q_{LU, i} \cdot (a_i + \zeta b_i + \zeta^2 c_i) = 0$ 167 | 3. Copy constraints hold 168 | 4. Values of $f_i$ are contained in $t_i$ 169 | 170 | ## Revisiting the Protocol 171 | 172 | - **Round 1**: Compute the blinded $a(x), b(x), c(x)$ is the same as before, and obtain $\boxed{a}, \boxed{b}, \boxed{c}$. 173 | - **Round 2**: Compute the compressed table & do the sorting using $f_i, t_i$. The table is precomputed, but the compression requires a public input from the verifier (i.e. the transcript). Once the sorted set is computed, we end up with $\boxed{f}, \boxed{h_1}, \boxed{h_2}$. Here these polynomials have blinding factors as well. 
174 | - **Round 3**: Compute the permutation arguments, with some additional randomness & blindings. Now we will capture both the copy constraints for $a, b, c$ and the permutations for the lookups $f, t$. We end up with $\boxed{Z_1}, \boxed{Z_2}$, one for each respectively. 175 | - **Round 4**: Compute the quotient polynomials, showing that the constraints hold and all. 176 | - **Round 5**: Comptute evaluations on the random sampled point, for each polynomial. 177 | - **Round 6**: Create the opening proof for the evaluations. 178 | 179 | > At this point there were too many equations to write down, so we end here... 180 | 181 | ### See Also 182 | 183 | - [Plookup](https://eprint.iacr.org/2020/315.pdf) 184 | - [PlonKup](https://eprint.iacr.org/2022/086) 185 | 186 | # Zero-Knowledge Virtual Machines (zkVM) 187 | 188 | A zero-knowledge virtual machine is a virtual machine that can execute programs such that a zero-knowledge proof can be created about their execution. The idea is to have a virtual machine that can execute programs, but the verifier does not know what the program is doing. The verifier only knows that the program has been executed correctly. 189 | 190 | ## In the Wild 191 | 192 | There are quite a variety of zkVMs with different designs: 193 | 194 | - **Cairo**: it was the first VM among all. It is not as fancy as the new ones, but it opened the path for the rest. 195 | - **RISC0**: it is a RISC-V based VM, able to prove Rust code 196 | - **Valida/SP1**: ... 197 | - **Miden**: has its own assembly instruction set. 198 | - **Nexus**: ... 199 | - **Ola**: ... 200 | - **Jolt**: ... 201 | 202 | ### Architecture 203 | 204 | There are two main **architectures** in any kind of VM: 205 | 206 | - **Von-Neumann**: The operations and instruction are together in the same piece of memory 207 | - **Harvard**: The operations and instructions are separated 208 | 209 | ### Proof Systems 210 | 211 | The most important thing about a zkVM is the proof system that it uses. We have several options: 212 | 213 | - **STARKs** (Cairo, Risc0, Miden) 214 | - **Plonky3** (Valida/SP1) 215 | - **Plonky2** + **STARKY** (Ola) 216 | - **Lasso** (Jolt) 217 | - **Nexus** (Elliptic Curve snarks) 218 | 219 | The choice of proof system will affect three main things: 220 | 221 | - **Proof Size**: we want small proofs 222 | - **Speed**: we want the VM to be fast 223 | - **Recursion**: we want recursion to be easy 224 | 225 | In general, most VMs seem to prefer STARKs because it easier to do recursion & you can choose your field; its also post-quantum secure! The large proof size is a problem there, but you can do a final proving step with a much smaller (constant-size) proof to solve that. 226 | 227 | ### Instruction Set 228 | 229 | Many VMs have a _proof compression_ step that generates one final proof from a multiple of them. 230 | 231 | We should also think of the Instruction Set Architecture (ISA) of the VM. The ISA is the set of instructions that the VM can execute. The number of instructions is important, as well as the complexity of the instructions. 232 | 233 | - **RISC-V** (RISC0) 234 | - **Cairo** (Cairo) 235 | - **Miden Assembly** (Miden) 236 | - **LLVM** + own ISA (Valida) 237 | 238 | ### Modularity 239 | 240 | A VM can be Modular or Monolithic: 241 | 242 | - A modular zkVM is one that can be split into different parts, and each part can be replaced by another one; it is easier to extend such a zkVM with new operations. 
The communication between the different modules of the VM is often done via lookup arguments. 243 | 244 | - A monolithic zkVM however, is one that is a single piece of code. 245 | 246 | Some modularity examples: 247 | 248 | - In Valida/SP1, we have a specialized Arithmetic Logic Unit (ALU) "copressor" that does specialized arithmetic operations. This is separated from the main CPU of the zkVM, all you have to do is communicate between them. 249 | - In Miden, we have "chiplets". 250 | - In Cairo we have "built-ins". 251 | 252 | When we use more modules, our proof size goes up. This is partly due to "communication" proofs. 253 | 254 | ### Finite Field 255 | 256 | Another important aspect is the field used within the zkVM. 257 | 258 | - Stark252 (Cairo) 259 | - Mini-Goldilocks (Miden, Ola) 260 | - BabyBear (RISC0, SP1, Lita) 261 | - BN254 base & scalar fields (Nexus) 262 | 263 | > [!WARN] 264 | > 265 | > If we are using elliptic curves, we are not as free as we would be in STARK when it comes to picking our field. 266 | 267 | Mersenne31 or the binary fields (e.g. as used with Binius) are not used yet, but it would be nice to see them in action sometime. 268 | 269 | > Does using a small field incur any security costs? Well, not really because the sampling is done from an extension field, which is much larger even if we are working with a small field. 270 | 271 | ### Recursion 272 | 273 | It is important if the VM has recursion or not. 274 | 275 | - If you don't have recursion, the size of the things that you can prove becomes **bounded**. 276 | - If you have recursion, you can prove things of arbitrary size. 277 | 278 | For example in StarkNet, instead of proving a whole block, you can prove each transaction separately and then aggregate the proofs. 279 | 280 | With the ability to do recursion, we can talk about **continutions**. If you want to prove a very large program, you can run up to a certain point, generate the proof, and then move on to the next part of the program and so on. 281 | 282 | - One can reduce a list of proofs in a binary-tree fashion, reducing two-to-one until we get to the root. 283 | 284 | - Or we can do "rollup continuations" where we prove each segment along with a proof that the previous segment was correct. 285 | 286 | > **Final Proofs**: To make it easy to verify proofs in resource-constrained systems, one can choose to do a final proof using a proof system that is known to have a very efficient verifier. For example, a Groth16 proof to show that a STARK proof is valid. 287 | 288 | ### Folding Schemes / Proof-Carrying Data 289 | 290 | If we are using R1CS (e.g. Nexus), we have some nice folding schemes (Nova, SuperNova, HyperNove) that we can use to "fold" multiple instances into one. Recall that R1CS looked like this: 291 | 292 | $$ 293 | (Az) \cdot (Bz) = (Cz) 294 | $$ 295 | 296 | The question is, if I had $z_1$ and $z_2$ can I combine them to produce $z'$ such that if I verify the R1CS with $z'$ it is the same as verifying the R1CS with $z_1$ and $z_2$ separately? The answer is: **yes**, you can. However, you need to do some modifications to R1CS. 297 | 298 | Firstly, if the systems are linear, you can do a linear combination to compute $z' = z_1 + r \cdot z_2$. If the systems are not linear, this will fail though. An idea for this was to relax R1CS a bit: 299 | 300 | $$ 301 | (Az) \cdot (Bz) = u \cdot (Cz) + E 302 | $$ 303 | 304 | by adding "slack terms" $u$ and $E$ that account for the error. This is the idea behind **Nova** folding scheme. 
It is based on "Incrementially Verifiable Computation" (IVC). Instead of compressing all proofs into one, here we compress the "executions of the VM". The only proof that is generated is from the end result alone. 305 | 306 | This is also the idea behind Mina Protocol, where the blockchain is compressed into a single proof. Their construction in based on Pickles (whic is based on Kimchi) and these make use of the Pasta (Pallas + Vesta) pair of elliptic curves. In such a pair of curves, the base field of one is the scalar field of the other and vice-versa. 307 | 308 | ### Hash Functions 309 | 310 | The choice of hash function is rather important in a VM, as it is used in many places. Some choices: 311 | 312 | - Pedersen 313 | - Poseidon 314 | - Monolotih 315 | - Rescue Prime 316 | - BLAKE 317 | - KECCAK 318 | 319 | ## See also 320 | 321 | - Miden docs are nice 322 | - CairoVM paper is nice 323 | -------------------------------------------------------------------------------- /exercises/interview/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "interview" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lambdaworks-math.workspace = true 10 | -------------------------------------------------------------------------------- /exercises/interview/README.md: -------------------------------------------------------------------------------- 1 | # Lambda0b10 Interview Questions 2 | 3 | > Using lambdaworks, compute the public key associated with the secret key `0x6C616D6264617370` with the BLS12-381 elliptic curve. Provide link to repo. 4 | 5 | It is the point `(0x67f9ffc5eaf6c19292112eadf50c11e7460e7568493141676f6ba1374badd9f6ab1f2f5e155b0e3d2f4c1de79554f80, 0x18509d22f2107b667a8f75de737a4fb967f6c3e745a7c2361868515402318f006bd360b8a8763d7844381c6e510799cc)`. See [here](./src/main.rs). 6 | 7 | > What is a lookup argument and what are the protocols used? 8 | 9 | A lookup argument is a set-membership argument, i.e. instead of computing f(x) = y, one can lookup a table where the domain and range of f is given as columns, and see if a row has x and y in that table. This helps with the cases where computing f is costly, (i.e. a SHA256 hash in a circuit) but looking up the input and output is much more efficient. Plookup (A. Gabizon & Z. Williamson) was a milestone in the lookup tables scene. TinyRAM (Bootle et al.) is another earlier work that made use of lookups. 10 | 11 | > What are the differences between SHA-2 and SHA-3? 12 | 13 | SHA-2 uses Merkle-Damgard construction, while SHA-3 uses Sponge construction. MD constructions are open to length extension attacks, Sponge are not. SHA-2 is more performant than SHA-3, although SHA-3 is more amenable to parallelization. 14 | 15 | > Explain Reed-Solomon codes in a concise way 16 | 17 | Reed-Solomon code is an error-correcting code, where a message of length K is treated as a univariate polynomial of degree K-1, and the codeword is the evaluation of this polynomial at N publicly known points. The distance of this code is n - k + 1, which is actually the most optimal one; i.e. Reed-Solomon code is an optimal linear code. 18 | 19 | > Give the proof systems included in lambdaworks 20 | 21 | Based on the table under the README of Lambdaworks, it currently supports Groth16 and STARK. 
22 | 23 | > Give the multiplicative inverse of 2 modulo $2^{64} - 2^{32} + 1$ (the so-called mini-Goldilocks prime) 24 | 25 | It is `9223372034707292161`. See [here](./src/main.rs). 26 | 27 | > Explain generics in Rust. 28 | 29 | Generics are template arguments that allow an implementation or definition to respect multiple types. When the logic of some code is applicable to multiple types, we go for generics instead of writing that code for each type separately. Generics can also help define objects (structs) that accept multiple types as well. 30 | 31 | Rust's generics are quite powerful, in the sense that we can not only specify types but also specify traits: we can have template arguments that accept any type that implements a certain function, or a trait. 32 | 33 | > Why are we launching this today? What makes this day so special? 34 | 35 | Perhaps its because today is the day before the Dencun upgrade on Ethereum, and we owe the tech behind it to a lot of cryptography. Not sure though, every day is special if it is the start of a cryptography bootcamp! :) 36 | -------------------------------------------------------------------------------- /exercises/interview/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use lambdaworks_math::cyclic_group::IsGroup; 4 | use lambdaworks_math::elliptic_curve::short_weierstrass::curves::bls12_381::curve::BLS12381Curve; 5 | use lambdaworks_math::elliptic_curve::traits::IsEllipticCurve; 6 | use lambdaworks_math::field::element::FieldElement; 7 | use lambdaworks_math::field::fields::u64_goldilocks_field::Goldilocks64Field; 8 | 9 | /// Using lambdaworks, compute the public key associated with the secret key 0x6C616D6264617370 with the BLS12-381 elliptic curve. 
10 | #[test] 11 | fn test_chal_bls12_381() { 12 | let gen = BLS12381Curve::generator(); 13 | let privkey = 0x6C616D6264617370u64; // lambdasp 14 | let pubkey = gen.operate_with_self(privkey).to_affine(); 15 | println!("Public key:\n({}, {})", pubkey.x(), pubkey.y()); 16 | assert_eq!("0x67f9ffc5eaf6c19292112eadf50c11e7460e7568493141676f6ba1374badd9f6ab1f2f5e155b0e3d2f4c1de79554f80", pubkey.x().to_string()); 17 | assert_eq!("0x18509d22f2107b667a8f75de737a4fb967f6c3e745a7c2361868515402318f006bd360b8a8763d7844381c6e510799cc", pubkey.y().to_string()); 18 | } 19 | 20 | /// Give the multiplicative inverse of 2 modulo 2^{64} - 2^{32} + 1 (the so-called mini-Goldilocks prime) 21 | #[test] 22 | fn test_chal_goldilocks() { 23 | type Goldilocks64FieldElement = FieldElement; 24 | 25 | let two = Goldilocks64FieldElement::from_raw(2u64); 26 | let two_inv = two.inv().expect("expected inverse"); 27 | println!("Inverse of two in Goldilocks64:\n{}", two_inv); 28 | assert_eq!(two * two_inv, Goldilocks64FieldElement::one()); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /exercises/ntt/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ntt" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lambdaworks-math.workspace = true 10 | log.workspace = true 11 | env_logger.workspace = true 12 | -------------------------------------------------------------------------------- /exercises/ntt/README.md: -------------------------------------------------------------------------------- 1 | # Number Theoretic Transform (NTT) 2 | 3 | More specifically, here we implement radix-2 fast Number Theoretic Transform (NTT) and its inverse (INTT). The NTT of a vector of $n$ polynomial coefficients $a_0, a_1, \ldots, a_{n-1}$ is defined as $n$ new coefficients $A_0, A_1, \ldots, A_{n-1}$ such that: 4 | 5 | $$ 6 | A_j = \sum_{i=0}^{n-1} a_j \cdot \omega^{ij} 7 | $$ 8 | 9 | where $n$ is a power of 2, $\omega$ is a primitive $n$-th root of unity in the finite field $\mathbb{F}_p$, and all operations are defined over the field $\pmod p$. Notice that $A_j$ is equal to the polynomial evaluated at $\omega^j$. 10 | 11 | > [!NOTE] 12 | > 13 | > A primitive $n$-th root of unity is an element $\omega$ such that $\omega^n = 1$ and $\omega^j \ne 1$ for all $0 < j < n$ that divides $n$. 14 | 15 | In the more explicit form: 16 | 17 | $$ 18 | A_j = a_0 \cdot \omega^{0j} + a_1 \cdot \omega^{1j} + \ldots + a_{n-1} \cdot \omega^{(n-1)j} 19 | $$ 20 | 21 | The trick is to split the polynomial into two parts: the even coefficients and the odd coefficients. 
This is done by using the following relations: 22 | 23 | $$ 24 | \begin{align*} 25 | A_j = &a_0 \cdot \omega^{0j} + a_2 \cdot \omega^{2j} + \ldots + a_{n-2} \cdot \omega^{(n-2)j} &\text{(even terms)}\\ 26 | + &a_1 \cdot \omega^{1j} + a_3 \cdot \omega^{3j} + \ldots + a_{n-1} \cdot \omega^{(n-1)j} &\text{(odd terms)} 27 | \end{align*} 28 | $$ 29 | 30 | Now notice that odd terms have a common factor of $\omega^j$ below: 31 | 32 | $$ 33 | \begin{align*} 34 | A_j = &a_0 \cdot \omega^{0j} + a_2 \cdot \omega^{2j} + \ldots + a_{n-2} \cdot \omega^{(n-2)j}\\ 35 | + w^j (&a_1 \cdot \omega^{0j} + a_3 \cdot \omega^{2j} + \ldots + a_{n-1} \cdot \omega^{(n-2)j}) 36 | \end{align*} 37 | $$ 38 | 39 | Now let us substitue $\gamma = \omega^2$ and re-write the above equation: 40 | 41 | $$ 42 | \begin{align*} 43 | A_j = &a_0 \cdot \gamma^{0j} + a_2 \cdot \gamma^{1j} + \ldots + a_{n-2} \cdot \gamma^{(n/2-1)j}\\ 44 | + w^j(&a_1 \cdot \gamma^{0j} + a_3 \cdot \gamma^{1j} + \ldots + a_{n-1} \cdot \gamma^{(n/2-1)j}) 45 | \end{align*} 46 | $$ 47 | 48 | What we ended up with is two smaller NTTs of size $n/2$ each! This is the basis of the (radix-2) Cooley-Tukey algorithm for fast Fourier transform. The algorithm is recursive and has a time complexity of $O(n \log n)$. 49 | 50 | > [!NOTE] 51 | > 52 | > If $\omega$ is a primitive $n$-th root of unity, then $\omega^2$ is a primitive $n/2$-th root of unity. So this substitution does not break that property. 53 | 54 | Primitive $n$-th roots of unity have two properties that are useful for the NTT: 55 | 56 | - **Periodicity**: $\omega^{j + n} = \omega^j$ for all $j$. It is quite evident why this is: 57 | 58 | $$ 59 | \omega^{j + n} = \omega^j \cdot \omega^n = \omega^j \cdot 1 = \omega^j 60 | $$ 61 | 62 | - **Symmetricity**: $\omega^{j + n/2} = -\omega^j$ for all $j$. (TODO: explain why) 63 | 64 | We end up with the following "butterfly" operation: 65 | 66 | $$ 67 | \begin{align*} 68 | A_j &= E_j + \omega^j O_j \\ 69 | A_{j+n/2} &= E_j - \omega^j O_j 70 | \end{align*} 71 | $$ 72 | 73 | where $E_j$ is the even part and $O_j$ is the odd part of $A_j$, as depicted above. Here, $\omega^j$ is also called the "twiddle factor". 
74 | -------------------------------------------------------------------------------- /exercises/ntt/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod ntt; 2 | pub use ntt::NTT; 3 | -------------------------------------------------------------------------------- /exercises/ntt/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use lambdaworks_math::{ 4 | field::{element::FieldElement, test_fields::u64_test_field::U64Field}, 5 | polynomial::Polynomial, 6 | }; 7 | use ntt::NTT; 8 | 9 | // define a prime field of order 17 10 | type F = U64Field<17>; 11 | type FE = FieldElement; 12 | 13 | fn main() { 14 | env::set_var("RUST_LOG", "debug"); 15 | env_logger::init(); 16 | 17 | // 13 is a primitive 4-th root of unity 18 | // and 4 is the max we can have here because 17 - 1 = 2^4 19 | let w = FE::from(13u64); 20 | 21 | let coeffs = (0..4).map(|i| FE::from(i as u64)).collect::>(); 22 | let poly = Polynomial::new(&coeffs); 23 | let ntt = NTT::new(w.clone(), 4); 24 | 25 | log::info!("Twiddle factors:"); 26 | for (i, w_i) in ntt.twiddles.iter().enumerate() { 27 | log::info!("w^{} = {}", i, w_i.representative()); 28 | } 29 | let evals = ntt.forward(&coeffs); 30 | 31 | // confirm evaluations 32 | for (i, e) in evals.iter().enumerate() { 33 | let y = poly.evaluate(&ntt.twiddles[i]); 34 | assert_eq!(e, &y); 35 | log::debug!("A_{} = {}", i, e.representative()); 36 | } 37 | 38 | // inverse 39 | // TODO: !!! 40 | } 41 | -------------------------------------------------------------------------------- /exercises/ntt/src/ntt.rs: -------------------------------------------------------------------------------- 1 | use lambdaworks_math::field::{element::FieldElement, traits::IsField}; 2 | 3 | pub struct NTT { 4 | /// Twiddle factors w^0, w^1, w^2, ..., w^(n-1) in order, 5 | /// so that `twiddles[i] = w^i`. 6 | pub twiddles: Vec>, 7 | } 8 | 9 | impl NTT { 10 | pub fn new(w: FieldElement, n: u64) -> Self { 11 | // must be primitive 2^k-th root of unity 12 | assert_eq!(n.count_ones(), 1, "n must be a power of 2"); 13 | assert_eq!(w.pow(n), FieldElement::one()); 14 | assert_ne!(w.pow(n - 1), FieldElement::one()); 15 | 16 | Self { 17 | // construct twiddle factors w^0, w^1, w^2, ..., w^(n-1) 18 | twiddles: (0..n).map(|i| w.pow(i)).collect(), 19 | } 20 | } 21 | 22 | pub fn forward(&self, coeffs: &[FieldElement]) -> Vec> { 23 | assert!(self.twiddles.len() >= coeffs.len(), "too many inputs"); 24 | let n = coeffs.len(); 25 | if n == 1 { 26 | return coeffs.to_vec(); 27 | } 28 | assert_eq!(n.count_ones(), 1, "n must be a power of 2"); 29 | 30 | let half = n >> 1; 31 | let (even, odd) = even_odd_split(coeffs.to_vec()); 32 | let (even, odd) = (self.forward(&even), self.forward(&odd)); 33 | 34 | let mut res = vec![FieldElement::zero(); n]; 35 | for j in 0..half { 36 | // A_j = E_j + w^j * O_j 37 | res[j] = even[j].clone() + self.twiddles[j].clone() * odd[j].clone(); 38 | 39 | // A_{j + n/2} = E_j - w^j * O_j 40 | res[j + half] = even[j].clone() - self.twiddles[j].clone() * odd[j].clone(); 41 | } 42 | 43 | res 44 | } 45 | } 46 | 47 | /// Splits a given array into two arrays, one containing the elements at even indices and the other 48 | /// containing the elements at odd indices. 
49 | pub fn even_odd_split(arr: Vec) -> (Vec, Vec) { 50 | let (even, odd): (Vec<_>, Vec<_>) = arr.into_iter().enumerate().partition(|(i, _)| i & 1 == 0); 51 | 52 | let even = even.into_iter().map(|(_, x)| x).collect::>(); 53 | let odd = odd.into_iter().map(|(_, x)| x).collect::>(); 54 | 55 | (even, odd) 56 | } 57 | 58 | #[cfg(test)] 59 | mod tests { 60 | use super::*; 61 | 62 | #[test] 63 | fn test_even_odd_split() { 64 | let arr = vec![1, 2, 3, 4, 5, 6, 7, 8]; 65 | let (even, odd) = even_odd_split(arr); 66 | 67 | assert_eq!(even, vec![1, 3, 5, 7]); 68 | assert_eq!(odd, vec![2, 4, 6, 8]); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /exercises/rsa/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rsa" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | num-bigint = "0.4.5" 10 | num-traits = "0.2.19" 11 | -------------------------------------------------------------------------------- /exercises/rsa/src/lib.rs: -------------------------------------------------------------------------------- 1 | use num_bigint::BigUint; 2 | use num_traits::FromPrimitive; 3 | 4 | /// A dummy implementation of the RSA cryptosystem. 5 | pub struct RSA { 6 | e: BigUint, 7 | d: BigUint, 8 | n: BigUint, 9 | } 10 | 11 | impl RSA { 12 | pub fn new(p: BigUint, q: BigUint) -> Self { 13 | let one = BigUint::from_u8(1).unwrap(); 14 | let n = p.clone() * q.clone(); 15 | let phi_n = (p - one.clone()) * (q - one.clone()); 16 | 17 | let e = BigUint::from_u32(65537).unwrap(); // 0x10001 18 | 19 | // e * d = 1 (mod phi(n)) 20 | let e_inv = e.modinv(&phi_n.clone()).unwrap(); 21 | assert!( 22 | (e.clone() * e_inv.clone()) % phi_n.clone() == one, 23 | "not inverse" 24 | ); 25 | 26 | RSA { e, n, d: e_inv } 27 | } 28 | 29 | pub fn public_key(&self) -> (&BigUint, &BigUint) { 30 | (&self.e, &self.n) 31 | } 32 | 33 | pub fn secret_key(&self) -> &BigUint { 34 | &self.d 35 | } 36 | 37 | pub fn encrypt(&self, m: BigUint) -> BigUint { 38 | m.modpow(&self.e, &self.n) 39 | } 40 | 41 | pub fn decrypt(&self, c: BigUint) -> BigUint { 42 | c.modpow(&self.d, &self.n) 43 | } 44 | } 45 | 46 | #[cfg(test)] 47 | mod tests { 48 | use super::*; 49 | 50 | #[test] 51 | fn test_rsa() { 52 | let p = BigUint::from_u32(61).unwrap(); 53 | let q = BigUint::from_u32(53).unwrap(); 54 | let rsa = RSA::new(p, q); 55 | 56 | let m = BigUint::from_u32(42).unwrap(); 57 | assert_eq!(m, rsa.decrypt(rsa.encrypt(m.clone()))); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /exercises/shamir-secret-share/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "shamir-secret-share" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lambdaworks-math.workspace = true 10 | lambdaworks-crypto.workspace = true 11 | rand.workspace = true 12 | -------------------------------------------------------------------------------- /exercises/shamir-secret-share/README.md: -------------------------------------------------------------------------------- 1 | # Shamir's Secret Sharing 2 | 3 | Shami's Secret Sharing is a cryptographic technique that allows a secret to be split into 
multiple parts, called _shares_, in such a way that the secret can only be reconstructed when a sufficient number of shares are combined together. This is a form of threshold cryptography, where the secret is divided into $n$ shares, and the secret can be reconstructed only when $k$ shares are combined together. 4 | 5 | Consider a secret $s$ that we want to split into $n$ shares such that the secret can be reconstructed when $k$ shares are combined together. The secret sharing scheme works as follows: 6 | 7 | 1. The secret $s$ is represented as a polynomial $f(x) = s + a_1x + a_2x^2 + \ldots + a_{k-1}x^{k-1}$ of degree $k-1$. 8 | 9 | 2. $n$ points $(x_1, f(x_1)), (x_2, f(x_2)), \ldots, (x_n, f(x_n))$ are chosen on the polynomial $f(x)$, where $x_1, x_2, \ldots, x_n$ are distinct. 10 | 11 | 3. The shares are given to the participants, where each share has the form: $(x_i, f(x_i))$. 12 | 13 | 4. To reconstruct the secret, a participant must have at least $k$ shares. The secret can be reconstructed by interpolating (via Lagrange Interpolation) the polynomial $f(x)$ using the $k$ shares. 14 | 15 | 5. The secret $s$ can be reconstructed by evaluating the polynomial $f(x)$ at $x = 0$ to obtain $f(0) = s$. 16 | 17 | ## Usage 18 | 19 | The API is rather straightforward: 20 | 21 | ```rs 22 | // n shares, with k shares required to reconstruct the secret 23 | let shamir = ShamirSecretShare::::new(n, k); 24 | 25 | // create shares from secret 26 | let shares = shamir.create_shares(secret); 27 | assert_eq!(shares.len(), n); 28 | 29 | // reconstruct the secret from a subset of shares 30 | let reconstructed_secret = shamir.reconstruct_secret(shares); 31 | assert_eq!(reconstructed_secret, secret); 32 | ``` 33 | -------------------------------------------------------------------------------- /exercises/shamir-secret-share/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use lambdaworks_math::{ 4 | field::{element::FieldElement, traits::IsField}, 5 | polynomial::Polynomial, 6 | }; 7 | 8 | pub struct ShamirSecretShare { 9 | n: usize, // num shares to create 10 | k: usize, // needed number of shares to reconstruct 11 | phantom: PhantomData, // to make F part of this struct 12 | } 13 | 14 | /// A share is just an evaluation point on the polynomial, i.e. `p(x) = y`. 15 | #[derive(Clone, Debug)] 16 | pub struct Share { 17 | pub x: FieldElement, 18 | pub y: FieldElement, 19 | } 20 | 21 | impl ShamirSecretShare { 22 | pub fn new(n: usize, k: usize) -> Self { 23 | assert!(n > k); 24 | Self { 25 | n, 26 | k, 27 | phantom: PhantomData, 28 | } 29 | } 30 | 31 | /// Given a secret, creates a set of shares: 32 | /// 33 | /// 1. Create a random polynomial of degree `k-1` with the secret as the constant term. 34 | /// 2. Evaluate the polynomial at `n` random points to create `n` shares. 35 | /// 3. Return the shares. 
36 | pub fn create_shares(&self, secret: FieldElement) -> Vec> { 37 | let xs = (0..=self.n) 38 | .map(|i| { 39 | if i == 0 { 40 | FieldElement::::zero() 41 | } else { 42 | FieldElement::::from(rand::random::()) 43 | } 44 | }) 45 | .collect::>(); 46 | 47 | let mut ys = (0..self.k) 48 | .map(|i| { 49 | if i == 0 { 50 | secret.clone() 51 | } else { 52 | FieldElement::::from(rand::random::()) 53 | } 54 | }) 55 | .collect::>(); 56 | 57 | // interpolate from k points 58 | let poly = Polynomial::interpolate(&xs.as_slice()[..self.k], &ys).unwrap(); 59 | 60 | // create additional shares 61 | let ys_extra = (self.k..=self.n) 62 | .map(|i| poly.evaluate(&xs[i])) 63 | .collect::>(); 64 | ys.extend(ys_extra); 65 | 66 | // return as Share objects 67 | xs.into_iter() 68 | .zip(ys) 69 | .skip(1) // skip the secret itself 70 | .map(|(x, y)| Share { x, y }) 71 | .collect() 72 | } 73 | 74 | /// Given a set of shares, reconstructs the secret. 75 | /// 76 | /// 1. Use Lagrange interpolation to reconstruct the polynomial. 77 | /// 2. Evaluate the polynomial at `0` to get the secret. 78 | pub fn reconstruct_secret(&self, shares: Vec>) -> FieldElement { 79 | assert!(shares.len() >= self.k, "not enough shares"); 80 | let xs = shares.iter().map(|s| s.x.clone()).collect::>(); 81 | let ys = shares.iter().map(|s| s.y.clone()).collect::>(); 82 | 83 | let p = Polynomial::interpolate(&xs, &ys).expect("should interpolate"); 84 | 85 | p.evaluate(&FieldElement::::zero()) 86 | } 87 | } 88 | 89 | #[cfg(test)] 90 | mod tests { 91 | use super::*; 92 | use lambdaworks_math::field::fields::u64_goldilocks_field::Goldilocks64Field; 93 | 94 | type F = Goldilocks64Field; 95 | 96 | fn test_shamir_secret_share(n: usize, k: usize) { 97 | let shamir = ShamirSecretShare::::new(n, k); 98 | 99 | // create shares from secret 100 | let secret = FieldElement::::from(rand::random::()); 101 | let shares = shamir.create_shares(secret.clone()); 102 | assert_eq!(shares.len(), n); 103 | 104 | // reconstruct the secret from a subset of shares 105 | let subset_shares = shares.into_iter().take(k).collect::>(); 106 | let reconstructed_secret = shamir.reconstruct_secret(subset_shares); 107 | assert_eq!(reconstructed_secret, secret); 108 | } 109 | 110 | #[test] 111 | fn test_n3_k2() { 112 | test_shamir_secret_share(3, 2); 113 | } 114 | 115 | #[test] 116 | fn test_n5_k3() { 117 | test_shamir_secret_share(5, 3); 118 | } 119 | 120 | #[test] 121 | fn test_n7_k4() { 122 | test_shamir_secret_share(7, 4); 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /exercises/sumcheck/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sumcheck" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lambdaworks-math.workspace = true 10 | lambdaworks-crypto.workspace = true 11 | rand.workspace = true 12 | log.workspace = true 13 | env_logger.workspace = true 14 | csv = "1.1" 15 | -------------------------------------------------------------------------------- /exercises/sumcheck/README.md: -------------------------------------------------------------------------------- 1 | # Sumcheck Protocol 2 | 3 | Consider a $n$-variate polynomial $g(x_1, x_2, \ldots, x_n)$ of degree $d$ over $\mathbb{F}$, and a set $S = \{0, 1\}^n$ also denoted as a Boolean hypercube. 
We are interested in the following sum: 4 | 5 | $$ 6 | \sum_{(x_1, x_2, \ldots, x_n)\in\{0, 1\}^n}g(x_1, x_2, \ldots, x_n) = H 7 | $$ 8 | 9 | A naive proof would be to send the polynomial to the verifier and have him evaluate the polynomial at all $2^n$ points and have it sum them up. However, this is not efficient. Instead, we can use the sumcheck protocol to prove the sum in a more efficient manner. 10 | 11 | ### First Round 12 | 13 | The prover computes the sum $H$ and it sends a value $C_1$ that is claimed to equal $H$. Along with that, the prover sends a univariate polynomial $g_1(X_1)$ that is claimed to equal: 14 | 15 | $$ 16 | \sum_{(x_2, x_3, \ldots, x_n)\in\{0, 1\}^{n-1}}g(X_1, x_2, \ldots, x_n) 17 | $$ 18 | 19 | The verifier checks that $g_1(0) + g_1(1) = C_1$ and that $g_1$ is a univariate polynomial of degree at most $\deg_1(g)$ (the degree of the term $X_1$ in the $n$-variate $g$ polynomial). If the check passes, the verifier sends a random challenge $r_1$ to the prover. 20 | 21 | ### Middle Rounds 22 | 23 | In each intermediate round $j$, the prover sends a univariate polynomial $g_j(X_j)$ which it claims to equal: 24 | 25 | $$ 26 | \sum_{(x_{j+1}, \ldots, x_n)\in\{0, 1\}^{n-j}}g(r_1, r_2, \ldots, r_{j-1}, X_j, x_{j+1}, \ldots, x_n) 27 | $$ 28 | 29 | The verifier checks that $g_j(0) + g_j(1) = g_{j-1}(r_{j-1})$ and that $g_1$ is a univariate polynomial of degree at most $\deg_j(g)$ (the degree of the term $X_j$ in the $n$-variate $g$ polynomial). If the check passes, the verifier sends a random challenge $r_j$ to the prover. 30 | 31 | > The first round can be seen as a special case of the middle round, where instead of a univariate polynomial we have a constant polynomial for $g_0$. 32 | 33 | ### Final Round 34 | 35 | With the end of last "middle" round the prover had sent the polynomial $g_n(X_n)$ that is claimed to equal $g(r_1, r_2, \ldots, X_n)$, and the verifier checked it as described above. Then, we ended up with $n$ random values $(r_1, r_2, \ldots, r_n)$. Finally, the verifier makes an oracle query to the polynomoial $g$ itself to compute: 36 | 37 | $$ 38 | g(r_1, r_2, \ldots, r_n) 39 | $$ 40 | 41 | The verifier checks that: 42 | 43 | $$ 44 | g_n(r_n) = g(r_1, r_2, \ldots, r_n) 45 | $$ 46 | 47 | If all checks pass, the verifier accepts the proof. 48 | 49 | ## Usage 50 | 51 | Sumcheck takes in a `DenseMultilinearPolynomial` created from evaluations of a function, meaning that a multi-linear extension (MLE) takes place from those evaluations. 52 | 53 | The struct simple has a `prove` function, which returns a `SumCheckProof` struct that has a `verify` function. Verification panics if the proof is invalid. 54 | 55 | ```rs 56 | // assuming `evals` exist 57 | let poly = DenseMultilinearPolynomial::new(evals); 58 | 59 | // create proof 60 | let sumcheck = SumCheck::new(poly); 61 | let proof = sumcheck.prove(); 62 | 63 | // verify proof 64 | proof.verify(); 65 | ``` 66 | 67 | ## Implementation 68 | 69 | The `prove` function is rather straightforward, we begin with the first interpolation and the computation for $C_1$, then we proceed with the middle rounds and finally the final round. 70 | 71 | Within the loop, we check the results of previous round (i.e. $g_{j-1}(r_{j-1}) = g_j(0) + g_j(1)$) and the degree of the polynomial. Then, a random variable is sampled and we proceed to the next round. 72 | 73 | If the round is not final, a new polynomial is interpolated with the given random variables. 
At the final round, the last check $g_n(r_n) = g(r_1, r_2, \ldots, r_n)$ is performed. 74 | 75 | Prover does all the verifier checks during proof creation for sanity, and the proof simply contains the interpolated polynomials. The random variables are obtained from the transcript, so they are not stored in the proof. 76 | 77 | > [!NOTE] 78 | > 79 | > This implementation is rather naive, especially within the univariate interpolations. For example, suppose we have $g(x_1, x_2, x_3)$ and we would like to interpolate $g_1(X_1) = g(X_1, 0, 0) + g(X_1, 0, 1) + g(X_1, 1, 0) + g(X_1, 1, 1)$. This is done via 4 different Lagrange interpolations, one for each term, and for each interpolation we evaluate it at 2 points (because we are ok with a degree 1 polynomial). It is possible to reuse some of these interpolations within the recursive steps, but I did not bother with that here. 80 | > 81 | > As a result, if you try with a high number of variables, this may take a while to compute. 82 | -------------------------------------------------------------------------------- /exercises/sumcheck/examples/adjacency_matrix.csv: -------------------------------------------------------------------------------- 1 | 0,1,0,0,0,0,0,0,0,1,0,0,0,0,1,0 2 | 1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0 3 | 0,0,0,0,1,0,0,1,0,0,0,1,1,0,1,1 4 | 0,1,0,0,1,1,0,0,1,0,0,0,1,1,0,0 5 | 0,0,1,1,0,0,0,1,0,1,0,1,0,0,0,0 6 | 0,0,0,1,0,0,1,0,0,1,1,0,0,1,1,0 7 | 0,0,0,0,0,1,0,0,0,1,1,0,1,0,0,0 8 | 0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0 9 | 0,0,0,1,0,0,0,1,0,1,1,0,0,1,0,0 10 | 1,0,0,0,1,1,1,0,1,0,0,1,0,0,0,0 11 | 0,0,0,0,0,1,1,0,1,0,0,0,1,0,0,0 12 | 0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,1 13 | 0,0,1,1,0,0,1,0,0,0,1,0,0,1,0,1 14 | 0,1,0,1,0,1,0,0,1,0,0,0,1,0,0,0 15 | 1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0 16 | 0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,0 17 | -------------------------------------------------------------------------------- /exercises/sumcheck/examples/graph_triangles.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | use std::error::Error; 3 | use csv::ReaderBuilder; 4 | 5 | use lambdaworks_math::field::element::FieldElement; 6 | use lambdaworks_math::field::fields::u64_prime_field::U64PrimeField; 7 | use lambdaworks_math::polynomial::dense_multilinear_poly::DenseMultilinearPolynomial; 8 | use sumcheck::sumcheck::SumCheck; 9 | 10 | const FIELD_MODULUS: u64 = 65537; 11 | type F = U64PrimeField; 12 | type FE = FieldElement; 13 | 14 | fn read_matrix_from_csv(file_path: &str) -> Result>, Box> { 15 | let mut reader = ReaderBuilder::new().has_headers(false).from_path(file_path)?; 16 | let mut matrix = Vec::new(); 17 | 18 | for result in reader.records() { 19 | let record = result?; 20 | let row: Vec = record.iter() 21 | .map(|s| { 22 | match s.parse::() { 23 | Ok(v) => FE::from(v), 24 | Err(_) => panic!("Unable to parse field matrix from file.") 25 | } 26 | }) 27 | .collect(); 28 | matrix.push(row); 29 | } 30 | 31 | Ok(matrix) 32 | } 33 | 34 | fn main() -> Result<(), Box> { 35 | env::set_var("RUST_LOG", "debug"); 36 | env_logger::init(); 37 | 38 | let file_path = "examples/adjacency_matrix.csv"; 39 | let adj_matrix = read_matrix_from_csv(file_path)?; 40 | 41 | let num_vertices = adj_matrix.len(); // number of vertices 42 | assert!(num_vertices.is_power_of_two(), "The number of vertices must be a power of two"); 43 | // check that chosen field is large enough for the counting triangles problem 44 | // (2023 - Thaler - Proof, Arguments and Zero-Knowledge, Section 4.3, page 44) 45 | assert!(FIELD_MODULUS > 6 * 
(num_vertices.pow(3) as u64), "Field is not large enough."); 46 | 47 | let num_bits = num_vertices.trailing_zeros() as usize; 48 | let num_vars = 3 * num_bits; // number of variables 49 | let num_evals = 1 << num_vars; // number of evaluations 50 | log::debug!("Num. of vertices: {}", num_vertices); 51 | log::debug!("Num. of variables: {}", num_vars); 52 | log::debug!("Num. of evaluations: {}", num_evals); 53 | 54 | log::info!("Getting evaluations of g over the boolean hypercube"); 55 | let evals = (0..num_evals) 56 | .map(|i| { 57 | let x = i / num_vertices.pow(2); 58 | let y = (i % num_vertices.pow(2)) / num_vertices; 59 | let z = i % num_vertices; 60 | let g = adj_matrix[x][y] * adj_matrix[y][z] * adj_matrix[z][x]; 61 | log::debug!( 62 | "g({:?}, {:?}, {:?}) = {:?}", 63 | i / num_vertices.pow(2), (i % num_vertices.pow(2)) / num_vertices , i % num_vertices, 64 | g.representative() 65 | ); 66 | 67 | g 68 | }) 69 | .collect::>(); 70 | 71 | // get number of triangles. The sum of g over the hypercube is divided by the number of permutations (6) 72 | let num_triangles = evals.iter().fold(FE::zero(), |acc, g| acc + g) / FE::from(6); 73 | log::info!( 74 | "The number of triangles in the graph is: {:?}", 75 | num_triangles.representative() 76 | ); 77 | 78 | // create a dense multilienar poly from the evaluations 79 | let poly = DenseMultilinearPolynomial::new(evals); 80 | assert_eq!(poly.len(), num_evals); 81 | assert_eq!(poly.num_vars(), num_vars); 82 | 83 | // create sumcheck proof 84 | let sumcheck = SumCheck::new(poly); 85 | let proof = sumcheck.prove(); 86 | 87 | // verify proof 88 | proof.verify(); 89 | 90 | Ok(()) 91 | } -------------------------------------------------------------------------------- /exercises/sumcheck/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod sumcheck; 2 | pub mod utils; 3 | -------------------------------------------------------------------------------- /exercises/sumcheck/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use lambdaworks_math::field::element::FieldElement; 4 | use lambdaworks_math::field::fields::u64_prime_field::U64PrimeField; 5 | use lambdaworks_math::polynomial::dense_multilinear_poly::DenseMultilinearPolynomial; 6 | use sumcheck::sumcheck::SumCheck; 7 | use sumcheck::utils::to_binary_felts; 8 | 9 | type F = U64PrimeField<17>; 10 | type FE = FieldElement; 11 | 12 | // A 3-variate poly x_1*x_2*x_3 + 2*x_2 + 3*x_1^2 + x_2^4*x_3 + 5*x_1*x_2 + 2*x_3 13 | fn g(xs: Vec) -> FE { 14 | vec![ 15 | // x_1*x_2*x_3 16 | xs[0].clone() * xs[1].clone() * xs[2].clone(), 17 | // 2*x_2 18 | FE::from(2) * xs[1].clone(), 19 | // 3*x_1^2 20 | FE::from(3) * xs[0].pow(2u64), 21 | // x_2^4*x_3 22 | xs[1].pow(4_u64) * xs[2].clone(), 23 | // 5*x_1*x_2 24 | FE::from(5) * xs[0].clone() * xs[1].clone(), 25 | // 2*x_3 26 | FE::from(2) * xs[2].clone(), 27 | ] 28 | .iter() 29 | .fold(FE::zero(), |acc, y| acc + y) 30 | } 31 | 32 | // MLE of the polynomial above, redundant terms written for clarity 33 | // I handwrite this to show in clear how MLE works 34 | fn g_mle(xs: Vec) -> FE { 35 | #[inline(always)] 36 | fn _1(x: &FE) -> FE { 37 | x.clone() 38 | } 39 | #[inline(always)] 40 | fn _0(x: &FE) -> FE { 41 | FE::one() - x.clone() 42 | } 43 | 44 | vec![ 45 | _0(&xs[0]) * _0(&xs[1]) * _0(&xs[2]) * FE::from(00), // (000): -> 0 46 | _0(&xs[0]) * _0(&xs[1]) * _1(&xs[2]) * FE::from(02), // (001): -> 2 47 | _0(&xs[0]) * _1(&xs[1]) * _0(&xs[2]) * FE::from(02), 
// (010): -> 2 48 | _0(&xs[0]) * _1(&xs[1]) * _1(&xs[2]) * FE::from(05), // (011): -> 5 49 | _1(&xs[0]) * _0(&xs[1]) * _0(&xs[2]) * FE::from(03), // (100): -> 3 50 | _1(&xs[0]) * _0(&xs[1]) * _1(&xs[2]) * FE::from(05), // (101): -> 5 51 | _1(&xs[0]) * _1(&xs[1]) * _0(&xs[2]) * FE::from(10), // (110): -> 10 52 | _1(&xs[0]) * _1(&xs[1]) * _1(&xs[2]) * FE::from(14), // (111): -> 14 53 | ] 54 | .iter() 55 | .fold(FE::zero(), |acc, y| acc + y) 56 | } 57 | 58 | fn main() { 59 | env::set_var("RUST_LOG", "debug"); 60 | env_logger::init(); 61 | 62 | const NUM_VARS: usize = 3; // number of variables 63 | const NUM_EVALS: usize = 1 << NUM_VARS; // number of evaluations 64 | 65 | log::info!("Evaluating g over the boolean hypercube"); 66 | let evals = (0..NUM_EVALS) 67 | .map(|i| { 68 | let xs = to_binary_felts(i, NUM_VARS); 69 | let y = g(xs.clone()); 70 | assert_eq!(y, g_mle(xs.clone()), "g_mle and g differ"); 71 | log::debug!( 72 | "g({}) = g({},{},{}) = {}", 73 | i, 74 | xs[0].representative(), 75 | xs[1].representative(), 76 | xs[2].representative(), 77 | y.representative() 78 | ); 79 | 80 | y 81 | }) 82 | .collect(); 83 | 84 | // create a dense multilienar poly from the evaluations 85 | let poly = DenseMultilinearPolynomial::new(evals); 86 | assert_eq!(poly.len(), NUM_EVALS); 87 | assert_eq!(poly.num_vars(), NUM_VARS); 88 | 89 | // create sumcheck proof 90 | let sumcheck = SumCheck::new(poly); 91 | let proof = sumcheck.prove(); 92 | 93 | // verify proof 94 | proof.verify(); 95 | } 96 | -------------------------------------------------------------------------------- /exercises/sumcheck/src/sumcheck.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_snake_case)] 2 | 3 | use lambdaworks_crypto::fiat_shamir::is_transcript::IsTranscript; 4 | use lambdaworks_math::{ 5 | field::{element::FieldElement as FE, traits::IsField}, 6 | polynomial::{dense_multilinear_poly::DenseMultilinearPolynomial, Polynomial}, 7 | traits::ByteConversion, 8 | }; 9 | 10 | use crate::utils::{create_transcript, to_binary_felts}; 11 | 12 | /// A proof for the SumCheck protocol. 
13 | pub struct SumCheckProof 14 | where 15 | ::BaseType: Send + Sync, 16 | { 17 | g: DenseMultilinearPolynomial, 18 | polys: Vec>>, 19 | sum: FE, 20 | } 21 | 22 | impl SumCheckProof 23 | where 24 | ::BaseType: Send + Sync, 25 | FE: ByteConversion, 26 | { 27 | pub fn verify(&self) { 28 | let mut transcript = create_transcript(&self.g); 29 | let (one, zero) = (FE::::one(), FE::::zero()); 30 | 31 | // first check the sum itself 32 | log::debug!("Verifying round 1"); 33 | assert_eq!( 34 | self.sum, 35 | self.polys[0].evaluate(&zero) + self.polys[0].evaluate(&one) 36 | ); 37 | 38 | // then check intermediate rounds 39 | let mut rs = Vec::new(); 40 | 41 | for i in 1..self.g.num_vars() { 42 | log::info!("Verifying round {}", i + 1); 43 | log::debug!( 44 | "Checking g_{}(r_{}) = g_{}(0) + g_{}(1)", 45 | i, 46 | i, 47 | i + 1, 48 | i + 1 49 | ); 50 | 51 | let r = transcript.sample_field_element(); 52 | assert_eq!( 53 | self.polys[i - 1].evaluate(&r), 54 | self.polys[i].evaluate(&zero) + self.polys[i].evaluate(&one), 55 | ); 56 | rs.push(r); 57 | } 58 | 59 | // check final round 60 | log::info!("Verifying final round {}", self.polys.len()); 61 | let r = transcript.sample_field_element(); 62 | rs.push(r.clone()); 63 | assert_eq!( 64 | self.polys.last().unwrap().evaluate(&r), 65 | self.g.evaluate(rs).unwrap() 66 | ); 67 | 68 | log::debug!("Verification complete."); 69 | } 70 | } 71 | 72 | /// This struct will apply the SumCheck protocol prover using a given polynomial, along with a 73 | /// verifier instantiated using the transcript (Fiat-Shamir transform). 74 | pub struct SumCheck 75 | where 76 | ::BaseType: Send + Sync, 77 | { 78 | /// Multilinear polynomial to be Sumchecked. 79 | g: DenseMultilinearPolynomial, 80 | /// Sum of the polynomial evaluations. 81 | sum: FE, 82 | } 83 | 84 | impl SumCheck 85 | where 86 | ::BaseType: Send + Sync, 87 | FE: ByteConversion, 88 | { 89 | pub fn new(g: DenseMultilinearPolynomial) -> Self { 90 | log::info!( 91 | "Sumcheck starting for {}-variate multilinear polynomial", 92 | g.num_vars() 93 | ); 94 | 95 | let sum = g.evals().iter().fold(FE::::zero(), |acc, y| acc + y); 96 | 97 | Self { g, sum } 98 | } 99 | 100 | // Run the initialization round and return the claimed sum check value 101 | pub fn prove(&self) -> SumCheckProof { 102 | let mut transcript = create_transcript(&self.g); 103 | let mut round = 1usize; 104 | log::info!("Round: {}", round); 105 | 106 | let (one, zero) = (FE::::one(), FE::::zero()); 107 | 108 | // first polynomial has no random variables 109 | let mut polys = vec![self.interpolate(&vec![])]; 110 | let mut last_poly_name = "g_1".to_string(); 111 | 112 | // first check is made against the sum itself 113 | let mut check = self.sum.clone(); 114 | let mut check_name = "C_1".to_string(); 115 | 116 | // we will collect all random variables to be used in the final check 117 | let mut random_vars = vec![]; 118 | while random_vars.len() <= self.g.num_vars() { 119 | let last_poly = polys.last().unwrap(); 120 | 121 | // verifier checks the sum & degree 122 | log::info!( 123 | "Checking {} = {}(0) + {}(1)", 124 | check_name, 125 | last_poly_name, 126 | last_poly_name 127 | ); 128 | assert_eq!(check, last_poly.evaluate(&zero) + last_poly.evaluate(&one)); 129 | assert!(last_poly.degree() <= 1, "degree should be at most 1"); 130 | 131 | // verifier adds a random query 132 | let r = transcript.sample_field_element(); 133 | 134 | log::debug!("Evaluating {} at r_{}", last_poly_name, round); 135 | check = last_poly.evaluate(&r); // check is updated to 
g_{j-1}(r_{j-1}) 136 | check_name = format!("g_{}(r_{})", round, round); 137 | random_vars.push(r); // random query is added to history 138 | 139 | // when a round is done, and the random variable is added, 140 | // the number of polynomials and random variables should match 141 | assert_eq!(random_vars.len(), polys.len()); 142 | round += 1; 143 | 144 | if random_vars.len() == self.g.num_vars() { 145 | // we have all random variables we need, we can make the final check 146 | log::info!("Round: Final"); 147 | log::info!( 148 | "Checking g_{}(r_{}) = g(r_1, r_2, ..., r_n)", 149 | round - 1, 150 | round - 1 151 | ); 152 | let final_sum = self.g.evaluate(random_vars.clone()).unwrap(); // check is updated to g(r_1, r_2, ..., r_n) 153 | assert_eq!(final_sum, check); 154 | break; 155 | } else { 156 | // interpolation is made for the next fixed variable 157 | log::info!("Round: {}", round); 158 | last_poly_name = format!("g_{}", round); 159 | log::debug!("Interpolating g_{} for variable X_{}", round, round); 160 | polys.push(self.interpolate(&random_vars)); 161 | } 162 | } 163 | 164 | log::info!("Sumcheck completed successfully!"); 165 | assert_eq!(polys.len(), self.g.num_vars()); 166 | SumCheckProof { 167 | g: self.g.clone(), 168 | sum: self.sum.clone(), 169 | polys, 170 | } 171 | } 172 | 173 | /// Given a list of random variables, interpolate the polynomial at the next index. 174 | /// 175 | /// For instance, for `g(x_1, x_2, ..., x_n)` with random variables `r_1, r_2, ..., r_{k-1}` 176 | /// we interpolate a polynomial `g_k(X_k) = sum(g(r_1, r_2, ..., r_{k-1}, X_k, x_{k+1}, ..., x_n))` 177 | /// where `x_{k+1}, ..., x_n` are evaluated over 0s and 1s. 178 | /// 179 | /// As a concrete example, consider `g(x_1, x_2, x_3)` with random variable `r_1`. This function will interpolate 180 | /// a univariate polynomial `g_2(X_2) = g(r_1, X_2, 0) + g(r_1, X_2, 1)`. 181 | /// 182 | /// There are probably clever ways to do this, but here we are working with MLE's so all terms have degree 1. With that, we only need 2 evaluations 183 | /// for every term in the sum to interpolate the polynomial for a term, and we can sum all polys. 184 | pub fn interpolate(&self, rs: &Vec>) -> Polynomial> { 185 | // we need (0, 1) pair for each input besides the fixed term & random variables. 186 | let num_vars = self.g.num_vars() - rs.len() - 1; 187 | 188 | // iterate over all combinations of 0s and 1s for the remaining variables 189 | // interpolate the polynomial for each setting, and sum them all 190 | (0..1 << num_vars) 191 | .map(|i| { 192 | // convert `i` to 0s and 1s 193 | let xs = to_binary_felts(i, num_vars); 194 | 195 | // to interpolate the currently fixed setting, e.g. 
g'(X) = g(rs..., X, xs...), we first need to evaluate at some points 196 | // just 0 and 1 is enough because all terms are degree 1 in each variable (due to MLE) 197 | let eval_xs = vec![FE::::zero(), FE::::one()]; 198 | let eval_ys = eval_xs 199 | .clone() 200 | .into_iter() 201 | .map(|X| { 202 | // prepare parameters 203 | let mut inputs = rs.clone(); 204 | inputs.push(X); 205 | inputs.extend(xs.clone()); 206 | 207 | // evaluate the polynomial 208 | self.g.evaluate(inputs).unwrap() 209 | }) 210 | .collect::>(); 211 | 212 | // interpolate the univariate polynomial using these evaluations 213 | let poly = Polynomial::interpolate(&eval_xs, &eval_ys).unwrap(); 214 | assert!(poly.degree() <= 1, "degree must be at most 1"); 215 | 216 | poly 217 | }) 218 | .fold(Polynomial::zero(), |acc, poly| acc + poly) 219 | } 220 | } 221 | 222 | #[cfg(test)] 223 | mod tests { 224 | use crate::utils::random_evals; 225 | use lambdaworks_math::field::fields::u64_prime_field::U64PrimeField; 226 | 227 | use super::*; 228 | 229 | const ORDER: u64 = 17; 230 | type F = U64PrimeField; 231 | 232 | fn run_test(n: usize) { 233 | let evals = random_evals::(n); 234 | assert_eq!(evals.len(), 1 << n); 235 | 236 | let poly = DenseMultilinearPolynomial::new(evals); 237 | assert_eq!(poly.num_vars(), n); 238 | 239 | let sumcheck = SumCheck::new(poly); 240 | let proof = sumcheck.prove(); 241 | 242 | proof.verify(); 243 | } 244 | 245 | #[test] 246 | fn test_2_vars() { 247 | run_test(2); 248 | } 249 | 250 | #[test] 251 | fn test_3_vars() { 252 | run_test(3); 253 | } 254 | 255 | #[test] 256 | fn test_7_vars() { 257 | run_test(7); 258 | } 259 | } 260 | -------------------------------------------------------------------------------- /exercises/sumcheck/src/utils.rs: -------------------------------------------------------------------------------- 1 | use lambdaworks_crypto::fiat_shamir::default_transcript::DefaultTranscript; 2 | use lambdaworks_math::{ 3 | field::{element::FieldElement, traits::IsField}, 4 | polynomial::dense_multilinear_poly::DenseMultilinearPolynomial, 5 | traits::ByteConversion, 6 | }; 7 | 8 | /// Given a number `n`, return a vector of `len` binary values in the field. 9 | pub fn to_binary_felts(n: usize, len: usize) -> Vec> 10 | where 11 | FieldElement: ByteConversion, 12 | { 13 | (0..len) 14 | .map(|b| { 15 | if n & (1 << b) != 0 { 16 | FieldElement::::one() 17 | } else { 18 | FieldElement::::zero() 19 | } 20 | }) 21 | .rev() 22 | .collect() 23 | } 24 | 25 | /// Generate random evaluations for a given number of variables. 26 | pub fn random_evals(num_vars: usize) -> Vec> { 27 | (0..1 << num_vars) 28 | .map(|_| FieldElement::::from(rand::random::())) 29 | .collect() 30 | } 31 | 32 | /// Creates a transcript given a multilinear polynomial. 33 | /// 34 | /// It uses the evaluation values as initial randomness. 
35 | pub fn create_transcript(poly: &DenseMultilinearPolynomial) -> DefaultTranscript 36 | where 37 | ::BaseType: Send + Sync, 38 | FieldElement: ByteConversion, 39 | { 40 | // use the polynomial evaluations to initialize the transcript 41 | let init_bytes = poly 42 | .evals() 43 | .iter() 44 | .flat_map(|y| y.to_bytes_be()) 45 | .collect::>(); 46 | 47 | DefaultTranscript::new(&init_bytes) 48 | } 49 | -------------------------------------------------------------------------------- /exercises/vault-of-loki/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "vault-of-loki" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lambdaworks-math.workspace = true 10 | lambdaworks-crypto.workspace = true 11 | rand.workspace = true 12 | -------------------------------------------------------------------------------- /exercises/vault-of-loki/README.md: -------------------------------------------------------------------------------- 1 | # [Challenge 2: Vault of Loki](https://github.com/lambdaclass/lambdaworks/tree/main/exercises/challenge_2) 2 | 3 | The challenge is given as below: 4 | 5 | > # Breaking into the vault of Loki 6 | > 7 | > After years of careful investigation, you have reached the gate to Loki's vault in the icy mountains of Norway, where it is said that many great treasures and powerful weapons are hidden. The gate seems unbreakable, but you spot some ancient machinery with inscriptions in old runes. After some help from ChatGPT, you are able to translate the symbols and the whole message into modern English, and it reads: 8 | > 9 | > If you can prove that the polynomial 10 | > 11 | > $$ 12 | > \begin{aligned} 13 | > p(x) &= 69 +78x + 32x^2 + 65x^3 + 82x^4 + 71x^5 + 69x^6 + 78x^7 + 84x^8 + 73x^9 \newline &+78x^{10} + 65x^{11} + 32x^{12} + 78x^{13} + 65x^{14}+ 67x^{15} + 73x^{16} + 32x^{17} \newline 14 | > &+ 84x^{18} + 73x^{19} + 69x^{20} + 82x^{21} + 82x^{22} + 65 x^{23} 15 | > \end{aligned} 16 | > $$ 17 | > 18 | > is equal to $3$ at $x = 1$ modulo the BLS12-381 $r$ parameter, then the gate will open. 19 | > 20 | > Below is a long list of bytes representing the SRS that can be used to perform KZG commitments. The machinery, after careful examination, performs the KZG verification using pairings. There is only one open place where you can place a wooden tablet with your answer, comprising 48 bytes. You guess this should be the proof of the KZG scheme, providing the point in compressed form, following the ZCash standard. The other elements contain the commitment to $p(x)$, the desired value $3$, and the point $x=1$. You ask ChatGPT for enlightenment, but it suddenly collapses and only shows the message: fatal error. Is this just an impossible task? Perhaps there is some trick to get by Loki's challenge... 21 | 22 | ## Solution 23 | 24 | It appears that we need to be making a fake proof using KZG, which is possible when you know the toxic waste. This brings the question: can we find something within the SRS that perhaps gives away the toxic waste? Now, toxic waste is just a scalar; the error itself probably has to do with the generator point picked for the curve. 25 | 26 | When we look at the points within SRS, it appears that the points start to repeat at some point! In fact, at precisely every 64 elements, the points repeat. 
This means that for some generator $g$ and secret $s$ we have $g = s^{64}g$. Since $g$ is not the point at infinity (it has order $r$), this gives: 27 | 28 | $$ 29 | g = s^{64}g \implies s^{64} \equiv 1 \pmod{r} 30 | $$ 31 | 32 | Our secret is actually a primitive 64th root of unity in the scalar field! It's primitive because otherwise we would see the points repeat earlier. Once we find a candidate value for the primitive 64th root of unity, we can simply check if $g$ times the candidate equals the second point in the SRS. If it does, we have found the toxic waste! 33 | 34 | ### Finding the Primitive Root 35 | 36 | To find a primitive 64th root of unity, we can simply brute-force it via cofactor clearing. Once we have the toxic waste, we can generate a fake proof and open the gate to Loki's vault. 37 | 38 | First, does 64 divide $r-1$, where $r$ is the order of BLS12-381's scalar field? The value of $r$ is: 39 | 40 | ```c 41 | r = 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001 42 | r-1 = 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000000 43 | ``` 44 | 45 | Clearly we can divide $r-1$ by 64: its low 32 bits are all zero, so we can right-shift it 6 times without losing any information. The quotient is then: 46 | 47 | ```c 48 | r-1 / 64 = 0x01CFB69D4CA675F520CCE76020268760154EF6900BFFF96FFBFFFFFFFC000000 49 | ``` 50 | 51 | Now consider a random nonzero $x$ in the field. By Fermat's little theorem, 52 | 53 | $$ 54 | (x^{(r-1)/64})^{64} = x^{r-1} = 1 55 | $$ 56 | 57 | So $x^{(r-1)/64}$ is a 64th root of unity. For a *primitive* $n$-th root $g$, we need $g^j \ne 1$ for every proper divisor $j$ of $n$. Since $n=64$ here, it suffices to ensure that $g^{32} \ne 1$: if $g^{16} = 1$ (or $g^{8} = 1$, and so on) then $g^{32} = 1$ as well, so checking 32 alone covers all smaller divisors. 58 | 59 | Here is the snippet: 60 | 61 | ```rs 62 | fn find_primitive_root() -> FrElement { 63 | loop { 64 | // random element within the scalar field of order r 65 | let g = FrElement::from(random::<u64>()); 66 | 67 | // (r - 1) / 64 68 | let cofactor: UnsignedInteger<6> = UnsignedInteger::from_hex_unchecked( 69 | "0x01CFB69D4CA675F520CCE76020268760154EF6900BFFF96FFBFFFFFFFC000000", 70 | ); 71 | 72 | // obtain root of unity via cofactor clearing 73 | let root = g.pow(cofactor); 74 | debug_assert_eq!(root.pow(64u64), FrElement::one()); 75 | 76 | // check that it's indeed primitive 77 | if root.pow(32u64) != FrElement::one() { 78 | return root; 79 | } 80 | } 81 | } 82 | ``` 83 | 84 | ### Finding the Secret 85 | 86 | When it comes to primitive roots, there is no "the" primitive root; there are many, and none is more primitive than the others. We can simply pick one and check whether it works. To do this, we look at the first two elements of the SRS in each group, which are $\{g, sg\}$. Once we find a candidate $s'$ with $s'g = sg$, we have recovered the "toxic waste", since equal points force $s' = s$.
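As a sanity check on the search space: every primitive 64th root of unity is of the form $\omega^k$ with $\gcd(k, 64) = 1$ for a fixed primitive root $\omega$, so there are

$$
\varphi(64) = 64 \cdot \left(1 - \frac{1}{2}\right) = 32
$$

of them. A uniformly sampled primitive root therefore matches the true $s$ with probability $1/32$ per draw, so the loop below is expected to finish after roughly 32 iterations.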
87 | 88 | Here is the snippet: 89 | 90 | ```rs 91 | fn find_toxic_waste(g1: &G1Point, sg1: &G1Point, g2: &G2Point, sg2: &G2Point) -> FrElement { 92 | // infinite loop, but we are SURE about this 93 | loop { 94 | // find a primitive root of unity 95 | let s = find_primitive_root(); 96 | 97 | // see if it matches the secret 98 | if g1.operate_with_self(s.representative()) == *sg1 99 | && g2.operate_with_self(s.representative()) == *sg2 100 | { 101 | return s; 102 | } 103 | } 104 | } 105 | ``` 106 | 107 | ### Faking the Proof 108 | 109 | Now that we know the toxic waste, we can generate the fake proof. Recall what a verifier checks in KZG: if a polynomial $P(x)$ evaluated at a point $z$ equals $v$, then: 110 | 111 | $$ 112 | P(x) - v = (x-z)Q(x) 113 | $$ 114 | 115 | for some polynomial $Q(x)$. Using commitments at the secret point $s$, we prove: 116 | 117 | $$ 118 | P(s) - v = (s-z)Q(s) 119 | $$ 120 | 121 | Normally, the $s$ on the right side comes from the $sg_2$ within the SRS, and the quotient polynomial and its commitment are computed explicitly. However, ours is a fake proof: since $P(1) \ne 3$, the polynomial $P(x) - v$ is not divisible by $(x-z)$, so no valid quotient exists. Instead, we will do: 122 | 123 | $$ 124 | (P(s) - v)(s-z)^{-1} = Q(s) 125 | $$ 126 | 127 | By knowing the secret $s$ we can make it seem as if we had committed to a valid quotient polynomial! This works because, in the end, the commitment is just a scalar multiplication anyway. Our fake proof is to show that $P(1) = 3$, which means: 129 | 129 | $$ 130 | (P(s) - 3)(s-1)^{-1} = Q(s) 131 | $$ 132 | 133 | We can compute this $Q(s)$ without having $Q$ at all; the left-hand side of the equation is enough. Once we compute $Q(s)$ that way, all that remains is to compute the commitment $Q(s)g_1$ (as if we had done an MSM). Again, this was only possible because we knew the secret $s$.
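To see why this passes verification, recall the KZG pairing check (sketched with the notation above, where $g_1, g_2$ are the group generators and $e$ is the pairing):

$$
e(\mathrm{com}(P) - v g_1,\ g_2) = e(\pi,\ s g_2 - z g_2)
$$

With $\pi = Q(s)g_1$ and $Q(s) = (P(s) - v)(s-z)^{-1}$, both sides reduce to $e(g_1, g_2)^{P(s) - v}$, so the machinery accepts even though no valid quotient polynomial exists. In code: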
134 | 135 | ```rs 136 | let (v, z) = (FrElement::from(3), FrElement::from(1)); 137 | 138 | // compute q(s) via the fake proof method = (P(s) - v) / (s - z) 139 | let q_s = (p.evaluate(&s) - v.clone()) * (s - z.clone()).inv().expect("should invert"); 140 | 141 | // find the commitment as g * q(s) 142 | // normally we would do MSM for this using SRS, but we know the toxic waste :) 143 | let q_commitment = g1.operate_with_self(q_s.representative()); 144 | 145 | let fake_proof = q_commitment; 146 | println!("Fake proof for submission:"); 147 | println!("{:?}", &fake_proof.to_affine().x().to_string()); 148 | println!("{:?}", &fake_proof.to_affine().y().to_string()); 149 | assert!(kzg.verify(&z, &v, &p_commitment, &fake_proof)); 150 | println!("Faked successfully!"); 151 | ``` 152 | -------------------------------------------------------------------------------- /exercises/vault-of-loki/src/main.rs: -------------------------------------------------------------------------------- 1 | use lambdaworks_crypto::commitments::{ 2 | kzg::{KateZaveruchaGoldberg, StructuredReferenceString}, 3 | traits::IsCommitmentScheme, 4 | }; 5 | use lambdaworks_math::{ 6 | cyclic_group::IsGroup, 7 | elliptic_curve::short_weierstrass::{ 8 | curves::bls12_381::{ 9 | curve::BLS12381Curve, 10 | default_types::{FrConfig, FrElement}, 11 | field_extension::BLS12381PrimeField, 12 | pairing::BLS12381AtePairing, 13 | twist::BLS12381TwistCurve, 14 | }, 15 | point::ShortWeierstrassProjectivePoint, 16 | }, 17 | field::{ 18 | element::FieldElement, fields::montgomery_backed_prime_fields::MontgomeryBackendPrimeField, 19 | }, 20 | polynomial::Polynomial, 21 | unsigned_integer::element::UnsignedInteger, 22 | }; 23 | use rand::random; 24 | 25 | type G1Point = ShortWeierstrassProjectivePoint<BLS12381Curve>; 26 | type G2Point = ShortWeierstrassProjectivePoint<BLS12381TwistCurve>; 27 | 28 | #[allow(clippy::upper_case_acronyms)] 29 | type KZG = KateZaveruchaGoldberg<MontgomeryBackendPrimeField<FrConfig, 4>, BLS12381AtePairing>; 30 | pub type Fq = FieldElement<BLS12381PrimeField>; 31 | 32 | /// This function creates the polynomial as given in the problem. We don't touch it. 33 | fn challenge_polynomial() -> Polynomial<FrElement> { 34 | Polynomial::<FrElement>::new(&[ 35 | FieldElement::from(69), 36 | FieldElement::from(78), 37 | FieldElement::from(32), 38 | FieldElement::from(65), 39 | FieldElement::from(82), 40 | FieldElement::from(71), 41 | FieldElement::from(69), 42 | FieldElement::from(78), 43 | FieldElement::from(84), 44 | FieldElement::from(73), 45 | FieldElement::from(78), 46 | FieldElement::from(65), 47 | FieldElement::from(32), 48 | FieldElement::from(78), 49 | FieldElement::from(65), 50 | FieldElement::from(67), 51 | FieldElement::from(73), 52 | FieldElement::from(32), 53 | FieldElement::from(84), 54 | FieldElement::from(73), 55 | FieldElement::from(69), 56 | FieldElement::from(82), FieldElement::from(82), // two 82s, matching the 82x^{21} + 82x^{22} terms of p(x) 57 | FieldElement::from(65), 58 | ]) 59 | } 60 | 61 | /// Utility to read the SRS from the file. 62 | fn read_srs() -> StructuredReferenceString<G1Point, G2Point> { 63 | let base_dir = env!("CARGO_MANIFEST_DIR"); 64 | let srs_path = base_dir.to_owned() + "/srs.bin"; 65 | StructuredReferenceString::<G1Point, G2Point>::from_file(&srs_path).unwrap() 66 | } 67 | 68 | /// Finds the toxic waste given the generators and their scalar multiples. 69 | /// 70 | /// Note that we use $g$ and $sg$ here, but any $s^ig$ and $s^{i+1}g$ would work too.
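/// There are only $\varphi(64) = 32$ primitive 64th roots of unity, so the search below is expected to succeed after about 32 draws.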
71 | fn find_toxic_waste(g1: &G1Point, sg1: &G1Point, g2: &G2Point, sg2: &G2Point) -> FrElement { 72 | // infinite loop, but we are SURE about this 73 | loop { 74 | // find a primitive root of unity 75 | let s = find_primitive_root(); 76 | 77 | // see if it matches the secret 78 | if g1.operate_with_self(s.representative()) == *sg1 79 | && g2.operate_with_self(s.representative()) == *sg2 80 | { 81 | return s; 82 | } 83 | } 84 | } 85 | 86 | /// Finds a primitive 64th root of unity in the scalar field of the BLS12-381 curve. 87 | fn find_primitive_root() -> FrElement { 88 | loop { 89 | // random element within the scalar field of order r 90 | let g = FrElement::from(random::<u64>()); 91 | 92 | // (r - 1) / 64 93 | let cofactor: UnsignedInteger<6> = UnsignedInteger::from_hex_unchecked( 94 | "0x01CFB69D4CA675F520CCE76020268760154EF6900BFFF96FFBFFFFFFFC000000", 95 | ); 96 | 97 | // obtain root of unity via cofactor clearing 98 | let root = g.pow(cofactor); 99 | debug_assert_eq!(root.pow(64u64), FrElement::one()); 100 | 101 | // check that it's indeed primitive 102 | if root.pow(32u64) != FrElement::one() { 103 | return root; 104 | } 105 | } 106 | } 107 | 108 | fn main() { 109 | let srs = read_srs(); 110 | let kzg = KZG::new(srs.clone()); 111 | let p = challenge_polynomial(); 112 | 113 | // the commitment is just a point on the curve, computed via MSM 114 | let p_commitment: G1Point = kzg.commit(&p); 115 | 116 | // find the toxic waste 117 | let (g1, sg1) = (&srs.powers_main_group[0], &srs.powers_main_group[1]); 118 | let (g2, sg2) = ( 119 | &srs.powers_secondary_group[0], 120 | &srs.powers_secondary_group[1], 121 | ); 122 | let s = find_toxic_waste(g1, sg1, g2, sg2); 123 | 124 | let (v, z) = (FrElement::from(3), FrElement::from(1)); 125 | 126 | // compute q(s) via the fake proof method = (P(s) - v) / (s - z) 127 | let q_s = (p.evaluate(&s) - v.clone()) * (s - z.clone()).inv().expect("should invert"); 128 | 129 | // find the commitment as g * q(s) 130 | // normally we would do MSM for this using SRS, but we know the toxic waste :) 131 | let q_commitment = g1.operate_with_self(q_s.representative()); 132 | 133 | let fake_proof = q_commitment; 134 | println!("Fake proof for submission:"); 135 | println!("{:?}", &fake_proof.to_affine().x().to_string()); 136 | println!("{:?}", &fake_proof.to_affine().y().to_string()); 137 | assert!(kzg.verify(&z, &v, &p_commitment, &fake_proof)); 138 | println!("Faked successfully!"); 139 | } 140 | 141 | #[cfg(test)] 142 | mod tests { 143 | use super::*; 144 | 145 | #[test] 146 | fn test_examine_srs() { 147 | let srs = read_srs(); 148 | 149 | // find the repeating point 150 | let mut ctr = 1; 151 | let g = srs.powers_main_group[0].clone(); 152 | for p in srs 153 | .powers_main_group 154 | .iter() 155 | .skip(1) 156 | .take_while(|p| *p != &g) 157 | { 158 | println!("{}\t[{} : {} : {}]", ctr, p.x(), p.y(), p.z()); 159 | ctr += 1; 160 | } 161 | 162 | println!("Repeat found at: {}", ctr); // ctr turns out to be 64 163 | } 164 | 165 | #[test] 166 | fn test_primitive_root() { 167 | let root = find_primitive_root(); 168 | assert_eq!(root.pow(64u64), FrElement::one()); 169 | println!("Primitive 64th root of unity: {}", root); 170 | } 171 | 172 | #[test] 173 | fn test_toxic_waste() { 174 | let srs = read_srs(); 175 | let (g1, sg1) = (&srs.powers_main_group[0], &srs.powers_main_group[1]); 176 | let (g2, sg2) = ( 177 | &srs.powers_secondary_group[0], 178 | &srs.powers_secondary_group[1], 179 | ); 180 | 181 | let s = find_toxic_waste(g1, sg1, g2, sg2); 182 | 183 | println!("Toxic 
waste: {}", s); 184 | // 0xe4840ac57f86f5e293b1d67bc8de5d9a12a70a615d0b8e4d2fc5e69ac5db47f 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /exercises/vault-of-loki/srs.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/erhant/lambda-0b10/7f5d148deda7a798d80019a52421ed4c4a974479/exercises/vault-of-loki/srs.bin -------------------------------------------------------------------------------- /snarks/babysnark/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "babysnark" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lambdaworks-math.workspace = true 10 | lambdaworks-crypto.workspace = true 11 | baby-snark = { git = "https://github.com/lambdaclass/lambdaworks.git", branch = "main" } 12 | -------------------------------------------------------------------------------- /snarks/babysnark/README.md: -------------------------------------------------------------------------------- 1 | # BabySNARK Circuits 2 | 3 | This directory contains small example circuits using BabySNARK. Each circuit here is arithmetized into a Square-span Program (SSP) which is: 4 | 5 | $$ 6 | (U . z)^2 = 1 7 | $$ 8 | 9 | where $U$ is a matrix and $z$ is a vector. If the equation holds for a given $z$, then the circuit is satisfied. 10 | 11 | > [!NOTE] 12 | > 13 | > To reiterate, here we have only the circuits, **not** the implementation of BabySNARK! 14 | -------------------------------------------------------------------------------- /snarks/babysnark/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | mod tests { 3 | use baby_snark::common::FrElement; 4 | use baby_snark::scs::SquareConstraintSystem; 5 | use baby_snark::ssp::SquareSpanProgram; 6 | use baby_snark::utils::i64_vec_to_field; 7 | use baby_snark::{setup, verify, Prover}; 8 | 9 | /// AND gate for two inputs `a, b` and output `c` has the following constraints: 10 | /// 11 | /// - `(2a - 1)^2 = 1`: `a` is a bit 12 | /// - `(2b - 1)^2 = 1`: `b` is a bit 13 | /// - `(2c - 1)^2 = 1`: `c` is a bit 14 | /// - `(2a + 2b - 4c - 1)^2 = 1`: `c = a AND b` 15 | #[test] 16 | fn test_and_gate() { 17 | let u = vec![ 18 | i64_vec_to_field(&[-1, 2, 0, 0]), // -1 2 0 0 || 1 19 | i64_vec_to_field(&[-1, 0, 2, 0]), // -1 0 2 0 || a 20 | i64_vec_to_field(&[-1, 0, 0, 2]), // -1 0 0 2 || b 21 | i64_vec_to_field(&[-1, 2, 2, -4]), // -1 2 2 -4 || c 22 | ]; 23 | 24 | for (a, b) in [(0, 0), (0, 1), (1, 0), (1, 1)].into_iter() { 25 | let c = a & b; 26 | println!("a: {}, b: {}, c: {}", a, b, c); 27 | let witness = i64_vec_to_field(&[1, a, b]); 28 | let public = i64_vec_to_field(&[c]); 29 | verify_integration(u.clone(), witness, public); 30 | } 31 | } 32 | 33 | /// utility to be used by multiple tests 34 | fn verify_integration(u: Vec>, witness: Vec, public: Vec) { 35 | let mut input = witness; 36 | input.extend(public.clone()); 37 | 38 | let ssp = SquareSpanProgram::from_scs(SquareConstraintSystem::from_matrix(u, public.len())); 39 | let (proving_key, verifying_key) = setup(&ssp); 40 | 41 | let proof = Prover::prove(&input, &ssp, &proving_key).unwrap(); 42 | 43 | let verified = verify(&verifying_key, &proof, &public); 44 | assert!(verified); 45 | } 46 | } 47 | 
-------------------------------------------------------------------------------- /snarks/stark101/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "stark101" 3 | version.workspace = true 4 | edition.workspace = true 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | lambdaworks-math.workspace = true 10 | lambdaworks-crypto.workspace = true 11 | rand.workspace = true 12 | log.workspace = true 13 | env_logger.workspace = true 14 | hex.workspace = true 15 | serde.workspace = true 16 | serde_json.workspace = true 17 | -------------------------------------------------------------------------------- /snarks/stark101/README.md: -------------------------------------------------------------------------------- 1 | # [Stark101](https://github.com/starkware-industries/stark101) 2 | 3 | Our objective is to prove that we know $x$ such that $a_{1022} = 2338775057$ in a finite field, where $a_n$ is given by the FibonacciSq sequence $a_n = a_{n-1}^2 + a_{n-2}^2$ with $a_0 = 1$ and $a_1 = x$. The order of the field is $3 \times 2^{30} + 1 = 3221225473$. 4 | 5 | - [Notebook 1](https://github.com/starkware-industries/stark101/blob/master/tutorial/Stark101-part1.ipynb) - [Video 1](https://www.youtube.com/watch?v=Y0uJz9VL3Fo) 6 | - [Notebook 2](https://github.com/starkware-industries/stark101/blob/master/tutorial/Stark101-part2.ipynb) - [Video 2](https://www.youtube.com/watch?v=fg3mFPXEYQY) 7 | - [Notebook 3](https://github.com/starkware-industries/stark101/blob/master/tutorial/Stark101-part3.ipynb) - [Video 3](https://www.youtube.com/watch?v=gd1NbKUOJwA) 8 | - [Notebook 4](https://github.com/starkware-industries/stark101/blob/master/tutorial/Stark101-part4.ipynb) - [Video 4](https://www.youtube.com/watch?v=CxP28qM4tAc) 9 | - [Notebook 5](https://github.com/starkware-industries/stark101/blob/master/tutorial/Stark101-part5.ipynb) - [Video 5](https://www.youtube.com/watch?v=iuNbrTkH2ik) 10 | 11 | The implementation is found within the [`main.rs`](./src/main.rs) file. We make use of the following LambdaWorks tools, together with our custom field: 12 | 13 | - MerkleTree using `Sha2_256Backend` for Merkle commitments 14 | - Transcript using `DefaultTranscript` for the Fiat-Shamir transform 15 | - Polynomial library for polynomial operations 16 | - An additional `Proof` struct has been written so that the final proof is serialized and saved to disk. 17 | 18 | > [!TIP] 19 | > 20 | > We stick to the naming conventions used in the tutorial, so it should be easy to follow the code along with the notebooks. 21 | 22 | ## Usage 23 | 24 | Run the prover via: 25 | 26 | ```sh 27 | cargo run --release --bin stark101 28 | ``` 29 | 30 | > [!TIP] 31 | > 32 | > `debug` mode is rather slow, especially during the compute-intensive interpolation step, so we use `release` mode instead.
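The resulting `proof.json` can be inspected from the command line, e.g. with `jq` (field names as defined in [`proof.rs`](./src/proof.rs)):

```sh
jq '.commitment.cp_roots | length' proof.json   # 11: one Merkle root per FRI layer
jq '.decommitments | length' proof.json         # 3: one decommitment per query
```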
33 | -------------------------------------------------------------------------------- /snarks/stark101/src/field.rs: -------------------------------------------------------------------------------- 1 | use lambdaworks_crypto::{ 2 | fiat_shamir::default_transcript::DefaultTranscript, 3 | merkle_tree::backends::types::Sha2_256Backend, 4 | }; 5 | use lambdaworks_math::{ 6 | field::{ 7 | element::FieldElement, 8 | fields::montgomery_backed_prime_fields::{IsModulus, U64PrimeField}, 9 | }, 10 | unsigned_integer::element::U64, 11 | }; 12 | use serde::{Deserialize, Serialize}; 13 | 14 | #[derive(Clone, Debug, Hash, Copy, Serialize, Deserialize)] 15 | pub struct MontgomeryConfigStark101PrimeField; 16 | impl IsModulus<U64> for MontgomeryConfigStark101PrimeField { 17 | const MODULUS: U64 = U64::from_hex_unchecked("c0000001"); 18 | } 19 | 20 | pub type Stark101PrimeField = U64PrimeField<MontgomeryConfigStark101PrimeField>; 21 | 22 | /// Backend for Merkle Tree using Sha256. 23 | pub type Stark101PrimeFieldBackend = Sha2_256Backend<Stark101PrimeField>; 24 | 25 | /// Transcript for Fiat-Shamir transform using Stark101PrimeField. 26 | pub type Stark101PrimeFieldTranscript = DefaultTranscript<Stark101PrimeField>; 27 | 28 | // impl IsFFTField for Stark101PrimeField { 29 | // const TWO_ADICITY: u64 = 30; 30 | // // Change this line for a new function like `from_limbs`. 31 | // const TWO_ADIC_PRIMITVE_ROOT_OF_UNITY: U64 = UnsignedInteger::from_hex_unchecked( 32 | // "5282db87529cfa3f0464519c8b0fa5ad187148e11a61616070024f42f8ef94", // TODO: !!! 33 | // ); 34 | 35 | // fn field_name() -> &'static str { 36 | // "stark101" 37 | // } 38 | // } 39 | 40 | pub type Stark101PrimeFieldElement = FieldElement<Stark101PrimeField>; 41 | 42 | /// Returns a generator for a subgroup of the given order, which must be a power of two. 43 | /// 44 | /// 1. Generate a random element `r` in the field. 45 | /// 2. Compute `g = r^(order_u128 / order)` (co-factor clearing). 46 | /// 3. If `g^order == 1` and `g^(order >> 1) != 1`, return `g`. 47 | /// The second check ensures that the order of `g` is exactly `order`, and not that of 48 | /// some smaller sub-group. 49 | /// 50 | /// ## Panics 51 | /// 52 | /// If the order does not divide the multiplicative field order. 53 | pub fn get_subgroup_generator(order: u128) -> Stark101PrimeFieldElement { 54 | let order_u128: u128 = 3u128 * (1u128 << 30u128); 55 | assert!( 56 | order_u128 % order == 0, 57 | "order must divide the multiplicative field order" 58 | ); 59 | 60 | loop { 61 | let r = Stark101PrimeFieldElement::from(rand::random::<u64>()); 62 | 63 | // co-factor clearing 64 | let g = r.pow(order_u128 / order); 65 | 66 | if g.pow(order) == Stark101PrimeFieldElement::one() 67 | && g.pow(order >> 1) != Stark101PrimeFieldElement::one() 68 | { 69 | return g; 70 | } 71 | } 72 | } 73 | 74 | /// Given a generator `g`, generates the group that it belongs to. 75 | pub fn generate_subgroup(g: Stark101PrimeFieldElement) -> Vec<Stark101PrimeFieldElement> { 76 | let mut subgroup = Vec::new(); 77 | subgroup.push(Stark101PrimeFieldElement::one()); 78 | 79 | let mut next = g; 80 | while next != Stark101PrimeFieldElement::one() { 81 | subgroup.push(next); 82 | next *= g; 83 | } 84 | 85 | subgroup 86 | } 87 | 88 | /// Generates a generator of the full multiplicative group (of order $3 \cdot 2^{30}$), by sampling 89 | /// a random element and making sure that it does not belong to a proper subgroup.
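/// For the group order $n = 3 \cdot 2^{30}$, an element $r$ generates the full group iff
/// $r^{n/2} \ne 1$ and $r^{n/3} \ne 1$ (one check per prime factor of $n$).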
90 | pub fn generate_generator() -> Stark101PrimeFieldElement { 91 | loop { 92 | let r = Stark101PrimeFieldElement::from(rand::random::<u64>()); 93 | 94 | if r.pow(3u128 * (1u128 << 29u128)) != Stark101PrimeFieldElement::one() // r^(n/2) != 1 95 | && r.pow(1u128 << 30u128) != Stark101PrimeFieldElement::one() // r^(n/3) != 1 96 | { 97 | return r; 98 | } 99 | } 100 | } 101 | 102 | #[cfg(test)] 103 | mod tests { 104 | use super::Stark101PrimeFieldElement as FE; 105 | use super::*; 106 | 107 | #[test] 108 | fn test_field_mul() { 109 | let a = FE::from(4u64); 110 | let b = FE::from(2u64); 111 | let c = FE::from(8u64); 112 | assert_eq!(a * b, c); 113 | } 114 | 115 | #[test] 116 | fn test_subgroup_1024() { 117 | let order = 1024u128; 118 | let g = get_subgroup_generator(order); 119 | let subgroup = generate_subgroup(g); 120 | assert_eq!(subgroup.len(), order as usize); 121 | } 122 | 123 | #[test] 124 | fn test_subgroup_8192() { 125 | let order = 8192u128; 126 | let g = get_subgroup_generator(order); 127 | let subgroup = generate_subgroup(g); 128 | assert_eq!(subgroup.len(), order as usize); 129 | } 130 | 131 | #[test] 132 | fn test_subgroup_1() { 133 | let subgroup = generate_subgroup(FE::one()); 134 | assert_eq!(subgroup.len(), 1); 135 | } 136 | 137 | #[test] 138 | fn test_generator() { 139 | let order_u128: u128 = 3u128 * (1u128 << 30u128); 140 | let g = generate_generator(); 141 | assert_eq!(g.pow(order_u128), FE::one()); 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /snarks/stark101/src/fri.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::too_many_arguments, clippy::type_complexity)] 2 | 3 | use lambdaworks_crypto::{ 4 | fiat_shamir::is_transcript::IsTranscript, 5 | merkle_tree::{backends::types::Sha2_256Backend, merkle::MerkleTree}, 6 | }; 7 | use lambdaworks_math::{ 8 | field::{element::FieldElement, traits::IsField}, 9 | polynomial::Polynomial, 10 | traits::AsBytes, 11 | }; 12 | 13 | use crate::proof::Stark101Decommitment; 14 | 15 | /// We use a constant blowup factor in this example. 16 | pub const BLOWUP_FACTOR: usize = 8; 17 | 18 | /// Given a domain of length `n`, returns the first half of it with each element squared. 19 | pub fn next_fri_domain<F: IsField>(domain: Vec<FieldElement<F>>) -> Vec<FieldElement<F>> { 20 | domain 21 | .iter() 22 | .take(domain.len() >> 1) 23 | .map(|x| x.square()) 24 | .collect() 25 | } 26 | 27 | /// Given a polynomial `poly` and a field element `beta`, returns the folding operator applied to `poly`. 28 | /// 29 | /// What happens here is that `poly` is split into even and odd coefficients, and the odd part is multiplied 30 | /// by `beta`, therefore reducing the degree of the polynomial by half. 31 | pub fn next_fri_polynomial<F: IsField>( 32 | poly: Polynomial<FieldElement<F>>, 33 | beta: FieldElement<F>, 34 | ) -> Polynomial<FieldElement<F>> { 35 | let even_coeffs = poly 36 | .coefficients 37 | .clone() 38 | .into_iter() 39 | .step_by(2) 40 | .collect::<Vec<_>>(); 41 | let even = Polynomial::new(&even_coeffs); 42 | 43 | let odd_coeffs = poly 44 | .coefficients 45 | .clone() 46 | .into_iter() 47 | .skip(1) 48 | .step_by(2) 49 | .collect::<Vec<_>>(); 50 | let odd = Polynomial::new(&odd_coeffs); 51 | 52 | even + beta * odd 53 | } 54 | 55 | /// Given a polynomial `poly` and an evaluation domain `domain` along with a 56 | /// random field element `beta`, returns the next FRI layer. 57 | /// 58 | /// This next layer contains the evaluations of the folded polynomial over the squared half-domain.
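/// Folding identity: writing `p(x) = e(x^2) + x * o(x^2)` for the even/odd parts `e, o`,
/// the folded polynomial is `e(y) + beta * o(y)`, so each of its evaluations at `y = x^2`
/// can be cross-checked against `p(x)` and `p(-x)` alone.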
59 | pub fn next_fri_layer<F: IsField>( 60 | poly: Polynomial<FieldElement<F>>, 61 | domain: Vec<FieldElement<F>>, 62 | beta: FieldElement<F>, 63 | ) -> ( 64 | Polynomial<FieldElement<F>>, 65 | Vec<FieldElement<F>>, 66 | Vec<FieldElement<F>>, 67 | ) { 68 | let next_poly = next_fri_polynomial(poly, beta); 69 | let next_domain = next_fri_domain(domain); 70 | let next_layer = next_domain 71 | .iter() 72 | .map(|x| next_poly.evaluate(x)) 73 | .collect::<Vec<_>>(); 74 | 75 | (next_poly, next_domain, next_layer) 76 | } 77 | 78 | /// Commits to the given polynomial `cp` and returns the FRI layers along with their Merkle trees. 79 | pub fn fri_commit<F: IsField, T: IsTranscript<F>>( 80 | cp: Polynomial<FieldElement<F>>, 81 | domain: Vec<FieldElement<F>>, 82 | cp_eval: Vec<FieldElement<F>>, 83 | cp_merkle: MerkleTree<Sha2_256Backend<F>>, 84 | channel: &mut T, 85 | ) -> ( 86 | Vec<Polynomial<FieldElement<F>>>, 87 | Vec<Vec<FieldElement<F>>>, 88 | Vec<Vec<FieldElement<F>>>, 89 | Vec<MerkleTree<Sha2_256Backend<F>>>, 90 | ) 91 | where 92 | FieldElement<F>: AsBytes + Send + Sync, 93 | { 94 | let mut fri_polys = vec![cp]; 95 | let mut fri_domains = vec![domain]; 96 | let mut fri_layers = vec![cp_eval]; 97 | let mut fri_merkles = vec![cp_merkle]; 98 | 99 | // apply FRI until you end up with a constant polynomial 100 | while fri_polys.last().unwrap().degree() > 0 { 101 | // sample randomness 102 | let beta = channel.sample_field_element(); 103 | 104 | // apply FRI operator 105 | let (next_poly, next_domain, next_layer) = next_fri_layer( 106 | fri_polys.last().unwrap().clone(), 107 | fri_domains.last().unwrap().clone(), 108 | beta, 109 | ); 110 | fri_polys.push(next_poly); 111 | fri_domains.push(next_domain); 112 | fri_layers.push(next_layer.clone()); 113 | 114 | // commit to layer & add root to transcript 115 | let tree = MerkleTree::<Sha2_256Backend<F>>::build(&next_layer); 116 | channel.append_bytes(&tree.root); 117 | fri_merkles.push(tree); 118 | } 119 | 120 | // add constant polynomial to transcript 121 | assert_eq!(fri_polys.last().unwrap().degree(), 0); 122 | channel.append_field_element(&fri_layers.last().unwrap()[0]); 123 | 124 | (fri_polys, fri_domains, fri_layers, fri_merkles) 125 | } 126 | 127 | /// Decommits on FRI layers, providing the evaluations of the polynomial at the given index and its sibling along with 128 | /// Merkle authentication paths. 129 | /// 130 | /// For this example in particular, it provides the following: 131 | /// - `cp_0(x)` and its path 132 | /// - `cp_0(-x)` and its path 133 | /// - `cp_1(x^2)` and its path 134 | /// - `cp_1(-x^2)` and its path 135 | /// - `cp_2(x^4)` and its path 136 | /// - `cp_2(-x^4)` and its path 137 | /// - ... 138 | /// - `cp_9(x^512)` and `cp_9(-x^512)` and their paths; the constant last layer's value is appended without a path 139 | pub fn decommit_on_fri_layers<F: IsField, T: IsTranscript<F>>( 140 | idx: usize, 141 | channel: &mut T, 142 | evals: &mut Vec<FieldElement<F>>, 143 | paths: &mut Vec<Vec<[u8; 32]>>, 144 | fri_layers: &[Vec<FieldElement<F>>], 145 | fri_merkles: &[MerkleTree<Sha2_256Backend<F>>], 146 | ) where 147 | FieldElement<F>: AsBytes + Send + Sync, 148 | { 149 | for i in 0..fri_layers.len() - 1 { 150 | let layer = fri_layers[i].clone(); 151 | let merkle = fri_merkles[i].clone(); 152 | 153 | let length = layer.len(); 154 | let idx = idx % length; // wrap the index into the current (halved) layer 155 | let sib_idx = (idx + (length >> 1)) % length; // the sibling -x lives half a period away 156 | 157 | // cp_i(x^{2^i}), e.g. cp_2(x^4) 158 | let eval = &layer[idx]; 159 | channel.append_field_element(eval); 160 | evals.push(eval.clone()); 161 | let auth_path = merkle.get_proof_by_pos(idx).unwrap(); 162 | for path in &auth_path.merkle_path { 163 | channel.append_bytes(path); 164 | } 165 | paths.push(auth_path.merkle_path); 166 | 167 | // cp_i(-x^{2^i}), e.g. 
cp_2(-x^4) 168 | let eval = &layer[sib_idx]; 169 | channel.append_field_element(eval); 170 | evals.push(eval.clone()); 171 | let auth_path = merkle.get_proof_by_pos(sib_idx).unwrap(); 172 | for path in &auth_path.merkle_path { 173 | channel.append_bytes(path); 174 | } 175 | paths.push(auth_path.merkle_path); 176 | } 177 | 178 | channel.append_field_element(&fri_layers.last().unwrap()[0]); 179 | } 180 | 181 | /// Decommits on an FRI query. Since our CP makes use of `x`, `g . x` and `g^2 . x`, we need to decommit on these 182 | /// three points. However, due to the domain extension, these points are `BLOWUP_FACTOR` apart from each other. 183 | /// 184 | /// Within this function, we first provide Merkle proofs to the evaluations of the polynomial at these points. 185 | /// That is, we provide the things below: 186 | /// 187 | /// - `f(x)` and its path 188 | /// - `f(g . x)` and its path 189 | /// - `f(g^2 . x)` and its path 190 | /// 191 | /// Then, we call `decommit_on_fri_layers` to provide the rest of the decommitment. 192 | pub fn decommit_on_query<F: IsField, T: IsTranscript<F>>( 193 | idx: usize, 194 | channel: &mut T, 195 | evals: &mut Vec<FieldElement<F>>, 196 | paths: &mut Vec<Vec<[u8; 32]>>, 197 | f_eval: &[FieldElement<F>], 198 | f_merkle: &MerkleTree<Sha2_256Backend<F>>, 199 | fri_layers: &[Vec<FieldElement<F>>], 200 | fri_merkles: &[MerkleTree<Sha2_256Backend<F>>], 201 | ) where 202 | FieldElement<F>: AsBytes + Send + Sync, 203 | { 204 | assert!(idx + 2 * BLOWUP_FACTOR < f_eval.len(), "index out-of-range"); 205 | 206 | // f(x) 207 | let eval = &f_eval[idx]; 208 | channel.append_field_element(eval); 209 | evals.push(eval.clone()); 210 | let auth_path = f_merkle.get_proof_by_pos(idx).unwrap(); 211 | for path in &auth_path.merkle_path { 212 | channel.append_bytes(path); 213 | } 214 | paths.push(auth_path.merkle_path); 215 | 216 | // f(g . x) 217 | let eval = &f_eval[idx + BLOWUP_FACTOR]; 218 | channel.append_field_element(eval); 219 | evals.push(eval.clone()); 220 | let auth_path = f_merkle.get_proof_by_pos(idx + BLOWUP_FACTOR).unwrap(); 221 | for path in &auth_path.merkle_path { 222 | channel.append_bytes(path); 223 | } 224 | paths.push(auth_path.merkle_path); 225 | 226 | // f(g^2 . x) 227 | let eval = &f_eval[idx + 2 * BLOWUP_FACTOR]; 228 | channel.append_field_element(eval); 229 | evals.push(eval.clone()); 230 | let auth_path = f_merkle.get_proof_by_pos(idx + 2 * BLOWUP_FACTOR).unwrap(); 231 | for path in &auth_path.merkle_path { 232 | channel.append_bytes(path); 233 | } 234 | paths.push(auth_path.merkle_path); 235 | 236 | decommit_on_fri_layers(idx, channel, evals, paths, fri_layers, fri_merkles); 237 | } 238 | 239 | /// Generates `num_queries` random queries and decommits on those indices. 240 | /// The queries are sampled from the transcript, i.e. they are "sent" by 241 | /// the verifier.
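/// Soundness improves with the query count: each additional query gives an independent
/// chance of catching a cheating prover, so the error probability drops roughly geometrically.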
242 | pub fn decommit_fri<F: IsField, T: IsTranscript<F>>( 243 | num_queries: usize, 244 | channel: &mut T, 245 | f_eval: &[FieldElement<F>], 246 | f_merkle: &MerkleTree<Sha2_256Backend<F>>, 247 | fri_layers: &[Vec<FieldElement<F>>], 248 | fri_merkles: &[MerkleTree<Sha2_256Backend<F>>], 249 | ) -> Vec<Stark101Decommitment<F>> 250 | where 251 | FieldElement<F>: AsBytes + Send + Sync, 252 | { 253 | let upper_bound = (f_eval.len() - 2 * BLOWUP_FACTOR) as u64; 254 | let mut decommitments = Vec::new(); 255 | for _ in 0..num_queries { 256 | let mut evals = Vec::new(); 257 | let mut paths = Vec::new(); 258 | let random_idx = channel.sample_u64(upper_bound); 259 | decommit_on_query( 260 | random_idx as usize, 261 | channel, 262 | &mut evals, 263 | &mut paths, 264 | f_eval, 265 | f_merkle, 266 | fri_layers, 267 | fri_merkles, 268 | ); 269 | 270 | decommitments.push(Stark101Decommitment { evals, paths }); 271 | } 272 | 273 | decommitments 274 | } 275 | -------------------------------------------------------------------------------- /snarks/stark101/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod field; 2 | pub mod fri; 3 | pub mod program; 4 | pub mod proof; 5 | -------------------------------------------------------------------------------- /snarks/stark101/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_snake_case, unused_variables)] 2 | 3 | use std::env; 4 | 5 | use lambdaworks_crypto::{ 6 | fiat_shamir::is_transcript::IsTranscript, merkle_tree::merkle::MerkleTree, 7 | }; 8 | use lambdaworks_math::polynomial::Polynomial; 9 | use stark101::{ 10 | field::{ 11 | generate_generator, generate_subgroup, get_subgroup_generator, Stark101PrimeFieldBackend, 12 | Stark101PrimeFieldElement as FE, Stark101PrimeFieldTranscript, 13 | }, 14 | fri::{decommit_fri, fri_commit, BLOWUP_FACTOR}, 15 | program::fibonacci_square, 16 | proof::{Stark101Commitment, Stark101Proof}, 17 | }; 18 | 19 | fn main() { 20 | env::set_var("RUST_LOG", "debug"); 21 | env_logger::init(); 22 | 23 | ///////////////////////////////////////////////////////////////////////////////// 24 | /////////////////////////////////// PART 1 //////////////////////////////////// 25 | ///////////////////////////////////////////////////////////////////////////////// 26 | log::info!("Computing trace of FibonacciSq program"); 27 | let n = 1023; 28 | let a_0 = FE::from(1u64); 29 | let a_1 = FE::from(3141592u64); 30 | let a = fibonacci_square(a_0, a_1, n); 31 | assert_eq!(a.len(), n); 32 | assert_eq!(*a.last().unwrap(), FE::from(2338775057u64)); 33 | 34 | log::info!("Creating transcript"); 35 | let mut channel = Stark101PrimeFieldTranscript::default(); 36 | 37 | log::info!("Creating evaluation domain"); 38 | let G_order = n + 1; 39 | let g = get_subgroup_generator(G_order as u128); 40 | let G = generate_subgroup(g); 41 | assert!(G.len() == G_order); 42 | log::debug!("Evaluation domain has {} elements", G.len()); 43 | 44 | log::info!("Interpolating the trace (may take some time)"); 45 | let f = Polynomial::interpolate(&G.as_slice()[..G_order - 1], &a).expect("should interpolate"); 46 | assert_eq!(f.evaluate(&G[0]), a[0]); 47 | assert_eq!(f.evaluate(&G[1]), a[1]); 48 | assert_eq!(f.evaluate(&G[345]), a[345]); 49 | log::debug!("Trace polynomial has degree {}", f.degree()); 50 | 51 | log::info!("Extending to a larger domain"); 52 | let w = generate_generator(); 53 | let H_order = (n + 1) * BLOWUP_FACTOR; // extend to a larger domain 54 | let h = get_subgroup_generator(H_order as u128); 55 | let H = generate_subgroup(h); 56 | let eval_domain = 
H.clone().into_iter().map(|x| w * x).collect::<Vec<_>>(); 57 | log::debug!("Coset has {} elements", eval_domain.len()); 58 | 59 | log::info!("Evaluating the trace polynomial on the coset"); 60 | let f_eval = eval_domain 61 | .iter() 62 | .map(|x| f.evaluate(x)) 63 | .collect::<Vec<_>>(); 64 | 65 | log::info!("Merkle committing to evaluations"); 66 | let f_merkle = MerkleTree::<Stark101PrimeFieldBackend>::build(&f_eval); 67 | let f_merkle_root = f_merkle.root; 68 | log::debug!("Merkle Root: {}", hex::encode(f_merkle_root)); 69 | channel.append_bytes(&f_merkle_root); 70 | 71 | ///////////////////////////////////////////////////////////////////////////////// 72 | /////////////////////////////////// PART 2 //////////////////////////////////// 73 | ///////////////////////////////////////////////////////////////////////////////// 74 | log::info!("Constructing the first constraint: a_0 = 1 ==> f(g^0) = 1"); 75 | let numer0 = f.clone() - Polynomial::new_monomial(FE::from(1u64), 0); // f - 1 76 | let denom0 = Polynomial::new(&[-FE::from(1u64), FE::from(1u64)]); // X - g^0 = X - 1 77 | let p0 = numer0 / denom0; 78 | assert_eq!(p0.degree(), 1021); // 1022 - 1 79 | 80 | log::info!("Constructing the final constraint: a_1022 = 2338775057 ==> f(g^1022) = 2338775057"); 81 | let numer1 = f.clone() - Polynomial::new_monomial(FE::from(2338775057u64), 0); // f - 2338775057 82 | let denom1 = Polynomial::new(&[-g.pow(1022u64), FE::from(1u64)]); // X - g^1022 83 | let p1 = numer1 / denom1; 84 | assert_eq!(p1.degree(), 1021); // 1022 - 1 85 | 86 | log::info!("Constructing the transition constraints: a_n = a_(n-1)^2 + a_(n-2)^2 ==> f(g^2 . x) = f(g . x)^2 + f(x)^2"); 87 | let fg2 = f.scale(&g.pow(2u64)); // f(g^2 . x) 88 | let fg = f.scale(&g); // f(g . x) 89 | let numer2 = fg2 - (fg.clone() * fg) - (f.clone() * f); // f(g^2 . x) - f(g . 
x)^2 - f(x)^2 90 | let x_1024 = Polynomial::new_monomial(FE::one(), 1024) - Polynomial::new_monomial(FE::one(), 0); // X^1024 - 1 91 | let x_m_1021 = Polynomial::new(&[-g.pow(1021u64), FE::one()]); // X - g^1021 92 | let x_m_1022 = Polynomial::new(&[-g.pow(1022u64), FE::one()]); // X - g^1022 93 | let x_m_1023 = Polynomial::new(&[-g.pow(1023u64), FE::one()]); // X - g^1023 94 | let denom2 = x_1024 / (x_m_1021 * x_m_1022 * x_m_1023); 95 | let p2 = numer2 / denom2; 96 | assert_eq!(p2.degree(), 1023); // (1022 * 2) - (1024 - 3) = 2044 - 1021 = 1023 97 | 98 | log::info!("Creating the composition polynomial"); 99 | let alpha0 = channel.sample_field_element(); 100 | let alpha1 = channel.sample_field_element(); 101 | let alpha2 = channel.sample_field_element(); 102 | let cp = p0 * alpha0 + p1 * alpha1 + p2 * alpha2; 103 | 104 | log::info!("Evaluating the composition polynomial over the coset"); 105 | let cp_eval = eval_domain 106 | .iter() 107 | .map(|x| cp.evaluate(x)) 108 | .collect::<Vec<_>>(); 109 | 110 | log::info!("Merkle committing to the evaluations"); 111 | let cp_merkle = MerkleTree::<Stark101PrimeFieldBackend>::build(&cp_eval); 112 | let cp_merkle_root = cp_merkle.root; 113 | log::debug!("Merkle Root: {}", hex::encode(cp_merkle_root)); 114 | channel.append_bytes(&cp_merkle_root); 115 | 116 | ///////////////////////////////////////////////////////////////////////////////// 117 | /////////////////////////////////// PART 3 //////////////////////////////////// 118 | ///////////////////////////////////////////////////////////////////////////////// 119 | log::info!("FRI committing to the composition polynomial"); 120 | let (fri_polys, fri_domains, fri_layers, fri_merkles) = 121 | fri_commit(cp, eval_domain, cp_eval, cp_merkle, &mut channel); 122 | assert_eq!(fri_layers.len(), 11); 123 | assert_eq!(fri_layers.last().unwrap().len(), BLOWUP_FACTOR); 124 | assert_eq!(fri_polys.last().unwrap().degree(), 0); 125 | 126 | ///////////////////////////////////////////////////////////////////////////////// 127 | /////////////////////////////////// PART 4 //////////////////////////////////// 128 | ///////////////////////////////////////////////////////////////////////////////// 129 | log::info!("Generating queries and decommitments to FRI"); 130 | let num_queries = 3usize; 131 | let decommitments = decommit_fri( 132 | num_queries, 133 | &mut channel, 134 | &f_eval, 135 | &f_merkle, 136 | &fri_layers, 137 | &fri_merkles, 138 | ); 139 | 140 | let final_state = hex::encode(channel.state()); 141 | log::debug!("Final transcript state: {}", final_state); 142 | 143 | ///////////////////////////////////////////////////////////////////////////////// 144 | /////////////////////////////////// PROOF //////////////////////////////////// 145 | ///////////////////////////////////////////////////////////////////////////////// 146 | log::info!("Creating proof object"); 147 | assert_eq!(decommitments.len(), num_queries); 148 | assert_eq!(fri_merkles.len(), 11); 149 | assert_eq!(decommitments[0].evals.len(), 23); // 3 (trace) + 10 * 2 (FRI layer pairs) 150 | assert_eq!(decommitments[0].paths.len(), 23); // 3 (trace) + 10 * 2 (FRI layer pairs) 151 | let proof = Stark101Proof { 152 | commitment: Stark101Commitment { 153 | trace_root: f_merkle_root, 154 | cp_roots: fri_merkles.iter().map(|m| m.root).collect(), 155 | }, 156 | decommitments, 157 | }; 158 | 159 | let path_str = proof.write_to_file(); 160 | log::info!("Proof created at {}", path_str); 161 | } 162 | -------------------------------------------------------------------------------- /snarks/stark101/src/program.rs: 
-------------------------------------------------------------------------------- 1 | use lambdaworks_math::field::{element::FieldElement, traits::IsField}; 2 | 3 | /// Returns the trace of a program for `FibonacciSq`. 4 | pub fn fibonacci_square<F: IsField>( 5 | a_0: FieldElement<F>, 6 | a_1: FieldElement<F>, 7 | n: usize, 8 | ) -> Vec<FieldElement<F>> { 9 | let mut trace = Vec::with_capacity(n); 10 | trace.push(a_0); 11 | trace.push(a_1); 12 | 13 | for i in 2..n { 14 | let a_i = trace[i - 1].square() + trace[i - 2].square(); 15 | trace.push(a_i); 16 | } 17 | 18 | trace 19 | } 20 | 21 | #[cfg(test)] 22 | mod tests { 23 | use super::*; 24 | use crate::field::Stark101PrimeFieldElement as FE; 25 | 26 | #[test] 27 | fn test_trace() { 28 | let n = 1023; 29 | let a_0 = FE::from(1u64); 30 | let a_1 = FE::from(3141592u64); 31 | let a = fibonacci_square(a_0, a_1, n); 32 | 33 | assert_eq!(a.len(), n); 34 | assert_eq!(*a.last().unwrap(), FE::from(2338775057u64)); 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /snarks/stark101/src/proof.rs: -------------------------------------------------------------------------------- 1 | use lambdaworks_math::field::{element::FieldElement, traits::IsField}; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | #[derive(Debug, Clone, Serialize, Deserialize)] 5 | pub struct Stark101Commitment { 6 | pub trace_root: [u8; 32], 7 | pub cp_roots: Vec<[u8; 32]>, 8 | } 9 | 10 | #[derive(Debug, Clone, Serialize, Deserialize)] 11 | pub struct Stark101Decommitment<F: IsField> { 12 | pub evals: Vec<FieldElement<F>>, 13 | pub paths: Vec<Vec<[u8; 32]>>, 14 | } 15 | 16 | /// A Stark101 proof, based on [this video](https://www.youtube.com/watch?v=CxP28qM4tAc) at 11:15. 17 | /// 18 | /// - `commitment`: the commitment to the proof, which includes the trace root and the composition polynomial roots. 19 | /// - `decommitments`: the decommitments to the proof, which includes the evaluations of the trace and composition 20 | /// polynomial at the given index and their sibling along with Merkle authentication paths, for each query. 21 | #[derive(Debug, Clone, Serialize, Deserialize)] 22 | pub struct Stark101Proof<F: IsField> { 23 | pub commitment: Stark101Commitment, 24 | pub decommitments: Vec<Stark101Decommitment<F>>, 25 | } 26 | 27 | impl<F: IsField> Stark101Proof<F> where Stark101Proof<F>: Serialize { 28 | pub fn write_to_file(&self) -> String { 29 | let proof_str = serde_json::to_string(&self).expect("should serialize"); 30 | let proof_filepath = std::env::current_dir().expect("should get current dir"); 31 | let proof_filepath = if proof_filepath.ends_with("lambda-0b10") { 32 | proof_filepath.join("snarks/stark101/proof.json") 33 | } else { 34 | proof_filepath.join("proof.json") 35 | }; 36 | 37 | let path_str = proof_filepath.to_str().unwrap().to_string(); 38 | std::fs::write(proof_filepath, proof_str).expect("unable to write"); 39 | 40 | path_str 41 | } 42 | } 43 | --------------------------------------------------------------------------------