115 |
116 | );
117 | }
118 | };
119 |
--------------------------------------------------------------------------------
/src/components/Nav.tsx:
--------------------------------------------------------------------------------
1 | import {
2 | Box,
3 | Divider,
4 | Drawer,
5 | DrawerBody,
6 | DrawerContent,
7 | IconButton,
8 | Link,
9 | Menu,
10 | MenuButton,
11 | Stack,
12 | Text,
13 | useDisclosure,
14 | Wrap,
15 | WrapItem
16 | } from '@chakra-ui/react';
17 | import { FC } from 'react';
18 | import NextLink from 'next/link';
19 |
20 | import { CloseIcon, HamburgerIcon } from './UI/icons';
21 |
22 | import { isLastItem } from '../utils';
23 |
24 | import { NAV_LINKS } from '../constants';
25 |
26 | export const Nav: FC = () => {
27 | const { isOpen, onOpen, onClose } = useDisclosure();
28 |
29 | return (
30 | <>
31 |
36 |
37 | {NAV_LINKS.map(({ href, text }, idx) => (
38 |
39 |
40 |
46 | {text}
47 |
48 |
49 |
50 | ))}
51 |
52 |
53 |
54 |
55 |
56 |
78 |
79 |
80 |
81 |
82 |
83 |
84 | }>
85 | {NAV_LINKS.map(({ href, text }) => (
86 |
87 |
93 |
94 | {text}
95 |
96 |
97 |
98 | ))}
99 |
100 |
101 |
102 |
103 |
104 |
105 | >
106 | );
107 | };
108 |
--------------------------------------------------------------------------------
/src/pages/events.tsx:
--------------------------------------------------------------------------------
1 | import { Heading, Stack } from '@chakra-ui/react';
2 | import type { NextPage } from 'next';
3 |
4 | import { Event, PageMetadata } from '../components/UI';
5 |
6 | const Events: NextPage = () => {
7 | return (
8 | <>
9 |
13 |
14 |
15 |
16 | Events
17 |
18 |
19 |
20 |
25 | Ethereum is increasingly reliant on cryptography. To date we actively use hash functions, ECDSA signatures, and BLS signatures, and we will be using polynomial commitments after the Cancun-Deneb upgrade.
26 |
27 | This workshop highlights the current and future suggestions for cryptographic uses and questions how best to ensure Ethereum remains resilient to cryptographic attacks. We will cover a large spectrum of topics, such as quantum resilience, long term privacy assurance, threshold cryptography and fuzzing mindsets. We will hear from top researchers and practitioners both inside and outside of the Ethereum ecosystem.
28 |
29 |
30 |
35 | Between April 28th and 30th, 2023, the Ethereum Foundation invited a group of
36 | researchers to conduct an initial analysis of the candidate sequential function MinRoot.
37 | The purpose of this gathering was to collectively delve into the intricacies of MinRoot
38 | and assess its potential implications for the Ethereum ecosystem. Preliminary analysis
39 | was conducted by Gaetan Leurent, Maria Naya Plasencia, and Stefano Tessaro. At the
40 | event, the researchers were divided into three groups, each tasked with different
41 | aspects of MinRoot's evaluation. The resulting report serves as a comprehensive joint
42 | summary, presenting the culmination of the researchers' intensive efforts during the
43 | event.{' '}
44 |
45 | Report (PDF, 18 September 2023)
46 |
47 | .
48 |
49 |
54 | this workshop brings the most interesting and challenging open cryptographic questions
55 | that Ethereum, Filecoin and other blockchain systems face, to the attention of academia.
56 | We will cover a large spectrum of research topics, such as vector commitments, SNARKs,
57 | shuffles, authenticated data structures and more. We will start the day with an update
58 | on the problems discussed at last year's workshop.
59 |
60 |
61 |
66 | this workshop brought the most interesting and challenging open cryptographic questions
67 | that Ethereum faces to the attention of academia. We covered a large spectrum of
68 | research topics, such as multisignatures, commitments, verifiable delay functions,
69 | secure computation, zk-friendly hash functions and more.
70 |
71 |
72 |
73 | >
74 | );
75 | };
76 |
77 | export default Events;
78 |
--------------------------------------------------------------------------------
/src/pages/blog/index.tsx:
--------------------------------------------------------------------------------
1 | import fs from 'fs';
2 | import matter from 'gray-matter';
3 | import { Heading, Stack } from '@chakra-ui/react';
4 | import type { GetStaticProps, NextPage } from 'next';
5 | // import TweetEmbed from 'react-tweet-embed';
6 |
7 | import { ExternalPost, InternalPost, PageMetadata } from '../../components/UI';
8 |
9 | import { getParsedDate, sortByDate } from '../../utils';
10 |
11 | import { MarkdownPost } from '../../types';
12 | import { POSTS_DIR } from '../../constants';
13 |
14 | export const getStaticProps: GetStaticProps = async context => {
15 | // get list of files from the posts folder
16 | const files = fs.existsSync(POSTS_DIR) ? fs.readdirSync(POSTS_DIR) : [];
17 |
18 | // get frontmatter & slug from each post
19 | const posts = files.map(fileName => {
20 | const slug = fileName.replace('.md', '');
21 | const readFile = fs.readFileSync(`${POSTS_DIR}/${fileName}`, 'utf-8');
22 | const { data: frontmatter } = matter(readFile);
23 |
24 | return {
25 | slug,
26 | frontmatter
27 | };
28 | });
29 |
30 | // return the pages static props
31 | return {
32 | props: {
33 | posts
34 | }
35 | };
36 | };
37 |
38 | interface Props {
39 | posts: MarkdownPost[];
40 | }
41 |
42 | // add here the list of external blog posts, with title, date and link
43 | const externalLinks = [
44 | {
45 | title: 'A Universal Verification Equation for Data Availability Sampling',
46 | date: '2022-08-04',
47 | link: 'https://ethresear.ch/t/a-universal-verification-equation-for-data-availability-sampling/13240'
48 | },
49 | {
50 | title: 'Whisk: A practical shuffle-based SSLE protocol for Ethereum',
51 | date: '2022-01-13',
52 | link: 'https://ethresear.ch/t/whisk-a-practical-shuffle-based-ssle-protocol-for-ethereum/11763'
53 | },
54 | {
55 | title: 'Introducing Bandersnatch: a fast elliptic curve built over the BLS12-381 scalar field',
56 | date: '2021-06-29',
57 | link: 'https://ethresear.ch/t/introducing-bandersnatch-a-fast-elliptic-curve-built-over-the-bls12-381-scalar-field/9957'
58 | },
59 | {
60 | title: 'Inner Product Arguments',
61 | date: '2021-06-27',
62 | link: 'https://dankradfeist.de/ethereum/2021/07/27/inner-product-arguments.html'
63 | },
64 | {
65 | title: 'PCS multiproofs using random evaluation',
66 | date: '2021-06-18',
67 | link: 'https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html'
68 | },
69 | {
70 | title: 'VDF Proving with SnarkPack',
71 | date: '2020-07-16',
72 | link: 'https://ethresear.ch/t/vdf-proving-with-snarkpack/10096/1'
73 | },
74 | {
75 | title: 'KZG polynomial commitments',
76 | date: '2020-06-16',
77 | link: 'https://dankradfeist.de/ethereum/2020/06/16/kate-polynomial-commitments.html'
78 | }
79 | ];
80 |
81 | const Blog: NextPage<Props> = ({ posts }) => {
82 | const internalPosts = posts.map(post => {
83 | //extract slug and frontmatter
84 | const { slug, frontmatter } = post;
85 | //extract frontmatter properties
86 | const { title, date } = frontmatter;
87 | const parsedDate = getParsedDate(date);
88 |
89 | //JSX for individual blog listing
90 | return ;
91 | });
92 |
93 | const externalPosts = externalLinks.map(({ date, link, title }) => (
94 |
95 | ));
96 |
97 | return (
98 | <>
99 |
103 |
104 |
105 |
106 | Blog
107 |
108 |
109 | {internalPosts.concat(externalPosts).sort(sortByDate)}
110 |
111 | {/*
112 |
113 |
114 |
115 |
116 |
117 | */}
118 |
119 | >
120 | );
121 | };
122 |
123 | export default Blog;
124 |
--------------------------------------------------------------------------------
/src/bounties-data-source/mimc-hash-challenge.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'MiMC Hash Challenge Bounty'
3 | description: 'Rewards for finding collisions in MiMCSponge, a sponge construction instantiated with MiMC-Feistel over a prime field, targeting 128-bit and 80-bit security.'
4 | ---
5 |
6 | The [Ethereum Foundation](https://ethereum.org/en/) and [Protocol Labs](https://protocol.ai/) are offering rewards for finding collisions in MiMCSponge, a [sponge construction](https://en.wikipedia.org/wiki/Sponge_function) instantiated with MiMC-Feistel over a prime field, targeting 128-bit and 80-bit security, on one of two fields described below.
7 |
8 | ## Introduction
9 |
10 | In 2017 Ethereum added support for BN254, a pairing-friendly elliptic curve, via the [Byzantium hard-fork](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-609.md), making it possible to verify SNARKs in a smart contract. Many applications use hashes both inside SNARKs and in smart contracts, calling for a hash function that is efficient in both cases.
11 |
12 | Protocol Labs are using BLS12-381, a pairing-friendly elliptic curve introduced by the ECC team.
13 |
14 | MiMC was initially introduced in a [paper from 2016](https://eprint.iacr.org/2016/492.pdf) as a cryptographic primitive with low multiplicative complexity, making it attractive for SNARKs such as [Groth16](https://eprint.iacr.org/2016/260.pdf). One particular use of interest is a hash function based on a sponge construction instantiated with the MiMC-Feistel permutation over a prime field.
15 |
16 | While more low-multiplicative-complexity hash functions have been published since, MiMC is the earliest of the bunch and is already used in some applications on Ethereum.
17 |
18 | ## Challenge Details
19 |
20 | Rewards will be given for the following results:
21 |
22 | | Result | Reward |
23 | | ------------------------------------------------------------------------------------------ | ------- |
24 | | Collisions on the proposed 220 rounds, on either of the fields, targeting 128-bit security | $20,000 |
25 | | Collisions on the proposed 220 rounds, on either of the fields, targeting 80-bit security  | $20,000 |
26 |
27 | ### BN254
28 |
29 | | Parameter | Value |
30 | | ----------- | ----------------------------------------------------------------------------- |
31 | | Field prime | 21888242871839275222246405745257275088548364400416034343698204186575808495617 |
32 | | Rounds | 220 |
33 | | Exponent | 5 |
34 | | r | 1 |
35 | | c | 1 |
36 |
37 | ### BLS12-381
38 |
39 | | Parameter | Value |
40 | | ----------- | ----------------------------------------------------------------------------- |
41 | | Field prime | 52435875175126190479447740508185965837690552500527637822603658699938581184513 |
42 | | Rounds | 220 |
43 | | Exponent | 5 |
44 | | r | 1 |
45 | | c | 1 |
46 |
47 | ## Reference code
48 |
49 | Reference code for MiMCSponge on BN254 exists in the [circomlib](https://github.com/iden3/circomlibjs/blob/5164544558570f934d72d40c70779fc745350a0e/src/mimcsponge.js) code base, where the constants for the hash are generated using [this code](https://github.com/iden3/circomlibjs/blob/5164544558570f934d72d40c70779fc745350a0e/src/mimcsponge_printconstants.js). Participants are also encouraged to examine the [MiMCSponge circuit code](https://github.com/iden3/circomlib/blob/master/circuits/mimcsponge.circom), the [MiMC-Feistel EVM bytecode](https://github.com/iden3/circomlibjs/blob/5164544558570f934d72d40c70779fc745350a0e/src/mimcsponge_gencontract.js) and the MiMCSponge Solidity code. Rewards for significant bugs in these may also be offered.
50 |
51 | ## Submissions
52 |
53 | Submissions should be sent to [mimc-challenge@ethereum.org](mailto:mimc-challenge@ethereum.org), and rewards will be given in USD, ETH or DAI. Submissions cannot be anonymous.
54 |
--------------------------------------------------------------------------------
/src/pages/team.tsx:
--------------------------------------------------------------------------------
1 | import { Heading, Link, Stack, Text } from '@chakra-ui/react';
2 | import type { NextPage } from 'next';
3 |
4 | import { PageMetadata } from '../components/UI';
5 |
6 | const Team: NextPage = () => {
7 | return (
8 | <>
9 |
13 |
14 |
15 |
16 | Team
17 |
18 |
19 |
20 | Meet our team of world-leading cryptography researchers. It goes without saying that we
21 | are all interested in cryptography and blockchains. We also have more specific interests
22 | ranging from cryptanalysis to zero-knowledge proofs to post-quantum cryptography.
23 |
24 |
25 |
26 |
27 |
28 |
34 | Dankrad Feist.
35 | {' '}
36 | Vector commitments; Verkle trees; MPCs; and zero-knowledge proofs.
37 |
38 |
39 |
40 |
41 |
42 |
48 | Gottfried Herold.
49 | {' '}
50 | Public key cryptanalysis.
51 |
52 |
53 |
54 |
55 |
56 |
62 | George Kadianakis.
63 | {' '}
64 | Shuffling ZKPs; polynomial commitments; protocol design and implementation.
65 |
66 |
67 |
68 |
69 |
70 |
76 | Dmitry Khovratovich.
77 | {' '}
78 | Symmetric crypto design; cryptanalysis of schemes and protocols; zero-knowledge proofs
79 | and circuits; verifiable delay functions; and privacy and anonymity.
80 |
81 |
82 |
83 |
84 |
85 |
91 | Antonio Sanso.
92 | {' '}
93 | Isogenies; elliptic curves; public key cryptography; cryptanalysis.
94 |
95 |
96 |
97 |
98 |
99 |
105 | Benedikt Wagner.
106 | {' '}
107 | Distributed hash tables; data availability sampling; variants of digital signatures.
108 |
109 |
110 |
111 |
112 |
113 |
119 | Arantxa Zapico.
120 | {' '}
121 | Zero-knowledge proofs; lookup arguments, vector commitments.
122 |
123 |
124 |
125 |
126 |
127 | >
128 | );
129 | };
130 |
131 | export default Team;
132 |
--------------------------------------------------------------------------------
/src/bounties-data-source/zk-hash.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'ZK Hash Function Cryptanalysis Bounties'
3 | description: 'Help us understand the security of new hash functions better.'
4 | ---
5 |
6 | ## Terms
7 |
8 | **Task:** find $X1,X2,Y1,Y2$ such that $\displaystyle Perm(X1,X2,0)=(Y1,Y2,0)$
9 |
10 | where $Perm$ is the inner sponge permutation (bijective mapping) of each hash function in the challenge list.
11 |
12 | - Solutions should be sent to [Dmitry Khovratovich](mailto:dmitry.khovratovich@ethereum.org) before November 30th 2022.
13 | - First come first win.
14 | - Within 1 month of submission, the authors should provide a technical report describing the attack, to be released to the public domain by December 1st 2022 at the latest. The code should also be made public before this date.
15 | - **Total Bounty Budget:** $200,000 USD.
16 | - Parameters are fixed on November 23rd 2021.
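
To make the task statement above concrete, here is a minimal sketch of a solution checker. The `perm` argument stands in for the inner permutation of whichever instance from the list below is being attacked; the types and names are illustrative assumptions, not part of the official tooling.

```typescript
// Hedged sketch: verify a claimed solution Perm(X1, X2, 0) = (Y1, Y2, 0).
// `perm` is a placeholder for the permutation of the attacked instance;
// the real permutations live in the reference implementations linked below.
type FieldElement = bigint;
type State = [FieldElement, FieldElement, FieldElement];

function checkSolution(
  perm: (s: State) => State,
  x1: FieldElement,
  x2: FieldElement,
  y1: FieldElement,
  y2: FieldElement
): boolean {
  const [o1, o2, o3] = perm([x1, x2, 0n]);
  // The capacity element must be zero both at the input and at the output.
  return o1 === y1 && o2 === y2 && o3 === 0n;
}
```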
17 |
18 | ## Rescue Prime
19 |
20 | [Design spec.](https://www.esat.kuleuven.be/cosic/publications/article-3259.pdf)
21 |
22 | - $p=18446744073709551557 \text{\textasciitilde} 2^{64}$
23 | - $m=3$
24 | - $alpha=3$
25 | - Number of rounds: $N$
26 | - Brute force attack complexity: $2^{64}$
27 |
28 | We expect a variant with $s$ bits of security to withstand attacks of complexity up to $2^{1.5s}$ in time (function calls) and memory.
29 |
30 | [Reference implementation and bounty instances.](https://extgit.iaik.tugraz.at/krypto/zkfriendlyhashzoo/-/tree/master/bounties/src/rescue_prime)
31 |
32 | | Category | Parameters | Security Level (bits) | Bounty |
33 | | -------- | ----------------- | --------------------- | ---------- |
34 | | ~~Easy~~ | $\sout{N=4, m=3}$ | ~~25~~ | ~~$2,000~~ |
35 | | Easy | $N=6, m=2$ | 25 | $4,000 |
36 | | Medium | $N=7, m=2$ | 29 | $6,000 |
37 | | Hard | $N=5, m=3$ | 30 | $12,000 |
38 | | Hard | $N=8, m=2$ | 33 | $26,000 |
39 |
40 | ## Feistel-MiMC
41 |
42 | [Design spec.](https://eprint.iacr.org/2016/492.pdf)
43 |
44 | - $p=18446744073709551557 \text{\textasciitilde} 2^{64}$
45 | - $alpha=3$
46 | - **Task:** find $X,Y$ such that $Feistel\text{\textendash}MiMC(X,0)=(Y,0)$
47 | - Number of rounds: $r$
48 | - Brute force attack complexity: $2^{64}$
49 |
50 | We expect a variant with $s$ bits of security to withstand attacks of complexity up to $2^{2s}$ in time (function calls) and memory.
51 |
52 | The initial parameters were broken and were replaced.
53 |
54 | [Reference implementation and bounty instances.](https://extgit.iaik.tugraz.at/krypto/zkfriendlyhashzoo/-/tree/master/bounties/src/feistel_mimc)
55 |
56 | | Category | Parameters | Security Level (bits) | Bounty |
57 | | -------- | ----------------- | --------------------- | ---------- |
58 | | ~~Easy~~ | $\sout{N=4, m=3}$ | ~~25~~ | ~~$2,000~~ |
59 | | Easy | $N=6, m=2$ | 25 | $4,000 |
60 | | Medium | $N=7, m=2$ | 29 | $6,000 |
61 | | Hard | $N=5, m=3$ | 30 | $12,000 |
62 | | Hard | $N=8, m=2$ | 33 | $26,000 |
63 |
64 | ## Poseidon
65 |
66 | [Design spec.](https://eprint.iacr.org/2019/458.pdf)
67 |
68 | - $p=18446744073709551557 \text{\textasciitilde} 2^{64}$
69 | - $d=3$
70 | - $t=3$
71 | - Number of full rounds: $RF=8$
72 | - Number of partial rounds $RP$ varies (see below)
73 | - Brute force attack complexity: $2^{64}$
74 |
75 | We expect a variant with $s$ bits of security to withstand attacks of complexity up to $2^{s+37}$ in time (function calls) and memory.
76 |
77 | The initial parameters were broken and were replaced.
78 |
79 | [Reference implementation and bounty instances.](https://extgit.iaik.tugraz.at/krypto/zkfriendlyhashzoo/-/tree/master/bounties/src/poseidon)
80 |
81 | | Category | Parameters | Security Level (bits) | Bounty |
82 | | ---------- | -------------- | --------------------- | ---------- |
83 | | ~~Easy~~ | $\sout{RP=3}$ | ~~8~~ | ~~$2,000~~ |
84 | | ~~Easy~~ | $\sout{RP=8}$ | ~~16~~ | ~~$4,000~~ |
85 | | ~~Medium~~ | $\sout{RP=13}$ | ~~24~~ | ~~$6,000~~ |
86 | | Hard | $RP=19$ | 32 | $12,000 |
87 | | Hard | $RP=24$ | 40 | $26,000 |
88 |
89 | ## Reinforced Concrete
90 |
91 | [Design spec.](https://eprint.iacr.org/2021/1038.pdf)
92 |
93 | - Number of layers as in the original design
94 | - Different prime field
95 | - The best attack we have found for these variants is exhaustive search.
96 | - Groebner basis challenges might be declared additionally.
97 |
98 | We expect a variant with $s$ bits of security to withstand attacks of complexity up to $2^{2s}$ in time (function calls) and memory.
99 |
100 | [Decomposition and alpha/beta values.](https://hackmd.io/l2JT8AQITJ2xRZpGErPnzA#Decomposition-parameters)
101 |
102 | [Reference implementation and bounty instances.](https://extgit.iaik.tugraz.at/krypto/zkfriendlyhashzoo/-/tree/master/bounties/src/reinforced_concrete)
103 |
104 | | Category | Parameters | Security Level (bits) | Bounty |
105 | | -------- | ------------------------ | --------------------- | ------- |
106 | | Easy | $p=281474976710597$ | 24 | $4,000 |
107 | | Hard | $p=72057594037926839$ | 28 | $6,000 |
108 | | Hard | $p=18446744073709551557$ | 32 | $12,000 |
109 |
110 | ## Contact
111 |
112 | [dmitry.khovratovich@ethereum.org](mailto:dmitry.khovratovich@ethereum.org)
113 |
--------------------------------------------------------------------------------
/src/posts/algorand-hash-analysis.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'Cryptanalysis of the Algorand Subset-Sum Hash Function (UPDATED 25th June 2022)'
3 | description: 'K-tree attack on the Algorand hash function'
4 | author: 'Dmitry Khovratovich'
5 | date: '2022-06-25'
6 | ---
7 |
8 | ## 1. Introduction
9 |
10 | Algorand has proposed [a compression function](https://github.com/algorand/go-sumhash/blob/master/spec/sumhash-spec.pdf) which is lattice-ZKP friendly and which they plan to use inside the Merkle-Damgard framework.
11 |
12 | The public constants of the function are a matrix $A\in Z_q^{n\times m}$ where $q=2^{64}, n=8, m=1024$. Let us denote the columns of $A$ by $A_i$, $0\leq i < m$. The compression function maps an $m$-bit input $\mathbf{x}=(x_0,x_1,\ldots,x_{m-1})$ to
$$
f_A(\mathbf{x})=\sum_{i} x_i A_i \bmod q,
$$
which can be viewed as a $64n=512$-bit string. Note that $f_A$ is linear in $\mathbf{x}$.

## 2. Generalized birthday attack

In the generalized birthday problem one is given $2^k$ lists $L_1,L_2,\ldots,L_{2^k}$ of random $n$-bit strings and has to pick one element from each list so that their XOR is zero. For two lists ($k=1$) this is the classic birthday problem, solvable in $2^{n/2}$ time by sorting. Wagner showed that for $k > 1$ one can do better than $2^{n/2}$, concretely in $2^{\frac{n}{k+1}}$ time. The idea can be illustrated for $k=2$:
32 | * Find $2^{n/3}$ *partial collisions* between $L_1$ and $L_2$, i.e. strings that collide in the first $n/3$ bits. This can be done in $2^{n/3}$ time by taking $2^{n/3}$ elements from both lists and sorting them by the first bits. Put the collisions and their components into a new list $X = \{(l_1\oplus l_2,l_1,l_2)\}$.
33 | * Repeat the same for lists $L_3$ and $L_4$ and obtain list $Y$.
34 | * Find tuples $(x,l_1,l_2)\in X$ and $(y,l_3,l_4)\in Y$ such that $x=y$. This is feasible since both $x$ and $y$ are zero in the first $n/3$ bits so we need only $2^{n/3}$ tuples in both lists to find a collision on the remaining $2n/3$ bits.
35 |
36 | 
37 |
38 | The same approach works for bigger $k$. Note though that there is no such algorithm for 3 lists $L_1,L_2,L_3$, and the best attack is still $O(2^{n/2})$.
39 |
40 | Finally, note that the process is memory-heavy for $k>1$. This fact is exploited by the memory-hard proof-of-work [Equihash](https://eprint.iacr.org/2015/946.pdf), used in [Zcash](https://z.cash/).
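
To make the merging step above concrete, here is a small illustrative sketch (my own, not code from the analysis) that finds partial collisions between two lists by bucketing on the top $n/3$ bits. It uses toy 48-bit values and small lists so that it actually runs; the real attack operates on far larger lists.

```typescript
// Toy sketch of one merging step of the k-tree attack: given two lists of
// n-bit values, return the XORs of all pairs that collide on the first
// (most significant) n/3 bits, together with their components.
function mergeOnPrefix(
  l1: bigint[],
  l2: bigint[],
  n: number // bit length of the values
): Array<[bigint, bigint, bigint]> {
  const prefixBits = Math.floor(n / 3);
  const shift = BigInt(n - prefixBits);
  // Bucket the first list by its n/3-bit prefix (hashing instead of sorting).
  const buckets = new Map<bigint, bigint[]>();
  for (const a of l1) {
    const key = a >> shift;
    const bucket = buckets.get(key);
    if (bucket) bucket.push(a);
    else buckets.set(key, [a]);
  }
  const out: Array<[bigint, bigint, bigint]> = [];
  for (const b of l2) {
    for (const a of buckets.get(b >> shift) ?? []) {
      out.push([a ^ b, a, b]); // a ^ b is zero on the first n/3 bits
    }
  }
  return out;
}

// Example with small random 48-bit values: two lists of 2^16 elements yield
// roughly 2^16 partial collisions on the 16-bit prefix, as expected.
const rand48 = () => BigInt(Math.floor(Math.random() * 2 ** 48));
const L1 = Array.from({ length: 1 << 16 }, rand48);
const L2 = Array.from({ length: 1 << 16 }, rand48);
console.log(mergeOnPrefix(L1, L2, 48).length, 'partial collisions');
```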
41 |
42 | ## 3. Attack on the Algorand hash
43 |
44 | The compression function described in Section 1 is vulnerable to a modification of the generalized birthday attack. Let us find a collision:
45 | $$
46 | f_A(\mathbf{x})=f_A(\mathbf{y})
47 | $$
48 |
49 | We do as follows:
50 | * Split the $\mathbf{x},\mathbf{y}$ into 16 64-bit chunks $\mathbf{x}_1,\mathbf{x}_2,\ldots,\mathbf{x}_{16},\mathbf{y}_1,\mathbf{y}_2,\ldots,\mathbf{y}_{16}$.
51 | * Interpret the output $f_A(\mathbf{x})$ as a 6-tuple $(a_1,a_2,a_3,a_4,a_5,a_6)$ where $a_1$ is 32-bit and all other $a_i$ are 96-bit.
52 | * For all $2^{64+64}$ pairs $(\mathbf{x}_{i},\mathbf{y}_{i}), i<8$, find $2^{64+64-32}=2^{96}$ collisions for $f_A$ in $a_1$, i.e. solutions for
53 | $$
54 | f_A(\mathbf{x}_{i})+f_A(\mathbf{y}_{i})=(0,*,*,*,*,*)
55 | $$
56 | spending $2^{96}$ time and space for each $i$ using list sorting for birthday paradox. Store all solutions in lists $L_i$.
57 | * For each $j<8$ find partial collisions between $L_{2j+1}$ and $L_{2j+2}$ in $a_2$:
58 | $$
59 | \underbrace{z}_{\in L_{2j+1}}+\underbrace{z'}_{\in L_{2j+2}} = (0,0,*,*,*,*)
60 | $$
61 | Note that since both $z$ and $z'$ are 0 in $a_1$ they sum to 0 in it. The number of partial collisions between $L_{2j+1}$ and $L_{2j+2}$ is $2^{96+96-96}=2^{96}$. Store the results in 8 lists $L_k$.
62 | * We now find $2^{96}$ partial collisions between pairs of lists in $a_3$ and obtain 4 lists. Then proceed the same way with $a_4$ and get two lists.
63 | * Find a single collision between the two lists in $a_5$ and $a_6$ at cost $2^{96}$. It yields
64 |
65 | $$
66 | \sum f_A(\mathbf{x}_{i})+f_A(\mathbf{y}_i)=0\;\Leftrightarrow\;
67 | f_A(\mathbf{x}) = f_A(\mathbf{y})
68 | $$
69 |
70 | i.e. a collision.
71 |
72 | Overall the collision attack costs $2^{98}$ time (it is not necessary to work on all the lists simultaneously) and thus the overall security of the subset sum hash is at most 98 bits in the time cost model.
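
For concreteness, here is a small illustrative sketch (again my own, not code from the analysis) of the output re-interpretation used in the steps above: the 512-bit output of $f_A$ is read as one 32-bit chunk $a_1$ followed by five 96-bit chunks $a_2,\ldots,a_6$ (since $32 + 5\cdot 96 = 512$). Which end of the output is taken as $a_1$ is an arbitrary choice here.

```typescript
// Split a 512-bit value into the 6-tuple (a1, a2, ..., a6):
// a1 is the top 32 bits, a2..a6 are 96 bits each (32 + 5 * 96 = 512).
function toTuple(h: bigint): bigint[] {
  const parts: bigint[] = [(h >> 480n) & 0xffffffffn];
  for (let i = 0; i < 5; i++) {
    parts.push((h >> BigInt(384 - 96 * i)) & ((1n << 96n) - 1n));
  }
  return parts;
}
```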
73 |
74 |
75 | # UPDATE (25 June 2022)
76 |
77 | ## 4. Bug in Section 3 and its fix
78 |
79 | The Algorand team has kindly reported a flaw in Section 3 to us. Concretely, if one merges $f_A(\mathbf{x}_i)$ and $f_A(\mathbf{y}_i)$ in the first step of the attack then, due to the linearity of $f_A$, the number of possible pairs is $3^{64}$ rather than $4^{64}$, which makes the search for $2^{96}$ partial collisions more expensive.
80 |
81 | The simple fix to this flaw is to merge instead $f_A(\mathbf{x}_1)$ with $f_A(\mathbf{x}_2)$, then $f_A(\mathbf{x}_3)$ with $f_A(\mathbf{x}_4)$, so that the inputs activate different scalars in $A$. When repeating for $f_A(\mathbf{y}_i)$, one should target different collision bits to avoid having $\mathbf{x}=\mathbf{y}$. The rest of the attack remains the same with the same complexity estimate.
82 |
83 |
84 | ## 5. Algorand internal analysis
85 |
86 | In response to our original post, the Algorand team has published an [internal analysis](https://github.com/algorand/go-sumhash/blob/3ba719a3de9ed604040aa81c0288aa2feda8ebae/cryptanalysis/merging-trees-ss.pdf). The report investigates the complexity of the Wagner attack implemented on a quantum computer. For collision search the report estimates the quantum attack complexity as $2^{108}$ time and $2^{40}$ memory. The same document also gives the complexity of the classical Wagner attack as $2^{107}$ time and $2^{85}$ memory, which is a variation (another point on the time-area tradeoff curve) of our attack in Section 3, assuming our bugfix above.
87 |
88 |
89 | ## Acknowledgements
90 |
91 | We thank Chris Peikert for fruitful discussions that have led to the attack refinements.
92 |
93 |
94 |
95 |
--------------------------------------------------------------------------------
/src/posts/zkalc.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'zkalc: a cryptographic calculator'
3 | description: 'zkalc: a cryptographic calculator!'
4 | author: 'George Kadianakis, Michele Orrù'
5 | date: '2023-01-16'
6 | ---
7 |
8 |
9 |
10 |
11 |
12 | [zkalc](https://zka.lc) helps you calculate how much time cryptographic operations take on a real computer.
13 |
14 | zkalc was created to instantly answer questions like *"How quickly can I run an MSM of size $2^{18}$ and compute $120$ pairings on an AWS C5 machine?"* or *"Which curve can perform DH in less than $10$ microseconds?"*.
15 |
16 | Tune in and [play with it](https://zka.lc)!
17 |
18 | ## Why?
19 |
20 | Cryptographers tend to be good at cryptography but they can be quite bad at estimating the time it takes a computer to run their schemes.
21 |
22 |
23 |
24 | We hope that [zkalc](https://zka.lc) can help shorten the gap between cryptography and practice:
25 |
26 | - Cryptographers can use the simple UX to learn how fast their new fancy scheme runs on various machines without wasting computer cycles and CO2;
27 | - Protocol designers can easily tune the parameters of their protocol depending on their requirements
28 |
29 | We designed zkalc to be easy to use but also *easy to extend*. Writing new types of benchmarks, or adding fresh data to the zkalc website [is easy](https://zka.lc/about).
30 |
31 | ## How does [zkalc](https://zka.lc) work?
32 |
33 | Let's now go over our benchmarking pipeline and how we derive our results. In short:
34 |
35 | 1. For each supported operation, we run benchmarks that measure its performance. We use [`criterion.rs`](https://github.com/bheisler/criterion.rs) to take multiple samples (at least 10, even for large computations like MSMs), and then select the average.
36 | 2. We collect benchmark results inside the `perf/data/` [directory](https://github.com/asn-d6/zkalc/tree/main/perf/data) and make them freely available for anyone to use.
37 | 3. For each operation, we [fit a function](https://en.wikipedia.org/wiki/Curve_fitting) to its benchmark results. We use linear interpolation inside the benchmark bounds and least squares regression outside the benchmarking bounds.
38 | 4. When a user queries zkalc for an operation of size $n$, we estimate its running time using the fitted function.
39 |
40 | In this blog post we will go deeper into the above process. We will mainly focus on the function fitting, but if you are interested in the entire story of how our benchmarks work, or if you want to see the interactive version of the graphs below, please visit the [zkalc methodology page](https://zka.lc/methodology).
41 |
42 | ### Running benchmarks
43 |
44 | For every supported operation, we write benchmarks and run them on multiple platforms. We then store the results in the [`perf/` directory](https://github.com/asn-d6/zkalc/tree/main/perf/data) of zkalc.
45 |
46 | ### Answering user queries
47 |
48 | Now we have benchmark data for every operation in the `perf/` directory. The next step is to fit a function $f(x)$ to every operation, so that when a user queries us for an operation with arbitrary size $n$, we can answer it by evaluating $f(n)$.
49 |
50 | For simple operations like basic scalar multiplication and field addition (which are not amortized), we consider them to be sequential computations. That is, if a single scalar multiplication takes $x$ seconds, $n$ such operations will take $n \cdot x$ seconds. This results in a simple linear function $f(n) = n \cdot x$.
51 |
52 | More complicated operations like MSMs and pairing products are amortized and their performance doesn't follow a simple linear curve.
53 |
54 | For such operations, we [collect benchmark data](https://github.com/asn-d6/zkalc/blob/main/backend/arkworks/benches/bench_arkworks.rs#L52) for various sizes. For example, consider the figure below which displays the benchmark data from a $\mathbb G_1$ MSM operation for sizes from $2$ to $2^{21}$ (both axes are in log scale):
55 |
56 |
57 |
58 |
59 | To answer user queries within the benchmark range, we perform [polynomial interpolation](https://www.youtube.com/watch?v=yQsDxOdn1hk) over the benchmark data.
60 |
61 | That is, for each pair of benchmark data $(x_i, f(x_i))$ and $(x_{i+1}, f(x_{i+1}))$ we trace the line [that goes through both points](https://github.com/asn-d6/zkalc/blob/main/frontend/lib/estimates.js#L26). We end up with a piecewise function that covers the entire benchmark range, as we can see in the figure below:
62 |
63 |
64 |
65 |
66 | For user queries outside of the benchmarking range we [extrapolate](https://en.wikipedia.org/wiki/Extrapolation) via non-linear least squares. To make things more exciting we decided that it should be done... in [Javascript](https://github.com/asn-d6/zkalc/blob/main/frontend/lib/estimates.js) inside your browser.
67 |
68 | In the specific case of MSMs, Pippenger's complexity is [well known](https://jbootle.github.io/Misc/pippenger.pdf) to be asymptotically $O({n} / {\log n})$. Hence, we use least squares to fit the data set to a function $h(x) = \frac{a x + b}{\log x}$ solving for $a, b$.
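
To make the fitting concrete, here is a small sketch in the spirit of (but not identical to) the actual `estimates.js` code linked above. It shows both halves: piecewise linear interpolation inside the benchmarked range, and a closed-form least-squares fit of $h(x) = \frac{ax+b}{\log x}$ for extrapolation. The sample format and function names are illustrative assumptions.

```typescript
// Illustrative sketch (not the actual zkalc frontend code): estimate the
// running time of an operation of size n from benchmark samples.
type Sample = { size: number; nanos: number };

// Inside the benchmarked range: piecewise linear interpolation between the
// two samples bracketing n (samples assumed sorted by size).
function interpolate(samples: Sample[], n: number): number {
  for (let i = 0; i + 1 < samples.length; i++) {
    const lo = samples[i], hi = samples[i + 1];
    if (n >= lo.size && n <= hi.size) {
      const t = (n - lo.size) / (hi.size - lo.size);
      return lo.nanos + t * (hi.nanos - lo.nanos);
    }
  }
  throw new Error('n is outside the benchmarked range');
}

// Outside the range: fit h(x) = (a*x + b) / log2(x) by least squares.
// h is linear in (a, b), so the normal equations have a closed-form solution.
function fitMsm(samples: Sample[]): (n: number) => number {
  let suu = 0, suv = 0, svv = 0, suy = 0, svy = 0;
  for (const { size, nanos } of samples) {
    const u = size / Math.log2(size); // coefficient of a
    const v = 1 / Math.log2(size);    // coefficient of b
    suu += u * u; suv += u * v; svv += v * v;
    suy += u * nanos; svy += v * nanos;
  }
  const det = suu * svv - suv * suv;
  const a = (svv * suy - suv * svy) / det;
  const b = (suu * svy - suv * suy) / det;
  return n => (a * n + b) / Math.log2(n);
}
```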
69 |
70 | Here is an illustration of the extrapolation behavior of $\mathbb G_1$ MSM outside of the benchmarking range (that is, after $2^{21}$):
71 |
72 |
73 |
74 | We do not expect extrapolation to faithfully follow the benchmarks. We believe however that the estimates provide a rough idea of how long an algorithm will take.
75 |
76 | At the end of this process, we end up with a piecewise function for each operation that we can query inside and outside the benchmarking range to answer user queries.
77 |
78 | Do [give zkalc a try](https://zka.lc) and let us know what you think!
79 |
80 | ## Visualizing crypto performance with zkalc
81 |
82 | In the zkalc website, you will also find the [zcharts](http://zka.lc/charts) corner where we visualize all the raw benchmark data we used in the above section.
83 |
84 | We hope that this visual approach will help you grok the benchmark data that zkalc is based on, and also gain a better understanding of the performance variations between different implementations/curves.
85 |
86 | ## A call for help
87 |
88 | [zkalc](https://zka.lc) can only be as useful as the data it provides, and there is lots of room for additional benchmarks. Can you run benchmarks on a large cloud provider? We would love to get in touch and gather benchmarks for [zkalc](https://zka.lc). Do you have access to a beefy GPU prover? We would love to help you run [zkalc](https://zka.lc). Did you just design a new elliptic curve? Benchmark it with [zkalc](https://zka.lc). Are you working on a new crypto library? You guessed it. Adding benchmarks to [zkalc](https://zka.lc) is actually not hard; check [our website for instructions](https://zka.lc/about)!
89 |
90 | In the future, we also want to expand [zkalc](https://zka.lc) to support higher level primitives: from FFTs, to IPAs, to various polynomial commitment and lookup argument schemes. If you want to write benchmarks for any of these, check out our [TODO](https://github.com/asn-d6/zkalc/blob/main/TODO.md) file and please get in touch! :)
91 |
92 | ## Acknowledgements
93 |
94 | Many thanks to [Patrick Armino](https://patrick.wtf) and [Jonathan Xu](https://jonathanxu.com/) for their help with the UX.
95 |
--------------------------------------------------------------------------------
/public/images/ef-logo.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/posts/pq-ssle.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'Towards practical post quantum Single Secret Leader Election (SSLE) - Part 1'
3 | description: 'Discussing a possible post quantum SSLE solution'
4 | author: 'Antonio Sanso'
5 | date: '2022-08-30'
6 | ---
7 |
8 | ## Introduction
9 |
10 | [Single Secret Leader Election](https://eprint.iacr.org/2020/025.pdf) (*SSLE* from now on) is an important problem that the cryptographic community has been actively researching. *SSLE* protocols allow a set of users to elect a leader while ensuring that the identity of the winner remains secret until they decide to reveal themselves.
11 | [Whisk](https://ethresear.ch/t/whisk-a-practical-shuffle-based-ssle-protocol-for-ethereum/11763) is a block proposer election protocol tailored to the Ethereum beacon chain that protects the privacy of proposers. It relies on discrete logarithm assumptions and uses a shuffling approach and NIZK proof of shuffle to prove correctness.
12 | This year [NIST announced](https://csrc.nist.gov/Projects/post-quantum-cryptography/selected-algorithms-2022) its choice for Post-Quantum-Cryptography algorithms that are going to replace the existing public key infrastructure ([Zhenfei Zhang](https://zhenfeizhang.github.io/material/aboutme/) covered this in a [previous blog post](https://crypto.ethereum.org/blog/nist-pqc-standard)).
13 |
14 | In this blog post we are going to analyze a possible Post Quantum analogue of [Whisk](https://ethresear.ch/t/whisk-a-practical-shuffle-based-ssle-protocol-for-ethereum/11763) based on Commutative Supersingular isogenies (CSIDH).
15 |
16 | **N.B.** If you wonder if this solution is affected by the new [devastating attack on SIDH](https://eprint.iacr.org/2022/975.pdf) the answer is **NO**. The Castryck-Decru Key Recovery Attack crucially relies on torsion point information that are not present in CSIDH based solutions.
17 |
18 | ## Whisk's recap
19 |
20 | As mentioned above, [Whisk](https://ethresear.ch/t/whisk-a-practical-shuffle-based-ssle-protocol-for-ethereum/11763) is a proposal to fully implement *SSLE* from `DDH` and `shuffles` (see also section 6 of the [Boneh et al. paper](https://eprint.iacr.org/2020/025.pdf)).
21 | The idea behind this solution is pretty straightforward and neat. Let's list below the key ingredients of the commitment scheme in Whisk (leaving the shuffles aside):
22 |
23 | 1. Alice commits to a random long-term secret `k` using a tuple $(rG,krG)$ (called **tracker**).
24 | 2. Bob randomizes Alice’s **tracker** with a random secret $z$ by multiplying both elements of the tuple: $(zrG,zkrG)$.
25 | 3. Alice proves ownership of her randomized tracker (i.e., opens it) by providing a discrete-log proof of knowledge (`DLOG NIZK`) of a `k` such that $k(zrG)==zkrG$.
26 | 4. Identity binding is achieved by having Alice provide a deterministic commitment $com(k)=kG$ when she registers her **tracker**.
27 | 5. We also use it at registration and when opening the trackers to check that both the tracker and $com(k)$ use the same $k$ using a discrete log equivalence proof (`DLEQ NIZK`).
28 |
29 | Whisk can be implemented in any group where the Decisional Diffie Hellman problem (DDH) is hard. Currently Whisk is instantiated via a commitment scheme in [BLS12-381](https://hackmd.io/@benjaminion/bls12-381).
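
To see the algebra of steps 1-3 in code, here is a toy sketch that uses exponentiation in a multiplicative group modulo a prime in place of scalar multiplication on BLS12-381. The modulus and generator are arbitrary illustrative choices with no real security, and the NIZK proofs are replaced by a plain equality check.

```typescript
// Toy sketch of the Whisk tracker lifecycle (steps 1-3 above), written
// multiplicatively: G^r plays the role of rG. Not a secure instantiation.
const P = 0xffffffffffffffc5n; // toy prime modulus (illustrative only)
const G = 5n;                  // toy generator (illustrative only)

const modPow = (base: bigint, exp: bigint, mod: bigint): bigint => {
  let result = 1n;
  base %= mod;
  for (; exp > 0n; exp >>= 1n) {
    if (exp & 1n) result = (result * base) % mod;
    base = (base * base) % mod;
  }
  return result;
};

type Tracker = { rG: bigint; krG: bigint };

// 1. Alice commits to her long-term secret k with a fresh blinder r.
const commit = (k: bigint, r: bigint): Tracker => ({
  rG: modPow(G, r, P),
  krG: modPow(G, k * r, P)
});

// 2. Bob randomizes the tracker with a secret z (applying z to both elements).
const randomize = ({ rG, krG }: Tracker, z: bigint): Tracker => ({
  rG: modPow(rG, z, P),
  krG: modPow(krG, z, P)
});

// 3. Alice opens the randomized tracker: k applied to zrG must equal zkrG.
//    (In Whisk this check is proven in zero knowledge with a DLOG NIZK.)
const opens = ({ rG, krG }: Tracker, k: bigint): boolean =>
  modPow(rG, k, P) === krG;

const tracker = randomize(commit(7n, 11n), 13n);
console.log(opens(tracker, 7n)); // true
```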
30 |
31 | ## Commutative Supersingular isogenies (CSIDH).
32 |
33 | This section (and the remainder of the blog post) will require some knowledge about elliptic curves and isogeny based cryptography. The general reference on elliptic curves is [Silverman](https://link.springer.com/book/10.1007/978-0-387-09494-6); for a thorough explanation of isogenies we refer to [De Feo](https://arxiv.org/pdf/1711.04062.pdf).
34 |
35 | CSIDH is an isogeny based post quantum key exchange presented at [Asiacrypt 2018](https://doi.org/10.1007/978-3-030-03332-3_15), based on an efficient commutative group action. The idea of using group actions based on isogenies finds its origins in the now well known [1997 paper by Couveignes](https://eprint.iacr.org/2006/291.pdf). Almost 10 years later Rostovtsev and Stolbunov [rediscovered Couveignes's ideas](https://eprint.iacr.org/2006/145.pdf).
36 |
37 | Couveignes in his seminal work introduced the concept of *Very Hard Homogeneous Spaces* (VHHS). A VHHS is a generalization of cyclic groups for which the computational and decisional Diffie-Hellman problem are hard. The exponentiation in the group (or the scalar multiplication if we use additive notation) is replaced by a group action on a set. The main hardness assumption underlying group actions based on isogenies, is that it is hard to invert the group action:
38 |
39 | **Group Action Inverse Problem (GAIP).** Given a curve $E$, with $End(E) = O$, find an ideal $a \subset O$ such that $E = [a]E_0$.
40 |
41 | The GAIP (also known as *vectorization*) might somewhat resemble the discrete logarithm problem, and in this blog post we exploit this analogy to translate the commitment scheme in Whisk to the CSIDH setting.
42 |
43 | ## CSIDH Whisk
44 |
45 | In this section we will show that a 1:1 translation is indeed (almost) easily achievable. The translation from the DLOG setting to VHHS presents one caveat: in this blog post we will focus our attention on the *fraud proof version* of shuffle based *SSLE*. This is also described in the original SSLE paper (see the **Removing NIZKs** paragraph). The reason is that there is currently no known NIZK proof of shuffle based on isogenies. Apart from this, let's see how all the other ingredients can indeed be translated.
46 |
47 | ### Whisk commitment scheme
48 |
49 | The hardness of GAIP gives a natural translation of the Whisk commitment scheme. Alice commits to a random long-term secret $[k]$ using a tuple $([r]E_0,[k][r]E_0)$, where $E_0:y^2 = x^3 + x$ over $F_p$ is the base curve (the equivalent of the generator $G$ in the elliptic curve based solution).
50 | The randomization phase is also trivial: Bob randomizes Alice’s **tracker** with a random secret $[z]$ by applying it to both elements of the tuple: $([z][r]E_0,[z][k][r]E_0)$.
51 |
52 | ### `DDH` and CSIDH
53 |
54 | The next thing to address is ensuring DDH is a hard problem in CSIDH.
55 |
56 | **Group-Action DDH.** The Group-Action DDH assumption holds if the two distributions
57 | $([a]E_0, [b]E_0, [a][b]E_0)$ and $([a]E_0, [b]E_0, [c]E_0)$ are computationally indistinguishable.
58 |
59 | [Castryck et al](CSV20) showed that the DDH problem is easy in ideal-class-group actions when the class number is even. Such groups are therefore unsuited for the above construction. As a countermeasure to their attack, they suggest working with supersingular elliptic curves over $F_p$ for $p \equiv 3 \pmod 4$, which is already the case for CSIDH. In that setting, the Group-Action DDH problem is conjectured to be hard.
60 |
61 | ### `DLOG NIZK` in CSIDH
62 |
63 | A sigma protocol proving knowledge of a solution of a GAIP instance in zero knowledge has been described in the original [Couveignes's paper](https://eprint.iacr.org/2006/291.pdf) and further analyzed in [Stolbunov's PhD thesis](https://ntnuopen.ntnu.no/ntnu-xmlui/bitstream/handle/11250/262577/529395_FULLTEXT01.pdf). Two incarnations of these ideas in the CSIDH setting are [SeaSign](https://eprint.iacr.org/2018/824.pdf) and [CSI-FiSh](https://eprint.iacr.org/2019/498.pdf). The first paper ([SeaSign](https://eprint.iacr.org/2018/824.pdf)) uses *rejection sampling* (a technique successfully employed in lattice based cryptography) to prevent signatures from leaking the private key (a problem that occurs if a sigma protocol is performed naively). The same is achieved in the latter paper ([CSI-FiSh](https://eprint.iacr.org/2019/498.pdf)) by computing the class group of the imaginary quadratic field used in the CSIDH-512 cryptosystem.
64 |
65 | ### `DLEQ NIZK` in CSIDH
66 |
67 | A way to obtain a discrete log equivalence proof (`DLEQ NIZK`) in the CSIDH setting is provided in [Beullens et al.](https://eprint.iacr.org/2020/1323.pdf), section 2.4.
68 |
69 | ## Conclusion
70 |
71 | In this blog post we briefly analyzed a possible replacement of **Whisk** in the Post Quantum setting. We achieved this by employing the commutative supersingular isogeny (CSIDH) setting. We have seen that a direct translation from DLOG to VHHS is indeed possible, with some limitations. The derived Post Quantum Whisk protocol is restricted to the *fraud proof version* due to the lack of a NIZK proof of shuffle in the isogeny setting. The current [zero-knowledge proving system](https://ethresear.ch/t/provable-single-secret-leader-election/7971) is an adaptation of the [Bayer-Groth shuffle argument](http://www0.cs.ucl.ac.uk/staff/J.Groth/MinimalShuffle.pdf), which is currently out of reach for isogeny based cryptography. We hope this blog post stimulates researchers to look into this open problem.
72 |
73 | ## Acknowledgement
74 |
75 | We would like to thank Ward Beullens, Dan Boneh, Luca De Feo and George Kadianakis for fruitful discussions and comments.
76 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://app.netlify.com/sites/cryptography-research/deploys)
2 |
3 |
4 | Ethereum Foundation Cryptography Research
5 |
6 |
7 | The Ethereum Foundation leads research into cryptographic protocols that are useful within
8 | the greater Ethereum community and more generally. Cryptography is a key tool that enables
9 | greater functionality, security, efficiency, and auditability in decentralized settings.
10 | We are currently conducting research into verifiable delay functions, multiparty
11 | computation, vector commitments, zero-knowledge proofs, and more. We have a culture of open
12 | source, and no patents are put on any work that we produce.
13 |
14 | This repository holds the codebase of our website, [crypto.ethereum.org](https://crypto.ethereum.org).
15 |
16 | ## Stack
17 |
18 | The main stack used in the project includes:
19 |
20 | - [Next.js](https://nextjs.org/).
21 | - [TypeScript](https://www.typescriptlang.org/).
22 | - [ChakraUI](https://chakra-ui.com/) as component library.
23 | - [KaTeX](https://katex.org/) to render LaTeX math syntax.
24 |
25 | ## Local development
26 |
27 | The project is bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app), with a custom scaffolding.
28 |
29 | ### Getting Started
30 |
31 | First, run the development server:
32 |
33 | ```bash
34 | npm run dev
35 | # or
36 | yarn dev
37 | ```
38 |
39 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
40 |
41 | You can start editing the page by modifying `pages/index.tsx`. The page auto-updates as you edit the file.
42 |
43 | ## Project Structure
44 |
45 | The following list describes the main elements of the project structure:
46 |
47 | - `public`: contains static assets like fonts and images.
48 | - `src`: contains the source code.
49 | - `components`: React components.
50 | - components with state are directly located inside `/components`.
51 | - `layout`: components used to contain and apply different layouts to different pages.
52 | - `UI`: stateless (functional) components.
53 | - `pages`: includes components that render to pages and [NextJS API Routes](https://nextjs.org/docs/api-routes/introduction).
54 | - `posts`: markdown blog posts.
55 | - `styles`: css stylesheets.
56 | - `global.css`: global stylesheet.
57 | - `theme`: contains the [Chakra UI custom theme](https://chakra-ui.com/docs/styled-system/theming/customize-theme), organized in `foundations` and `components` for better scaling.
58 | - `utils`: utility functions and helpers.
59 | - `constants.ts`: this is the _global_ constants file, containing URLs and lists of elements we use across the site.
60 | - `types.ts`: contains the custom defined TypeScript types and interfaces.
61 |
62 | ## Markdown & LaTex support on blog posts
63 |
64 | ### Markdown
65 |
66 | Support for [GitHub Flavored Markdown](https://github.github.com/gfm/), which is a superset of CommonMark and adds support for additional features like tables.
67 |
68 | ### LaTeX
69 |
70 | The site uses [KaTeX](https://katex.org) to render LaTeX/math in blog posts and inside `/research` publication abstracts. LaTeX-rendering libraries are not 100% compatible with LaTeX yet, so please check the [support table](https://katex.org/docs/support_table.html) if you are having issues with some expression.
71 |
72 | ## How to add a new blog post
73 |
74 | The site supports both _internal_ and _external_ blog posts.
75 |
76 | - **Internal posts**: to add a new one, just create a new markdown (`.md`) file under `src/posts` (make sure first this directory exists, otherwise create it first, under `/src`). The name of the file should follow the [kebab case](https://www.theserverside.com/definition/Kebab-case) convention, as it will be used to generate the url to the post. You also have to add some [Front Matter](https://frontmatter.codes/docs/markdown) metadata, like the post `title`, `author(s)` and `date`, which are required.
77 |
78 | Metadata example:
79 |
80 | ```
81 | ---
82 | title: 'VDF Proving with SnarkPack'
83 | description: 'Some awesome description for social media snippets, under 160 characters'
84 | author: 'Mary Maller'
85 | date: '2022-03-16'
86 | ---
87 | ```
88 |
89 | Post titles should be under 60 characters. [Learn more on title tags](https://moz.com/learn/seo/title-tag).
90 |
91 | Post descriptions should be under 160 characters. [Learn more on meta descriptions](https://moz.com/learn/seo/meta-description).
92 |
93 | - **External posts**: you can also link to an external post from the `/blog` page by appending an object with the required data (`title`, `date`, `link`) to the `externalLinks` list from the `src/pages/blog/index.tsx` file. See the example below:
94 |
95 | ```
96 | const externalLinks = [
97 | {
98 | title: 'Ethereum Merge: Run the majority client at your own peril!',
99 | date: '2022-03-24',
100 | link: 'https://dankradfeist.de/ethereum/2022/03/24/run-the-majority-client-at-your-own-peril.html'
101 | }
102 | ];
103 | ```
104 |
105 | ### How to add images to a local post
106 |
107 | Image files should be placed inside `/public/images/` and the path to the image will be referenced as `/images/${filename}`. For example, we can insert the EF logo in a post by using
108 |
109 | ```
110 | 
111 | ```
112 |
113 | Take into account that images are automatically centered, no need to add extra HTML.
114 |
115 | ### How to add footnotes to a local post
116 |
117 | Follow [this syntax](https://github.blog/changelog/2021-09-30-footnotes-now-supported-in-markdown-fields/).
118 |
119 | ## How to deploy changes successfully
120 |
121 | - **Locally**: **Make sure the site builds** locally, otherwise the build will break and the new version of the site (e.g.: adding a new post) will not be generated. To be sure of this, run the `yarn build` command locally and check that you get no errors.
122 | - **On GitHub**: check that the `Deploy Preview` passes successfully.
123 |
124 | ## Bounties pages
125 |
126 | The source files (`.md`) for the bounties pages are located at `/src/bounties-data-source`. If you need to update the content from a certain bounty, just modify the corresponding file. LaTeX/math is also supported here.
127 |
128 | For a better organization, images used in bounties pages are placed inside `/public/images/bounties/` and the path to the image has to be referenced as `/images/bounties/${filename}` (see `/src/bounties-data-source/rsa/assumptions.md` as an example).
129 |
130 | ## How to add a new entry (Publication) on Research page
131 |
132 | The best way is to just follow the current `Publication` structure you can find in `/src/pages/research.tsx` and use any other existing entry as an example. For publications that are not associated with a conference, just use the `year` prop, with a numeric value, like the example below:
133 |
134 | ```
135 |
141 |
142 |
143 | In this note we explain how to compute n KZG proofs for a polynomial of degree d in
144 | time superlinear of (n+d). Our technique is used in lookup arguments and vector
145 | commitment schemes.
146 |
147 |
148 |
149 | ```
150 |
151 | For publications associated with a conference, use the `conference` property instead, with a text value. Don't use `year` in this case; just include it as part of the `conference` value, as you can see in the example below:
152 |
153 | ```
154 |
161 |
162 |
163 | An aggregatable subvector commitment (aSVC) scheme is a vector commitment (VC)
164 | scheme that can aggregate multiple proofs into a single, small subvector proof. In
165 | this paper, we formalize aSVCs and give a construction from constant-sized
166 | polynomial commitments. Our construction is unique in that it has linear-sized
167 | public parameters, it can compute all constant-sized proofs in quasilinear time, it
168 | updates proofs in constant time and it can aggregate multiple proofs into a
169 | constant-sized subvector proof. Furthermore, our concrete proof sizes are small due
170 | to our use of pairing-friendly groups. We use our aSVC to obtain a payments-only
171 | stateless cryptocurrency with very low communication and computation overheads.
172 | Specifically, our constant-sized, aggregatable proofs reduce each block's proof
173 | overhead to a single group element, which is optimal. Furthermore, our subvector
174 | proofs speed up block verification and our smaller public parameters further reduce
175 | block size.
176 |
177 |
178 |
179 | ```
180 |
181 | ## How to add a new entry on Events page
182 |
183 | Follow the current `Event` structure you can find in `/src/pages/events.tsx` and use any other existing entry as a reference, like the example below:
184 |
185 | ```
186 |
191 | this workshop brings the most interesting and challenging open cryptographic questions
192 | that Ethereum, Filecoin and other blockchain systems face, to the attention of academia.
193 | We will cover a large spectrum of research topics, such as vector commitments, SNARKs,
194 | shuffles, authenticated data structures and more. We will start the day with an update
195 | on the problems discussed at last year's workshop.
196 |
197 | ```
198 |
199 | Be sure to provide a value for `conference`, `workshop` and the correct `url`.
200 |
201 | ### Notes
202 |
203 | - Dates should follow the `yyyy-mm-dd` format (for both internal and external posts), like `date: '2022-03-16'`
204 | - Blog posts are sorted automatically by date, regardless of the order of insertion.
205 | - Check the current sample posts on `src/posts`.
206 |
207 | ## Tutorials
208 |
209 | ### Learning NextJS
210 |
211 | To learn more about Next.js, take a look at the following resources:
212 |
213 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
214 | - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
215 |
216 | ### Adding ChakraUI to a NextJS project
217 |
218 | [This](https://chakra-ui.com/guides/getting-started/nextjs-guide) is a very clear and step-by-step guide on it.
219 |
220 | ### Learning ChakraUI
221 |
222 | We recommend checking the [official docs](https://chakra-ui.com/docs/getting-started).
223 |
--------------------------------------------------------------------------------
/src/posts/nist-pqc-standard.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'NIST Post-Quantum-Cryptography Standardization Process and What it means for Ethereum'
3 | description: 'Explaining NIST PQC standardization and its implications on Ethereum'
4 | author: 'Zhenfei Zhang'
5 | date: '2022-07-11'
6 | ---
7 | ## 1. Introduction
8 | On July 5, 2022, the US National Institute of Standards and Technology (NIST) [announced](https://csrc.nist.gov/Projects/post-quantum-cryptography/selected-algorithms-2022) it will standardize four quantum-safe cryptography algorithms, including
9 | - [Kyber](https://pq-crystals.org/kyber/), a lattice based public-key encryption (PKE) and key-establishment algorithm,
10 | - [Dilithium](https://pq-crystals.org/dilithium/), a lattice based digital signature scheme,
11 | - [Falcon](https://falcon-sign.info/), another lattice based digital signature scheme,
12 | - [SPHINCS+](https://sphincs.org/), a hash based digital signature scheme.
13 |
14 | This _semi-concludes_ a half-decade long search for quantum-safe alternatives to existing public
15 | key infrastructure.
16 |
17 | ### 1.1 The so-called Quantum Apocalypse
18 |
19 | Today, our entire public key infrastructure is built on top of two mathematical problems: integer factorization and discrete logarithm problems. When you open an HTTPS link, it runs the TLS protocol under the hood, which negotiates a session key via the Diffie-Hellman key exchange protocol over a certain elliptic curve group. Another example, a step closer to our blockchain community, when you make an ETH transfer, you sign your transaction with your secret key, using the ECDSA digital signature scheme.
20 |
21 | Almost 30 years ago, Peter Shor discovered [an algorithm](https://en.wikipedia.org/wiki/Shor%27s_algorithm) that runs in polynomial time on a quantum computer and finds the period of a given function. This result implies that both the integer factorization and the discrete logarithm problems are easy to solve with quantum computers, and hence would break the existing PKIs that we are using today.
22 |
23 | ## 2. NIST's search for quantum-safe candidates
24 |
25 | Although research in quantum-safe cryptography dates back decades, this space did not receive much attention until the winter of 2016, when NIST, the _de facto_ standards body for cryptography for almost the entire world, [publicly announced](https://csrc.nist.gov/Projects/post-quantum-cryptography/post-quantum-cryptography-standardization/Call-for-Proposals) its call for proposals for quantum-safe solutions for both key establishment and digital signatures. A year later, NIST had received [81 submissions](https://csrc.nist.gov/Projects/post-quantum-cryptography/post-quantum-cryptography-standardization/Round-1-Submissions) from academic thought leaders and industry pioneers, falling into the following 5 categories:
26 | - [lattice based cryptography](https://en.wikipedia.org/wiki/Lattice-based_cryptography), for both PKEs and signatures;
27 | - [code based cryptography](https://en.wikipedia.org/wiki/McEliece_cryptosystem), for PKEs;
28 | - [multivariate cryptography](https://en.wikipedia.org/wiki/Multivariate_cryptography), for signatures;
29 | - [hash based cryptography](https://en.wikipedia.org/wiki/Hash-based_cryptography), for signatures;
30 | - [supersingular isogeny cryptography](https://en.wikipedia.org/wiki/Supersingular_isogeny_key_exchange), for PKEs.
31 |
32 | Evaluation and cryptanalysis have been ongoing since then, with interesting modifications, breaks, fixes, and optimizations. In early 2019 and mid 2020, NIST announced its [2nd](https://csrc.nist.gov/Projects/post-quantum-cryptography/post-quantum-cryptography-standardization/round-2-submissions) and [3rd round](https://csrc.nist.gov/Projects/post-quantum-cryptography/post-quantum-cryptography-standardization/round-3-submissions) picks, reducing the candidates from 81 to 26 and then to 15. On July 5, 2022, NIST finally concluded the process and chose to standardize [Kyber](https://pq-crystals.org/kyber/) for key establishment, and [Dilithium](https://pq-crystals.org/dilithium/), [Falcon](https://falcon-sign.info/) and [SPHINCS+](https://sphincs.org/) for digital signatures.
33 |
34 | As NIST remarked in [their own report](https://nvlpubs.nist.gov/nistpubs/ir/2022/NIST.IR.8413.pdf), the security of both Kyber and Dilithium are well understood; they both offer great performance and suit a wide range of applications. Falcon is based on a stronger assumption, and is an alternative to Dilithium in the use cases where signature sizes are sensitive. SPHINCS+ is ideal for users who are conservative in their trust assumptions because SPHINCS+ only relies on hash functions.
35 |
36 | ## 3. NIST's next steps
37 |
38 | NIST plans to standardize both [Kyber](https://pq-crystals.org/kyber/) and [Dilithium](https://pq-crystals.org/dilithium/) first, followed by [Falcon](https://falcon-sign.info/) and [SPHINCS+](https://sphincs.org/). Each standard is expected to take roughly one year to complete. Changes in parameters are possible between the final standard and what is submitted to the 3rd round.
39 |
40 | In the meantime, note that the selected algorithms are built from lattices and hashes, while it is wise to standardize schemes based on a variety of hardness assumptions, in case of breakthroughs in cryptanalytic research. NIST plans to take further actions in parallel with the standardization effort:
41 |
42 | - NIST will start a [4th round](https://csrc.nist.gov/Projects/post-quantum-cryptography/round-4-submissions), analyzing key establishment schemes based on codes ([BIKE](https://bikesuite.org/), [classic McEliece](https://classic.mceliece.org) and [HQC](http://pqc-hqc.org/)) and supersingular isogenies ([SIKE](http://sike.org/));
43 | - NIST will issue a call for proposals for post-quantum signature schemes, with a preference for constructions that are neither lattice nor hash based.
44 |
45 | ## 4. What are the implications for Ethereum?
46 |
47 | First, it is safe to assume that, as of today, there does not exist a general-purpose quantum computer capable of breaking ECC. There are various estimates of when, or whether, such a quantum computer will arrive; that discussion is out of scope for this blog. Here we assume that we have sufficient time to deploy countermeasures.
48 |
49 | A natural question is __when do we need to be quantum ready__? In the traditional world, the advice is to become quantum-safe as soon as possible because of so-called _harvest-then-decrypt_ attacks, where an attacker collects data sent over encrypted channels (for example, over TLS 1.3) today and decrypts it once quantum computers become available. In the blockchain world this is even more severe if an application needs to store encrypted files on chain. However, most use cases rely on cryptography for integrity or authenticity, and those can wait a bit, since a future quantum attacker cannot go back in time to break the authenticity of today's data. This gives us some buffer time to study and deploy countermeasures.
50 |
51 | So it becomes important to know which building blocks are potentially vulnerable to quantum computers, and what their quantum-safe alternatives are.
52 |
53 | ### 4.1 Digital Signatures
54 |
55 | Ethereum currently uses ECDSA for authentication. As stated earlier, this is vulnerable to quantum attackers. We may switch to one of the three quantum-safe signature schemes above. We expect a (significant) decrease in performance due to the larger signatures and public keys, listed below:
56 |
57 | | | ECDSA | Dilithium | Falcon | SPHINCS+ |
58 | | --- | ---: |---: |---: |---: |
59 | | public key | 32 B| 1.3 KB | 897 B | 48 B |
60 | | signature | 64 B| 2.4 KB | 666 B | 31 KB |
61 |
62 | As one can see, the smallest quantum-safe signature scheme requires some 666 bytes per signature, a roughly 10x increase over the 64 bytes of ECDSA. We do not consider this to be scalable. Active research is being done in this domain to aggregate signatures, either natively or through a quantum-safe SNARK (more on this later). We may also hope for a new multivariate-based signature scheme (these tend to have signature sizes similar to ECC, albeit with gigantic public keys).
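
As a back-of-the-envelope illustration of this blowup (the per-block transaction count below is an arbitrary number chosen for illustration, not an Ethereum parameter):

```python
# Signature bytes per block for a hypothetical 200 transactions per block,
# using the sizes from the table above.
sig_bytes = {"ECDSA": 64, "Falcon": 666, "Dilithium": 2400, "SPHINCS+": 31_000}
txs_per_block = 200  # arbitrary, for illustration only

for name, size in sig_bytes.items():
    total_kib = size * txs_per_block / 1024
    print(f"{name:10s} {total_kib:8.1f} KiB of signatures per block "
          f"({size / sig_bytes['ECDSA']:.1f}x ECDSA)")
```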
63 |
64 | ### 4.2 Verkle Tree
65 |
66 | The [Verkle tree](https://vitalik.eth.limo/general/2021/06/18/verkle.html) is built on top of the Pedersen commitment scheme and Inner Product Arguments (IPAs), which assume that the discrete logarithm problem is hard. There exist quantum-safe vector commitments built on top of lattices, but all of the candidates perform a few orders of magnitude worse than ECC-based solutions. Our best candidate thus far is to move back to the [Merkle Patricia tree](https://ethereum.stackexchange.com/questions/6415/eli5-how-does-a-merkle-patricia-trie-tree-work), which relies only on hash assumptions.
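
For intuition on why a hash-only structure is attractive, here is a minimal sketch of a plain Merkle commitment with membership proofs. The actual Merkle Patricia trie is considerably more involved, but it relies on the same hash-only assumption.

```python
import hashlib

H = lambda data: hashlib.sha256(data).digest()

def merkle_root(leaves):
    layer = [H(leaf) for leaf in leaves]
    while len(layer) > 1:
        if len(layer) % 2:
            layer.append(layer[-1])       # duplicate the last node on odd layers
        layer = [H(layer[i] + layer[i + 1]) for i in range(0, len(layer), 2)]
    return layer[0]

def merkle_proof(leaves, index):
    layer, proof = [H(leaf) for leaf in leaves], []
    while len(layer) > 1:
        if len(layer) % 2:
            layer.append(layer[-1])
        proof.append((layer[index ^ 1], index % 2))   # (sibling hash, am I a right child?)
        layer = [H(layer[i] + layer[i + 1]) for i in range(0, len(layer), 2)]
        index //= 2
    return proof

def verify(root, leaf, proof):
    h = H(leaf)
    for sibling, is_right_child in proof:
        h = H(sibling + h) if is_right_child else H(h + sibling)
    return h == root

leaves = [b"a", b"b", b"c", b"d"]
root = merkle_root(leaves)
print(verify(root, b"c", merkle_proof(leaves, 2)))    # True
```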
67 |
68 | ### 4.3 Zero Knowledge Proofs and their applications
69 |
70 | Zero-knowledge proofs enable a large number of applications, including [private transactions](https://z.cash/), [Verifiable Delay Functions](https://eprint.iacr.org/2018/601.pdf), [single secret leader selection](https://ethresear.ch/t/whisk-a-practical-shuffle-based-ssle-protocol-for-ethereum/11763), [zk-rollups](https://ethresear.ch/t/zkopru-zk-optimistic-rollup-for-private-transactions/7717), [zkEVMs](https://ethresear.ch/t/the-intuition-and-summary-of-zkevm/10877) and more. There exist various flavours of SNARK systems, split into two categories:
71 | - pairing- or elliptic-curve-based, such as Groth16, vanilla PLONK, Marlin, Bulletproofs, etc.
72 | - hash based, such as [Stark](https://eprint.iacr.org/2018/046.pdf) and [Plonky2](https://github.com/mir-protocol/plonky2).
73 |
74 |
75 | The first category will be vulnerable to quantum computers.
76 |
77 | Note that there are also lattice-based constructions. Despite breakthrough works in the last few years, their performance is, as of today, still multiple orders of magnitude worse than that of hash-based solutions.
78 |
79 | Here we briefly mention two applications that will be essential for the proof of stake consensus. For both applications, switching to a quantum-safe SNARK system such as [Stark](https://eprint.iacr.org/2018/046.pdf) or [Plonky2](https://github.com/mir-protocol/plonky2) results in solid solutions, although substantial work is needed to make these solutions concrete.
80 |
81 |
82 |
83 |
84 | #### Verifiable Delay Function
85 |
86 | The beacon chain used in proof of stake will use a [SNARK-based VDF](https://zkproof.org/2021/11/24/practical-snark-based-vdf/) for validator and committee selection. The [current design](https://github.com/protocol/vdf) builds it from the [Nova](https://eprint.iacr.org/2021/370) proof system, which requires the discrete logarithm assumption. Replacing Nova with Stark or Plonky2 may be sufficient. In addition, we may use verifiable random functions, for which there are hash-based and lattice-based candidates.
87 |
88 | #### Single secret leader selection
89 |
90 | [Single secret leader selection](https://ethresear.ch/t/whisk-a-practical-shuffle-based-ssle-protocol-for-ethereum/11763) is used in a proof of stake protocol for block proposer selection. This is still an active research area. The major candidate under examination right now is [Whisk](https://ethresear.ch/t/whisk-a-practical-shuffle-based-ssle-protocol-for-ethereum/11763), which takes a shuffling approach and uses [an adaptation of the Bayer-Groth protocol](https://crypto.ethereum.org/blog/groth-sahai-blogpost) to prove shuffling correctness. This protocol relies on pairing and discrete logarithm assumptions. Switching to quantum-safe ZKPs will likely decrease performance.
91 |
92 | ## 5. Conclusion
93 |
94 | The conclusion of NIST's standardization process is our first step into the quantum-safe world. It gives us semi-satisfactory replacements for the existing public key infrastructure, and it signals NIST's strong confidence in hash- and lattice-based constructions, which will guide us in identifying better and more scalable quantum-safe candidates for blockchain cryptography.
95 |
96 | ## Acknowledgement
97 |
98 | We would like to thank Mary Maller for suggesting this blog, and Mary and Dankrad Feist for feedback on earlier versions.
99 |
--------------------------------------------------------------------------------
/public/images/posts/zkalc/extrapolation.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/public/images/posts/zkalc/points.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/public/images/posts/zkalc/interpolation.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/src/posts/schnorr-threshold-blogpost.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'On Security Assumptions Underpinning Recent Schnorr Threshold Schemes'
3 | description: 'Describing differences in the security assumptions underpinning four Schnorr threshold signature schemes'
4 | author: 'Chelsea Komlo'
5 | date: '2022-08-05'
6 | ---
7 |
8 | In this post, we discuss differences in the security assumptions underpinning four Schnorr threshold signature schemes. In particular, we will review the two-round FROST signing protocol by Komlo and Goldberg[^1] that we refer to as FROST1, as well as an optimized variant FROST2 by Crites, Komlo, and Maller[^2]. We refer to these schemes in conjunction as FROST 1/2. We contrast these schemes with two three-round signing protocols: SimpleTSig, also by Crites, Komlo, and Maller[^2], as well as the three-round scheme by Lindell[^3], which we call Lindell22.
9 |
10 | **TLDR.**
11 | - **FROST1/2 requires One-More Discrete Logarithm (OMDL) and Programmable Random Oracle Model (PROM) assumptions.**
12 | - **SimpleTSig can be proven using only discrete logarithm (DLP) and PROM assumptions.**
13 | - **Lindell22 can be proven using only DLP+PROM. The protocol employs the Fischlin Transform[^7] in lieu of Schnorr signatures for proofs of knowledge.**
14 |
15 | These assumptions refer only to the security of threshold signing and not to the distributed key generation process. Thanks to Elizabeth Crites and Mary Maller for feedback on this post.
16 |
17 |
18 |
19 |
20 | Let's dig more into the details now.
21 |
22 | ## Part One: What are security models, and why do they matter?
23 |
24 | Security models are a useful tool for proving the security of cryptographic schemes while also making explicit the assumptions, which may or may not hold in practice. Each security model encodes certain assumptions, such as an adversary's capabilities, the ability to perfectly simulate certain functionality, or the properties of underlying mathematical primitives. For example:
25 |
26 | - The **Standard Model.** The adversary is limited only by time and computational power.
27 | - The **Random Oracle Model (ROM).** Assumes outputs from a hash function are indistinguishable from random values.
28 | - The **Programmable Random Oracle Model (PROM).** Allows the random oracle to be *programmed* by the execution environment (which runs the adversary and simulates responses to the adversary's oracle queries), with the restriction that the programming must be indistinguishable from all other truly random responses.
29 |
30 |
31 | ## Part Two: What are security assumptions, and why do they matter?
32 |
33 | A security assumption simply states the assumed hardness, for an adversary, of some particular computational problem. For example:
34 |
35 | - **Discrete Logarithm Problem (DLP).** Considered to be a "standard assumption" in cryptography. The problem is simple: given some challenge $Y$ in a group $G$ with generator $g$, output the discrete logarithm $x$ of $Y$ with respect to $g$, i.e., the $x$ such that $Y = g^x$. (A toy brute-force illustration follows this list.)
36 |
37 | - **One-More Discrete Logarithm Assumption (OMDL).** OMDL was first introduced by Bellare et al.[^8] and later proven secure[^9]. It can be stated as follows: given $\ell +1$ discrete logarithm challenges $X_0 = g^{\alpha_0}, X_1 = g^{\alpha_1}, \dots, X_\ell = g^{\alpha_\ell}$ and access to a discrete logarithm solution oracle $\mathcal{O}_\text{dlsol} (X_i) \rightarrow \alpha_i$ which can be queried up to $\ell$ times, the challenge is to output all $\ell+1$ discrete logarithm solutions $\alpha_i$ for $i \in \{ 0, \ldots, \ell\}$.
38 |
39 | While OMDL is perhaps not considered a "standard" assumption in the way that plain Computational Diffie-Hellman (CDH) or other problems that reduce to a single discrete logarithm instance are, it underpins the security of many cryptographic schemes in theory and in practice, such as blind signatures.
40 |
41 | - **Knowledge of Exponent Assumption (KEA).** KEA is a white-box assumption and is not falsifiable (thus a stronger assumption than what we have reviewed so far). KEA says that if an adversary, given a generator $g$ of a group $G$ and a random element $X \in G$ such that $X = g^x$ for a random $x$, outputs a tuple $(A, B)$ such that $(A, B) = (g^a, X^a)$, then there exists an extractor that will output $a$. Informally, this means that the only way for the adversary to produce $(A, B)$ is by exponentiating each element in the tuple $(g, X)$ with the value $a$, thereby demonstrating the adversary's knowledge of $a$ (as opposed to choosing random elements in $G$).
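
As a toy illustration of the DLP (with deliberately tiny, insecure parameters): an adversary limited to brute force must try exponents one by one, and in cryptographic groups of size around $2^{256}$ this search is infeasible.

```python
# Toy DLP instance in the multiplicative group Z_101^* (deliberately insecure).
p = 101                   # small prime; 2 generates Z_101^*
g = 2
x_secret = 37
Y = pow(g, x_secret, p)   # the challenge Y = g^x mod p

# Brute force: try every exponent until g^e = Y. For ~2^256-sized groups this
# search (and even the best known classical algorithms) is infeasible.
x = next(e for e in range(1, p) if pow(g, e, p) == Y)
print(x, x == x_secret)   # 37 True
```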
42 |
43 |
44 | If you would like more context on how these assumptions are used to prove the security of a cryptographic scheme, see the discussion at the end of this post.
45 |
46 |
47 | ## Part Three: I thought we were supposed to be talking about Schnorr threshold schemes...
48 |
49 | Yes, we are! Finally getting to that.
50 |
51 | The reason we wanted to write this post is because there has been some debate about the security of two-round Schnorr threshold signature schemes (FROST1/2) and how they compare to less efficient three-round Schnorr threshold signature schemes. We'll review these schemes now, and clarify their resulting security next.
52 |
53 | **FROST1** was introduced by Komlo and Goldberg in 2020[^1]. In that work, they did two things. They introduced 1) a Distributed Key Generation (DKG) protocol that is a minor improvement upon the Pedersen DKG[^4] that we will call PedPop, as well as 2) a novel two-round threshold signing protocol that is secure against ROS attacks[^5] that we refer to as FROST1.
54 |
55 | **FROST2** is an optimized variant of FROST1 introduced by Crites, Komlo, and Maller in 2021[^2]; it reduces the number of exponentiations required for signing and verification from linear in the number of signers to constant.
56 |
57 | **SimpleTSig** is a three-round threshold signature scheme also introduced by Crites, Komlo, and Maller[^2], and is the threshold analogue of a three-round multisignature scheme called SimpleMuSig, presented in the same work[^2].
58 |
59 | **Lindell22** is a three-round threshold signing protocol introduced by Lindell in 2022[^3].
60 |
61 | We next show that for threshold signing, SimpleTSig and Lindell22 require the weakest assumptions of all of these schemes. FROST1/2 requires slightly stronger assumptions due to OMDL. However, as mentioned before, OMDL underpins many existing cryptosystems such as blind signatures.
62 |
63 | We split this analysis into two parts: 1) key generation, and 2) signing. The reason for this split is that the key generation mechanism can be viewed as independent of signing, so long as it produces the expected secret and public key material required for signing operations. Hence, the security assumptions required by a particular key generation mechanism are imposed on a scheme only if that mechanism is used.
64 |
65 | ## Part Four: What assumptions underpin various key generation protocols that could be used by FROST1/2, SimpleTSig, or Lindell22?
66 |
67 | We now describe four different key generation mechanisms, all of which can be used in conjunction with any of the threshold signature schemes described in Part Three. Note that this list is not exhaustive.
68 |
69 | **[Standard Model] Trusted key generation.** In this setting, a trusted dealer simply generates all key material and distributes it to each player via Shamir's secret sharing. Shamir's secret sharing is information-theoretically secure, but if Verifiable Secret Sharing (VSS) is used, then the discrete logarithm assumption is required. VSS is generally helpful as it allows each participant to check that its share is consistent with the other players' shares. In either setting, however, the dealer is trusted to perform key generation honestly and to delete the key material afterwards. This variant is described further in Appendix B of the FROST CFRG draft.
70 |
71 | **[Standard Model] Pedersen.** The security of the Pedersen DKG when used as key generation for FROST1, FROST2, SimpleTSig, or Lindell22 relies on at least half of the participants being honest and the underlying signature scheme being secure.
72 |
73 | **[KEA+PROM] PedPop.** An efficient two-round DKG introduced by Komlo and Goldberg along with FROST1. PedPop is simply the Pedersen DKG, with an additional step where each participant publishes a Schnorr signature during the first round to prove knowledge of their secret key material. This extra step ensures that security holds given any threshold of honest parties. The security of PedPop when used as key generation for FROST2 and SimpleTSig was demonstrated[^2]. Note that KEA is required for the environment to extract the adversary's secret keys in the proof of security; alternatively, the Fischlin transform could be used in lieu of Schnorr as the proof of possession (and so would be only in the PROM). See further discussion in Part Eight.
74 |
75 | **[Standard Model] Gennaro et al.** A three-round DKG that is secure in the standard model.
76 |
77 |
78 | ## Part Five: Which assumptions does two-round threshold signing protocol FROST1/2 rely on?
79 |
80 | FROST1/2 signing can be proven using:
81 |
82 | 1. One-More Discrete Logarithm Assumption (OMDL)
83 | 2. Programmable Random Oracle Model (PROM)
84 |
85 | By reducing to OMDL, the environment does not need to rely on extracting secret information from the adversary during its simulation of signing; the adversary is simply required to output a valid forgery. The use of two nonces and the randomizing factor in FROST allows for a true reduction to OMDL, unlike prior related multisignature schemes that had subtle flaws in their attempts at an OMDL reduction[^12].
86 |
87 | The proof for FROST1[^1] required a heuristic assumption and so could not establish these properties directly. The FROST2 paper[^2] provides a direct proof for FROST2 with PedPop as the key generation protocol. Proofs for FROST1 and FROST2 in a recent paper by Bellare, Tessaro, and Zhu[^11] employ an abstraction of key generation, and so demonstrate a direct reduction to PROM+OMDL.
88 |
89 |
90 | ## Part Six: Which assumptions does three-round SimpleTSig signing rely on?
91 |
92 | SimpleTSig signing can be proven using:
93 |
94 | 1. Discrete Logarithm Problem (DLP)
95 | 2. PROM
96 |
97 | The reason why SimpleTSig can be proven in PROM+DLP is that it relies upon a commit-open-sign protocol flow. Similarly to FROST1/2, the environment does not need to extract secret values from the adversary during its simulation of signing; it simply requires that the adversary output a valid forgery at the end of the protocol.
98 |
99 |
100 | ## Part Seven: Which assumptions does three-round Lindell22 signing rely on?
101 |
102 | Lindell22 signing can be proven using:
103 |
104 | 1. DLP
105 | 2. PROM
106 |
107 | Unlike FROST1/2 and SimpleTSig, Lindell22 employs Schnorr signatures at intermediate steps throughout the signing protocol so that participants can prove possession of their nonces. The proof of security requires the environment to *extract* the adversary's nonces in order to demonstrate the reduction to DLP. While employing Schnorr signatures is sufficient to perfectly simulate an idealized zero-knowledge and commitment functionality, Schnorr signatures are *not* sufficient for the environment to perform this extraction step.
108 |
109 | Hence, Lindell22 must instead employ the Fischlin Transform in lieu of Schnorr signatures for the proof to go through in the PROM. Doing so has a non-zero impact on the performance and complexity of the protocol. See further discussion below.
110 |
111 |
112 | ## Part Eight. I'm confused about why Fischlin/KEA are even required.
113 |
114 | This is going to be dense, so hang on :)
115 |
116 | In summary, the Fischlin Transform requires a change to the actual protocol so that the prover **brute-forces** its way to a hash function output whose least significant $b$ bits are all zero.
117 | Why is this transform necessary? In short, it ensures that in the proof of security, the environment is able to extract the necessary secret information from the adversary for the proof to go through.
118 |
119 | KEA simply defines an extractor that is assumed to be able to extract the correct values, given the constraints described above. Hence, this assumption is non-falsifiable and therefore considered a strong assumption.
120 |
121 | Notably, KEA and Fischlin are often interchangeable for protocols that require online extraction for proofs of possession. Lindell22 employs the Fischlin Transform (and hence is in the PROM), but could easily employ KEA instead. The proof for PedPop[^2] assumes KEA, but could alternatively use Fischlin.
122 |
123 | Forking+rewinding is how the unforgeability of Schnorr signatures is proven to reduce to the hardness of discrete log in the programmable ROM when Fiat-Shamir is employed. We describe this reduction in more detail at the end of this post. However, while the proof of *unforgeability* for Schnorr signatures incurs an acceptable tightness loss when forking+rewinding is used, the same is not true when Schnorr signatures are employed as proofs of possession (PoP) and the environment must *extract* secret information from the adversary, as is the case in PedPop and Lindell22. In the extractability case, the tightness loss incurred is instead *exponential*. This is why, in the PoP setting where extractability is required, either KEA or Fischlin must be employed instead.
124 |
125 | The Fischlin Transform provides an alternative to forking+rewinding: the environment can extract secret information in an *online* manner, allowing for a tight(er) proof. Sounds too good to be true? It is, a bit. The Fischlin Transform requires the prover to brute-force over challenges until it finds one whose hash output has all zeros in its least significant $b$ bits. Since the prover is unlikely to find such a challenge immediately, it must make many challenge queries, which is what allows the environment to extract secret values, similarly to the forking+rewinding case. However, unsurprisingly, doing so is expensive for reasonable security parameters.
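
To give a feel for that brute-force step, here is a toy sketch of the idea applied to a Schnorr proof of knowledge with tiny, insecure parameters. The real Fischlin Transform uses several parallel repetitions and bounds the total number of hash queries, which this single-repetition sketch omits.

```python
import hashlib
import secrets

# Tiny, insecure Schnorr group: g has prime order q = 11 in Z_23^*.
p, q, g = 23, 11, 2
B = 8                       # require the low B bits of the hash to be zero

def H(*parts):
    data = b"|".join(str(part).encode() for part in parts)
    return int.from_bytes(hashlib.sha256(data).digest(), "big")

x = 7                       # prover's witness, Y = g^x
Y = pow(g, x, p)

r = secrets.randbelow(q)
R = pow(g, r, p)            # commitment, fixed *before* the brute-force search

# The prover answers honestly for challenges e = 0, 1, 2, ... until the hash of
# the full transcript ends in B zero bits. Every extra attempt is another
# accepting (e, z) pair for the same R, which is what lets a reduction extract
# the witness online instead of rewinding the adversary.
e = 0
while True:
    z = (r + e * x) % q     # honest Schnorr response for challenge e
    if H(Y, R, e, z) % (1 << B) == 0:
        break
    e += 1

# Verifier: checks the Schnorr equation and the hash condition.
ok = pow(g, z, p) == (R * pow(Y, e, p)) % p and H(Y, R, e, z) % (1 << B) == 0
print("proof:", (R, e, z), "verifies:", ok, "after", e + 1, "attempts")
```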
126 |
127 |
128 | ## Part Nine: This post is really long. What should I take away from all of this?
129 |
130 | Let's summarize the key takeaways.
131 |
132 | 1. Two-round threshold signing protocols FROST 1 and FROST2 rely on the programmable Random Oracle Model (PROM) and One-More Discrete Logarithm (OMDL) assumptions.
133 | 2. Three-round threshold signing protocol SimpleTSig relies on PROM + DL.
134 | 3. Three-round threshold signing protocol Lindell22 relies on PROM + DL. The Fischlin Transform imposes some performance costs.
135 |
136 | Thanks, and happy threshold signing!
137 |
138 |
139 |
140 | [^1]: https://eprint.iacr.org/2020/852
141 |
142 | [^2]: https://eprint.iacr.org/2021/1375
143 |
144 | [^3]: https://eprint.iacr.org/2022/374
145 |
146 | [^4]: https://www.cs.cornell.edu/courses/cs754/2001fa/129.PDF
147 |
148 | [^5]: https://eprint.iacr.org/2020/945
149 |
150 | [^6]: https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.134.6445&rep=rep1&type=pdf
151 |
152 | [^7]: https://www.iacr.org/archive/crypto2005/36210148/36210148.pdf
153 |
154 | [^8]: https://eprint.iacr.org/2001/002
155 |
156 | [^9]: https://eprint.iacr.org/2021/866
157 |
158 | [^10]: https://eprint.iacr.org/2004/008
159 |
160 | [^11]: https://eprint.iacr.org/2022/833
161 |
162 | [^12]: https://eprint.iacr.org/2018/417
163 |
164 |
165 |
166 |
167 | ## More on proving the security of cryptographic schemes
168 |
169 | Similar to how, in complexity theory, a problem is shown to be NP-hard by reducing a known hard problem to it, in cryptography we use reductions to hard mathematical problems to demonstrate that breaking a cryptographic scheme is as hard as breaking some known-to-be-hard mathematical problem. For example, we might say that "an adversary wishing to compromise the security of a key-exchange protocol must solve for the discrete logarithm of a value, where the most efficient way to do so is by brute force, which takes X computational power over Y years." We model the adversary as a black-box randomized algorithm, run by the execution environment, that outputs some value at the end, resulting in either a win or a loss for the adversary. We can then provide a lower bound on how long it would take an adversary to eventually win (e.g., solve for an unknown discrete log), and set security parameters accordingly.
170 |
171 | Showing this reduction can be done a number of ways, but there are two proof techniques that are considered to be best practices in cryptography.
172 |
173 | 1. Game-based proofs
174 | 2. Simulation-based proofs.
175 |
176 | **Game-based proofs** demonstrate that an adversary that wins in some game A can be used as a subroutine by another adversary to win in a different game B. Using our key-exchange example, we could show that an adversary that wins in a game against the key-exchange scheme could be used as a subroutine by another adversary to win in a game against the discrete logarithm problem. By a game, we simply mean some program that initializes an adversary, simulates oracle queries to it, and at the end determines if the adversary has successfully completed its attack.
177 |
178 | **Simulation-based proofs** instead rely on proving that some functionality in the "real world" is indistinguishable from that functionality in the "ideal world." For example, we could call an encryption scheme secure if an adversary that learns a ciphertext of a real message (real world) obtains no more information than an adversary that learns a ciphertext of garbage (ideal world). The adversary is allowed to interact with the environment, receiving outputs representing the real world and the ideal world. We say the scheme is secure if the adversary distinguishes between the two with only negligible probability.
179 |
180 |
181 |
182 | ## More on the reduction of Schnorr signatures to DLP
183 |
184 | Above, we talked about proving the security of Schnorr signatures, obtained via Fiat-Shamir, by reducing to the hardness of DLP. We give this reduction step by step now; a small numeric sanity check of the extraction formula follows the list:
185 |
186 | 1. The environment is given $PK$ as the challenge without knowing the secret key, and must simulate signing to an adversary, whose goal is to compute a forgery.
187 | 2. When the adversary successfully produces a forgery (with some probability), the environment then forks its state, and then re-runs the adversary. The adversary again will produce a second forgery, outputting the following two forgeries to the environment.
188 |
189 | $$(R, c, z), \text{ and } (R, c', z')$$
190 |
191 | 3. In the above, $R$ is the commitment (and importantly, is the same in the two tuples).
192 | 4. $c=H(R, m)$ is the challenge the adversary obtains from the challenge oracle (the random oracle $H$ that the environment simulates) *before* the adversary is forked, and $c' = H(R, m)$ is the challenge *after* the adversary is forked, where importantly $c\neq c'$.
193 | 5. $z$ is the adversary's forgery with respect to $(R, c)$ *before* the fork, and $z'$ is the adversary's forgery with respect to $(R, c')$ *after* the fork.
194 | 6. The environment can then extract $sk$ simply by computing $(z-z')/(c-c')$.
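
As a quick numeric sanity check of the extraction formula in the last step (toy parameters, not a secure group):

```python
# Toy parameters: g has prime order q = 11 in Z_23^* (not a secure group).
p, q, g = 23, 11, 2
sk = 7
PK = pow(g, sk, p)

r = 5                                   # adversary's commitment randomness
R = pow(g, r, p)                        # same commitment in both forgeries

c, c_prime = 3, 9                       # random-oracle challenges before/after the fork
z = (r + c * sk) % q                    # forgery before the fork
z_prime = (r + c_prime * sk) % q        # forgery after the fork

# Both (R, c, z) and (R, c', z') satisfy the Schnorr equation g^z = R * PK^c (mod p).
assert pow(g, z, p) == (R * pow(PK, c, p)) % p
assert pow(g, z_prime, p) == (R * pow(PK, c_prime, p)) % p

# Extraction: sk = (z - z') / (c - c') mod q.
extracted = ((z - z_prime) * pow(c - c_prime, -1, q)) % q
print(extracted, extracted == sk)       # 7 True
```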
195 |
--------------------------------------------------------------------------------