├── .gitmodules ├── blocky ├── .gitignore ├── .prettierrc ├── README.md ├── package.json ├── src │ ├── block.ts │ ├── block_grid.test.ts │ ├── block_grid.ts │ ├── index.html │ └── index.tsx ├── tsconfig.json └── yarn.lock ├── cryptography-engineer ├── .clang-format ├── .gitignore ├── CMakeLists.txt ├── README.md ├── bootstrap.sh ├── cmake │ ├── arch.cmake │ ├── barretenberg.cmake │ ├── benchmark.cmake │ ├── build.cmake │ ├── gtest.cmake │ ├── module.cmake │ ├── threading.cmake │ ├── toolchain.cmake │ └── toolchains │ │ ├── arm-apple-clang.cmake │ │ ├── arm64-linux-gcc.cmake │ │ ├── i386-linux-clang.cmake │ │ ├── wasm-linux-clang.cmake │ │ ├── x86_64-apple-clang.cmake │ │ ├── x86_64-linux-clang.cmake │ │ ├── x86_64-linux-gcc.cmake │ │ └── x86_64-linux-gcc10.cmake ├── format.sh └── src │ ├── CMakeLists.txt │ ├── ec_fft │ ├── CMakeLists.txt │ ├── README.md │ ├── ec_fft.cpp │ ├── ec_fft.hpp │ └── ec_fft.test.cpp │ └── indexed_merkle_tree │ ├── CMakeLists.txt │ ├── README.md │ ├── indexed_merkle_tree.cpp │ ├── indexed_merkle_tree.hpp │ ├── indexed_merkle_tree.test.cpp │ └── leaf.hpp ├── eng-sessions ├── merkle-tree-cpp │ ├── .clang-format │ ├── .clangd │ ├── .gitignore │ ├── README.md │ ├── run.sh │ └── src │ │ ├── hash_path.hpp │ │ ├── main.cpp │ │ ├── merkle_tree.hpp │ │ ├── mock_db.hpp │ │ ├── sha256.cpp │ │ ├── sha256.hpp │ │ └── sha256_hasher.hpp └── merkle-tree │ ├── .gitignore │ ├── .prettierrc │ ├── README.md │ ├── package.json │ ├── src │ ├── hash_path.ts │ ├── index.ts │ ├── merkle_tree.test.ts │ ├── merkle_tree.ts │ └── sha256_hasher.ts │ ├── tsconfig.json │ └── yarn.lock ├── senior-applied-cryptography-engineer └── README.md ├── senior-software-engineer └── solidity ├── .gitattributes ├── .gitignore ├── .solhint.json ├── README.md ├── contracts ├── DefiBridgeProxy.sol ├── ERC20Mintable.sol ├── Types.sol ├── UniswapBridge.sol └── interfaces │ └── IDefiBridge.sol ├── ensure_versions.js ├── hardhat.config.ts ├── package.json ├── src ├── contracts │ ├── 
defi_bridge_proxy.ts │ └── uniswap_bridge.test.ts └── deploy │ ├── deploy_erc20.ts │ └── deploy_uniswap.ts ├── tsconfig.json └── yarn.lock /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "cryptography-engineer/bb"] 2 | path = cryptography-engineer/barretenberg 3 | url = https://github.com/AztecProtocol/barretenberg 4 | branch = sb/defi-bridge-project 5 | -------------------------------------------------------------------------------- /blocky/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | dist 3 | .parcel-cache -------------------------------------------------------------------------------- /blocky/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "trailingComma": "all", 4 | "printWidth": 120, 5 | "arrowParens": "avoid" 6 | } 7 | -------------------------------------------------------------------------------- /blocky/README.md: -------------------------------------------------------------------------------- 1 | # Aztec Blocky Test 2 | 3 | **WARNING: Do not fork this repository or make a public repository containing your solution. Either copy it to a private repository or submit your solution via other means.** 4 | 5 | Links to solutions may be sent to charlie@aztecprotocol.com. 6 | 7 | ## To get started 8 | 9 | ```sh 10 | yarn install 11 | yarn start 12 | ``` 13 | 14 | Navigate to `http://localhost:8080`. 15 | 16 | `yarn test` or `yarn test --watch` to run the unit tests on the terminal. 17 | 18 | ## Time 19 | 20 | It's expected you take no more than half a day. If you complete the algorithm sooner, feel free to be creative to improve the game further. 21 | 22 | ## Task 23 | 24 | Implement `clicked` to remove all blocks of the same colour that are connected to the target element, then allow the blocks above the removed to "fall down". 25 | 26 | E.g. 
given: 27 | 28 | ![Initial state](https://trottski.s3.amazonaws.com/snaps/initial.jpg) 29 | 30 | After clicking one of the bottom right blue boxes it should then look like this: 31 | 32 | ![state 2](https://trottski.s3.amazonaws.com/snaps/stage2.jpg) 33 | -------------------------------------------------------------------------------- /blocky/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "front-end-dev-test", 3 | "version": "1.0.0", 4 | "license": "MIT", 5 | "description": "Aztec blocky test.", 6 | "main": "index.js", 7 | "scripts": { 8 | "build": "parcel build ./src/index.html", 9 | "start": "parcel --port 8080 ./src/index.html", 10 | "test": "jest" 11 | }, 12 | "jest": { 13 | "transform": { 14 | "^.+\\.ts$": "ts-jest" 15 | }, 16 | "testRegex": ".*\\.test\\.ts$", 17 | "rootDir": "./src" 18 | }, 19 | "devDependencies": { 20 | "@types/jest": "^26.0.23", 21 | "@types/react-dom": "^17.0.8", 22 | "@types/styled-components": "^5.1.10", 23 | "jest": "^27.0.6", 24 | "parcel": "^2.0.0-beta.2", 25 | "prettier": "^2.3.2", 26 | "react": "^17.0.2", 27 | "react-dom": "^17.0.2", 28 | "styled-components": "^5.3.0", 29 | "ts-jest": "^27.0.3", 30 | "typescript": "^4.3.4" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /blocky/src/block.ts: -------------------------------------------------------------------------------- 1 | export enum Colour { 2 | RED, 3 | GREEN, 4 | BLUE, 5 | ORANGE, 6 | } 7 | 8 | export class Block { 9 | public colour: Colour; 10 | 11 | constructor() { 12 | this.colour = Colour[Colour[Math.floor(Math.random() * (Colour.ORANGE + 1))] as keyof typeof Colour]; 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /blocky/src/block_grid.test.ts: -------------------------------------------------------------------------------- 1 | import { Colour } from './block'; 2 | import { BlockGrid } from 
'./block_grid'; 3 | 4 | describe('BlockGrid', () => { 5 | it('should create blocks with one of the valid colours', () => { 6 | const blockGrid = new BlockGrid(10, 10); 7 | 8 | blockGrid.grid.forEach(col => { 9 | col.forEach(block => { 10 | expect(block).not.toBeNull(); 11 | expect(block!.colour).toBeLessThanOrEqual(Colour.ORANGE); 12 | }); 13 | }); 14 | }); 15 | 16 | it('should perform correct algorithm when clicked', () => { 17 | // Implement me. 18 | }); 19 | }); 20 | -------------------------------------------------------------------------------- /blocky/src/block_grid.ts: -------------------------------------------------------------------------------- 1 | import { Block } from './block'; 2 | 3 | export class BlockGrid { 4 | public grid: Block[][] = []; 5 | 6 | constructor(public numCols: number, public numRows: number) { 7 | for (let x = 0; x < numCols; x++) { 8 | const col = []; 9 | for (let y = 0; y < numRows; y++) { 10 | col.push(new Block()); 11 | } 12 | this.grid.push(col); 13 | } 14 | } 15 | 16 | clicked(x: number, y: number) { 17 | console.log(`(${x}, ${y}): Implement me...`); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /blocky/src/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Aztec Blocky Test 5 | 6 | 7 |
8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /blocky/src/index.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom'; 3 | import { Colour } from './block'; 4 | import { BlockGrid } from './block_grid'; 5 | import styled, { createGlobalStyle } from 'styled-components'; 6 | 7 | const GlobalStyle = createGlobalStyle` 8 | html, body, #root { 9 | background: grey; 10 | margin: 0; 11 | padding: 0; 12 | width: 100%; 13 | height: 100%; 14 | } 15 | `; 16 | 17 | const StyledGrid = styled.div` 18 | width: 100%; 19 | height: 100%; 20 | background: grey; 21 | `; 22 | 23 | const StyledColumn = styled.div` 24 | float: left; 25 | background: grey; 26 | width: 10%; 27 | height: 100%; 28 | `; 29 | 30 | const StyledBlock = styled.div` 31 | width: 100%; 32 | height: 10%; 33 | margin: 0; 34 | padding: 0; 35 | `; 36 | 37 | function Blocky({ grid }: { grid: BlockGrid }) { 38 | return ( 39 | 40 | {grid.grid.map((col, i) => ( 41 | 42 | {col.map((block, j) => ( 43 | grid.clicked(i, j)} 47 | > 48 | ))} 49 | 50 | ))} 51 | 52 | ); 53 | } 54 | 55 | async function main() { 56 | const blockGrid = new BlockGrid(10, 10); 57 | console.log(blockGrid); 58 | ReactDOM.render( 59 | <> 60 | 61 | 62 | , 63 | document.getElementById('root'), 64 | ); 65 | } 66 | 67 | main().catch(console.error); 68 | -------------------------------------------------------------------------------- /blocky/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2020", 4 | "module": "es6", 5 | "lib": ["es2020", "dom"], 6 | "moduleResolution": "node", 7 | "jsx": "react", 8 | "noEmit": true, 9 | "strict": true, 10 | "esModuleInterop": true 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /cryptography-engineer/.clang-format: 
-------------------------------------------------------------------------------- 1 | PointerAlignment: Left 2 | ColumnLimit: 120 3 | BreakBeforeBraces: Allman 4 | IndentWidth: 4 5 | BinPackArguments: false 6 | BinPackParameters: false 7 | AllowShortFunctionsOnASingleLine: None 8 | Cpp11BracedListStyle: false 9 | AlwaysBreakAfterReturnType: None 10 | AlwaysBreakAfterDefinitionReturnType: None 11 | PenaltyReturnTypeOnItsOwnLine: 1000000 12 | BreakConstructorInitializers: BeforeComma 13 | BreakBeforeBraces: Custom 14 | BraceWrapping: 15 | AfterClass: false 16 | AfterEnum: false 17 | AfterFunction: true 18 | AfterNamespace: false 19 | AfterStruct: false 20 | AfterUnion: false 21 | AfterExternBlock: false 22 | BeforeCatch: false 23 | BeforeElse: false 24 | SplitEmptyFunction: false 25 | SplitEmptyRecord: false 26 | SplitEmptyNamespace: false 27 | AllowShortFunctionsOnASingleLine : Inline 28 | SortIncludes: false -------------------------------------------------------------------------------- /cryptography-engineer/.gitignore: -------------------------------------------------------------------------------- 1 | .cache/ 2 | build*/ 3 | src/wasi-sdk-* 4 | src/aztec/proof_system/proving_key/fixtures 5 | src/aztec/rollup/proofs/*/fixtures 6 | srs_db/ignition/transcript* 7 | srs_db/lagrange 8 | srs_db/coset_lagrange 9 | srs_db/modified_lagrange 10 | # to be unignored when we agree on clang-tidy rules 11 | .clangd -------------------------------------------------------------------------------- /cryptography-engineer/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # aztec-connect-cpp 2 | # copyright 2019 Spilsbury Holdings Ltd 3 | 4 | cmake_minimum_required(VERSION 3.16) 5 | 6 | # Get the full path to barretenberg. This is helpful because the required 7 | # relative path changes based on where in cmake the path is used. 
8 | # `BBERG_DIR` must be set before toolchain.cmake is imported because 9 | # `BBERG_DIR` is used in toolchain.cmake to determine `WASI_SDK_PREFIX` 10 | get_filename_component(BBERG_DIR ../barretenberg/cpp 11 | REALPATH BASE_DIR "${CMAKE_BINARY_DIR}") 12 | 13 | include(cmake/toolchain.cmake) 14 | 15 | set(PROJECT_VERSION 0.1.0) 16 | project(AztecInterviewTests 17 | DESCRIPTION "Project containing C++ technical tests for the position of Cryptography Engineer" 18 | LANGUAGES CXX C) 19 | 20 | # include barretenberg as ExternalProject 21 | include(cmake/barretenberg.cmake) 22 | 23 | option(DISABLE_ASM "Disable custom assembly" OFF) 24 | option(DISABLE_ADX "Disable ADX assembly variant" OFF) 25 | option(MULTITHREADING "Enable multi-threading" ON) 26 | option(TESTING "Build tests" ON) 27 | 28 | if(ARM) 29 | message(STATUS "Compiling for ARM.") 30 | set(DISABLE_ASM ON) 31 | set(DISABLE_ADX ON) 32 | set(RUN_HAVE_STD_REGEX 0) 33 | set(RUN_HAVE_POSIX_REGEX 0) 34 | endif() 35 | 36 | if(WASM) 37 | message(STATUS "Compiling for WebAssembly.") 38 | set(DISABLE_ASM ON) 39 | set(MULTITHREADING OFF) 40 | endif() 41 | 42 | set(CMAKE_C_STANDARD 11) 43 | set(CMAKE_C_EXTENSIONS ON) 44 | set(CMAKE_CXX_STANDARD 20) 45 | set(CMAKE_CXX_STANDARD_REQUIRED TRUE) 46 | set(CMAKE_CXX_EXTENSIONS ON) 47 | 48 | include(cmake/build.cmake) 49 | include(cmake/arch.cmake) 50 | include(cmake/threading.cmake) 51 | include(cmake/gtest.cmake) 52 | include(cmake/module.cmake) 53 | 54 | add_subdirectory(src) -------------------------------------------------------------------------------- /cryptography-engineer/README.md: -------------------------------------------------------------------------------- 1 | ## Cryptography Take-Home Tests 2 | 3 | Welcome to Aztec's cryptography tests as a part of your interview process. This module contains some coding tests designed to be attempted by candidates for either of the roles listed below. 4 | 5 | 1. Cryptography Engineer 6 | 2. Applied Cryptography Engineer 7 | 3. 
Applied Cryptographer 8 | 9 | This module contains the following exercises: 10 | 11 | 1. [Indexed Merkle Tree](./src/indexed_merkle_tree/README.md) 12 | 2. [EC-FFT](./src/ec_fft/README.md) 13 | 14 | Since these exercises use our in-house cryptography library barretenberg in the backend, you need to run the following commands to get started after you have cloned this repository. Please _do not_ fork the original repository, instead clone it and push it to your private repository. 15 | 16 | ```console 17 | $ cd cryptography-engineer 18 | $ ./bootstrap.sh # this clones the barretenberg submodule and builds it 19 | $ cd build 20 | $ make _tests # this compiles the given test module 21 | $ ./bin/_tests # this runs the tests in that module 22 | ``` 23 | 24 | Here, `module_name` must be replaced with `indexed_merkle_tree` for the first exercise. In case you face any issues with setting up this framework, feel free to reach out to [suyash@aztecprotocol.com](mailto:suyash@aztecprotocol.com) or [cody@aztecprotocol.com](mailto:cody@aztecprotocol.com). 25 | -------------------------------------------------------------------------------- /cryptography-engineer/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Update the submodule 5 | git submodule init 6 | git submodule update --init --recursive 7 | 8 | # Clean. 9 | rm -rf ./build 10 | rm -rf ./build-wasm 11 | rm -rf ./src/wasi-sdk-* 12 | 13 | # Clean barretenberg. 14 | rm -rf ../barretenberg/cpp/build 15 | rm -rf ../barretenberg/cpp/build-wasm 16 | rm -rf ../barretenberg/cpp/src/wasi-sdk-* 17 | 18 | # Install formatting git hook. 19 | HOOKS_DIR=$(git rev-parse --git-path hooks) 20 | echo "cd \$(git rev-parse --show-toplevel)/cryptography-engineer && ./format.sh staged" > $HOOKS_DIR/pre-commit 21 | chmod +x $HOOKS_DIR/pre-commit 22 | 23 | # Determine system. 
24 | if [[ "$OSTYPE" == "darwin"* ]]; then 25 | OS=macos 26 | elif [[ "$OSTYPE" == "linux-gnu" ]]; then 27 | OS=linux 28 | else 29 | echo "Unknown OS: $OSTYPE" 30 | exit 1 31 | fi 32 | 33 | # Download ignition transcripts. 34 | (cd barretenberg/cpp/srs_db && ./download_ignition.sh 3) 35 | 36 | # Pick native toolchain file. 37 | if [ "$OS" == "macos" ]; then 38 | export BREW_PREFIX=$(brew --prefix) 39 | # Ensure we have toolchain. 40 | if [ ! "$?" -eq 0 ] || [ ! -f "$BREW_PREFIX/opt/llvm/bin/clang++" ]; then 41 | echo "Default clang not sufficient. Install homebrew, and then: brew install llvm libomp clang-format" 42 | exit 1 43 | fi 44 | ARCH=$(uname -m) 45 | if [ "$ARCH" = "arm64" ]; then 46 | TOOLCHAIN=arm-apple-clang 47 | else 48 | TOOLCHAIN=x86_64-apple-clang 49 | fi 50 | else 51 | TOOLCHAIN=x86_64-linux-clang 52 | fi 53 | 54 | # Build native. 55 | mkdir -p build && cd build 56 | cmake -DCMAKE_BUILD_TYPE=RelWithAssert -DTOOLCHAIN=$TOOLCHAIN .. 57 | cmake --build . --parallel ${@/#/--target } 58 | cd .. -------------------------------------------------------------------------------- /cryptography-engineer/cmake/arch.cmake: -------------------------------------------------------------------------------- 1 | if(WASM) 2 | # Disable SLP vectorization on WASM as it's brokenly slow. To give an idea, with this off it still takes 3 | # 2m:18s to compile scalar_multiplication.cpp, and with it on I estimate it's 50-100 times longer. I never 4 | # had the patience to wait it out... 
5 | add_compile_options(-fno-exceptions -fno-slp-vectorize) 6 | endif() 7 | 8 | if(NOT WASM AND NOT APPLE) 9 | add_compile_options(-march=skylake-avx512) 10 | endif() 11 | -------------------------------------------------------------------------------- /cryptography-engineer/cmake/barretenberg.cmake: -------------------------------------------------------------------------------- 1 | # Here we Set up barretenberg as an ExternalProject 2 | # - Point to its source and build directories 3 | # - Construct its `configure` and `build` command lines 4 | # - include its `src/` in `search path for includes 5 | # - Depend on specific libraries from barretenberg 6 | # 7 | # If barretenberg's cmake files change, its configure and build are triggered 8 | # If barretenberg's source files change, build is triggered 9 | 10 | include(ExternalProject) 11 | 12 | if (WASM) 13 | set(BBERG_BUILD_DIR ${BBERG_DIR}/build-wasm) 14 | else() 15 | set(BBERG_BUILD_DIR ${BBERG_DIR}/build) 16 | endif() 17 | 18 | # If the OpenMP library is included via this option, propogate to ExternalProject configure 19 | if (OpenMP_omp_LIBRARY) 20 | set(LIB_OMP_OPTION -DOpenMP_omp_LIBRARY=${OpenMP_omp_LIBRARY}) 21 | endif() 22 | 23 | # Make sure barretenberg doesn't set its own WASI_SDK_PREFIX 24 | if (WASI_SDK_PREFIX) 25 | set(WASI_SDK_OPTION -DWASI_SDK_PREFIX=${WASI_SDK_PREFIX}) 26 | endif() 27 | 28 | # cmake configure cli args for ExternalProject 29 | set(BBERG_CONFIGURE_ARGS -DTOOLCHAIN=${TOOLCHAIN} ${WASI_SDK_OPTION} ${LIB_OMP_OPTION} -DCI=${CI}) 30 | 31 | # Naming: Project: Barretenberg, Libraries: barretenberg, env 32 | # Need BUILD_ALWAYS to ensure that barretenberg is automatically reconfigured when its CMake files change 33 | # "Enabling this option forces the build step to always be run. This can be the easiest way to robustly 34 | # ensure that the external project's own build dependencies are evaluated rather than relying on the 35 | # default success timestamp-based method." 
- https://cmake.org/cmake/help/latest/module/ExternalProject.html 36 | ExternalProject_Add(Barretenberg 37 | SOURCE_DIR ${BBERG_DIR} 38 | BINARY_DIR ${BBERG_BUILD_DIR} # build directory 39 | BUILD_ALWAYS TRUE 40 | UPDATE_COMMAND "" 41 | INSTALL_COMMAND "" 42 | CONFIGURE_COMMAND ${CMAKE_COMMAND} ${BBERG_CONFIGURE_ARGS} .. 43 | BUILD_COMMAND ${CMAKE_COMMAND} --build . --parallel --target barretenberg --target env) 44 | 45 | include_directories(${BBERG_DIR}/src/aztec) 46 | 47 | # Add the imported barretenberg and env libraries, point to their library archives, 48 | # and add a dependency of these libraries on the imported project 49 | add_library(barretenberg STATIC IMPORTED) 50 | set_target_properties(barretenberg PROPERTIES IMPORTED_LOCATION ${BBERG_BUILD_DIR}/lib/libbarretenberg.a) 51 | add_dependencies(barretenberg Barretenberg) 52 | 53 | # env is needed for logstr in native executables and wasm tests 54 | # It is otherwise omitted from wasm to prevent use of C++ logstr instead of imported/Typescript 55 | add_library(env STATIC IMPORTED) 56 | set_target_properties(env PROPERTIES IMPORTED_LOCATION ${BBERG_BUILD_DIR}/lib/libenv.a) 57 | add_dependencies(env Barretenberg) -------------------------------------------------------------------------------- /cryptography-engineer/cmake/benchmark.cmake: -------------------------------------------------------------------------------- 1 | if(NOT TESTING) 2 | set(BENCHMARKS OFF) 3 | endif() 4 | 5 | if(BENCHMARKS) 6 | include(FetchContent) 7 | 8 | FetchContent_Declare( 9 | benchmark 10 | GIT_REPOSITORY https://github.com/google/benchmark 11 | GIT_TAG v1.6.1 12 | ) 13 | 14 | FetchContent_GetProperties(benchmark) 15 | if(NOT benchmark_POPULATED) 16 | fetchcontent_populate(benchmark) 17 | set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "Benchmark tests off") 18 | add_subdirectory(${benchmark_SOURCE_DIR} ${benchmark_BINARY_DIR} EXCLUDE_FROM_ALL) 19 | endif() 20 | endif() 21 | 
-------------------------------------------------------------------------------- /cryptography-engineer/cmake/build.cmake: -------------------------------------------------------------------------------- 1 | if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) 2 | set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build." FORCE) 3 | endif() 4 | message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") 5 | 6 | if(CMAKE_BUILD_TYPE STREQUAL "RelWithAssert") 7 | add_compile_options(-O3) 8 | remove_definitions(-DNDEBUG) 9 | endif() -------------------------------------------------------------------------------- /cryptography-engineer/cmake/gtest.cmake: -------------------------------------------------------------------------------- 1 | if(TESTING) 2 | include(GoogleTest) 3 | include(FetchContent) 4 | 5 | FetchContent_Declare( 6 | googletest 7 | GIT_REPOSITORY https://github.com/google/googletest.git 8 | GIT_TAG release-1.10.0 9 | ) 10 | 11 | FetchContent_GetProperties(googletest) 12 | if(NOT googletest_POPULATED) 13 | FetchContent_Populate(googletest) 14 | add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL) 15 | endif() 16 | 17 | if(WASM) 18 | target_compile_definitions( 19 | gtest 20 | PRIVATE 21 | -DGTEST_HAS_EXCEPTIONS=0 22 | -DGTEST_HAS_STREAM_REDIRECTION=0) 23 | endif() 24 | 25 | mark_as_advanced( 26 | BUILD_GMOCK BUILD_GTEST BUILD_SHARED_LIBS 27 | gmock_build_tests gtest_build_samples gtest_build_tests 28 | gtest_disable_pthreads gtest_force_shared_crt gtest_hide_internal_symbols 29 | ) 30 | 31 | enable_testing() 32 | endif() 33 | -------------------------------------------------------------------------------- /cryptography-engineer/cmake/module.cmake: -------------------------------------------------------------------------------- 1 | # copyright 2019 Spilsbury Holdings 2 | # 3 | # usage: barretenberg_module(module_name [dependencies ...]) 4 | # 5 | # Scans for all .cpp files in a subdirectory, and creates a library 
named . 6 | # Scans for all .test.cpp files in a subdirectory, and creates a gtest binary named _tests. 7 | # Scans for all .bench.cpp files in a subdirectory, and creates a benchmark binary named _bench. 8 | # 9 | # We have to get a bit complicated here, due to the fact CMake will not parallelise the building of object files 10 | # between dependent targets, due to the potential of post-build code generation steps etc. 11 | # To work around this, we create "object libraries" containing the object files. 12 | # Then we declare executables/libraries that are to be built from these object files. 13 | # These assets will only be linked as their dependencies complete, but we can parallelise the compilation at least. 14 | 15 | function(barretenberg_module MODULE_NAME) 16 | file(GLOB_RECURSE SOURCE_FILES *.cpp) 17 | file(GLOB_RECURSE HEADER_FILES *.hpp) 18 | list(FILTER SOURCE_FILES EXCLUDE REGEX ".*\.(fuzzer|test|bench).cpp$") 19 | 20 | if(SOURCE_FILES) 21 | add_library( 22 | ${MODULE_NAME}_objects 23 | OBJECT 24 | ${SOURCE_FILES} 25 | ) 26 | 27 | add_library( 28 | ${MODULE_NAME} 29 | STATIC 30 | $ 31 | ) 32 | 33 | target_link_libraries( 34 | ${MODULE_NAME} 35 | PUBLIC 36 | ${ARGN} 37 | barretenberg 38 | ${TBB_IMPORTED_TARGETS} 39 | ) 40 | 41 | set(MODULE_LINK_NAME ${MODULE_NAME}) 42 | endif() 43 | 44 | file(GLOB_RECURSE TEST_SOURCE_FILES *.test.cpp) 45 | if(TESTING AND TEST_SOURCE_FILES) 46 | add_library( 47 | ${MODULE_NAME}_test_objects 48 | OBJECT 49 | ${TEST_SOURCE_FILES} 50 | ) 51 | 52 | target_link_libraries( 53 | ${MODULE_NAME}_test_objects 54 | PRIVATE 55 | gtest 56 | barretenberg 57 | env 58 | ${TBB_IMPORTED_TARGETS} 59 | ) 60 | 61 | add_executable( 62 | ${MODULE_NAME}_tests 63 | $ 64 | ) 65 | 66 | if(WASM) 67 | target_link_options( 68 | ${MODULE_NAME}_tests 69 | PRIVATE 70 | -Wl,-z,stack-size=8388608 71 | ) 72 | endif() 73 | 74 | if(CI) 75 | target_compile_definitions( 76 | ${MODULE_NAME}_test_objects 77 | PRIVATE 78 | -DCI=1 79 | ) 80 | endif() 81 | 82 | 
if(DISABLE_HEAVY_TESTS) 83 | target_compile_definitions( 84 | ${MODULE_NAME}_test_objects 85 | PRIVATE 86 | -DDISABLE_HEAVY_TESTS=1 87 | ) 88 | endif() 89 | 90 | target_link_libraries( 91 | ${MODULE_NAME}_tests 92 | PRIVATE 93 | ${MODULE_LINK_NAME} 94 | ${ARGN} 95 | gtest 96 | gtest_main 97 | barretenberg 98 | env 99 | ${TBB_IMPORTED_TARGETS} 100 | ) 101 | 102 | if(NOT WASM AND NOT CI) 103 | # Currently haven't found a way to easily wrap the calls in wasmtime when run from ctest. 104 | gtest_discover_tests(${MODULE_NAME}_tests WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) 105 | endif() 106 | 107 | add_custom_target( 108 | run_${MODULE_NAME}_tests 109 | COMMAND ${MODULE_NAME}_tests 110 | WORKING_DIRECTORY ${CMAKE_BINARY_DIR} 111 | ) 112 | endif() 113 | 114 | file(GLOB_RECURSE FUZZERS_SOURCE_FILES *.fuzzer.cpp) 115 | if(FUZZING AND FUZZERS_SOURCE_FILES) 116 | foreach(FUZZER_SOURCE_FILE ${FUZZERS_SOURCE_FILES}) 117 | get_filename_component(FUZZER_NAME_STEM ${FUZZER_SOURCE_FILE} NAME_WE) 118 | add_executable( 119 | ${MODULE_NAME}_${FUZZER_NAME_STEM}_fuzzer 120 | ${FUZZER_SOURCE_FILE} 121 | ) 122 | 123 | target_link_options( 124 | ${MODULE_NAME}_${FUZZER_NAME_STEM}_fuzzer 125 | PRIVATE 126 | "-fsanitize=fuzzer" 127 | ${SANITIZER_OPTIONS} 128 | ) 129 | 130 | target_link_libraries( 131 | ${MODULE_NAME}_${FUZZER_NAME_STEM}_fuzzer 132 | PRIVATE 133 | ${MODULE_LINK_NAME} 134 | barretenberg 135 | env 136 | ) 137 | endforeach() 138 | endif() 139 | 140 | file(GLOB_RECURSE BENCH_SOURCE_FILES *.bench.cpp) 141 | if(BENCHMARKS AND BENCH_SOURCE_FILES) 142 | add_library( 143 | ${MODULE_NAME}_bench_objects 144 | OBJECT 145 | ${BENCH_SOURCE_FILES} 146 | ) 147 | 148 | target_link_libraries( 149 | ${MODULE_NAME}_bench_objects 150 | PRIVATE 151 | benchmark 152 | barretenberg 153 | env 154 | ${TBB_IMPORTED_TARGETS} 155 | ) 156 | 157 | add_executable( 158 | ${MODULE_NAME}_bench 159 | $ 160 | ) 161 | 162 | target_link_libraries( 163 | ${MODULE_NAME}_bench 164 | PRIVATE 165 | ${MODULE_LINK_NAME} 166 
| ${ARGN} 167 | benchmark 168 | barretenberg 169 | env 170 | ${TBB_IMPORTED_TARGETS} 171 | ) 172 | 173 | add_custom_target( 174 | run_${MODULE_NAME}_bench 175 | COMMAND ${MODULE_NAME}_bench 176 | WORKING_DIRECTORY ${CMAKE_BINARY_DIR} 177 | ) 178 | endif() 179 | endfunction() -------------------------------------------------------------------------------- /cryptography-engineer/cmake/threading.cmake: -------------------------------------------------------------------------------- 1 | if(APPLE) 2 | if(CMAKE_C_COMPILER_ID MATCHES "Clang") 3 | set(OpenMP_C_FLAGS "-fopenmp") 4 | set(OpenMP_C_FLAGS_WORK "-fopenmp") 5 | set(OpenMP_C_LIB_NAMES "libomp") 6 | set(OpenMP_C_LIB_NAMES_WORK "libomp") 7 | set(OpenMP_libomp_LIBRARY "$ENV{BREW_PREFIX}/opt/libomp/lib/libomp.dylib") 8 | endif() 9 | if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") 10 | set(OpenMP_CXX_FLAGS "-fopenmp") 11 | set(OpenMP_CXX_FLAGS_WORK "-fopenmp") 12 | set(OpenMP_CXX_LIB_NAMES "libomp") 13 | set(OpenMP_CXX_LIB_NAMES_WORK "libomp") 14 | set(OpenMP_libomp_LIBRARY "$ENV{BREW_PREFIX}/opt/libomp/lib/libomp.dylib") 15 | endif() 16 | endif() 17 | 18 | if(MULTITHREADING) 19 | find_package(OpenMP REQUIRED) 20 | message(STATUS "Multithreading is enabled.") 21 | link_libraries(OpenMP::OpenMP_CXX) 22 | else() 23 | message(STATUS "Multithreading is disabled.") 24 | add_definitions(-DNO_MULTITHREADING -DBOOST_SP_NO_ATOMIC_ACCESS) 25 | endif() 26 | 27 | if(DISABLE_TBB) 28 | message(STATUS "Intel Thread Building Blocks is disabled.") 29 | add_definitions(-DNO_TBB) 30 | else() 31 | find_package(TBB REQUIRED tbb) 32 | if(${TBB_FOUND}) 33 | message(STATUS "Intel Thread Building Blocks is enabled.") 34 | else() 35 | message(STATUS "Could not locate TBB.") 36 | add_definitions(-DNO_TBB) 37 | endif() 38 | endif() 39 | -------------------------------------------------------------------------------- /cryptography-engineer/cmake/toolchain.cmake: -------------------------------------------------------------------------------- 1 | if 
(CMAKE_C_COMPILER AND CMAKE_CXX_COMPILER) 2 | message(STATUS "Toolchain: manually chosen ${CMAKE_C_COMPILER} and ${CMAKE_CXX_COMPILER}") 3 | else() 4 | if(NOT TOOLCHAIN) 5 | set(TOOLCHAIN "x86_64-linux-clang" CACHE STRING "Build toolchain." FORCE) 6 | endif() 7 | message(STATUS "Toolchain: ${TOOLCHAIN}") 8 | 9 | include("./cmake/toolchains/${TOOLCHAIN}.cmake") 10 | endif() -------------------------------------------------------------------------------- /cryptography-engineer/cmake/toolchains/arm-apple-clang.cmake: -------------------------------------------------------------------------------- 1 | set(APPLE ON) 2 | set(ARM ON) 3 | set(CMAKE_CXX_COMPILER "$ENV{BREW_PREFIX}/opt/llvm/bin/clang++") 4 | set(CMAKE_C_COMPILER "$ENV{BREW_PREFIX}/opt/llvm/bin/clang") -------------------------------------------------------------------------------- /cryptography-engineer/cmake/toolchains/arm64-linux-gcc.cmake: -------------------------------------------------------------------------------- 1 | set(ARM ON) 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_VERSION 1) 4 | set(CMAKE_SYSTEM_PROCESSOR aarch64) 5 | 6 | set(cross_triple "aarch64-unknown-linux-gnu") 7 | set(cross_root /usr/xcc/${cross_triple}) 8 | 9 | set(CMAKE_C_COMPILER $ENV{CC}) 10 | set(CMAKE_CXX_COMPILER $ENV{CXX}) 11 | set(CMAKE_Fortran_COMPILER $ENV{FC}) 12 | 13 | set(CMAKE_CXX_FLAGS "-I ${cross_root}/include/") 14 | 15 | set(CMAKE_FIND_ROOT_PATH ${cross_root} ${cross_root}/${cross_triple}) 16 | set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 17 | set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH) 18 | set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH) 19 | set(CMAKE_SYSROOT ${cross_root}/${cross_triple}/sysroot) 20 | 21 | set(CMAKE_CROSSCOMPILING_EMULATOR /usr/bin/qemu-aarch64) -------------------------------------------------------------------------------- /cryptography-engineer/cmake/toolchains/i386-linux-clang.cmake: -------------------------------------------------------------------------------- 1 | # Sometimes we need 
to set compilers manually, for example for fuzzing 2 | if(NOT CMAKE_C_COMPILER) 3 | set(CMAKE_C_COMPILER "clang") 4 | endif() 5 | 6 | if(NOT CMAKE_CXX_COMPILER) 7 | set(CMAKE_CXX_COMPILER "clang++") 8 | endif() 9 | 10 | add_compile_options("-m32") 11 | add_link_options("-m32") 12 | set(MULTITHREADING OFF) 13 | add_definitions(-DDISABLE_SHENANIGANS=1) -------------------------------------------------------------------------------- /cryptography-engineer/cmake/toolchains/wasm-linux-clang.cmake: -------------------------------------------------------------------------------- 1 | # Cmake toolchain description file for the Makefile 2 | 3 | # This is arbitrary, AFAIK, for now. 4 | cmake_minimum_required(VERSION 3.4.0) 5 | 6 | set(WASM ON) 7 | set(CMAKE_SYSTEM_NAME Generic) 8 | set(CMAKE_SYSTEM_VERSION 1) 9 | set(CMAKE_SYSTEM_PROCESSOR wasm32) 10 | set(triple wasm32-wasi) 11 | 12 | set(WASI_SDK_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/src/wasi-sdk-12.0") 13 | set(CMAKE_C_COMPILER ${WASI_SDK_PREFIX}/bin/clang) 14 | set(CMAKE_CXX_COMPILER ${WASI_SDK_PREFIX}/bin/clang++) 15 | set(CMAKE_AR ${WASI_SDK_PREFIX}/bin/llvm-ar CACHE STRING "wasi-sdk build") 16 | set(CMAKE_RANLIB ${WASI_SDK_PREFIX}/bin/llvm-ranlib CACHE STRING "wasi-sdk build") 17 | set(CMAKE_C_COMPILER_TARGET ${triple} CACHE STRING "wasi-sdk build") 18 | set(CMAKE_CXX_COMPILER_TARGET ${triple} CACHE STRING "wasi-sdk build") 19 | #set(CMAKE_EXE_LINKER_FLAGS "-Wl,--no-threads" CACHE STRING "wasi-sdk build") 20 | 21 | set(CMAKE_SYSROOT ${WASI_SDK_PREFIX}/share/wasi-sysroot CACHE STRING "wasi-sdk build") 22 | set(CMAKE_STAGING_PREFIX ${WASI_SDK_PREFIX}/share/wasi-sysroot CACHE STRING "wasi-sdk build") 23 | 24 | # Don't look in the sysroot for executables to run during the build 25 | set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 26 | # Only look in the sysroot (not in the host paths) for the rest 27 | set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 28 | set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 29 | 
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 30 | 31 | # Some other hacks 32 | set(CMAKE_C_COMPILER_WORKS ON) 33 | set(CMAKE_CXX_COMPILER_WORKS ON) 34 | 35 | add_definitions(-D_WASI_EMULATED_PROCESS_CLOCKS=1) -------------------------------------------------------------------------------- /cryptography-engineer/cmake/toolchains/x86_64-apple-clang.cmake: -------------------------------------------------------------------------------- 1 | set(APPLE ON) 2 | set(CMAKE_CXX_COMPILER "$ENV{BREW_PREFIX}/opt/llvm/bin/clang++") 3 | set(CMAKE_C_COMPILER "$ENV{BREW_PREFIX}/opt/llvm/bin/clang") -------------------------------------------------------------------------------- /cryptography-engineer/cmake/toolchains/x86_64-linux-clang.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_C_COMPILER "clang") 2 | set(CMAKE_CXX_COMPILER "clang++") -------------------------------------------------------------------------------- /cryptography-engineer/cmake/toolchains/x86_64-linux-gcc.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_C_COMPILER "gcc") 2 | set(CMAKE_CXX_COMPILER "g++") 3 | # TODO(Cody): git rid of this when Adrian's work goes in 4 | add_compile_options(-Wno-uninitialized) 5 | add_compile_options(-Wno-maybe-uninitialized) -------------------------------------------------------------------------------- /cryptography-engineer/cmake/toolchains/x86_64-linux-gcc10.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_C_COMPILER "gcc-10") 2 | set(CMAKE_CXX_COMPILER "g++-10") 3 | # TODO(Cody): git rid of this when Adrian's work goes in 4 | add_compile_options(-Wno-uninitialized) 5 | add_compile_options(-Wno-maybe-uninitialized) -------------------------------------------------------------------------------- /cryptography-engineer/format.sh: -------------------------------------------------------------------------------- 1 | 
#!/bin/bash 2 | set -e 3 | 4 | if [ "$1" == "staged" ]; then 5 | echo Formatting staged files... 6 | for FILE in $(git diff-index --diff-filter=d --relative --cached --name-only HEAD | grep -e '\.\(cpp\|hpp\|tcc\)$'); do 7 | clang-format -i $FILE 8 | sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak 9 | git add $FILE 10 | done 11 | elif [ -n "$1" ]; then 12 | for FILE in $(git diff-index --relative --name-only $1 | grep -e '\.\(cpp\|hpp\|tcc\)$'); do 13 | clang-format -i $FILE 14 | sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak 15 | done 16 | else 17 | for FILE in $(find ./src -iname *.hpp -o -iname *.cpp -o -iname *.tcc | grep -v src/boost); do 18 | clang-format -i $FILE 19 | sed -i.bak 's/\r$//' $FILE && rm ${FILE}.bak 20 | done 21 | fi -------------------------------------------------------------------------------- /cryptography-engineer/src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) 2 | 3 | add_compile_options(-Werror -Wall -Wextra -Wconversion -Wsign-conversion -Wno-deprecated -Wno-tautological-compare -Wfatal-errors) 4 | 5 | if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") 6 | add_compile_options(-Wno-unguarded-availability-new -Wno-c99-extensions -fconstexpr-steps=100000000) 7 | if(MEMORY_CHECKS) 8 | message(STATUS "Compiling with memory checks.") 9 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address") 10 | endif() 11 | endif() 12 | 13 | if(CMAKE_CXX_COMPILER_ID MATCHES "GNU") 14 | add_compile_options(-Wno-deprecated-copy -fconstexpr-ops-limit=100000000) 15 | endif() 16 | 17 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}) 18 | 19 | # I feel this should be limited to ecc, however it's currently used in headers that go across libraries, 20 | # and there currently isn't an easy way to inherit the DDISABLE_SHENANIGANS parameter. 
21 | if(DISABLE_ASM) 22 | message(STATUS "Using fallback non-assembly methods for field multiplications.") 23 | add_definitions(-DDISABLE_SHENANIGANS=1) 24 | else() 25 | message(STATUS "Using optimized assembly for field arithmetic.") 26 | endif() 27 | 28 | add_subdirectory(indexed_merkle_tree) 29 | add_subdirectory(ec_fft) 30 | 31 | if(BENCHMARKS) 32 | add_subdirectory(benchmark) 33 | endif() 34 | -------------------------------------------------------------------------------- /cryptography-engineer/src/ec_fft/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # create a barretenberg_module for ec_fft 2 | barretenberg_module(ec_fft) -------------------------------------------------------------------------------- /cryptography-engineer/src/ec_fft/README.md: -------------------------------------------------------------------------------- 1 | ## EC-FFT Test 2 | 3 | Hi there! Welcome to the `ec_fft` test that you are about to take. We will guide you through the concept of EC-FFT before you begin crushing this exercise. This exercise consists of two parts: 4 | 5 | 1. Implement the FFT algorithm for curve points: 6 | ```cpp 7 | void ec_fft_inner(g1::element* g1_elements, const size_t n, const std::vector& root_table); 8 | ``` 9 | 2. Using the `ec_fft` function, convert a monomial reference string to a Lagrange reference string without knowing the secret $x$. 10 | ```cpp 11 | void convert_srs(g1::affine_element* monomial_srs, g1::affine_element* lagrange_srs, const evaluation_domain& domain); 12 | ``` 13 | Objective: Implement the above functions to get the four pre-written tests in `ec_fft.test.cpp` passing. 14 | 15 | Pro-tip: You can take a look at the pre-written tests if you need help with the syntax. 16 | 17 | #### Monomial Reference String 18 | 19 | Universal zk-SNARKs like PlonK need to run a one-time trusted setup ceremony to generate a Structured Reference String (SRS). 
Any number of participants can participate in this ceremony but only _one_ of all the participants needs to be _honest_. As a part of this ceremony, each participant contributes to the setup with their own _secret_ which they are free to choose. If even one of the participants generates this secret randomly and destroys it successfully, the setup ceremony is considered to be successful. For our purposes, the output of the setup ceremony is a structured reference string of the form: 20 | 21 | $$ 22 | \begin{aligned} 23 | \mathbb{G}_1 \text{ points: } 24 | \big([1]_1,[x]_1, [x^2]_1, [x^3]_1, \dots, [x^{N-1}]_1\big), \\ 25 | \end{aligned} 26 | $$ 27 | 28 | Here, $\mathbb{G}_1$ is a cyclic group, $\mathbb{F}$ is a finite field, and the order of $\mathbb{G}_1$ is the same as the order of $\mathbb{F}$. The element $x \in \mathbb{F}$ is the combined secret of all the participants and is assumed to be unknown to anyone in the world. Further, we define $[a]_1 := aG_1$, where $G_1\in \mathbb{G}_1$ is a fixed generator of $\mathbb{G}_1$. 29 | 30 | The monomial reference string is used to commit to polynomials in their coefficient/monomial form. For example, given a polynomial $f(X) = f_0 + f_1X + f_2X^2 + \dots + f_{n-1}X^{n-1}$ for $n < N$, we can compute its commitment as: 31 | 32 | $$ 33 | \textsf{commit}(f) := [f(x)]_1 = \sum_{i=0}^{n-1} f_i \cdot [x^i]_1. 34 | $$ 35 | 36 | #### Lagrange Representation 37 | 38 | An alternative way to represent a polynomial is in its Lagrange form. Let $\{\omega^0, \omega^1, \omega^2, \dots, \omega^{n-1}\}$ be the $n$-th roots of unity and assume $n$ is a power of two. Then we can write the same polynomial $f(X)$ as: 39 | 40 | $$ 41 | f(X) = \sum_{i=0}^{n-1} f(\omega^i) \cdot L_{n,i}(X) 42 | $$ 43 | 44 | where $L_{n,i}(X)$ is the $i$-th Lagrange basis polynomial defined as: 45 | 46 | $$ 47 | L_{n,i}(\omega^j) = 48 | \begin{cases} 49 | 1 & \text{if }j = i \\ 50 | 0 & \text{otherwise} 51 | \end{cases}.
52 | $$ 53 | 54 | In other words, the Lagrange basis polynomial $L_{n,i}(X)$ is $1$ on $\omega^i$ and $0$ on the other roots $\{\omega^j\}_{j \neq i}$. The Lagrange form of a polynomial is sometimes more useful than the monomial form. 55 | 56 | #### Fast Fourier Transform 57 | 58 | Given the coefficient form of a polynomial, it is possible to convert it to the Lagrange form using the Fast Fourier Transform (FFT) operation. Similarly, we can take an inverse FFT to convert the Lagrange form to its coefficient form. 59 | 60 | $$ 61 | \begin{aligned} 62 | \{f_i\}_{i=0}^{n} \xrightarrow{\textsf{FFT}} \{f(\omega^i)\}_{i=0}^{n}, \\ 63 | \{f(\omega^i)\}_{i=0}^{n} \xrightarrow{\textsf{iFFT}} \{f_i\}_{i=0}^{n}. 64 | \end{aligned} 65 | $$ 66 | 67 | Note that this $\textsf{FFT}$ operation is defined on scalars in the field $\mathbb{F}$. 68 | 69 | #### EC-FFT 70 | 71 | Suppose we are given a bunch of elliptic curve points: 72 | 73 | $$ 74 | \{a_0G_1, a_1G_1, \dots, a_{n-1}G_1\} \in \mathbb{G}_1^{n} 75 | $$ 76 | 77 | for some scalars $\{a_i\}_{i=0}^{n-1}\in \mathbb{F}^n$. You can think of these scalars $\{a_i\}_{i=0}^{n-1}$ as coefficients of some polynomial $A(X)$. Note that you only have access to the given curve points and not the actual coefficients $\{a_i\}_{i=0}^{n-1}$. The question is: can you convert this set of curve points to another set of curve points defined as: 78 | 79 | $$ 80 | \{A(\omega^0)\cdot G_1, \ A(\omega^1)\cdot G_1, \ \dots, \ A(\omega^{n-1})\cdot G_1\} \in \mathbb{G}_1^{n}. 81 | $$ 82 | 83 | Since you do not have access to the coefficients $\{a_i\}_{i=0}^{n-1}$ you cannot just compute its FFT. But instead, if we can do an FFT operation on the curve points, that will give us the desired result. EC-FFT is exactly that: FFT on Elliptic Curve points!
86 | 87 | #### Application of EC-FFT 88 | 89 | Recall that our monomial SRS (of size $n$) was of the form: 90 | 91 | $$ 92 | \mathbb{G}_1 \text{ monomial points: } 93 | \big([1]_1,[x]_1, [x^2]_1, [x^3]_1, \dots, [x^{n-1}]_1\big). 94 | $$ 95 | 96 | Let's say we want to convert this monomial SRS to the Lagrange SRS: 97 | 98 | $$ 99 | \mathbb{G}_1 \text{ lagrange points: } 100 | \big([L_0(x)]_1,[L_1(x)]_1, [L_2(x)]_1, \dots, [L_{n-1}(x)]_1\big). 101 | $$ 102 | 103 | without knowing the scalar $x\in \mathbb{F}$. We can do this by using EC-FFT functionality. For that, lets Define a polynomial with coefficients $\{1, x, x^2, \dots, x^{n-1}\}$: 104 | 105 | $$ 106 | P(Y) := 1 + xY + x^2Y^2 + \dots + x^{n-1}Y^{n-1}. 107 | $$ 108 | 109 | Now, we can write the Lagrange polynomial $L_{n,i}(X)$ as: 110 | 111 | $$ 112 | L_{n,i}(X) := \frac{1}{n}\left( \left(\frac{X}{\omega^i}\right)^0 + \left(\frac{X}{\omega^i}\right)^1 + \dots + \left(\frac{X}{\omega^i}\right)^{n-1} \right) 113 | $$ 114 | 115 | This is because: when $X=\omega^i$ all of the terms would be 1 and so $L_{n,i}(\omega^i)=\frac{1 + 1 + \dots + 1}{n} = 1$. On the other hand, if $X = \omega^j$ s.t. $j\neq i$, the term in the numerator would just be the sum of all $n$-th roots of unity, i.e. $L_{n,i}(\omega^j)=\frac{\sum_k\omega^k}{n} = 0$. Using this definition of the Lagrange polynomial, we have: 116 | 117 | $$ 118 | L_{n,i}(X) := \frac{1}{n}\sum_{j=0}^{n-1}\left(\frac{X}{\omega^i}\right)^j = \frac{1}{n}\sum_{j=0}^{n-1}\left(\omega^{-i}X\right)^j. 
119 | $$ 120 | 121 | By the definition of $P(Y)$, we can write: 122 | 123 | $$ 124 | \begin{aligned} 125 | P(Y) &= \sum_{j=0}^{n-1} (x \cdot Y)^j \\ 126 | \implies P(\omega^{-i}) &= \sum_{j=0}^{n-1} (\omega^{-i} \cdot x)^j =: n \cdot L_{n,i}(x) \\ 127 | \therefore \quad L_{n,i}(x) &:= \frac{1}{n} \cdot P(\omega^{-i}) \qquad \text{(1)} 128 | \end{aligned} 129 | $$ 130 | 131 | Therefore, we can compute the Lagrange SRS from the monomial SRS by first taking the EC-FFT on the monomial SRS and applying the transform shown in equation $(1)$. 132 | 133 | 1. Given the monomial SRS, take its EC-FFT; 134 | 135 | $$ 136 | \Big([1]_1, [x]_1, [x^2]_1, \dots, [x^{n-1}]_1\Big) 137 | \xrightarrow{\textsf{ec-FFT}} 138 | \Big([P(\omega^0)]_1, [P(\omega^1)]_1, [P(\omega^2)]_1, \dots, [P(\omega^{n-1})]_1\Big) 139 | $$ 140 | 141 | 2. Apply the transformation shown in equation $(1)$: 142 | 143 | $$ 144 | [L_{n,i}(x)]_1 := \frac{1}{n} \cdot [P(\omega^{-i})]_1 \quad \forall i \in [0, n) 145 | $$ 146 | 147 | As a part of this exercise, you also have to implement the function `convert_srs` that takes in a `monomial_srs` and converts it to a `lagrange_srs` for a given size as explained above. 
148 | -------------------------------------------------------------------------------- /cryptography-engineer/src/ec_fft/ec_fft.cpp: -------------------------------------------------------------------------------- 1 | #include "ec_fft.hpp" 2 | 3 | #pragma GCC diagnostic ignored "-Wunused-variable" 4 | #pragma GCC diagnostic ignored "-Wunused-parameter" 5 | 6 | namespace waffle { 7 | namespace g1_fft { 8 | 9 | using namespace barretenberg; 10 | 11 | inline bool is_power_of_two(uint64_t x) 12 | { 13 | return x && !(x & (x - 1)); 14 | } 15 | 16 | inline uint32_t reverse_bits(uint32_t x, uint32_t bit_length) 17 | { 18 | x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); 19 | x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); 20 | x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); 21 | x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); 22 | return (((x >> 16) | (x << 16))) >> (32 - bit_length); 23 | } 24 | 25 | inline void ec_fft_inner(g1::element* g1_elements, const size_t n, const std::vector& root_table) 26 | { 27 | is_power_of_two(n); 28 | ASSERT(!root_table.empty()); 29 | 30 | // Exercise: implement the butterfly structure to perform ec-fft 31 | } 32 | 33 | void ec_fft(g1::element* g1_elements, const evaluation_domain& domain) 34 | { 35 | ec_fft_inner(g1_elements, domain.size, domain.get_round_roots()); 36 | } 37 | 38 | void ec_ifft(g1::element* g1_elements, const evaluation_domain& domain) 39 | { 40 | ec_fft_inner(g1_elements, domain.size, domain.get_inverse_round_roots()); 41 | for (size_t i = 0; i < domain.size; i++) { 42 | g1_elements[i] *= domain.domain_inverse; 43 | } 44 | } 45 | 46 | void convert_srs(g1::affine_element* monomial_srs, g1::affine_element* lagrange_srs, const evaluation_domain& domain) 47 | { 48 | const size_t n = domain.size; 49 | is_power_of_two(n); 50 | 51 | // Exercise: implement the conversion of monomial to Lagrange SRS 52 | // Note that you can convert from g1::affine_element form to g1::element by just doing: 53 
| // g1::affine_element x_affine = g1::affine_one; 54 | // auto x_elem = g1::element(x_affine); 55 | // Conversion from g1::element to g1::affine_element can be done similarly. 56 | } 57 | 58 | } // namespace g1_fft 59 | } // namespace waffle -------------------------------------------------------------------------------- /cryptography-engineer/src/ec_fft/ec_fft.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | 6 | namespace waffle { 7 | namespace g1_fft { 8 | 9 | using namespace barretenberg; 10 | 11 | /** 12 | * Computes FFT (butterfly-structure) on EC (elliptic curve) points `g1_elements`. 13 | * 14 | * @param g1_elements: Given set of curve points on the BN254 curve 15 | * @param n: Number of curve points (assumed to be a power of two) 16 | * @param root_table: Contains roots of unity required in each round of the butterfly structure 17 | * @details: 18 | * root_table[0]: [1 ω₁] 19 | * root_table[1]: [1 ω₂ (ω₂)² (ω₂)³] 20 | * . 21 | * . 22 | * . 23 | * root_table[m]: [1 ωᵣ (ωᵣ)² (ωᵣ)³ ... (ωᵣ)ᴿ⁻² (ωᵣ)ᴿ⁻¹] 24 | * 25 | * where r = log2(n) - 1, R = 2^r and ωᵢ = i-th root of unity. 26 | */ 27 | void ec_fft_inner(g1::element* g1_elements, const size_t n, const std::vector& root_table); 28 | 29 | /** 30 | * Computes EC-FFT of `g1_elements` given the evaluation domain `domain`. 31 | * 32 | * @details: 33 | * The domain contains the following: 34 | * n = domain.size (number of `g1_elements`, assumed to be a power of two) 35 | * ω = domain.root (n-th root of unity) 36 | * 1/ω = domain.root_inverse (multiplicative inverse of ω) 37 | */ 38 | void ec_fft(g1::element* g1_elements, const evaluation_domain& domain); 39 | 40 | /** 41 | * Computes inverse EC-FFT of `g1_elements` given the evaluation domain `domain`. 
42 | */ 43 | void ec_ifft(g1::element* g1_elements, const evaluation_domain& domain); 44 | 45 | /** 46 | * Using `ec_fft`, computes the Lagrange form of the SRS given the monomial form SRS `monomial_srs`. 47 | * 48 | * @param monomial_srs: Monomial SRS of the form: ([1]₁, [x]₁, [x²]₁, [x³]₁, ..., [xⁿ⁻¹]₁) 49 | * @param lagrange_srs: Result must be stored in this, it should be of the form: ([L₀(x)]₁, [L₁(x)]₁, ..., [Lⁿ⁻¹(x)]₁) 50 | * @param domain: contains the information about n-th roots of unity 51 | */ 52 | void convert_srs(g1::affine_element* monomial_srs, g1::affine_element* lagrange_srs, const evaluation_domain& domain); 53 | 54 | } // namespace g1_fft 55 | } // namespace waffle -------------------------------------------------------------------------------- /cryptography-engineer/src/ec_fft/ec_fft.test.cpp: -------------------------------------------------------------------------------- 1 | #include "ec_fft.hpp" 2 | #include 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | using namespace barretenberg; 13 | 14 | TEST(ec_fft, test_fft_ifft) 15 | { 16 | constexpr size_t n = 256; 17 | std::vector monomial_points; 18 | std::vector lagrange_points; 19 | 20 | for (size_t i = 0; i < n; i++) { 21 | fr multiplicand = fr::random_element(); 22 | g1::element monomial_term = g1::one * multiplicand; 23 | monomial_points.push_back(monomial_term); 24 | lagrange_points.push_back(monomial_term); 25 | } 26 | 27 | auto domain = evaluation_domain(n); 28 | domain.compute_lookup_table(); 29 | 30 | // Do EC-FFT and then EC-iFFT 31 | waffle::g1_fft::ec_fft(&lagrange_points[0], domain); 32 | waffle::g1_fft::ec_ifft(&lagrange_points[0], domain); 33 | 34 | // Compare the results 35 | for (size_t i = 0; i < n; i++) { 36 | EXPECT_EQ(monomial_points[i].normalize(), lagrange_points[i].normalize()); 37 | } 38 | } 39 | 40 | TEST(ec_fft, test_compare_ffts) 41 | { 42 | constexpr size_t n = 256; 43 | std::vector monomial_points; 44 | 
std::vector lagrange_points; 45 | std::vector poly_monomial; 46 | std::vector poly_lagrange; 47 | 48 | for (size_t i = 0; i < n; i++) { 49 | fr multiplicand = fr::random_element(); 50 | poly_monomial.push_back(multiplicand); 51 | poly_lagrange.push_back(multiplicand); 52 | 53 | g1::element monomial_term = g1::one * multiplicand; 54 | monomial_points.push_back(monomial_term); 55 | lagrange_points.push_back(monomial_term); 56 | } 57 | 58 | auto domain = evaluation_domain(n); 59 | domain.compute_lookup_table(); 60 | 61 | // Do EC FFT 62 | waffle::g1_fft::ec_fft(&lagrange_points[0], domain); 63 | 64 | // Do fr FFT 65 | polynomial_arithmetic::fft(&poly_lagrange[0], domain); 66 | 67 | // Compare the results 68 | for (size_t i = 0; i < n; i++) { 69 | fr scalar = poly_lagrange[i]; 70 | g1::element expected = g1::one * scalar; 71 | EXPECT_EQ(expected.normalize(), lagrange_points[i].normalize()); 72 | } 73 | } 74 | 75 | TEST(ec_fft, test_compare_iffts) 76 | { 77 | constexpr size_t n = 256; 78 | std::vector monomial_points; 79 | std::vector lagrange_points; 80 | std::vector poly_monomial; 81 | std::vector poly_lagrange; 82 | 83 | for (size_t i = 0; i < n; i++) { 84 | fr multiplicand = fr::random_element(); 85 | poly_monomial.push_back(multiplicand); 86 | poly_lagrange.push_back(multiplicand); 87 | 88 | g1::element monomial_term = g1::one * multiplicand; 89 | monomial_points.push_back(monomial_term); 90 | lagrange_points.push_back(monomial_term); 91 | } 92 | 93 | auto domain = evaluation_domain(n); 94 | domain.compute_lookup_table(); 95 | 96 | // Do EC iFFT 97 | waffle::g1_fft::ec_ifft(&monomial_points[0], domain); 98 | 99 | // Do fr iFFT 100 | polynomial_arithmetic::ifft(&poly_monomial[0], domain); 101 | 102 | // Compare the results 103 | for (size_t i = 0; i < n; i++) { 104 | fr scalar = poly_monomial[i]; 105 | g1::element expected = g1::one * scalar; 106 | EXPECT_EQ(expected.normalize(), monomial_points[i].normalize()); 107 | } 108 | } 109 | 110 | TEST(ec_fft, 
test_convert_srs) 111 | { 112 | constexpr size_t n = 512; 113 | std::vector monomial_points; 114 | std::vector lagrange_points; 115 | std::vector poly_monomial; 116 | std::vector poly_lagrange; 117 | 118 | const fr x = fr::random_element(); 119 | fr multiplicand = 1; 120 | for (size_t i = 0; i < n; i++) { 121 | // Fill the polynomials with random coefficients 122 | fr coefficient = fr::random_element(); 123 | poly_monomial.push_back(coefficient); 124 | poly_lagrange.push_back(coefficient); 125 | 126 | // Create a fake srs with secret x 127 | g1::element monomial_term = g1::one * multiplicand; 128 | monomial_points.push_back(monomial_term); 129 | lagrange_points.push_back(monomial_term); 130 | multiplicand *= x; 131 | } 132 | 133 | auto domain = evaluation_domain(n); 134 | domain.compute_lookup_table(); 135 | 136 | std::vector lagrange_srs; 137 | std::vector monomial_srs; 138 | lagrange_srs.resize(2 * n); 139 | monomial_srs.resize(2 * n); 140 | 141 | // Copy over the monomial points in monomial_srs 142 | for (size_t i = 0; i < n; i++) { 143 | monomial_srs[i] = g1::affine_element(monomial_points[i]); 144 | } 145 | 146 | // Convert from monomial to lagrange srs 147 | waffle::g1_fft::convert_srs(&monomial_srs[0], &lagrange_srs[0], domain); 148 | polynomial_arithmetic::fft(&poly_lagrange[0], domain); 149 | 150 | scalar_multiplication::pippenger_runtime_state state(n); 151 | scalar_multiplication::generate_pippenger_point_table(&monomial_srs[0], &monomial_srs[0], n); 152 | scalar_multiplication::generate_pippenger_point_table(&lagrange_srs[0], &lagrange_srs[0], n); 153 | 154 | // Check == 155 | g1::element expected = scalar_multiplication::pippenger(&poly_monomial[0], &monomial_srs[0], n, state); 156 | g1::element result = scalar_multiplication::pippenger(&poly_lagrange[0], &lagrange_srs[0], n, state); 157 | expected = expected.normalize(); 158 | result = result.normalize(); 159 | 160 | EXPECT_EQ(result, expected); 161 | } 
-------------------------------------------------------------------------------- /cryptography-engineer/src/indexed_merkle_tree/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # create a barretenberg_module for merkle tree 2 | barretenberg_module(indexed_merkle_tree) -------------------------------------------------------------------------------- /cryptography-engineer/src/indexed_merkle_tree/README.md: -------------------------------------------------------------------------------- 1 | ## Indexed Merkle Tree Test 2 | 3 | Hi there! Welcome to this Indexed Merkle tree test that you are about to take. We will guide you through the concept of Indexed Merkle trees before you begin crushing this exercise. 4 | 5 | #### Pre-requisites 6 | 7 | We assume that you are familiar with the concept of Merkle trees. If not, please read [this](https://decentralizedthoughts.github.io/2020-12-22-what-is-a-merkle-tree/) excellent blog. 8 | 9 | #### Indexed Merkle Tree 10 | 11 | An indexed Merkle tree is a variant of the basic Merkle tree where the leaf structure changes slightly. Each leaf in the indexed Merkle tree not only stores some value $v \in \mathbb{F}$ but also points to the leaf with the next higher value: 12 | 13 | $$ 14 | \textsf{leaf} = \{v, i_{\textsf{next}}, v_{\textsf{next}}\}. 15 | $$ 16 | 17 | where $i_{\textsf{next}}$ is the index of the leaf with the next higher value $v_{\textsf{next}} > v$. By design, we assume that there are no leaves in the tree with a value in the range $(v, v_{\textsf{next}})$. Let us look at a toy example of the state transitions in an indexed Merkle tree of depth 3. 18 | 19 | [Note: Please check out [this](https://hackmd.io/@suyash67/ByXqvJI12) hackmd for the images, they're missing from the dependencies.] 20 | 21 | 1. Initial state 22 | ![first](../../dependencies/images/0.png) 23 | 2. Add a new value $v=30$ 24 | ![second](../../dependencies/images/1.jpg) 25 | 3. 
Add a new value $v=10$ 26 | ![third](../../dependencies/images/2.jpg) 27 | 4. Add a new value $v=20$ 28 | ![fourth](../../dependencies/images/3.jpg) 29 | 5. Add a new value $v=50$ 30 | ![fifth](../../dependencies/images/4.jpg) 31 | 32 | #### Exercise 33 | 34 | In this exercise, you will implement the indexed Merkle tree as a class called `IndexedMerkleTree`. The class definition and the boilerplate code are given in the files `indexed_merkle_tree.*pp`. The aim of this exercise is to fill in the pre-defined functions and get the two tests passing in `indexed_merkle_tree.test.cpp`. Note that the structure of the `leaf` is already implemented in `leaf.hpp` for you. 36 | To compile this module and run the tests, do: 37 | 38 | ```bash 39 | $ cd interview-tests/cryptography-engineer 40 | $ ./bootstrap.sh 41 | $ cd build 42 | $ make indexed_merkle_tree_tests # This compiles the module 43 | $ ./bin/indexed_merkle_tree_tests # This runs the tests 44 | ``` 45 | 46 | In case of any questions or suggestions related to this exercise, feel free to reach out to [suyash@aztecprotocol.com](mailto:suyash@aztecprotocol.com) or [cody@aztecprotocol.com](mailto:cody@aztecprotocol.com). 47 | -------------------------------------------------------------------------------- /cryptography-engineer/src/indexed_merkle_tree/indexed_merkle_tree.cpp: -------------------------------------------------------------------------------- 1 | #include "indexed_merkle_tree.hpp" 2 | #include 3 | 4 | namespace plonk { 5 | namespace stdlib { 6 | namespace indexed_merkle_tree { 7 | 8 | /** 9 | * Initialise an indexed merkle tree state with all the leaf values: H({0, 0, 0}). 10 | * Note that the leaf pre-image vector `leaves_` must be filled with {0, 0, 0} only at index 0. 
11 | */ 12 | IndexedMerkleTree::IndexedMerkleTree(size_t depth) 13 | : depth_(depth) 14 | { 15 | ASSERT(depth_ >= 1 && depth <= 32); 16 | total_size_ = 1UL << depth_; 17 | hashes_.resize(total_size_ * 2 - 2); 18 | 19 | // Exercise: Build the initial state of the entire tree. 20 | } 21 | 22 | /** 23 | * Fetches a hash-path from a given index in the tree. 24 | * Note that the size of the fr_hash_path vector should be equal to the depth of the tree. 25 | */ 26 | fr_hash_path IndexedMerkleTree::get_hash_path(size_t) 27 | { 28 | // Exercise: fill the hash path for a given index. 29 | fr_hash_path path(depth_); 30 | return path; 31 | } 32 | 33 | /** 34 | * Update the node values (i.e. `hashes_`) given the leaf hash `value` and its index `index`. 35 | * Note that indexing in the tree starts from 0. 36 | * This function should return the updated root of the tree. 37 | */ 38 | fr IndexedMerkleTree::update_element_internal(size_t, fr const&) 39 | { 40 | // Exercise: insert the leaf hash `value` at `index`. 41 | return 0; 42 | } 43 | 44 | /** 45 | * Insert a new `value` in a new leaf in the `leaves_` vector in the form: {value, nextIdx, nextVal} 46 | * You will need to compute `nextIdx, nextVal` according to the way indexed merkle trees work. 47 | * Further, you will need to update one old leaf pre-image on inserting a new leaf. 48 | * Lastly, insert the new leaf hash in the tree as well as update the existing leaf hash of the old leaf. 49 | */ 50 | fr IndexedMerkleTree::update_element(fr const&) 51 | { 52 | // Exercise: add a new leaf with value `value` to the tree. 
53 | return 0; 54 | } 55 | 56 | } // namespace indexed_merkle_tree 57 | } // namespace stdlib 58 | } // namespace plonk -------------------------------------------------------------------------------- /cryptography-engineer/src/indexed_merkle_tree/indexed_merkle_tree.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include "leaf.hpp" 4 | 5 | namespace plonk { 6 | namespace stdlib { 7 | namespace indexed_merkle_tree { 8 | 9 | using namespace barretenberg; 10 | using namespace plonk::stdlib::merkle_tree; 11 | 12 | /** 13 | * An IndexedMerkleTree is structured just like a usual merkle tree: 14 | * 15 | * hashes_ 16 | * +------------------------------------------------------------------------------+ 17 | * | 0 -> h_{0,0} h_{0,1} h_{0,2} h_{0,3} h_{0,4} h_{0,5} h_{0,6} h_{0,7} | 18 | * i | | 19 | * n | 8 -> h_{1,0} h_{1,1} h_{1,2} h_{1,3} | 20 | * d | | 21 | * e | 12 -> h_{2,0} h_{2,1} | 22 | * x | | 23 | * | 14 -> h_{3,0} | 24 | * +------------------------------------------------------------------------------+ 25 | * 26 | * Here, depth_ = 3 and {h_{0,j}}_{i=0..7} are leaf values. 27 | * Also, root_ = h_{3,0} and total_size_ = (2 * 8 - 2) = 14. 28 | * Lastly, h_{i,j} = hash( h_{i-1,2j}, h_{i-1,2j+1} ) where i > 1. 29 | * 30 | * 1. Initial state: 31 | * 32 | * # 33 | * 34 | * # # 35 | * 36 | * # # # # 37 | * 38 | * # # # # # # # # 39 | * 40 | * index 0 1 2 3 4 5 6 7 41 | * 42 | * val 0 0 0 0 0 0 0 0 43 | * nextIdx 0 0 0 0 0 0 0 0 44 | * nextVal 0 0 0 0 0 0 0 0 45 | * 46 | * 2. Add new leaf with value 30 47 | * 48 | * val 0 30 0 0 0 0 0 0 49 | * nextIdx 1 0 0 0 0 0 0 0 50 | * nextVal 30 0 0 0 0 0 0 0 51 | * 52 | * 3. Add new leaf with value 10 53 | * 54 | * val 0 30 10 0 0 0 0 0 55 | * nextIdx 2 0 1 0 0 0 0 0 56 | * nextVal 10 0 30 0 0 0 0 0 57 | * 58 | * 4. Add new leaf with value 20 59 | * 60 | * val 0 30 10 20 0 0 0 0 61 | * nextIdx 2 0 3 1 0 0 0 0 62 | * nextVal 10 0 20 30 0 0 0 0 63 | * 64 | * 5. 
Add new leaf with value 50 65 | * 66 | * val 0 30 10 20 50 0 0 0 67 | * nextIdx 2 4 3 1 0 0 0 0 68 | * nextVal 10 50 20 30 0 0 0 0 69 | */ 70 | class IndexedMerkleTree { 71 | public: 72 | IndexedMerkleTree(size_t depth); 73 | 74 | fr_hash_path get_hash_path(size_t index); 75 | 76 | fr update_element_internal(size_t index, fr const& value); 77 | 78 | fr update_element(fr const& value); 79 | 80 | fr root() const { return root_; } 81 | 82 | const std::vector& get_hashes() { return hashes_; } 83 | const std::vector& get_leaves() { return leaves_; } 84 | 85 | private: 86 | // The depth or height of the tree 87 | size_t depth_; 88 | 89 | // The total number of leaves in the tree 90 | size_t total_size_; 91 | 92 | // The root of the merkle tree 93 | barretenberg::fr root_; 94 | 95 | // Vector of pre-images of leaf values of the form {val, nextIdx, nextVal} 96 | // Size: grows by one per inserted leaf, up to total_size_ 97 | std::vector leaves_; 98 | 99 | // Vector that stores all the leaf hashes as well as intermediate node values 100 | // Size: total_size_ + (total_size_ / 2) + (total_size_ / 4) + ... 
+ 2 = 2 * total_size_ - 2 101 | std::vector hashes_; 102 | }; 103 | 104 | } // namespace indexed_merkle_tree 105 | } // namespace stdlib 106 | } // namespace plonk -------------------------------------------------------------------------------- /cryptography-engineer/src/indexed_merkle_tree/indexed_merkle_tree.test.cpp: -------------------------------------------------------------------------------- 1 | #include "indexed_merkle_tree.hpp" 2 | #include 3 | #include 4 | 5 | using namespace barretenberg; 6 | using namespace plonk::stdlib::indexed_merkle_tree; 7 | 8 | void print_tree(const size_t depth, std::vector hashes, std::string const& msg) 9 | { 10 | info("\n", msg); 11 | size_t offset = 0; 12 | for (size_t i = 0; i < depth; i++) { 13 | info("i = ", i); 14 | size_t layer_size = (1UL << (depth - i)); 15 | for (size_t j = 0; j < layer_size; j++) { 16 | info("j = ", j, ": ", hashes[offset + j]); 17 | } 18 | offset += layer_size; 19 | } 20 | } 21 | 22 | bool check_hash_path(const fr& root, const fr_hash_path& path, const leaf& leaf_value, const size_t idx) 23 | { 24 | auto current = leaf_value.hash(); 25 | size_t depth_ = path.size(); 26 | size_t index = idx; 27 | for (size_t i = 0; i < depth_; ++i) { 28 | fr left = (index & 1) ? path[i].first : current; 29 | fr right = (index & 1) ? 
current : path[i].second; 30 | current = compress_pair(left, right); 31 | index >>= 1; 32 | } 33 | return current == root; 34 | } 35 | 36 | TEST(stdlib_indexed_merkle_tree, test_toy_example) 37 | { 38 | // Create a depth-3 indexed merkle tree 39 | constexpr size_t depth = 3; 40 | IndexedMerkleTree tree(depth); 41 | 42 | /** 43 | * Intial state: 44 | * 45 | * index 0 1 2 3 4 5 6 7 46 | * --------------------------------------------------------------------- 47 | * val 0 0 0 0 0 0 0 0 48 | * nextIdx 0 0 0 0 0 0 0 0 49 | * nextVal 0 0 0 0 0 0 0 0 50 | */ 51 | leaf zero_leaf = { 0, 0, 0 }; 52 | EXPECT_EQ(tree.get_leaves().size(), 1); 53 | EXPECT_EQ(tree.get_leaves()[0], zero_leaf); 54 | 55 | /** 56 | * Add new value 30: 57 | * 58 | * index 0 1 2 3 4 5 6 7 59 | * --------------------------------------------------------------------- 60 | * val 0 30 0 0 0 0 0 0 61 | * nextIdx 1 0 0 0 0 0 0 0 62 | * nextVal 30 0 0 0 0 0 0 0 63 | */ 64 | tree.update_element(30); 65 | EXPECT_EQ(tree.get_leaves().size(), 2); 66 | EXPECT_EQ(tree.get_leaves()[0].hash(), leaf({ 0, 1, 30 }).hash()); 67 | EXPECT_EQ(tree.get_leaves()[1].hash(), leaf({ 30, 0, 0 }).hash()); 68 | 69 | /** 70 | * Add new value 10: 71 | * 72 | * index 0 1 2 3 4 5 6 7 73 | * --------------------------------------------------------------------- 74 | * val 0 30 10 0 0 0 0 0 75 | * nextIdx 2 0 1 0 0 0 0 0 76 | * nextVal 10 0 30 0 0 0 0 0 77 | */ 78 | tree.update_element(10); 79 | EXPECT_EQ(tree.get_leaves().size(), 3); 80 | EXPECT_EQ(tree.get_leaves()[0].hash(), leaf({ 0, 2, 10 }).hash()); 81 | EXPECT_EQ(tree.get_leaves()[1].hash(), leaf({ 30, 0, 0 }).hash()); 82 | EXPECT_EQ(tree.get_leaves()[2].hash(), leaf({ 10, 1, 30 }).hash()); 83 | 84 | /** 85 | * Add new value 20: 86 | * 87 | * index 0 1 2 3 4 5 6 7 88 | * --------------------------------------------------------------------- 89 | * val 0 30 10 20 0 0 0 0 90 | * nextIdx 2 0 3 1 0 0 0 0 91 | * nextVal 10 0 20 30 0 0 0 0 92 | */ 93 | tree.update_element(20); 94 | 
EXPECT_EQ(tree.get_leaves().size(), 4); 95 | EXPECT_EQ(tree.get_leaves()[0].hash(), leaf({ 0, 2, 10 }).hash()); 96 | EXPECT_EQ(tree.get_leaves()[1].hash(), leaf({ 30, 0, 0 }).hash()); 97 | EXPECT_EQ(tree.get_leaves()[2].hash(), leaf({ 10, 3, 20 }).hash()); 98 | EXPECT_EQ(tree.get_leaves()[3].hash(), leaf({ 20, 1, 30 }).hash()); 99 | 100 | // Adding the same value must not affect anything 101 | tree.update_element(20); 102 | EXPECT_EQ(tree.get_leaves().size(), 4); 103 | EXPECT_EQ(tree.get_leaves()[0].hash(), leaf({ 0, 2, 10 }).hash()); 104 | EXPECT_EQ(tree.get_leaves()[1].hash(), leaf({ 30, 0, 0 }).hash()); 105 | EXPECT_EQ(tree.get_leaves()[2].hash(), leaf({ 10, 3, 20 }).hash()); 106 | EXPECT_EQ(tree.get_leaves()[3].hash(), leaf({ 20, 1, 30 }).hash()); 107 | 108 | /** 109 | * Add new value 50: 110 | * 111 | * index 0 1 2 3 4 5 6 7 112 | * --------------------------------------------------------------------- 113 | * val 0 30 10 20 50 0 0 0 114 | * nextIdx 2 4 3 1 0 0 0 0 115 | * nextVal 10 50 20 30 0 0 0 0 116 | */ 117 | tree.update_element(50); 118 | EXPECT_EQ(tree.get_leaves().size(), 5); 119 | EXPECT_EQ(tree.get_leaves()[0].hash(), leaf({ 0, 2, 10 }).hash()); 120 | EXPECT_EQ(tree.get_leaves()[1].hash(), leaf({ 30, 4, 50 }).hash()); 121 | EXPECT_EQ(tree.get_leaves()[2].hash(), leaf({ 10, 3, 20 }).hash()); 122 | EXPECT_EQ(tree.get_leaves()[3].hash(), leaf({ 20, 1, 30 }).hash()); 123 | EXPECT_EQ(tree.get_leaves()[4].hash(), leaf({ 50, 0, 0 }).hash()); 124 | 125 | // Manually compute the node values 126 | auto e000 = tree.get_leaves()[0].hash(); 127 | auto e001 = tree.get_leaves()[1].hash(); 128 | auto e010 = tree.get_leaves()[2].hash(); 129 | auto e011 = tree.get_leaves()[3].hash(); 130 | auto e100 = tree.get_leaves()[4].hash(); 131 | auto e101 = leaf({ 0, 0, 0 }).hash(); 132 | auto e110 = leaf({ 0, 0, 0 }).hash(); 133 | auto e111 = leaf({ 0, 0, 0 }).hash(); 134 | 135 | auto e00 = compress_pair(e000, e001); 136 | auto e01 = compress_pair(e010, e011); 137 | auto e10 = 
compress_pair(e100, e101); 138 | auto e11 = compress_pair(e110, e111); 139 | 140 | auto e0 = compress_pair(e00, e01); 141 | auto e1 = compress_pair(e10, e11); 142 | auto root = compress_pair(e0, e1); 143 | 144 | // Check the hash path at index 2 and 3 145 | // Note: This merkle proof would also serve as a non-membership proof of values in (10, 20) and (20, 30) 146 | fr_hash_path expected = { 147 | std::make_pair(e010, e011), 148 | std::make_pair(e00, e01), 149 | std::make_pair(e0, e1), 150 | }; 151 | EXPECT_EQ(tree.get_hash_path(2), expected); 152 | EXPECT_EQ(tree.get_hash_path(3), expected); 153 | EXPECT_EQ(tree.root(), root); 154 | 155 | // Check the hash path at index 6 and 7 156 | expected = { 157 | std::make_pair(e110, e111), 158 | std::make_pair(e10, e11), 159 | std::make_pair(e0, e1), 160 | }; 161 | EXPECT_EQ(tree.get_hash_path(6), expected); 162 | EXPECT_EQ(tree.get_hash_path(7), expected); 163 | } 164 | 165 | TEST(stdlib_indexed_merkle_tree, test_real_example) 166 | { 167 | // Create a depth-8 indexed merkle tree 168 | constexpr size_t depth = 8; 169 | IndexedMerkleTree tree(depth); 170 | 171 | leaf zero_leaf = { 0, 0, 0 }; 172 | EXPECT_EQ(tree.get_leaves().size(), 1); 173 | EXPECT_EQ(tree.get_leaves()[0].hash(), zero_leaf.hash()); 174 | 175 | // Add 20 random values to the tree 176 | for (size_t i = 0; i < 20; i++) { 177 | auto value = fr::random_element(); 178 | tree.update_element(value); 179 | } 180 | 181 | auto abs_diff = [](uint256_t a, uint256_t b) { 182 | if (a > b) { 183 | return (a - b); 184 | } else { 185 | return (b - a); 186 | } 187 | }; 188 | 189 | // Check if a new random value is not a member of this tree. 
190 | fr new_member = fr::random_element(); 191 | const auto& leaves = tree.get_leaves(); 192 | std::vector differences; 193 | for (size_t i = 0; i < leaves.size(); i++) { 194 | uint256_t diff_hi = abs_diff(uint256_t(new_member), uint256_t(leaves[i].value)); 195 | uint256_t diff_lo = abs_diff(uint256_t(new_member), uint256_t(leaves[i].nextValue)); 196 | differences.push_back(diff_hi + diff_lo); 197 | } 198 | auto it = std::min_element(differences.begin(), differences.end()); 199 | auto index = static_cast(it - differences.begin()); 200 | 201 | // Merkle proof at `index` proves non-membership of `new_member` 202 | auto hash_path = tree.get_hash_path(index); 203 | EXPECT_TRUE(check_hash_path(tree.root(), hash_path, leaves[index], index)); 204 | } -------------------------------------------------------------------------------- /cryptography-engineer/src/indexed_merkle_tree/leaf.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | 5 | namespace plonk { 6 | namespace stdlib { 7 | namespace indexed_merkle_tree { 8 | 9 | using namespace barretenberg; 10 | typedef uint256_t index_t; 11 | 12 | struct leaf { 13 | fr value; 14 | index_t nextIndex; 15 | fr nextValue; 16 | 17 | bool operator==(leaf const&) const = default; 18 | 19 | std::ostream& operator<<(std::ostream& os) 20 | { 21 | os << "value = " << value << "\nnextIdx = " << nextIndex << "\nnextVal = " << nextValue; 22 | return os; 23 | } 24 | 25 | void read(uint8_t const*& it) 26 | { 27 | using serialize::read; 28 | read(it, value); 29 | read(it, nextIndex); 30 | read(it, nextValue); 31 | } 32 | 33 | inline void write(std::vector& buf) 34 | { 35 | using serialize::write; 36 | write(buf, value); 37 | write(buf, nextIndex); 38 | write(buf, nextValue); 39 | } 40 | 41 | barretenberg::fr hash() const { return crypto::pedersen::compress_native({ value, nextIndex, nextValue }); } 42 | }; 43 | 44 | inline barretenberg::fr 
compress_pair(barretenberg::fr const& lhs, barretenberg::fr const& rhs) 45 | { 46 | return crypto::pedersen::compress_native({ lhs, rhs }); 47 | } 48 | 49 | } // namespace indexed_merkle_tree 50 | } // namespace stdlib 51 | } // namespace plonk -------------------------------------------------------------------------------- /eng-sessions/merkle-tree-cpp/.clang-format: -------------------------------------------------------------------------------- 1 | PointerAlignment: Left 2 | ColumnLimit: 120 3 | IndentWidth: 4 4 | BinPackArguments: false 5 | BinPackParameters: false 6 | Cpp11BracedListStyle: false 7 | AlwaysBreakAfterReturnType: None 8 | AlwaysBreakAfterDefinitionReturnType: None 9 | PenaltyReturnTypeOnItsOwnLine: 1000000 10 | BreakConstructorInitializers: BeforeComma 11 | BreakBeforeBraces: Custom 12 | BraceWrapping: 13 | AfterClass: false 14 | AfterEnum: false 15 | AfterFunction: true 16 | AfterNamespace: false 17 | AfterStruct: false 18 | AfterUnion: false 19 | AfterExternBlock: false 20 | BeforeCatch: false 21 | BeforeElse: false 22 | SplitEmptyFunction: false 23 | SplitEmptyRecord: false 24 | SplitEmptyNamespace: false 25 | AllowShortFunctionsOnASingleLine : Inline 26 | SortIncludes: true 27 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree-cpp/.clangd: -------------------------------------------------------------------------------- 1 | CompileFlags: 2 | Remove: -fconstexpr-ops-limit=* 3 | Add: [-std=c++20] 4 | --- 5 | If: 6 | PathMatch: [src/.*\.hpp, src/.*\.cpp, src/.*\.tcc] 7 | Diagnostics: 8 | UnusedIncludes: None 9 | 10 | # Static analysis configuration 11 | ClangTidy: 12 | Add: 13 | - cert-* 14 | - google-* 15 | - readability-* 16 | - bugprone-* 17 | - misc-* 18 | - performance-* 19 | Remove: 20 | - misc-const-correctness 21 | - readability-magic-numbers 22 | - readability-identifier-length 23 | - bugprone-easily-swappable-parameters 24 | - misc-non-private-member-variables-in-classes 25 | - 
google-build-using-namespace 26 | - readability-container-data-pointer 27 | - modernize-use-bool-literals 28 | - readability-function-cognitive-complexity 29 | - google-explicit-constructor 30 | - modernize-use-nodiscard 31 | 32 | --- 33 | # Disable some checks for Google Test/Bench 34 | If: 35 | PathMatch: [src/.*\.test\.cpp, src/.*\.bench\.cpp] 36 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree-cpp/.gitignore: -------------------------------------------------------------------------------- 1 | merkle_test 2 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree-cpp/README.md: -------------------------------------------------------------------------------- 1 | # Aztec Technical Challenge 2 | 3 | The test provides you an opportunity to demonstrate the following: 4 | 5 | - Your ability to write a data structure algorithm (in this case a merkle tree). 6 | - Your ability to write clean, idiomatic C++. 7 | 8 | ## Rationale 9 | 10 | A core data structure in the Aztec system is the merkle tree. It's a simple binary tree structure where the root node is represented by the hash of its two child hashes. Given any set of data in the leaves, this leads to a unique root. Furthermore, proof of existence of a piece of data can be represented by a hash path, a list of pairwise child hashes at each layer, from leaf to root. Aztec stores all of its notes in such data structures, and when proofs are generated they use hash paths to prove the data they are modifying exists. 11 | 12 | In this test you will be working on an implementation of a merkle tree. 13 | 14 | ## Merkle Tree Structure 15 | 16 | - The merkle tree is of depth `32`, and is fully formed with leaves consisting of `64` zero bytes at every index. 17 | - When inserting an element of arbitrary length, the value must first be `hash`ed to `32` bytes using sha256. 
/**
 * Mimics the TypeScript HashPath class: a list of (left, right) sibling-hash
 * pairs, one pair per tree layer, leaf-most layer first.
 *
 * NOTE(review): the template arguments below were lost in extraction; they
 * are reconstructed from from_buffer(), which reads 32-byte left/right nodes.
 */
class HashPath {
  public:
    // A node is a 32-byte sha256 digest (same layout as sha256_hash_t).
    using node_t = std::array<uint8_t, 32>;

    // Each entry in 'data' is (left_node, right_node) for one layer.
    std::vector<std::pair<node_t, node_t>> data;

    HashPath() = default;

    HashPath(const std::vector<std::pair<node_t, node_t>>& d)
        : data(d)
    {}

    /**
     * Flatten the data into a single buffer, each pair appended:
     * first (32 bytes) + second (32 bytes), repeated for each layer.
     */
    std::vector<uint8_t> to_buffer() const
    {
        std::vector<uint8_t> buf;
        buf.reserve(data.size() * 64);
        for (const auto& pair_item : data) {
            buf.insert(buf.end(), pair_item.first.begin(), pair_item.first.end());
            buf.insert(buf.end(), pair_item.second.begin(), pair_item.second.end());
        }
        return buf;
    }

    /**
     * Inverse of to_buffer(): every 64 bytes become one (left, right) pair.
     * A buffer whose size is not a multiple of 64 yields an empty path —
     * deliberate best-effort behaviour preserved from the original.
     */
    static HashPath from_buffer(const std::vector<uint8_t>& buf)
    {
        HashPath path;
        if (buf.size() % 64 != 0) {
            // Invalid. In real usage, might throw or handle differently.
            return path;
        }
        size_t count = buf.size() / 64;
        path.data.reserve(count);
        for (size_t i = 0; i < count; ++i) {
            node_t left;
            node_t right;
            std::copy(buf.begin() + i * 64, buf.begin() + i * 64 + 32, left.begin());
            std::copy(buf.begin() + i * 64 + 32, buf.begin() + i * 64 + 64, right.begin());
            path.data.emplace_back(std::make_pair(left, right));
        }
        return path;
    }
};
/**
 * Utility function: converts a byte sequence to a lowercase hexadecimal
 * string (two hex digits per byte).
 * NOTE(review): the `template <typename T>` header and the `static_cast<int>`
 * argument were lost in extraction and are restored here.
 */
template <typename T> std::string to_hex(const T& data)
{
    std::ostringstream oss;
    oss << std::hex << std::setfill('0');
    for (auto byte : data) {
        // Promote to int so the stream prints a number, not a raw char.
        oss << std::setw(2) << static_cast<int>(byte);
    }
    return oss.str();
}

/**
 * Asserts that the hexadecimal representation of 'actual' matches
 * 'expected_hex'. Throws std::runtime_error with a diagnostic on mismatch.
 */
template <typename T>
void assert_equal_hex(const T& actual, const std::string& expected_hex, const std::string& test_message = "")
{
    std::string actual_hex = to_hex(actual);
    if (actual_hex != expected_hex) {
        std::ostringstream oss;
        oss << "\nAssertion failed! " << test_message << "\n"
            << " Expected: " << expected_hex << "\n"
            << " Actual : " << actual_hex << "\n";
        throw std::runtime_error(oss.str());
    }
}
/**
 * Creates a 64-byte leaf value where the first 4 bytes encode the integer 'i'
 * in little-endian order; the remaining 60 bytes are zero.
 * NOTE(review): vector element type restored as uint8_t — the template
 * argument was lost in extraction.
 */
std::vector<uint8_t> make_leaf_64(uint32_t i)
{
    std::vector<uint8_t> leaf(64, 0);
    // Little-endian: least significant byte first.
    for (size_t b = 0; b < 4; ++b) {
        leaf[b] = static_cast<uint8_t>((i >> (8 * b)) & 0xff);
    }
    return leaf;
}
87 | sha256_hash_t e00 = hasher.hash(values[0]); 88 | sha256_hash_t e01 = hasher.hash(values[1]); 89 | sha256_hash_t e02 = hasher.hash(values[2]); 90 | sha256_hash_t e03 = hasher.hash(values[3]); 91 | 92 | sha256_hash_t e10 = hasher.compress(e00, e01); 93 | sha256_hash_t e11 = hasher.compress(e02, e03); 94 | sha256_hash_t expected_root = hasher.compress(e10, e11); 95 | 96 | std::string expected_root_hex = to_hex(expected_root); 97 | if (expected_root_hex != "e645e6b5445483a358c4d15c1923c616a0e6884906b05c196d341ece93b2de42") { 98 | throw std::runtime_error("Error in the test's basic SHA256 assumptions!"); 99 | } 100 | 101 | // Create a depth=2 tree and insert 4 leaves. 102 | auto tree = MerkleTree::create(db, "test", 2); 103 | for (int i = 0; i < 4; ++i) { 104 | tree.update_element(i, values[i]); 105 | } 106 | 107 | // Verify hash paths: 108 | // For indices 0 and 1, the hash path should be [ [e00, e01], [e10, e11] ] 109 | HashPath expected_path_0 = HashPath({ { e00, e01 }, { e10, e11 } }); 110 | if (!(tree.get_hash_path(0) == expected_path_0)) { 111 | throw std::runtime_error("Hash path for index 0 does not match expected value."); 112 | } 113 | if (!(tree.get_hash_path(1) == expected_path_0)) { 114 | throw std::runtime_error("Hash path for index 1 does not match expected value."); 115 | } 116 | 117 | // For indices 2 and 3, the hash path should be [ [e02, e03], [e10, e11] ] 118 | HashPath expected_path_2 = HashPath({ { e02, e03 }, { e10, e11 } }); 119 | if (!(tree.get_hash_path(2) == expected_path_2)) { 120 | throw std::runtime_error("Hash path for index 2 does not match expected value."); 121 | } 122 | if (!(tree.get_hash_path(3) == expected_path_2)) { 123 | throw std::runtime_error("Hash path for index 3 does not match expected value."); 124 | } 125 | 126 | // Verify the tree root. 
127 | auto actual_root = tree.get_root(); 128 | assert_equal_hex(actual_root, expected_root_hex, "Depth=2 root check"); 129 | 130 | std::cout << "Test 2 success" << std::endl; 131 | } 132 | 133 | // Test 3: Verify that a tree of depth 10 can be restored from previous data. 134 | { 135 | std::cout << "Test 3: Verify that a tree of depth 10 can be restored from previous data." << std::endl; 136 | MockDB db; 137 | 138 | auto tree = MerkleTree::create(db, "test", 10); 139 | for (int i = 0; i < 128; ++i) { 140 | tree.update_element(i, values[i]); 141 | } 142 | auto tree2 = MerkleTree::create(db, "test", 10); 143 | assert_equal_hex(tree2.get_root(), 144 | "4b8404d05a963de56f7212fbf8123204b1eb77a4cb16ae3875679a898aaa5daa", 145 | "Restored depth=10 root check"); 146 | 147 | for (int i = 0; i < 128; ++i) { 148 | HashPath hp1 = tree.get_hash_path(i); 149 | HashPath hp2 = 150 | tree2.get_hash_path(i); 151 | if (!(hp1 == hp2)) { 152 | throw std::runtime_error("Mismatch in hash paths for index " + std::to_string(i)); 153 | } 154 | } 155 | 156 | std::cout << "Test 3 success" << std::endl; 157 | } 158 | 159 | // Test 4: Insert 1024 values into a depth=32 tree and verify final root and hash path for index 100. 160 | { 161 | std::cout 162 | << "Test 4: Insert 1024 values into a depth=32 tree and verify final root and hash path for index 100." 163 | << std::endl; 164 | MockDB db; 165 | 166 | auto tree = MerkleTree::create(db, "test", 32); 167 | for (int i = 0; i < 1024; ++i) { 168 | tree.update_element(i, values[i]); 169 | } 170 | 171 | // Expected final root from TypeScript test. 172 | std::string expected_hex = "26996bfcb0aaf96422aefdd789396a3f6c8a4fa6dccc73e55060e03e2a238db0"; 173 | assert_equal_hex(tree.get_root(), expected_hex, "Insert 1024 values into 32-depth tree root check"); 174 | 175 | // Expected hash path for index 100 (a long hex string from TS test). 
176 | std::string expected_path_hex = 177 | "f59927591e6e3283d4419e376e4ebb4e08f4f547a3d1076474a29c9d44a07b28e703b6c67d0d1d2a7ef4bd70b8cda5840" 178 | "61db4d9e3673f79d3cafab5ecbd9b1e6478de41cd35e7937dd9ac9f1bb59aaeb71c3baec571778d8eb3e22116810bcda2" 179 | "15145f0f4ca7c22c5c149359d8597258d8f1e3630b5d74f9035a69bb19bd9bc415913d1a01741ed6e881871baa9e3b3fa" 180 | "bfa5ae6009a2ec009ea22bbced51b1077408bc95eae2d5ba2c7cdd8d5690d6fb27702d295fa801212f5d0bc6cc923b8d4" 181 | "5c43b2e08d36dbfba1e74a5297242c9c460a111c89067daf1c59a6d44a062c7882de61cfe65b71bc5fe9fe636c825cc6a" 182 | "96df0b5b4e2885f9e974e80a00fe5022a268652893af91ffa3bf545d9c852c7181547bbc6e0ce29ee85fac6a7eca17c8b" 183 | "1a90804b7f360d319e43e590be9e92c66c74ffc5579e5e6e309de98a1f9b4911d80fe9b0e3e9db5f0cc67b2ebeb29a706" 184 | "1f1c7a2c623495b1ec4e4bef82f9d5be75ae2392b70bda91a35285be08b82684cddf58f9a27c1c5dad5a9eb9f33801429" 185 | "23feebd36b5b4cf22d70b9552d82b887d00aa2da4b65442a31030e9b809d10356452b8f549fef05433e17b308a5acc5f4" 186 | "0ae0c7e106c916a03ce229ed8fab8cb151ae4353f689e40324ffbd0a76b0e9b82d6d5686bdd5510a7ed3e64aba719d04f" 187 | "9015a6f55f69930f5fbb0da3800640af69d10528e7360f76d1a58a6e0bce4cfe43cf119b8f5173b6e5cfa351f35253eec" 188 | "e828e6c1433f76b88329232f5a6988105158fc273823f7525a53821a2aed0a185f4857b12650dd7be180906dcfdf5b108" 189 | "0bb93e1a157ed947c2184dc35e097843a4094f88dd50eb4a0d5828b0055f617c272a0bf7ed0665ed9fc4789e817ab7305" 190 | "013faa7724a3cf5b2192203f55bcc35322e08aee3d77d686c4590b140f5d9047ea13380b2442e0d001a6cf04127db0544" 191 | "1cd833107a52be852868890e4317e6a02ab47683aa759642206fe8f537ae7a17d3e37896a770ffe6715a97863ce29ccc8" 192 | "a059a33c0026add13b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5fa8ac942e18e6ce87" 193 | "a78488cda91b88d9407627093af97852b350208a21bee3d1df6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8" 194 | "b284dea6a08a85ef446e2aa248796ac561107c12c1be9f3e6fe5e08a5d0c005f314a4d08e393118b58d900f5e182e3c50" 195 | 
"ef74969ea16c7726c549757cc23523c369587da72937846c7860e58be630894eb80ec8b079672089d2cd222544e39ba45" 196 | "6fc2c32716738d49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bbb22e774a0c40a7772a3f" 197 | "bc9fa8e5381b695652ca7af0fa783af2f08586dba0f18fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e" 198 | "5948de92beb661a23b4ea755618621a94f28e34249c24280de0e084a7336e3c870a2d7904758d0d63c39ebade8509e0ae" 199 | "3c9c3876fb5fa112be18f905ecacfecb92057603ab9935eeed1d12d309a184dc60b6fb92a0a9d0a6c3495d8478798b3c9" 200 | "b46fcf66995eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4adbdffbd8298a18020d86c3b" 201 | "1f750ab7b25a3e13a5b8a235a82bdbabbb498868f893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6" 202 | "f81f17f321d317d099bfac9d91c2b818f184161d8c3b2516ae643914451fd0bb492dc5fcddba7b592e3133393c16194fa" 203 | "c7431abf2f5485ed711db282183c819e08ebaa86b650ff5ee1c07d86c6605fa93b83b4d83e59733b5d244008b841ca13c" 204 | "cb2058a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c4b7bf8c19a29dbbed382a13fdb41" 205 | "dd8630d2c76ab6e5f971b3c47c5065387763feb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c11" 206 | "67b6483422f56919ce2f54dd9a9b0162455a5a75e83ac7c1965b2a44530ed65098e71f0aa83cc32edfbefa9f4d3e0174ca" 207 | "85182eec9f3a09f6a6c0df6377a510d7f5edf6415b5d1e1b4726f61a52a115319e3284020532a55c2d2f21aa5e7b806231" 208 | "206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc02bf86d2cef052a013d83a745d32b7d111ed3" 209 | "269866091cb266897ddfe124546b21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544c4dc4" 210 | "cf039332ba8521be07ced0b8b09735cebf122d1c1ee7e388e03922704fc619e312724bb6d7c3153ed9de791d764a366b3" 211 | "89af13c58bf8a8d90481a467653cb1a76abf102ae036aa9ecb12c4e36998ebc183f0efc1628c6c76b813b97ccc7cdd2986" 212 | "268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4b0ae7a85396184a39332cfb7630df1dac5c25fed0d" 213 | 
"fca561e9b81c96681fdd6b848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe13c90183e8f0c" 214 | "1dc88aa7f3ee1776e8f8ca4fa5efcea07191d96c35b286c3382e8869ff2c22b28cc10510d9853292803328be4fb0e80495" 215 | "e8bb8d271f5b889636a1e5e441b5b5ce9d1fc3def4c6e474045348a0cac3e35f03e6e4324400d2b4ddb5fe28e79f1b850f" 216 | "8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c1d48bdddad3b8a062632e00d4fd83dc6ff8aab7bf3adc647e0" 217 | "f1cfdd43a81a65985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f769896f30b46bea4e13cbe" 218 | "6d1377a90416df6e1e265052ad6017c0c1f2b28b47cc6f67e02e6e4e1bdefb994c6098953f34636ba2b6ca20a4721d2b26a886" 219 | "722ff"; 220 | auto hash_path_100 = tree.get_hash_path(100); 221 | std::string actual_path_hex = to_hex(hash_path_100.to_buffer()); 222 | if (actual_path_hex != expected_path_hex) { 223 | throw std::runtime_error("HashPath for index 100 does not match expected value."); 224 | } 225 | std::cout << "Test 4 success" << std::endl; 226 | } 227 | 228 | std::cout << "All tests passed successfully!\n"; 229 | } 230 | 231 | int main() 232 | { 233 | try { 234 | run_tests(); 235 | return 0; 236 | } catch (const std::exception& ex) { 237 | std::cerr << "Test error: " << ex.what() << std::endl; 238 | return 1; 239 | } 240 | } 241 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree-cpp/src/merkle_tree.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "hash_path.hpp" 4 | #include "mock_db.hpp" 5 | #include "sha256_hasher.hpp" 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | /** 12 | * The MerkleTree class implements a Merkle tree—a data structure that enables efficient 13 | * proofs of membership. 14 | * 15 | * NOTE: This is a placeholder version. All helper methods and internal implementation details 16 | * have been removed. Replace the "// Implement" comments with the appropriate logic. 
17 | */ 18 | class MerkleTree { 19 | private: 20 | static constexpr uint32_t MAX_DEPTH = 32; 21 | static constexpr uint32_t LEAF_BYTES = 64; 22 | public: 23 | /** 24 | * Constructs a new or existing tree. 25 | * 26 | * @param db The underlying database. 27 | * @param name The name of the tree. 28 | * @param depth The tree’s depth (with leaves at layer = depth). 29 | * @param root (Optional) The pre-existing tree root. 30 | * 31 | * Throws std::runtime_error if depth is not in [1, 32]. 32 | */ 33 | MerkleTree(MockDB& db, const std::string& name, uint32_t depth, const sha256_hash_t& root = {}) 34 | : db(db) 35 | , name(name) 36 | , depth(depth) 37 | , root(root) 38 | , hasher() 39 | { 40 | if (!(depth >= 1 && depth <= MAX_DEPTH)) { 41 | throw std::runtime_error("Bad depth"); 42 | } 43 | // Implement. 44 | } 45 | 46 | /** 47 | * Creates (or restores) a MerkleTree instance. 48 | * 49 | * @param db The underlying database. 50 | * @param name The name of the tree. 51 | * @param depth The tree’s depth (default is 32). 52 | * @return A MerkleTree instance. 53 | */ 54 | static MerkleTree create(MockDB& db, const std::string& name, uint32_t depth = MAX_DEPTH) 55 | { 56 | return MerkleTree(db, name, depth); 57 | } 58 | 59 | /** 60 | * Returns the current Merkle tree root (32 bytes). 61 | */ 62 | sha256_hash_t get_root() const 63 | { 64 | return root; 65 | } 66 | 67 | /** 68 | * Returns the hash path (Merkle proof) for a particular leaf index. 69 | * 70 | * @param index The leaf index. 71 | * @return A HashPath object. 72 | */ 73 | HashPath get_hash_path(uint64_t index) const 74 | { 75 | // Implement. 76 | return HashPath(); 77 | } 78 | 79 | /** 80 | * Updates the leaf at the given index with the specified 64-byte value. 81 | * 82 | * @param index The index of the leaf. 83 | * @param value A 64-byte vector representing the leaf data. 84 | * @return The new 32-byte tree root. 85 | * 86 | * Throws std::runtime_error if value is not exactly 64 bytes. 
87 | */ 88 | sha256_hash_t update_element(uint64_t index, const std::vector& value) 89 | { 90 | // Implement. 91 | return root; 92 | } 93 | 94 | private: 95 | // Core member variables. 96 | MockDB& db; 97 | std::string name; 98 | uint32_t depth; 99 | sha256_hash_t root; 100 | Sha256Hasher hasher; 101 | }; 102 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree-cpp/src/mock_db.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | /** 10 | * A simple mock for a key-value store, mimicking the minimal interface we need 11 | * from something like LevelUp in the TypeScript code. 12 | * 13 | * In a real DB scenario, these operations would be asynchronous and possibly 14 | * require a batch process for efficiency. 15 | * 16 | * We'll add a simple 'batch_write' as a "bonus" that would let us do a 17 | * combined write operation. In this mock, it's effectively just multiple puts. 18 | */ 19 | 20 | 21 | /** 22 | * A "batch" operation is just a list of (key, value) pairs to write. 23 | */ 24 | struct MockDBBatchItem { 25 | std::string key; 26 | std::array value; 27 | }; 28 | 29 | class MockDB { 30 | public: 31 | MockDB() = default; 32 | // Store to mock a DB. 
33 | std::unordered_map> store{}; 34 | // retrieve a value from the store 35 | std::optional> get(const std::string& key) const 36 | { 37 | auto it = store.find(key); 38 | if (it == store.end()) { 39 | return std::nullopt; 40 | } 41 | return it->second; 42 | } 43 | 44 | // put a value into the store 45 | void put(const std::string& key, const std::array& value) { store[key] = value; } 46 | 47 | // bonus: naive batch write (in a real DB, this might be atomic) 48 | void batch_write(const std::vector &items) { 49 | for (auto &item : items) { 50 | store[item.key] = item.value; 51 | } 52 | } 53 | }; 54 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree-cpp/src/sha256.cpp: -------------------------------------------------------------------------------- 1 | #include "sha256.hpp" 2 | #include 3 | #include 4 | #include 5 | 6 | constexpr std::array SHA256::K; 7 | 8 | SHA256::SHA256() 9 | : blocklen(0) 10 | , bitlen(0) 11 | { 12 | state[0] = 0x6a09e667; 13 | state[1] = 0xbb67ae85; 14 | state[2] = 0x3c6ef372; 15 | state[3] = 0xa54ff53a; 16 | state[4] = 0x510e527f; 17 | state[5] = 0x9b05688c; 18 | state[6] = 0x1f83d9ab; 19 | state[7] = 0x5be0cd19; 20 | } 21 | 22 | void SHA256::update(const uint8_t* new_data, size_t length) 23 | { 24 | for (size_t i = 0; i < length; i++) { 25 | data[blocklen++] = new_data[i]; 26 | if (blocklen == 64) { 27 | transform(); 28 | bitlen += 512; 29 | blocklen = 0; 30 | } 31 | } 32 | } 33 | 34 | void SHA256::update(const std::string& new_data) 35 | { 36 | update(reinterpret_cast(new_data.c_str()), new_data.size()); 37 | } 38 | 39 | std::array SHA256::digest() 40 | { 41 | std::array hash; 42 | pad(); 43 | revert(hash); 44 | return hash; 45 | } 46 | 47 | uint32_t SHA256::rotr(uint32_t x, uint32_t n) 48 | { 49 | return (x >> n) | (x << (32 - n)); 50 | } 51 | 52 | uint32_t SHA256::choose(uint32_t e, uint32_t f, uint32_t g) 53 | { 54 | return (e & f) ^ (~e & g); 55 | } 56 | 57 | uint32_t 
SHA256::majority(uint32_t a, uint32_t b, uint32_t c) 58 | { 59 | return (a & (b | c)) | (b & c); 60 | } 61 | 62 | uint32_t SHA256::sig0(uint32_t x) 63 | { 64 | return rotr(x, 7) ^ rotr(x, 18) ^ (x >> 3); 65 | } 66 | 67 | uint32_t SHA256::sig1(uint32_t x) 68 | { 69 | return rotr(x, 17) ^ rotr(x, 19) ^ (x >> 10); 70 | } 71 | 72 | void SHA256::transform() 73 | { 74 | uint32_t maj, xorA, ch, xorE, sum, newA, newE, m[64]; 75 | uint32_t new_state[8]; 76 | 77 | // Prepare the message schedule array 78 | for (uint8_t i = 0, j = 0; i < 16; i++, j += 4) { 79 | m[i] = (data[j] << 24) | (data[j + 1] << 16) | (data[j + 2] << 8) | (data[j + 3]); 80 | } 81 | for (uint8_t k = 16; k < 64; k++) { 82 | m[k] = sig1(m[k - 2]) + m[k - 7] + sig0(m[k - 15]) + m[k - 16]; 83 | } 84 | 85 | // Copy current hash state to working variables 86 | for (uint8_t i = 0; i < 8; i++) { 87 | new_state[i] = state[i]; 88 | } 89 | 90 | // Compression function main loop 91 | for (uint8_t i = 0; i < 64; i++) { 92 | maj = majority(new_state[0], new_state[1], new_state[2]); 93 | xorA = rotr(new_state[0], 2) ^ rotr(new_state[0], 13) ^ rotr(new_state[0], 22); 94 | ch = choose(new_state[4], new_state[5], new_state[6]); 95 | xorE = rotr(new_state[4], 6) ^ rotr(new_state[4], 11) ^ rotr(new_state[4], 25); 96 | sum = m[i] + K[i] + new_state[7] + ch + xorE; 97 | newA = xorA + maj + sum; 98 | newE = new_state[3] + sum; 99 | 100 | new_state[7] = new_state[6]; 101 | new_state[6] = new_state[5]; 102 | new_state[5] = new_state[4]; 103 | new_state[4] = newE; 104 | new_state[3] = new_state[2]; 105 | new_state[2] = new_state[1]; 106 | new_state[1] = new_state[0]; 107 | new_state[0] = newA; 108 | } 109 | 110 | // Add the compressed chunk to the current hash value 111 | for (uint8_t i = 0; i < 8; i++) { 112 | state[i] += new_state[i]; 113 | } 114 | } 115 | 116 | void SHA256::pad() 117 | { 118 | // Save the current block length (the number of bytes already in the block) 119 | uint64_t orig_blocklen = blocklen; 120 | 121 | // 
Append the bit '1' (i.e. 0x80) to the message. 122 | data[blocklen++] = 0x80; 123 | 124 | // If the current block length is now greater than 56 bytes, 125 | // pad with zeros, transform the block, and then reset blocklen. 126 | if (blocklen > 56) { 127 | while (blocklen < 64) { 128 | data[blocklen++] = 0x00; 129 | } 130 | transform(); 131 | blocklen = 0; 132 | } 133 | 134 | // Pad with zeros until the block is 56 bytes long. 135 | while (blocklen < 56) { 136 | data[blocklen++] = 0x00; 137 | } 138 | 139 | // Compute the total message length in bits. 140 | uint64_t total_bits = bitlen + orig_blocklen * 8; 141 | 142 | // Append the length as a 64-bit big-endian integer. 143 | for (int i = 0; i < 8; ++i) { 144 | data[63 - i] = total_bits & 0xff; 145 | total_bits >>= 8; 146 | } 147 | 148 | // Process the final block. 149 | transform(); 150 | } 151 | 152 | void SHA256::revert(std::array& hash) 153 | { 154 | // SHA uses big-endian byte ordering, so convert each 32-bit chunk. 155 | for (uint8_t i = 0; i < 4; i++) { 156 | for (uint8_t j = 0; j < 8; j++) { 157 | hash[i + (j * 4)] = (state[j] >> (24 - i * 8)) & 0x000000ff; 158 | } 159 | } 160 | } 161 | 162 | std::string SHA256::to_string(const std::array& digest) 163 | { 164 | std::stringstream s; 165 | s << std::setfill('0') << std::hex; 166 | for (uint8_t byte : digest) { 167 | s << std::setw(2) << static_cast(byte); 168 | } 169 | return s.str(); 170 | } 171 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree-cpp/src/sha256.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | 6 | class SHA256 { 7 | 8 | public: 9 | SHA256(); 10 | void update(const uint8_t* new_data, size_t length); 11 | void update(const std::string& new_data); 12 | std::array digest(); 13 | 14 | static std::string to_string(const std::array & digest); 15 | 16 | private: 17 | uint8_t data[64]; 18 | uint32_t 
blocklen; 19 | uint64_t bitlen; 20 | uint32_t state[8]; //A, B, C, D, E, F, G, H 21 | 22 | static constexpr std::array K = { 23 | 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5, 24 | 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5, 25 | 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3, 26 | 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174, 27 | 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc, 28 | 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da, 29 | 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7, 30 | 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967, 31 | 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13, 32 | 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85, 33 | 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3, 34 | 0xd192e819,0xd6990624,0xf40e3585,0x106aa070, 35 | 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5, 36 | 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3, 37 | 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208, 38 | 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 39 | }; 40 | 41 | static uint32_t rotr(uint32_t x, uint32_t n); 42 | static uint32_t choose(uint32_t e, uint32_t f, uint32_t g); 43 | static uint32_t majority(uint32_t a, uint32_t b, uint32_t c); 44 | static uint32_t sig0(uint32_t x); 45 | static uint32_t sig1(uint32_t x); 46 | void transform(); 47 | void pad(); 48 | void revert(std::array & hash); 49 | }; 50 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree-cpp/src/sha256_hasher.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "sha256.hpp" 4 | #include 5 | #include 6 | #include 7 | 8 | using sha256_hash_t = std::array; 9 | 10 | /** 11 | * Interface: 12 | * - compress(lhs, rhs): concatenates lhs and rhs (64 bytes total) and returns their 32-byte SHA256 hash. 13 | * - hash(data): returns a 32-byte SHA256 hash of arbitrary-length data. 14 | */ 15 | class Sha256Hasher { 16 | public: 17 | /** 18 | * Given two 32-byte buffers, return a 32-byte digest representing their concatenation. 
19 | */ 20 | std::array compress(const sha256_hash_t& lhs, const sha256_hash_t& rhs) 21 | { 22 | // They should each be 32 bytes for a merkle node. We'll concat them and compute the SHA-256 hash. 23 | SHA256 sha; 24 | sha.update(lhs.data(), lhs.size()); 25 | sha.update(rhs.data(), rhs.size()); 26 | return sha.digest(); 27 | } 28 | 29 | /** 30 | * Given data of arbitrary length, return its 32-byte SHA256 hash. 31 | */ 32 | std::array hash(const std::vector& data) 33 | { 34 | SHA256 sha; 35 | sha.update(data.data(), data.size()); 36 | return sha.digest(); 37 | } 38 | }; 39 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/.gitignore: -------------------------------------------------------------------------------- 1 | /data 2 | /node_modules 3 | /dest -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "trailingComma": "all", 4 | "printWidth": 120, 5 | "arrowParens": "avoid" 6 | } 7 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/README.md: -------------------------------------------------------------------------------- 1 | # Aztec Technical Challenge 2 | 3 | The test provides you an opportunity to demonstrate the following: 4 | 5 | - Your ability to write a data structure algorithm (in this case a merkle tree). 6 | - Your ability to write clean, idiomatic TypeScript. 7 | 8 | ## Rationale 9 | 10 | A core data structure in the Aztec system is the merkle tree. It's a simple binary tree structure where the root node is represented by the hash of its two child hashes. Given any set of data in the leaves, this leads to a unique root. 
Furthermore, proof of existence of a piece of data can be represented by a hash path, a list of pairwise child hashes at each layer, from leaf to root. Aztec stores all of its notes in such data structures, and when proofs are generated they use hash paths to prove the data they are modifying exists. 11 | 12 | In this test you will be working on an implementation of a merkle tree. 13 | 14 | ## Merkle Tree Structure 15 | 16 | - The merkle tree is of depth `32`, and is fully formed with leaves consisting of `64` zero bytes at every index. 17 | - When inserting an element of arbitrary length, the value must first be `hash`ed to `32` bytes using sha256. 18 | - Each node of the tree is computed by `compress`ing its left and right subtree hashes and taking the resulting sha256 hash. 19 | - For reference, an unpopulated merkle tree will have a root hash of `1c9a7e5ff1cf48b4ad1582d3f4e4a1004f3b20d8c5a2b71387a4254ad933ebc5`. 20 | 21 | The merkle tree is to be persisted in a key value store. `LevelUp` provides the basic key value store interface. 22 | 23 | ## Building and Running 24 | 25 | After cloning the repo: 26 | 27 | ```bash 28 | yarn install 29 | 30 | # To run all tests. 31 | yarn test 32 | 33 | # To run tests, watching for changes. 
34 | yarn test --watch 35 | ``` 36 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "interview-test", 3 | "version": "1.0.0", 4 | "main": "dest/index.js", 5 | "license": "GPL-2.0", 6 | "scripts": { 7 | "test": "jest" 8 | }, 9 | "jest": { 10 | "transform": { 11 | "^.+\\.tsx?$": "ts-jest" 12 | }, 13 | "testRegex": ".*\\.test\\.ts$", 14 | "moduleFileExtensions": [ 15 | "js", 16 | "ts" 17 | ], 18 | "rootDir": "./src" 19 | }, 20 | "dependencies": { 21 | "levelup": "^4.4.0", 22 | "memdown": "^5.1.0", 23 | "typescript": "^3.5.3" 24 | }, 25 | "devDependencies": { 26 | "@types/jest": "^26.0.10", 27 | "@types/levelup": "^4.3.0", 28 | "@types/memdown": "^3.0.0", 29 | "@types/node": "^12.6.8", 30 | "jest": "^24.8.0", 31 | "prettier": "^2.0.5", 32 | "ts-jest": "^24.0.2" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/src/hash_path.ts: -------------------------------------------------------------------------------- 1 | export class HashPath { 2 | constructor(public data: Buffer[][] = []) {} 3 | 4 | public toBuffer() { 5 | return Buffer.concat(this.data.flat()); 6 | } 7 | 8 | /** 9 | * Creates a new `HashPath` instance from a buffer previously created from `toBuffer`. 
10 | */ 11 | static fromBuffer(buf: Buffer) { 12 | return new HashPath( 13 | [...new Array(buf.length / 64)].map((_, i) => [ 14 | buf.slice(i * 2 * 32, i * 2 * 32 + 32), 15 | buf.slice(i * 2 * 32 + 32, i * 2 * 32 + 64), 16 | ]), 17 | ); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/src/index.ts: -------------------------------------------------------------------------------- 1 | export * from './merkle_tree'; 2 | export * from './hash_path'; 3 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/src/merkle_tree.test.ts: -------------------------------------------------------------------------------- 1 | import { MerkleTree, HashPath } from '.'; 2 | import { Sha256Hasher } from './sha256_hasher'; 3 | import levelup from 'levelup'; 4 | import memdown from 'memdown'; 5 | 6 | describe('merkle_tree', () => { 7 | const values: Buffer[] = []; 8 | 9 | beforeAll(async () => { 10 | for (let i = 0; i < 1024; ++i) { 11 | const v = Buffer.alloc(64, 0); 12 | v.writeUInt32LE(i, 0); 13 | values[i] = v; 14 | } 15 | }); 16 | 17 | it('should have correct empty tree root for depth 32', async () => { 18 | const db = levelup(memdown()); 19 | const tree = await MerkleTree.new(db, 'test', 32); 20 | const root = tree.getRoot(); 21 | expect(root.toString('hex')).toEqual('1c9a7e5ff1cf48b4ad1582d3f4e4a1004f3b20d8c5a2b71387a4254ad933ebc5'); 22 | }); 23 | 24 | it('should have correct root', async () => { 25 | const db = levelup(memdown()); 26 | 27 | const hasher = new Sha256Hasher(); 28 | const e00 = hasher.hash(values[0]); 29 | const e01 = hasher.hash(values[1]); 30 | const e02 = hasher.hash(values[2]); 31 | const e03 = hasher.hash(values[3]); 32 | const e10 = hasher.compress(e00, e01); 33 | const e11 = hasher.compress(e02, e03); 34 | const root = hasher.compress(e10, e11); 35 | 36 | const tree = await MerkleTree.new(db, 'test', 2); 37 | 38 | for (let i = 0; i 
< 4; ++i) { 39 | await tree.updateElement(i, values[i]); 40 | } 41 | 42 | let expected = new HashPath([ 43 | [e00, e01], 44 | [e10, e11], 45 | ]); 46 | 47 | expect(await tree.getHashPath(0)).toEqual(expected); 48 | expect(await tree.getHashPath(1)).toEqual(expected); 49 | 50 | expected = new HashPath([ 51 | [e02, e03], 52 | [e10, e11], 53 | ]); 54 | 55 | expect(await tree.getHashPath(2)).toEqual(expected); 56 | expect(await tree.getHashPath(3)).toEqual(expected); 57 | expect(tree.getRoot()).toEqual(root); 58 | 59 | expect(root).toEqual(Buffer.from('e645e6b5445483a358c4d15c1923c616a0e6884906b05c196d341ece93b2de42', 'hex')); 60 | }); 61 | 62 | it('should be able to restore from previous data', async () => { 63 | const levelDown = memdown(); 64 | const db = levelup(levelDown); 65 | const tree = await MerkleTree.new(db, 'test', 10); 66 | for (let i = 0; i < 128; ++i) { 67 | await tree.updateElement(i, values[i]); 68 | } 69 | 70 | const db2 = levelup(levelDown); 71 | const tree2 = await MerkleTree.new(db2, 'test'); 72 | 73 | expect(tree.getRoot().toString('hex')).toBe('4b8404d05a963de56f7212fbf8123204b1eb77a4cb16ae3875679a898aaa5daa'); 74 | expect(tree.getRoot()).toEqual(tree2.getRoot()); 75 | for (let i = 0; i < 128; ++i) { 76 | expect(await tree.getHashPath(i)).toEqual(await tree2.getHashPath(i)); 77 | } 78 | }); 79 | 80 | it('should have correct results inserting 1024 values into 32 depth tree.', async () => { 81 | const db = levelup(memdown()); 82 | const tree = await MerkleTree.new(db, 'test', 32); 83 | 84 | for (let i = 0; i < values.length; ++i) { 85 | await tree.updateElement(i, values[i]); 86 | } 87 | 88 | expect(tree.getRoot().toString('hex')).toBe('26996bfcb0aaf96422aefdd789396a3f6c8a4fa6dccc73e55060e03e2a238db0'); 89 | 90 | const hashPath = await tree.getHashPath(100); 91 | expect(hashPath.toBuffer().toString('hex')).toBe( 92 | 
'f59927591e6e3283d4419e376e4ebb4e08f4f547a3d1076474a29c9d44a07b28e703b6c67d0d1d2a7ef4bd70b8cda584061db4d9e3673f79d3cafab5ecbd9b1e6478de41cd35e7937dd9ac9f1bb59aaeb71c3baec571778d8eb3e22116810bcda215145f0f4ca7c22c5c149359d8597258d8f1e3630b5d74f9035a69bb19bd9bc415913d1a01741ed6e881871baa9e3b3fabfa5ae6009a2ec009ea22bbced51b1077408bc95eae2d5ba2c7cdd8d5690d6fb27702d295fa801212f5d0bc6cc923b8d45c43b2e08d36dbfba1e74a5297242c9c460a111c89067daf1c59a6d44a062c7882de61cfe65b71bc5fe9fe636c825cc6a96df0b5b4e2885f9e974e80a00fe5022a268652893af91ffa3bf545d9c852c7181547bbc6e0ce29ee85fac6a7eca17c8b1a90804b7f360d319e43e590be9e92c66c74ffc5579e5e6e309de98a1f9b4911d80fe9b0e3e9db5f0cc67b2ebeb29a7061f1c7a2c623495b1ec4e4bef82f9d5be75ae2392b70bda91a35285be08b82684cddf58f9a27c1c5dad5a9eb9f3380142923feebd36b5b4cf22d70b9552d82b887d00aa2da4b65442a31030e9b809d10356452b8f549fef05433e17b308a5acc5f40ae0c7e106c916a03ce229ed8fab8cb151ae4353f689e40324ffbd0a76b0e9b82d6d5686bdd5510a7ed3e64aba719d04f9015a6f55f69930f5fbb0da3800640af69d10528e7360f76d1a58a6e0bce4cfe43cf119b8f5173b6e5cfa351f35253eece828e6c1433f76b88329232f5a6988105158fc273823f7525a53821a2aed0a185f4857b12650dd7be180906dcfdf5b1080bb93e1a157ed947c2184dc35e097843a4094f88dd50eb4a0d5828b0055f617c272a0bf7ed0665ed9fc4789e817ab7305013faa7724a3cf5b2192203f55bcc35322e08aee3d77d686c4590b140f5d9047ea13380b2442e0d001a6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa759642206fe8f537ae7a17d3e37896a770ffe6715a97863ce29ccc8a059a33c0026add13b7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5fa8ac942e18e6ce87a78488cda91b88d9407627093af97852b350208a21bee3d1df6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85ef446e2aa248796ac561107c12c1be9f3e6fe5e08a5d0c005f314a4d08e393118b58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da72937846c7860e58be630894eb80ec8b079672089d2cd222544e39ba456fc2c32716738d49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bbb22e774a0c40a7772a3fbc9fa8e5381b695652ca7af0fa783af2f08586dba0f18fe6b1689256c0d
385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb661a23b4ea755618621a94f28e34249c24280de0e084a7336e3c870a2d7904758d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab9935eeed1d12d309a184dc60b6fb92a0a9d0a6c3495d8478798b3c9b46fcf66995eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4adbdffbd8298a18020d86c3b1f750ab7b25a3e13a5b8a235a82bdbabbb498868f893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f321d317d099bfac9d91c2b818f184161d8c3b2516ae643914451fd0bb492dc5fcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa86b650ff5ee1c07d86c6605fa93b83b4d83e59733b5d244008b841ca13ccb2058a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c4b7bf8c19a29dbbed382a13fdb41dd8630d2c76ab6e5f971b3c47c5065387763feb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167b6483422f56919ce2f54dd9a9b0162455a5a75e83ac7c1965b2a44530ed65098e71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7f5edf6415b5d1e1b4726f61a52a115319e3284020532a55c2d2f21aa5e7b806231206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc02bf86d2cef052a013d83a745d32b7d111ed3269866091cb266897ddfe124546b21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544c4dc4cf039332ba8521be07ced0b8b09735cebf122d1c1ee7e388e03922704fc619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a467653cb1a76abf102ae036aa9ecb12c4e36998ebc183f0efc1628c6c76b813b97ccc7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4b0ae7a85396184a39332cfb7630df1dac5c25fed0dfca561e9b81c96681fdd6b848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe13c90183e8f0c1dc88aa7f3ee1776e8f8ca4fa5efcea07191d96c35b286c3382e8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636a1e5e441b5b5ce9d1fc3def4c6e474045348a0cac3e35f03e6e4324400d2b4ddb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c1d48bdddad3b8a062632e00d4fd83dc6ff8aab7bf3adc647e0f1cfdd43a81a65985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f769896f30b46bea4e13cbe6d1377a904
16df6e1e265052ad6017c0c1f2b28b47cc6f67e02e6e4e1bdefb994c6098953f34636ba2b6ca20a4721d2b26a886722ff', 93 | ); 94 | }); 95 | }); 96 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/src/merkle_tree.ts: -------------------------------------------------------------------------------- 1 | import { LevelUp, LevelUpChain } from 'levelup'; 2 | import { HashPath } from './hash_path'; 3 | import { Sha256Hasher } from './sha256_hasher'; 4 | 5 | const MAX_DEPTH = 32; 6 | const LEAF_BYTES = 64; // All leaf values are 64 bytes. 7 | 8 | /** 9 | * The merkle tree, in summary, is a data structure with a number of indexable elements, and the property 10 | * that it is possible to provide a succinct proof (HashPath) that a given piece of data, exists at a certain index, 11 | * for a given merkle tree root. 12 | */ 13 | export class MerkleTree { 14 | private hasher = new Sha256Hasher(); 15 | private root = Buffer.alloc(32); 16 | 17 | /** 18 | * Constructs a new MerkleTree instance, either initializing an empty tree, or restoring pre-existing state values. 19 | * Use the async static `new` function to construct. 20 | * 21 | * @param db Underlying leveldb. 22 | * @param name Name of the tree, to be used when restoring/persisting state. 23 | * @param depth The depth of the tree, to be no greater than MAX_DEPTH. 24 | * @param root When restoring, you need to provide the root. 25 | */ 26 | constructor(private db: LevelUp, private name: string, private depth: number, root?: Buffer) { 27 | if (!(depth >= 1 && depth <= MAX_DEPTH)) { 28 | throw Error('Bad depth'); 29 | } 30 | 31 | // Implement. 32 | } 33 | 34 | /** 35 | * Constructs or restores a new MerkleTree instance with the given `name` and `depth`. 36 | * The `db` contains the tree data. 
37 | */ 38 | static async new(db: LevelUp, name: string, depth = MAX_DEPTH) { 39 | const meta: Buffer = await db.get(Buffer.from(name)).catch(() => {}); 40 | if (meta) { 41 | const root = meta.slice(0, 32); 42 | const depth = meta.readUInt32LE(32); 43 | return new MerkleTree(db, name, depth, root); 44 | } else { 45 | const tree = new MerkleTree(db, name, depth); 46 | await tree.writeMetaData(); 47 | return tree; 48 | } 49 | } 50 | 51 | private async writeMetaData(batch?: LevelUpChain) { 52 | const data = Buffer.alloc(40); 53 | this.root.copy(data); 54 | data.writeUInt32LE(this.depth, 32); 55 | if (batch) { 56 | batch.put(this.name, data); 57 | } else { 58 | await this.db.put(this.name, data); 59 | } 60 | } 61 | 62 | getRoot() { 63 | return this.root; 64 | } 65 | 66 | /** 67 | * Returns the hash path for `index`. 68 | * e.g. To return the HashPath for index 2, return the nodes marked `*` at each layer. 69 | * d0: [ root ] 70 | * d1: [*] [*] 71 | * d2: [*] [*] [ ] [ ] 72 | * d3: [ ] [ ] [*] [*] [ ] [ ] [ ] [ ] 73 | */ 74 | async getHashPath(index: number) { 75 | // Implement. 76 | return new HashPath(); 77 | } 78 | 79 | /** 80 | * Updates the tree with `value` at `index`. Returns the new tree root. 81 | */ 82 | async updateElement(index: number, value: Buffer) { 83 | // Implement. 84 | return this.root; 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/src/sha256_hasher.ts: -------------------------------------------------------------------------------- 1 | import { createHash } from 'crypto'; 2 | 3 | /** 4 | * Implements a Hasher using the sha256 algorithm. 5 | */ 6 | export class Sha256Hasher { 7 | /** 8 | * Given two roots, the left hand subtree root, and the right hand subtree root, return a digest representing 9 | * the new tree root. 
10 | */ 11 | compress(lhs: Buffer, rhs: Buffer): Buffer { 12 | return createHash('sha256') 13 | .update(Buffer.concat([lhs, rhs])) 14 | .digest(); 15 | } 16 | 17 | /** 18 | * Given `data` which is to be become an entry in the tree, return a digest that represents that data. 19 | */ 20 | hash(data: Buffer): Buffer { 21 | return createHash('sha256').update(data).digest(); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /eng-sessions/merkle-tree/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "esnext", 4 | "module": "commonjs", 5 | "moduleResolution": "node", 6 | "lib": ["esnext"], 7 | "noEmit": true, 8 | "strict": true, 9 | "noImplicitAny": true, 10 | "noImplicitThis": false, 11 | "esModuleInterop": true, 12 | "declaration": true, 13 | "emitDecoratorMetadata": true, 14 | "experimentalDecorators": true, 15 | "inlineSourceMap": true, 16 | "declarationMap": true, 17 | "types": ["node", "jest"] 18 | }, 19 | "include": ["src"] 20 | } 21 | -------------------------------------------------------------------------------- /senior-applied-cryptography-engineer/README.md: -------------------------------------------------------------------------------- 1 | # Applied Cryptographer Test 2 | 3 | **WARNING: Do not fork this repository or make a public repository containing your solution. Either copy it to a private repository or submit your solution via other means.** 4 | 5 | Links to solutions may be sent to travis@aztecprotocol.com. 6 | 7 | --- 8 | This is a test for our prospective applied cryptography engineers. 9 | 10 | These questions are a drawn from real topics and problems we've encountered at Aztec. They are representative of the kind of work you will be involved in as part of our team. 11 | 12 | Your core proficiencies may not align with what is needed to answer all questions. 
We don't expect candidates to provide complete answers to every question (this is not a trick, we genuinely don't) Please answer as many as you can. 13 | 14 | --- 15 | #### Question 1 16 | 17 | **The Plonk permutation argument is described in section 5 of the [paper](https://eprint.iacr.org/2019/953.pdf).** 18 | 19 | a) Read the first protocol described in Section 5 (page 20). Suppose we fixed in the protocol instead of choosing it randomly (and kept uniformly chosen as in the protocol). Suppose we fix . Give an example of a permutation , and a pair of polynomials f,g not satisfying , but still causing the verifier to accept with high probability. Explain why the example works. 20 | 21 | b) Same question when fixing instead and uniform . 22 | 23 | 24 | 25 | --- 26 | #### Question 2 27 | 28 | In [our implementation](https://github.com/AztecProtocol/barretenberg/tree/master/cpp/src/aztec/ecc/curves/bn254/scalar_multiplication) of [Pippenger's multi-exponentiation algorithm](https://jbootle.github.io/Misc/pippenger.pdf) for BN254 elliptic curve points we add points in pairs in affine form in the function *add_affine_points*. To our knowledge not many other implementations of Pippenger accumulate bucket points using this type of pair-wise addition (instead they iterate over the bucket points and add them successively into an accumulator point). **Why do we do the above instead?** 29 | 30 | --- 31 | #### Question 3 32 | 33 | **a) In a language of your choice (incl. pseudocode), implement a multithreaded radix sort. Inputs are 16-bit integers. Set size is ~2^24** 34 | 35 | --- 36 | #### Question 4 37 | 38 | Using https://github.com/AztecProtocol/barretenberg/blob/master/cpp/src/aztec/ecc/curves/bn254/fr.test.cpp as reference write a test where Alice convinces Bob of the value of the inner product of randomly sampled fr vectors of size n=2^d using the sumcheck protocol. Bob also has access to the sampled vectors. 
References for sumcheck:[1](https://people.cs.georgetown.edu/jthaler/sumcheck.pdf),[2](https://people.cs.georgetown.edu/jthaler/ProofsArgsAndZK.html). 39 | 40 | --- 41 | #### (harder/bonus) Question 5 42 | The following C++ code implements a [Montgomery modular multiplication](https://en.wikipedia.org/wiki/Montgomery_modular_multiplication) over a 254-bit prime field for x86/64 CPUs with the [BMI2](https://en.wikipedia.org/wiki/Bit_manipulation_instruction_set) instruction set. Field elements are stored in 4 64-bit 'limbs'. 43 | 44 | Operation is Montgomery-form equivalent of , where `q` is defined by `T::modulus`. 45 | 46 | Both inputs and outputs are allowed to be in an 'unreduced' form, where a factor of `q` may be added into the variable. (i.e. `a, b, c` can be 255-bit integers). 47 | 48 | Registers`%rax, %rbx, %rcx` contain references to `a, b, c` respectively. 49 | 50 | **a) Spot the error in this code. Why is it incorrect and what is the fix?** 51 | **b) What could be the impact of this error? How would you find erroneous inputs?** 52 | 53 | 54 | ``` 55 | struct alignas(32) field { 56 | uint64_t data[4]; 57 | }; 58 | 59 | /** 60 | * Compute Montgomery multiplication of a, b. 
61 | * Result is stored, in "r" 62 | **/ 63 | template inline void asm_mul(const field& a, const field& b, field& r) noexcept 64 | { 65 | constexpr uint64_t r_inv = T::r_inv; // r_inv = (-1 / q) mod 2^256 66 | constexpr uint64_t modulus_0 = T::modulus_0; 67 | constexpr uint64_t modulus_1 = T::modulus_1; 68 | constexpr uint64_t modulus_2 = T::modulus_2; 69 | constexpr uint64_t modulus_3 = T::modulus_3; 70 | constexpr uint64_t zero_ref = 0; 71 | __asm__( 72 | "movq 0(%%rax), %%rdx \n\t" /* load a[0] into %rdx */ \ 73 | "xorq %%r8, %%r8 \n\t" /* clear r10 register, we use this when we need 0 */ \ 74 | /* front-load mul ops, can parallelize 4 of these but latency is 4 cycles */ \ 75 | "mulxq 0(%%rbx), %%r13, %%r14 \n\t" /* (r[0], r[1]) <- a[0] * b[0] */ \ 76 | "mulxq 8(%%rbx), %%r8, %%r9 \n\t" /* (t[0], t[1]) <- a[0] * b[1] */ \ 77 | "mulxq 16(%%rbx), %%r15, %%r10 \n\t" /* (r[2] , r[3]) <- a[0] * b[2] */ \ 78 | "mulxq 24(%%rbx), %%rdi, %%r12 \n\t" /* (t[2], r[4]) <- a[0] * b[3] (overwrite a[0]) */ \ 79 | /* zero flags */ \ 80 | \ 81 | /* start computing modular reduction */ \ 82 | "movq %%r13, %%rdx \n\t" /* move r[0] into %rdx */ \ 83 | "mulxq %[r_inv], %%rdx, %%r11 \n\t" /* (%rdx, _) <- k = r[1] * r_inv */ \ 84 | \ 85 | /* start first addition chain */ \ 86 | "adcxq %%r8, %%r14 \n\t" /* r[1] += t[0] */ \ 87 | "adoxq %%rdi, %%r10 \n\t" /* r[3] += t[2] + flag_o */ \ 88 | "adcxq %%r9, %%r15 \n\t" /* r[2] += t[1] + flag_c */ \ 89 | \ 90 | /* reduce by r[0] * k */ \ 91 | "mulxq %[modulus_3], %%rdi, %%r11 \n\t" /* (t[2], t[3]) <- (modulus.data[3] * k) */ \ 92 | "mulxq %[modulus_0], %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (modulus.data[0] * k) */ \ 93 | "adcxq %%rdi, %%r10 \n\t" /* r[3] += t[2] + flag_c */ \ 94 | "adoxq %%r11, %%r12 \n\t" /* r[4] += t[3] + flag_c */ \ 95 | "adcxq %[zero_reference], %%r12 \n\t" /* r[4] += flag_i */ \ 96 | "adoxq %%r8, %%r13 \n\t" /* r[0] += t[0] (%r13 now free) */ \ 97 | "adcxq %%r9, %%r14 \n\t" /* r[1] += t[1] + flag_o */ \ 98 | "mulxq 
%[modulus_1], %%rdi, %%r11 \n\t" /* (t[0], t[1]) <- (modulus.data[1] * k) */ \ 99 | "mulxq %[modulus_2], %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (modulus.data[2] * k) */ \ 100 | "adoxq %%rdi, %%r14 \n\t" /* r[1] += t[0] */ \ 101 | "adcxq %%r11, %%r15 \n\t" /* r[2] += t[1] + flag_c */ \ 102 | "adoxq %%r8, %%r15 \n\t" /* r[2] += t[0] + flag_o */ \ 103 | "adcxq %%r9, %%r10 \n\t" /* r[3] += t[1] + flag_o */ \ 104 | \ 105 | /* modulus = 254 bits, so max(t[3]) = 62 bits */ \ 106 | /* b also 254 bits, so (a[0] * b[3]) = 62 bits */ \ 107 | /* i.e. carry flag here is always 0 if b is in mont form, no need to update r[5] */ \ 108 | /* (which is very convenient because we're out of registers!) */ \ 109 | /* N.B. the value of r[4] now has a max of 63 bits and can accept another 62 bit value before overflowing */ \ 110 | \ 111 | /* a[1] * b */ \ 112 | "movq 8(%%rax), %%rdx \n\t" /* load a[1] into %rdx */ \ 113 | "mulxq 16(%%rbx), %%r8, %%r9 \n\t" /* (t[2], t[3]) <- (a[1] * b[2]) */ \ 114 | "mulxq 24(%%rbx), %%rdi, %%r13 \n\t" /* (t[6], r[5]) <- (a[1] * b[3]) */ \ 115 | "adoxq %%r8, %%r10 \n\t" /* r[3] += t[0] + flag_c */ \ 116 | "adcxq %%rdi, %%r12 \n\t" /* r[4] += t[2] + flag_o */ \ 117 | "adoxq %%r9, %%r12 \n\t" /* r[4] += t[1] + flag_c */ \ 118 | "adcxq %[zero_reference], %%r13 \n\t" /* r[5] += flag_o */ \ 119 | "adoxq %[zero_reference], %%r13 \n\t" /* r[5] += flag_c */ \ 120 | "mulxq 0(%%rbx), %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (a[1] * b[0]) */ \ 121 | "mulxq 8(%%rbx), %%rdi, %%r11 \n\t" /* (t[4], t[5]) <- (a[1] * b[1]) */ \ 122 | "adcxq %%r8, %%r14 \n\t" /* r[1] += t[0] + flag_c */ \ 123 | "adoxq %%r9, %%r15 \n\t" /* r[2] += t[1] + flag_o */ \ 124 | "adcxq %%rdi, %%r15 \n\t" /* r[2] += t[0] + flag_c */ \ 125 | "adoxq %%r11, %%r10 \n\t" /* r[3] += t[1] + flag_o */ \ 126 | \ 127 | /* reduce by r[1] * k */ \ 128 | "movq %%r14, %%rdx \n\t" /* move r[1] into %rdx */ \ 129 | "mulxq %[r_inv], %%rdx, %%r8 \n\t" /* (%rdx, _) <- k = r[1] * r_inv */ \ 130 | "mulxq %[modulus_2], %%r8, 
%%r9 \n\t" /* (t[0], t[1]) <- (modulus.data[2] * k) */ \ 131 | "mulxq %[modulus_3], %%rdi, %%r11 \n\t" /* (t[2], t[3]) <- (modulus.data[3] * k) */ \ 132 | "adcxq %%r8, %%r10 \n\t" /* r[3] += t[0] + flag_o */ \ 133 | "adoxq %%r9, %%r12 \n\t" /* r[4] += t[2] + flag_c */ \ 134 | "adcxq %%rdi, %%r12 \n\t" /* r[4] += t[1] + flag_o */ \ 135 | "adoxq %%r11, %%r13 \n\t" /* r[5] += t[3] + flag_c */ \ 136 | "adcxq %[zero_reference], %%r13 \n\t" /* r[5] += flag_o */ \ 137 | "mulxq %[modulus_0], %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (modulus.data[0] * k) */ \ 138 | "mulxq %[modulus_1], %%rdi, %%r11 \n\t" /* (t[0], t[1]) <- (modulus.data[1] * k) */ \ 139 | "adoxq %%r8, %%r14 \n\t" /* r[1] += t[0] (%r14 now free) */ \ 140 | "adcxq %%rdi, %%r15 \n\t" /* r[2] += t[0] + flag_c */ \ 141 | "adoxq %%r9, %%r15 \n\t" /* r[2] += t[1] + flag_o */ \ 142 | "adcxq %%r11, %%r10 \n\t" /* r[3] += t[1] + flag_c */ \ 143 | \ 144 | /* a[2] * b */ \ 145 | "movq 16(%%rax), %%rdx \n\t" /* load a[2] into %rdx */ \ 146 | "mulxq 8(%%rbx), %%rdi, %%r11 \n\t" /* (t[0], t[1]) <- (a[2] * b[1]) */ \ 147 | "mulxq 16(%%rbx), %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (a[2] * b[2]) */ \ 148 | "adoxq %%rdi, %%r10 \n\t" /* r[3] += t[0] + flag_c */ \ 149 | "adcxq %%r11, %%r12 \n\t" /* r[4] += t[1] + flag_o */ \ 150 | "adoxq %%r8, %%r12 \n\t" /* r[4] += t[0] + flag_c */ \ 151 | "adcxq %%r9, %%r13 \n\t" /* r[5] += t[2] + flag_o */ \ 152 | "mulxq 24(%%rbx), %%rdi, %%r14 \n\t" /* (t[2], r[6]) <- (a[2] * b[3]) */ \ 153 | "mulxq 0(%%rbx), %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (a[2] * b[0]) */ \ 154 | "adoxq %%rdi, %%r13 \n\t" /* r[5] += t[1] + flag_c */ \ 155 | "adcxq %[zero_reference], %%r14 \n\t" /* r[6] += flag_o */ \ 156 | "adoxq %[zero_reference], %%r14 \n\t" /* r[6] += flag_c */ \ 157 | "adcxq %%r8, %%r15 \n\t" /* r[2] += t[0] + flag_c */ \ 158 | "adoxq %%r9, %%r10 \n\t" /* r[3] += t[1] + flag_o */ \ 159 | \ 160 | /* reduce by r[2] * k */ \ 161 | "movq %%r15, %%rdx \n\t" /* move r[2] into %rdx */ \ 162 | "mulxq %[r_inv], 
%%rdx, %%r8 \n\t" /* (%rdx, _) <- k = r[1] * r_inv */ \ 163 | "mulxq %[modulus_1], %%rdi, %%r11 \n\t" /* (t[0], t[1]) <- (modulus.data[1] * k) */ \ 164 | "mulxq %[modulus_2], %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (modulus.data[2] * k) */ \ 165 | "adcxq %%rdi, %%r10 \n\t" /* r[3] += t[1] + flag_o */ \ 166 | "adoxq %%r11, %%r12 \n\t" /* r[4] += t[1] + flag_c */ \ 167 | "adcxq %%r8, %%r12 \n\t" /* r[4] += t[0] + flag_o */ \ 168 | "adoxq %%r9, %%r13 \n\t" /* r[5] += t[2] + flag_c */ \ 169 | "mulxq %[modulus_3], %%rdi, %%r11 \n\t" /* (t[2], t[3]) <- (modulus.data[3] * k) */ \ 170 | "mulxq %[modulus_0], %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (modulus.data[0] * k) */ \ 171 | "adcxq %%rdi, %%r13 \n\t" /* r[5] += t[1] + flag_o */ \ 172 | "adoxq %%r11, %%r14 \n\t" /* r[6] += t[3] + flag_c */ \ 173 | "adcxq %[zero_reference], %%r14 \n\t" /* r[6] += flag_o */ \ 174 | "adoxq %%r8, %%r15 \n\t" /* r[2] += t[0] (%r15 now free) */ \ 175 | "adcxq %%r9, %%r10 \n\t" /* r[3] += t[0] + flag_c */ \ 176 | \ 177 | /* a[3] * b */ \ 178 | "movq 24(%%rax), %%rdx \n\t" /* load a[3] into %rdx */ \ 179 | "mulxq 0(%%rbx), %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (a[3] * b[0]) */ \ 180 | "mulxq 8(%%rbx), %%rdi, %%r11 \n\t" /* (t[4], t[5]) <- (a[3] * b[1]) */ \ 181 | "adoxq %%r8, %%r10 \n\t" /* r[3] += t[0] + flag_c */ \ 182 | "adcxq %%r9, %%r12 \n\t" /* r[4] += t[2] + flag_o */ \ 183 | "adoxq %%rdi, %%r12 \n\t" /* r[4] += t[1] + flag_c */ \ 184 | "adcxq %%r11, %%r13 \n\t" /* r[5] += t[3] + flag_o */ \ 185 | \ 186 | "mulxq 16(%%rbx), %%r8, %%r9 \n\t" /* (t[2], t[3]) <- (a[3] * b[2]) */ \ 187 | "mulxq 24(%%rbx), %%rdi, %%r15 \n\t" /* (t[6], r[7]) <- (a[3] * b[3]) */ \ 188 | "adoxq %%r8, %%r13 \n\t" /* r[5] += t[4] + flag_c */ \ 189 | "adcxq %%r9, %%r14 \n\t" /* r[6] += t[6] + flag_o */ \ 190 | "adoxq %%rdi, %%r14 \n\t" /* r[6] += t[5] + flag_c */ \ 191 | \ 192 | /* reduce by r[3] * k */ \ 193 | "movq %%r10, %%rdx \n\t" /* move r_inv into %rdx */ \ 194 | "mulxq %[r_inv], %%rdx, %%r8 \n\t" /* (%rdx, _) <- k = 
r[1] * r_inv */ \ 195 | "mulxq %[modulus_0], %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (modulus.data[0] * k) */ \ 196 | "mulxq %[modulus_1], %%rdi, %%r11 \n\t" /* (t[2], t[3]) <- (modulus.data[1] * k) */ \ 197 | "adoxq %%r8, %%r10 \n\t" /* r[3] += t[0] (%rsi now free) */ \ 198 | "adcxq %%r9, %%r12 \n\t" /* r[4] += t[2] + flag_c */ \ 199 | "adoxq %%rdi, %%r12 \n\t" /* r[4] += t[1] + flag_o */ \ 200 | "adcxq %%r11, %%r13 \n\t" /* r[5] += t[3] + flag_c */ \ 201 | \ 202 | "mulxq %[modulus_2], %%r8, %%r9 \n\t" /* (t[4], t[5]) <- (modulus.data[2] * k) */ \ 203 | "mulxq %[modulus_3], %%rdi, %%rdx \n\t" /* (t[6], t[7]) <- (modulus.data[3] * k) */ \ 204 | "adoxq %%r8, %%r13 \n\t" /* r[5] += t[4] + flag_o */ \ 205 | "adcxq %%r9, %%r14 \n\t" /* r[6] += t[6] + flag_c */ \ 206 | "adoxq %%rdi, %%r14 \n\t" /* r[6] += t[5] + flag_o */ \ 207 | "adcxq %%rdx, %%r15 \n\t" /* r[7] += t[7] + flag_c */ \ 208 | "movq %%r12, 0(%%rcx) \n\t" \ 209 | "movq %%r13, 8(%%rcx) \n\t" \ 210 | "movq %%r14, 16(%%rcx) \n\t" \ 211 | "movq %%r15, 24(%%rcx) \n\t" 212 | : 213 | : "a"(&a), 214 | "b"(&b), 215 | "c"(&r), 216 | [ modulus_0 ] "m"(modulus_0), 217 | [ modulus_1 ] "m"(modulus_1), 218 | [ modulus_2 ] "m"(modulus_2), 219 | [ modulus_3 ] "m"(modulus_3), 220 | [ r_inv ] "m"(r_inv), 221 | [ zero_reference ] "m"(zero_ref) 222 | : "%rdx", "%rdi", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "cc", "memory"); 223 | } 224 | ``` 225 | -------------------------------------------------------------------------------- /senior-software-engineer: -------------------------------------------------------------------------------- 1 | eng-sessions/merkle-tree -------------------------------------------------------------------------------- /solidity/.gitattributes: -------------------------------------------------------------------------------- 1 | *.sol linguist-language=Solidity -------------------------------------------------------------------------------- /solidity/.gitignore: 
-------------------------------------------------------------------------------- 1 | node_modules 2 | cache 3 | artifacts 4 | docs 5 | -------------------------------------------------------------------------------- /solidity/.solhint.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "solhint:recommended" 3 | } 4 | -------------------------------------------------------------------------------- /solidity/README.md: -------------------------------------------------------------------------------- 1 | # Aztec Technical Challenge 2 | 3 | **WARNING: Do not fork this repository or make a public repository containing your solution. Either copy it to a private repository or submit your solution via other means.** 4 | 5 | Links to solutions may be sent to charlie@aztecprotocol.com. 6 | 7 | ## The task 8 | 9 | This project contains an example bridge and unit test that will do a synchronous swap of assets on Uniswap V2. There are 3 options you can choose from, considered to be of increasing difficulty. 10 | 11 | ### Option 1 12 | 13 | Improve the existing Uniswap bridge to: 14 | 15 | - Support token to token swaps. 16 | - Use aux data to specify a maximum price slippage. 17 | - Optionally improve gas usage. 18 | 19 | ### Option 2 20 | 21 | Write a new DeFi bridge contract that interacts with your preferred L1 protocol. It may be desirable to leverage features such as virtual assets and asynchronous completion. Example ideas would be to support exchanges with liquidity pools and limit orders, or lending and borrowing protocols and liquidity mining. 22 | 23 | ### Option 3 24 | 25 | Make the given Uniswap bridge contract net away trade balances when it has both sides of a market e.g `ETH/DAI` & `DAI/ETH`, only trading the delta via Uniswap. 26 | 27 | - Assume two different bridge ids pointing to the same bridge address. 28 | - One call to convert would take input ETH output DAI. 
29 | - Second call to convert would take input DAI output ETH. 30 | - You can assume a fixed order to the two calls. 31 | - Use the asynchronous flow. 32 | 33 | ## What is a bridge? 34 | 35 | A bridge is a layer 1 solidity contract that conforms a DeFi protocol to the interface the Aztec rollup expects. This allows the Aztec rollup contract to interact with the DeFi protocol via the bridge. 36 | 37 | A bridge contract models any layer 1 DeFi protocol as a synchronous or asynchronous asset swap. You can specify up to two input assets and two output assets per bridge. 38 | 39 | ## How does this work? 40 | 41 | Users who have shielded funds on Aztec can construct a zero-knowledge proof instructing the Aztec rollup contract to make an external L1 contract call. 42 | 43 | Aztec Connect works by batching L2 transactions by bridge id together in a rollup, and batch executing them against L1 bridge contracts. The results are later disseminated to the users privately by their relevant input ratios. 44 | 45 | ## Batching bridge interactions. 46 | 47 | Rollup providers are incentivised to batch any transaction with the same bridge id. This reduces the cost of the L1 transaction for similar trades. A bridge id is structured as: 48 | 49 | ``` 50 | BridgeId (248 bits) 51 | (auxData || bitConfig || outputAssetB || outputAssetA || inputAssetB || inputAssetA || bridgeAddressId) 52 | 64 32 30 30 30 30 32 53 | 54 | BitConfig (32 bits) 55 | (unused || firstAssetVirtual || secondAssetValid || secondAssetVirtual) 56 | 29 1 1 1 57 | ``` 58 | 59 | For this test, the bridge id is assumed to have already been parsed out into a more manageable form. 60 | 61 | ### Virtual Assets 62 | 63 | Aztec uses the concept of virtual assets or "position" tokens to represent a share of assets held by a bridge contract. This is far more gas efficient than minting ERC20 tokens. These are used when the bridge holds an asset that Aztec doesn't support, i.e. Uniswap Position NFTs or other non-fungible assets. 
64 | 65 | If the output asset of any interaction is specified as virtual, the user will receive encrypted notes on Aztec representing their share of the position, but no tokens or ETH need to be transferred. The position tokens have an `assetId` that is the `interactionNonce` of the DeFi Bridge call. This is globally unique. Virtual assets can be used to construct complex flows, such as entering or exiting LP positions. i.e. One bridge contract can have multiple flows which are triggered using different input assets. 66 | 67 | ### Auxiliary Data 68 | 69 | This is 64 bits of bridge-specific data that can be passed through Aztec to the bridge contract. It is defined by the bridge contract and is opaque to Aztec. 70 | 71 | ## Bridge Contract Interface 72 | 73 | The bridge contract interface can be seen [here](./contracts/interfaces/IDefiBridge.sol), and has further information on the responsibilities of each function. 74 | 75 | ## Anatomy of a rollup transaction with defi bridge interactions. 76 | 77 | It may help to have some context around how the rollup processor contract processes each defi bridge. 78 | 79 | For each bridge id: 80 | 81 | 1. Parse the bridge id into constituent parts. 82 | 2. Transfer any tokens or ETH to the bridge address. 83 | 3. Call `convert` for each bridge id as described in the interface. 84 | 4. If `isAsync == false` check the bridge sent the DefiBridgeProxy the output assets if ETH or ERC20. 85 | 86 | Finally: 87 | 88 | 1. For any calls to `convert` that return `isAsync == true`, check if the interaction can be finalised by calling `canFinalise`. 89 | 2. For any calls to `canFinalise` that return `true`, finalise the interaction by calling `finalise`. 90 | 3. Check the bridge sent the DefiBridgeProxy the output assets if ETH or ERC20. 
91 | -------------------------------------------------------------------------------- /solidity/contracts/DefiBridgeProxy.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | // Copyright 2020 Spilsbury Holdings Ltd 3 | pragma solidity >=0.6.10 <0.8.0; 4 | pragma experimental ABIEncoderV2; 5 | 6 | import {SafeMath} from '@openzeppelin/contracts/math/SafeMath.sol'; 7 | import {IDefiBridge} from './interfaces/IDefiBridge.sol'; 8 | import {Types} from './Types.sol'; 9 | 10 | // import 'hardhat/console.sol'; 11 | 12 | contract DefiBridgeProxy { 13 | using SafeMath for uint256; 14 | 15 | bytes4 private constant BALANCE_OF_SELECTOR = 0x70a08231; // bytes4(keccak256('balanceOf(address)')); 16 | bytes4 private constant TRANSFER_SELECTOR = 0xa9059cbb; // bytes4(keccak256('transfer(address,uint256)')); 17 | bytes4 private constant DEPOSIT_SELECTOR = 0xb6b55f25; // bytes4(keccak256('deposit(uint256)')); 18 | bytes4 private constant WITHDRAW_SELECTOR = 0x2e1a7d4d; // bytes4(keccak256('withdraw(uint256)')); 19 | 20 | event AztecBridgeInteraction( 21 | address indexed bridgeAddress, 22 | uint256 outputValueA, 23 | uint256 outputValueB, 24 | bool isAsync 25 | ); 26 | 27 | receive() external payable {} 28 | 29 | function getBalance(address assetAddress) internal view returns (uint256 result) { 30 | assembly { 31 | if iszero(assetAddress) { 32 | // This is ETH. 33 | result := balance(address()) 34 | } 35 | if assetAddress { 36 | // Is this a token. 37 | let ptr := mload(0x40) 38 | mstore(ptr, BALANCE_OF_SELECTOR) 39 | mstore(add(ptr, 0x4), address()) 40 | if iszero(staticcall(gas(), assetAddress, ptr, 0x24, ptr, 0x20)) { 41 | // Call failed. 
42 | revert(0x00, 0x00) 43 | } 44 | result := mload(ptr) 45 | } 46 | } 47 | } 48 | 49 | function transferTokens( 50 | address assetAddress, 51 | address to, 52 | uint256 amount 53 | ) internal { 54 | assembly { 55 | let ptr := mload(0x40) 56 | mstore(ptr, TRANSFER_SELECTOR) 57 | mstore(add(ptr, 0x4), to) 58 | mstore(add(ptr, 0x24), amount) 59 | // is this correct or should we forward the correct amount 60 | if iszero(call(gas(), assetAddress, 0, ptr, 0x44, ptr, 0)) { 61 | // Call failed. 62 | revert(0x00, 0x00) 63 | } 64 | } 65 | } 66 | 67 | function convert( 68 | address bridgeAddress, 69 | Types.AztecAsset[4] calldata assets, 70 | uint256 auxData, 71 | uint256 interactionNonce, 72 | uint256 inputValue 73 | ) 74 | external 75 | returns ( 76 | uint256 outputValueA, 77 | uint256 outputValueB, 78 | bool isAsync 79 | ) 80 | { 81 | if (assets[0].assetType == Types.AztecAssetType.ERC20) { 82 | // Transfer totalInputValue to the bridge contract if erc20. ETH is sent on call to convert. 83 | transferTokens(assets[0].erc20Address, bridgeAddress, inputValue); 84 | } 85 | 86 | if (assets[1].assetType == Types.AztecAssetType.ERC20) { 87 | // Transfer totalInputValue to the bridge contract if erc20. ETH is sent on call to convert. 88 | transferTokens(assets[1].erc20Address, bridgeAddress, inputValue); 89 | } 90 | 91 | uint256 tempValueA; 92 | uint256 tempValueB; 93 | 94 | if (assets[2].assetType != Types.AztecAssetType.VIRTUAL) { 95 | tempValueA = getBalance(assets[2].erc20Address); 96 | } 97 | 98 | if (assets[3].assetType != Types.AztecAssetType.VIRTUAL) { 99 | tempValueB = getBalance(assets[3].erc20Address); 100 | } 101 | 102 | // Call bridge.convert(), which will return output values for the two output assets. 103 | // If input is ETH, send it along with call to convert. 104 | IDefiBridge bridgeContract = IDefiBridge(bridgeAddress); 105 | (outputValueA, outputValueB, isAsync) = bridgeContract.convert{ 106 | value: assets[0].assetType == Types.AztecAssetType.ETH ? 
inputValue : 0 107 | }( 108 | assets, 109 | uint64(auxData), 110 | interactionNonce, 111 | inputValue 112 | ); 113 | 114 | if ( 115 | assets[2].assetType != Types.AztecAssetType.VIRTUAL && 116 | assets[2].assetType != Types.AztecAssetType.NOT_USED 117 | ) { 118 | require( 119 | outputValueA == SafeMath.sub(getBalance(assets[2].erc20Address), tempValueA), 120 | 'DefiBridgeProxy: INCORRECT_ASSET_VALUE' 121 | ); 122 | } 123 | 124 | if ( 125 | assets[3].assetType != Types.AztecAssetType.VIRTUAL && 126 | assets[3].assetType != Types.AztecAssetType.NOT_USED 127 | ) { 128 | require( 129 | outputValueB == SafeMath.sub(getBalance(assets[3].erc20Address), tempValueB), 130 | 'DefiBridgeProxy: INCORRECT_ASSET_VALUE' 131 | ); 132 | } 133 | 134 | if (isAsync) { 135 | require(outputValueA == 0 && outputValueB == 0, 'DefiBridgeProxy: ASYNC_NONZERO_OUTPUT_VALUES'); 136 | } 137 | 138 | emit AztecBridgeInteraction(bridgeAddress, outputValueA, outputValueB, isAsync); 139 | } 140 | 141 | function canFinalise( 142 | address bridgeAddress, 143 | Types.AztecAsset[4] calldata assets, 144 | uint64 auxData, 145 | uint256 interactionNonce 146 | ) external view returns (bool ready) { 147 | IDefiBridge bridgeContract = IDefiBridge(bridgeAddress); 148 | (ready) = bridgeContract.canFinalise( 149 | assets, 150 | auxData, 151 | interactionNonce 152 | ); 153 | } 154 | 155 | function finalise( 156 | address bridgeAddress, 157 | Types.AztecAsset[4] calldata assets, 158 | uint64 auxData, 159 | uint256 interactionNonce 160 | ) external returns (uint256 outputValueA, uint256 outputValueB) { 161 | uint256 tempValueA; 162 | uint256 tempValueB; 163 | if (assets[2].assetType != Types.AztecAssetType.VIRTUAL) { 164 | tempValueA = getBalance(assets[2].erc20Address); 165 | } 166 | 167 | if (assets[3].assetType != Types.AztecAssetType.VIRTUAL) { 168 | tempValueB = getBalance(assets[3].erc20Address); 169 | } 170 | 171 | IDefiBridge bridgeContract = IDefiBridge(bridgeAddress); 172 | 173 | 
require(bridgeContract.canFinalise( 174 | assets, 175 | auxData, 176 | interactionNonce), 'DefiBridgeProxy: NOT_READY'); 177 | 178 | (outputValueA, outputValueB) = bridgeContract.finalise( 179 | assets, 180 | auxData, 181 | interactionNonce 182 | ); 183 | 184 | if ( 185 | assets[2].assetType != Types.AztecAssetType.VIRTUAL && 186 | assets[2].assetType != Types.AztecAssetType.NOT_USED 187 | ) { 188 | require( 189 | outputValueA == SafeMath.sub(getBalance(assets[2].erc20Address), tempValueA), 190 | 'DefiBridgeProxy: INCORRECT_ASSET_VALUE' 191 | ); 192 | } 193 | 194 | if ( 195 | assets[3].assetType != Types.AztecAssetType.VIRTUAL && 196 | assets[3].assetType != Types.AztecAssetType.NOT_USED 197 | ) { 198 | require( 199 | outputValueB == SafeMath.sub(getBalance(assets[3].erc20Address), tempValueB), 200 | 'DefiBridgeProxy: INCORRECT_ASSET_VALUE' 201 | ); 202 | } 203 | 204 | emit AztecBridgeInteraction(bridgeAddress, outputValueA, outputValueB, false); 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /solidity/contracts/ERC20Mintable.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | // Copyright 2020 Spilsbury Holdings Ltd 3 | pragma solidity >=0.6.10 <0.8.0; 4 | 5 | import {ERC20} from '@openzeppelin/contracts/token/ERC20/ERC20.sol'; 6 | 7 | /** 8 | * @dev Warning: do not deploy in real environments, for testing only 9 | * ERC20 contract where anybody is able to mint 10 | */ 11 | contract ERC20Mintable is ERC20 { 12 | uint8 public asset_decimals = 18; 13 | 14 | constructor() public ERC20('Test', 'TEST') {} 15 | 16 | function mint(address _to, uint256 _value) public returns (bool) { 17 | _mint(_to, _value); 18 | return true; 19 | } 20 | 21 | function decimals() public view virtual override returns (uint8) { 22 | return asset_decimals; 23 | } 24 | 25 | function setDecimals(uint8 _decimals) external { 26 | asset_decimals = _decimals; 27 
| } 28 | } 29 | -------------------------------------------------------------------------------- /solidity/contracts/Types.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | // Copyright 2020 Spilsbury Holdings Ltd 3 | 4 | pragma solidity >=0.6.0 <0.8.0; 5 | pragma experimental ABIEncoderV2; 6 | 7 | library Types { 8 | enum AztecAssetType { 9 | NOT_USED, 10 | ETH, 11 | ERC20, 12 | VIRTUAL 13 | } 14 | 15 | struct AztecAsset { 16 | uint256 id; 17 | address erc20Address; 18 | AztecAssetType assetType; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /solidity/contracts/UniswapBridge.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | // Copyright 2020 Spilsbury Holdings Ltd 3 | pragma solidity >=0.6.6 <0.8.0; 4 | pragma experimental ABIEncoderV2; 5 | 6 | import {SafeMath} from '@openzeppelin/contracts/math/SafeMath.sol'; 7 | import {IERC20} from '@openzeppelin/contracts/token/ERC20/IERC20.sol'; 8 | 9 | import {UniswapV2Library} from '@uniswap/v2-periphery/contracts/libraries/UniswapV2Library.sol'; 10 | import {IUniswapV2Router02} from '@uniswap/v2-periphery/contracts/interfaces/IUniswapV2Router02.sol'; 11 | 12 | import {IDefiBridge} from './interfaces/IDefiBridge.sol'; 13 | import {Types} from './Types.sol'; 14 | 15 | // import 'hardhat/console.sol'; 16 | 17 | contract UniswapBridge is IDefiBridge { 18 | using SafeMath for uint256; 19 | 20 | address public immutable defiBridgeProxy; 21 | address public weth; 22 | 23 | IUniswapV2Router02 router; 24 | 25 | constructor(address _defiBridgeProxy, address _router) public { 26 | defiBridgeProxy = _defiBridgeProxy; 27 | router = IUniswapV2Router02(_router); 28 | weth = router.WETH(); 29 | } 30 | 31 | receive() external payable {} 32 | 33 | function convert( 34 | Types.AztecAsset[4] calldata assets, 35 | uint64, 
36 | uint256, 37 | uint256 inputValue 38 | ) 39 | external 40 | payable 41 | override 42 | returns ( 43 | uint256 outputValueA, 44 | uint256, 45 | bool isAsync 46 | ) 47 | { 48 | require(msg.sender == defiBridgeProxy, 'UniswapBridge: INVALID_CALLER'); 49 | isAsync = false; 50 | uint256[] memory amounts; 51 | uint256 deadline = block.timestamp; 52 | // TODO This should check the pair exists on UNISWAP instead of blindly trying to swap. 53 | 54 | if (assets[0].assetType == Types.AztecAssetType.ETH && assets[2].assetType == Types.AztecAssetType.ERC20) { 55 | address[] memory path = new address[](2); 56 | path[0] = weth; 57 | path[1] = assets[2].erc20Address; 58 | amounts = router.swapExactETHForTokens{value: inputValue}(0, path, defiBridgeProxy, deadline); 59 | outputValueA = amounts[1]; 60 | } else if ( 61 | assets[0].assetType == Types.AztecAssetType.ERC20 && assets[2].assetType == Types.AztecAssetType.ETH 62 | ) { 63 | address[] memory path = new address[](2); 64 | path[0] = assets[0].erc20Address; 65 | path[1] = weth; 66 | require( 67 | IERC20(assets[0].erc20Address).approve(address(router), inputValue), 68 | 'UniswapBridge: APPROVE_FAILED' 69 | ); 70 | amounts = router.swapExactTokensForETH(inputValue, 0, path, defiBridgeProxy, deadline); 71 | outputValueA = amounts[1]; 72 | } else { 73 | // TODO what about swapping tokens? 
74 | revert('UniswapBridge: INCOMPATIBLE_ASSET_PAIR'); 75 | } 76 | } 77 | 78 | function canFinalise( 79 | Types.AztecAsset[4] calldata, 80 | uint64, 81 | uint256 82 | ) external view override returns (bool) { 83 | return false; 84 | } 85 | 86 | function finalise( 87 | Types.AztecAsset[4] calldata, 88 | uint64, 89 | uint256 90 | ) external payable override returns (uint256, uint256) { 91 | require(false); 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /solidity/contracts/interfaces/IDefiBridge.sol: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: GPL-2.0-only 2 | // Copyright 2020 Spilsbury Holdings Ltd 3 | pragma solidity >=0.6.6 <0.8.0; 4 | pragma experimental ABIEncoderV2; 5 | 6 | import {Types} from '../Types.sol'; 7 | 8 | interface IDefiBridge { 9 | /** 10 | * Input cases: 11 | * Case 1: 1 real asset. 12 | * Case 2: 1 virtual asset. 13 | * Case 3: 1 real asset 1 virtual asset. 14 | * 15 | * Output cases: 16 | * Case 1: 1 real asset. 17 | * Case 2: 2 real assets. 18 | * Case 3: 1 real asset 1 virtual asset. 19 | * Case 4: 1 virtual asset. 20 | * 21 | * Example use cases: 22 | * 1-1: Swapping. 23 | * 1-2: Swapping with incentives (2nd output reward token). 24 | * 1-3: Borrowing. Lock up collateral, get back loan asset and virtual position asset. 25 | * 1-4: Opening lending position OR Purchasing NFT. Input real asset, get back virtual asset representing NFT or position. 26 | * 2-1: Selling NFT. Input the virtual asset, get back a real asset. 27 | * 2-2: Closing a lending position. Get back original asset and reward asset. 28 | * 2-3: Claiming fees from an open position. 29 | * 2-4: Voting on a 1-4 case. 30 | * 3-1: Repaying a borrow. Return loan plus interest. Get collateral back. 31 | * 3-2: Repaying a borrow. Return loan plus interest. Get collateral plus reward token. (AAVE) 32 | * 3-3: Partial loan repayment. 33 | * 3-4: DAO voting stuff. 
34 | * 35 | * This function is called from the DefiBridgeProxy after the tokens or ETH have been sent to the bridge. 36 | * This function should call the defi protocol, and return the output assets to the DefiBridgeProxy if they are ETH or tokens, unless convert returns isAsync = true. 37 | * @param assets assets. 38 | * @param inputValue the total amount input, if there are two input assets equal amounts of both will have been input. 39 | * @param interactionNonce a globally unique identifier for this defi interaction. 40 | * @param auxData passthrough data for the bridge contract (could contain data for e.g. slippage, nftID, etc.) 41 | * @return outputValueA the amount of outputAssetA returned from this interaction, should be 0 if async. 42 | * @return outputValueB the amount of outputAssetB returned from this interaction, should be 0 if async or bridge only returns 1 asset. 43 | * @return isAsync a flag to signal if this bridge interaction will return assets at a later date with a call finalise(). 44 | */ 45 | function convert( 46 | Types.AztecAsset[4] calldata assets, 47 | uint64 auxData, 48 | uint256 interactionNonce, 49 | uint256 inputValue 50 | ) 51 | external 52 | payable 53 | returns ( 54 | uint256 outputValueA, 55 | uint256 outputValueB, 56 | bool 57 | ); 58 | 59 | /** 60 | * @dev This function is called via the DefiBridgeProxy. 61 | * @param interactionNonce the interaction nonce. 62 | * @return true if the asynchronous interaction denoted by interactionNonce can be finalised. 63 | */ 64 | function canFinalise( 65 | Types.AztecAsset[4] calldata assets, 66 | uint64 auxData, 67 | uint256 interactionNonce 68 | ) external view returns (bool); 69 | 70 | /** 71 | * This function is called via the DefiBridgeProxy. It should transfer the output assets specified by original call to `convert` with `interactionNonce`. 72 | * The defi bridge proxy will check it has received the return values if the asset types are ETH or ERC20. 
73 | * @param interactionNonce the interaction nonce of an async defi interaction being finalised. 74 | * @return outputValueA the return value of output asset A 75 | * @return outputValueB optional return value of output asset B 76 | */ 77 | function finalise( 78 | Types.AztecAsset[4] calldata assets, 79 | uint64 auxData, 80 | uint256 interactionNonce 81 | ) external payable returns (uint256 outputValueA, uint256 outputValueB); 82 | } 83 | -------------------------------------------------------------------------------- /solidity/ensure_versions.js: -------------------------------------------------------------------------------- 1 | const path = require('path'); 2 | const { readFileSync, writeFileSync } = require('fs'); 3 | 4 | // https://github.com/Uniswap/uniswap-v2-periphery/pull/53 5 | const safeMathFilename = path.resolve(__dirname, './node_modules/@uniswap/v2-periphery/contracts/libraries/SafeMath.sol'); 6 | 7 | try { 8 | const content = readFileSync(safeMathFilename, 'utf-8'); 9 | writeFileSync( 10 | safeMathFilename, 11 | content.replace('pragma solidity =0.6.6;', 'pragma solidity >=0.6.6;'), 12 | 'utf-8', 13 | ); 14 | } catch (e) { 15 | } 16 | -------------------------------------------------------------------------------- /solidity/hardhat.config.ts: -------------------------------------------------------------------------------- 1 | import { HardhatUserConfig } from 'hardhat/config'; 2 | import '@nomiclabs/hardhat-waffle'; 3 | import '@nomiclabs/hardhat-ethers'; 4 | 5 | const config: HardhatUserConfig = { 6 | solidity: { 7 | version: '0.6.10', 8 | settings: { 9 | evmVersion: 'berlin', 10 | optimizer: { enabled: true, runs: 200 }, 11 | }, 12 | }, 13 | networks: { 14 | ganache: { 15 | url: `http://${process.env.GANACHE_HOST || 'localhost'}:8545`, 16 | }, 17 | hardhat: { 18 | blockGasLimit: 15000000, 19 | gasPrice: 10, 20 | hardfork: 'berlin', 21 | }, 22 | }, 23 | paths: { 24 | artifacts: './src/artifacts', 25 | }, 26 | }; 27 | 28 | export default config; 
29 | -------------------------------------------------------------------------------- /solidity/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@aztec/defi-bridge-starter", 3 | "version": "1.0.0", 4 | "main": "index.js", 5 | "license": "GPLv2", 6 | "private": true, 7 | "scripts": { 8 | "build": "yarn clean && yarn compile", 9 | "compile": "node ./ensure_versions && yarn hardhat compile", 10 | "clean": "rm -rf ./cache ./src/artifacts", 11 | "test": "yarn compile && NODE_NO_WARNINGS=1 yarn jest --runInBand", 12 | "deploy": "ts-node ./src/deploy/index.ts" 13 | }, 14 | "jest": { 15 | "transform": { 16 | "^.+\\.ts$": "ts-jest" 17 | }, 18 | "testRegex": ".*\\.test\\.ts$", 19 | "rootDir": "./src", 20 | "testTimeout": 20000, 21 | "slowTestThreshold": 60 22 | }, 23 | "dependencies": { 24 | "@uniswap/v2-core": "^1.0.1", 25 | "@uniswap/v2-periphery": "^1.1.0-beta.0", 26 | "dotenv": "^8.2.0", 27 | "ethers": "5.0.29", 28 | "typescript": "^3.5.3" 29 | }, 30 | "devDependencies": { 31 | "@nomiclabs/hardhat-ethers": "2.0.1", 32 | "@nomiclabs/hardhat-waffle": "^2.0.0", 33 | "@openzeppelin/contracts": "^3.3.0", 34 | "@types/jest": "^26.0.15", 35 | "@types/mocha": "^9.0.0", 36 | "@typescript-eslint/eslint-plugin": "^4.1.1", 37 | "@typescript-eslint/parser": "^4.1.1", 38 | "eslint": "^7.9.0", 39 | "eslint-config-prettier": "^6.11.0", 40 | "ethereum-waffle": "3.0.0", 41 | "ethereumjs-util": "^7.0.7", 42 | "hardhat": "^2.4.3", 43 | "jest": "^27.0.6", 44 | "prettier": "^2.2.1", 45 | "prettier-plugin-solidity": "^1.0.0-beta.3", 46 | "ts-jest": "^27.0.3", 47 | "ts-node": "^8.9.1" 48 | }, 49 | "resolutions": { 50 | "ethers": "5.0.29", 51 | "@ethersproject/abstract-signer": "5.0.13" 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /solidity/src/contracts/defi_bridge_proxy.ts: -------------------------------------------------------------------------------- 1 | import { Provider 
} from '@ethersproject/providers'; 2 | import { Contract, ContractFactory, Signer } from 'ethers'; 3 | import abi from '../artifacts/contracts/DefiBridgeProxy.sol/DefiBridgeProxy.json'; 4 | 5 | export interface SendTxOptions { 6 | gasPrice?: bigint; 7 | gasLimit?: number; 8 | } 9 | 10 | export enum AztecAssetType { 11 | NOT_USED, 12 | ETH, 13 | ERC20, 14 | VIRTUAL, 15 | } 16 | 17 | export interface AztecAsset { 18 | id?: number; 19 | assetType?: AztecAssetType; 20 | erc20Address?: string; 21 | } 22 | 23 | const assetToArray = (asset: AztecAsset) => [ 24 | asset.id || 0, 25 | asset.erc20Address || '0x0000000000000000000000000000000000000000', 26 | asset.assetType || 0, 27 | ]; 28 | 29 | export class DefiBridgeProxy { 30 | private contract: Contract; 31 | 32 | constructor(public address: string, provider: Provider) { 33 | this.contract = new Contract(this.address, abi.abi, provider); 34 | } 35 | 36 | static async deploy(signer: Signer) { 37 | const factory = new ContractFactory(abi.abi, abi.bytecode, signer); 38 | const contract = await factory.deploy(); 39 | return new DefiBridgeProxy(contract.address, signer.provider!); 40 | } 41 | 42 | async deployBridge(signer: Signer, abi: any, args: any[]) { 43 | const factory = new ContractFactory(abi.abi, abi.bytecode, signer); 44 | const contract = await factory.deploy(this.contract.address, ...args); 45 | return contract.address; 46 | } 47 | 48 | /** 49 | * @param signer Signer sending the tx. 50 | * @param bridgeAddress Target bridge contract address. 51 | * @param assets [inputAssetA, inputAssetB, outputAssetA, outputAssetB]. 52 | * @param auxInputData 8 bytes of opaque data sent to the bridge contract. 53 | * @param interactionNonce The current unique interaction nonce. 54 | * @param inputValue To total input value. 55 | * @param options Ethereum tx send options. 
56 | * @returns 57 | */ 58 | async convert( 59 | signer: Signer, 60 | bridgeAddress: string, 61 | assets: AztecAsset[], 62 | auxInputData: bigint, 63 | interactionNonce: bigint, 64 | inputValue: bigint, 65 | options: SendTxOptions = {}, 66 | ) { 67 | const contract = new Contract(this.contract.address, this.contract.interface, signer); 68 | const { gasLimit, gasPrice } = options; 69 | 70 | const tx = await contract.convert( 71 | bridgeAddress, 72 | assets.map(assetToArray), 73 | auxInputData, 74 | interactionNonce, 75 | inputValue, 76 | { gasLimit, gasPrice }, 77 | ); 78 | const receipt = await tx.wait(); 79 | 80 | const parsedLogs = receipt.logs 81 | .filter((l: any) => l.address == contract.address) 82 | .map((l: any) => contract.interface.parseLog(l)); 83 | 84 | const { outputValueA, outputValueB, isAsync } = parsedLogs[0].args; 85 | 86 | return { 87 | isAsync, 88 | outputValueA: BigInt(outputValueA.toString()), 89 | outputValueB: BigInt(outputValueB.toString()), 90 | }; 91 | } 92 | 93 | async canFinalise(bridgeAddress: string, assets: AztecAsset[], auxInputData: bigint, interactionNonce: bigint) { 94 | return await this.contract.canFinalise(bridgeAddress, assets.map(assetToArray), auxInputData, interactionNonce); 95 | } 96 | 97 | async finalise( 98 | signer: Signer, 99 | bridgeAddress: string, 100 | assets: AztecAsset[], 101 | auxInputData: bigint, 102 | interactionNonce: bigint, 103 | options: SendTxOptions = {}, 104 | ) { 105 | const contract = new Contract(this.contract.address, this.contract.interface, signer); 106 | const { gasLimit, gasPrice } = options; 107 | const tx = await contract.finalise(bridgeAddress, assets.map(assetToArray), auxInputData, interactionNonce, { 108 | gasLimit, 109 | gasPrice, 110 | }); 111 | const receipt = await tx.wait(); 112 | 113 | const parsedLogs = receipt.logs 114 | .filter((l: any) => l.address == contract.address) 115 | .map((l: any) => contract.interface.parseLog(l)); 116 | 117 | const { outputValueA, outputValueB, 
isAsync } = parsedLogs[0].args; 118 | 119 | return { 120 | isAsync, 121 | outputValueA: BigInt(outputValueA.toString()), 122 | outputValueB: BigInt(outputValueB.toString()), 123 | }; 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /solidity/src/contracts/uniswap_bridge.test.ts: -------------------------------------------------------------------------------- 1 | import { ethers } from 'hardhat'; 2 | import { deployErc20 } from '../deploy/deploy_erc20'; 3 | import { deployUniswap, createPair } from '../deploy/deploy_uniswap'; 4 | import abi from '../artifacts/contracts/UniswapBridge.sol/UniswapBridge.json'; 5 | import { Contract, Signer } from 'ethers'; 6 | import { DefiBridgeProxy, AztecAssetType } from './defi_bridge_proxy'; 7 | 8 | describe('defi bridge', function () { 9 | let bridgeProxy: DefiBridgeProxy; 10 | let uniswapBridgeAddress: string; 11 | let signer: Signer; 12 | let signerAddress: string; 13 | let erc20: Contract; 14 | 15 | beforeAll(async () => { 16 | [signer] = await ethers.getSigners(); 17 | signerAddress = await signer.getAddress(); 18 | erc20 = await deployErc20(signer); 19 | const univ2 = await deployUniswap(signer); 20 | await createPair(signer, univ2, erc20); 21 | 22 | bridgeProxy = await DefiBridgeProxy.deploy(signer); 23 | uniswapBridgeAddress = await bridgeProxy.deployBridge(signer, abi, [univ2.address]); 24 | 25 | // Bridge proxy can be thought of as the rollup contract. Fund it. 26 | // TODO: Might need to do for tokens. 
27 | await signer.sendTransaction({ 28 | to: bridgeProxy.address, 29 | value: 10000n, 30 | }); 31 | }); 32 | 33 | it('should swap ETH to ERC20 tokens', async () => { 34 | const { isAsync, outputValueA, outputValueB } = await bridgeProxy.convert( 35 | signer, 36 | uniswapBridgeAddress, 37 | [ 38 | { 39 | assetType: AztecAssetType.ETH, 40 | id: 0, 41 | }, 42 | { 43 | assetType: AztecAssetType.NOT_USED, 44 | }, 45 | { 46 | assetType: AztecAssetType.ERC20, 47 | id: 1, 48 | erc20Address: erc20.address, 49 | }, 50 | { 51 | assetType: AztecAssetType.NOT_USED, 52 | }, 53 | ], 54 | 0n, 55 | 1n, 56 | 1000n, 57 | ); 58 | 59 | const proxyBalance = BigInt((await erc20.balanceOf(bridgeProxy.address)).toString()); 60 | expect(proxyBalance).toBe(outputValueA); 61 | expect(outputValueB).toBe(0n); 62 | expect(isAsync).toBe(false); 63 | }); 64 | }); 65 | -------------------------------------------------------------------------------- /solidity/src/deploy/deploy_erc20.ts: -------------------------------------------------------------------------------- 1 | import { Contract, ContractFactory, Signer } from 'ethers'; 2 | import ERC20Mintable from '../artifacts/contracts/ERC20Mintable.sol/ERC20Mintable.json'; 3 | 4 | export async function deployErc20(signer: Signer, decimals = 18) { 5 | console.log('Deploying ERC20...'); 6 | const erc20Factory = new ContractFactory(ERC20Mintable.abi, ERC20Mintable.bytecode, signer); 7 | const erc20 = await erc20Factory.deploy(); 8 | console.log(`ERC20 contract address: ${erc20.address}`); 9 | if (decimals !== 18) { 10 | console.log(`Changing decimals to: ${decimals}...`); 11 | await erc20.setDecimals(decimals); 12 | } 13 | return erc20; 14 | } 15 | -------------------------------------------------------------------------------- /solidity/src/deploy/deploy_uniswap.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import { ContractTransaction } from '@ethersproject/contracts'; 3 | import 
UniswapV2FactoryJson from '@uniswap/v2-core/build/UniswapV2Factory.json'; 4 | import UniswapV2PairJson from '@uniswap/v2-core/build/UniswapV2Pair.json'; 5 | import IWETH from '@uniswap/v2-periphery/build/IWETH.json'; 6 | import WETH9 from '@uniswap/v2-periphery/build/WETH9.json'; 7 | import UniswapV2Router02Json from '@uniswap/v2-periphery/build/UniswapV2Router02.json'; 8 | import { Contract, ContractFactory, Signer } from 'ethers'; 9 | 10 | export const createPair = async ( 11 | owner: Signer, 12 | router: Contract, 13 | asset: Contract, 14 | initialTokenSupply = 1000n * 10n ** 18n, 15 | initialEthSupply = 10n ** 18n, 16 | ) => { 17 | const factory = new Contract(await router.factory(), UniswapV2FactoryJson.abi, owner); 18 | const weth = new Contract(await router.WETH(), IWETH.abi, owner); 19 | const ZERO_ADDRESS = '0x0000000000000000000000000000000000000000'; 20 | 21 | if ((await factory.getPair(asset.address, weth.address)) != ZERO_ADDRESS) { 22 | console.log(`UniswapPair [${await asset.name()} - WETH] already created.`); 23 | return; 24 | } 25 | 26 | const minConfirmations = [1337, 31337].indexOf(await owner.getChainId()) >= 0 ? 1 : 3; 27 | const withConfirmation = async (action: Promise) => { 28 | const tx2 = await action; 29 | await tx2.wait(minConfirmations); 30 | }; 31 | 32 | console.log(`Create UniswapPair [${await asset.name()} - WETH]...`); 33 | await withConfirmation(factory.createPair(asset.address, weth.address)); 34 | const pairAddress = await factory.getPair(asset.address, weth.address); 35 | const pair = new Contract(pairAddress, UniswapV2PairJson.abi, owner); 36 | console.log(`Pair contract address: ${pairAddress}`); 37 | 38 | await withConfirmation(asset.mint(pair.address, initialTokenSupply)); 39 | 40 | await withConfirmation(weth.deposit({ value: initialEthSupply })); 41 | await withConfirmation(weth.transfer(pair.address, initialEthSupply)); 42 | 43 | // Don't do this in production. 
44 | await pair.mint(await owner.getAddress()); 45 | console.log(`Initial token supply: ${initialTokenSupply}`); 46 | console.log(`Initial ETH supply: ${initialEthSupply}`); 47 | }; 48 | 49 | export const deployUniswap = async (owner: Signer) => { 50 | console.log('Deploying UniswapFactory...'); 51 | const UniswapFactory = new ContractFactory(UniswapV2FactoryJson.abi, UniswapV2FactoryJson.bytecode, owner); 52 | const factory = await UniswapFactory.deploy(await owner.getAddress()); 53 | console.log(`UniswapFactory contract address: ${factory.address}`); 54 | 55 | console.log('Deploying WETH...'); 56 | const WETHFactory = new ContractFactory(WETH9.abi, WETH9.bytecode, owner); 57 | const weth = await WETHFactory.deploy(); 58 | console.log(`WETH contract address: ${weth.address}`); 59 | 60 | console.log('Deploying UniswapV2Router...'); 61 | const UniswapV2Router = new ContractFactory(UniswapV2Router02Json.abi, UniswapV2Router02Json.bytecode, owner); 62 | const router = await UniswapV2Router.deploy(factory.address, weth.address); 63 | console.log(`UniswapV2Router contract address: ${router.address}`); 64 | 65 | return router; 66 | }; 67 | -------------------------------------------------------------------------------- /solidity/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "esnext", 4 | "moduleResolution": "node", 5 | "lib": ["dom", "esnext", "es2017.object"], 6 | "outDir": "dest", 7 | "module": "commonjs", 8 | "strict": true, 9 | "noImplicitAny": true, 10 | "noImplicitThis": false, 11 | "esModuleInterop": true, 12 | "declaration": true, 13 | "emitDecoratorMetadata": true, 14 | "experimentalDecorators": true, 15 | "inlineSourceMap": true, 16 | "declarationMap": true, 17 | "resolveJsonModule": true 18 | }, 19 | "include": ["src"], 20 | "files": ["./hardhat.config.ts"] 21 | } 22 | --------------------------------------------------------------------------------