├── .circleci └── config.yml ├── .gitignore ├── .gitmodules ├── .prettierrc ├── AZTEC Setup Protocol Report .pdf ├── README.md ├── build-client.sh ├── build-image ├── .mbt.yml └── Dockerfile ├── ci-scripts ├── build.sh ├── deploy-public.sh └── deploy.sh ├── docker-compose.yml ├── job-server ├── .dockerignore ├── .mbt.yml ├── Dockerfile ├── README.md ├── docker-compose.yml ├── package.json ├── redis-scripts │ ├── complete_job.lua │ ├── get_job.lua │ └── setup.lua ├── src │ └── index.ts ├── terraform │ └── main.tf ├── tsconfig.json └── yarn.lock ├── setup-iac ├── main.tf ├── output.tf └── us-east-2 │ ├── main.tf │ └── output.tf ├── setup-mpc-alerts ├── .dockerignore ├── .mbt.yml ├── Dockerfile ├── package.json ├── src │ ├── app.ts │ ├── calculate-metrics.ts │ └── index.ts ├── terraform │ └── main.tf ├── tsconfig.json ├── tsconfig.prod.json └── yarn.lock ├── setup-mpc-client-bash ├── .gitignore ├── README.md ├── compute.sh ├── download.sh ├── download_signed.sh ├── recover-address │ ├── index.js │ ├── package-lock.json │ └── package.json ├── sign.sh ├── simulate_client.sh ├── upload.sh ├── upload_signed.sh ├── upload_transcript.sh └── verify.sh ├── setup-mpc-client ├── .dockerignore ├── .mbt.yml ├── Dockerfile ├── README.md ├── client.sh ├── compute_all.sh ├── docker-compose.yml ├── package.json ├── src │ ├── app.ts │ ├── compute.ts │ ├── downloader.ts │ ├── exports.ts │ ├── index.ts │ ├── terminal-interface.ts │ ├── terminal-kit │ │ └── index.ts │ └── uploader.ts ├── tsconfig.json ├── tsconfig.prod.json └── yarn.lock ├── setup-mpc-common ├── .dockerignore ├── .mbt.yml ├── Dockerfile ├── package.json ├── src │ ├── fifo.ts │ ├── hash-files.test.ts │ ├── hash-files.ts │ ├── http-client.ts │ ├── index.ts │ ├── mpc-server.ts │ └── mpc-state.ts ├── tsconfig.json ├── tsconfig.prod.json └── yarn.lock ├── setup-mpc-map ├── .dockerignore ├── .mbt.yml ├── Dockerfile ├── package.json ├── server │ └── index.js ├── src │ ├── @types │ │ └── cesium.d.ts │ ├── coordinator.ts │ ├── css │ │ └── main.css │ ├── index.html │ ├── index.ts │ ├── marker.ts │ ├── shooting-stars.ts │ └── viewer.ts ├── terraform │ └── main.tf ├── tsconfig.json ├── webpack.dev.js ├── webpack.prod.js └── yarn.lock ├── setup-mpc-server ├── .dockerignore ├── .gitignore ├── .mbt.yml ├── Dockerfile ├── README.md ├── docker-compose.yml ├── package.json ├── src │ ├── app.test.ts │ ├── app.ts │ ├── fs-async.ts │ ├── index.ts │ ├── maxmind │ │ ├── GeoLite2-City.mmdb │ │ └── index.ts │ ├── participant-selector.ts │ ├── publisher.ts │ ├── range-proof-publisher.ts │ ├── s3-explorer │ │ └── index.html │ ├── sealer.ts │ ├── server.ts │ ├── state-store.ts │ ├── state │ │ ├── advance-state.test.ts │ │ ├── advance-state.ts │ │ ├── create-participant.ts │ │ ├── default-state.ts │ │ ├── order-waiting-participants.test.ts │ │ ├── order-waiting-participants.ts │ │ ├── reset-participant.ts │ │ ├── select-participants.test.ts │ │ └── select-participants.ts │ ├── transcript-store.ts │ └── verifier.ts ├── terraform │ ├── main.tf │ └── variables.tf ├── tsconfig.json ├── tsconfig.prod.json └── yarn.lock ├── setup-mpc-webterm ├── .dockerignore ├── .mbt.yml ├── Dockerfile ├── package.json ├── server │ └── index.js ├── src │ ├── index.css │ ├── index.html │ └── index.ts ├── terraform │ └── main.tf ├── tsconfig.json ├── webpack.dev.js ├── webpack.prod.js └── yarn.lock ├── setup-post-process ├── .dockerignore ├── .mbt.yml ├── Dockerfile ├── REAME.md ├── docker-compose.yml ├── run ├── run-job └── terraform │ ├── create-spot-fleet-request.js │ └── main.tf ├── 
setup-tools ├── .dockerignore ├── .gitignore ├── .mbt.yml ├── CMakeLists.txt ├── Dockerfile.build ├── Dockerfile.deploy ├── README.md ├── build.sh ├── cmake │ └── HunterConfig.cmake ├── depends │ └── CMakeLists.txt ├── deploy.sh ├── docker-compose.yml ├── include │ ├── aztec_common.hpp │ └── blake2.h ├── src │ ├── CMakeLists.txt │ ├── aztec_common │ │ ├── CMakeLists.txt │ │ ├── assert.hpp │ │ ├── batch_normalize.hpp │ │ ├── checksum.hpp │ │ ├── compression.hpp │ │ ├── libff_types.hpp │ │ ├── streaming.cpp │ │ ├── streaming.hpp │ │ ├── streaming_g1.cpp │ │ ├── streaming_g1.hpp │ │ ├── streaming_g2.cpp │ │ ├── streaming_g2.hpp │ │ ├── streaming_range.hpp │ │ ├── streaming_transcript.cpp │ │ ├── streaming_transcript.hpp │ │ └── timer.hpp │ ├── generate_h │ │ ├── CMakeLists.txt │ │ ├── main.cpp │ │ ├── range_multi_exp.cpp │ │ └── range_multi_exp.hpp │ ├── generator │ │ ├── CMakeLists.txt │ │ ├── compute_generator_polynomial.hpp │ │ ├── compute_generator_polynomial.tcc │ │ └── main.cpp │ ├── print-point │ │ ├── CMakeLists.txt │ │ └── main.cpp │ ├── range-prep │ │ ├── CMakeLists.txt │ │ └── main.cpp │ ├── range │ │ ├── CMakeLists.txt │ │ ├── main.cpp │ │ ├── range_multi_exp.cpp │ │ └── range_multi_exp.hpp │ ├── range_verify │ │ ├── CMakeLists.txt │ │ └── main.cpp │ ├── setup │ │ ├── CMakeLists.txt │ │ ├── main.cpp │ │ ├── setup.cpp │ │ ├── setup.hpp │ │ └── utils.hpp │ └── verify │ │ ├── CMakeLists.txt │ │ ├── main.cpp │ │ ├── verifier.cpp │ │ └── verifier.hpp └── test │ ├── CMakeLists.txt │ ├── test_aztec_common.cpp │ ├── test_compute_range_polynomial.cpp │ ├── test_setup.cpp │ └── test_utils.hpp └── tslint.json /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | jobs: 3 | build_build_image: 4 | docker: 5 | - image: circleci/python:3.6.7-jessie 6 | steps: 7 | - checkout 8 | - run: 9 | name: 'Check if build image needs rebuilding' 10 | command: | 11 | LAST_SUCCESSFUL_BUILD_URL="https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/tree/master?filter=successful&limit=1" 12 | LAST_SUCCESSFUL_COMMIT=`curl -Ss -u "$CIRCLE_TOKEN:" $LAST_SUCCESSFUL_BUILD_URL | jq -r '.[0]["vcs_revision"]'` 13 | echo "Last successful commit: ${LAST_SUCCESSFUL_COMMIT}" 14 | if ! git diff --name-only ${LAST_SUCCESSFUL_COMMIT} ${CIRCLE_SHA1} | grep -q ^build-image/.*; then circleci step halt; fi 15 | - setup_remote_docker 16 | - run: 17 | name: 'Login to dockerhub' 18 | command: docker login -u aztecprotocolci -p "${DOCKERHUB_PASSWORD}" 19 | - run: 20 | name: 'Build and push build image' 21 | command: | 22 | cd build-image 23 | docker build -t aztecprotocol/build-image . 
24 | docker push aztecprotocol/build-image:latest 25 | 26 | build_and_test: 27 | docker: 28 | - image: aztecprotocol/build-image 29 | steps: 30 | - run: 31 | name: 'Setup environment variables' 32 | command: | 33 | LAST_SUCCESSFUL_BUILD_URL="https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/tree/master?filter=successful&limit=10" 34 | echo export LAST_SUCCESSFUL_COMMIT=`curl -Ss -u "$CIRCLE_TOKEN:" $LAST_SUCCESSFUL_BUILD_URL | jq -r '[.[] | select(.build_parameters.CIRCLE_JOB != "build_build_image")][0].vcs_revision'` >> $BASH_ENV 35 | cat $BASH_ENV 36 | - checkout 37 | - run: 38 | name: 'Pull submodules' 39 | command: git submodule init && git submodule update 40 | - setup_remote_docker: 41 | docker_layer_caching: true 42 | - run: 43 | name: 'ECR login' 44 | command: $(aws ecr get-login --no-include-email) 45 | - run: 46 | name: 'Build' 47 | command: | 48 | mbt build diff --from ${LAST_SUCCESSFUL_COMMIT} --to ${CIRCLE_SHA1} 49 | - run: 50 | name: 'Deploy if master' 51 | command: | 52 | if [ "${CIRCLE_BRANCH}" != "master" ]; then exit 0; fi 53 | mbt run-in diff --from ${LAST_SUCCESSFUL_COMMIT} --to ${CIRCLE_SHA1} -m deploy --fail-fast 54 | docker login -u aztecprotocolci -p "${DOCKERHUB_PASSWORD}" 55 | mbt run-in diff --from ${LAST_SUCCESSFUL_COMMIT} --to ${CIRCLE_SHA1} -m deploy-public --fail-fast 56 | 57 | workflows: 58 | build_and_push_images: 59 | jobs: 60 | - build_build_image 61 | - build_and_test: 62 | requires: 63 | - build_build_image 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | node_modules 3 | .terraform 4 | *.log 5 | *.ignore 6 | dest 7 | dist 8 | setup-tools/range_db 9 | .env -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "setup-tools/cmake/cable"] 2 | path = setup-tools/cmake/cable 3 | url = https://github.com/ethereum/cable 4 | [submodule "setup-tools/depends/libff"] 5 | path = setup-tools/depends/libff 6 | url = https://github.com/AztecProtocol/libff 7 | branch = master 8 | [submodule "setup-tools/depends/libfqfft"] 9 | path = setup-tools/depends/libfqfft 10 | url = https://github.com/scipr-lab/libfqfft.git 11 | [submodule "setup-tools/depends/blake2"] 12 | path = setup-tools/depends/blake2 13 | url = https://github.com/BLAKE2/BLAKE2.git 14 | [submodule "setup-tools/depends/barretenberg"] 15 | path = setup-tools/depends/barretenberg 16 | url = https://github.com/AztecProtocol/barretenberg 17 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "trailingComma": "es5", 4 | "printWidth": 120 5 | } -------------------------------------------------------------------------------- /AZTEC Setup Protocol Report .pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AztecProtocol/Setup/ce51e843674493723ee7151d856cbf3599a57b53/AZTEC Setup Protocol Report .pdf -------------------------------------------------------------------------------- /build-client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | git submodule init && git submodule update 4 | cd ./setup-tools 
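# Build the native setup-tools image first; the client image copies the setup binaries out of it.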
5 | ./build.sh 6 | cd ../setup-mpc-common 7 | docker build -t 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common:latest . 8 | cd ../setup-mpc-client 9 | docker build -t aztecprotocol/setup-mpc-client:latest . -------------------------------------------------------------------------------- /build-image/.mbt.yml: -------------------------------------------------------------------------------- 1 | name: build-image 2 | build: 3 | default: 4 | cmd: ../ci-scripts/build.sh 5 | args: 6 | - aztecprotocol/build-image 7 | commands: 8 | deploy-public: 9 | cmd: ../ci-scripts/deploy-public.sh 10 | args: 11 | - build-image 12 | -------------------------------------------------------------------------------- /build-image/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM circleci/python:3.6.8-jessie-node 2 | 3 | USER root 4 | RUN apt update 5 | 6 | # Terraform 7 | ENV TF_IN_AUTOMATION 1 8 | ENV TERRAFORM_VERSION 0.13.3 9 | RUN curl -sSL https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip -o terraform.zip \ 10 | && unzip terraform.zip \ 11 | && mv terraform /usr/local/bin/ \ 12 | && rm terraform.zip 13 | 14 | # AWS CLI 15 | RUN sudo pip install --upgrade awscli 16 | 17 | # MBT 18 | ENV MBT_VERSION 0.22.0 19 | RUN curl -sSL -o /usr/local/bin/mbt https://bintray.com/buddyspike/bin/download_file?file_path=mbt_linux_x86_64/0.22.0/0.22.0/mbt_linux_x86_64 \ 20 | && chmod +x /usr/local/bin/mbt 21 | 22 | USER circleci -------------------------------------------------------------------------------- /ci-scripts/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | IMAGE_NAME=$1 5 | 6 | # Docker layer caching can have old layers. Pull given images from remote if we haven't rebuilt this run. 7 | for DEP in $2; do 8 | if [ ! -f /tmp/${DEP#*/}.rebuilt ]; then 9 | docker pull $DEP:latest 10 | fi 11 | done 12 | 13 | docker build -t $IMAGE_NAME:latest . 
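# In CI, also tag the image with the commit SHA so deploys can reference an immutable version.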
14 | if [ -n "$CIRCLE_SHA1" ]; then 15 | docker tag $IMAGE_NAME:latest $IMAGE_NAME:$CIRCLE_SHA1 16 | fi 17 | 18 | touch /tmp/${IMAGE_NAME#*/}.rebuilt -------------------------------------------------------------------------------- /ci-scripts/deploy-public.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | docker push aztecprotocol/$1:latest 5 | if [ -n "$CIRCLE_SHA1" ]; then 6 | docker push aztecprotocol/$1:$CIRCLE_SHA1 7 | fi -------------------------------------------------------------------------------- /ci-scripts/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | LIFECYCLE_POLICY='{ 5 | "rules": [ 6 | { 7 | "rulePriority": 1, 8 | "description": "No more than 10 images.", 9 | "selection": { 10 | "tagStatus": "any", 11 | "countType": "imageCountMoreThan", 12 | "countNumber": 10 13 | }, 14 | "action": { 15 | "type": "expire" 16 | } 17 | } 18 | ] 19 | } 20 | ' 21 | 22 | REGION=${3:-$AWS_DEFAULT_REGION} 23 | 24 | $(aws ecr get-login --region $REGION --no-include-email) 25 | 26 | aws ecr describe-repositories --region $REGION --repository-names $1 > /dev/null 2>&1 || \ 27 | (aws ecr create-repository --region $REGION --repository-name $1 && \ 28 | aws ecr put-lifecycle-policy --region $REGION --repository-name $1 --lifecycle-policy-text "$LIFECYCLE_POLICY") 29 | docker push 278380418400.dkr.ecr.$REGION.amazonaws.com/$1:latest 30 | if [ -n "$CIRCLE_SHA1" ]; then 31 | docker push 278380418400.dkr.ecr.$REGION.amazonaws.com/$1:$CIRCLE_SHA1 32 | fi 33 | 34 | # Apply terraform. 35 | if [ -d ./terraform ]; then 36 | cd terraform 37 | terraform init -input=false 38 | terraform apply -input=false -auto-approve 39 | fi 40 | 41 | # Don't trigger service restart if argument 2 is -. 42 | [ "$2" != "-" ] || exit 0 43 | 44 | # Restart with latest image. 
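# The service name defaults to the repository/image name ($1); the cluster defaults to 'setup'.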
45 | SERVICE_NAME=${2:-$1}
46 | CLUSTER=${4:-setup}
47 | if aws ecs list-services --region $REGION --cluster $CLUSTER | grep $SERVICE_NAME > /dev/null; then
48 |   aws ecs update-service --region $REGION --cluster $CLUSTER --service $SERVICE_NAME --force-new-deployment
49 | fi
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   setup-mpc-server:
4 |     build: ./setup-mpc-server
5 |     image: 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-server
6 |     environment:
7 |       INFURA_API_KEY: $INFURA_API_KEY
8 |       STORE_PATH: './store'
9 |       TMP_PATH: './store/tmp'
10 |     ports:
11 |       - '8081:80'
12 |     volumes:
13 |       - ./setup-mpc-server:/usr/src/setup-mpc-server
14 |       - ./setup-mpc-common:/usr/src/setup-mpc-server/node_modules/setup-mpc-common
15 |       - ${HOME}/.aws:/root/.aws
16 | 
17 |   setup-mpc-map:
18 |     build: ./setup-mpc-map
19 |     image: 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-map
20 |     ports:
21 |       - '8080:80'
22 |     volumes:
23 |       - ./setup-mpc-map:/usr/src/setup-mpc-map
24 |     depends_on:
25 |       - setup-mpc-server
26 | 
27 |   job-server:
28 |     build: ./job-server
29 |     image: 278380418400.dkr.ecr.eu-west-2.amazonaws.com/job-server
30 |     environment:
31 |       NODE_ENV: production
32 |       REDIS_URL: redis://redis
33 |     ports:
34 |       - '8082:80'
35 |     volumes:
36 |       - ./job-server:/usr/src/job-server
37 |     depends_on:
38 |       - redis
39 | 
40 |   redis:
41 |     image: redis
42 | 
43 |   setup-post-process:
44 |     build: ./setup-post-process
45 |     image: 278380418400.dkr.ecr.us-east-2.amazonaws.com/setup-post-process
46 |     environment:
47 |       MPC_SERVER_HOST: setup-mpc-server
48 |       JOB_SERVER_HOST: job-server
49 |     volumes:
50 |       - ./setup-post-process:/usr/src/setup-post-process
51 |       - ${HOME}/.aws:/root/.aws
52 |     depends_on:
53 |       - job-server
54 | 
--------------------------------------------------------------------------------
/job-server/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | Dockerfile
3 | **/.terraform
--------------------------------------------------------------------------------
/job-server/.mbt.yml:
--------------------------------------------------------------------------------
1 | name: job-server
2 | build:
3 |   default:
4 |     cmd: ../ci-scripts/build.sh
5 |     args:
6 |       - 278380418400.dkr.ecr.eu-west-2.amazonaws.com/job-server
7 | commands:
8 |   deploy:
9 |     cmd: ../ci-scripts/deploy.sh
10 |     args:
11 |       - job-server
12 |       - setup-job-server
13 | 
--------------------------------------------------------------------------------
/job-server/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:10-alpine
2 | WORKDIR /usr/src/job-server
3 | COPY . .
4 | RUN yarn install && yarn build && rm -rf node_modules && yarn install --production && yarn cache clean
5 | CMD ["yarn", "start"]
--------------------------------------------------------------------------------
/job-server/README.md:
--------------------------------------------------------------------------------
1 | # Generic Job Server
2 | 
3 | This is a small Redis-backed server that schedules jobs identified by an integer job id.
4 | 
5 | # Running
6 | 
7 | 1. `docker-compose build` to build the `job-server` image.
8 | 2. `docker-compose up` to run the image, mounting local code into the container and watching for changes.
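
Once running, the endpoints documented below can be smoke-tested from the host. A minimal sketch, assuming the `8082:80` port mapping from the compose file:

```bash
# Reset the system and enqueue 100 jobs (ids 0..99).
curl 'http://localhost:8082/create-jobs?num=100'

# Lease a batch of 10 job ids (newline separated; leases expire back into the pool if not completed).
curl 'http://localhost:8082/job?num=10'

# Mark job 0 complete, storing the request body as its result.
curl -X PUT -H 'Content-Type: text/plain' --data 'result-for-0' 'http://localhost:8082/complete/0'

# Fetch the first result (add stream=1 to stream results as they complete).
curl 'http://localhost:8082/result?num=1'
```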
9 | 
10 | # Endpoints
11 | 
12 | - `GET /create-jobs?num=10000` - Reset the entire system and add 10,000 jobs. Optional `from` query parameter sets the first job id.
13 | - `GET /job` - Return a job id to process. Optional `num` query parameter to request a batch.
14 | - `PUT /complete/:id` - Mark the given job id as complete, and store the request body as the result.
15 | - `GET /result` - Download the results. Pass a `stream` query parameter to call this early and stream results as they arrive; without it, the request 404s until all requested results exist.
16 | 
--------------------------------------------------------------------------------
/job-server/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   job-server:
4 |     build: .
5 |     image: 278380418400.dkr.ecr.eu-west-2.amazonaws.com/job-server
6 |     command: yarn start:dev
7 |     environment:
8 |       NODE_ENV: production
9 |       REDIS_URL: redis://redis
10 |     ports:
11 |       - '8082:80'
12 |     volumes:
13 |       - .:/usr/src/job-server
14 | 
15 |   redis:
16 |     image: redis
17 | 
--------------------------------------------------------------------------------
/job-server/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "job-server",
3 |   "version": "1.0.0",
4 |   "main": "index.js",
5 |   "license": "MIT",
6 |   "dependencies": {
7 |     "bluebird": "^3.5.5",
8 |     "fs": "^0.0.1-security",
9 |     "koa": "^2.7.0",
10 |     "koa-bodyparser": "^4.2.1",
11 |     "koa-router": "^7.4.0",
12 |     "path": "^0.12.7",
13 |     "q": "^1.5.1",
14 |     "redis": "^2.8.0"
15 |   },
16 |   "scripts": {
17 |     "build": "tsc",
18 |     "start": "node ./dest",
19 |     "start:dev": "tsc-watch --onSuccess 'yarn start'"
20 |   },
21 |   "devDependencies": {
22 |     "@types/bluebird": "^3.5.27",
23 |     "@types/koa": "^2.0.49",
24 |     "@types/koa-bodyparser": "^4.3.0",
25 |     "@types/koa-router": "^7.0.42",
26 |     "@types/node": "^12.6.9",
27 |     "@types/redis": "^2.8.13",
28 |     "tsc-watch": "^2.2.1",
29 |     "typescript": "^3.5.3"
30 |   }
31 | }
32 | 
--------------------------------------------------------------------------------
/job-server/redis-scripts/complete_job.lua:
--------------------------------------------------------------------------------
1 | local job = ARGV[1]
2 | local result = ARGV[2]
3 | -- this is not the fastest as lrem is O(n)
4 | redis.pcall('lrem', 'pendingJobs', 1, job)
5 | 
6 | -- store the result under a per-job key so it can be returned later
7 | redis.pcall('set', 'complete:'..job, result)
8 | 
9 | return 1
--------------------------------------------------------------------------------
/job-server/redis-scripts/get_job.lua:
--------------------------------------------------------------------------------
1 | local function generateJob()
2 |   local from = tonumber(redis.pcall('get', 'from')) or 0
3 |   local to = tonumber(redis.pcall('get', 'to')) or 0
4 | 
5 |   if from == to then
6 |     return nil
7 |   end
8 | 
9 |   redis.pcall('rpush', 'pendingJobs', from)
10 |   redis.pcall('del', 'job:'..from)
11 |   redis.pcall('set', 'from', from + 1)
12 | 
13 |   return tostring(from)
14 | end
15 | 
16 | local function getExpiredJob()
17 |   local currentIndex = 0
18 | 
19 |   while true do
20 |     local job = redis.pcall('lindex', 'pendingJobs', currentIndex)
21 |     if job == false then
22 |       return nil
23 |     end
24 |     local jobPending = redis.pcall('exists', 'job:'..job)
25 |     if tonumber(jobPending) == 0 then
26 |       return job
27 |     end
28 |     currentIndex = currentIndex + 1
29 |   end
30 | end
31 | 
32 | local function getJobs(num, expirationTime)
33 |   local jobs = {}
34 | 
35 |   while table.getn(jobs) < num do
36 |     local job = getExpiredJob() or generateJob()
37 |     if job == nil
then 38 | break; 39 | end 40 | redis.pcall('setex', 'job:'..job, expirationTime, job) 41 | table.insert(jobs, job) 42 | end 43 | 44 | return jobs; 45 | end 46 | 47 | return table.concat(getJobs(tonumber(ARGV[1]), tonumber(ARGV[2])), '\n') -------------------------------------------------------------------------------- /job-server/redis-scripts/setup.lua: -------------------------------------------------------------------------------- 1 | local from = tonumber(ARGV[1]) 2 | local numberOfJobs = tonumber(ARGV[2]) 3 | 4 | redis.pcall('flushall') 5 | redis.pcall('set', 'from', from) 6 | redis.pcall('set', 'to', from + numberOfJobs) 7 | redis.pcall('del', 'pendingJobs') -------------------------------------------------------------------------------- /job-server/src/index.ts: -------------------------------------------------------------------------------- 1 | import { readdirSync, readFileSync } from 'fs'; 2 | import redis, { RedisClient } from 'redis'; 3 | import bluebird from 'bluebird'; 4 | import http from 'http'; 5 | import Koa from 'koa'; 6 | import Router from 'koa-router'; 7 | import bodyParser from 'koa-bodyparser'; 8 | import { Readable } from 'stream'; 9 | 10 | bluebird.promisifyAll(redis); 11 | 12 | declare module 'redis' { 13 | interface RedisClient { 14 | scriptAsync: any; 15 | evalshaAsync: any; 16 | getAsync: any; 17 | mgetAsync: any; 18 | } 19 | } 20 | 21 | const { PORT = 80, REDIS_URL = 'redis://localhost:6379', JOB_EXPIRATION_TIME = '900' } = process.env; 22 | const SCRIPT_DIR = './redis-scripts'; 23 | 24 | async function loadScripts(redisClient: RedisClient) { 25 | console.log('Loading scripts...'); 26 | const files = readdirSync(SCRIPT_DIR); 27 | const shas = await Promise.all( 28 | files.map(file => redisClient.scriptAsync('load', readFileSync(`${SCRIPT_DIR}/${file}`))) 29 | ); 30 | 31 | return files.reduce((a, file, i) => ({ ...a, [file]: shas[i] }), {}); 32 | } 33 | 34 | function createResultStream(redisClient: RedisClient, from: number, num: number) { 35 | return new Readable({ 36 | async read() { 37 | if (!num) { 38 | this.push(null); 39 | return; 40 | } 41 | 42 | const batch = Math.min(num, 100); 43 | const keys = Array(batch) 44 | .fill(0) 45 | .map((_, idx) => `complete:${from + idx}`); 46 | const results = await redisClient.mgetAsync(...keys); 47 | const nullIndex = results.indexOf(null); 48 | const toSend = nullIndex === -1 ? 
batch : nullIndex;
49 | 
50 |       from += toSend;
51 |       num -= toSend;
52 | 
53 |       if (toSend === 0) {
54 |         await new Promise(resolve => setTimeout(resolve, 1000));
55 |       }
56 | 
57 |       // Terminate each batch with a newline so consecutive chunks don't run together.
58 |       const chunk = toSend ? results.slice(0, toSend).join('\n') + '\n' : '';
59 |       this.push(chunk);
60 |     },
61 |   });
62 | }
63 | 
64 | async function getResults(redisClient: RedisClient, from: number, num: number) {
65 |   const keys = new Array(num).fill(0).map((_, idx) => `complete:${from + idx}`);
66 |   const results: string[] = await redisClient.mgetAsync(...keys);
67 | 
68 |   if (results.some(r => r === null)) {
69 |     return [];
70 |   }
71 | 
72 |   return results;
73 | }
74 | 
75 | function app(redisClient: RedisClient, scripts: { [k: string]: string }) {
76 |   const router = new Router();
77 | 
78 |   router.get('/', async ctx => {
79 |     ctx.body = 'OK';
80 |   });
81 | 
82 |   router.get('/create-jobs', async ctx => {
83 |     scripts = await loadScripts(redisClient);
84 |     const { from = 0, num = 100 } = ctx.query;
85 |     ctx.body = await redisClient.evalshaAsync(scripts['setup.lua'], 0, from, num);
86 |   });
87 | 
88 |   router.get('/job', async ctx => {
89 |     const numJobs = ctx.query.num || 1;
90 |     ctx.body = await redisClient.evalshaAsync(scripts['get_job.lua'], 0, numJobs, JOB_EXPIRATION_TIME);
91 |   });
92 | 
93 |   router.put('/complete/:id', async ctx => {
94 |     await redisClient.evalshaAsync(scripts['complete_job.lua'], 0, ctx.params.id, ctx.request.body);
95 |     ctx.body = ctx.request.body;
96 |   });
97 | 
98 |   router.get('/result', async ctx => {
99 |     const to = Number(await redisClient.getAsync('to'));
100 |     let from: number = +ctx.query.from || 0;
101 |     let num = +ctx.query.num || to - from;
102 | 
103 |     if (ctx.query.stream) {
104 |       ctx.body = createResultStream(redisClient, from, num);
105 |     } else {
106 |       const results = await getResults(redisClient, from, num);
107 |       if (!results.length) {
108 |         ctx.status = 404;
109 |         return;
110 |       }
111 |       ctx.body = results.join('\n');
112 |     }
113 |   });
114 | 
115 |   const app = new Koa();
116 |   app.use(bodyParser({ enableTypes: ['text'] }));
117 |   app.use(router.routes());
118 | 
119 |   return app;
120 | }
121 | 
122 | async function main() {
123 |   const redisClient = redis.createClient(REDIS_URL);
124 |   const scripts = await loadScripts(redisClient);
125 |   console.log('Scripts loaded: ', scripts);
126 | 
127 |   const server = http.createServer(app(redisClient, scripts).callback());
128 |   server.listen(PORT);
129 |   console.log(`Server listening on port ${PORT}.`);
130 | 
131 |   const shutdown = async () => {
132 |     console.log('Shutting down.');
133 |     await new Promise(resolve => server.close(resolve));
134 |     console.log('Shutdown complete.');
135 |   };
136 | 
137 |   process.once('SIGINT', shutdown);
138 |   process.once('SIGTERM', shutdown);
139 | }
140 | 
141 | main().catch(console.error);
--------------------------------------------------------------------------------
/job-server/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 |   "compilerOptions": {
3 |     "target": "esnext",
4 |     "module": "commonjs",
5 |     "moduleResolution": "node",
6 |     "lib": ["dom", "esnext", "es2017.object"],
7 |     "outDir": "dest",
8 |     "strict": true,
9 |     "noImplicitAny": true,
10 |     "noImplicitThis": false,
11 |     "esModuleInterop": true,
12 |     "declaration": true
13 |   },
14 |   "include": ["src"]
15 | }
16 | 
--------------------------------------------------------------------------------
/setup-iac/output.tf:
--------------------------------------------------------------------------------
1 | output "vpc_id" {
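  # Consumed by downstream stacks (e.g. setup-mpc-alerts) via terraform_remote_state.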
2 | value = "${aws_vpc.setup.id}" 3 | } 4 | 5 | output "vpc_main_route_table_id" { 6 | value = "${aws_vpc.setup.main_route_table_id}" 7 | } 8 | 9 | output "route_table_az1_private_id" { 10 | value = "${aws_route_table.private_az1.id}" 11 | } 12 | 13 | output "route_table_az2_private_id" { 14 | value = "${aws_route_table.private_az2.id}" 15 | } 16 | 17 | output "local_service_discovery_id" { 18 | value = "${aws_service_discovery_private_dns_namespace.local.id}" 19 | } 20 | 21 | output "ecs_spot_fleet_role_arn" { 22 | value = "${aws_iam_role.ec2_spot_fleet_role.arn}" 23 | } 24 | 25 | output "ecs_task_execution_role_arn" { 26 | value = "${aws_iam_role.ecs_task_execution_role.arn}" 27 | } 28 | 29 | output "ecs_cluster_id" { 30 | value = "${aws_ecs_cluster.setup.id}" 31 | } 32 | 33 | output "ecs_cluster_name" { 34 | value = "${aws_ecs_cluster.setup.name}" 35 | } 36 | 37 | output "subnet_az1_id" { 38 | value = "${aws_subnet.public_az1.id}" 39 | } 40 | 41 | output "subnet_az2_id" { 42 | value = "${aws_subnet.public_az2.id}" 43 | } 44 | 45 | output "subnet_az1_private_id" { 46 | value = "${aws_subnet.private_az1.id}" 47 | } 48 | 49 | output "subnet_az2_private_id" { 50 | value = "${aws_subnet.private_az2.id}" 51 | } 52 | 53 | output "security_group_private_id" { 54 | value = "${aws_security_group.private.id}" 55 | } 56 | 57 | output "security_group_public_id" { 58 | value = "${aws_security_group.public.id}" 59 | } 60 | 61 | output "alb_arn" { 62 | value = "${aws_alb.setup.arn}" 63 | } 64 | 65 | output "alb_listener_arn" { 66 | value = "${aws_alb_listener.https_listener.arn}" 67 | } 68 | 69 | output "ecs_instance_profile_name" { 70 | value = "${aws_iam_instance_profile.ecs.name}" 71 | } 72 | 73 | output "ecs_instance_key_pair_name" { 74 | value = "${aws_key_pair.instance_key_pair.key_name}" 75 | } 76 | 77 | output "bastion_private_ip" { 78 | value = "${aws_instance.bastion.private_ip}" 79 | } 80 | -------------------------------------------------------------------------------- /setup-iac/us-east-2/output.tf: -------------------------------------------------------------------------------- 1 | output "ecs_main_cluster_id" { 2 | value = "${aws_ecs_cluster.main.id}" 3 | } 4 | 5 | output "ecs_main_cluster_name" { 6 | value = "${aws_ecs_cluster.main.name}" 7 | } 8 | 9 | output "subnet_az1_private_id" { 10 | value = "${aws_subnet.private_az1.id}" 11 | } 12 | 13 | output "subnet_az2_private_id" { 14 | value = "${aws_subnet.private_az2.id}" 15 | } 16 | 17 | output "subnet_az3_private_id" { 18 | value = "${aws_subnet.private_az3.id}" 19 | } 20 | 21 | output "security_group_private_id" { 22 | value = "${aws_security_group.private.id}" 23 | } 24 | 25 | output "instance_key_pair_name" { 26 | value = "${aws_key_pair.instance_key_pair.key_name}" 27 | } 28 | -------------------------------------------------------------------------------- /setup-mpc-alerts/.dockerignore: -------------------------------------------------------------------------------- 1 | dest 2 | node_modules -------------------------------------------------------------------------------- /setup-mpc-alerts/.mbt.yml: -------------------------------------------------------------------------------- 1 | name: setup-mpc-alerts 2 | build: 3 | default: 4 | cmd: ../ci-scripts/build.sh 5 | args: 6 | - 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-alerts 7 | - 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common 8 | commands: 9 | deploy-public: 10 | cmd: ../ci-scripts/deploy.sh 11 | args: 12 | - setup-mpc-alerts 13 | dependencies: 14 | - 
setup-mpc-common
15 | 
--------------------------------------------------------------------------------
/setup-mpc-alerts/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common:latest
2 | 
3 | FROM node:10
4 | WORKDIR /usr/src/setup-mpc-common
5 | COPY --from=0 /usr/src/setup-mpc-common .
6 | RUN yarn link
7 | WORKDIR /usr/src/setup-mpc-alerts
8 | COPY package.json yarn.lock ./
9 | RUN yarn install
10 | COPY . .
11 | RUN yarn build
12 | CMD ["yarn", "--silent", "start"]
--------------------------------------------------------------------------------
/setup-mpc-alerts/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "setup-mpc-alerts",
3 |   "version": "1.0.0",
4 |   "main": "dest/index.js",
5 |   "license": "MIT",
6 |   "scripts": {
7 |     "start": "node ./dest",
8 |     "start:dev": "tsc-watch -p tsconfig.prod.json --onSuccess 'yarn start'",
9 |     "build": "tsc -p tsconfig.prod.json",
10 |     "postinstall": "yarn link setup-mpc-common"
11 |   },
12 |   "devDependencies": {
13 |     "@types/moment": "^2.13.0",
14 |     "@types/node": "^12.12.3",
15 |     "tsc-watch": "^4.0.0",
16 |     "typescript": "^3.5.3"
17 |   },
18 |   "dependencies": {
19 |     "@slack/webhook": "^5.0.2",
20 |     "moment": "^2.24.0"
21 |   }
22 | }
23 | 
--------------------------------------------------------------------------------
/setup-mpc-alerts/src/app.ts:
--------------------------------------------------------------------------------
1 | import moment from 'moment';
2 | import { applyDelta, MpcServer, MpcState, Participant } from 'setup-mpc-common';
3 | import { IncomingWebhook } from '@slack/webhook';
4 | import { calculateMetrics } from './calculate-metrics';
5 | 
6 | export class App {
7 |   private interval!: NodeJS.Timeout;
8 |   private state?: MpcState;
9 |   private running?: Participant;
10 |   private alerted = false;
11 |   private slack: IncomingWebhook;
12 | 
13 |   constructor(private server: MpcServer, private alertTimeLeft: number, private token: string) {
14 |     const webhook = `https://hooks.slack.com/services/T8P21L9SL/BPZNB6448/${token}`;
15 |     this.slack = new IncomingWebhook(webhook);
16 |   }
17 | 
18 |   public start() {
19 |     this.send(':unicorn_face: Alert application started.');
20 |     this.updateState();
21 |   }
22 | 
23 |   public stop() {
24 |     clearTimeout(this.interval);
25 |   }
26 | 
27 |   private updateState = async () => {
28 |     try {
29 |       // Get the latest state from the server.
30 |       const remoteStateDelta = await this.server.getState(this.state ? this.state.sequence : undefined);
31 | 
32 |       // this.state updates atomically in this code block, allowing the ui to update independently.
33 |       if (!this.state) {
34 |         this.state = remoteStateDelta;
35 |       } else if (this.state.startSequence !== remoteStateDelta.startSequence) {
36 |         this.state = await this.server.getState();
37 |       } else {
38 |         this.state = applyDelta(this.state, remoteStateDelta);
39 |       }
40 | 
41 |       const running = this.state.participants.find(p => p.state === 'RUNNING');
42 | 
43 |       // No one's running, do nothing.
44 |       if (!running && !this.running) {
45 |         return;
46 |       }
47 | 
48 |       if (running && this.running) {
49 |         if (running.address.equals(this.running.address)) {
50 |           // Nearing timeout.
51 |           this.alertIfTimeout(running);
52 |         } else {
53 |           // We've advanced to a new participant.
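          // alertParticipantFinished re-reads the finished participant from the fresh state for its final status.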
54 | this.alertParticipantFinished(this.running); 55 | this.running = running; 56 | this.alerted = false; 57 | } 58 | } else if (this.running && !running) { 59 | // We've just finished but haven't started a new participant. 60 | this.alertParticipantFinished(this.running); 61 | this.running = running; 62 | this.alerted = false; 63 | } 64 | 65 | this.running = running; 66 | } catch (err) { 67 | console.error(err); 68 | } finally { 69 | this.scheduleUpdate(); 70 | } 71 | }; 72 | 73 | private alertParticipantFinished(running: Participant) { 74 | const previous = this.state!.participants.find(p => p.address.equals(running.address))!; 75 | if (previous.state === 'COMPLETE') { 76 | this.send(`:tada: Participant complete: \`${previous.address}\``); 77 | } else { 78 | this.send(`:boom: Participant failed \`${previous.address}\`: ${previous.error}.`); 79 | } 80 | } 81 | 82 | private alertIfTimeout(p: Participant) { 83 | const { 84 | downloadProgress, 85 | computeProgress, 86 | uploadProgress, 87 | verifyProgress, 88 | verifyTimeout, 89 | totalTimeout, 90 | } = calculateMetrics(this.state!, p); 91 | 92 | if (totalTimeout < this.alertTimeLeft || (p.tier > 1 && verifyTimeout < this.alertTimeLeft)) { 93 | if (!this.alerted) { 94 | this.alerted = true; 95 | this.send( 96 | `:exclamation: Participant \`${p.address}\` will timeout in ${this.alertTimeLeft}s.\n` + 97 | `\`\`\`download: ${downloadProgress.toFixed(2)}\n` + 98 | `compute: ${computeProgress.toFixed(2)}\n` + 99 | `upload: ${uploadProgress.toFixed(2)}\n` + 100 | `verify: ${verifyProgress.toFixed(2)}\`\`\`` 101 | ); 102 | } 103 | } else { 104 | this.alerted = false; 105 | } 106 | } 107 | 108 | private send(text: string) { 109 | console.log(text); 110 | if (this.token) { 111 | this.slack.send(text); 112 | } 113 | } 114 | 115 | private scheduleUpdate = () => { 116 | this.interval = setTimeout(this.updateState, 1000); 117 | }; 118 | } 119 | -------------------------------------------------------------------------------- /setup-mpc-alerts/src/calculate-metrics.ts: -------------------------------------------------------------------------------- 1 | import { Participant, MpcState } from 'setup-mpc-common'; 2 | import moment = require('moment'); 3 | 4 | export interface Metrics { 5 | downloadProgress: number; 6 | computeProgress: number; 7 | uploadProgress: number; 8 | verifyProgress: number; 9 | verifyTimeout: number; 10 | totalTimeout: number; 11 | } 12 | 13 | export function calculateMetrics(state: MpcState, p: Participant): Metrics { 14 | const { invalidateAfter, numG1Points, numG2Points, pointsPerTranscript } = state!; 15 | const completeWithin = p.invalidateAfter || invalidateAfter; 16 | const verifyWithin = (2 * completeWithin) / (Math.max(numG1Points, numG2Points) / pointsPerTranscript); 17 | const verifyTimeout = Math.max( 18 | 0, 19 | moment(p.lastVerified || p.startedAt!) 20 | .add(verifyWithin, 's') 21 | .diff(moment(), 's') 22 | ); 23 | 24 | const totalTimeout = Math.max( 25 | 0, 26 | moment(p.startedAt!) 27 | .add(completeWithin, 's') 28 | .diff(moment(), 's') 29 | ); 30 | 31 | const totalData = p.transcripts.reduce((a, t) => a + t.size, 0); 32 | const totalDownloaded = p.transcripts.reduce((a, t) => a + t.downloaded, 0); 33 | const totalUploaded = p.transcripts.reduce((a, t) => a + t.uploaded, 0); 34 | const downloadProgress = totalData ? (totalDownloaded / totalData) * 100 : 0; 35 | const uploadProgress = totalData ? 
(totalUploaded / totalData) * 100 : 0; 36 | const computeProgress = p.computeProgress; 37 | const verifyProgress = p.verifyProgress; 38 | 39 | return { 40 | downloadProgress, 41 | uploadProgress, 42 | computeProgress, 43 | verifyProgress, 44 | verifyTimeout, 45 | totalTimeout, 46 | }; 47 | } 48 | -------------------------------------------------------------------------------- /setup-mpc-alerts/src/index.ts: -------------------------------------------------------------------------------- 1 | import { HttpClient } from 'setup-mpc-common'; 2 | import { App } from './app'; 3 | 4 | async function main() { 5 | const { 6 | API_URL = 'https://ignition.aztecprotocol.com/api', 7 | ALERT_TIME_LEFT = '600', 8 | SLACK_MPC_TOKEN = '', 9 | } = process.env; 10 | 11 | const server = new HttpClient(API_URL); 12 | const app = new App(server, +ALERT_TIME_LEFT, SLACK_MPC_TOKEN); 13 | 14 | app.start(); 15 | 16 | const shutdown = () => { 17 | app.stop(); 18 | process.exit(0); 19 | }; 20 | process.once('SIGINT', shutdown); 21 | process.once('SIGTERM', shutdown); 22 | } 23 | 24 | main().catch(err => console.log(err.message)); 25 | -------------------------------------------------------------------------------- /setup-mpc-alerts/terraform/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "aztec-terraform" 4 | key = "setup/setup-mpc-alerts" 5 | region = "eu-west-2" 6 | } 7 | } 8 | 9 | data "terraform_remote_state" "setup_iac" { 10 | backend = "s3" 11 | config = { 12 | bucket = "aztec-terraform" 13 | key = "setup/setup-iac" 14 | region = "eu-west-2" 15 | } 16 | } 17 | 18 | provider "aws" { 19 | profile = "default" 20 | region = "eu-west-2" 21 | } 22 | 23 | variable "SLACK_MPC_TOKEN" { 24 | type = "string" 25 | } 26 | 27 | resource "aws_ecs_task_definition" "setup_mpc_alerts" { 28 | family = "setup-mpc-alerts" 29 | requires_compatibilities = ["FARGATE"] 30 | network_mode = "awsvpc" 31 | cpu = "256" 32 | memory = "512" 33 | execution_role_arn = "${data.terraform_remote_state.setup_iac.outputs.ecs_task_execution_role_arn}" 34 | 35 | container_definitions = < $SETUP_DIR/transcript$TRANSCRIPT.dat 13 | done -------------------------------------------------------------------------------- /setup-mpc-client-bash/download_signed.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | : ${API_URL=https://ignition.aztecprotocol.com} 5 | : ${TRANSCRIPTS=20} 6 | : ${SETUP_DIR=$(pwd)/setup_db} 7 | 8 | PREV_ADDRESS=$(echo $PREV_ADDRESS | tr '[:upper:]' '[:lower:]') 9 | 10 | if [ ! -d "./recover-address/node_modules" ]; then 11 | pushd ./recover-address 12 | npm install 13 | popd 14 | fi 15 | 16 | mkdir -p $SETUP_DIR 17 | rm -f $SETUP_DIR/* 18 | for TRANSCRIPT in $(seq 0 $[TRANSCRIPTS - 1]); do 19 | echo Downloading transcript $PREV_ADDRESS/$TRANSCRIPT... 20 | FILENAME=$SETUP_DIR/transcript$TRANSCRIPT 21 | curl -s -S $API_URL/api/data/$PREV_ADDRESS/$TRANSCRIPT > $FILENAME.dat 22 | curl -s -S $API_URL/api/signature/$PREV_ADDRESS/$TRANSCRIPT > $FILENAME.sig 23 | RECOVERED=$(node recover-address 0x$(shasum -a 256 $FILENAME.dat | cut -f1 -d ' ') $(cat $FILENAME.sig)) 24 | if [ "${RECOVERED}" != "${PREV_ADDRESS}" ]; then 25 | echo "Signature verification failed for transcript $TRANSCRIPT." 
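    # Recovered signer doesn't match the expected previous participant, so the transcript can't be trusted.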
26 |     exit 1
27 |   fi
28 | done
--------------------------------------------------------------------------------
/setup-mpc-client-bash/recover-address/index.js:
--------------------------------------------------------------------------------
1 | const { recover } = require('web3x/utils');
2 | 
3 | try {
4 |   console.log(
5 |     recover(process.argv[2], process.argv[3])
6 |       .toString()
7 |       .toLowerCase()
8 |   );
9 | } catch (err) {
10 |   console.log('Failed to recover address.');
11 |   process.exit(1);
12 | }
13 | 
--------------------------------------------------------------------------------
/setup-mpc-client-bash/recover-address/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "recover-address",
3 |   "version": "1.0.0",
4 |   "description": "",
5 |   "main": "index.js",
6 |   "scripts": {
7 |     "test": "echo \"Error: no test specified\" && exit 1"
8 |   },
9 |   "author": "",
10 |   "license": "ISC",
11 |   "dependencies": {
12 |     "web3x": "^4.0.4"
13 |   }
14 | }
15 | 
--------------------------------------------------------------------------------
/setup-mpc-client-bash/sign.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | : ${ETH_URL=http://localhost:8545}
5 | : ${SETUP_DIR=$(pwd)/setup_db}
6 | : ${TRANSCRIPTS=20}
7 | 
8 | for TRANSCRIPT in $(seq 0 $[TRANSCRIPTS - 1]); do
9 |   echo Signing transcript $ADDRESS/$TRANSCRIPT...
10 |   curl -s -S -X POST --data "{\"jsonrpc\":\"2.0\",\"method\":\"eth_sign\",\"params\":[\"$ADDRESS\", \"$(shasum -a 256 $SETUP_DIR/transcript${TRANSCRIPT}_out.dat | cut -f1 -d ' ')\"],\"id\":1}" $ETH_URL | jq -j .result > $SETUP_DIR/transcript${TRANSCRIPT}_out.sig
11 | done
--------------------------------------------------------------------------------
/setup-mpc-client-bash/simulate_client.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | set -x
4 | 
5 | : ${API_URL=https://ignition.aztecprotocol.com}
6 | : ${ETH_URL=http://localhost:8545}
7 | : ${SETUP_DIR=$(pwd)/setup_db}
8 | : ${TRANSCRIPTS=20}
9 | 
10 | function sendMessage() {
11 |   SIGNATURE=$(curl -s -S -X POST --data "{\"jsonrpc\":\"2.0\",\"method\":\"eth_sign\",\"params\":[\"$ADDRESS\", \"$(echo -n "$1" | xxd -pu -c1000)\"],\"id\":1}" $ETH_URL | jq -j .result)
12 |   if [ -z "$3" ]; then
13 |     curl -s -S $API_URL/api$2 -H "X-Signature: $SIGNATURE"
14 |   else
15 |     curl -s -S $API_URL/api$2 -H "X-Signature: $SIGNATURE" -H "Content-Type: application/json" -X PATCH -d "$1"
16 |   fi
17 | }
18 | 
19 | sendMessage "ping" "/ping/$ADDRESS"
20 | sleep 1
21 | while true; do
22 |   sendMessage "{\"runningState\":\"OFFLINE\",\"computeProgress\":0}" "/participant/$ADDRESS" 1
23 |   sleep 5
24 | done
--------------------------------------------------------------------------------
/setup-mpc-client-bash/upload.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | : ${TRANSCRIPTS=20}
5 | 
6 | for TRANSCRIPT in $(seq 0 $[TRANSCRIPTS - 1]); do
7 |   TRANSCRIPT=$TRANSCRIPT ./upload_transcript.sh
8 | done
--------------------------------------------------------------------------------
/setup-mpc-client-bash/upload_signed.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | 
4 | : ${API_URL=https://ignition.aztecprotocol.com}
5 | : ${SETUP_DIR=$(pwd)/setup_db}
6 | : ${SIGS=$SETUP_DIR/sigs.json}
7 | : ${TRANSCRIPTS=20}
8 | 
9 | for TRANSCRIPT in $(seq 0 $[TRANSCRIPTS
- 1]); do 10 | SIGNATURE=$(cat $SIGS | jq -r ".[$TRANSCRIPT]") 11 | echo "Uploading transcript $TRANSCRIPT with signature $SIGNATURE..." 12 | curl -s -S $API_URL/api/data/$ADDRESS/$TRANSCRIPT --upload-file $SETUP_DIR/transcript${TRANSCRIPT}${TRANSCRIPT_POSTFIX}.dat -H "X-Signature: $SIGNATURE" > /dev/null 13 | done -------------------------------------------------------------------------------- /setup-mpc-client-bash/upload_transcript.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -x 4 | 5 | : ${ETH_URL=http://localhost:8545} 6 | : ${API_URL=https://ignition.aztecprotocol.com} 7 | : ${SETUP_DIR=$(pwd)/setup_db} 8 | : ${TRANSCRIPT=0} 9 | : ${TRANSCRIPT_POSTFIX=_out} 10 | 11 | echo Uploading transcript $ADDRESS/$TRANSCRIPT... 12 | PING_SIG=$(curl -s -S -X POST --data "{\"jsonrpc\":\"2.0\",\"method\":\"eth_sign\",\"params\":[\"$ADDRESS\", \"70696e67\"],\"id\":1}" $ETH_URL | jq -j .result) 13 | DATA_SIG=$(curl -s -S -X POST --data "{\"jsonrpc\":\"2.0\",\"method\":\"eth_sign\",\"params\":[\"$ADDRESS\", \"$(shasum -a 256 $SETUP_DIR/transcript${TRANSCRIPT}${TRANSCRIPT_POSTFIX}.dat | cut -f1 -d ' ')\"],\"id\":1}" $ETH_URL | jq -j .result) 14 | curl -s -S $API_URL/api/data/$ADDRESS/$TRANSCRIPT --upload-file $SETUP_DIR/transcript${TRANSCRIPT}${TRANSCRIPT_POSTFIX}.dat -H "X-Signature: $PING_SIG,$DATA_SIG" > /dev/null -------------------------------------------------------------------------------- /setup-mpc-client-bash/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | : ${SETUP_DIR=$(pwd)/setup_db} 5 | : ${TRANSCRIPTS=20} 6 | 7 | ../setup-tools/build/verify 100800000 1 5040000 0 $SETUP_DIR/transcript0.dat 8 | for TRANSCRIPT in $(seq 1 $[TRANSCRIPTS - 1]); do 9 | ../setup-tools/build/verify 100800000 1 5040000 $TRANSCRIPT $SETUP_DIR/transcript${TRANSCRIPT}.dat $SETUP_DIR/transcript0.dat $SETUP_DIR/transcript$[TRANSCRIPT -1].dat 10 | done -------------------------------------------------------------------------------- /setup-mpc-client/.dockerignore: -------------------------------------------------------------------------------- 1 | dest 2 | node_modules -------------------------------------------------------------------------------- /setup-mpc-client/.mbt.yml: -------------------------------------------------------------------------------- 1 | name: setup-mpc-client 2 | build: 3 | default: 4 | cmd: ../ci-scripts/build.sh 5 | args: 6 | - aztecprotocol/setup-mpc-client 7 | - '278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-tools' 8 | commands: 9 | deploy-public: 10 | cmd: ../ci-scripts/deploy-public.sh 11 | args: 12 | - setup-mpc-client 13 | dependencies: 14 | - setup-mpc-common 15 | - setup-tools 16 | -------------------------------------------------------------------------------- /setup-mpc-client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-tools:latest 2 | FROM 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common:latest 3 | 4 | FROM node:10 5 | WORKDIR /usr/src/setup-mpc-common 6 | COPY --from=1 /usr/src/setup-mpc-common . 7 | RUN yarn link 8 | WORKDIR /usr/src/setup-mpc-client 9 | COPY package.json yarn.lock ./ 10 | RUN yarn install 11 | COPY . . 
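# This node stage compiles the TypeScript; the final ubuntu stage below copies in the built client and the native setup binaries.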
12 | RUN yarn build 13 | 14 | FROM ubuntu:latest 15 | RUN apt update && \ 16 | apt install -y curl && \ 17 | curl -sL https://deb.nodesource.com/setup_10.x | bash - && \ 18 | curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && \ 19 | echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list && \ 20 | apt update && \ 21 | apt install -y nodejs yarn && \ 22 | apt clean 23 | COPY --from=0 /usr/src/setup-tools/setup /usr/src/setup-tools/setup 24 | COPY --from=0 /usr/src/setup-tools/setup-fast /usr/src/setup-tools/setup-fast 25 | RUN mkdir /usr/src/setup_db 26 | WORKDIR /usr/src/setup-mpc-common 27 | COPY --from=1 /usr/src/setup-mpc-common . 28 | RUN yarn link 29 | WORKDIR /usr/src/setup-mpc-client 30 | COPY --from=2 /usr/src/setup-mpc-client . 31 | RUN yarn link setup-mpc-common 32 | CMD ["yarn", "--silent", "start"] -------------------------------------------------------------------------------- /setup-mpc-client/client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ -n "$2" ]; then 3 | docker-compose run --rm -e EXIT_ON_COMPLETE=$3 -e API_URL=https://setup-staging.aztecprotocol.com/api -e PRIVATE_KEY=$1 --use-aliases --service-ports setup-mpc-client 4 | else 5 | docker-compose run --rm -e EXIT_ON_COMPLETE=$3 -e PRIVATE_KEY=$1 --use-aliases --service-ports setup-mpc-client 6 | fi 7 | -------------------------------------------------------------------------------- /setup-mpc-client/compute_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | ./client.sh 0xf90214f59d15c663d4d13b06b115838ee8a397af1dbf6535479a818d6e32a26a $1 $2 4 | ./client.sh 0xf64420705123ff35ad7c8129018603ba617967543ba08702d1786c54818daeb3 $1 $2 5 | ./client.sh 0x45957c9e386fc5eb87d295278c062a46949c91e9b3cfe25628be4a337374f448 $1 $2 6 | ./client.sh 0x920dd6d7d446ee9ca8abe4a380d8c2bebb39582e7d54af61fb1e83fdd4c925d6 $1 $2 7 | ./client.sh 0x3ce0bd0162b8575f67cbc587765443eddbf9d44571598b793327e2efcd66cba1 $1 $2 8 | ./client.sh 0x469844c8ae3d0a18ec3f52779cd0e6d87a3c0395176f575d50e5810a086cf318 $1 $2 9 | ./client.sh 0xa9f519cb7a3fff927a05408128e8c7c1127b4bccccd4b1a848c2389af3d98d44 $1 $2 10 | ./client.sh 0x36d89e1f7ee3de9a386b8740219e1498200929950a5fa74f61a34ae8f5cfd583 $1 $2 11 | ./client.sh 0x2e858c3290c0a01f8f392d4828cd07f864e3a2ae6e82b3ebad8ffa106f7904b7 $1 $2 12 | ./client.sh 0xa96bc82f643bbd0bc7fd26cb5d56a6c4f7bd9a9f7cac3f497618c2c26c15aad6 $1 $2 13 | -------------------------------------------------------------------------------- /setup-mpc-client/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | setup-mpc-client: 4 | build: . 
5 | image: aztecprotocol/setup-mpc-client 6 | tty: true 7 | environment: 8 | API_URL: 'http://setup-mpc-server/api' 9 | PRIVATE_KEY: '0xf90214f59d15c663d4d13b06b115838ee8a397af1dbf6535479a818d6e32a26a' 10 | volumes: 11 | - .:/usr/src/setup-mpc-client 12 | - ../setup-mpc-common:/usr/src/setup-mpc-client/node_modules/setup-mpc-common 13 | 14 | networks: 15 | default: 16 | external: 17 | name: setup_default 18 | -------------------------------------------------------------------------------- /setup-mpc-client/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "setup-mpc-client", 3 | "version": "1.0.0", 4 | "main": "dest/exports.js", 5 | "license": "MIT", 6 | "scripts": { 7 | "start": "node ./dest 2> ./err.log", 8 | "start:dev": "tsc-watch -p tsconfig.prod.json --onSuccess 'yarn start' 2> ./err.log", 9 | "build": "tsc -p tsconfig.prod.json", 10 | "postinstall": "yarn link setup-mpc-common" 11 | }, 12 | "devDependencies": { 13 | "@types/humanize-duration": "^3.18.0", 14 | "@types/isomorphic-fetch": "^0.0.35", 15 | "@types/node": "^12.6.2", 16 | "@types/progress-stream": "^2.0.0", 17 | "@types/terminal-kit": "^1.28.0", 18 | "tsc-watch": "^2.2.1", 19 | "tslint": "^5.18.0", 20 | "tslint-config-prettier": "^1.18.0", 21 | "typescript": "^3.5.3" 22 | }, 23 | "dependencies": { 24 | "chalk": "^2.4.2", 25 | "form-data": "^2.5.0", 26 | "humanize-duration": "^3.20.1", 27 | "isomorphic-fetch": "^2.2.1", 28 | "moment": "^2.24.0", 29 | "progress-stream": "^2.0.0", 30 | "terminal-kit": "^1.28.7", 31 | "web3x": "^4.0.3" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /setup-mpc-client/src/downloader.ts: -------------------------------------------------------------------------------- 1 | import { EventEmitter } from 'events'; 2 | import { createWriteStream, existsSync, statSync } from 'fs'; 3 | import progress from 'progress-stream'; 4 | import { MemoryFifo, MpcServer, Transcript } from 'setup-mpc-common'; 5 | 6 | const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms)); 7 | 8 | export class Downloader extends EventEmitter { 9 | private cancelled = false; 10 | private queue: MemoryFifo = new MemoryFifo(); 11 | 12 | constructor(private server: MpcServer) { 13 | super(); 14 | } 15 | 16 | public async run() { 17 | console.error('Downloader starting...'); 18 | while (true) { 19 | const transcript = await this.queue.get(); 20 | if (!transcript) { 21 | break; 22 | } 23 | await this.downloadTranscriptWithRetry(transcript); 24 | } 25 | console.error('Downloader complete.'); 26 | } 27 | 28 | public put(transcript: Transcript) { 29 | this.queue.put(transcript); 30 | } 31 | 32 | public end() { 33 | this.queue.end(); 34 | } 35 | 36 | public cancel() { 37 | this.cancelled = true; 38 | this.queue.cancel(); 39 | } 40 | 41 | public isDownloaded(transcript: Transcript) { 42 | const filename = `../setup_db/transcript${transcript.num}.dat`; 43 | if (existsSync(filename)) { 44 | const stat = statSync(filename); 45 | if (stat.size === transcript.size && transcript.downloaded === transcript.size) { 46 | return true; 47 | } 48 | } 49 | } 50 | 51 | private async downloadTranscriptWithRetry(transcript: Transcript) { 52 | while (!this.cancelled) { 53 | try { 54 | console.error(`Downloading transcript ${transcript.num}`); 55 | await this.downloadTranscript(transcript); 56 | this.emit('downloaded', transcript); 57 | break; 58 | } catch (err) { 59 | console.error(`Failed to download transcript 
${transcript.num}: ${err.message}`); 60 | await sleep(1000); 61 | } 62 | } 63 | } 64 | 65 | private async downloadTranscript(transcript: Transcript) { 66 | const filename = `../setup_db/transcript${transcript.num}.dat`; 67 | if (this.isDownloaded(transcript)) { 68 | return; 69 | } 70 | const readStream = await this.server.downloadData(transcript.fromAddress!, transcript.num); 71 | const progStream = progress({ length: transcript.size, time: 1000 }); 72 | const writeStream = createWriteStream(filename); 73 | 74 | progStream.on('progress', progress => { 75 | this.emit('progress', transcript, progress.transferred); 76 | }); 77 | 78 | return new Promise((resolve, reject) => { 79 | readStream.on('error', reject); 80 | progStream.on('error', reject); 81 | writeStream.on('error', reject); 82 | writeStream.on('finish', () => { 83 | if (this.isDownloaded(transcript)) { 84 | resolve(); 85 | } else { 86 | reject(new Error('File not fully downloaded.')); 87 | } 88 | }); 89 | readStream.pipe(progStream).pipe(writeStream); 90 | }); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /setup-mpc-client/src/exports.ts: -------------------------------------------------------------------------------- 1 | export * from './app'; 2 | // export * from './setup-mpc-common'; 3 | -------------------------------------------------------------------------------- /setup-mpc-client/src/index.ts: -------------------------------------------------------------------------------- 1 | import { HttpClient } from 'setup-mpc-common'; 2 | import { Account } from 'web3x/account'; 3 | import { hexToBuffer } from 'web3x/utils'; 4 | import { App } from './app'; 5 | 6 | async function main() { 7 | const { 8 | API_URL = 'https://ignition.aztecprotocol.com/api', 9 | PRIVATE_KEY = '', 10 | COMPUTE_OFFLINE = 0, 11 | EXIT_ON_COMPLETE = 0, 12 | } = process.env; 13 | 14 | if (!PRIVATE_KEY && !process.stdout.isTTY) { 15 | throw new Error('If spectating (no private key) you must run in interactive mode.'); 16 | } 17 | 18 | const myAccount = PRIVATE_KEY ? 
Account.fromPrivate(hexToBuffer(PRIVATE_KEY)) : undefined; 19 | const server = new HttpClient(API_URL, myAccount); 20 | const app = new App( 21 | server, 22 | myAccount, 23 | process.stdout, 24 | process.stdout.rows!, 25 | process.stdout.columns!, 26 | +COMPUTE_OFFLINE > 0, 27 | +EXIT_ON_COMPLETE > 0 || !process.stdout.isTTY 28 | ); 29 | 30 | app.start(); 31 | 32 | process.stdout.on('resize', () => app.resize(process.stdout.columns!, process.stdout.rows!)); 33 | 34 | const shutdown = () => { 35 | app.stop(); 36 | process.exit(0); 37 | }; 38 | process.once('SIGINT', shutdown); 39 | process.once('SIGTERM', shutdown); 40 | } 41 | 42 | main().catch(err => console.log(err.message)); 43 | -------------------------------------------------------------------------------- /setup-mpc-client/src/terminal-kit/index.ts: -------------------------------------------------------------------------------- 1 | import chalkmod from 'chalk'; 2 | import { Writable } from 'stream'; 3 | 4 | const options: any = { enabled: true, level: 2 }; 5 | const chalk = new chalkmod.constructor(options); 6 | 7 | export class TerminalKit { 8 | private x: number = 0; 9 | private y: number = 0; 10 | 11 | constructor(private stream: Writable, public height: number, public width: number) {} 12 | 13 | private getNewYPos(str: string) { 14 | let xPos = this.x; 15 | let yPos = this.y; 16 | for (const char of str) { 17 | if (char === '\n' || xPos === this.width) { 18 | xPos = 0; 19 | yPos += 1; 20 | } else { 21 | xPos += 1; 22 | } 23 | } 24 | return yPos; 25 | } 26 | 27 | public white(str: string = '') { 28 | this.y = this.getNewYPos(str); 29 | this.stream.write(chalk.white(str)); 30 | return this; 31 | } 32 | 33 | public yellow(str: string = '') { 34 | this.y = this.getNewYPos(str); 35 | this.stream.write(chalk.yellow(str)); 36 | return this; 37 | } 38 | 39 | public cyan(str: string = '') { 40 | this.y = this.getNewYPos(str); 41 | this.stream.write(chalk.cyan(str)); 42 | return this; 43 | } 44 | 45 | public red(str: string = '') { 46 | this.y = this.getNewYPos(str); 47 | this.stream.write(chalk.red(str)); 48 | return this; 49 | } 50 | 51 | public redBright(str: string = '') { 52 | this.y = this.getNewYPos(str); 53 | this.stream.write(chalk.redBright(str)); 54 | return this; 55 | } 56 | 57 | public blue(str: string = '') { 58 | this.y = this.getNewYPos(str); 59 | this.stream.write(chalk.blue(str)); 60 | return this; 61 | } 62 | 63 | public green(str: string = '') { 64 | this.y = this.getNewYPos(str); 65 | this.stream.write(chalk.green(str)); 66 | return this; 67 | } 68 | 69 | public magentaBright(str: string = '') { 70 | this.y = this.getNewYPos(str); 71 | this.stream.write(chalk.magentaBright(str)); 72 | return this; 73 | } 74 | 75 | public yellowBright(str: string = '') { 76 | this.y = this.getNewYPos(str); 77 | this.stream.write(chalk.yellowBright(str)); 78 | return this; 79 | } 80 | 81 | public grey(str: string = '') { 82 | this.y = this.getNewYPos(str); 83 | this.stream.write(chalk.gray(str)); 84 | return this; 85 | } 86 | 87 | public clear() { 88 | this.moveTo(0, 0); 89 | this.stream.write('\u001B[2J'); 90 | this.stream.write('\u001B[3J'); 91 | } 92 | 93 | public eraseLine() { 94 | this.moveTo(0, this.y); 95 | this.stream.write('\u001B[0K'); 96 | } 97 | 98 | public moveTo(x: number, y: number) { 99 | this.x = Math.min(x, this.width - 1); 100 | this.y = Math.min(y, this.height - 1); 101 | this.stream.write(`\u001B[${y + 1};${x + 1}H`); 102 | } 103 | 104 | public nextLine(n: number) { 105 | this.moveTo(0, this.y + n); 106 | } 107 | 
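  // Erases from the cursor to the end of the screen (CSI 0J).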
108 | public eraseDisplayBelow() { 109 | this.stream.write('\u001B[0J'); 110 | } 111 | 112 | public hideCursor(hide: boolean = true) { 113 | if (hide) { 114 | this.stream.write('\u001B[?25l'); 115 | } else { 116 | this.stream.write('\u001B[?25h'); 117 | } 118 | } 119 | 120 | public getCursorLocation() { 121 | return { 122 | x: this.x, 123 | y: this.y, 124 | }; 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /setup-mpc-client/src/uploader.ts: -------------------------------------------------------------------------------- 1 | import { EventEmitter } from 'events'; 2 | import { unlink } from 'fs'; 3 | import { MemoryFifo, MpcServer } from 'setup-mpc-common'; 4 | import { Address } from 'web3x/address'; 5 | 6 | const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms)); 7 | 8 | export class Uploader extends EventEmitter { 9 | private cancelled = false; 10 | private queue: MemoryFifo<number> = new MemoryFifo<number>(); 11 | 12 | constructor(private server: MpcServer, private address: Address) { 13 | super(); 14 | } 15 | 16 | public async run() { 17 | console.error('Uploader starting...'); 18 | while (true) { 19 | const num = await this.queue.get(); 20 | if (num === null) { 21 | break; 22 | } 23 | await this.uploadTranscriptWithRetry(num); 24 | } 25 | console.error('Uploader complete.'); 26 | } 27 | 28 | public put(transcriptNum: number) { 29 | this.queue.put(transcriptNum); 30 | } 31 | 32 | public cancel() { 33 | this.cancelled = true; 34 | this.queue.cancel(); 35 | } 36 | 37 | public end() { 38 | this.queue.end(); 39 | } 40 | 41 | private async uploadTranscriptWithRetry(num: number) { 42 | const filename = `../setup_db/transcript${num}_out.dat`; 43 | while (!this.cancelled) { 44 | try { 45 | console.error(`Uploading: `, filename); 46 | await this.server.uploadData(this.address, num, filename, undefined, transferred => { 47 | this.emit('progress', num, transferred); 48 | }); 49 | await new Promise(resolve => unlink(filename, resolve)); 50 | this.emit('uploaded', num); 51 | break; 52 | } catch (err) { 53 | console.error(`Failed to upload transcript ${num}: ${err.message}`); 54 | await sleep(1000); 55 | } 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /setup-mpc-client/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "esnext", 4 | "module": "commonjs", 5 | "moduleResolution": "node", 6 | "lib": ["dom", "esnext", "es2017.object"], 7 | "outDir": "dest", 8 | "strict": true, 9 | "noImplicitAny": true, 10 | "noImplicitThis": false, 11 | "esModuleInterop": true, 12 | "declaration": true 13 | }, 14 | "include": ["src"] 15 | } 16 | -------------------------------------------------------------------------------- /setup-mpc-client/tsconfig.prod.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ".", 3 | "exclude": ["**/*.test.*"] 4 | } 5 | -------------------------------------------------------------------------------- /setup-mpc-common/.dockerignore: -------------------------------------------------------------------------------- 1 | dest 2 | node_modules 3 | Dockerfile -------------------------------------------------------------------------------- /setup-mpc-common/.mbt.yml: -------------------------------------------------------------------------------- 1 | name: setup-mpc-common 2 | build: 3 | default: 4 | cmd: ../ci-scripts/build.sh 5 |
args: 6 | - 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common 7 | commands: 8 | deploy: 9 | cmd: ../ci-scripts/deploy.sh 10 | args: 11 | - setup-mpc-common 12 | -------------------------------------------------------------------------------- /setup-mpc-common/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:10 2 | WORKDIR /usr/src/setup-mpc-common 3 | COPY . . 4 | RUN yarn install && yarn test && yarn build && rm -rf node_modules && yarn install --production 5 | 6 | FROM node:10-alpine 7 | COPY --from=0 /usr/src/setup-mpc-common /usr/src/setup-mpc-common -------------------------------------------------------------------------------- /setup-mpc-common/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "setup-mpc-common", 3 | "version": "1.0.0", 4 | "main": "dest/index.js", 5 | "license": "MIT", 6 | "files": [ 7 | "dest" 8 | ], 9 | "scripts": { 10 | "build": "tsc -p tsconfig.prod.json", 11 | "test": "jest" 12 | }, 13 | "jest": { 14 | "transform": { 15 | "^.+\\.ts$": "ts-jest" 16 | }, 17 | "testRegex": ".*\\.test\\.(tsx?|js)$", 18 | "moduleFileExtensions": [ 19 | "ts", 20 | "tsx", 21 | "js", 22 | "jsx", 23 | "json", 24 | "node" 25 | ], 26 | "rootDir": "./src" 27 | }, 28 | "dependencies": { 29 | "@types/progress-stream": "^2.0.0", 30 | "isomorphic-fetch": "^2.2.1", 31 | "moment": "^2.24.0", 32 | "progress-stream": "^2.0.0", 33 | "web3x": "^4.0.3" 34 | }, 35 | "devDependencies": { 36 | "@types/isomorphic-fetch": "^0.0.35", 37 | "@types/jest": "^24.0.15", 38 | "@types/node": "^12.6.8", 39 | "jest": "^24.8.0", 40 | "ts-jest": "^24.0.2", 41 | "tslint": "^5.18.0", 42 | "tslint-config-prettier": "^1.18.0", 43 | "typescript": "^3.5.3" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /setup-mpc-common/src/fifo.ts: -------------------------------------------------------------------------------- 1 | export class MemoryFifo<T> { 2 | private waiting: ((item: T | null) => void)[] = []; 3 | private items: T[] = []; 4 | private flushing: boolean = false; 5 | 6 | public async length(): Promise<number> { 7 | return this.items.length; 8 | } 9 | 10 | public async get(timeout?: number): Promise<T | null> { 11 | if (this.items.length) { 12 | return Promise.resolve(this.items.shift()!); 13 | } 14 | 15 | if (this.items.length === 0 && this.flushing) { 16 | return Promise.resolve(null); 17 | } 18 | 19 | return new Promise<T | null>((resolve, reject) => { 20 | this.waiting.push(resolve); 21 | 22 | if (timeout) { 23 | setTimeout(() => { 24 | const index = this.waiting.findIndex(r => r === resolve); 25 | if (index > -1) { 26 | this.waiting.splice(index, 1); 27 | const err = new Error('Timeout getting item from queue.'); 28 | reject(err); 29 | } 30 | }, timeout * 1000); 31 | } 32 | }); 33 | } 34 | 35 | public async put(item: T) { 36 | if (this.flushing) { 37 | return; 38 | } else if (this.waiting.length) { 39 | this.waiting.shift()!(item); 40 | } else { 41 | this.items.push(item); 42 | } 43 | } 44 | 45 | public end() { 46 | this.flushing = true; 47 | this.waiting.forEach(resolve => resolve(null)); 48 | } 49 | 50 | public cancel() { 51 | this.flushing = true; 52 | this.items = []; 53 | this.waiting.forEach(resolve => resolve(null)); 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /setup-mpc-common/src/hash-files.test.ts: -------------------------------------------------------------------------------- 1 | import {
Readable } from 'stream'; 2 | import { hashStreams } from './hash-files'; 3 | 4 | describe('hash-files', () => { 5 | it('should create correct hash', async () => { 6 | const file1 = new Readable(); 7 | const file2 = new Readable(); 8 | 9 | file1.push('somejunk1'); 10 | file1.push(null); 11 | 12 | file2.push('somejunk2'); 13 | file2.push(null); 14 | 15 | const hash = await hashStreams([file1, file2]); 16 | expect(hash.toString('hex')).toBe('227a2c8dc5f3e429ce95820c613385e9bf8b9e44092b5f89b887419198c50efa'); 17 | }); 18 | }); 19 | -------------------------------------------------------------------------------- /setup-mpc-common/src/hash-files.ts: -------------------------------------------------------------------------------- 1 | import { createHash } from 'crypto'; 2 | import { createReadStream } from 'fs'; 3 | import { Readable } from 'stream'; 4 | 5 | export function hashFiles(paths: string[]) { 6 | return hashStreams(paths.map(p => createReadStream(p))); 7 | } 8 | 9 | export function hashStreams(streams: Readable[]) { 10 | return new Promise(resolve => { 11 | const hash = createHash('sha256'); 12 | 13 | hash.on('readable', () => { 14 | resolve(hash.read() as Buffer); 15 | }); 16 | 17 | const pipeNext = () => { 18 | const s = streams.shift(); 19 | if (!s) { 20 | hash.end(); 21 | } else { 22 | s.pipe( 23 | hash, 24 | { end: false } 25 | ); 26 | s.on('end', pipeNext); 27 | } 28 | }; 29 | 30 | pipeNext(); 31 | }); 32 | } 33 | -------------------------------------------------------------------------------- /setup-mpc-common/src/index.ts: -------------------------------------------------------------------------------- 1 | export * from './mpc-server'; 2 | export * from './http-client'; 3 | export * from './hash-files'; 4 | export * from './fifo'; 5 | export * from './mpc-state'; 6 | -------------------------------------------------------------------------------- /setup-mpc-common/src/mpc-state.ts: -------------------------------------------------------------------------------- 1 | import moment from 'moment'; 2 | import { Address } from 'web3x/address'; 3 | import { MpcState, Participant } from './mpc-server'; 4 | 5 | export function mpcStateFromJSON(json: any): MpcState { 6 | const { startTime, completedAt, participants, ...rest } = json; 7 | 8 | return { 9 | ...rest, 10 | startTime: moment(startTime), 11 | completedAt: completedAt ? moment(completedAt) : undefined, 12 | participants: participants.map(({ startedAt, lastUpdate, completedAt, address, transcripts, ...rest }: any) => ({ 13 | ...rest, 14 | startedAt: startedAt ? moment(startedAt) : undefined, 15 | lastUpdate: lastUpdate ? moment(lastUpdate) : undefined, 16 | completedAt: completedAt ? moment(completedAt) : undefined, 17 | address: Address.fromString(address), 18 | transcripts: transcripts.map(({ fromAddress, ...rest }: any) => ({ 19 | ...rest, 20 | fromAddress: fromAddress ? 
Address.fromString(fromAddress) : undefined, 21 | })), 22 | })), 23 | }; 24 | } 25 | 26 | export function cloneParticipant(participant: Participant): Participant { 27 | return { 28 | ...participant, 29 | transcripts: participant.transcripts.map(t => ({ ...t })), 30 | }; 31 | } 32 | 33 | export function cloneMpcState(state: MpcState): MpcState { 34 | return { 35 | ...state, 36 | participants: state.participants.map(cloneParticipant), 37 | }; 38 | } 39 | 40 | export function applyDelta(state: MpcState, delta: MpcState): MpcState { 41 | const participants = [...state.participants]; 42 | delta.participants.forEach(p => (participants[p.position - 1] = p)); 43 | return { 44 | ...delta, 45 | participants, 46 | }; 47 | } 48 | -------------------------------------------------------------------------------- /setup-mpc-common/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "esnext", 4 | "module": "commonjs", 5 | "moduleResolution": "node", 6 | "lib": ["dom", "esnext", "es2017.object"], 7 | "outDir": "dest", 8 | "strict": true, 9 | "noImplicitAny": true, 10 | "noImplicitThis": false, 11 | "esModuleInterop": true, 12 | "declaration": true 13 | }, 14 | "include": ["src"] 15 | } 16 | -------------------------------------------------------------------------------- /setup-mpc-common/tsconfig.prod.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ".", 3 | "exclude": ["**/*.test.*"] 4 | } 5 | -------------------------------------------------------------------------------- /setup-mpc-map/.dockerignore: -------------------------------------------------------------------------------- 1 | dist 2 | node_modules 3 | Dockerfile 4 | **/.terraform -------------------------------------------------------------------------------- /setup-mpc-map/.mbt.yml: -------------------------------------------------------------------------------- 1 | name: setup-mpc-map 2 | build: 3 | default: 4 | cmd: ../ci-scripts/build.sh 5 | args: 6 | - 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-map 7 | - 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common 8 | commands: 9 | deploy: 10 | cmd: ../ci-scripts/deploy.sh 11 | args: 12 | - setup-mpc-map 13 | dependencies: 14 | - setup-mpc-common 15 | -------------------------------------------------------------------------------- /setup-mpc-map/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common:latest 2 | 3 | FROM node:10-alpine 4 | WORKDIR /usr/src/setup-mpc-common 5 | COPY --from=0 /usr/src/setup-mpc-common . 6 | RUN yarn link 7 | WORKDIR /usr/src/setup-mpc-map 8 | COPY . . 
9 | RUN yarn install && yarn build && rm -rf node_modules && yarn install --production && yarn cache clean 10 | CMD ["yarn", "start"] -------------------------------------------------------------------------------- /setup-mpc-map/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "setup-mpc-map", 3 | "version": "1.0.0", 4 | "main": "dist/index.js", 5 | "license": "MIT", 6 | "scripts": { 7 | "build": "webpack --config webpack.prod.js", 8 | "start": "node ./server", 9 | "start:dev": "webpack-dev-server --config webpack.dev.js", 10 | "postinstall": "yarn link setup-mpc-common" 11 | }, 12 | "dependencies": { 13 | "koa": "^2.5.2", 14 | "koa-proxy": "^1.0.0-alpha.3", 15 | "koa-static": "^5.0.0" 16 | }, 17 | "devDependencies": { 18 | "@types/node": "^12.7.1", 19 | "cesium": "^1.39.0", 20 | "copy-webpack-plugin": "^5.0.4", 21 | "css-loader": "^3.2.0", 22 | "html-webpack-plugin": "^3.2.0", 23 | "moment": "^2.24.0", 24 | "strip-pragma-loader": "^1.0.0", 25 | "style-loader": "^1.0.0", 26 | "ts-loader": "^6.0.4", 27 | "tslint": "^5.19.0", 28 | "tslint-config-prettier": "^1.18.0", 29 | "typescript": "^3.5.3", 30 | "uglifyjs-webpack-plugin": "^1.0.0-beta.3", 31 | "url-loader": "^0.6.2", 32 | "webpack": "^4.16.0", 33 | "webpack-cli": "^3.0.8", 34 | "webpack-dev-server": "^3.1.4" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /setup-mpc-map/server/index.js: -------------------------------------------------------------------------------- 1 | const serve = require('koa-static'); 2 | const Koa = require('koa'); 3 | const proxy = require('koa-proxy'); 4 | 5 | const { PORT = '80' } = process.env; 6 | 7 | const shutdown = () => process.exit(0); 8 | process.once('SIGINT', shutdown); 9 | process.once('SIGTERM', shutdown); 10 | 11 | const app = new Koa(); 12 | 13 | // When running locally, calls to the api will hit this server rather 14 | // than being handled by the ALB. We forward the api calls onto the server here. 
15 | if (process.env.NODE_ENV !== 'production') { 16 | app.use( 17 | proxy({ 18 | host: 'http://setup-mpc-server', 19 | match: /^\/api\//, 20 | }) 21 | ); 22 | } 23 | 24 | app.use(serve('dist')).listen(PORT); 25 | 26 | console.log(`Server listening on port ${PORT}`); 27 | -------------------------------------------------------------------------------- /setup-mpc-map/src/index.ts: -------------------------------------------------------------------------------- 1 | import 'cesium/Widgets/widgets.css'; 2 | import { HttpClient } from 'setup-mpc-common'; 3 | import { Coordinator } from './coordinator'; 4 | import './css/main.css'; 5 | import { Viewer } from './viewer'; 6 | 7 | async function main() { 8 | if (window.location.search) { 9 | window.location.search = ''; 10 | } 11 | const viewer = new Viewer(); 12 | const url = window.location; 13 | const apiUrl = `${url.protocol}//${url.hostname}:${url.port}/api`; 14 | const httpClient = new HttpClient(apiUrl); 15 | const coordinator = new Coordinator(viewer, httpClient); 16 | coordinator.start(); 17 | } 18 | 19 | main().catch(console.error); 20 | -------------------------------------------------------------------------------- /setup-mpc-map/src/marker.ts: -------------------------------------------------------------------------------- 1 | import Cesium from 'cesium/Cesium'; 2 | 3 | export class Marker { 4 | private markerClock = new Cesium.Clock(); 5 | 6 | constructor(private position: Cesium.Cartesian3) {} 7 | 8 | public tick() { 9 | this.markerClock.tick(); 10 | } 11 | 12 | public getEntity(): Partial { 13 | const start = Cesium.JulianDate.fromIso8601('2018-01-01T00:00:00.00Z'); 14 | const stop = Cesium.JulianDate.addSeconds(start, 1, new Cesium.JulianDate()); 15 | 16 | this.markerClock.startTime = start; 17 | this.markerClock.currentTime = start; 18 | this.markerClock.stopTime = stop; 19 | this.markerClock.clockRange = Cesium.ClockRange.LOOP_STOP; 20 | this.markerClock.shouldAnimate = true; 21 | 22 | const pulseProperty = new Cesium.SampledProperty(Number); 23 | pulseProperty.setInterpolationOptions({ 24 | interpolationDegree: 3, 25 | interpolationAlgorithm: Cesium.HermitePolynomialApproximation, 26 | }); 27 | pulseProperty.addSample(start, 1); 28 | pulseProperty.addSample(stop, 60000); 29 | 30 | const alphaProperty = new Cesium.SampledProperty(Number); 31 | alphaProperty.setInterpolationOptions({ 32 | interpolationDegree: 3, 33 | interpolationAlgorithm: Cesium.HermitePolynomialApproximation, 34 | }); 35 | alphaProperty.addSample(start, 0.9); 36 | alphaProperty.addSample(stop, 0); 37 | 38 | const pulsedSize = new Cesium.CallbackProperty(() => { 39 | return pulseProperty.getValue(this.markerClock.currentTime); 40 | }, false); 41 | 42 | const alphaColor = new Cesium.CallbackProperty(() => { 43 | return Cesium.Color.RED.withAlpha(alphaProperty.getValue(this.markerClock.currentTime)); 44 | }, false); 45 | 46 | return { 47 | position: this.position, 48 | name: 'Red circle on surface with outline', 49 | ellipse: { 50 | semiMinorAxis: pulsedSize, 51 | semiMajorAxis: pulsedSize, 52 | material: new Cesium.ColorMaterialProperty(alphaColor), 53 | outline: true, 54 | outlineColor: Cesium.Color.RED, 55 | }, 56 | }; 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /setup-mpc-map/src/shooting-stars.ts: -------------------------------------------------------------------------------- 1 | import Cesium from 'cesium/Cesium'; 2 | import { LatLon } from './viewer'; 3 | 4 | export class ShootingStars { 5 | private 
entities: Cesium.Entity[] = []; 6 | 7 | constructor(locations: LatLon[], viewer: Cesium.Viewer) { 8 | const baseTime = viewer.clock.startTime; 9 | 10 | // Stick first location on the end, so we get a loop. Remove duplicate contiguous locations. 11 | locations = [...locations, locations[0]].filter( 12 | (el, i, a) => i === 0 || (el.lat !== a[i - 1].lat || el.lon !== a[i - 1].lon) 13 | ); 14 | 15 | if (locations.length < 2) { 16 | return; 17 | } 18 | 19 | viewer.clock.stopTime = Cesium.JulianDate.addSeconds(baseTime, 3 * (locations.length - 1), new Cesium.JulianDate()); 20 | 21 | for (let i = 1; i < locations.length; ++i) { 22 | const currentTime = Cesium.JulianDate.addSeconds(baseTime, 3 * (i - 1), new Cesium.JulianDate()); 23 | this.entities.push( 24 | this.createShootingStarPathEntity(locations[i - 1], locations[i], currentTime, viewer.scene.globe.ellipsoid) 25 | ); 26 | } 27 | } 28 | 29 | private createShootingStarPathEntity( 30 | start: LatLon, 31 | end: LatLon, 32 | startTime: Cesium.JulianDate, 33 | ellipsoid: Cesium.Ellipsoid 34 | ) { 35 | const q1Time = Cesium.JulianDate.addSeconds(startTime, 0.25, new Cesium.JulianDate()); 36 | const midTime = Cesium.JulianDate.addSeconds(startTime, 0.5, new Cesium.JulianDate()); 37 | const q2Time = Cesium.JulianDate.addSeconds(startTime, 0.75, new Cesium.JulianDate()); 38 | const endTime = Cesium.JulianDate.addSeconds(startTime, 1, new Cesium.JulianDate()); 39 | 40 | const property = new Cesium.SampledPositionProperty(); 41 | const startPoint = Cesium.Cartographic.fromDegrees(start.lon, start.lat, 0); 42 | const endPoint = Cesium.Cartographic.fromDegrees(end.lon, end.lat, 0); 43 | 44 | const geodesic = new Cesium.EllipsoidGeodesic(startPoint, endPoint); 45 | const q1Point = geodesic.interpolateUsingFraction(0.25, new Cesium.Cartographic()); 46 | q1Point.height = 200000; 47 | const midPoint = geodesic.interpolateUsingFraction(0.5, new Cesium.Cartographic()); 48 | midPoint.height = 300000; 49 | const q2Point = geodesic.interpolateUsingFraction(0.75, new Cesium.Cartographic()); 50 | q2Point.height = 200000; 51 | 52 | property.addSample(startTime, ellipsoid.cartographicToCartesian(startPoint)); 53 | property.addSample(q1Time, ellipsoid.cartographicToCartesian(q1Point)); 54 | property.addSample(midTime, ellipsoid.cartographicToCartesian(midPoint)); 55 | property.addSample(q2Time, ellipsoid.cartographicToCartesian(q2Point)); 56 | property.addSample(endTime, ellipsoid.cartographicToCartesian(endPoint)); 57 | property.setInterpolationOptions({ 58 | interpolationDegree: 5, 59 | interpolationAlgorithm: Cesium.LagrangePolynomialApproximation, 60 | }); 61 | 62 | // Create an Entity to show the arc. 
63 | const arcEntity = new Cesium.Entity({ 64 | position: property, 65 | path: new Cesium.PathGraphics({ 66 | resolution: 0.05, 67 | material: new Cesium.PolylineGlowMaterialProperty({ 68 | glowPower: 0.16, 69 | taperPower: 0.1, 70 | color: Cesium.Color.CORNFLOWERBLUE, 71 | }), 72 | width: 10, 73 | leadTime: 0, 74 | trailTime: 0.5, 75 | }), 76 | }); 77 | 78 | return arcEntity; 79 | } 80 | 81 | public getEntities() { 82 | return this.entities; 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /setup-mpc-map/terraform/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "aztec-terraform" 4 | key = "setup/setup-mpc-map" 5 | region = "eu-west-2" 6 | } 7 | } 8 | 9 | data "terraform_remote_state" "setup_iac" { 10 | backend = "s3" 11 | config = { 12 | bucket = "aztec-terraform" 13 | key = "setup/setup-iac" 14 | region = "eu-west-2" 15 | } 16 | } 17 | 18 | provider "aws" { 19 | profile = "default" 20 | region = "eu-west-2" 21 | } 22 | 23 | resource "aws_service_discovery_service" "setup_mpc_map" { 24 | name = "setup-mpc-map" 25 | 26 | health_check_custom_config { 27 | failure_threshold = 1 28 | } 29 | 30 | dns_config { 31 | namespace_id = "${data.terraform_remote_state.setup_iac.outputs.local_service_discovery_id}" 32 | 33 | dns_records { 34 | ttl = 10 35 | type = "A" 36 | } 37 | 38 | routing_policy = "MULTIVALUE" 39 | } 40 | } 41 | 42 | resource "aws_ecs_task_definition" "setup_mpc_map" { 43 | family = "setup-mpc-map" 44 | requires_compatibilities = ["FARGATE"] 45 | network_mode = "awsvpc" 46 | cpu = "256" 47 | memory = "512" 48 | execution_role_arn = "${data.terraform_remote_state.setup_iac.outputs.ecs_task_execution_role_arn}" 49 | 50 | container_definitions = <` 76 | 77 | Add a participant with tier level 2. 78 | 79 | `X-Signature` 80 | 81 | The text `SignMeWithYourPrivateKey`, signed by the admin address. 82 | 83 | ### Update User Progress 84 | 85 | `PATCH /api/participant/
<address>` 86 | 87 | Updates telemetry on a participant's progress. 88 | 89 | `Body` 90 | 91 | ``` 92 | { 93 | "runningState": "RUNNING", 94 | "computeProgress": 10.12, 95 | "transcripts": [ 96 | { 97 | "size": 1000000, 98 | "downloaded": 1000, 99 | "uploaded": 0 100 | } 101 | ] 102 | } 103 | ``` 104 | 105 | `X-Signature` 106 | 107 | The body as returned by `JSON.stringify`, signed by the participant address. 108 | 109 | ### Ping User Online 110 | 111 | `GET /api/ping/
<address>` 112 | 113 | Marks a participant as online. Must be called at least once every 10 seconds to keep a user marked as online. 114 | 115 | `X-Signature` 116 | 117 | The word `ping`, signed by the participant address. 118 | 119 | ### Download Transcript Signature 120 | 121 | `GET /api/signature/
<address>/<num>` 122 | 123 | Downloads a given participant's transcript signature. 124 | 125 | ### Download Transcript 126 | 127 | `GET /api/data/
<address>/<num>` 128 | 129 | Downloads a given participant's transcript. 130 | 131 | ### Upload Transcript 132 | 133 | `PUT /api/data/
<address>/<num>` 134 | 135 | Uploads a given participant's transcript. 136 | 137 | `Body` 138 | 139 | The transcript file. 140 | 141 | `X-Signature` 142 | 143 | Two signatures, comma-delimited. The first is the word `ping`, signed by the participant address. The second is the SHA256 sum of the transcript file, signed by the participant address. 144 | -------------------------------------------------------------------------------- /setup-mpc-server/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | setup-mpc-server: 4 | build: . 5 | image: 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-server 6 | command: yarn start 7 | environment: 8 | NODE_ENV: production 9 | ports: 10 | - '8081:80' 11 | volumes: 12 | - .:/usr/src/setup-mpc-server 13 | - ../setup-mpc-common:/usr/src/setup-mpc-server/node_modules/setup-mpc-common 14 | -------------------------------------------------------------------------------- /setup-mpc-server/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "setup-mpc-server", 3 | "version": "1.0.4", 4 | "main": "dest/index.js", 5 | "license": "MIT", 6 | "scripts": { 7 | "build": "tsc -p tsconfig.prod.json && ln -f ./src/maxmind/GeoLite2-City.mmdb ./dest/maxmind/GeoLite2-City.mmdb", 8 | "start": "node ./dest", 9 | "start:dev": "tsc-watch -p tsconfig.prod.json --onSuccess 'yarn start'", 10 | "test": "jest --silent", 11 | "postinstall": "yarn link setup-mpc-common" 12 | }, 13 | "jest": { 14 | "transform": { 15 | "^.+\\.ts$": "ts-jest" 16 | }, 17 | "testRegex": ".*\\.test\\.(tsx?|js)$", 18 | "moduleFileExtensions": [ 19 | "ts", 20 | "tsx", 21 | "js", 22 | "jsx", 23 | "json", 24 | "node" 25 | ], 26 | "rootDir": "./src" 27 | }, 28 | "dependencies": { 29 | "@koa/cors": "^3.0.0", 30 | "@types/bn.js": "^4.11.5", 31 | "@types/node": "^12.6.8", 32 | "async-mutex": "^0.1.3", 33 | "aws-sdk": "^2.526.0", 34 | "bn.js": "^5.0.0", 35 | "form-data": "^2.5.0", 36 | "isomorphic-fetch": "^2.2.1", 37 | "koa": "^2.7.0", 38 | "koa-body": "^4.1.0", 39 | "koa-compress": "^3.0.0", 40 | "koa-router": "^7.4.0", 41 | "maxmind-db-reader": "^0.2.1", 42 | "moment": "^2.24.0", 43 | "path": "^0.12.7", 44 | "q": "^1.5.1", 45 | "seedrandom": "^3.0.3", 46 | "stream-meter": "^1.0.4", 47 | "tsc-watch": "^2.2.1", 48 | "typescript": "^3.5.3", 49 | "web3x": "^4.0.3" 50 | }, 51 | "devDependencies": { 52 | "@types/isomorphic-fetch": "^0.0.35", 53 | "@types/jest": "^24.0.15", 54 | "@types/koa": "^2.0.49", 55 | "@types/koa-compress": "^2.0.9", 56 | "@types/koa-router": "^7.0.42", 57 | "@types/seedrandom": "^2.4.28", 58 | "@types/stream-meter": "^0.0.21", 59 | "@types/supertest": "^2.0.8", 60 | "jest": "^24.8.0", 61 | "supertest": "^4.0.2", 62 | "ts-jest": "^24.0.2", 63 | "tslint": "^5.18.0", 64 | "tslint-config-prettier": "^1.18.0" 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /setup-mpc-server/src/app.test.ts: -------------------------------------------------------------------------------- 1 | import { createHash } from 'crypto'; 2 | import moment from 'moment'; 3 | import { MpcServer } from 'setup-mpc-common'; 4 | import request from 'supertest'; 5 | import { Account } from 'web3x/account'; 6 | import { bufferToHex, hexToBuffer } from 'web3x/utils'; 7 | import { appFactory } from './app'; 8 | import { createParticipant } from './state/create-participant'; 9 | import { defaultState } from './state/default-state'; 10 | 11 | type Mockify<T>
= { [P in keyof T]: jest.Mock }; 12 | 13 | describe('app', () => { 14 | const account = Account.fromPrivate( 15 | hexToBuffer('0xf94ac892bbe482ca01cc43cce0f467d63baef67e37428209f8193fdc0e6d9013') 16 | ); 17 | const { signature: pingSig } = account.sign('ping'); 18 | let app: any; 19 | let mockServer: Mockify<MpcServer>; 20 | 21 | beforeEach(() => { 22 | mockServer = { 23 | getState: jest.fn(), 24 | resetState: jest.fn(), 25 | loadState: jest.fn(), 26 | patchState: jest.fn(), 27 | addParticipant: jest.fn(), 28 | updateParticipant: jest.fn(), 29 | downloadData: jest.fn(), 30 | downloadSignature: jest.fn(), 31 | uploadData: jest.fn(), 32 | ping: jest.fn(), 33 | flushWaiting: jest.fn(), 34 | }; 35 | 36 | const state = defaultState(1234); 37 | const participant = createParticipant(0, moment(), 0, 1, account.address); 38 | participant.state = 'RUNNING'; 39 | state.participants.push(participant); 40 | mockServer.getState.mockResolvedValue(state); 41 | 42 | const mockParticipantSelector = { 43 | getCurrentBlockHeight: jest.fn(), 44 | }; 45 | app = appFactory(mockServer as any, account.address, mockParticipantSelector as any, undefined, '/tmp', 32); 46 | }); 47 | 48 | describe('GET /', () => { 49 | it('should return 200', async () => { 50 | const response = await request(app.callback()) 51 | .get('/') 52 | .send(); 53 | expect(response.status).toBe(200); 54 | }); 55 | }); 56 | 57 | describe('PUT /data', () => { 58 | it('should return 401 with no signature header', async () => { 59 | const response = await request(app.callback()) 60 | .put(`/data/${account.address}/0`) 61 | .send(); 62 | expect(response.status).toBe(401); 63 | expect(response.body.error).toMatch(/X-Signature/); 64 | }); 65 | 66 | it('should return 401 with transcript number out of range', async () => { 67 | const response = await request(app.callback()) 68 | .put(`/data/${account.address}/30`) 69 | .set('X-Signature', `${pingSig},placeholder2`) 70 | .send(); 71 | expect(response.status).toBe(401); 72 | expect(response.body.error).toMatch(/out of range/); 73 | }); 74 | 75 | it('should return 401 with bad signature', async () => { 76 | const body = 'hello world'; 77 | const badSig = `${pingSig},0x76195abb935b441f1553b2f6c60d272de5a56391dfcca8cf22399c4cb600dd26188a4f003176ccdf7f314cbe08740bf7414fadef0e74cb42e94745a836e9dd311d`; 78 | 79 | const response = await request(app.callback()) 80 | .put(`/data/${account.address}/0`) 81 | .set('X-Signature', badSig) 82 | .send(body); 83 | expect(response.status).toBe(401); 84 | expect(response.body.error).toMatch(/does not match X-Signature/); 85 | }); 86 | 87 | it('should return 429 with body length exceeding limit', async () => { 88 | const body = '000000000000000000000000000000000'; 89 | 90 | const response = await request(app.callback()) 91 | .put(`/data/${account.address}/0`) 92 | .set('X-Signature', `${pingSig},placeholder2`) 93 | .send(body); 94 | expect(response.status).toBe(429); 95 | expect(response.body.error).toMatch(/Stream exceeded/); 96 | }); 97 | 98 | it('should return 200 on success', async () => { 99 | const body = 'hello world'; 100 | const hash = createHash('sha256') 101 | .update(body) 102 | .digest(); 103 | const { signature: dataSig } = account.sign(bufferToHex(hash)); 104 | 105 | const response = await request(app.callback()) 106 | .put(`/data/${account.address}/0`) 107 | .set('X-Signature', `${pingSig},${dataSig}`) 108 | .send(body); 109 | expect(response.status).toBe(200); 110 | }); 111 | }); 112 | }); 113 |
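For illustration, a minimal client-side sketch of how the two-part `X-Signature` header exercised by the tests above (and documented under "Upload Transcript" in the README) can be constructed. `buildUploadSignature` is a hypothetical helper, not an export of this repo; it assumes the same `web3x` account type used in the test:

```ts
// Hypothetical helper (not part of the repo): builds the comma-delimited
// X-Signature header value for PUT /api/data/<address>/<num>.
import { createHash } from 'crypto';
import { readFileSync } from 'fs';
import { Account } from 'web3x/account';
import { bufferToHex } from 'web3x/utils';

export function buildUploadSignature(account: Account, transcriptPath: string): string {
  // First part: the fixed word 'ping', signed by the participant address.
  const { signature: pingSig } = account.sign('ping');
  // Second part: the hex-encoded SHA256 digest of the transcript file, signed.
  const hash = createHash('sha256')
    .update(readFileSync(transcriptPath))
    .digest();
  const { signature: dataSig } = account.sign(bufferToHex(hash));
  return `${pingSig},${dataSig}`;
}
```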
-------------------------------------------------------------------------------- /setup-mpc-server/src/fs-async.ts: -------------------------------------------------------------------------------- 1 | import { access, copyFile, exists, mkdir, readdir, readFile, rename, rmdir, stat, unlink, writeFile } from 'fs'; 2 | import { promisify } from 'util'; 3 | 4 | export const accessAsync = promisify(access); 5 | export const existsAsync = promisify(exists); 6 | export const renameAsync = promisify(rename); 7 | export const mkdirAsync = promisify(mkdir); 8 | export const unlinkAsync = promisify(unlink); 9 | export const writeFileAsync = promisify(writeFile); 10 | export const readFileAsync = promisify(readFile); 11 | export const readdirAsync = promisify(readdir); 12 | export const rmdirAsync = promisify(rmdir); 13 | export const statAsync = promisify(stat); 14 | export const copyFileAsync = promisify(copyFile); 15 | -------------------------------------------------------------------------------- /setup-mpc-server/src/index.ts: -------------------------------------------------------------------------------- 1 | import http from 'http'; 2 | import { Address } from 'web3x/address'; 3 | import { appFactory } from './app'; 4 | import { mkdirAsync } from './fs-async'; 5 | import { ParticipantSelectorFactory } from './participant-selector'; 6 | import { RangeProofPublisherFactory } from './range-proof-publisher'; 7 | import { Server } from './server'; 8 | import { DiskStateStore } from './state-store'; 9 | import { defaultState } from './state/default-state'; 10 | import { DiskTranscriptStoreFactory } from './transcript-store'; 11 | 12 | const { PORT = 80, STORE_PATH = './store', INFURA_API_KEY = '', JOB_SERVER_HOST = 'job-server' } = process.env; 13 | 14 | async function main() { 15 | const shutdown = async () => process.exit(0); 16 | process.once('SIGINT', shutdown); 17 | process.once('SIGTERM', shutdown); 18 | 19 | const adminAddress = Address.fromString('0x3a9b2101bff555793b85493b5171451fa00124c8'); 20 | const participantSelectorFactory = new ParticipantSelectorFactory(adminAddress, INFURA_API_KEY); 21 | const latestBlock = await participantSelectorFactory.getCurrentBlockHeight('ropsten'); 22 | const defaults = defaultState(latestBlock); 23 | const stateStore = new DiskStateStore(STORE_PATH + '/state', defaults); 24 | const transcriptStoreFactory = new DiskTranscriptStoreFactory(STORE_PATH); 25 | const rangeProofPublisherFactory = new RangeProofPublisherFactory(JOB_SERVER_HOST); 26 | 27 | const server = new Server(transcriptStoreFactory, stateStore, participantSelectorFactory, rangeProofPublisherFactory); 28 | await server.start(); 29 | 30 | const tmpPath = STORE_PATH + '/tmp'; 31 | await mkdirAsync(tmpPath, { recursive: true }); 32 | const app = appFactory(server, adminAddress, participantSelectorFactory, '/api', tmpPath); 33 | 34 | const httpServer = http.createServer(app.callback()); 35 | httpServer.listen(PORT); 36 | console.log(`Server listening on port ${PORT}.`); 37 | } 38 | 39 | main().catch(console.log); 40 | -------------------------------------------------------------------------------- /setup-mpc-server/src/maxmind/GeoLite2-City.mmdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AztecProtocol/Setup/ce51e843674493723ee7151d856cbf3599a57b53/setup-mpc-server/src/maxmind/GeoLite2-City.mmdb -------------------------------------------------------------------------------- /setup-mpc-server/src/maxmind/index.ts: 
-------------------------------------------------------------------------------- 1 | const mmdbreader = require('maxmind-db-reader'); 2 | const cities = mmdbreader.openSync(__dirname + '/GeoLite2-City.mmdb'); 3 | 4 | export interface GeoData { 5 | city?: string; 6 | country?: string; 7 | continent?: string; 8 | latitude?: number; 9 | longitude?: number; 10 | } 11 | 12 | export function getGeoData(ip: string) { 13 | try { 14 | const data = cities.getGeoDataSync(ip); 15 | if (!data) { 16 | return; 17 | } 18 | const geoData: GeoData = {}; 19 | if (data.city) { 20 | geoData.city = data.city.names.en; 21 | } 22 | if (data.country) { 23 | geoData.country = data.country.names.en; 24 | } 25 | if (data.continent) { 26 | geoData.continent = data.continent.names.en; 27 | } 28 | if (data.location) { 29 | geoData.latitude = data.location.latitude; 30 | geoData.longitude = data.location.longitude; 31 | } 32 | return geoData; 33 | } catch (e) { 34 | return; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /setup-mpc-server/src/participant-selector.ts: -------------------------------------------------------------------------------- 1 | import { EventEmitter } from 'events'; 2 | import { EthNet } from 'setup-mpc-common'; 3 | import { Address } from 'web3x/address'; 4 | import { Eth } from 'web3x/eth'; 5 | import { HttpProvider } from 'web3x/providers'; 6 | 7 | export class ParticipantSelectorFactory { 8 | constructor(private signupAddress: Address, private projectId: string) {} 9 | 10 | public create(ethNet: EthNet, startBlock: number, selectBlock: number) { 11 | return new ParticipantSelector(ethNet, this.signupAddress, startBlock, selectBlock, this.projectId); 12 | } 13 | 14 | public async getCurrentBlockHeight(ethNet: EthNet) { 15 | const provider = new HttpProvider(`https://${ethNet}.infura.io/v3/${this.projectId}`); 16 | const eth = new Eth(provider); 17 | return await eth.getBlockNumber(); 18 | } 19 | } 20 | 21 | export class ParticipantSelector extends EventEmitter { 22 | private provider: HttpProvider; 23 | private eth: Eth; 24 | private cancelled = false; 25 | 26 | constructor( 27 | ethNet: EthNet, 28 | private signupAddress: Address, 29 | private startBlock: number, 30 | private selectBlock: number, 31 | private projectId: string 32 | ) { 33 | super(); 34 | 35 | this.provider = new HttpProvider(`https://${ethNet}.infura.io/v3/${this.projectId}`); 36 | this.eth = new Eth(this.provider); 37 | } 38 | 39 | public async run() { 40 | console.log('Block processor starting...'); 41 | let currentBlock = this.startBlock; 42 | while (!this.cancelled) { 43 | try { 44 | const block = await this.eth.getBlock(currentBlock, true); 45 | const participants = block.transactions 46 | .filter(t => (t.to ? 
t.to.equals(this.signupAddress) : false)) 47 | .map(t => t.from); 48 | this.emit('newParticipants', participants, currentBlock); 49 | if (currentBlock === this.selectBlock) { 50 | this.emit('selectParticipants', block.hash); 51 | } 52 | currentBlock += 1; 53 | } catch (err) { 54 | await new Promise(resolve => setTimeout(resolve, 10000)); 55 | } 56 | } 57 | console.log('Block processor complete.'); 58 | } 59 | 60 | public stop() { 61 | this.cancelled = true; 62 | this.removeAllListeners(); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /setup-mpc-server/src/state-store.ts: -------------------------------------------------------------------------------- 1 | import { existsSync, mkdirSync, readFileSync } from 'fs'; 2 | import { MpcState, mpcStateFromJSON } from 'setup-mpc-common'; 3 | import { existsAsync, renameAsync, writeFileAsync } from './fs-async'; 4 | 5 | export interface StateStore { 6 | setState(state: MpcState): Promise<void>; 7 | getState(): Promise<MpcState>; 8 | saveState(): Promise<void>; 9 | restoreState(name: string): Promise<MpcState>; 10 | exists(name: string): Promise<boolean>; 11 | } 12 | 13 | export class MemoryStateStore implements StateStore { 14 | private state!: MpcState; 15 | 16 | public async setState(state: MpcState) { 17 | this.state = state; 18 | } 19 | 20 | public async getState(): Promise<MpcState> { 21 | return this.state; 22 | } 23 | 24 | public async saveState() {} 25 | 26 | public async restoreState(name: string): Promise<MpcState> { 27 | return this.state; 28 | } 29 | 30 | public async exists(name: string) { 31 | return false; 32 | } 33 | } 34 | 35 | export class DiskStateStore implements StateStore { 36 | private state: MpcState; 37 | private storeFile: string; 38 | 39 | constructor(private storePath: string, defaultState: MpcState) { 40 | this.storeFile = `${storePath}/state.json`; 41 | mkdirSync(storePath, { recursive: true }); 42 | 43 | if (existsSync(this.storeFile)) { 44 | const buffer = readFileSync(this.storeFile); 45 | // In the event that new state is added, we merge in the defaults. 46 | this.state = { 47 | ...defaultState, 48 | ...this.migrate(mpcStateFromJSON(JSON.parse(buffer.toString()))), 49 | }; 50 | 51 | this.state.startSequence = this.state.sequence; 52 | } else { 53 | this.state = defaultState; 54 | } 55 | } 56 | 57 | public async setState(state: MpcState) { 58 | try { 59 | this.state = state; 60 | // Atomic file update.
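// (Write to `state.json.new`, then rename: rename() replaces the target atomically on POSIX filesystems, so a crash mid-write cannot leave a truncated state.json behind.)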
61 | await writeFileAsync(`${this.storeFile}.new`, JSON.stringify(this.state)); 62 | await renameAsync(`${this.storeFile}.new`, this.storeFile); 63 | } catch (err) { 64 | console.log(err); 65 | } 66 | } 67 | 68 | public async getState(): Promise<MpcState> { 69 | return this.state; 70 | } 71 | 72 | public async saveState() { 73 | const id = this.state.name || this.state.startTime.format('YYYYMMDDHHmmss'); 74 | await writeFileAsync(this.getStatePath(id), JSON.stringify(this.state)); 75 | } 76 | 77 | public async restoreState(name: string): Promise<MpcState> { 78 | const buffer = readFileSync(this.getStatePath(name)); 79 | this.state = mpcStateFromJSON(JSON.parse(buffer.toString())); 80 | this.state.startSequence = this.state.sequence; 81 | return this.state; 82 | } 83 | 84 | public async exists(name: string) { 85 | return await existsAsync(this.getStatePath(name)); 86 | } 87 | 88 | private getStatePath = (id: string) => 89 | `${this.storePath}/state_${id 90 | .replace(/[^A-Za-z0-9_ ]/g, '') 91 | .replace(/ +/g, '_') 92 | .toLowerCase()}.json`; 93 | 94 | private migrate(state: any) { 95 | // 001 - Discarded transcript complete flag in favour of state. 96 | for (const p of state.participants) { 97 | for (const t of p.transcripts) { 98 | if (t.complete !== undefined) { 99 | t.state = t.complete ? 'COMPLETE' : 'WAITING'; 100 | t.complete = undefined; 101 | } 102 | } 103 | } 104 | 105 | return state; 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /setup-mpc-server/src/state/create-participant.ts: -------------------------------------------------------------------------------- 1 | import { Moment } from 'moment'; 2 | import { Participant } from 'setup-mpc-common'; 3 | import { Address } from 'web3x/address'; 4 | 5 | export function createParticipant( 6 | sequence: number, 7 | addedAt: Moment, 8 | position: number, 9 | tier: number, 10 | address: Address 11 | ): Participant { 12 | return { 13 | sequence, 14 | addedAt, 15 | online: false, 16 | state: 'WAITING', 17 | runningState: 'WAITING', 18 | position, 19 | priority: position, 20 | tier, 21 | computeProgress: 0, 22 | verifyProgress: 0, 23 | transcripts: [], 24 | address, 25 | fast: false, 26 | }; 27 | } 28 | -------------------------------------------------------------------------------- /setup-mpc-server/src/state/default-state.ts: -------------------------------------------------------------------------------- 1 | import moment = require('moment'); 2 | import { MpcState } from 'setup-mpc-common'; 3 | 4 | export function defaultState(latestBlock: number): MpcState { 5 | return { 6 | name: 'default', 7 | sequence: 0, 8 | statusSequence: 0, 9 | startSequence: 0, 10 | ceremonyState: 'PRESELECTION', 11 | paused: false, 12 | startTime: moment().add(20, 'seconds'), 13 | endTime: moment().add(1, 'hour'), 14 | network: 'ropsten', 15 | latestBlock, 16 | selectBlock: latestBlock + 1, 17 | maxTier2: 0, 18 | minParticipants: 5, 19 | numG1Points: 1000000, 20 | numG2Points: 1, 21 | pointsPerTranscript: 100000, 22 | invalidateAfter: 180, 23 | sealingProgress: 0, 24 | publishProgress: 0, 25 | rangeProofKmax: 100000, 26 | rangeProofSize: 100000, 27 | rangeProofProgress: 0, 28 | rangeProofsPerFile: 1024, 29 | participants: [], 30 | }; 31 | } 32 | -------------------------------------------------------------------------------- /setup-mpc-server/src/state/order-waiting-participants.test.ts: -------------------------------------------------------------------------------- 1 | import moment from 'moment'; 2 | import { Participant }
from 'setup-mpc-common'; 3 | import { Wallet } from 'web3x/wallet'; 4 | import { createParticipant } from './create-participant'; 5 | import { orderWaitingParticipants } from './order-waiting-participants'; 6 | 7 | describe('order waiting participants', () => { 8 | const wallet = Wallet.fromMnemonic('alarm disagree index ridge tone outdoor betray pole forum source okay joy', 10); 9 | const addresses = wallet.currentAddresses(); 10 | let participants: Participant[]; 11 | 12 | beforeEach(() => { 13 | participants = addresses.map((a, i) => createParticipant(0, moment().add(i, 's'), i + 1, 1, a)); 14 | }); 15 | 16 | it('should correctly order participants', () => { 17 | const result = orderWaitingParticipants(participants, 0); 18 | expect(result.map(p => p.address)).toEqual(addresses); 19 | }); 20 | 21 | it('should correctly order online participants', () => { 22 | participants[3].online = true; 23 | participants[7].online = true; 24 | const result = orderWaitingParticipants(participants, 0); 25 | expect(result[0].address).toEqual(addresses[3]); 26 | expect(result[1].address).toEqual(addresses[7]); 27 | expect(result[2].address).toEqual(addresses[0]); 28 | }); 29 | 30 | it('should correctly order tier 0 participants', () => { 31 | participants[3].tier = 0; 32 | participants[8].tier = 0; 33 | participants[7].online = true; 34 | const result = orderWaitingParticipants(participants, 0); 35 | expect(result[0].address).toEqual(addresses[3]); 36 | expect(result[1].address).toEqual(addresses[8]); 37 | expect(result[2].address).toEqual(addresses[7]); 38 | expect(result[3].address).toEqual(addresses[0]); 39 | }); 40 | 41 | it('should correctly order tiered participants', () => { 42 | participants[0].tier = 3; 43 | participants[1].tier = 2; 44 | participants[2].tier = 3; 45 | participants[2].online = true; 46 | 47 | const result = orderWaitingParticipants(participants, 0); 48 | 49 | expect(result[9].address).toEqual(addresses[0]); 50 | expect(result[8].address).toEqual(addresses[1]); 51 | expect(result[0].address).toEqual(addresses[2]); 52 | }); 53 | 54 | it('should correctly order priority participants', () => { 55 | participants[0].tier = 2; 56 | participants[0].priority = 5; 57 | participants[1].tier = 2; 58 | participants[2].tier = 2; 59 | participants[2].online = true; 60 | participants[3].tier = 2; 61 | participants[4].tier = 2; 62 | participants[4].priority = 1; 63 | 64 | const result = orderWaitingParticipants(participants, 0); 65 | 66 | expect(result[6].address).toEqual(addresses[4]); 67 | }); 68 | }); 69 | -------------------------------------------------------------------------------- /setup-mpc-server/src/state/order-waiting-participants.ts: -------------------------------------------------------------------------------- 1 | import { cloneParticipant, Participant } from 'setup-mpc-common'; 2 | 3 | export function orderWaitingParticipants(participants: Participant[], sequence: number) { 4 | const indexOfFirstWaiting = participants.findIndex(p => p.state === 'WAITING'); 5 | 6 | const waiting = participants.slice(indexOfFirstWaiting).sort((a, b) => { 7 | if (a.tier === 0 || b.tier === 0) { 8 | return a.tier !== b.tier ? a.tier - b.tier : a.priority - b.priority; 9 | } 10 | if (a.online !== b.online) { 11 | return a.online ?
-1 : 1; 12 | } 13 | if (a.tier !== b.tier) { 14 | return a.tier - b.tier; 15 | } 16 | return a.priority - b.priority; 17 | }); 18 | 19 | let orderedParticipants = [...participants.slice(0, indexOfFirstWaiting), ...waiting]; 20 | 21 | // Adjust positions based on new order and advance sequence numbers if position changed. 22 | orderedParticipants = orderedParticipants.map((p, i) => { 23 | if (p.position !== i + 1) { 24 | p = cloneParticipant(p); 25 | p.position = i + 1; 26 | p.sequence = sequence; 27 | } 28 | return p; 29 | }); 30 | 31 | return orderedParticipants; 32 | } 33 | -------------------------------------------------------------------------------- /setup-mpc-server/src/state/reset-participant.ts: -------------------------------------------------------------------------------- 1 | import { MpcState, Participant } from 'setup-mpc-common'; 2 | import { orderWaitingParticipants } from './order-waiting-participants'; 3 | 4 | export function resetParticipant(state: MpcState, p: Participant, invalidateAfter?: number) { 5 | // Reset participant. 6 | p.state = 'WAITING'; 7 | p.runningState = 'WAITING'; 8 | p.startedAt = undefined; 9 | p.lastVerified = undefined; 10 | p.error = undefined; 11 | p.invalidateAfter = invalidateAfter; 12 | p.computeProgress = 0; 13 | p.verifyProgress = 0; 14 | p.transcripts = []; 15 | 16 | const complete = state.participants 17 | .filter(p => p.state !== 'WAITING') 18 | .sort((a, b) => a.startedAt!.unix() - b.startedAt!.unix()); 19 | const waiting = state.participants.filter(p => p.state === 'WAITING'); 20 | 21 | state.participants = orderWaitingParticipants([...complete, ...waiting], state.sequence); 22 | } 23 | -------------------------------------------------------------------------------- /setup-mpc-server/src/state/select-participants.ts: -------------------------------------------------------------------------------- 1 | import seedrandom from 'seedrandom'; 2 | import { MpcState } from 'setup-mpc-common'; 3 | import { orderWaitingParticipants } from './order-waiting-participants'; 4 | 5 | function shuffle<T>(seed: Buffer, array: T[]) { 6 | const prng = seedrandom(seed.toString('hex')); 7 | let m = array.length; 8 | let t: T; 9 | let i: number; 10 | 11 | // Fisher-Yates shuffle. 12 | while (m) { 13 | // Pick a remaining element. 14 | const n = prng.double(); 15 | i = Math.floor(n * m--); 16 | t = array[m]; 17 | 18 | // And swap it with the current element.
19 | array[m] = array[i]; 20 | array[i] = t; 21 | } 22 | } 23 | 24 | export function selectParticipants(state: MpcState, blockHash: Buffer) { 25 | if (state.ceremonyState !== 'PRESELECTION') { 26 | return; 27 | } 28 | 29 | console.log('Selecting participants.'); 30 | 31 | state.sequence += 1; 32 | state.statusSequence = state.sequence; 33 | state.ceremonyState = 'SELECTED'; 34 | 35 | let { participants } = state; 36 | const tier0 = participants.filter(t => t.tier === 0); 37 | const tier1 = participants.filter(t => t.tier === 1); 38 | const earlyBirds = participants.filter(t => t.tier === 2); 39 | const tier3 = participants.filter(t => t.tier === 3); 40 | shuffle(blockHash, tier1); 41 | shuffle(blockHash, earlyBirds); 42 | const tier2 = earlyBirds.slice(0, state.maxTier2); 43 | const tier2rejects = earlyBirds.slice(state.maxTier2).sort((a, b) => a.addedAt.valueOf() - b.addedAt.valueOf()); 44 | tier2rejects.forEach(p => (p.tier = 3)); 45 | 46 | participants = [...tier0, ...tier1, ...tier2, ...tier2rejects, ...tier3]; 47 | 48 | participants.forEach((p, i) => { 49 | p.sequence = state.sequence; 50 | p.priority = i + 1; 51 | }); 52 | 53 | state.participants = orderWaitingParticipants(participants, state.sequence); 54 | } 55 | -------------------------------------------------------------------------------- /setup-mpc-server/src/verifier.ts: -------------------------------------------------------------------------------- 1 | import { ChildProcess, spawn } from 'child_process'; 2 | import { MemoryFifo } from 'setup-mpc-common'; 3 | import { Address } from 'web3x/address'; 4 | import { TranscriptStore } from './transcript-store'; 5 | 6 | export interface VerifyItem { 7 | address: Address; 8 | num: number; 9 | } 10 | 11 | export class Verifier { 12 | private queue: MemoryFifo<VerifyItem> = new MemoryFifo<VerifyItem>(); 13 | public lastCompleteAddress?: Address; 14 | public runningAddress?: Address; 15 | private proc?: ChildProcess; 16 | private cancelled = false; 17 | 18 | constructor( 19 | private store: TranscriptStore, 20 | private numG1Points: number, 21 | private numG2Points: number, 22 | private pointsPerTranscript: number, 23 | private cb: (address: Address, num: number, verified: boolean) => Promise<void> 24 | ) {} 25 | 26 | public async active() { 27 | return this.proc || (await this.queue.length()); 28 | } 29 | 30 | public async run() { 31 | console.log('Verifier started...'); 32 | while (true) { 33 | const item = await this.queue.get(); 34 | if (!item) { 35 | break; 36 | } 37 | const { address, num } = item; 38 | const transcriptPath = this.store.getUnverifiedTranscriptPath(address, num); 39 | 40 | try { 41 | if (!this.runningAddress) { 42 | // If we dequeued an item, someone should be running. 43 | throw new Error('No running address set.'); 44 | } 45 | 46 | if (!this.runningAddress.equals(address)) { 47 | // This address is no longer running. Just skip.
48 | continue; 49 | } 50 | 51 | if (await this.verifyTranscript(address, num, transcriptPath)) { 52 | console.log(`Verification succeeded: ${transcriptPath}...`); 53 | 54 | await this.cb(address, num, true); 55 | } else { 56 | await this.store.eraseUnverified(address, num); 57 | if (!this.cancelled) { 58 | await this.cb(address, num, false); 59 | } 60 | } 61 | } catch (err) { 62 | console.log(err); 63 | } 64 | } 65 | console.log('Verifier completed.'); 66 | } 67 | 68 | public put(item: VerifyItem) { 69 | this.queue.put(item); 70 | } 71 | 72 | public cancel() { 73 | this.cancelled = true; 74 | this.queue.cancel(); 75 | if (this.proc) { 76 | this.proc.kill(); 77 | } 78 | } 79 | 80 | private async verifyTranscript(address: Address, transcriptNumber: number, transcriptPath: string) { 81 | // Argument 0 is total number of G1 points in all transcripts. 82 | // Argument 1 is total number of G2 points in all transcripts. 83 | // Argument 2 is the total points per transcript. 84 | // Argument 3 is the expected transcript number. 85 | // Argument 4 is the transcript to verify. 86 | // Argument 5 is the 0th transcript of the sequence. 87 | const args = [ 88 | this.numG1Points.toString(), 89 | this.numG2Points.toString(), 90 | this.pointsPerTranscript.toString(), 91 | transcriptNumber.toString(), 92 | transcriptPath, 93 | this.store.getUnverifiedTranscriptPath(address, 0), 94 | ]; 95 | 96 | // Argument 6 is... 97 | if (transcriptNumber === 0) { 98 | // The previous participant's 0th transcript, or nothing if no previous participant. 99 | if (this.lastCompleteAddress) { 100 | args.push(this.store.getVerifiedTranscriptPath(this.lastCompleteAddress, 0)); 101 | } 102 | } else { 103 | // The previous transcript in the sequence. 104 | args.push(this.store.getUnverifiedTranscriptPath(address, transcriptNumber - 1)); 105 | } 106 | 107 | console.log(`Verifying transcript ${transcriptNumber}...`); 108 | return new Promise<boolean>(resolve => { 109 | const binPath = '../setup-tools/verify'; 110 | const verify = spawn(binPath, args); 111 | this.proc = verify; 112 | 113 | verify.stdout.on('data', data => { 114 | console.log(data.toString()); 115 | }); 116 | 117 | verify.stderr.on('data', data => { 118 | console.log(data.toString()); 119 | }); 120 | 121 | verify.on('close', code => { 122 | this.proc = undefined; 123 | resolve(code === 0); 124 | }); 125 | }); 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /setup-mpc-server/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "INFURA_API_KEY" { 2 | type = string 3 | } 4 | -------------------------------------------------------------------------------- /setup-mpc-server/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "esnext", 4 | "module": "commonjs", 5 | "moduleResolution": "node", 6 | "lib": ["dom", "esnext", "es2017.object"], 7 | "outDir": "dest", 8 | "strict": true, 9 | "noImplicitAny": true, 10 | "noImplicitThis": false, 11 | "esModuleInterop": true, 12 | "declaration": true 13 | }, 14 | "include": ["src"] 15 | } 16 | -------------------------------------------------------------------------------- /setup-mpc-server/tsconfig.prod.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ".", 3 | "exclude": ["**/*.test.*"] 4 | } 5 | --------------------------------------------------------------------------------
/setup-mpc-webterm/.dockerignore: -------------------------------------------------------------------------------- 1 | dist 2 | node_modules 3 | **/.terraform -------------------------------------------------------------------------------- /setup-mpc-webterm/.mbt.yml: -------------------------------------------------------------------------------- 1 | name: setup-mpc-webterm 2 | build: 3 | default: 4 | cmd: ../ci-scripts/build.sh 5 | args: 6 | - 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-webterm 7 | - '278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common aztecprotocol/setup-mpc-client' 8 | commands: 9 | deploy: 10 | cmd: ../ci-scripts/deploy.sh 11 | args: 12 | - setup-mpc-webterm 13 | dependencies: 14 | - setup-mpc-common 15 | - setup-mpc-client 16 | -------------------------------------------------------------------------------- /setup-mpc-webterm/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-mpc-common:latest 2 | FROM aztecprotocol/setup-mpc-client:latest 3 | 4 | FROM node:10 5 | WORKDIR /usr/src/setup-mpc-common 6 | COPY --from=0 /usr/src/setup-mpc-common . 7 | RUN yarn link 8 | WORKDIR /usr/src/setup-mpc-client 9 | COPY --from=1 /usr/src/setup-mpc-client . 10 | RUN yarn link 11 | WORKDIR /usr/src/setup-mpc-webterm 12 | COPY . . 13 | RUN yarn install && yarn build && rm -rf node_modules && yarn install --production && yarn cache clean 14 | CMD ["yarn", "start"] -------------------------------------------------------------------------------- /setup-mpc-webterm/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "setup-mpc-webterm", 3 | "version": "1.0.0", 4 | "main": "src/index.ts", 5 | "license": "MIT", 6 | "scripts": { 7 | "build": "webpack --config webpack.prod.js", 8 | "start": "node ./server", 9 | "start:dev": "webpack-dev-server --config webpack.dev.js", 10 | "postinstall": "yarn link setup-mpc-client && yarn link setup-mpc-common" 11 | }, 12 | "dependencies": { 13 | "koa": "^2.5.2", 14 | "koa-mount": "^4.0.0", 15 | "koa-proxy": "^1.0.0-alpha.3", 16 | "koa-static": "^5.0.0" 17 | }, 18 | "devDependencies": { 19 | "@types/fontfaceobserver": "^0.0.6", 20 | "@types/koa": "^2.0.46", 21 | "@types/koa-static": "^4.0.0", 22 | "@types/webpack-env": "^1.13.6", 23 | "@types/xterm": "^3.0.0", 24 | "css-loader": "^3.1.0", 25 | "fontfaceobserver": "^2.1.0", 26 | "html-webpack-plugin": "^3.2.0", 27 | "isomorphic-fetch": "^2.2.1", 28 | "moment": "^2.24.0", 29 | "style-loader": "^0.23.1", 30 | "ts-loader": "^6.0.4", 31 | "tslint": "^5.11.0", 32 | "tslint-config-prettier": "^1.15.0", 33 | "typescript": "^2.9.1", 34 | "webpack": "^4.16.0", 35 | "webpack-cli": "^3.0.8", 36 | "webpack-dev-server": "^3.1.4", 37 | "xterm": "^3.14.5" 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /setup-mpc-webterm/server/index.js: -------------------------------------------------------------------------------- 1 | const serve = require('koa-static'); 2 | const mount = require('koa-mount'); 3 | const Koa = require('koa'); 4 | const proxy = require('koa-proxy'); 5 | 6 | const { PORT = '8080' } = process.env; 7 | 8 | const static = new Koa().use(serve('dist')); 9 | 10 | const app = new Koa(); 11 | 12 | // When running locally, calls to the api will hit this server rather 13 | // than being handled by the ALB. We forward the api calls onto the server here. 
14 | if (process.env.NODE_ENV !== 'production') { 15 | app.use( 16 | proxy({ 17 | host: 'https://ignition.aztecprotocol.com', 18 | match: /^\/api\//, 19 | }) 20 | ); 21 | } 22 | 23 | app.use(mount('/terminal', static)).listen(PORT); 24 | 25 | console.log(`Server listening on port ${PORT}`); 26 | -------------------------------------------------------------------------------- /setup-mpc-webterm/src/index.css: -------------------------------------------------------------------------------- 1 | :root { 2 | --black: #000000; 3 | --grey: #c7c7c7; 4 | --red: #ff8272; 5 | --green: #64fa72; 6 | --yellow: #fefdc2; 7 | --blue: #a5d5fe; 8 | --magenta: #ff8ffd; 9 | --cyan: #d0d1fe; 10 | --white: #f1f1f1; 11 | --brightBlack: #8e8e8e; 12 | --brightRed: #ffc4bd; 13 | --brightGreen: #d6fcb9; 14 | --brightYellow: #fefdd5; 15 | --brightBlue: #c1e3fe; 16 | --brightMagenta: #ffb1fe; 17 | --brightCyan: #e5e6fe; 18 | --brightWhite: #feffff; 19 | } 20 | 21 | html, 22 | body { 23 | -webkit-text-size-adjust: none; 24 | background-color: #000; 25 | height: 100%; 26 | margin: 0; 27 | } 28 | 29 | #screen { 30 | z-index: 10; 31 | position: absolute; 32 | width: 2000px; 33 | height: 5000px; 34 | top: 114px; 35 | } 36 | 37 | #logo { 38 | background-image: url("data:image/svg+xml,%0A%3Csvg width='599' height='142' viewBox='0 0 599 142' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill-rule='evenodd' clip-rule='evenodd' d='M76.3675 5.65685L135.765 65.0538C138.889 68.178 138.889 73.2433 135.765 76.3675L76.3675 135.764C73.2433 138.889 68.178 138.889 65.0538 135.764L5.65687 76.3675C2.53267 73.2433 2.53267 68.178 5.65687 65.0538L65.0538 5.65685C68.178 2.53266 73.2433 2.53266 76.3675 5.65685ZM76.3675 26.8701C73.2433 23.7459 68.178 23.7459 65.0538 26.8701L26.8701 65.0538C23.7459 68.178 23.7459 73.2433 26.8701 76.3675L65.0538 114.551C68.178 117.675 73.2433 117.675 76.3675 114.551L114.551 76.3675C117.675 73.2433 117.675 68.178 114.551 65.0538L76.3675 26.8701Z' fill='white'/%3E%3Cpath opacity='0.9' d='M93.3381 65.0538L76.3675 48.0833C73.2433 44.9591 68.178 44.9591 65.0538 48.0833L48.0833 65.0538C44.9591 68.178 44.9591 73.2433 48.0833 76.3675L65.0538 93.3381C68.178 96.4623 73.2433 96.4623 76.3675 93.3381L93.3381 76.3675C96.4623 73.2433 96.4623 68.178 93.3381 65.0538Z' fill='white'/%3E%3Cpath d='M211.32 32.0014L245.589 109.72H229.76L221.706 90.8052H187.216L179.718 109.72H164L195.991 32.0014H211.32ZM191.77 79.4005H216.819L203.711 48.6356L191.77 79.4005ZM333.497 32.0014L288.287 97.9818H333.719V109.72H262.517L307.671 43.7399H265.238V32.0014H333.497ZM424.461 32.0014V43.7399H396.913V109.72H381.917V43.7399H354.369V32.0014H424.461ZM500.984 32.0014V43.7399H465.827V64.1571H499.484V75.8956H465.827V97.9818H501.761V109.72H450.831V32.0014H500.984ZM571.453 31C575.859 31 580.172 31.5007 584.393 32.5021C588.615 33.5035 593.169 35.2095 598.056 37.6203V51.751C593.28 49.0064 588.864 47.0222 584.81 45.7983C580.756 44.5744 576.525 43.9624 572.119 43.9624C566.602 43.9624 561.622 45.1029 557.179 47.3838C552.736 49.6648 549.283 52.8822 546.821 57.0361C544.358 61.1901 543.127 65.8075 543.127 70.8887C543.127 76.0811 544.349 80.7356 546.793 84.8525C549.236 88.9693 552.68 92.1496 557.123 94.3935C561.566 96.6374 566.565 97.7593 572.119 97.7593C576.377 97.7593 580.441 97.1473 584.31 95.9234C588.179 94.6994 593.058 92.4927 598.945 89.3031V103.434C593.909 106.067 589.281 107.94 585.06 109.053C580.839 110.165 575.914 110.722 570.286 110.722C562.103 110.722 554.791 109.09 548.348 105.826C541.905 102.562 536.842 97.8613 533.158 91.7231C529.473 85.5849 
527.631 78.8627 527.631 71.5563C527.631 63.7306 529.529 56.7024 533.324 50.4715C537.119 44.2406 542.34 39.4377 548.987 36.0626C555.633 32.6875 563.121 31 571.453 31Z' fill='white'/%3E%3C/svg%3E%0A"); 39 | background-size: auto 100%; 40 | background-repeat: no-repeat; 41 | background-position-y: 50%; 42 | background-position-x: 50%; 43 | width: 100%; 44 | height: 50px; 45 | margin-top: 15px; 46 | margin-bottom: 30px; 47 | margin-left: auto; 48 | margin-right: auto; 49 | } 50 | 51 | #links { 52 | font-family: 'menlo'; 53 | font-size: 12px; 54 | width: 100%; 55 | height: 14px; 56 | text-align: center; 57 | margin-left: auto; 58 | margin-right: auto; 59 | margin-bottom: 30px; 60 | } 61 | 62 | a.link { 63 | color: var(--grey); 64 | text-decoration: none; 65 | } 66 | 67 | a.link:hover { 68 | color: var(--yellow); 69 | text-decoration: none; 70 | } 71 | 72 | .blue { 73 | color: var(--blue); 74 | } 75 | 76 | #terminal { 77 | width: 880px; 78 | height: 5000px; 79 | margin: auto; 80 | padding-left: 5px; 81 | padding-right: 5px; 82 | } 83 | 84 | .xterm .xterm-viewport { 85 | overflow-y: visible; 86 | } 87 | -------------------------------------------------------------------------------- /setup-mpc-webterm/src/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | AZTEC Trusted Setup Multi Party Computation 7 | 8 | 9 | 10 | 11 | 14 |
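<!-- (The element markup here did not survive extraction. Inferred from index.css and index.ts: the body mounts the #logo, #links and #screen containers, and the #terminal element that index.ts opens the xterm instance in.) -->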
18 | 19 | 20 | -------------------------------------------------------------------------------- /setup-mpc-webterm/src/index.ts: -------------------------------------------------------------------------------- 1 | import FontFaceObserver from 'fontfaceobserver'; 2 | import { App } from 'setup-mpc-client'; 3 | import { HttpClient } from 'setup-mpc-common'; 4 | import { Terminal } from 'xterm'; 5 | 6 | require('xterm/dist/xterm.css'); 7 | require('./index.css'); 8 | 9 | import * as fit from 'xterm/lib/addons/fit/fit'; 10 | 11 | Terminal.applyAddon(fit); 12 | 13 | declare global { 14 | interface Window { 15 | app: App; 16 | term: Terminal; 17 | } 18 | } 19 | 20 | async function main() { 21 | let fontFamily; 22 | 23 | if (window.navigator.platform === 'MacIntel') { 24 | fontFamily = 'menlo'; 25 | } else { 26 | const font = new FontFaceObserver('Roboto Mono'); 27 | await font.load(); 28 | fontFamily = 'Roboto Mono'; 29 | } 30 | 31 | const term = new Terminal({ rendererType: window.devicePixelRatio > 1 ? 'dom' : 'canvas' }); 32 | term.setOption('theme', { 33 | background: '#000000', 34 | foreground: '#c7c7c7', 35 | ansiBlack: '#000000', 36 | red: '#ff8272', 37 | green: '#64fa72', 38 | yellow: '#fefdc2', 39 | blue: '#a5d5fe', 40 | magenta: '#ff8ffd', 41 | cyan: '#d0d1fe', 42 | white: '#f1f1f1', 43 | brightBlack: '#8e8e8e', 44 | brightRed: '#ffc4bd', 45 | brightGreen: '#d6fcb9', 46 | brightYellow: '#fefdd5', 47 | brightBlue: '#c1e3fe', 48 | brightMagenta: '#ffb1fe', 49 | brightCyan: '#e5e6fe', 50 | brightWhite: '#feffff', 51 | }); 52 | term.setOption('fontFamily', fontFamily); 53 | term.setOption('fontSize', 12); 54 | term.open(document.getElementById('terminal') as HTMLElement); 55 | 56 | const url = window.location; 57 | const apiUrl = `${url.protocol}//${url.hostname}:${url.port}/api`; 58 | const server = new HttpClient(apiUrl); 59 | const app = new App(server, undefined, term as any, term.rows, term.cols); 60 | 61 | window.app = app; 62 | window.term = term; 63 | 64 | term.on('resize', ({ cols, rows }) => app.resize(cols, rows)); 65 | (term as any).fit(); 66 | 67 | window.onresize = () => (term as any).fit(); 68 | 69 | await app.start(); 70 | } 71 | 72 | // tslint:disable-next-line:no-console 73 | main().catch(console.error); 74 | -------------------------------------------------------------------------------- /setup-mpc-webterm/terraform/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "aztec-terraform" 4 | key = "setup/setup-mpc-webterm" 5 | region = "eu-west-2" 6 | } 7 | } 8 | 9 | data "terraform_remote_state" "setup_iac" { 10 | backend = "s3" 11 | config = { 12 | bucket = "aztec-terraform" 13 | key = "setup/setup-iac" 14 | region = "eu-west-2" 15 | } 16 | } 17 | 18 | provider "aws" { 19 | profile = "default" 20 | region = "eu-west-2" 21 | } 22 | 23 | resource "aws_service_discovery_service" "setup_mpc_webterm" { 24 | name = "setup-mpc-webterm" 25 | 26 | health_check_custom_config { 27 | failure_threshold = 1 28 | } 29 | 30 | dns_config { 31 | namespace_id = "${data.terraform_remote_state.setup_iac.outputs.local_service_discovery_id}" 32 | 33 | dns_records { 34 | ttl = 10 35 | type = "A" 36 | } 37 | 38 | routing_policy = "MULTIVALUE" 39 | } 40 | } 41 | 42 | resource "aws_ecs_task_definition" "setup_mpc_webterm" { 43 | family = "setup-mpc-webterm" 44 | requires_compatibilities = ["FARGATE"] 45 | network_mode = "awsvpc" 46 | cpu = "256" 47 | memory = "512" 48 | execution_role_arn = 
"${data.terraform_remote_state.setup_iac.outputs.ecs_task_execution_role_arn}" 49 | 50 | container_definitions = < /dev/null 48 | fi 49 | echo "Job $JOBNUM complete in $((END-START))s: $RESULT" 50 | done -------------------------------------------------------------------------------- /setup-tools/.dockerignore: -------------------------------------------------------------------------------- 1 | build 2 | setup_db* 3 | Dockerfile* 4 | .* 5 | *.sh -------------------------------------------------------------------------------- /setup-tools/.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | cmake/cable 3 | setup_db* -------------------------------------------------------------------------------- /setup-tools/.mbt.yml: -------------------------------------------------------------------------------- 1 | name: setup-tools 2 | build: 3 | default: 4 | cmd: ./build.sh 5 | commands: 6 | deploy: 7 | cmd: ./deploy.sh -------------------------------------------------------------------------------- /setup-tools/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.14) 2 | 3 | include(GNUInstallDirs) 4 | 5 | set(CMAKE_C_STANDARD 11) 6 | set(CMAKE_C_EXTENSIONS ON) 7 | set(CMAKE_CXX_STANDARD 17) 8 | set(CMAKE_CXX_STANDARD_REQUIRED TRUE) 9 | set(CMAKE_CXX_EXTENSIONS ON) 10 | 11 | set(PROJECT_VERSION 0.1.0) 12 | project(aztec-trusted-setup) 13 | 14 | set(include_dir ${CMAKE_CURRENT_SOURCE_DIR}/include) 15 | 16 | set(private_include_dir ${PROJECT_SOURCE_DIR}/src) 17 | set(DEPENDS_DIR ${PROJECT_SOURCE_DIR}/depends) 18 | 19 | SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter") 20 | 21 | ### SETUP 22 | # Create a directory to store the trusted setup output 23 | add_custom_target(create-setup-db-directory ALL 24 | COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_SOURCE_DIR}/setup_db) 25 | 26 | ### DEPENDENCIES 27 | 28 | # GMP 29 | find_path(GMP_INCLUDE_DIR NAMES gmp.h) 30 | find_library(GMP_LIBRARIES NAMES gmp libgmp) 31 | find_library(GMPXX_LIBRARIES NAMES gmpxx libgmpxx) 32 | 33 | add_compile_options(-Werror -Wall -Wextra) 34 | add_compile_options(-fno-stack-protector) 35 | #add_compile_options(-Wno-unused-const-variable) 36 | 37 | option( 38 | ENABLE_LIBFF_PROFILING 39 | "Enable libff performance profiling" 40 | OFF 41 | ) 42 | if("${ENABLE_LIBFF_PROFILING}") 43 | add_definitions(-DENABLE_LIBFF_PROFILING) 44 | endif() 45 | 46 | # SET LIBFF CURVE TO ALT_BN128 47 | set( 48 | CURVE 49 | "ALT_BN128" 50 | CACHE 51 | STRING 52 | "Default curve: one of ALT_BN128, BN128, EDWARDS, MNT4, MNT6" 53 | ) 54 | 55 | set(USE_ASM ON) 56 | 57 | # DEFAULT MULTICORE OFF 58 | option( 59 | MULTICORE 60 | "Enable parallelized execution, using OpenMP" 61 | OFF 62 | ) 63 | 64 | # DEFAULT PROCPS OFF 65 | option( 66 | WITH_PROCPS 67 | "Use procps for memory profiling" 68 | OFF 69 | ) 70 | 71 | # Seriously, turn this damn thing off 72 | add_definitions( 73 | -DNO_PROCPS 74 | ) 75 | 76 | add_subdirectory(depends) 77 | 78 | add_subdirectory(src) 79 | 80 | option(SETUP_TESTING "Build tests" ON) 81 | 82 | if(SETUP_TESTING) 83 | enable_testing() 84 | add_subdirectory(test) 85 | endif() 86 | -------------------------------------------------------------------------------- /setup-tools/Dockerfile.build: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | RUN apt-get update && apt-get install -y build-essential wget libgmp3-dev pkg-config 
libssl-dev 3 | RUN wget https://cmake.org/files/v3.15/cmake-3.15.4.tar.gz \ 4 | && tar zxfv cmake-3.15.4.tar.gz \ 5 | && cd cmake-3.15.4 \ 6 | && ./bootstrap \ 7 | && make -j8 \ 8 | && make install \ 9 | && cd .. \ 10 | && rm -rf cmake* 11 | RUN apt-get install -y git 12 | WORKDIR /usr/src/setup-tools 13 | COPY . . 14 | RUN mkdir build && cd build && cmake .. && make -j8 15 | RUN ./build/test/setup_tests -------------------------------------------------------------------------------- /setup-tools/Dockerfile.deploy: -------------------------------------------------------------------------------- 1 | FROM aztec/setup-tools-build 2 | FROM ubuntu:latest 3 | WORKDIR /usr/src/setup-tools 4 | COPY --from=0 \ 5 | /usr/src/setup-tools/build/setup \ 6 | /usr/src/setup-tools/build/setup-fast \ 7 | /usr/src/setup-tools/build/seal \ 8 | /usr/src/setup-tools/build/verify \ 9 | /usr/src/setup-tools/build/compute_generator_polynomial \ 10 | /usr/src/setup-tools/build/prep_range_data \ 11 | /usr/src/setup-tools/build/compute_range_polynomial \ 12 | /usr/src/setup-tools/build/print_point \ 13 | /usr/src/setup-tools/build/generate_h \ 14 | ./ -------------------------------------------------------------------------------- /setup-tools/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | docker build -f Dockerfile.build -t aztec/setup-tools-build . 3 | docker build -f Dockerfile.deploy -t 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-tools . -------------------------------------------------------------------------------- /setup-tools/cmake/HunterConfig.cmake: -------------------------------------------------------------------------------- 1 | set(HUNTER_CONFIGURATION_TYPES Release CACHE STRING "Build type of Hunter packages") 2 | 3 | include(HunterGate) 4 | 5 | HunterGate( 6 | URL "https://github.com/ruslo/hunter/archive/v0.23.64.tar.gz" 7 | SHA1 "a5f3c4999e03173d28b8469c4da4545dea740a41" 8 | ) 9 | -------------------------------------------------------------------------------- /setup-tools/depends/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(libff) 2 | 3 | OPTION(IS_LIBFQFFT_PARENT OFF) 4 | add_subdirectory(libfqfft) 5 | 6 | add_library( 7 | blake2 STATIC 8 | ${DEPENDS_DIR}/blake2/ref/blake2b-ref.c 9 | ${DEPENDS_DIR}/blake2/ref/blake2-impl.h 10 | ${DEPENDS_DIR}/blake2/ref/blake2.h 11 | ) 12 | 13 | OPTION(BARRETENBERG_TESTING OFF) 14 | add_subdirectory(barretenberg) -------------------------------------------------------------------------------- /setup-tools/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | aws ecr describe-repositories --repository-names setup-tools > /dev/null 2>&1 || aws ecr create-repository --repository-name setup-tools 4 | docker push 278380418400.dkr.ecr.eu-west-2.amazonaws.com/setup-tools -------------------------------------------------------------------------------- /setup-tools/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | build-env: 4 | build: 5 | context: . 6 | dockerfile: Dockerfile.build 7 | image: aztec/setup-tools-build 8 | entrypoint: /bin/bash 9 | tty: true 10 | environment: 11 | MAKEFLAGS: '-j8' 12 | volumes: 13 | - .:/usr/src/setup-tools 14 | 15 | setup-tools: 16 | build: 17 | context: . 
18 | dockerfile: Dockerfile.deploy 19 | image: aztec/setup-tools 20 | depends_on: 21 | - build-env 22 | -------------------------------------------------------------------------------- /setup-tools/include/aztec_common.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | // #include "../src/aztec_common/streaming.hpp" -------------------------------------------------------------------------------- /setup-tools/src/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | add_subdirectory(generator) 2 | add_subdirectory(generate_h) 3 | add_subdirectory(setup) 4 | add_subdirectory(aztec_common) 5 | add_subdirectory(print-point) 6 | add_subdirectory(range) 7 | add_subdirectory(range-prep) 8 | add_subdirectory(verify) 9 | -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # aztec_common 2 | # copyright spilsbury holdings 2019 3 | 4 | add_library( 5 | aztec_common STATIC 6 | ${include_dir}/aztec_common.hpp 7 | batch_normalize.hpp 8 | checksum.hpp 9 | compression.hpp 10 | libff_types.hpp 11 | streaming_g1.hpp 12 | streaming_g1.cpp 13 | streaming_g2.hpp 14 | streaming_g2.cpp 15 | streaming_transcript.hpp 16 | streaming_transcript.cpp 17 | streaming_range.hpp 18 | streaming.hpp 19 | streaming.cpp 20 | ) 21 | 22 | set_target_properties(aztec_common PROPERTIES LINKER_LANGUAGE CXX) 23 | 24 | target_link_libraries( 25 | aztec_common 26 | PUBLIC 27 | ${GMP_LIBRARIES} 28 | blake2 29 | barretenberg 30 | ) 31 | 32 | target_include_directories( 33 | aztec_common 34 | PUBLIC 35 | ${DEPENDS_DIR}/blake2/ref 36 | ${DEPENDS_DIR}/libff 37 | ${GMP_INCLUDE_DIR} 38 | ${DEPENDS_DIR}/barretenberg/src 39 | ) 40 | 41 | install( 42 | DIRECTORY 43 | ${include_dir}/ 44 | DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} 45 | ) 46 | -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/assert.hpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #pragma once 6 | 7 | #include 8 | 9 | // compiler should optimize this out in release builds, without triggering 10 | // an unused variable warning 11 | #define DONT_EVALUATE(expression) \ 12 | { \ 13 | true ? 
static_cast<void>(0) : static_cast<void>((expression)); \ 14 | } 15 | 16 | #ifdef NDEBUG 17 | #define ASSERT(expression) DONT_EVALUATE((expression)) 18 | #else 19 | #define ASSERT(expression) assert((expression)) 20 | #endif // NDEBUG -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/batch_normalize.hpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #pragma once 6 | 7 | #include 8 | 9 | namespace batch_normalize 10 | { 11 | 12 | template <typename FieldT, typename GroupT> 13 | void batch_normalize(size_t start, size_t number, GroupT *x) 14 | { 15 | FieldT accumulator = FieldT::one(); 16 | FieldT *temporaries = static_cast<FieldT *>(malloc(number * sizeof(FieldT))); 17 | for (size_t i = 0; i < number; ++i) 18 | { 19 | temporaries[i] = accumulator; 20 | accumulator = accumulator * x[i + start].Z; 21 | } 22 | accumulator = accumulator.inverse(); 23 | 24 | FieldT zzInv; 25 | FieldT zInv; 26 | for (size_t i = number - 1; i < (size_t)(-1); --i) 27 | { 28 | zInv = accumulator * temporaries[i]; 29 | zzInv = zInv * zInv; 30 | x[i + start].X = x[i + start].X * zzInv; 31 | x[i + start].Y = x[i + start].Y * (zzInv * zInv); 32 | accumulator = accumulator * x[i + start].Z; // temporaries[2 * i + 1]; 33 | x[i + start].Z = FieldT::one(); 34 | } 35 | free(temporaries); 36 | } 37 | 38 | } // namespace batch_normalize -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/checksum.hpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | namespace checksum 12 | { 13 | 14 | constexpr size_t BLAKE2B_CHECKSUM_LENGTH = 64; 15 | 16 | inline void create_checksum(char const *buffer, size_t buffer_size, char *checksum) 17 | { 18 | blake2b((void *)checksum, BLAKE2B_CHECKSUM_LENGTH, (void *)buffer, buffer_size, nullptr, 0); 19 | } 20 | 21 | } // namespace checksum -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/compression.hpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #pragma once 6 | #include 7 | #include 8 | #include "assert.hpp" 9 | 10 | namespace compression 11 | { 12 | 13 | template <typename FieldT, typename GroupT, size_t N> 14 | GroupT decompress(libff::bigint<N> &x) 15 | { 16 | // get most significant limb of x 17 | mp_limb_t last = x.data[N - 1]; 18 | // convert msb to boolean value 19 | bool set = last >> (GMP_NUMB_BITS - 1); 20 | 21 | // create a mask where most significant bit of (mp_limb_t) is low, others high 22 | mp_limb_t mask = ~(mp_limb_t(1) << (GMP_NUMB_BITS - 1)); 23 | 24 | // remove y-bit from x 25 | x.data[N - 1] = x.data[N - 1] & mask; 26 | 27 | // convert x into a field element 28 | FieldT fq_x = FieldT(x); 29 | // compute y from x 30 | // TODO: Generalize to more than bn128 31 | FieldT fq_y2 = (fq_x.squared() * fq_x) + 3; 32 | FieldT fq_y = fq_y2.sqrt(); 33 | // (check this is a valid solution) 34 | ASSERT(fq_y.squared() == fq_y2); 35 | // invert y if sign of root does not match 36 | bool is_odd = fq_y.as_bigint().test_bit(0); 37 | fq_y = (is_odd != set) ?
-fq_y : fq_y; 38 | return GroupT(fq_x, fq_y, FieldT::one()); 39 | } 40 | 41 | }; // namespace compression -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/libff_types.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | using ppT = libff::alt_bn128_pp; 12 | using Fq = libff::Fq<ppT>; 13 | using Fqe = libff::Fqe<ppT>; 14 | using G1 = libff::G1<ppT>; 15 | using G2 = libff::G2<ppT>; 16 | using G1_precomp = libff::G1_precomp<ppT>; 17 | using G2_precomp = libff::G2_precomp<ppT>; 18 | using Fr = libff::Fr<ppT>; 19 | using Fqk = libff::Fqk<ppT>; 20 | using GT = libff::GT<ppT>; 21 | 22 | constexpr int GMP_NUMB_BYTES = GMP_NUMB_BITS / 8; -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/streaming.hpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #pragma once 6 | #include 7 | #include "libff_types.hpp" 8 | #include "checksum.hpp" 9 | 10 | #define __bswap_64 __builtin_bswap64 11 | 12 | namespace streaming 13 | { 14 | constexpr bool USE_COMPRESSION = false; 15 | 16 | void write_field_elements_to_file(std::vector<Fr> &coefficients, std::string const &filename); 17 | 18 | void read_field_elements_from_file(std::vector<Fr> &coefficients, std::string const &filename); 19 | 20 | size_t get_file_size(std::string const &filename); 21 | 22 | std::vector<char> read_file_into_buffer(std::string const &filename, size_t offset = 0, size_t size = 0); 23 | 24 | void write_buffer_to_file(std::string const &filename, std::vector<char> const &buffer); 25 | 26 | bool is_file_exist(std::string const &fileName); 27 | 28 | std::vector<char> validate_checksum(std::vector<char> const &buffer); 29 | 30 | void add_checksum_to_buffer(char *buffer, size_t message_size); 31 | 32 | template <size_t N> 33 | void __bswap_bigint(libff::bigint<N> &val) 34 | { 35 | for (size_t i = 0; i < N; ++i) 36 | { 37 | val.data[i] = __bswap_64(val.data[i]); 38 | } 39 | } 40 | 41 | inline bool isLittleEndian() 42 | { 43 | int num = 42; 44 | return (*(char *)&num == 42); 45 | } 46 | 47 | template <size_t N> 48 | void write_bigint_to_buffer(libff::bigint<N> &value, char *buffer) 49 | { 50 | mp_limb_t temp; 51 | for (size_t i = 0; i < N; ++i) 52 | { 53 | if (isLittleEndian()) 54 | { 55 | temp = __builtin_bswap64(value.data[i]); 56 | } else { temp = value.data[i]; } 57 | memcpy(buffer + (i * GMP_NUMB_BYTES), &temp, GMP_NUMB_BYTES); 58 | } 59 | } 60 | 61 | inline int32_t read_int32_t(char const *buffer) 62 | { 63 | return isLittleEndian() ? __builtin_bswap32(*(int32_t *)buffer) : *(int32_t *)buffer; 64 | } 65 | 66 | inline void write_int32_t(char const *buffer, int32_t length) 67 | { 68 | *(int32_t *)buffer = isLittleEndian() ?
__builtin_bswap32(length) : length; 69 | } 70 | 71 | }; // namespace streaming -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/streaming_g1.cpp: -------------------------------------------------------------------------------- 1 | #include "streaming_g1.hpp" 2 | #include "streaming.hpp" 3 | #include "compression.hpp" 4 | 5 | namespace streaming 6 | { 7 | 8 | void write_g1_element_to_buffer(G1 const &element, char *buffer) 9 | { 10 | constexpr size_t num_limbs = sizeof(element.X) / GMP_NUMB_BYTES; 11 | libff::bigint<num_limbs> x = element.X.as_bigint(); 12 | libff::bigint<num_limbs> y = element.Y.as_bigint(); 13 | if (USE_COMPRESSION) 14 | { 15 | mp_limb_t set = ((mp_limb_t)y.test_bit(0)) << (GMP_NUMB_BITS - 1); 16 | x.data[x.N - 1] = x.data[x.N - 1] | set; 17 | write_bigint_to_buffer(x, buffer); 18 | } 19 | else 20 | { 21 | write_bigint_to_buffer(x, buffer); 22 | write_bigint_to_buffer(y, buffer + (num_limbs * GMP_NUMB_BYTES)); 23 | } 24 | } 25 | 26 | void write_g1_elements_to_buffer(std::vector<G1> const &elements, char *buffer) 27 | { 28 | constexpr size_t bytes_per_element = USE_COMPRESSION ? sizeof(Fq) : sizeof(Fq) * 2; 29 | 30 | for (size_t i = 0; i < elements.size(); ++i) 31 | { 32 | size_t byte_position = bytes_per_element * i; 33 | write_g1_element_to_buffer(elements[i], buffer + byte_position); 34 | } 35 | } 36 | 37 | G1 read_g1_element_from_buffer(char *buffer) 38 | { 39 | constexpr size_t num_limbs = sizeof(Fq) / GMP_NUMB_BYTES; 40 | 41 | libff::bigint<num_limbs> x; 42 | G1 element; 43 | 44 | // = &elements[i].X; 45 | // = &elements[i].Y; 46 | memcpy(&x, buffer, sizeof(Fq)); 47 | if (isLittleEndian()) 48 | { 49 | __bswap_bigint(x); 50 | } 51 | if (USE_COMPRESSION) 52 | { 53 | element = compression::decompress<Fq, G1, num_limbs>(x); 54 | // TODO: something here 55 | } 56 | else 57 | { 58 | libff::bigint<num_limbs> y; 59 | memcpy(&y, &buffer[sizeof(Fq)], sizeof(Fq)); 60 | if (isLittleEndian()) 61 | { 62 | __bswap_bigint(y); 63 | } 64 | element.X = Fq(x); 65 | element.Y = Fq(y); 66 | element.Z = Fq::one(); 67 | 68 | if (!element.is_well_formed()) 69 | { 70 | throw std::runtime_error("G1 points are not on the curve!"); 71 | } 72 | } 73 | return element; 74 | } 75 | 76 | void read_g1_elements_from_buffer(std::vector<G1> &elements, char *buffer, size_t buffer_size) 77 | { 78 | constexpr size_t bytes_per_element = USE_COMPRESSION ?
sizeof(Fq) : sizeof(Fq) * 2; 79 | size_t num_elements = buffer_size / bytes_per_element; 80 | elements.reserve(elements.size() + num_elements); 81 | 82 | for (size_t i = 0; i < num_elements; ++i) 83 | { 84 | elements.push_back(read_g1_element_from_buffer(&buffer[i * bytes_per_element])); 85 | } 86 | } 87 | 88 | } // namespace streaming -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/streaming_g1.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "libff_types.hpp" 3 | 4 | namespace streaming 5 | { 6 | 7 | void write_g1_element_to_buffer(G1 &element, char *buffer); 8 | 9 | void read_g1_elements_from_buffer(std::vector &elements, char *buffer, size_t buffer_size); 10 | 11 | void write_g1_elements_to_buffer(std::vector const &elements, char *buffer); 12 | 13 | } // namespace streaming -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/streaming_g2.cpp: -------------------------------------------------------------------------------- 1 | #include "streaming_g2.hpp" 2 | #include "streaming.hpp" 3 | 4 | namespace streaming 5 | { 6 | 7 | void write_g2_element_to_buffer(G2 const &element, char *buffer) 8 | { 9 | constexpr size_t num_limbs = sizeof(element.X.c0) / GMP_NUMB_BYTES; 10 | 11 | libff::bigint x0 = element.X.c0.as_bigint(); 12 | libff::bigint x1 = element.X.c1.as_bigint(); 13 | libff::bigint y0 = element.Y.c0.as_bigint(); 14 | if (USE_COMPRESSION) 15 | { 16 | mp_limb_t set = ((mp_limb_t)y0.test_bit(0)) << (GMP_NUMB_BITS - 1); 17 | x1.data[x1.N - 1] = x1.data[x1.N - 1] | set; 18 | write_bigint_to_buffer(x0, buffer); 19 | write_bigint_to_buffer(x1, buffer + (num_limbs * GMP_NUMB_BYTES)); 20 | } 21 | else 22 | { 23 | libff::bigint y1 = element.Y.c1.as_bigint(); 24 | write_bigint_to_buffer(x0, buffer); 25 | write_bigint_to_buffer(x1, buffer + (num_limbs * GMP_NUMB_BYTES)); 26 | write_bigint_to_buffer(y0, buffer + (num_limbs * GMP_NUMB_BYTES * 2)); 27 | write_bigint_to_buffer(y1, buffer + (num_limbs * GMP_NUMB_BYTES * 3)); 28 | } 29 | } 30 | 31 | void write_g2_elements_to_buffer(std::vector const &elements, char *buffer) 32 | { 33 | constexpr size_t bytes_per_element = USE_COMPRESSION ? 
sizeof(Fqe) : sizeof(Fqe) * 2; 34 | 35 | for (size_t i = 0; i < elements.size(); ++i) 36 | { 37 | size_t byte_position = bytes_per_element * i; 38 | write_g2_element_to_buffer(elements[i], buffer + byte_position); 39 | } 40 | } 41 | 42 | G2 read_g2_element_from_buffer(char *buffer) 43 | { 44 | constexpr size_t num_limbs = sizeof(Fq) / GMP_NUMB_BYTES; 45 | G2 element; 46 | libff::bigint x0; 47 | libff::bigint x1; 48 | 49 | // = &elements[i].X; 50 | // = &elements[i].Y; 51 | memcpy(&x0, buffer, sizeof(Fq)); 52 | memcpy(&x1, &buffer[sizeof(Fq)], sizeof(Fq)); 53 | 54 | if (isLittleEndian()) 55 | { 56 | __bswap_bigint(x0); 57 | __bswap_bigint(x1); 58 | } 59 | if (USE_COMPRESSION) 60 | { 61 | // elements[i] = compression::decompress(x); 62 | // TODO: something here 63 | } 64 | else 65 | { 66 | libff::bigint y0; 67 | libff::bigint y1; 68 | memcpy(&y0, &buffer[2 * sizeof(Fq)], sizeof(Fq)); 69 | memcpy(&y1, &buffer[3 * sizeof(Fq)], sizeof(Fq)); 70 | if (isLittleEndian()) 71 | { 72 | __bswap_bigint(y0); 73 | __bswap_bigint(y1); 74 | } 75 | element.X.c0 = Fq(x0); 76 | element.X.c1 = Fq(x1); 77 | element.Y.c0 = Fq(y0); 78 | element.Y.c1 = Fq(y1); 79 | element.Z.c0 = Fq::one(); 80 | element.Z.c1 = Fq::zero(); 81 | if (!element.is_well_formed()) 82 | { 83 | throw std::runtime_error("G2 points are not on the curve!"); 84 | } 85 | } 86 | return element; 87 | } 88 | 89 | void read_g2_elements_from_buffer(std::vector &elements, char *buffer, size_t buffer_size) 90 | { 91 | constexpr size_t bytes_per_element = USE_COMPRESSION ? sizeof(Fqe) : sizeof(Fqe) * 2; 92 | size_t num_elements = buffer_size / bytes_per_element; 93 | 94 | elements.reserve(elements.size() + num_elements); 95 | 96 | for (size_t i = 0; i < num_elements; ++i) 97 | { 98 | elements.push_back(read_g2_element_from_buffer(&buffer[i * bytes_per_element])); 99 | } 100 | } 101 | 102 | } // namespace streaming -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/streaming_g2.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "libff_types.hpp" 3 | 4 | namespace streaming 5 | { 6 | 7 | void write_g2_element_to_buffer(G2 &element, char *buffer); 8 | 9 | void read_g2_elements_from_buffer(std::vector &elements, char *buffer, size_t buffer_size); 10 | 11 | void write_g2_elements_to_buffer(std::vector const &elements, char *buffer); 12 | 13 | } -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/streaming_range.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "./streaming.hpp" 3 | #include "./streaming_g1.hpp" 4 | #include 5 | #include 6 | #include 7 | 8 | #include "omp.h" 9 | 10 | constexpr size_t POINTS_PER_RANGE_FILE = 1000; 11 | constexpr size_t MAX_RANGE = 10000000; 12 | namespace streaming 13 | { 14 | namespace bb = barretenberg; 15 | 16 | bb::g1::affine_element decompress(const bb::fq::field_t& x_in) 17 | { 18 | bb::fq::field_t uncompressed = x_in; 19 | bool y_bit_flag = (uncompressed.data[3] >> 63ULL) == 1ULL; 20 | 21 | uncompressed.data[3] = uncompressed.data[3] & 0x7fffffffffffffffULL; 22 | bb::fq::field_t x; 23 | bb::fq::to_montgomery_form(uncompressed, x); 24 | bb::fq::field_t three{{ 3, 0, 0, 0}}; 25 | bb::fq::to_montgomery_form(three, three); 26 | bb::fq::field_t yy; 27 | bb::fq::__sqr(x, yy); 28 | bb::fq::__mul(yy, x, yy); 29 | bb::fq::__add(yy, three, yy); 30 | bb::fq::field_t y; 31 | 
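// Recover y from the compressed x coordinate via the alt_bn128 curve equation
// y^2 = x^3 + 3; the top bit of the serialized x (y_bit_flag above) stores the
// parity of y and is used below to pick the correct root.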
bb::fq::__sqrt(yy, y); 32 | 33 | bb::fq::field_t y_raw; 34 | bb::fq::from_montgomery_form(y, y_raw); 35 | 36 | bool is_odd = ((y_raw.data[0] & 1ULL) == 1ULL); 37 | if (is_odd != y_bit_flag) 38 | { 39 | bb::fq::neg(y, y); 40 | } 41 | bb::g1::affine_element result; 42 | result.x = x; 43 | result.y = y; 44 | return result; 45 | } 46 | 47 | bb::g1::affine_element read_bberg_element_from_buffer(char *buffer) 48 | { 49 | bb::fq::field_t x; 50 | bb::fq::field_t x_buf; 51 | 52 | 53 | memcpy(&x_buf, buffer, sizeof(bb::fq::field_t)); 54 | 55 | if (isLittleEndian()) 56 | { 57 | x.data[3] = __bswap_64(x_buf.data[0]); 58 | x.data[2] = __bswap_64(x_buf.data[1]); 59 | x.data[1] = __bswap_64(x_buf.data[2]); 60 | x.data[0] = __bswap_64(x_buf.data[3]); 61 | } 62 | else 63 | { 64 | x.data[3] = (x_buf.data[0]); 65 | x.data[2] = (x_buf.data[1]); 66 | x.data[1] = (x_buf.data[2]); 67 | x.data[0] = (x_buf.data[3]); 68 | } 69 | 70 | bb::g1::affine_element element = decompress(x); 71 | if (!bb::g1::on_curve(element) || bb::g1::is_point_at_infinity(element)) { 72 | throw std::runtime_error("G1 points are not on the curve!"); 73 | } 74 | return element; 75 | } 76 | 77 | void read_bberg_elements_to_file(bb::g1::affine_element* elements, char *buffer, size_t buffer_size, bool force_compression) 78 | { 79 | const size_t bytes_per_element = sizeof(bb::fq::field_t); 80 | size_t num_elements = buffer_size / bytes_per_element; 81 | 82 | for (size_t i = 0; i < num_elements; ++i) 83 | { 84 | elements[i] = read_bberg_element_from_buffer(&buffer[i * bytes_per_element]); 85 | } 86 | } 87 | 88 | void read_file(std::string range_path, std::vector& points) 89 | { 90 | constexpr size_t num_files = (MAX_RANGE / POINTS_PER_RANGE_FILE) + 1; 91 | const size_t num_threads = omp_get_max_threads(); 92 | const size_t files_per_thread = num_files / num_threads; 93 | const size_t leftovers = num_files - (files_per_thread * num_threads); 94 | #pragma omp parallel for 95 | for (size_t j = 0; j < num_threads; ++j) 96 | { 97 | size_t start = (j * files_per_thread); 98 | size_t end = (j + 1) * files_per_thread; 99 | if (j == num_threads - 1) 100 | { 101 | end += leftovers; 102 | } 103 | for (size_t i = start; i < end; ++i) 104 | { 105 | if ((i % 100) == 0) 106 | { 107 | printf("i = %lu \n", i); 108 | } 109 | size_t g1_buffer_size = 32 * POINTS_PER_RANGE_FILE; 110 | if (i == num_files - 1) 111 | { 112 | g1_buffer_size = 32; // only 1 point here 113 | } 114 | std::string filename = range_path + "data" + std::to_string(i * POINTS_PER_RANGE_FILE) + ".dat"; 115 | 116 | auto buffer = read_file_into_buffer(filename); 117 | read_bberg_elements_to_file(&points[i * POINTS_PER_RANGE_FILE], &buffer[0], g1_buffer_size, true); 118 | } 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/streaming_transcript.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "streaming.hpp" 3 | 4 | constexpr size_t POINTS_PER_TRANSCRIPT = 10000000; 5 | 6 | namespace streaming 7 | { 8 | 9 | struct Manifest 10 | { 11 | uint32_t transcript_number; 12 | uint32_t total_transcripts; 13 | uint32_t total_g1_points; 14 | uint32_t total_g2_points; 15 | uint32_t num_g1_points; 16 | uint32_t num_g2_points; 17 | uint32_t start_from; 18 | }; 19 | 20 | size_t get_transcript_size(Manifest const &manifest); 21 | 22 | void read_manifest(std::vector &buffer, Manifest &manifest); 23 | 24 | std::vector read_checksum(std::string const &path); 25 | 26 
| void read_transcript(std::vector &g1_x, std::vector &g2_x, Manifest &manifest, std::string const &path); 27 | 28 | void read_transcript_manifest(Manifest &manifest, std::string const &path); 29 | 30 | void read_transcript_g1_points(std::vector &g1_x, std::string const &path, int offset, size_t num); 31 | 32 | void read_transcript_g2_points(std::vector &g2_x, std::string const &path, int offset, size_t num); 33 | 34 | void write_transcript(std::vector const &g1_x, std::vector const &g2_x, Manifest const &manifest, std::string const &path); 35 | 36 | std::string getTranscriptInPath(std::string const &dir, size_t num); 37 | 38 | void read_transcripts_g1_points(std::vector &g1_x, std::string const &dir); 39 | 40 | } // namespace streaming -------------------------------------------------------------------------------- /setup-tools/src/aztec_common/timer.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | class Timer 8 | { 9 | private: 10 | struct timespec _startTime; 11 | struct timespec _endTime; 12 | 13 | public: 14 | Timer() 15 | : _endTime({}) 16 | { 17 | start(); 18 | } 19 | 20 | void start() 21 | { 22 | clock_gettime(CLOCK_REALTIME, &_startTime); 23 | } 24 | 25 | void end() 26 | { 27 | clock_gettime(CLOCK_REALTIME, &_endTime); 28 | } 29 | 30 | std::string toString() const 31 | { 32 | struct timespec endTime; 33 | if (_endTime.tv_nsec == 0 && _endTime.tv_sec == 0) 34 | { 35 | clock_gettime(CLOCK_REALTIME, &endTime); 36 | } 37 | else 38 | { 39 | endTime = _endTime; 40 | } 41 | 42 | long seconds = endTime.tv_sec - _startTime.tv_sec; 43 | long ns = endTime.tv_nsec - _startTime.tv_nsec; 44 | 45 | if (_startTime.tv_nsec > endTime.tv_nsec) 46 | { // clock underflow 47 | --seconds; 48 | ns += 1000000000; 49 | } 50 | 51 | return std::to_string((double)seconds + (double)ns / (double)1000000000); 52 | } 53 | }; -------------------------------------------------------------------------------- /setup-tools/src/generate_h/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | # trusted_setup_post_processing range 3 | # copyright spilsbury holdings 2019 4 | 5 | find_package (Threads) 6 | find_package (OpenMP) 7 | 8 | add_executable( 9 | generate_h 10 | range_multi_exp.hpp 11 | range_multi_exp.cpp 12 | main.cpp 13 | ) 14 | 15 | if(OpenMP_CXX_FOUND) 16 | target_link_libraries(generate_h PUBLIC OpenMP::OpenMP_CXX) 17 | endif() 18 | 19 | target_link_libraries( 20 | generate_h 21 | PRIVATE 22 | aztec_common 23 | ff 24 | ${CMAKE_THREAD_LIBS_INIT} 25 | ${GMP_LIBRARIES} 26 | barretenberg 27 | ) 28 | 29 | target_include_directories( 30 | generate_h 31 | PRIVATE 32 | ${DEPENDS_DIR}/libff 33 | ${DEPENDS_DIR}/libfqfft 34 | ${include_dir} 35 | ${private_include_dir} 36 | ${DEPENDS_DIR}/barretenberg/src 37 | ) 38 | 39 | set_target_properties(generate_h PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..) 
40 | -------------------------------------------------------------------------------- /setup-tools/src/generate_h/main.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #include 6 | #include 7 | #include "range_multi_exp.hpp" 8 | 9 | int main(int argc, char **argv) 10 | { 11 | if (argc < 4) 12 | { 13 | std::cout << "usage: " << argv[0] << " [batches]" << std::endl; 14 | return 1; 15 | } 16 | const std::string generator_path = argv[1]; 17 | const std::string g1x_path = argv[2]; 18 | const size_t kmax = strtol(argv[3], NULL, 0); 19 | const size_t batches = argc > 4 ? strtol(argv[4], NULL, 0) : 4; 20 | 21 | libff::alt_bn128_pp::init_public_params(); 22 | 23 | try 24 | { 25 | generate_h::compute_h(generator_path, g1x_path, kmax + 1, batches); 26 | } 27 | catch (std::exception const &e) 28 | { 29 | std::cerr << e.what() << std::endl; 30 | return 1; 31 | } 32 | return 0; 33 | } -------------------------------------------------------------------------------- /setup-tools/src/generate_h/range_multi_exp.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #include "range_multi_exp.hpp" 6 | 7 | #include 8 | #include 9 | 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | namespace generate_h 18 | { 19 | 20 | void *map_file(std::string const &filename) 21 | { 22 | int fd = open(filename.c_str(), O_RDONLY); 23 | assert(fd != -1); 24 | 25 | struct stat sb; 26 | if (fstat(fd, &sb) != -1) 27 | { 28 | assert(false); 29 | } 30 | 31 | void *data = mmap(0, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0); 32 | assert(data != MAP_FAILED); 33 | close(fd); 34 | 35 | return data; 36 | } 37 | 38 | bb::g1::element process_range(bb::g1::affine_element *const &powers_of_x, bb::fr::field_t *const &generator_coefficients, size_t start, size_t num) 39 | { 40 | // Scalars are mutated, so copy them first. 41 | auto range_coefficients = std::vector(num); 42 | memcpy(&range_coefficients[0], generator_coefficients + 1 + start, num * sizeof(bb::fr::field_t)); 43 | return bb::scalar_multiplication::pippenger_low_memory(&range_coefficients[0], powers_of_x + 1 + start, num); 44 | } 45 | 46 | bb::g1::element batch_process_range(size_t polynomial_degree, size_t batch_num, bb::g1::affine_element *const &g1_x, bb::fr::field_t *const &generator_polynomial) 47 | { 48 | size_t batch_size = polynomial_degree / batch_num; 49 | size_t leftovers = polynomial_degree % batch_size; 50 | 51 | bb::g1::element result = {.x = {0}, .y = {0}, .z = {0}}; 52 | bb::g1::set_infinity(result); 53 | for (size_t i = 0; i < batch_num; ++i) 54 | { 55 | auto r = process_range(g1_x, generator_polynomial, batch_size * i, (i == batch_num - 1) ? batch_size + leftovers : batch_size); 56 | bb::g1::add(r, result, result); 57 | } 58 | 59 | return result; 60 | } 61 | 62 | void compute_h(std::string const &generator_path, std::string const &g1x_path, size_t polynomial_degree, size_t batches) 63 | { 64 | Timer total_timer; 65 | 66 | std::cerr << "Loading data..." 
<< std::endl; 67 | Timer data_timer; 68 | bb::fr::field_t *generator_coefficients = (bb::fr::field_t *)map_file(generator_path); 69 | bb::g1::affine_element *g1_x = (bb::g1::affine_element *)map_file(g1x_path); 70 | std::cerr << "Loaded in " << data_timer.toString() << "s" << std::endl; 71 | 72 | Timer compute_timer; 73 | bb::g1::element result = batch_process_range(polynomial_degree, batches, g1_x, generator_coefficients); 74 | 75 | std::cerr << "Compute time: " << compute_timer.toString() << "s" << std::endl; 76 | std::cerr << "Total time: " << total_timer.toString() << "s" << std::endl; 77 | 78 | bb::g1::affine_element r; 79 | bb::g1::jacobian_to_affine(result, r); 80 | bb::fq::from_montgomery_form(r.x, r.x); 81 | bb::fq::from_montgomery_form(r.y, r.y); 82 | gmp_printf("[\"0x%064Nx\",\"0x%064Nx\"]\n", r.x.data, 4L, r.y.data, 4L); 83 | } 84 | 85 | } // namespace generate_h -------------------------------------------------------------------------------- /setup-tools/src/generate_h/range_multi_exp.hpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | namespace bb = barretenberg; 12 | 13 | namespace generate_h 14 | { 15 | 16 | bb::g1::element process_range(bb::g1::affine_element *const &powers_of_x, bb::fr::field_t *const &generator_coefficients, size_t start, size_t num); 17 | 18 | bb::g1::element batch_process_range(size_t polynomial_degree, size_t batch_num, bb::g1::affine_element *const &g1_x, bb::fr::field_t *const &generator_polynomial); 19 | 20 | void compute_h(std::string const &generator_path, std::string const& g1x_path, size_t polynomial_degree, size_t batches); 21 | 22 | } // namespace generate_h -------------------------------------------------------------------------------- /setup-tools/src/generator/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | # trusted_setup_post_processing 3 | # copyright spilsbury holdings 2019 4 | 5 | add_executable( 6 | compute_generator_polynomial 7 | compute_generator_polynomial.tcc 8 | compute_generator_polynomial.hpp main.cpp 9 | ) 10 | target_link_libraries( 11 | compute_generator_polynomial 12 | PRIVATE 13 | aztec_common 14 | ff 15 | ${GMP_LIBRARIES} 16 | ) 17 | target_include_directories( 18 | compute_generator_polynomial 19 | PRIVATE 20 | ${DEPENDS_DIR}/libff/ 21 | ${DEPENDS_DIR}/libfqfft/ 22 | ${private_include_dir} 23 | ${include_dir} 24 | ) 25 | 26 | set_target_properties(compute_generator_polynomial PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..) 
27 | -------------------------------------------------------------------------------- /setup-tools/src/generator/compute_generator_polynomial.hpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | * 5 | **/ 6 | #pragma once 7 | 8 | namespace generator 9 | { 10 | template 11 | void compute_generator_polynomial(const size_t kmax); 12 | } 13 | #include "compute_generator_polynomial.tcc" -------------------------------------------------------------------------------- /setup-tools/src/generator/compute_generator_polynomial.tcc: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | * 5 | **/ 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include 18 | #include 19 | #include 20 | 21 | #include 22 | #include 23 | 24 | #include 25 | 26 | #include "compute_generator_polynomial.hpp" 27 | 28 | namespace generator 29 | { 30 | template 31 | std::vector compute_generator_polynomial(const size_t kmax) 32 | { 33 | size_t polynomial_degree = kmax + 1; 34 | size_t log2_polynomial_degree = log2(polynomial_degree); 35 | size_t acc = 1 << log2_polynomial_degree; 36 | if (acc < polynomial_degree) 37 | { 38 | ++log2_polynomial_degree; 39 | acc = acc * 2; 40 | } 41 | 42 | size_t num_rounds = log2_polynomial_degree; 43 | 44 | std::vector> coefficients; 45 | 46 | FieldT work_var = FieldT::zero(); 47 | 48 | // We want to compute the coefficients of the polynomial P(X) = (X - 0)(X - 1)...(X - n) 49 | // Start by creating vector of n degree-1 polynomials: X - 0, X - 1, ..., X - n 50 | // Then call `libfqfft::_polynomial_multiplication` on polynomial pairs 51 | // This will create n/2 vector of degree-2 polynomials: (X - 0)(X - 1), (X - 2)(X - 3), ..., (X - n-1)(X - n) 52 | // Repeat process of multiplying polynomial pairs, until we compute P(X) 53 | for (size_t i = 0; i < polynomial_degree; ++i) 54 | { 55 | std::vector init; 56 | init.emplace_back(work_var); 57 | init.emplace_back(FieldT::one()); 58 | coefficients.emplace_back(init); 59 | work_var -= FieldT::one(); 60 | } 61 | 62 | for (size_t i = polynomial_degree; i < acc; ++i) 63 | { 64 | std::vector init; 65 | init.emplace_back(FieldT::one()); 66 | coefficients.emplace_back(init); 67 | } 68 | 69 | for (size_t i = 0; i < num_rounds; ++i) 70 | { 71 | std::vector> work_vector; 72 | for (size_t j = 0; j < coefficients.size(); j += 2) 73 | { 74 | std::vector c(1, FieldT::zero()); 75 | libfqfft::_polynomial_multiplication(c, coefficients[j], coefficients[j + 1]); 76 | c.emplace_back(FieldT::zero()); 77 | work_vector.emplace_back(c); 78 | } 79 | work_vector.swap(coefficients); 80 | } 81 | 82 | libfqfft::_condense(coefficients[0]); 83 | return coefficients[0]; 84 | } 85 | } // namespace generator 86 | -------------------------------------------------------------------------------- /setup-tools/src/generator/main.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | * 5 | **/ 6 | #include 7 | 8 | #include "compute_generator_polynomial.hpp" 9 | 10 | int main(int argc, char **argv) 11 | { 12 | if (argc != 3) 13 | { 14 | std::cout << "usage: " << argv[0] << " " << std::endl; 15 | return 1; 16 | } 17 | const size_t kmax = strtol(argv[1], NULL, 0); 18 | const std::string output = argv[2]; 19 | 20 | 
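// Worked example (illustrative): for kmax = 3 the call below computes
// P(X) = (X - 0)(X - 1)(X - 2)(X - 3) = X^4 - 6X^3 + 11X^2 - 6X,
// built in log2(4) = 2 rounds of pairwise multiplication:
// round 1: X(X - 1) = X^2 - X and (X - 2)(X - 3) = X^2 - 5X + 6; round 2: their product.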
printf("initializing libff \n"); 21 | libff::alt_bn128_pp::init_public_params(); 22 | printf("calling compute generator poly \n"); 23 | std::vector<Fr> coefficients = generator::compute_generator_polynomial<Fr>(kmax); 24 | printf("computed polynomial coefficients, writing to disk...\n"); 25 | 26 | // Write the data out in memory format. Assumes this is run on the same arch as compute_range_polynomial. 27 | std::ofstream file(output); 28 | file.write((char *)&coefficients[0], coefficients.size() * sizeof(Fr)); 29 | 30 | return 0; 31 | } -------------------------------------------------------------------------------- /setup-tools/src/print-point/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | find_package (Threads) 2 | 3 | add_executable( 4 | print_point 5 | main.cpp 6 | ) 7 | 8 | target_link_libraries( 9 | print_point 10 | PRIVATE 11 | ff 12 | ${CMAKE_THREAD_LIBS_INIT} 13 | ${GMP_LIBRARIES} 14 | aztec_common 15 | ) 16 | 17 | target_include_directories( 18 | print_point 19 | PRIVATE 20 | ${DEPENDS_DIR}/libff 21 | ${DEPENDS_DIR}/blake2b/ref 22 | ${private_include_dir} 23 | ) 24 | 25 | set_target_properties(print_point PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..) 26 | -------------------------------------------------------------------------------- /setup-tools/src/print-point/main.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #include 6 | #include 7 | #include 8 | 9 | int main(int argc, char **argv) 10 | { 11 | if (argc != 4) 12 | { 13 | std::cout << "usage: " << argv[0] << " <transcript_path> <g1|g2> <point_num>" << std::endl; 14 | return 1; 15 | } 16 | std::string const transcript_path(argv[1]); 17 | std::string const curve(argv[2]); 18 | size_t const point_num = strtol(argv[3], NULL, 0); 19 | 20 | if (!streaming::is_file_exist(transcript_path)) 21 | { 22 | std::cout << "Transcript not found: " << transcript_path << std::endl; 23 | return 1; 24 | } 25 | 26 | libff::alt_bn128_pp::init_public_params(); 27 | 28 | try 29 | { 30 | std::vector<G1> g1_x; 31 | std::vector<G2> g2_x; 32 | 33 | if (curve == "g1") 34 | { 35 | streaming::read_transcript_g1_points(g1_x, transcript_path, point_num, 1); 36 | if (g1_x.size() != 1) 37 | { 38 | throw std::runtime_error("Point not found."); 39 | } 40 | G1 point = g1_x[0]; 41 | point.to_affine_coordinates(); 42 | gmp_printf("[\"0x%064Nx\",\"0x%064Nx\"]\n", 43 | point.X.as_bigint().data, 4L, 44 | point.Y.as_bigint().data, 4L); 45 | } 46 | else 47 | { 48 | streaming::read_transcript_g2_points(g2_x, transcript_path, point_num, 1); 49 | if (g2_x.size() != 1) 50 | { 51 | throw std::runtime_error("Point not found."); 52 | } 53 | G2 point = g2_x[0]; 54 | point.to_affine_coordinates(); 55 | gmp_printf("[\"0x%064Nx\",\"0x%064Nx\",\"0x%064Nx\",\"0x%064Nx\"]\n", 56 | point.X.c0.as_bigint().data, 4L, 57 | point.X.c1.as_bigint().data, 4L, 58 | point.Y.c0.as_bigint().data, 4L, 59 | point.Y.c1.as_bigint().data, 4L); 60 | } 61 | 62 | return 0; 63 | } 64 | catch (std::exception const &err) 65 | { 66 | std::cerr << err.what() << std::endl; 67 | return 1; 68 | } 69 | } -------------------------------------------------------------------------------- /setup-tools/src/range-prep/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | # trusted_setup_post_processing range 3 | # copyright spilsbury holdings 2019 4 | 5 | add_executable( 6 | prep_range_data 7 | main.cpp 8 | ) 9 | 10 | target_link_libraries( 11 | prep_range_data 12
| PRIVATE 13 | aztec_common 14 | ff 15 | ${CMAKE_THREAD_LIBS_INIT} 16 | ${GMP_LIBRARIES} 17 | barretenberg 18 | ) 19 | 20 | target_include_directories( 21 | prep_range_data 22 | PRIVATE 23 | ${DEPENDS_DIR}/libff 24 | ${DEPENDS_DIR}/libfqfft 25 | ${include_dir} 26 | ${private_include_dir} 27 | ${DEPENDS_DIR}/barretenberg/src 28 | ) 29 | 30 | set_target_properties(prep_range_data PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..) 31 | -------------------------------------------------------------------------------- /setup-tools/src/range-prep/main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | namespace bb = barretenberg; 7 | 8 | void transform_g1x(std::string const &setup_db_path, std::string const& output) 9 | { 10 | std::ofstream file(output); 11 | G1 one = G1::one(); 12 | one.to_affine_coordinates(); 13 | file.write((char *)&one, sizeof(bb::g1::affine_element)); 14 | 15 | size_t num = 0; 16 | std::string filename = streaming::getTranscriptInPath(setup_db_path, num); 17 | 18 | while (streaming::is_file_exist(filename)) 19 | { 20 | std::cout << "Loading " << filename << "..." << std::endl; 21 | streaming::Manifest manifest; 22 | streaming::read_transcript_manifest(manifest, filename); 23 | std::vector g1_x; 24 | streaming::read_transcript_g1_points(g1_x, filename, 0, manifest.num_g1_points); 25 | 26 | // Transform to affine. 27 | std::vector bx(g1_x.size()); 28 | for (size_t i = 0; i < g1_x.size(); ++i) 29 | { 30 | g1_x[i].to_affine_coordinates(); 31 | memcpy(&bx[i], &g1_x[i], sizeof(bb::g1::affine_element)); 32 | } 33 | 34 | std::cout << "Writing " << g1_x.size() << " points..." << std::endl; 35 | file.write((char *)&bx[0], bx.size() * sizeof(bb::g1::affine_element)); 36 | 37 | filename = streaming::getTranscriptInPath(setup_db_path, ++num); 38 | } 39 | 40 | if (num == 0) 41 | { 42 | throw std::runtime_error("No input files found."); 43 | } 44 | 45 | std::cout << "Done." 
<< std::endl; 46 | } 47 | 48 | int main(int argc, char **argv) 49 | { 50 | if (argc != 3) 51 | { 52 | std::cout << "usage: " << argv[0] << " " << std::endl; 53 | return 1; 54 | } 55 | const std::string setup_db_path = argv[1]; 56 | const std::string output = argv[2]; 57 | 58 | libff::alt_bn128_pp::init_public_params(); 59 | 60 | try 61 | { 62 | transform_g1x(setup_db_path, output); 63 | } 64 | catch (std::exception const &err) 65 | { 66 | std::cout << err.what() << std::endl; 67 | return 1; 68 | } 69 | 70 | return 0; 71 | } -------------------------------------------------------------------------------- /setup-tools/src/range/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | # trusted_setup_post_processing range 3 | # copyright spilsbury holdings 2019 4 | 5 | find_package (Threads) 6 | find_package (OpenMP) 7 | 8 | add_executable( 9 | compute_range_polynomial 10 | range_multi_exp.hpp 11 | range_multi_exp.cpp 12 | main.cpp 13 | ) 14 | 15 | if(OpenMP_CXX_FOUND) 16 | target_link_libraries(compute_range_polynomial PUBLIC OpenMP::OpenMP_CXX) 17 | endif() 18 | 19 | target_link_libraries( 20 | compute_range_polynomial 21 | PRIVATE 22 | aztec_common 23 | ff 24 | ${CMAKE_THREAD_LIBS_INIT} 25 | ${GMP_LIBRARIES} 26 | barretenberg 27 | ) 28 | 29 | target_include_directories( 30 | compute_range_polynomial 31 | PRIVATE 32 | ${DEPENDS_DIR}/libff 33 | ${DEPENDS_DIR}/libfqfft 34 | ${include_dir} 35 | ${private_include_dir} 36 | ${DEPENDS_DIR}/barretenberg/src 37 | ) 38 | 39 | set_target_properties(compute_range_polynomial PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..) 40 | -------------------------------------------------------------------------------- /setup-tools/src/range/main.cpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #include 6 | #include "range_multi_exp.hpp" 7 | 8 | int main(int argc, char **argv) 9 | { 10 | if (argc < 5) 11 | { 12 | std::cout << "usage: " << argv[0] << " " << std::endl; 13 | return 1; 14 | } 15 | const std::string generator_path = argv[1]; 16 | const std::string g1x_path = argv[2]; 17 | const size_t range_index = (size_t)atoi(argv[3]); 18 | const size_t kmax = strtol(argv[4], NULL, 0); 19 | const size_t batches = argc > 5 ? 
strtol(argv[5], NULL, 0) : 4; 20 | 21 | compute_range_polynomials(generator_path, g1x_path, range_index, kmax + 1, batches); 22 | return 0; 23 | } -------------------------------------------------------------------------------- /setup-tools/src/range/range_multi_exp.hpp: -------------------------------------------------------------------------------- 1 | /** 2 | * Setup 3 | * Copyright Spilsbury Holdings 2019 4 | **/ 5 | #pragma once 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | namespace bb = barretenberg; 12 | 13 | bb::g1::element process_range(int range_index, bb::fr::field_t &fa, bb::g1::affine_element *const powers_of_x, bb::fr::field_t *const generator_coefficients, size_t start, size_t num); 14 | 15 | bb::g1::element batch_process_range(size_t range_index, size_t polynomial_degree, size_t batch_num, bb::g1::affine_element *const &g1_x, bb::fr::field_t *const &generator_polynomial); 16 | 17 | void compute_range_polynomials(std::string const &generator_path, std::string const &g1x_path, size_t range_index, size_t polynomial_degree, size_t batches); -------------------------------------------------------------------------------- /setup-tools/src/range_verify/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | # trusted_setup_post_processing range 3 | # copyright spilsbury holdings 2019 4 | 5 | find_package (Threads) 6 | find_package (OpenMP) 7 | 8 | add_executable( 9 | verify_range_points 10 | main.cpp 11 | ) 12 | 13 | if(OpenMP_CXX_FOUND) 14 | target_link_libraries(verify_range_points PUBLIC OpenMP::OpenMP_CXX) 15 | endif() 16 | 17 | target_link_libraries( 18 | verify_range_points 19 | PRIVATE 20 | aztec_common 21 | ff 22 | ${CMAKE_THREAD_LIBS_INIT} 23 | ${GMP_LIBRARIES} 24 | barretenberg 25 | ) 26 | 27 | target_include_directories( 28 | verify_range_points 29 | PRIVATE 30 | ${DEPENDS_DIR}/libff 31 | ${DEPENDS_DIR}/libfqfft 32 | ${include_dir} 33 | ${private_include_dir} 34 | ${DEPENDS_DIR}/barretenberg/src 35 | ) 36 | 37 | set_target_properties(verify_range_points PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..) 38 | -------------------------------------------------------------------------------- /setup-tools/src/setup/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # trusted_setup_post_processing setup 2 | # copyright spilsbury holdings 2019 3 | 4 | find_package (Threads) 5 | 6 | # Standard setup binary. 7 | add_executable( 8 | setup 9 | setup.cpp 10 | setup.hpp 11 | utils.hpp 12 | main.cpp 13 | ) 14 | 15 | target_link_libraries( 16 | setup 17 | PRIVATE 18 | ff 19 | ${CMAKE_THREAD_LIBS_INIT} 20 | ${GMP_LIBRARIES} 21 | aztec_common 22 | ) 23 | 24 | target_include_directories( 25 | setup 26 | PRIVATE 27 | ${DEPENDS_DIR}/libff 28 | ${DEPENDS_DIR}/blake2b/ref 29 | ${private_include_dir} 30 | ) 31 | 32 | set_target_properties(setup PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..) 33 | 34 | # Super fast binary. Includes Barretenberg. 
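# (The setup, setup-fast and seal targets below all build the same
# setup.cpp/main.cpp sources; behaviour is switched at compile time:
# setup-fast defines SUPERFAST=1 and seal defines SEALING=1, both
# linking barretenberg, while the plain setup binary builds without either.)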
-------------------------------------------------------------------------------- /setup-tools/src/range_verify/CMakeLists.txt: --------------------------------------------------------------------------------
# trusted_setup_post_processing range
# copyright spilsbury holdings 2019

find_package (Threads)
find_package (OpenMP)

add_executable(
  verify_range_points
  main.cpp
)

if(OpenMP_CXX_FOUND)
  target_link_libraries(verify_range_points PUBLIC OpenMP::OpenMP_CXX)
endif()

target_link_libraries(
  verify_range_points
  PRIVATE
  aztec_common
  ff
  ${CMAKE_THREAD_LIBS_INIT}
  ${GMP_LIBRARIES}
  barretenberg
)

target_include_directories(
  verify_range_points
  PRIVATE
  ${DEPENDS_DIR}/libff
  ${DEPENDS_DIR}/libfqfft
  ${include_dir}
  ${private_include_dir}
  ${DEPENDS_DIR}/barretenberg/src
)

set_target_properties(verify_range_points PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..)
-------------------------------------------------------------------------------- /setup-tools/src/setup/CMakeLists.txt: --------------------------------------------------------------------------------
# trusted_setup_post_processing setup
# copyright spilsbury holdings 2019

find_package (Threads)

# Standard setup binary.
add_executable(
  setup
  setup.cpp
  setup.hpp
  utils.hpp
  main.cpp
)

target_link_libraries(
  setup
  PRIVATE
  ff
  ${CMAKE_THREAD_LIBS_INIT}
  ${GMP_LIBRARIES}
  aztec_common
)

target_include_directories(
  setup
  PRIVATE
  ${DEPENDS_DIR}/libff
  ${DEPENDS_DIR}/blake2b/ref
  ${private_include_dir}
)

set_target_properties(setup PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..)

# Super fast binary. Includes Barretenberg.
add_executable(
  setup-fast
  setup.cpp
  setup.hpp
  utils.hpp
  main.cpp
)

target_link_libraries(
  setup-fast
  PRIVATE
  ff
  ${CMAKE_THREAD_LIBS_INIT}
  ${GMP_LIBRARIES}
  aztec_common
  barretenberg
)

target_include_directories(
  setup-fast
  PRIVATE
  ${DEPENDS_DIR}/libff
  ${DEPENDS_DIR}/blake2b/ref
  ${private_include_dir}
  ${DEPENDS_DIR}/barretenberg/src
)

target_compile_definitions(setup-fast PRIVATE SUPERFAST=1)

set_target_properties(setup-fast PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..)

# Sealing binary. Toxic waste is hash of previous transcripts. Includes barretenberg.
add_executable(
  seal
  setup.cpp
  setup.hpp
  utils.hpp
  main.cpp
)

target_link_libraries(
  seal
  PRIVATE
  ff
  ${CMAKE_THREAD_LIBS_INIT}
  ${GMP_LIBRARIES}
  aztec_common
  barretenberg
)

target_include_directories(
  seal
  PRIVATE
  ${DEPENDS_DIR}/libff
  ${DEPENDS_DIR}/blake2b/ref
  ${private_include_dir}
  ${DEPENDS_DIR}/barretenberg/src
)

target_compile_definitions(seal PRIVATE SEALING=1)

set_target_properties(seal PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..)
-------------------------------------------------------------------------------- /setup-tools/src/setup/main.cpp: --------------------------------------------------------------------------------
/**
 * Setup
 * Copyright Spilsbury Holdings 2019
 **/
#include <sys/stat.h>

#include <iostream>
#include <string>
#include <cstdlib>
#include <stdexcept>
#include <libff/algebra/curves/alt_bn128/alt_bn128_pp.hpp>

#include "setup.hpp"

int main(int argc, char **argv)
{
    if (argc < 2 || argc > 4)
    {
        std::cerr << "usage: " << argv[0] << " <transcript_dir> [<num_g1_points> <num_g2_points>]" << std::endl;
        return 1;
    }
    std::string const dir = argv[1];

    libff::alt_bn128_pp::init_public_params();

    try
    {
        struct stat info;
        if (stat(dir.c_str(), &info) != 0)
        {
            throw std::runtime_error("Transcript directory not found.");
        }

#ifdef SEALING
        seal(dir);
#else
        size_t num_g1_points = (argc >= 3) ? strtol(argv[2], NULL, 0) : 0;
        size_t num_g2_points = (argc == 4) ? strtol(argv[3], NULL, 0) : 1;

        run_setup(dir, num_g1_points, num_g2_points);
#endif
    }
    catch (std::exception const &err)
    {
        std::cerr << err.what() << std::endl;
        return 1;
    }

    return 0;
}
-------------------------------------------------------------------------------- /setup-tools/src/setup/setup.hpp: --------------------------------------------------------------------------------
/**
 * Setup
 * Copyright Spilsbury Holdings 2019
 **/
#pragma once
#include <string>
#include <vector>
#include <atomic>

void compute_g1_thread(Fr const &_y, std::vector<G1> &g_x, size_t transcript_start, size_t thread_start, size_t thread_range, std::atomic<size_t> &progress);

void run_setup(std::string const &dir, size_t num_g1_points, size_t num_g2_points);

#ifdef SEALING
void seal(std::string const &dir);
#endif
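compute_g1_thread is not documented here, but the tests further down pin its behaviour in the simple case: starting from g_x[i] equal to the G1 generator, with transcript_start = thread_start = 0 and a secret scalar y, the checks only pass if each point ends up scaled by the next power of y:

$$ g_x[i] \;\longmapsto\; y^{\,i+1} \cdot g_x[i], \qquad 0 \le i < \mathrm{thread\_range}. $$

That reading is inferred from the tests (the validate_transcript test builds the matching G2 side explicitly as $y^{i+1} \cdot g_2$); it is not lifted from setup.cpp, which is not shown in this listing.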
-------------------------------------------------------------------------------- /setup-tools/src/setup/utils.hpp: --------------------------------------------------------------------------------
/**
 * Setup
 * Copyright Spilsbury Holdings 2019
 *
 **/
#pragma once

#include <stdlib.h>
#include <gmp.h>
#include <stdexcept>

namespace utils
{

inline bool isLittleEndian()
{
    int num = 42;
    return (*(char *)&num == 42);
}

// Montgomery batch inversion: normalizes `number` pairs of Jacobian points
// (x, alpha_x) to affine form using a single field inversion.
template <typename FieldT, typename GroupT>
void batch_normalize(size_t start, size_t number, GroupT *x, GroupT *alpha_x)
{
    FieldT accumulator = FieldT::one();
    FieldT *temporaries = static_cast<FieldT *>(malloc(2 * number * sizeof(FieldT)));
    for (size_t i = 0; i < number; ++i)
    {
        temporaries[2 * i] = accumulator;
        accumulator = accumulator * x[i + start].Z;
        temporaries[2 * i + 1] = accumulator;
        accumulator = accumulator * alpha_x[i + start].Z;
    }
    accumulator = accumulator.inverse();

    FieldT zzInv;
    FieldT zInv;
    // Walks backwards; the unsigned index wraps past zero to terminate.
    for (size_t i = number - 1; i < (size_t)(-1); --i)
    {
        zInv = accumulator * temporaries[2 * i + 1];
        zzInv = zInv * zInv;
        alpha_x[i + start].X = alpha_x[i + start].X * zzInv;
        alpha_x[i + start].Y = alpha_x[i + start].Y * (zzInv * zInv);
        accumulator = accumulator * alpha_x[i + start].Z; // temporaries[2 * i + 1];
        alpha_x[i + start].Z = FieldT::one();
        zInv = accumulator * temporaries[2 * i];
        zzInv = zInv * zInv;
        x[i + start].X = x[i + start].X * zzInv;
        x[i + start].Y = x[i + start].Y * (zzInv * zInv);
        accumulator = accumulator * x[i + start].Z; // temporaries[2 * i + 1];
        x[i + start].Z = FieldT::one();
    }
    free(temporaries);
}

// Single-array variant of the above.
template <typename FieldT, typename GroupT>
void batch_normalize(size_t start, size_t number, GroupT *x)
{
    FieldT accumulator = FieldT::one();
    FieldT *temporaries = static_cast<FieldT *>(malloc(number * sizeof(FieldT)));
    for (size_t i = 0; i < number; ++i)
    {
        temporaries[i] = accumulator;
        accumulator = accumulator * x[i + start].Z;
    }
    accumulator = accumulator.inverse();

    FieldT zzInv;
    FieldT zInv;
    for (size_t i = number - 1; i < (size_t)(-1); --i)
    {
        zInv = accumulator * temporaries[i];
        zzInv = zInv * zInv;
        x[i + start].X = x[i + start].X * zzInv;
        x[i + start].Y = x[i + start].Y * (zzInv * zInv);
        accumulator = accumulator * x[i + start].Z; // temporaries[2 * i + 1];
        x[i + start].Z = FieldT::one();
    }
    free(temporaries);
}

// Interprets `buffer` as big-endian GMP limbs and builds a field element from them.
template <typename FieldT>
FieldT convert_buffer_to_field_element(char *buffer, size_t size)
{
    if (size < sizeof(FieldT))
    {
        // throw an error if the buffer size is too small.
        // Don't want to just zero-pad, it is likely that something has gone wrong - with our current use-case,
        // buffer should be from a PRNG, which won't translate into a uniformly randomly distributed element Fp
        // if the buffer is too small
        throw std::runtime_error("cannot convert buffer to field element: buffer too small");
    }
    constexpr size_t num_limbs = sizeof(FieldT) / GMP_NUMB_BYTES;

    FieldT element;
    auto element_bigint = element.as_bigint();
    mp_limb_t *element_ptr = (mp_limb_t *)buffer;
    for (size_t j = 0; j < num_limbs; ++j)
    {
        mp_limb_t limb = element_ptr[j];
        if (isLittleEndian())
        {
            limb = __builtin_bswap64(limb);
        }
        element_bigint.data[j] = limb;
    }
    element = FieldT(element_bigint);
    return element;
}
} // namespace utils
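The two batch_normalize overloads above are Montgomery's batch-inversion trick: converting a Jacobian point $(X, Y, Z)$ to affine needs $x = X/Z^2$ and $y = Y/Z^3$, and instead of inverting every $Z$ separately, one inversion of the running product plus a backward sweep recovers each $Z^{-1}$. A self-contained toy over a small prime field, purely to show the prefix-product mechanics (`P`, `batch_invert` and friends are illustrative stand-ins, not the repo's types):

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy prime field, p = 101. Illustrative stand-in for the real Fq.
static const uint64_t P = 101;
static uint64_t mul(uint64_t a, uint64_t b) { return (a * b) % P; }
static uint64_t inv(uint64_t a) // Fermat's little theorem: a^(p-2) mod p
{
    uint64_t result = 1, e = P - 2;
    while (e)
    {
        if (e & 1) result = mul(result, a);
        a = mul(a, a);
        e >>= 1;
    }
    return result;
}

// Montgomery's trick: invert every element of z with a single field inversion.
static std::vector<uint64_t> batch_invert(std::vector<uint64_t> const &z)
{
    std::vector<uint64_t> prefix(z.size());
    uint64_t acc = 1;
    for (size_t i = 0; i < z.size(); ++i)
    {
        prefix[i] = acc;      // prefix[i] = z[0] * ... * z[i-1]
        acc = mul(acc, z[i]);
    }
    acc = inv(acc);           // the one and only inversion
    std::vector<uint64_t> out(z.size());
    for (size_t i = z.size(); i-- > 0;)
    {
        out[i] = mul(acc, prefix[i]); // acc == (z[0] * ... * z[i])^-1 here
        acc = mul(acc, z[i]);         // drop z[i] from the accumulator
    }
    return out;
}

int main()
{
    std::vector<uint64_t> z = {3, 7, 42, 99};
    for (uint64_t v : batch_invert(z))
        printf("%llu\n", (unsigned long long)v);
    return 0;
}

The forward pass records prefix products, the single `inv` call inverts the grand product, and the backward pass peels one factor off per step: n inverses for the price of one inversion and roughly 3n multiplications.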
-------------------------------------------------------------------------------- /setup-tools/src/verify/CMakeLists.txt: --------------------------------------------------------------------------------
find_package (Threads)

add_executable(
  verify
  main.cpp
  verifier.hpp
  verifier.cpp
)

target_link_libraries(
  verify
  PRIVATE
  ff
  ${CMAKE_THREAD_LIBS_INIT}
  ${GMP_LIBRARIES}
  aztec_common
)

target_include_directories(
  verify
  PRIVATE
  ${DEPENDS_DIR}/libff
  ${DEPENDS_DIR}/blake2b/ref
  ${private_include_dir}
)

set_target_properties(verify PROPERTIES RUNTIME_OUTPUT_DIRECTORY ../..)
-------------------------------------------------------------------------------- /setup-tools/src/verify/verifier.hpp: --------------------------------------------------------------------------------
/**
 * Setup
 * Copyright Spilsbury Holdings 2019
 **/
#pragma once

#include <vector>

template <typename GroupT>
struct VerificationKey
{
    GroupT lhs;
    GroupT rhs;
};

bool same_ratio(VerificationKey<G1> const &g1_key, VerificationKey<G2> const &g2_key);

template <typename GroupT>
VerificationKey<GroupT> same_ratio_preprocess(std::vector<GroupT> const &g_x);

bool validate_polynomial_evaluation(std::vector<G1> const &evaluation, G2 const &comparator);

bool validate_polynomial_evaluation(std::vector<G2> const &evaluation, G1 const &comparator);

bool validate_transcript(
    G1 &g1_0,
    G2 &g2_0,
    std::vector<G1> const &g1_x,
    std::vector<G2> const &g2_x,
    std::vector<G1> const &g1_x_previous,
    std::vector<G2> const &g2_y);

bool validate_manifest(streaming::Manifest const &manifest, size_t total_g1_points, size_t total_g2_points, size_t points_per_transcript, size_t transcript_number);
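same_ratio is the classic pairing check used on powers-of-tau style transcripts. With a G1 key $(L_1, R_1)$ and a G2 key $(L_2, R_2)$, the relation being tested is

$$ e(L_1, R_2) \stackrel{?}{=} e(R_1, L_2), $$

which holds exactly when $L_1 = s \cdot R_1$ and $L_2 = s \cdot R_2$ for the same scalar $s$: both keys encode the same exponent ratio. This matches the same_ratio test below, where the G2 key is $(y \cdot g_2, g_2)$ and the G1 points are consecutive powers $y^i \cdot g_1$. same_ratio_preprocess presumably condenses the point vector into one key via a random linear combination of adjacent points (the standard batching trick); the equation above is a sketch of the convention, not lifted from verifier.cpp.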
-------------------------------------------------------------------------------- /setup-tools/test/CMakeLists.txt: --------------------------------------------------------------------------------
# aztec_trusted_setup
# Copyright 2019 Spilsbury Holdings Ltd

include(FetchContent)

### DEPENDENCIES
FetchContent_Declare(
  googletest
  GIT_REPOSITORY https://github.com/google/googletest.git
  GIT_TAG release-1.8.0
)

FetchContent_GetProperties(googletest)
if(NOT googletest_POPULATED)
  FetchContent_Populate(googletest)
  add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

mark_as_advanced(
  BUILD_GMOCK BUILD_GTEST BUILD_SHARED_LIBS
  gmock_build_tests gtest_build_samples gtest_build_tests
  gtest_disable_pthreads gtest_force_shared_crt gtest_hide_internal_symbols
)

set(setup_private_include_dir ${PROJECT_SOURCE_DIR}/src)

find_package (OpenMP)

add_executable(
  setup_tests
  test_aztec_common.cpp
  test_setup.cpp
  test_compute_range_polynomial.cpp
  ../src/verify/verifier.cpp
  ../src/range/range_multi_exp.cpp
  ../src/generate_h/range_multi_exp.cpp
  ../src/setup/setup.cpp
)

if(OpenMP_CXX_FOUND)
  target_link_libraries(setup_tests PUBLIC OpenMP::OpenMP_CXX)
endif()

target_compile_options(setup_tests PRIVATE -Wno-sign-compare)

target_link_libraries(
  setup_tests
  PRIVATE
  ff
  aztec_common
  barretenberg
  gtest
  gmock
  gtest_main
)

target_include_directories(
  setup_tests
  PRIVATE
  ${PROJECT_SOURCE_DIR}/depends/libfqfft
  ${private_include_dir}
  ${DEPENDS_DIR}/barretenberg/src
)

set_target_properties(
  setup_tests
  PROPERTIES RUNTIME_OUTPUT_DIRECTORY .
)

add_test(
  TARGET
  setup_tests
  TEST_PREFIX
  ${PROJECT_NAME}/tests/
)

target_compile_definitions(setup_tests PRIVATE SUPERFAST=1)
-------------------------------------------------------------------------------- /setup-tools/test/test_setup.cpp: --------------------------------------------------------------------------------
#include <gtest/gtest.h>

#include <setup/setup.hpp>
#include <setup/utils.hpp>
#include <verify/verifier.hpp>
#include <aztec_common/libff_types.hpp>
#include "test_utils.hpp"

TEST(setup, batch_normalize_works)
{
    libff::init_alt_bn128_params();

    size_t N = 100;
    constexpr size_t num_limbs = sizeof(Fq) / GMP_NUMB_BYTES;

    std::vector<G1> points;
    std::vector<G1> normalized;
    std::vector<G1> dummy;

    points.reserve(100);
    normalized.reserve(100);
    for (size_t i = 0; i < N; ++i)
    {
        G1 point = G1::random_element();
        points.emplace_back(point);
        normalized.emplace_back(point);
        dummy.emplace_back(point);
    }
    utils::batch_normalize<Fq, G1>(0, N, &normalized[0], &dummy[0]);
    for (size_t i = 0; i < N; ++i)
    {
        points[i].to_affine_coordinates();
        test_utils::validate_g1_point<num_limbs>(points[i], normalized[i]);
    }
}

TEST(setup, same_ratio)
{
    libff::init_alt_bn128_params();
    size_t N = 100;
    std::vector<G1> points(N, G1::one());
    Fr y = Fr::random_element();
    std::atomic<size_t> progress(0);
    compute_g1_thread(y, points, 0, 0, N, progress);

    VerificationKey<G2> g2_key;
    g2_key.lhs = y * G2::one();
    g2_key.rhs = G2::one();
    VerificationKey<G1> g1_key;

    g1_key = same_ratio_preprocess(points);

    bool result = same_ratio(g1_key, g2_key);

    EXPECT_EQ(result, true);
}

TEST(setup, validate_polynomial_evaluation)
{
    libff::init_alt_bn128_params();
    size_t N = 100;
    std::vector<G1> points(N, G1::one());
    Fr y = Fr::random_element();
    std::atomic<size_t> progress(0);
    compute_g1_thread(y, points, 0, 0, N, progress);
    G2 comparator = y * G2::one();

    bool result = validate_polynomial_evaluation(points, comparator);

    EXPECT_EQ(result, true);
}

TEST(setup, validate_transcript)
{
    libff::init_alt_bn128_params();

    constexpr size_t num_limbs = sizeof(Fq) / GMP_NUMB_BYTES;
    size_t N = 100;
    std::vector<G1> g1_x_prev, g1_x;
    std::vector<G2> g2_x_prev, g2_x;
    G2 g2_y;

    {
        Fr y = Fr::random_element();
        Fr accumulator = y;
        for (size_t i = 0; i < N; ++i)
        {
            g1_x_prev.emplace_back(G1::one());
            g2_x_prev.emplace_back(accumulator * G2::one());

            accumulator = accumulator * y;
        }
        std::atomic<size_t> progress(0);
        compute_g1_thread(y, g1_x_prev, 0, 0, N, progress);
    }

    {
        Fr y = Fr::random_element();
        Fr accumulator = y;
        for (size_t i = 0; i < N; ++i)
        {
            g1_x.emplace_back(g1_x_prev[i]);
            g2_x.emplace_back(accumulator * g2_x_prev[i]);

            accumulator = accumulator * y;
        }
        std::atomic<size_t> progress(0);
        compute_g1_thread(y, g1_x, 0, 0, N, progress);
        g2_y = libff::fixed_window_wnaf_exp(5, G2::one(), y.as_bigint());
    }

    bool result = validate_transcript(g1_x[0], g2_x[0], g1_x, g2_x, {g1_x_prev[0]}, {g2_y});
    EXPECT_EQ(result, true);
}
-------------------------------------------------------------------------------- /setup-tools/test/test_utils.hpp: --------------------------------------------------------------------------------
#pragma once

#include "stddef.h"

#include <gtest/gtest.h>
#include <libff/algebra/curves/alt_bn128/alt_bn128_pp.hpp>

namespace test_utils
{
template <size_t N>
void validate_g1_point(libff::alt_bn128_G1 &result, libff::alt_bn128_G1 &expected)
{
    libff::bigint<N> result_x = result.X.as_bigint();
    libff::bigint<N> result_y = result.Y.as_bigint();
    libff::bigint<N> expected_x = expected.X.as_bigint();
    libff::bigint<N> expected_y = expected.Y.as_bigint();

    for (size_t i = 0; i < N; ++i)
    {
        EXPECT_EQ(result_x.data[i], expected_x.data[i]);
        EXPECT_EQ(result_y.data[i], expected_y.data[i]);
    }
}

template <size_t N>
void validate_g2_point(libff::alt_bn128_G2 &result, libff::alt_bn128_G2 &expected)
{
    libff::bigint<N> result_x0 = result.X.c0.as_bigint();
    libff::bigint<N> result_y0 = result.Y.c0.as_bigint();
    libff::bigint<N> result_x1 = result.X.c1.as_bigint();
    libff::bigint<N> result_y1 = result.Y.c1.as_bigint();

    libff::bigint<N> expected_x0 = expected.X.c0.as_bigint();
    libff::bigint<N> expected_y0 = expected.Y.c0.as_bigint();
    libff::bigint<N> expected_x1 = expected.X.c1.as_bigint();
    libff::bigint<N> expected_y1 = expected.Y.c1.as_bigint();

    for (size_t i = 0; i < N; ++i)
    {
        EXPECT_EQ(result_x0.data[i], expected_x0.data[i]);
        EXPECT_EQ(result_y0.data[i], expected_y0.data[i]);
        EXPECT_EQ(result_x1.data[i], expected_x1.data[i]);
        EXPECT_EQ(result_y1.data[i], expected_y1.data[i]);
    }
}
} // namespace test_utils
-------------------------------------------------------------------------------- /tslint.json: --------------------------------------------------------------------------------
{
  "defaultSeverity": "error",
  "extends": ["tslint:recommended", "tslint-config-prettier"],
  "rules": {
    "no-bitwise": false,
    "no-empty": [true, "allow-empty-functions"],
    "no-console": false,
    "interface-name": false,
    "interface-over-type-literal": false,
    "object-literal-sort-keys": false,
    "member-ordering": false,
    "no-shadowed-variable": false,
    "max-classes-per-file": false,
    "no-var-requires": false,
    "array-type": [true, "array"]
  }
}
--------------------------------------------------------------------------------