├── .github └── workflows │ ├── ci.yml │ └── docker_release.yml ├── .gitignore ├── .gitmodules ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── deployment ├── Dockerfile-gcp ├── Dockerfile-near-node-initializer ├── gcp-start.sh └── initialize-near-node.sh ├── devnet ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── README.md ├── config.yaml.template └── src │ ├── account.rs │ ├── cli.rs │ ├── constants.rs │ ├── devnet.rs │ ├── funding.rs │ ├── loadtest.rs │ ├── main.rs │ ├── mpc.rs │ ├── queries.rs │ ├── rpc.rs │ ├── terraform.rs │ ├── terraform │ └── describe.rs │ ├── tx.rs │ └── types.rs ├── infra ├── .gitignore ├── README.md ├── configs │ └── mpc_cloud_config.yml ├── modules │ ├── instance-from-tpl │ │ ├── main.tf │ │ ├── metadata.yaml │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── versions.tf │ └── mig_template │ │ ├── main.tf │ │ ├── metadata.yaml │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── versions.tf ├── partner-mainnet │ ├── main.tf │ ├── network.tf │ ├── outputs.tf │ ├── resources.tf │ ├── terraform-mainnet-example.tfvars │ └── variables.tf ├── partner-testnet │ ├── main.tf │ ├── network.tf │ ├── outputs.tf │ ├── resources.tf │ ├── terraform-testnet-example.tfvars │ └── variables.tf └── scripts │ ├── generate_keys │ ├── Cargo.lock │ ├── Cargo.toml │ └── src │ │ └── main.rs │ ├── keys │ ├── Cargo.toml │ └── src │ │ ├── hpke.rs │ │ └── lib.rs │ ├── mpc_init.sh │ └── upload_secrets │ ├── do-not-commit-example.txt │ └── upload_secrets.sh ├── libs └── chain-signatures │ ├── .gitignore │ ├── Cargo.lock │ ├── Cargo.toml │ ├── contract │ ├── .cargo │ │ └── config.toml │ ├── Cargo.toml │ ├── readme.md │ ├── src │ │ ├── config │ │ │ ├── consts.rs │ │ │ ├── impls.rs │ │ │ └── mod.rs │ │ ├── crypto_shared │ │ │ ├── kdf.rs │ │ │ ├── mod.rs │ │ │ └── types.rs │ │ ├── errors │ │ │ ├── impls.rs │ │ │ └── mod.rs │ │ ├── legacy_contract_state │ │ │ ├── impls.rs │ │ │ └── mod.rs │ │ ├── lib.rs │ │ ├── primitives │ │ │ ├── code_hash.rs │ │ │ ├── domain.rs │ │ │ 
├── key_state.rs │ │ │ ├── mod.rs │ │ │ ├── participants.rs │ │ │ ├── signature.rs │ │ │ ├── test_utils.rs │ │ │ ├── thresholds.rs │ │ │ └── votes.rs │ │ ├── state │ │ │ ├── initializing.rs │ │ │ ├── key_event.rs │ │ │ ├── mod.rs │ │ │ ├── resharing.rs │ │ │ └── running.rs │ │ ├── storage_keys.rs │ │ ├── update.rs │ │ ├── utils.rs │ │ └── v0_state │ │ │ └── mod.rs │ └── tests │ │ ├── back_compatibility.rs │ │ ├── common.rs │ │ ├── sign.rs │ │ ├── updates.rs │ │ ├── user_views.rs │ │ └── vote.rs │ ├── keys │ ├── Cargo.toml │ └── src │ │ ├── hpke.rs │ │ └── lib.rs │ └── rust-toolchain.toml ├── node ├── Cargo.toml └── src │ ├── assets.rs │ ├── async_testing.rs │ ├── background.rs │ ├── cli.rs │ ├── config.rs │ ├── coordinator.rs │ ├── db.rs │ ├── indexer │ ├── configs.rs │ ├── fake.rs │ ├── handler.rs │ ├── lib.rs │ ├── mod.rs │ ├── participants.rs │ ├── real.rs │ ├── stats.rs │ ├── tx_sender.rs │ ├── tx_signer.rs │ └── types.rs │ ├── key_events.rs │ ├── keyshare │ ├── compat.rs │ ├── gcp.rs │ ├── local.rs │ ├── mod.rs │ ├── permanent.rs │ ├── temporary.rs │ └── test_utils.rs │ ├── main.rs │ ├── metrics.rs │ ├── mpc_client.rs │ ├── network.rs │ ├── network │ ├── computation.rs │ ├── conn.rs │ ├── constants.rs │ ├── handshake.rs │ ├── indexer_heights.rs │ └── signing.rs │ ├── p2p.rs │ ├── primitives.rs │ ├── protocol.rs │ ├── protocol_version.rs │ ├── providers │ ├── ecdsa │ │ ├── kdf.rs │ │ ├── key_generation.rs │ │ ├── key_resharing.rs │ │ ├── mod.rs │ │ ├── presign.rs │ │ ├── sign.rs │ │ └── triple.rs │ ├── eddsa │ │ ├── kdf.rs │ │ ├── key_generation.rs │ │ ├── key_resharing.rs │ │ ├── mod.rs │ │ └── sign.rs │ └── mod.rs │ ├── runtime.rs │ ├── sign_request.rs │ ├── signing │ ├── debug.rs │ ├── metrics.rs │ ├── mod.rs │ ├── queue.rs │ └── recent_blocks_tracker.rs │ ├── tee │ └── mod.rs │ ├── tests │ ├── basic_cluster.rs │ ├── benchmark.rs │ ├── faulty.rs │ ├── mod.rs │ ├── multidomain.rs │ ├── research.rs │ └── resharing.rs │ ├── tracing.rs │ ├── tracking.rs │ └── 
web.rs ├── pytest ├── common_lib │ ├── __init__.py │ ├── constants.py │ ├── contract_state.py │ ├── contracts.py │ ├── shared.py │ └── signature.py ├── config.json ├── exec_pytest.sh ├── nearcore_pytest │ ├── __init__.py │ └── pyproject.toml ├── pytest.ini ├── readme.md ├── requirements.txt └── tests │ ├── conftest.py │ ├── test_contract_update.py │ ├── test_contracts │ ├── migration │ │ ├── .gitignore │ │ ├── Cargo.lock │ │ ├── Cargo.toml │ │ ├── migration_contract.wasm │ │ └── src │ │ │ └── lib.rs │ └── parallel │ │ ├── Cargo.lock │ │ ├── Cargo.toml │ │ ├── res │ │ └── contract.wasm │ │ ├── rust-toolchain.toml │ │ └── src │ │ └── lib.rs │ ├── test_key_event.py │ ├── test_lost_assets.py │ ├── test_parallel_sign_calls.py │ ├── test_signature_request.py │ ├── test_signature_request_during_resharing.py │ ├── test_web_endpoints.py │ └── test_without_respond_yaml.py ├── rust-toolchain.toml ├── scripts ├── .env_example └── update-mpc-node.sh └── third-party-licenses ├── README.md ├── about.hbs ├── about.toml └── licenses.html /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | concurrency: 4 | group: ${{ github.workflow }}-${{ github.ref }} 5 | cancel-in-progress: true 6 | 7 | on: 8 | workflow_dispatch: 9 | pull_request: 10 | merge_group: 11 | 12 | 13 | jobs: 14 | ci-tests: 15 | name: "Run tests" 16 | runs-on: warp-ubuntu-2204-x64-8x 17 | timeout-minutes: 60 18 | permissions: 19 | contents: read 20 | 21 | steps: 22 | - name: Checkout repository 23 | uses: actions/checkout@v4 24 | 25 | - name: Initialize submodules 26 | run: git submodule update --init --recursive 27 | 28 | - name: Cache Rust dependencies 29 | uses: WarpBuilds/rust-cache@v2 30 | with: 31 | cache-on-failure: true 32 | cache-all-crates: true 33 | 34 | - name: Run Clippy fmt 35 | run: | 36 | CARGO_TARGET_DIR="target/clippy" \ 37 | RUSTFLAGS="-D warnings" \ 38 | cargo clippy --all-features --all-targets --locked 39 | 40 | cd devnet 
41 | CARGO_TARGET_DIR="target/clippy" \ 42 | RUSTFLAGS="-D warnings" \ 43 | cargo clippy --all-features --all-targets --locked 44 | cd .. 45 | 46 | cd libs/chain-signatures 47 | CARGO_TARGET_DIR="target/clippy" \ 48 | RUSTFLAGS="-D warnings" \ 49 | cargo clippy --all-features --all-targets --locked 50 | cd ../.. 51 | 52 | - name: Run Cargo fmt 53 | run: | 54 | cargo fmt -- --check 55 | 56 | cd devnet 57 | cargo fmt -- --check 58 | cd .. 59 | 60 | cd libs/chain-signatures 61 | cargo fmt -- --check 62 | cd ../.. 63 | 64 | - name: Install cargo-nextest 65 | run: cargo install cargo-nextest 66 | 67 | - name: Install wasm-opt from crates.io 68 | run: | 69 | cargo install wasm-opt --locked 70 | echo "${HOME}/.cargo/bin" >> $GITHUB_PATH 71 | 72 | - name: Run cargo-nextest 73 | run: cargo nextest run --release --locked 74 | 75 | - name: Run contract tests 76 | run: | 77 | cd libs/chain-signatures 78 | cargo nextest run -p mpc-contract --release --locked 79 | 80 | - name: Download near core binary from S3 81 | id: download-neard 82 | continue-on-error: true 83 | run: | 84 | os=$(uname) 85 | arch=$(uname -m) 86 | os_and_arch=${os}-${arch} 87 | cd libs/nearcore 88 | 89 | branch_name=$(git branch -r --contains HEAD | grep -o 'origin/[^ ]*' | sed 's|origin/||' | head -n 1 || echo "no-branch") 90 | commit_hash=$(git rev-parse HEAD || echo "no-commit") 91 | 92 | url="https://s3.us-west-1.amazonaws.com/build.nearprotocol.com/nearcore/${os_and_arch}/${branch_name}/${commit_hash}/neard" 93 | 94 | mkdir -p target/release 95 | status_code=$(curl -v -o target/release/neard -w "%{http_code}" "${url}") 96 | if [ "$status_code" -ne 200 ]; then 97 | echo "curl failed with URL: ${url}, Status Code: ${status_code}" 98 | exit 1 99 | fi 100 | chmod +x target/release/neard 101 | 102 | - name: Build near core as fallback 103 | if: steps.download-neard.outcome != 'success' 104 | run: | 105 | cd libs/nearcore 106 | cargo build -p neard --release 107 | 108 | - name: Build mpc node 109 | run: cargo 
build -p mpc-node --release 110 | 111 | - name: Setup python 112 | uses: actions/setup-python@v4 113 | with: 114 | python-version: '3.11' 115 | 116 | - name: Setup virtualenv 117 | run: | 118 | python3 -m venv pytest/venv 119 | source pytest/venv/bin/activate 120 | cd pytest 121 | pip install -r requirements.txt 122 | 123 | - name: Run pytest 124 | run: | 125 | source pytest/venv/bin/activate 126 | cd pytest 127 | pytest -m "not ci_excluded" -s -x 128 | -------------------------------------------------------------------------------- /.github/workflows/docker_release.yml: -------------------------------------------------------------------------------- 1 | name: Docker Build and Publish 2 | 3 | concurrency: 4 | group: ${{ github.workflow }}-${{ github.ref }} 5 | cancel-in-progress: true 6 | 7 | on: 8 | workflow_dispatch: 9 | inputs: 10 | build-ref: 11 | default: 'main' 12 | description: "The branch, tag or SHA to build MPC Docker image from. Default to latest commit on main branch." 13 | type: string 14 | 15 | 16 | jobs: 17 | docker-image-build: 18 | name: "Build and push MPC Docker image" 19 | runs-on: warp-ubuntu-2204-x64-8x 20 | permissions: 21 | contents: read 22 | 23 | steps: 24 | - name: Checkout repository 25 | uses: actions/checkout@v4 26 | with: 27 | ref: ${{ github.event.inputs.build-ref }} 28 | 29 | - name: Get short SHA 30 | shell: bash 31 | run: | 32 | echo "sha_short=$(git rev-parse --short HEAD)" >> "$GITHUB_ENV" 33 | 34 | - name: Initialize submodules 35 | run: git submodule update --init --recursive 36 | 37 | - name: Login to Docker Hub 38 | uses: docker/login-action@v3 39 | with: 40 | username: ${{ secrets.DOCKERHUB_USERNAME }} 41 | password: ${{ secrets.DOCKERHUB_TOKEN }} 42 | 43 | - name: Build and push MPC Docker image to Docker Hub 44 | uses: Warpbuilds/build-push-action@v6 45 | with: 46 | context: . 
47 | profile-name: "mpc-image-builder" 48 | push: true 49 | file: deployment/Dockerfile-gcp 50 | tags: nearone/mpc-node-gcp:latest,nearone/mpc-node-gcp:${{ github.event.inputs.build-ref }},nearone/mpc-node-gcp:${{ github.event.inputs.build-ref }}-${{ env.sha_short }} 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # IDE configs 2 | .idea 3 | venv 4 | .vscode 5 | 6 | /target/ 7 | /pytest/output.log 8 | /pytest/venv/ 9 | /pytest/*/__pycache__/ 10 | /pytest/nearcore_pytest/nearcore_pytest.egg-info/ 11 | /pytest/tests/test_contracts/parallel/target/ 12 | 13 | /devnet/generate_keys/target/ 14 | /libs/chain-signatures/res/ 15 | /libs/chain-signatures/target/ 16 | /node/presignature_network_report_best_case.json 17 | /node/signature_network_report_best_case.json 18 | /node/triple_network_report_best_case.json 19 | /node/triple_network_report_worst_case.json 20 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "libs/nearcore"] 2 | path = libs/nearcore 3 | url = https://github.com/near/nearcore 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["node"] 3 | resolver = "2" 4 | exclude = ["libs", "pytest"] 5 | 6 | [profile.dev-release] 7 | inherits = "release" 8 | debug = true 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 NEAR One Limited. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## mpc 2 | This repository contains the code for the Near mpc node. It is a rewrite of [Near mpc](https://github.com/near/mpc_old). 3 | 4 | ### Dependencies and submodules 5 | - **Nearcore Node**: This repository depends on the nearcore node, included as a submodule in the `/libs` directory. 6 | - **Other Dependencies**: All other dependencies are handled by Cargo. 7 | 8 | ### How it works 9 | 10 | There are two main parts of the binary: NEAR indexer and mpc signing: 11 | - NEAR Indexer: this is a NEAR node that tracks the shard where the signing smart contract is on. For mainnet, it is `v1.signer`. 12 | The indexer tracks incoming requests by looking at successful calls to the `sign` function. 
Each request is hashed and gets mapped to a 13 | specific node in the mpc network, which is known as the leader for this specific request. The leader initiates the signing process and submits the final signature back to the smart contract. If the leader is offline, there is a secondary leader who can initiate the signing. 14 | - MPC signing: A threshold ecdsa implementation based on [cait-sith](https://cronokirby.com/posts/2023/03/some-bits-about-cait-sith/). Each node does the following: 15 | * Participates in Beaver triple generation in the background. Each node both initiates triple generation and passively participates in triple generation initiated by other nodes. This is constantly running until each node generates 1M Beaver triples. 16 | * Presignature generation. It also runs in the background. Each presignature generation requires two Beaver triples. 17 | * Signature generation. When a request comes in, a signature can be generated using a presignature and one round of communication. 18 | 19 | One thing to note is that from Beaver triple generation to signature generation, the request denotes the participating set and it is guaranteed that if a Beaver triple is generated by a specific set of participants, presignatures and signatures using that Beaver triple are generated by the same set of participants. 20 | 21 | ### Testing: 22 | - **Unit Tests**: Run with `cargo test --release` (`--release` flag is advised for performance reasons). 23 | - **Integration Tests**: Located in the `/pytest` directory. 24 | 25 | 26 | ### Compilation: 27 | This repository uses `rust-toolchain.toml` files, as some code sections may require specific compiler versions. Be aware of potential overrides from: 28 | - Directory-specific toolchain overrides 29 | - Environment variables 30 | 31 | For more information, refer to the [Rustup book on overrides](https://rust-lang.github.io/rustup/overrides.html). 
32 | 33 | ### Contributions 34 | 35 | The NEAR MPC Node is actively maintained by **[NEAR One](https://github.com/Near-One)** and **[HOT Labs](https://github.com/hot-dao)**, with valuable contributions from the broader open-source community. 36 | 37 | We welcome contributions in the form of issues, feature requests, and pull requests. Please ensure any changes are well-documented and tested. For major changes, open an issue to discuss the proposed modifications first. 38 | -------------------------------------------------------------------------------- /deployment/Dockerfile-gcp: -------------------------------------------------------------------------------- 1 | FROM rust:latest AS builder 2 | RUN apt-get update -y && apt-get install -y --no-install-recommends clang # needed for rocksdb 3 | 4 | WORKDIR /app 5 | COPY node node 6 | COPY Cargo.lock Cargo.lock 7 | COPY Cargo.toml Cargo.toml 8 | COPY rust-toolchain.toml rust-toolchain.toml 9 | COPY libs/chain-signatures libs/chain-signatures 10 | 11 | RUN cargo build --locked --release 12 | 13 | FROM google/cloud-sdk:debian_component_based AS runtime 14 | RUN apt-get update -y \ 15 | && apt-get install -y --no-install-recommends openssl ca-certificates 16 | 17 | ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt 18 | 19 | WORKDIR /app 20 | COPY --from=builder /app/target/release/mpc-node mpc-node 21 | COPY deployment/gcp-start.sh /app/gcp-start.sh 22 | RUN chmod +x /app/gcp-start.sh 23 | CMD [ "/app/gcp-start.sh" ] 24 | -------------------------------------------------------------------------------- /deployment/Dockerfile-near-node-initializer: -------------------------------------------------------------------------------- 1 | FROM nearprotocol/nearcore:latest 2 | RUN apt-get update -y && apt-get install -y --no-install-recommends python3 3 | 4 | ADD deployment/initialize-near-node.sh /initialize-near-node.sh 5 | CMD ["bash", "/initialize-near-node.sh"] 6 | 
-------------------------------------------------------------------------------- /deployment/gcp-start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eo pipefail 3 | 4 | 5 | # This script is intended to be used for running nearone/mpc in a GCP environment. 6 | # It will initialize the Near node in case it is not initialized yet and start the MPC node. 7 | 8 | 9 | MPC_NODE_CONFIG_FILE="$MPC_HOME_DIR/config.yaml" 10 | NEAR_NODE_CONFIG_FILE="$MPC_HOME_DIR/config.json" 11 | 12 | initialize_near_node() { 13 | ./mpc-node init --dir $1 --chain-id $MPC_ENV --download-genesis --download-config 14 | python3 << EOF 15 | import json; 16 | config = json.load(open("$NEAR_NODE_CONFIG_FILE")) 17 | 18 | # boot nodes must be filled in or else the node will not have any peers. 19 | config['network']['boot_nodes'] = "${NEAR_BOOT_NODES}" 20 | config['store']['load_mem_tries_for_tracked_shards'] = True 21 | config['state_sync']['sync']['ExternalStorage']['external_storage_fallback_threshold'] = 0 22 | 23 | # Track whichever shard the contract account is on. 
24 | config['tracked_shards'] = [] 25 | config['tracked_accounts'] = ["$MPC_CONTRACT_ID"] 26 | json.dump(config, open("$NEAR_NODE_CONFIG_FILE", 'w'), indent=2) 27 | EOF 28 | } 29 | 30 | initialize_mpc_config() { 31 | cat < "$1" 32 | # Configuration File 33 | my_near_account_id: $MPC_ACCOUNT_ID 34 | web_ui: 35 | host: 0.0.0.0 36 | port: 8080 37 | triple: 38 | concurrency: 2 39 | desired_triples_to_buffer: 1000000 40 | timeout_sec: 60 41 | parallel_triple_generation_stagger_time_sec: 1 42 | presignature: 43 | concurrency: 16 44 | desired_presignatures_to_buffer: 8192 45 | timeout_sec: 60 46 | signature: 47 | timeout_sec: 60 48 | indexer: 49 | validate_genesis: false 50 | sync_mode: Latest 51 | concurrency: 1 52 | mpc_contract_id: $MPC_CONTRACT_ID 53 | port_override: 80 54 | finality: optimistic 55 | cores: 12 56 | EOF 57 | } 58 | 59 | # Check and initialize Near node config if needed 60 | if [ -r "$NEAR_NODE_CONFIG_FILE" ]; then 61 | echo "Near node is already initialized" 62 | else 63 | echo "Initializing Near node" 64 | initialize_near_node $MPC_HOME_DIR && echo "Near node initialized" 65 | fi 66 | 67 | # Check and initialize MPC config if needed 68 | if [ -r "$MPC_NODE_CONFIG_FILE" ]; then 69 | echo "MPC node is already initialized" 70 | else 71 | echo "Initializing MPC node" 72 | initialize_mpc_config $MPC_NODE_CONFIG_FILE && echo "MPC node initialized" 73 | fi 74 | 75 | # Check if MPC_SECRET_STORE_KEY is empty - if so, fetch from GCP Secret Manager 76 | if [ -z "${MPC_SECRET_STORE_KEY}" ]; then 77 | echo "MPC_SECRET_STORE_KEY not provided in environment, will fetch from GCP Secret Manager..." 
78 | export MPC_SECRET_STORE_KEY=$(gcloud secrets versions access latest --project $GCP_PROJECT_ID --secret=$GCP_LOCAL_ENCRYPTION_KEY_SECRET_ID) 79 | else 80 | echo "Using provided MPC_SECRET_STORE_KEY from environment" 81 | fi 82 | 83 | # Check if MPC_P2P_PRIVATE_KEY is empty - if so, fetch from GCP Secret Manager 84 | if [ -z "${MPC_P2P_PRIVATE_KEY}" ]; then 85 | echo "MPC_P2P_PRIVATE_KEY not provided in environment, will fetch from GCP Secret Manager..." 86 | export MPC_P2P_PRIVATE_KEY=$(gcloud secrets versions access latest --project $GCP_PROJECT_ID --secret=$GCP_P2P_PRIVATE_KEY_SECRET_ID) 87 | else 88 | echo "Using provided MPC_P2P_PRIVATE_KEY from environment" 89 | fi 90 | 91 | # Check if MPC_ACCOUNT_SK is empty - if so, fetch from GCP Secret Manager 92 | if [ -z "${MPC_ACCOUNT_SK}" ]; then 93 | echo "MPC_ACCOUNT_SK not provided in environment, will fetch from GCP Secret Manager..." 94 | export MPC_ACCOUNT_SK=$(gcloud secrets versions access latest --project $GCP_PROJECT_ID --secret=$GCP_ACCOUNT_SK_SECRET_ID) 95 | else 96 | echo "Using provided MPC_ACCOUNT_SK from environment" 97 | fi 98 | 99 | 100 | echo "Starting mpc node..." 101 | /app/mpc-node start 102 | -------------------------------------------------------------------------------- /deployment/initialize-near-node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x -e 3 | 4 | # This is for development: clear the whole data directory and start over. 5 | if [ "$RESET_DATA" = "1" ]; then 6 | echo "Resetting data" 7 | rm -rf /data/* 8 | fi 9 | 10 | echo Initializing Near node using chain ID ${CHAIN_ID:?"CHAIN_ID is required"}, tracking contract ${CONTRACT:?"CONTRACT is required"} 11 | 12 | HOME_DIR=/data 13 | 14 | # Initialize the node from config and genesis. 15 | neard --home ${HOME_DIR} init --chain-id=$CHAIN_ID --download-genesis --download-config 16 | 17 | # Fill in configs that we need to tweak for the mpc setup. 
18 | python3 << EOF 19 | import json; 20 | config = json.load(open('/data/config.json')) 21 | 22 | # boot nodes must be filled in or else the node will not have any peers. 23 | config['network']['boot_nodes'] = "${BOOT_NODES}" 24 | 25 | config['state_sync']['sync']['ExternalStorage']['external_storage_fallback_threshold'] = 0 26 | 27 | # Track whichever shard the contract account is on. 28 | config['tracked_shards'] = [] 29 | config['tracked_accounts'] = ["$CONTRACT"] 30 | json.dump(config, open('/data/config.json', 'w'), indent=2) 31 | EOF 32 | 33 | # Run the node. The node will catch up via epoch sync, header sync, state sync, and finally block sync. 34 | # the node is ready when the logs start printing block hashes in the status line. 35 | neard --home ${HOME_DIR} run 36 | -------------------------------------------------------------------------------- /devnet/.gitignore: -------------------------------------------------------------------------------- 1 | devnet_setup.yaml 2 | devnet_setup.yaml.bak 3 | config.yaml 4 | *.tfvars.json 5 | target/ -------------------------------------------------------------------------------- /devnet/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "devnet" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | [dependencies] 8 | anyhow = "1.0.97" 9 | borsh = { version = "1.5.7", features = ["derive"] } 10 | clap = { version = "4.5.31", features = ["derive"] } 11 | flume = "0.11.1" 12 | futures = "0.3.31" 13 | hex = "0.4.3" 14 | near-crypto = "0.28.0" 15 | near-jsonrpc-client = "0.15.1" 16 | near-jsonrpc-primitives = "0.28.0" 17 | near-primitives = "0.28.0" 18 | near-sdk = { version = "5.8.1" } 19 | rand = "0.9.0" 20 | reqwest = "0.12.12" 21 | serde = { version = "1.0.218", features = ["derive"] } 22 | serde_json = "1.0.140" 23 | serde_yaml = "0.9.34" 24 | tokio = { version = "1.43.0", features = ["full"] } 25 | 26 | legacy-mpc-contract = { package 
= "mpc-contract", git = "https://github.com/Near-One/mpc/", rev = "1d4954dff28e8eb988fb7762eff414a602a2b124" } 27 | mpc-contract = { path = "../libs/chain-signatures/contract", features = ["test-utils", "dev-utils"]} 28 | 29 | 30 | [workspace] 31 | -------------------------------------------------------------------------------- /devnet/config.yaml.template: -------------------------------------------------------------------------------- 1 | rpcs: 2 | - url: http://replace_me:3030 3 | rate_limit: 5 4 | max_concurrency: 30 5 | 6 | infra_ops_path: /replace_me/infra-ops 7 | 8 | # Optional own funding account 9 | # funding_account: 10 | # account_id: your-funding-account.testnet 11 | # kind: FundingAccount 12 | # access_keys: 13 | # - ed25519:your_private_key_here -------------------------------------------------------------------------------- /devnet/src/constants.rs: -------------------------------------------------------------------------------- 1 | pub const ONE_NEAR: u128 = 1_000_000_000_000_000_000_000_000; 2 | /// Below this minimum balance we consider an account to be possibly unusable. 3 | pub const MINIMUM_BALANCE_TO_REMAIN_IN_ACCOUNTS: u128 = ONE_NEAR / 10; 4 | /// When we need to refill an account (during an update operation), we will not 5 | /// refill it if it's more than this percent of the desired balance. That way, we don't 6 | /// end up topping up accounts all the time with tiny amounts. 7 | pub const PERCENT_OF_ORIGINAL_BALANCE_BELOW_WHICH_TO_REFILL: u128 = 70; 8 | 9 | /// The default docker image to deploy the node with. 10 | pub const DEFAULT_MPC_DOCKER_IMAGE: &str = "nearone/mpc-node-gcp:testnet-release"; 11 | /// The default parallel signing contract path to test with. 
12 | pub const DEFAULT_PARALLEL_SIGN_CONTRACT_PATH: &str = 13 | "../pytest/tests/test_contracts/parallel/res/contract.wasm"; 14 | /// Address of the mpc contract on testnet 15 | pub const TESTNET_CONTRACT_ACCOUNT_ID: &str = "v1.signer-prod.testnet"; 16 | -------------------------------------------------------------------------------- /devnet/src/devnet.rs: -------------------------------------------------------------------------------- 1 | use crate::account::OperatingAccounts; 2 | use crate::rpc::NearRpcClients; 3 | use crate::types::{DevnetSetupRepository, LoadtestSetup, MpcNetworkSetup}; 4 | use near_jsonrpc_client::methods; 5 | use std::collections::HashMap; 6 | use std::sync::Arc; 7 | 8 | /// Live state of the setup for the entire devnet. 9 | /// Upon dropping, saves the state to devnet_setup.yaml 10 | pub struct OperatingDevnetSetup { 11 | pub accounts: OperatingAccounts, 12 | pub mpc_setups: HashMap, 13 | pub loadtest_setups: HashMap, 14 | } 15 | 16 | impl OperatingDevnetSetup { 17 | const SETUP_FILENAME: &str = "devnet_setup.yaml"; 18 | 19 | /// Load the setup from disk. 
20 | pub async fn load(client: Arc) -> Self { 21 | if !std::fs::exists(Self::SETUP_FILENAME).unwrap() { 22 | std::fs::write( 23 | Self::SETUP_FILENAME, 24 | serde_yaml::to_string(&DevnetSetupRepository::default()).unwrap(), 25 | ) 26 | .unwrap(); 27 | } 28 | let setup_data = std::fs::read_to_string(Self::SETUP_FILENAME).unwrap(); 29 | let setup: DevnetSetupRepository = serde_yaml::from_str(&setup_data).unwrap(); 30 | let recent_block_hash = client 31 | .submit(methods::block::RpcBlockRequest { 32 | block_reference: near_primitives::types::BlockReference::Finality( 33 | near_primitives::types::Finality::Final, 34 | ), 35 | }) 36 | .await 37 | .unwrap() 38 | .header 39 | .hash; 40 | 41 | let accounts = OperatingAccounts::new(setup.accounts, recent_block_hash, client); 42 | Self { 43 | accounts, 44 | mpc_setups: setup.mpc_setups, 45 | loadtest_setups: setup.loadtest_setups, 46 | } 47 | } 48 | } 49 | 50 | impl Drop for OperatingDevnetSetup { 51 | fn drop(&mut self) { 52 | let setup = DevnetSetupRepository { 53 | accounts: self.accounts.to_data(), 54 | mpc_setups: self.mpc_setups.clone(), 55 | loadtest_setups: self.loadtest_setups.clone(), 56 | }; 57 | let setup_data = serde_yaml::to_string(&setup).unwrap(); 58 | if std::fs::exists(OperatingDevnetSetup::SETUP_FILENAME).unwrap() { 59 | // Make a backup, just in case the CLI crashed and saved some invalid middle state. 
60 | std::fs::rename( 61 | OperatingDevnetSetup::SETUP_FILENAME, 62 | format!("{}.bak", OperatingDevnetSetup::SETUP_FILENAME), 63 | ) 64 | .unwrap(); 65 | } 66 | std::fs::write(OperatingDevnetSetup::SETUP_FILENAME, setup_data).unwrap(); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /devnet/src/main.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use cli::Cli; 3 | 4 | mod account; 5 | mod cli; 6 | mod constants; 7 | mod devnet; 8 | mod funding; 9 | mod loadtest; 10 | mod mpc; 11 | mod queries; 12 | mod rpc; 13 | mod terraform; 14 | mod tx; 15 | mod types; 16 | 17 | #[tokio::main] 18 | async fn main() { 19 | Cli::parse().run().await; 20 | } 21 | -------------------------------------------------------------------------------- /devnet/src/queries.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::rpc::NearRpcClients; 4 | use anyhow::anyhow; 5 | use near_jsonrpc_client::methods::query::RpcQueryRequest; 6 | use near_jsonrpc_primitives::types::query::QueryResponseKind; 7 | use near_primitives::{ 8 | types::{BlockReference, Finality}, 9 | views::{ContractCodeView, QueryRequest}, 10 | }; 11 | use near_sdk::AccountId; 12 | 13 | /// fetches the contract code and hash from `target`. 
14 | pub async fn get_contract_code( 15 | client: &Arc, 16 | target: AccountId, 17 | ) -> anyhow::Result { 18 | let request = RpcQueryRequest { 19 | block_reference: BlockReference::Finality(Finality::Final), 20 | request: QueryRequest::ViewCode { account_id: target }, 21 | }; 22 | match client.submit(request).await?.kind { 23 | QueryResponseKind::ViewCode(code) => Ok(code), 24 | _ => Err(anyhow!("unexpected response")), 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /devnet/src/rpc.rs: -------------------------------------------------------------------------------- 1 | use crate::types::RpcConfig; 2 | use futures::future::BoxFuture; 3 | use futures::FutureExt; 4 | use near_jsonrpc_client::{methods, JsonRpcClient, MethodCallResult}; 5 | use std::ops::Deref; 6 | use std::sync::Arc; 7 | 8 | /// An aggregation of multiple RPC endpoints, each with its own QPS and concurrency limits. 9 | /// When using this aggregated client, any request will be automatically subject to these limits, 10 | /// and will use as many RPC endpoints as needed to saturate each client's limits. 11 | pub struct NearRpcClients { 12 | rpcs: Vec, 13 | } 14 | 15 | /// A single RPC endpoint with its own QPS and concurrency limits. 16 | struct NearRpcClient { 17 | client: Arc, 18 | /// Rate limiter. The way it works is we can receive a token when we're allowed to send per 19 | /// the rate limit. 20 | receiver: flume::Receiver<()>, 21 | /// Concurrency limiter. In-flight requests have a semaphore permit. 
22 | concurrency: Arc, 23 | rate_limit: usize, 24 | } 25 | 26 | impl NearRpcClient { 27 | fn new(config: RpcConfig) -> Self { 28 | let client = JsonRpcClient::connect(config.url); 29 | let concurrency = tokio::sync::Semaphore::new(config.max_concurrency); 30 | let (sender, receiver) = flume::bounded(config.rate_limit); 31 | tokio::spawn(async move { 32 | let mut interval = tokio::time::interval( 33 | std::time::Duration::from_secs(1).div_f64(config.rate_limit as f64), 34 | ); 35 | interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); 36 | loop { 37 | interval.tick().await; 38 | if sender.send_async(()).await.is_err() { 39 | break; 40 | } 41 | } 42 | }); 43 | Self { 44 | client: Arc::new(client), 45 | receiver, 46 | concurrency: Arc::new(concurrency), 47 | rate_limit: config.rate_limit, 48 | } 49 | } 50 | 51 | /// Wait until we're both allowed to send a request and there is enough concurrency remaining. 52 | async fn ready(&self) -> RpcClientPermit { 53 | let concurrency_permit = self.concurrency.clone().acquire_owned().await.unwrap(); 54 | self.receiver.recv_async().await.unwrap(); 55 | RpcClientPermit { 56 | _concurrency_permit: concurrency_permit, 57 | client: self.client.clone(), 58 | } 59 | } 60 | } 61 | 62 | /// Represents that we're allowed to send a request per both rate limit and concurrency control. 63 | pub struct RpcClientPermit { 64 | _concurrency_permit: tokio::sync::OwnedSemaphorePermit, 65 | client: Arc, 66 | } 67 | 68 | impl Deref for RpcClientPermit { 69 | type Target = JsonRpcClient; 70 | 71 | fn deref(&self) -> &Self::Target { 72 | &self.client 73 | } 74 | } 75 | 76 | impl NearRpcClients { 77 | pub async fn new(rpcs: Vec) -> Self { 78 | let rpcs = rpcs.into_iter().map(NearRpcClient::new).collect(); 79 | Self { rpcs } 80 | } 81 | 82 | /// Requests a permit to send a request, subject to rate limit and concurrency control. 83 | /// A request can be immediately sent after this function returns. 
84 | pub async fn lease(&self) -> RpcClientPermit { 85 | let (permit, _, _) = 86 | futures::future::select_all(self.rpcs.iter().map(|rpc| rpc.ready().boxed())).await; 87 | permit 88 | } 89 | 90 | pub async fn submit(&self, method: M) -> MethodCallResult 91 | where 92 | M: methods::RpcMethod, 93 | { 94 | let rpc = self.lease().await; 95 | rpc.call(method).await 96 | } 97 | 98 | /// Convenient function to perform a request with retries. Each request is subject to the same 99 | /// limits as lease(). 100 | pub async fn with_retry( 101 | &self, 102 | max_retries: usize, 103 | f: impl for<'a> Fn(&'a JsonRpcClient) -> BoxFuture<'a, anyhow::Result>, 104 | ) -> anyhow::Result { 105 | let mut retries = 0; 106 | loop { 107 | let permit = self.lease().await; 108 | match f(&permit).await { 109 | Ok(result) => return Ok(result), 110 | Err(err) => { 111 | retries += 1; 112 | if retries >= max_retries { 113 | return Err(err); 114 | } 115 | } 116 | } 117 | tokio::time::sleep(std::time::Duration::from_secs(1)).await; 118 | } 119 | } 120 | 121 | /// Total QPS the system can handle. 122 | pub fn total_qps(&self) -> usize { 123 | self.rpcs.iter().map(|rpc| rpc.rate_limit).sum() 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /devnet/src/terraform/describe.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | 3 | /// Partial JSON schema for `terraform show -json` output. 
4 | #[derive(Deserialize)] 5 | pub(super) struct TerraformInfraShowOutput { 6 | pub values: RootValues, 7 | } 8 | 9 | #[derive(Deserialize)] 10 | pub(super) struct RootValues { 11 | pub root_module: RootModule, 12 | } 13 | 14 | #[derive(Deserialize)] 15 | pub(super) struct RootModule { 16 | pub resources: Vec, 17 | } 18 | 19 | #[derive(Deserialize)] 20 | pub(super) struct Resource { 21 | pub address: String, 22 | #[serde(rename = "type")] 23 | pub type_: String, 24 | pub values: ResourceValues, 25 | } 26 | 27 | #[derive(Deserialize)] 28 | #[serde(untagged)] 29 | pub(super) enum ResourceValues { 30 | GoogleComputeInstance(GoogleComputeInstance), 31 | Other(Other), 32 | } 33 | 34 | #[derive(Deserialize, Clone)] 35 | pub(super) struct GoogleComputeInstance { 36 | pub machine_type: String, 37 | pub network_interface: Vec, 38 | pub zone: String, 39 | } 40 | 41 | #[derive(Deserialize, Clone)] 42 | pub(super) struct NetworkInterface { 43 | pub access_config: Vec, 44 | } 45 | 46 | #[derive(Deserialize, Clone)] 47 | pub(super) struct AccessConfig { 48 | pub nat_ip: String, 49 | } 50 | 51 | #[derive(Deserialize, Clone)] 52 | pub(super) struct Other {} 53 | 54 | impl GoogleComputeInstance { 55 | pub fn nat_ip(&self) -> Option { 56 | self.network_interface 57 | .first() 58 | .and_then(|ni| ni.access_config.first()) 59 | .map(|ac| ac.nat_ip.clone()) 60 | } 61 | } 62 | 63 | impl Resource { 64 | pub fn as_mpc_nomad_client(&self) -> Option<(usize, GoogleComputeInstance)> { 65 | let name_start = "google_compute_instance.nomad_client_mpc["; 66 | if self.type_ == "google_compute_instance" && self.address.starts_with(name_start) { 67 | let index: usize = self.address[name_start.len()..self.address.len() - 1] 68 | .parse() 69 | .unwrap(); 70 | if let ResourceValues::GoogleComputeInstance(instance) = &self.values { 71 | Some((index, instance.clone())) 72 | } else { 73 | None 74 | } 75 | } else { 76 | None 77 | } 78 | } 79 | 80 | pub fn as_mpc_nomad_server(&self) -> Option { 81 | let 
name = "google_compute_instance.nomad_server"; 82 | if self.type_ == "google_compute_instance" && self.address == name { 83 | if let ResourceValues::GoogleComputeInstance(instance) = &self.values { 84 | Some(instance.clone()) 85 | } else { 86 | None 87 | } 88 | } else { 89 | None 90 | } 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /devnet/src/tx.rs: -------------------------------------------------------------------------------- 1 | use near_jsonrpc_client::methods::tx::RpcTransactionResponse; 2 | use std::fmt::Debug; 3 | 4 | pub trait IntoReturnValueExt { 5 | /// Converts the RPC call result to a return value, or error if the result is anything else. 6 | fn into_return_value(self) -> anyhow::Result>; 7 | } 8 | 9 | impl IntoReturnValueExt for Result { 10 | fn into_return_value(self) -> anyhow::Result> { 11 | match self { 12 | Ok(tx_response) => { 13 | let Some(outcome) = tx_response.final_execution_outcome else { 14 | return Err(anyhow::anyhow!("Final execution outcome not found")); 15 | }; 16 | let outcome = outcome.into_outcome(); 17 | match outcome.status { 18 | near_primitives::views::FinalExecutionStatus::Failure(tx_execution_error) => { 19 | Err(anyhow::anyhow!( 20 | "Transaction failed: {:?}", 21 | tx_execution_error 22 | )) 23 | } 24 | near_primitives::views::FinalExecutionStatus::SuccessValue(value) => Ok(value), 25 | _ => Err(anyhow::anyhow!("Transaction failed: {:?}", outcome.status)), 26 | } 27 | } 28 | Err(e) => Err(anyhow::anyhow!("Transaction failed: {:?}", e)), 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /infra/.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | .terraform* 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely 
to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 16 | *.tfvars 17 | *.tfvars.json 18 | !terraform-dev*.tfvars 19 | !backend-config-*.tfvars 20 | !terraform-testnet-example.tfvars 21 | !backend.tfvars 22 | !terraform-mainnet-example.tfvars 23 | secrets.txt 24 | 25 | # Ignore override files as they are usually used to override resources locally and so 26 | # are not checked in 27 | override.tf 28 | override.tf.json 29 | *_override.tf 30 | *_override.tf.json 31 | 32 | # Include override files you do wish to add to version control using negated pattern 33 | # !example_override.tf 34 | 35 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 36 | # example: *tfplan* 37 | 38 | # Ignore CLI configuration files 39 | .terraformrc 40 | terraform.rc -------------------------------------------------------------------------------- /infra/README.md: -------------------------------------------------------------------------------- 1 | # Multichain Infrastructure Overview 2 | 3 | ## Environments: 4 | - Mainnet (Production) 5 | - Testnet (Production) 6 | 7 | ## Deployment: 8 | 9 | ### Mainnet/Testnet 10 | #### Please keep in mind that this is a live environment, and any changes you make may also effect our ecosystem partners. Ensure your new changes are rigorously tested, and will not break mainnet/Testnet. This deployment is semi-automated. 11 | ### "Break Glass" Deployment of Production environment 12 | #### **This should only be used if the environment is completely broken** 13 | - Deployment steps: 14 | 1. Make sure you have [terraform installed](https://developer.hashicorp.com/terraform/install) on your local machine 15 | 2. Navigate to the `infra` directory, and then the `partner-testnet` directory 16 | 3. 
Verify the variables in both `resources.tf line 3` and `terraform-testnet-example.tfvars lines 2, 13-14` are up to date 17 | 4. Run the `terraform init` command to initialize the infrastructure 18 | 5. Run `terraform plan --var-file=terraform-testnet.tfvars` and ensure the changes are indeed what you want to change 19 | 6. Run `terraform apply --var-file=terraform-testnet.tfvars`, This will replace the instance templates with new changes, and rebuild the VMs from scratch. 20 | - *Note: This will cause downtime, **MAKE SURE YOU ACTUALLY WANT TO DO THIS AND NOTIFY PARTNERS IN TELEGRAM CHANNEL "NEAR MPC Node Operators" If you don't have access to that telegram channel, you should probably not be doing this*** 21 | 7. Verify that the container has been started by ssh'ing to at least one of the VMs and running `docker ps` 22 | - *Note: use ```gcloud compute ssh multichain-testnet-partner-0``` or similar to ssh into machine, contact SRE if you have IAM issues* 23 | *Note: [Detailed guide](https://docs.google.com/document/d/1trjDL1oP57lHN9ZdhIbSSpxKMWwUmiBUyri4XKlHiHE/edit?usp=sharing) -------------------------------------------------------------------------------- /infra/configs/mpc_cloud_config.yml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | # Execute on every boot: mound file systems and start container 4 | runcmd: 5 | - mount -o discard,defaults /dev/sdb ${data_dir} 6 | - systemctl daemon-reload 7 | - systemctl start mpc-node.service 8 | - iptables -A INPUT -p tcp -j ACCEPT 9 | write_files: 10 | # Systemd service descriptor which will start (and restart) container 11 | - path: /etc/systemd/system/mpc-node.service 12 | content: | 13 | [Unit] 14 | Description=Start MPC Node 15 | 16 | [Service] 17 | ExecStartPre=-/usr/bin/docker stop mpc-node && /usr/bin/docker rm mpc-node 18 | ExecStart=/usr/bin/docker run --name mpc-node --net host -v ${data_dir}:/data -e MPC_HOME_DIR="/data" -e RUST_BACKTRACE="full" -e 
RUST_LOG="mpc=debug,info" -e GCP_PROJECT_ID=${gcp_project_id} -e GCP_KEYSHARE_SECRET_ID=${gcp_keyshare_secret_id} -e GCP_LOCAL_ENCRYPTION_KEY_SECRET_ID=${gcp_local_encryption_key_secret_id} -e GCP_P2P_PRIVATE_KEY_SECRET_ID=${gcp_p2p_private_key_secret_id} -e GCP_ACCOUNT_SK_SECRET_ID=${gcp_account_sk_secret_id} -e MPC_ACCOUNT_ID=${mpc_account_id} -e NEAR_BOOT_NODES=${near_boot_nodes} -e MPC_LOCAL_ADDRESS=${mpc_local_address} -e MPC_CONTRACT_ID=${mpc_contract_id} -e MPC_ENV=${chain_id} ${docker_image} 19 | ExecStop=/usr/bin/docker stop mpc-node 20 | ExecStopPost=/usr/bin/docker rm mpc-node 21 | Restart=on-failure 22 | -------------------------------------------------------------------------------- /infra/modules/instance-from-tpl/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | hostname = var.hostname == "" ? "default" : var.hostname 3 | num_instances = length(var.static_ips) == 0 ? var.num_instances : length(var.static_ips) 4 | 5 | # local.static_ips is the same as var.static_ips with a dummy element appended 6 | # at the end of the list to work around "list does not have any elements so cannot 7 | # determine type" error when var.static_ips is empty 8 | static_ips = concat(var.static_ips, ["NOT_AN_IP"]) 9 | 10 | zones = length(var.zones) == 0 ? 
data.google_compute_zones.available.names : var.zones 11 | 12 | instance_group_count = min( 13 | local.num_instances, 14 | length(local.zones), 15 | ) 16 | } 17 | 18 | ############### 19 | # Data Sources 20 | ############### 21 | 22 | data "google_compute_zones" "available" { 23 | project = var.project_id 24 | region = var.region 25 | status = "UP" 26 | } 27 | 28 | resource "google_compute_instance_from_template" "compute_instance" { 29 | provider = google 30 | count = local.num_instances 31 | name = local.hostname 32 | project = var.project_id 33 | zone = local.zones[count.index % length(local.zones)] 34 | 35 | network_interface { 36 | network = var.network 37 | subnetwork = var.subnetwork 38 | subnetwork_project = var.subnetwork_project 39 | network_ip = length(var.static_ips) == 0 ? "" : element(local.static_ips, count.index) 40 | 41 | dynamic "access_config" { 42 | # convert to map to use lookup function with default value 43 | for_each = lookup({ for k, v in var.access_config : k => v }, count.index, []) 44 | content { 45 | nat_ip = access_config.value.nat_ip 46 | network_tier = access_config.value.network_tier 47 | } 48 | } 49 | 50 | dynamic "ipv6_access_config" { 51 | # convert to map to use lookup function with default value 52 | for_each = lookup({ for k, v in var.ipv6_access_config : k => v }, count.index, []) 53 | content { 54 | network_tier = ipv6_access_config.value.network_tier 55 | } 56 | } 57 | } 58 | 59 | dynamic "network_interface" { 60 | for_each = var.additional_networks 61 | content { 62 | network = network_interface.value.network 63 | subnetwork = network_interface.value.subnetwork 64 | subnetwork_project = network_interface.value.subnetwork_project 65 | network_ip = length(network_interface.value.network_ip) > 0 ? 
network_interface.value.network_ip : null 66 | dynamic "access_config" { 67 | for_each = network_interface.value.access_config 68 | content { 69 | nat_ip = access_config.value.nat_ip 70 | network_tier = access_config.value.network_tier 71 | } 72 | } 73 | dynamic "ipv6_access_config" { 74 | for_each = network_interface.value.ipv6_access_config 75 | content { 76 | network_tier = ipv6_access_config.value.network_tier 77 | } 78 | } 79 | } 80 | } 81 | 82 | source_instance_template = var.instance_template 83 | } 84 | -------------------------------------------------------------------------------- /infra/modules/instance-from-tpl/outputs.tf: -------------------------------------------------------------------------------- 1 | 2 | output "available_zones" { 3 | description = "List of available zones in region" 4 | value = data.google_compute_zones.available.names 5 | } 6 | 7 | output "self_links" { 8 | value = google_compute_instance_from_template.compute_instance[*].self_link 9 | } 10 | 11 | output "hostname" { 12 | value = local.hostname 13 | } 14 | 15 | output "ip_address" { 16 | value = local.static_ips 17 | } -------------------------------------------------------------------------------- /infra/modules/instance-from-tpl/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | type = string 3 | description = "The GCP project ID" 4 | default = null 5 | } 6 | 7 | variable "network" { 8 | description = "Network to deploy to. Only one of network or subnetwork should be specified." 9 | type = string 10 | default = "" 11 | } 12 | 13 | variable "region" { 14 | description = "The GCP region where the unmanaged instance group resides." 15 | type = string 16 | } 17 | 18 | variable "subnetwork" { 19 | description = "Subnet to deploy to. Only one of network or subnetwork should be specified." 
20 | type = string 21 | default = "" 22 | } 23 | 24 | variable "subnetwork_project" { 25 | description = "The project that subnetwork belongs to" 26 | type = string 27 | default = "" 28 | } 29 | 30 | variable "additional_networks" { 31 | description = "Additional network interface details for GCE, if any." 32 | default = [] 33 | type = list(object({ 34 | network = string 35 | subnetwork = string 36 | subnetwork_project = string 37 | network_ip = string 38 | access_config = list(object({ 39 | nat_ip = string 40 | network_tier = string 41 | })) 42 | ipv6_access_config = list(object({ 43 | network_tier = string 44 | })) 45 | })) 46 | } 47 | 48 | variable "hostname" { 49 | description = "Hostname of instances" 50 | type = string 51 | default = "" 52 | } 53 | 54 | variable "static_ips" { 55 | type = list(string) 56 | description = "List of static IPs for VM instances" 57 | default = [] 58 | } 59 | 60 | variable "num_instances" { 61 | description = "Number of instances to create. This value is ignored if static_ips is provided." 62 | type = string 63 | default = "1" 64 | } 65 | 66 | variable "named_ports" { 67 | description = "Named name and named port" 68 | type = list(object({ 69 | name = string 70 | port = number 71 | })) 72 | default = [] 73 | } 74 | 75 | variable "instance_template" { 76 | description = "Instance template self_link used to create compute instances" 77 | type = string 78 | } 79 | 80 | variable "access_config" { 81 | description = "Access configurations, i.e. IPs via which the VM instance can be accessed via the Internet." 82 | type = list(list(object({ 83 | nat_ip = string 84 | network_tier = string 85 | }))) 86 | default = [] 87 | } 88 | 89 | variable "ipv6_access_config" { 90 | description = "IPv6 access configurations. Currently a max of 1 IPv6 access configuration is supported. If not specified, the instance will have no external IPv6 Internet access." 
91 | type = list(list(object({ 92 | network_tier = string 93 | }))) 94 | default = [] 95 | } 96 | 97 | variable "hostname_suffix_separator" { 98 | type = string 99 | description = "Separator character to compose hostname when add_hostname_suffix is set to true." 100 | default = "-" 101 | } 102 | 103 | variable "zones" { 104 | type = list(string) 105 | description = "(Optional) List of availability zones to create VM instances in" 106 | default = [] 107 | } 108 | -------------------------------------------------------------------------------- /infra/modules/instance-from-tpl/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=0.13.0" 3 | required_providers { 4 | google = { 5 | source = "hashicorp/google" 6 | version = ">= 4.48, < 6" 7 | } 8 | google-beta = { 9 | source = "hashicorp/google-beta" 10 | version = ">= 4.48, < 6" 11 | } 12 | } 13 | provider_meta "google" { 14 | module_name = "blueprints/terraform/terraform-google-vm:mig/v10.1.1" 15 | } 16 | provider_meta "google-beta" { 17 | module_name = "blueprints/terraform/terraform-google-vm:mig/v10.1.1" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /infra/modules/mig_template/outputs.tf: -------------------------------------------------------------------------------- 1 | output "self_link_unique" { 2 | description = "Unique self-link of instance template (recommended output to use instead of self_link)" 3 | value = google_compute_instance_template.tpl.self_link_unique 4 | } 5 | 6 | output "self_link" { 7 | description = "Self-link of instance template" 8 | value = google_compute_instance_template.tpl.self_link 9 | } 10 | 11 | output "name" { 12 | description = "Name of instance template" 13 | value = google_compute_instance_template.tpl.name 14 | } 15 | 16 | output "tags" { 17 | description = "Tags that will be associated with instance(s)" 18 | value = 
google_compute_instance_template.tpl.tags 19 | } 20 | -------------------------------------------------------------------------------- /infra/modules/mig_template/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">=0.13.0" 3 | required_providers { 4 | google = { 5 | source = "hashicorp/google" 6 | version = ">= 4.67, < 6" 7 | } 8 | } 9 | provider_meta "google" { 10 | module_name = "blueprints/terraform/terraform-google-vm:instance_template/v10.1.1" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /infra/partner-mainnet/network.tf: -------------------------------------------------------------------------------- 1 | module "vpc" { 2 | count = var.create_network ? 1 : 0 3 | source = "terraform-google-modules/network/google" 4 | version = "~> 9.0" 5 | 6 | project_id = var.project_id 7 | network_name = var.network 8 | routing_mode = "GLOBAL" 9 | 10 | subnets = [ 11 | { 12 | subnet_name = var.subnetwork 13 | subnet_ip = "10.10.10.0/24" 14 | subnet_region = var.region 15 | } 16 | ] 17 | 18 | routes = [ 19 | { 20 | name = "egress-internet" 21 | description = "route through IGW to access internet" 22 | destination_range = "0.0.0.0/0" 23 | tags = "egress-inet" 24 | next_hop_internet = "true" 25 | } 26 | ] 27 | 28 | ingress_rules = [ 29 | { 30 | name = "allow-iap-ssh" 31 | description = "this rule allows you to connect to your VM via SSH without port 22 being public" 32 | source_ranges = ["35.235.240.0/20"] 33 | target_tags = ["allow-ssh"] 34 | allow = [ 35 | { 36 | protocol = "tcp", 37 | ports = ["22"] 38 | } 39 | ] 40 | }, 41 | ] 42 | } 43 | 44 | resource "google_compute_router" "router" { 45 | count = var.create_network ? 1 : 0 46 | name = "default" 47 | network = var.network 48 | project = var.project_id 49 | region = var.region 50 | } 51 | 52 | resource "google_compute_router_nat" "nat" { 53 | count = var.create_network ? 
1 : 0 54 | name = "nat" 55 | router = google_compute_router.router[count.index].name 56 | region = var.region 57 | nat_ip_allocate_option = "AUTO_ONLY" 58 | source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" 59 | } -------------------------------------------------------------------------------- /infra/partner-mainnet/outputs.tf: -------------------------------------------------------------------------------- 1 | output "node_public_ip" { 2 | value = google_compute_address.external_ips[*].address 3 | } 4 | -------------------------------------------------------------------------------- /infra/partner-mainnet/resources.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "gcs" { 3 | bucket = "multichain-terraform-{your_entity_name}" 4 | prefix = "state/mainnet" 5 | } 6 | 7 | required_providers { 8 | google = { 9 | source = "hashicorp/google" 10 | version = "4.73.0" 11 | } 12 | } 13 | } -------------------------------------------------------------------------------- /infra/partner-mainnet/terraform-mainnet-example.tfvars: -------------------------------------------------------------------------------- 1 | env = "mainnet" 2 | project_id = "" 3 | network = "default" 4 | subnetwork = "default" 5 | image = "docker.io/nearone/mpc-node-gcp:mainnet-release" 6 | region = "europe-west1" 7 | zone = "europe-west1-b" 8 | near_boot_nodes = 
"ed25519:46peZ8rcRucjVPSSjfYEzUbSpqM8VEvCFsVwVFozRK6Q@65.108.96.254:24567,ed25519:DHowcbPk8DXYbefJgy7p74rqAn1wpJweeYuWmch6DEYd@65.109.70.223:24567,ed25519:2q7pyVVqW2vqYW6KPnoVu1HaPFaTkN2mnD9o76XAy88c@198.244.165.131:24567,ed25519:bZy5XwAekxbeth8btCoNaFnRZhZA6wZY8Q6NwqAmWv6@148.113.8.54:24567,ed25519:D7KoxvdbaiGFsx6UERAqEzeS5xYNRjoPaKrM54B8JdP6@194.182.189.12:24567,ed25519:3QT2JpTNBDREUm7ez2xCU1YKEHGqm9osAbg9hCPix3Ua@54.173.255.47:24567,ed25519:B5cpwy1LX9J6HUZiyeie9FZb1m8Qw8Q9JruFrYvMj3v1@103.50.32.50:22710,ed25519:6oSQBoaLLR2ttvqbPAEVT5TKfwYUyi3qYu7CXA5B5ERV@162.55.25.119:24567,ed25519:HZfT8ypFWQLb3YT4eyDLGim8mmFyUzxHkbCogmwWX2mE@142.132.192.24:24567,ed25519:HApx4szhWVtca1KL82hLmLXtAXrSWVVQkQJcT2BBVTnD@65.21.65.102:24567,ed25519:8DMLbdvVUVaL5KikT3Lqf1tXFW7iBF2yDFtKsc79JUow@34.135.231.166:24567,ed25519:BKgRXmMYuWXwTzS9izTh1g7NYL5q2A8azywfz9i8eUg9@65.109.69.56:24567" 9 | # These will be specific to your node 10 | node_configs = [ 11 | { 12 | # Each node has a unique account ID 13 | account = "{your_near_account_id}" 14 | domain = "{your_domain_name}" 15 | # These 4 values below should match your secret names in google secrets manager 16 | gcp_local_encryption_key_secret_id = "multichain-local-encryption-key-mainnet-0" 17 | gcp_keyshare_secret_id = "multichain-sk-share-mainnet-0" 18 | gcp_p2p_private_key_secret_id = "multichain-sign-sk-mainnet-0" 19 | gcp_account_sk_secret_id = "multichain-account-sk-mainnet-0" 20 | }, 21 | ] -------------------------------------------------------------------------------- /infra/partner-mainnet/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | description = "The project ID to deploy resource into" 3 | type = string 4 | } 5 | 6 | variable "subnetwork" { 7 | description = "The name of the subnetwork to deploy instances into" 8 | type = string 9 | } 10 | 11 | variable "mig_name" { 12 | description = "The desired name to assign to the deployed managed instance 
group" 13 | type = string 14 | default = "mpc-mig" 15 | } 16 | 17 | variable "image" { 18 | description = "The Docker image to deploy to GCE instances. Note: This is a public image repository used for updating your nodes, please do not change this" 19 | type = string 20 | } 21 | 22 | variable "region" { 23 | description = "The GCP region to deploy instances into" 24 | type = string 25 | } 26 | 27 | variable "zone" { 28 | type = string 29 | } 30 | 31 | variable "network" { 32 | description = "The GCP network" 33 | type = string 34 | } 35 | 36 | variable "additional_metadata" { 37 | type = map(any) 38 | description = "Additional metadata to attach to the instance" 39 | default = { 40 | cos-update-strategy : "update_enabled" 41 | } 42 | } 43 | 44 | variable "service_account" { 45 | type = object({ 46 | email = string, 47 | scopes = list(string) 48 | }) 49 | default = { 50 | email = "" 51 | scopes = ["cloud-platform"] 52 | } 53 | } 54 | 55 | variable "node_configs" { 56 | type = list(object({ 57 | account = string 58 | domain = string 59 | gcp_local_encryption_key_secret_id = string 60 | gcp_keyshare_secret_id = string 61 | gcp_p2p_private_key_secret_id = string 62 | gcp_account_sk_secret_id = string 63 | })) 64 | } 65 | 66 | variable "env" { 67 | type = string 68 | } 69 | 70 | variable "near_boot_nodes" { 71 | type = string 72 | } 73 | 74 | variable "create_network" { 75 | default = false 76 | description = "Do you want to create a new VPC network (true) or use default GCP network (false)?" 77 | } 78 | -------------------------------------------------------------------------------- /infra/partner-testnet/network.tf: -------------------------------------------------------------------------------- 1 | module "vpc" { 2 | count = var.create_network ? 
1 : 0 3 | source = "terraform-google-modules/network/google" 4 | version = "~> 9.0" 5 | 6 | project_id = var.project_id 7 | network_name = var.network 8 | routing_mode = "GLOBAL" 9 | 10 | subnets = [ 11 | { 12 | subnet_name = var.subnetwork 13 | subnet_ip = "10.10.10.0/24" 14 | subnet_region = var.region 15 | } 16 | ] 17 | 18 | routes = [ 19 | { 20 | name = "egress-internet" 21 | description = "route through IGW to access internet" 22 | destination_range = "0.0.0.0/0" 23 | tags = "egress-inet" 24 | next_hop_internet = "true" 25 | } 26 | ] 27 | 28 | ingress_rules = [ 29 | { 30 | name = "allow-iap-ssh" 31 | description = "this rule allows you to connect to your VM via SSH without port 22 being public" 32 | source_ranges = ["35.235.240.0/20"] 33 | target_tags = ["allow-ssh"] 34 | allow = [ 35 | { 36 | protocol = "tcp", 37 | ports = ["22"] 38 | } 39 | ] 40 | }, 41 | ] 42 | } 43 | 44 | resource "google_compute_router" "router" { 45 | name = "default" 46 | network = var.network 47 | project = var.project_id 48 | region = var.region 49 | } 50 | 51 | resource "google_compute_router_nat" "nat" { 52 | name = "nat" 53 | router = google_compute_router.router.name 54 | region = var.region 55 | nat_ip_allocate_option = "AUTO_ONLY" 56 | source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" 57 | } -------------------------------------------------------------------------------- /infra/partner-testnet/outputs.tf: -------------------------------------------------------------------------------- 1 | output "node_public_ip" { 2 | value = google_compute_address.external_ips[*].address 3 | } 4 | -------------------------------------------------------------------------------- /infra/partner-testnet/resources.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "gcs" { 3 | bucket = "multichain-terraform-{your_entity_name}" 4 | prefix = "state/testnet" 5 | } 6 | 7 | required_providers { 8 | google = { 9 | source = 
"hashicorp/google" 10 | version = "4.73.0" 11 | } 12 | } 13 | } -------------------------------------------------------------------------------- /infra/partner-testnet/terraform-testnet-example.tfvars: -------------------------------------------------------------------------------- 1 | env = "testnet" 2 | project_id = "" 3 | network = "default" 4 | subnetwork = "default" 5 | image = "docker.io/nearone/mpc-node-gcp:testnet-release" 6 | region = "europe-west1" 7 | zone = "europe-west1-b" 8 | near_boot_nodes = "ed25519:EPH7y1nFPbtDqA3yNqpCX11JbLYT1dNbyD5axMM34fiZ@91.237.141.25:24567,ed25519:8UKENS6qMEr9ErfMyJEiDrsvrTbP9LkFcZeusy261Z5q@66.23.239.58:24568,ed25519:E319a9GQ3VmnQsNjtzDQ2XggkddnYiThmf1RUFQVQoZD@135.181.59.45:24568,ed25519:CZnYNFkwVc7puGKeVczEET3F7niQrmta9qBbrrCmQMoV@15.204.102.233:24567,ed25519:8QFAJ4kLg9rTXPkWrtZpTt6HRj4F38zfjQMcMR51QZDR@65.108.142.173:24567,ed25519:6KqNf95KiCriWByjXNJWGVYKc88Ff7vjs1LDGdqktFZD@54.184.146.36:24567,ed25519:7Bq35uKsRvoB8UAyF566LYSazdzdmwuUwAAwheBUy8iA@37.27.98.72:24567" 9 | # These will be specific to your node 10 | node_configs = [ 11 | { 12 | # Each node has a unique account ID 13 | account = "{your_near_account_id}" 14 | domain = "" 15 | # These 4 values below should match your secret names in google secrets manager 16 | gcp_local_encryption_key_secret_id = "multichain-local-encryption-key-testnet-0" 17 | gcp_keyshare_secret_id = "multichain-sk-share-testnet-0" 18 | gcp_p2p_private_key_secret_id = "multichain-sign-sk-testnet-0" 19 | gcp_account_sk_secret_id = "multichain-account-sk-testnet-0" 20 | }, 21 | ] -------------------------------------------------------------------------------- /infra/partner-testnet/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | description = "The project ID to deploy resource into" 3 | type = string 4 | } 5 | 6 | variable "network" { 7 | description = "The GCP network" 8 | type = string 9 | } 10 | 11 | variable 
"subnetwork" { 12 | description = "The name of the subnetwork to deploy instances into" 13 | type = string 14 | } 15 | 16 | variable "image" { 17 | description = "The Docker image to deploy to GCE instances. Note: This is a public image repository used for updating your nodes, please do not change this" 18 | type = string 19 | } 20 | 21 | variable "region" { 22 | description = "The GCP region to deploy instances into" 23 | type = string 24 | } 25 | 26 | variable "zone" { 27 | type = string 28 | } 29 | 30 | variable "near_boot_nodes" { 31 | type = string 32 | } 33 | 34 | variable "service_account" { 35 | type = object({ 36 | email = string, 37 | scopes = list(string) 38 | }) 39 | default = { 40 | email = "" 41 | scopes = ["cloud-platform"] 42 | } 43 | } 44 | 45 | variable "node_configs" { 46 | type = list(object({ 47 | account = string 48 | domain = string 49 | gcp_local_encryption_key_secret_id = string 50 | gcp_keyshare_secret_id = string 51 | gcp_p2p_private_key_secret_id = string 52 | gcp_account_sk_secret_id = string 53 | })) 54 | } 55 | 56 | variable "env" { 57 | type = string 58 | } 59 | 60 | variable "create_network" { 61 | default = false 62 | description = "Do you want to create a new VPC network (true) or use default GCP network (false)?" 
63 | } -------------------------------------------------------------------------------- /infra/scripts/generate_keys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "generate_keys" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | hex = "0.4.3" 11 | near-crypto = "0.23" 12 | rand = "0.8" 13 | 14 | [workspace] 15 | -------------------------------------------------------------------------------- /infra/scripts/generate_keys/src/main.rs: -------------------------------------------------------------------------------- 1 | use rand::Rng; 2 | 3 | fn main() { 4 | let sign_sk = near_crypto::SecretKey::from_random(near_crypto::KeyType::ED25519); 5 | let sign_pk = sign_sk.public_key(); 6 | println!("p2p public key sign_pk: {}", sign_pk); 7 | println!("p2p secret key sign_sk: {}", sign_sk); 8 | let near_account_sk = near_crypto::SecretKey::from_random(near_crypto::KeyType::ED25519); 9 | let near_account_pk = near_account_sk.public_key(); 10 | println!("near account public key: {}", near_account_pk); 11 | println!("near account secret key: {}", near_account_sk); 12 | let mut rng = rand::thread_rng(); 13 | let random_bytes: [u8; 16] = rng.gen(); 14 | let hex_string: String = random_bytes.iter().map(|byte| format!("{:02x}", byte)).collect(); 15 | println!("near local encryption key: {}", hex_string.to_uppercase()); 16 | } -------------------------------------------------------------------------------- /infra/scripts/keys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-keys" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | [lib] 8 | crate-type = ["cdylib", "lib"] 9 | 10 | [dependencies] 11 | borsh = "1.5.0" 12 | hpke = { version = "0.11", features = ["serde_impls", "std"] 
} 13 | serde = { version = "1", features = ["derive"] } 14 | rand = { version = "0.8" } 15 | 16 | [dev-dependencies] 17 | hex = "*" 18 | -------------------------------------------------------------------------------- /infra/scripts/keys/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod hpke; 2 | -------------------------------------------------------------------------------- /infra/scripts/mpc_init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -exo pipefail 3 | 4 | # Takes disk alias and mount path as parameters 5 | function disk_initial_setup { 6 | # Terraform attaches disks in random (well, not easily determinable) order 7 | # We first need to figure out device name 8 | disk_suffix=$1 9 | path=$2 10 | 11 | # Ensure that we can see attached disk 12 | echo "Looking for disk by id google*-$disk_suffix..." 13 | until L=$(readlink /dev/disk/by-id/google*-$disk_suffix) 14 | do 15 | sleep 1 16 | done 17 | disk=$(realpath /dev/disk/by-id/$L) 18 | 19 | echo Mounting $disk_suffix disk with name $disk to path $path 20 | 21 | # Ensure we can see the attached disk. 22 | echo "Looking for $disk..." 23 | until ls $disk 24 | do 25 | sleep 1 26 | done 27 | 28 | # Format the device, if necessary. 29 | until file -s $(realpath $disk) | cut -d , -f1 | grep ext4 30 | do 31 | mkfs.ext4 -m 0 -E lazy_itable_init=0,lazy_journal_init=0,discard $disk 32 | done 33 | 34 | # Ensure the disk is formatted. 35 | until file -s $(realpath $disk) | cut -d , -f1 | grep ext4 36 | do 37 | echo "Disk not formatted as ext4... exiting!" 38 | exit 1 39 | done 40 | fsck.ext4 -p $disk 41 | 42 | # Create a mount point. 43 | mkdir -p $path 44 | # ... and mount. 
45 | mount -o discard,defaults $disk $path 46 | 47 | # this will instantiate UUID variable with disk UUID value 48 | # $ blkid /dev/sdc | cut -d ' ' -f 2 49 | # UUID="2b2e50ed-03f9-4831-8922-58d90f5aaaaa" 50 | eval $(blkid $disk | cut -d ' ' -f 2) 51 | 52 | # Clearing disk from fstab (if present) 53 | sed -i -e "\|$disk|d" -e "\|$UUID|d" /etc/fstab 54 | echo "UUID=$UUID $path ext4 discard,defaults,nofail 0 2" >> /etc/fstab 55 | 56 | resize2fs $disk 57 | } 58 | 59 | ##################################################################### 60 | # Disk initial setup 61 | ##################################################################### 62 | #If already have some of data dirs then initial setup was already done 63 | # TODO: Improve at skip step of disk attach. Do not exit on this step. 64 | MPC_DIR=/home/mpc 65 | if [ -d "$MPC_DIR/data" ]; then 66 | echo "Data directory already exist, there is no need to initial setup, exiting..." 67 | else 68 | disk_initial_setup mpc-partner-*net-* "$MPC_DIR" 69 | mkdir -p $MPC_DIR/data 70 | fi 71 | 72 | -------------------------------------------------------------------------------- /infra/scripts/upload_secrets/do-not-commit-example.txt: -------------------------------------------------------------------------------- 1 | multichain-local-encryption-key-mainnet-0=REPLACE_WITH_KEY 2 | multichain-sk-share-mainnet-0=REPLACE_WITH_KEY 3 | multichain-sign-sk-mainnet-0=REPLACE_WITH_KEY 4 | multichain-account-sk-mainnet-0=REPLACE_WITH_KEY -------------------------------------------------------------------------------- /infra/scripts/upload_secrets/upload_secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Usage: ./upload_secrets.sh -d DEST_PROJECT -f SECRETS_FILE.txt 4 | 5 | # Secret file format: 6 | # multichain-account-sk-mainnet-0=foo 7 | # multichain-account-sk-mainnet-1=bar 8 | # multichain-account-sk-mainnet-2=baz 9 | 10 | # Should be a .txt file ^ 11 | 12 | # DO NOT 
COMMIT THE TXT FILE TO SOURCE CONTROL, DELETE AFTER SECRETS HAVE BEEN CREATED AND NETWORK IS FUNCTIONAL 13 | 14 | while getopts ":d:f:" opt; do 15 | case $opt in 16 | d) DEST_PROJECT="$OPTARG" 17 | ;; 18 | f) SECRETS_FILE="$OPTARG" 19 | ;; 20 | \?) echo "Invalid option -$OPTARG" >&2 21 | exit 1 22 | ;; 23 | esac 24 | done 25 | 26 | if [ -z "$DEST_PROJECT" ] || [ -z "$SECRETS_FILE" ]; then 27 | echo "Usage: $0 -d DEST_PROJECT -f SECRETS_FILE" 28 | exit 1 29 | fi 30 | 31 | while read -r line || [ -n "$line" ]; do 32 | line=$(echo "$line" | xargs) 33 | 34 | SECRET_NAME=$(echo "$line" | cut -d '=' -f 1) 35 | SECRET_VALUE=$(echo "$line" | cut -d '=' -f 2-) 36 | 37 | echo "Creating secret: $SECRET_NAME in project: $DEST_PROJECT" 38 | printf "%s" "$SECRET_VALUE" | gcloud secrets create "$SECRET_NAME" --data-file=- --project="$DEST_PROJECT" --replication-policy="automatic" 39 | 40 | if [ $? -ne 0 ]; then 41 | echo "Failed to create secret: $SECRET_NAME in project: $DEST_PROJECT" 42 | continue 43 | fi 44 | done < "$SECRETS_FILE" 45 | 46 | echo "Secret creation completed." 
47 | 48 | -------------------------------------------------------------------------------- /libs/chain-signatures/.gitignore: -------------------------------------------------------------------------------- 1 | .contract.itest.build.lock 2 | -------------------------------------------------------------------------------- /libs/chain-signatures/Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "contract", 4 | ] 5 | resolver = "2" 6 | 7 | [workspace.metadata.test] 8 | doc = true 9 | 10 | [profile.release] 11 | lto = "fat" 12 | codegen-units = 1 13 | strip = true 14 | panic = "abort" 15 | opt-level="s" 16 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [profile.release] 2 | codegen-units = 1 3 | # Tell `rustc` to optimize for small code size. 4 | opt-level = "z" 5 | lto = true 6 | debug = false 7 | panic = "abort" 8 | overflow-checks = true 9 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-contract" 3 | version = "2.1.2-rc.1" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | [lib] 8 | crate-type = ["cdylib", "lib"] 9 | 10 | [dependencies] 11 | anyhow = "1" 12 | curve25519-dalek = { version = "4.1.3", features = [ 13 | "group", 14 | "serde", 15 | ], default-features = false } 16 | borsh = "1.5.0" 17 | near-sdk = { version = "5.11.0", features = [ 18 | "legacy", 19 | "unit-testing", 20 | "unstable", 21 | ] } 22 | rand = { version = "0.8", optional = true } 23 | serde = { version = "1", features = ["derive"] } 24 | serde_with = { version = "3.12.0" } 25 | serde_json = "1" 26 | schemars = "0.8" 27 | k256 = { version = "0.13.4", features = [ 
28 | "sha256", 29 | "ecdsa", 30 | "serde", 31 | "arithmetic", 32 | "expose-field", 33 | ] } 34 | 35 | near-gas = { version = "0.2.5", features = ["serde", "borsh", "schemars"] } 36 | near-account-id = "1" 37 | thiserror = "1" 38 | sha3 = "0.10.8" 39 | hex = "0.4.3" 40 | 41 | [target.'cfg(target_arch = "wasm32")'.dependencies] 42 | getrandom = { version = "0.2.12", features = ["custom"] } 43 | 44 | 45 | [dev-dependencies] 46 | rand = "0.8" 47 | tokio = { version = "1", features = ["full"] } 48 | sha2 = "0.10.8" 49 | reqwest = { version = "0.11", features = ["json"] } 50 | base64 = "0.21" 51 | 52 | # crypto dependencies 53 | ed25519-dalek = { version = "2.1.1", features = ["serde", "digest"] } 54 | ecdsa = { version = "0.16.9", features = ["digest", "hazmat"] } 55 | signature = "2.2.0" 56 | digest = "0.10.7" 57 | 58 | # near dependencies 59 | near-crypto = "0.26.0" 60 | near-workspaces = "0.18" 61 | near-primitives = "=0.29.2" # not entirely clear why this is needed but otherwise tests don't compile 62 | cait-sith = { git = "https://github.com/Near-One/cait-sith", rev = "5e0ce40a16dc3e0889277f66bb2a6400d6ef36a5", features = [ 63 | "k256", 64 | ] } 65 | frost-ed25519 = "2.1.0" 66 | fs2 = "0.4" 67 | rstest = "0.25.0" 68 | 69 | [features] 70 | test-utils = ["rand"] 71 | dev-utils = [] 72 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/src/config/consts.rs: -------------------------------------------------------------------------------- 1 | // Default for `key_event_timeout_blocks`. 
2 | pub const DEFAULT_KEY_EVENT_TIMEOUT_BLOCKS: u64 = 30; 3 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/src/config/impls.rs: -------------------------------------------------------------------------------- 1 | use crate::legacy_contract_state; 2 | 3 | use super::consts::DEFAULT_KEY_EVENT_TIMEOUT_BLOCKS; 4 | use super::{Config, InitConfig}; 5 | impl Default for Config { 6 | fn default() -> Self { 7 | Config { 8 | key_event_timeout_blocks: DEFAULT_KEY_EVENT_TIMEOUT_BLOCKS, 9 | } 10 | } 11 | } 12 | 13 | impl From> for Config { 14 | fn from(value: Option) -> Self { 15 | match value { 16 | None => Config::default(), 17 | Some(init_config) => Config { 18 | key_event_timeout_blocks: init_config 19 | .key_event_timeout_blocks 20 | .unwrap_or(DEFAULT_KEY_EVENT_TIMEOUT_BLOCKS), 21 | }, 22 | } 23 | } 24 | } 25 | impl From<&legacy_contract_state::ConfigV1> for Config { 26 | fn from(_config: &legacy_contract_state::ConfigV1) -> Self { 27 | Config { 28 | key_event_timeout_blocks: DEFAULT_KEY_EVENT_TIMEOUT_BLOCKS, 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/src/config/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod consts; 2 | mod impls; 3 | use near_sdk::near; 4 | 5 | /// Config for V2 of the contract. 6 | #[near(serializers=[borsh, json])] 7 | #[derive(Clone, Debug, PartialEq, Eq)] 8 | pub struct Config { 9 | /// If a key event attempt has not successfully completed within this many blocks, 10 | /// it is considered failed. 11 | pub key_event_timeout_blocks: u64, 12 | } 13 | 14 | /// Config for initializing V2 of the contract. 
15 | #[near(serializers=[borsh, json])] 16 | #[derive(Clone, Debug, PartialEq, Eq)] 17 | pub struct InitConfig { 18 | pub key_event_timeout_blocks: Option, 19 | } 20 | 21 | #[cfg(test)] 22 | mod tests { 23 | use super::*; 24 | use serde_json; 25 | 26 | #[test] 27 | fn test_config_serialization() { 28 | let config = Config { 29 | key_event_timeout_blocks: 2000, 30 | }; 31 | let json = serde_json::to_string(&config).unwrap(); 32 | let deserialized: Config = serde_json::from_str(&json).unwrap(); 33 | assert_eq!(config, deserialized); 34 | } 35 | 36 | #[test] 37 | fn test_init_config_serialization() { 38 | let init_config = InitConfig { 39 | key_event_timeout_blocks: None, 40 | }; 41 | let json = serde_json::to_string(&init_config).unwrap(); 42 | let deserialized: InitConfig = serde_json::from_str(&json).unwrap(); 43 | assert_eq!(init_config, deserialized); 44 | } 45 | 46 | #[test] 47 | fn test_init_config_to_config_conversion() { 48 | let init_config = InitConfig { 49 | key_event_timeout_blocks: None, 50 | }; 51 | let config: Config = Some(init_config).into(); 52 | use consts::DEFAULT_KEY_EVENT_TIMEOUT_BLOCKS; 53 | assert_eq!( 54 | config.key_event_timeout_blocks, 55 | DEFAULT_KEY_EVENT_TIMEOUT_BLOCKS 56 | ); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/src/crypto_shared/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod kdf; 2 | pub mod types; 3 | 4 | use k256::{elliptic_curve::sec1::FromEncodedPoint, EncodedPoint}; 5 | pub use kdf::{derive_key_secp256k1, derive_tweak, x_coordinate}; 6 | pub use types::{ 7 | ed25519_types, 8 | k256_types::{self, SerializableScalar}, 9 | SignatureResponse, 10 | }; 11 | 12 | // Our wasm runtime doesn't support good synchronous entropy. 13 | // We could use something like VRF + pseudorandom here, but someone would likely shoot themselves in the foot with it. 
14 | // Our crypto libraries should definitely panic, because they normally expect randomness to be private 15 | #[cfg(target_arch = "wasm32")] 16 | use getrandom::{register_custom_getrandom, Error}; 17 | #[cfg(target_arch = "wasm32")] 18 | pub fn randomness_unsupported(_: &mut [u8]) -> Result<(), Error> { 19 | Err(Error::UNSUPPORTED) 20 | } 21 | #[cfg(target_arch = "wasm32")] 22 | register_custom_getrandom!(randomness_unsupported); 23 | 24 | pub fn near_public_key_to_affine_point(pk: near_sdk::PublicKey) -> k256_types::PublicKey { 25 | // TODO: We should encode the curve type as a generic parameter to the key, 26 | // to enforce this check at compile time. 27 | assert_eq!( 28 | pk.curve_type(), 29 | near_sdk::CurveType::SECP256K1, 30 | "Expected a key on the SECP256K1 curve" 31 | ); 32 | 33 | let mut bytes = pk.into_bytes(); 34 | bytes[0] = 0x04; 35 | let point = EncodedPoint::from_bytes(bytes).unwrap(); 36 | k256_types::PublicKey::from_encoded_point(&point).unwrap() 37 | } 38 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/src/primitives/code_hash.rs: -------------------------------------------------------------------------------- 1 | use near_sdk::{log, near}; 2 | use std::collections::BTreeMap; 3 | 4 | use super::key_state::AuthenticatedParticipantId; 5 | 6 | /// Hash of a Docker image running in the TEE environment. 7 | #[near(serializers=[borsh, json])] 8 | #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] 9 | pub struct CodeHash([u8; 32]); 10 | 11 | /// Tracks votes to add whitelisted TEE code hashes. Each participant can at any given time vote for 12 | /// a code hash to add. 
13 | #[near(serializers=[borsh, json])] 14 | #[derive(Debug, Clone, Default, PartialEq, Eq)] 15 | pub struct CodeHashesVotes { 16 | pub proposal_by_account: BTreeMap, 17 | } 18 | 19 | impl CodeHashesVotes { 20 | /// Casts a vote for the proposal and returns the total number of participants who have voted 21 | /// for the same code hash. If the participant already voted, their previous vote is replaced. 22 | pub fn vote(&mut self, proposal: CodeHash, participant: &AuthenticatedParticipantId) -> u64 { 23 | if self 24 | .proposal_by_account 25 | .insert(participant.clone(), proposal.clone()) 26 | .is_some() 27 | { 28 | log!("removed old vote for signer"); 29 | } 30 | let total = self.count_votes(&proposal); 31 | log!("total votes for proposal: {}", total); 32 | total 33 | } 34 | 35 | /// Counts the total number of participants who have voted for the given code hash. 36 | fn count_votes(&self, proposal: &CodeHash) -> u64 { 37 | self.proposal_by_account 38 | .values() 39 | .filter(|&prop| prop == proposal) 40 | .count() as u64 41 | } 42 | 43 | /// Clears all proposals. 
44 | pub fn clear_votes(&mut self) { 45 | self.proposal_by_account.clear(); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/src/primitives/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod code_hash; 2 | pub mod domain; 3 | pub mod key_state; 4 | pub mod participants; 5 | pub mod signature; 6 | #[cfg(any(test, feature = "test-utils"))] 7 | pub mod test_utils; 8 | pub mod thresholds; 9 | pub mod votes; 10 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/src/primitives/test_utils.rs: -------------------------------------------------------------------------------- 1 | use crate::crypto_shared::types::PublicKeyExtended; 2 | use crate::primitives::{ 3 | participants::{ParticipantInfo, Participants}, 4 | thresholds::{Threshold, ThresholdParameters}, 5 | }; 6 | use curve25519_dalek::EdwardsPoint; 7 | use k256::elliptic_curve::Group; 8 | use near_sdk::{AccountId, CurveType, PublicKey}; 9 | use rand::{distributions::Uniform, Rng}; 10 | use std::collections::BTreeMap; 11 | 12 | pub fn bogus_ed25519_public_key_extended() -> PublicKeyExtended { 13 | let rng = rand::thread_rng(); 14 | 15 | let edwards_point = EdwardsPoint::random(rng); 16 | let compressed_edwards_point = edwards_point.compress(); 17 | let near_public_key_compressed = PublicKey::from_parts( 18 | CurveType::ED25519, 19 | compressed_edwards_point.as_bytes().into(), 20 | ) 21 | .unwrap(); 22 | 23 | PublicKeyExtended::Ed25519 { 24 | near_public_key_compressed, 25 | edwards_point, 26 | } 27 | } 28 | 29 | pub fn bogus_ed25519_near_public_key() -> PublicKey { 30 | bogus_ed25519_public_key_extended().into() 31 | } 32 | 33 | #[test] 34 | fn test_random_public_key() { 35 | let pk1 = bogus_ed25519_near_public_key(); 36 | let pk2 = bogus_ed25519_near_public_key(); 37 | assert_ne!(pk1, pk2, "Random keys should be different"); 
38 | } 39 | 40 | pub fn gen_account_id() -> AccountId { 41 | let lower_case = Uniform::new_inclusive(b'a', b'z'); 42 | let random_string: String = rand::thread_rng() 43 | .sample_iter(&lower_case) 44 | .take(12) 45 | .map(char::from) 46 | .collect(); 47 | let account_id: String = format!("dummy.account.{}", random_string); 48 | account_id.parse().unwrap() 49 | } 50 | 51 | #[test] 52 | fn test_random_account_id() { 53 | let acc1 = gen_account_id(); 54 | let acc2 = gen_account_id(); 55 | assert_ne!(acc1, acc2, "Random keys should be different"); 56 | } 57 | 58 | pub fn gen_participant(i: usize) -> (AccountId, ParticipantInfo) { 59 | ( 60 | gen_account_id(), 61 | ParticipantInfo { 62 | url: format!("near{}", i), 63 | sign_pk: bogus_ed25519_near_public_key(), 64 | }, 65 | ) 66 | } 67 | 68 | pub fn min_thrershold(n: usize) -> usize { 69 | ((n as f64) * 0.6).ceil() as usize 70 | } 71 | 72 | pub fn gen_accounts_and_info(n: usize) -> BTreeMap { 73 | (0..n).map(gen_participant).collect() 74 | } 75 | 76 | pub fn gen_participants(n: usize) -> Participants { 77 | let mut participants = Participants::new(); 78 | for i in 0..n { 79 | let (account_id, info) = gen_participant(i); 80 | let _ = participants.insert(account_id, info); 81 | } 82 | participants 83 | } 84 | 85 | pub fn gen_seed() -> [u8; 32] { 86 | let mut rng = rand::thread_rng(); 87 | let mut seed = [0u8; 32]; 88 | rng.fill(&mut seed); 89 | seed 90 | } 91 | 92 | pub fn gen_threshold_params(max_n: usize) -> ThresholdParameters { 93 | let n: usize = rand::thread_rng().gen_range(2..max_n + 1); 94 | let k_min = min_thrershold(n); 95 | let k = rand::thread_rng().gen_range(k_min..n + 1); 96 | ThresholdParameters::new(gen_participants(n), Threshold::new(k as u64)).unwrap() 97 | } 98 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/src/primitives/votes.rs: -------------------------------------------------------------------------------- 1 | use 
super::thresholds::ThresholdParameters; 2 | use super::{key_state::AuthenticatedAccountId, participants::Participants}; 3 | use near_sdk::{log, near}; 4 | use std::collections::BTreeMap; 5 | 6 | /// Tracks votes for ThresholdParameters (new participants and threshold). 7 | /// Each current participant can maintain one vote. 8 | #[near(serializers=[borsh, json])] 9 | #[derive(Debug, Default, PartialEq)] 10 | #[cfg_attr(feature = "dev-utils", derive(Clone))] 11 | pub struct ThresholdParametersVotes { 12 | proposal_by_account: BTreeMap, 13 | } 14 | 15 | impl ThresholdParametersVotes { 16 | /// return the number of votes for `proposal` casted by members of `participants` 17 | pub fn n_votes(&self, proposal: &ThresholdParameters, participants: &Participants) -> u64 { 18 | self.proposal_by_account 19 | .iter() 20 | .filter(|&(acc, prop)| { 21 | participants 22 | .participants() 23 | .iter() 24 | .any(|(acc_id, _, _)| acc.get() == acc_id) 25 | && prop == proposal 26 | }) 27 | .count() as u64 28 | } 29 | /// Registers a vote by `participant` for `proposal`. 30 | /// Removes any existing votes by `participant`. 31 | /// Returns the number of participants who have voted for the same proposal (including the new 32 | /// vote). 
33 | pub fn vote( 34 | &mut self, 35 | proposal: &ThresholdParameters, 36 | participant: AuthenticatedAccountId, 37 | ) -> u64 { 38 | if self 39 | .proposal_by_account 40 | .insert(participant, proposal.clone()) 41 | .is_some() 42 | { 43 | log!("removed one vote for signer"); 44 | } 45 | self.proposal_by_account 46 | .values() 47 | .filter(|&prop| prop == proposal) 48 | .count() as u64 49 | } 50 | } 51 | 52 | #[cfg(test)] 53 | mod tests { 54 | use super::ThresholdParametersVotes; 55 | use crate::primitives::{ 56 | key_state::AuthenticatedAccountId, 57 | participants::Participants, 58 | test_utils::{gen_participant, gen_threshold_params}, 59 | }; 60 | use near_sdk::{test_utils::VMContextBuilder, testing_env}; 61 | 62 | #[test] 63 | fn test_voting_and_removal() { 64 | let mut participants = Participants::default(); 65 | let p0 = gen_participant(0); 66 | participants.insert(p0.0.clone(), p0.1).expect("error"); 67 | let mut ctx = VMContextBuilder::new(); 68 | ctx.signer_account_id(p0.0); 69 | testing_env!(ctx.build()); 70 | let participant = 71 | AuthenticatedAccountId::new(&participants).expect("expected authentication"); 72 | let params = gen_threshold_params(30); 73 | let mut votes = ThresholdParametersVotes::default(); 74 | assert_eq!(votes.vote(¶ms, participant.clone()), 1); 75 | assert_eq!(votes.n_votes(¶ms, &participants), 1); 76 | let params2 = gen_threshold_params(30); 77 | assert_eq!(votes.vote(¶ms2, participant), 1); 78 | assert_eq!(votes.n_votes(¶ms2, &participants), 1); 79 | assert_eq!(votes.n_votes(¶ms, &participants), 0); 80 | 81 | // new participant 82 | let p1 = gen_participant(1); 83 | participants.insert(p1.0.clone(), p1.1).expect("error"); 84 | ctx.signer_account_id(p1.0); 85 | testing_env!(ctx.build()); 86 | let participant = 87 | AuthenticatedAccountId::new(&participants).expect("expected authentication"); 88 | assert_eq!(votes.vote(¶ms, participant.clone()), 1); 89 | assert_eq!(votes.n_votes(¶ms2, &participants), 1); 90 | 
assert_eq!(votes.vote(¶ms2, participant), 2); 91 | assert_eq!(votes.n_votes(¶ms2, &participants), 2); 92 | assert_eq!(votes.n_votes(¶ms2, params2.participants()), 0); 93 | assert_eq!(votes.n_votes(¶ms, &participants), 0); 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/src/storage_keys.rs: -------------------------------------------------------------------------------- 1 | use near_sdk::{near, BorshStorageKey}; 2 | 3 | // !!! IMPORTANT !!! 4 | // for backwards compatibility, ensure the order is preserved and only append to this list 5 | // Renaming is OK. 6 | #[near(serializers=[borsh] )] 7 | #[derive(Hash, Clone, Debug, PartialEq, Eq, BorshStorageKey)] 8 | pub enum StorageKey { 9 | _DeprecatedPendingRequests, 10 | /// Proposed updates to the contract code and config. 11 | _DeprecatedProposedUpdatesEntries, 12 | _DeprecatedRequestsByTimestamp, 13 | /// Pending signature requests. 14 | PendingRequestsV2, 15 | ProposedUpdatesEntriesV2, 16 | ProposedUpdatesVotesV2, 17 | } 18 | -------------------------------------------------------------------------------- /libs/chain-signatures/contract/tests/user_views.rs: -------------------------------------------------------------------------------- 1 | pub mod common; 2 | use common::init_env_secp256k1; 3 | use near_sdk::{CurveType, PublicKey}; 4 | use serde_json::json; 5 | use std::str::FromStr; 6 | #[tokio::test] 7 | async fn test_key_version() -> anyhow::Result<()> { 8 | let (_, contract, _, _) = init_env_secp256k1(1).await; 9 | 10 | let version: u32 = contract 11 | .view("latest_key_version") 12 | .args_json(json!({})) 13 | .await 14 | .unwrap() 15 | .json() 16 | .unwrap(); 17 | assert_eq!(version, 0); 18 | Ok(()) 19 | } 20 | 21 | #[tokio::test] 22 | async fn test_public_key() -> anyhow::Result<()> { 23 | let (_, contract, _, _) = init_env_secp256k1(1).await; 24 | 25 | let key: String = contract 26 | .view("public_key") 27 | 
.args_json(json!({})) 28 | .await 29 | .unwrap() 30 | .json() 31 | .unwrap(); 32 | println!("{:?}", key); 33 | let pk = PublicKey::from_str(&key)?; 34 | assert_eq!(pk.curve_type(), CurveType::SECP256K1); 35 | Ok(()) 36 | } 37 | 38 | #[tokio::test] 39 | async fn test_derived_public_key() -> anyhow::Result<()> { 40 | let (_, contract, _, _) = init_env_secp256k1(1).await; 41 | 42 | let key: String = contract 43 | .view("derived_public_key") 44 | .args_json(json!({ 45 | "path": "test", 46 | "predecessor": "alice.near" 47 | })) 48 | .await 49 | .unwrap() 50 | .json() 51 | .unwrap(); 52 | let pk = PublicKey::from_str(&key)?; 53 | assert_eq!(pk.curve_type(), CurveType::SECP256K1); 54 | Ok(()) 55 | } 56 | -------------------------------------------------------------------------------- /libs/chain-signatures/keys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-keys" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | [lib] 8 | crate-type = ["cdylib", "lib"] 9 | 10 | [dependencies] 11 | borsh = "1.5.0" 12 | hpke = { version = "0.11", features = ["serde_impls", "std"] } 13 | serde = { version = "1", features = ["derive"] } 14 | rand = { version = "0.8" } 15 | 16 | [dev-dependencies] 17 | hex = "*" 18 | -------------------------------------------------------------------------------- /libs/chain-signatures/keys/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod hpke; 2 | -------------------------------------------------------------------------------- /libs/chain-signatures/rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.85.1" 3 | components = ["rustfmt", "clippy", "rust-analyzer", "rust-src"] 4 | targets = ["wasm32-unknown-unknown"] 5 | -------------------------------------------------------------------------------- /node/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-node" 3 | version = "2.0.0-rc.1" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | [dependencies] 8 | actix = "0.13.0" 9 | aes-gcm = "0.10.3" 10 | anyhow = "1.0.92" 11 | async-trait = "0.1.83" 12 | axum = "0.7.9" 13 | backon = { version = "1.5.1", features = ["tokio-sleep"] } 14 | borsh = { version = "1.5.1", features = ["derive"] } 15 | cait-sith = { git = "https://github.com/Near-One/cait-sith", rev = "68d38188468733dd890dabd8d35b8b7a011e71dd", features = [ 16 | "k256", 17 | ] } 18 | clap = { version = "4.5.20", features = ["derive", "env"] } 19 | dstack-sdk = { git = "https://github.com/Dstack-TEE/dstack.git", tag = "v0.5.2" } 20 | flume = "0.11.1" 21 | futures = "0.3.31" 22 | futures-util = "0.3.31" 23 | gcloud-sdk = { version = "0.26.2", default-features = false, features = [ 24 | "google-cloud-secretmanager-v1", 25 | "tls-webpki-roots", 26 | ] } 27 | hex = { version = "0.4.3", features = ["serde"] } 28 | hex-literal = "0.4.1" 29 | hkdf = "0.12.4" 30 | http = "1.3.1" 31 | humantime = "2.1.0" 32 | k256 = "0.13.4" 33 | lazy_static = "1.5.0" 34 | lru = "0.12.5" 35 | near-sdk = "5.2.1" 36 | prometheus = "0.13.4" 37 | rand = "0.8.5" 38 | rand_xorshift = "0.3" 39 | rcgen = "0.13.1" 40 | rocksdb = "0.21.0" 41 | rustls = { version = "0.23.23", default-features = false } 42 | reqwest = { version = "0.12.9", features = ["multipart"] } 43 | serde = { version = "1.0.214", features = ["derive"] } 44 | serde_json = "1.0.132" 45 | serde_yaml = "0.9.34" 46 | sha3 = "0.10.8" 47 | tempfile = "=3.14.0" 48 | time = "0.3.41" 49 | tokio = { version = "1.41.0", features = ["full"] } 50 | tokio-util = "0.7.12" 51 | tokio-rustls = { version = "0.26.1", default-features = false } 52 | tokio-stream = { version = "0.1" } 53 | tracing = "0.1.40" 54 | tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } 55 | url = "2" 56 | x509-parser = "0.16.0" 57 | 58 | 
curve25519-dalek = "4.1.3" 59 | 60 | near-indexer = { git = "https://github.com/near/nearcore", tag = "2.6.0-rc.1" } 61 | near-indexer-primitives = { git = "https://github.com/near/nearcore", tag = "2.6.0-rc.1" } 62 | near-client = { git = "https://github.com/near/nearcore", tag = "2.6.0-rc.1" } 63 | near-config-utils = { git = "https://github.com/near/nearcore", tag = "2.6.0-rc.1" } 64 | near-crypto = { git = "https://github.com/near/nearcore", tag = "2.6.0-rc.1" } 65 | near-o11y = { git = "https://github.com/near/nearcore", tag = "2.6.0-rc.1" } 66 | near-time = { git = "https://github.com/near/nearcore", tag = "2.6.0-rc.1" } 67 | # TODO: update once 1.1.0 is published (though the API did not change) 68 | legacy-mpc-contract = { package = "mpc-contract", git = "https://github.com/Near-One/mpc/", rev = "1d4954dff28e8eb988fb7762eff414a602a2b124" } 69 | mpc-contract = { path = "../libs/chain-signatures/contract/", features = [ 70 | "dev-utils", 71 | ] } 72 | # Pinned to resolve compilation errors with conflicting implementations of PartialOrd trait 73 | # See error E0283 with multiple impls satisfying `u128: std::cmp::PartialOrd<_>` 74 | deranged = "=0.4.0" 75 | 76 | [dev-dependencies] 77 | assert_matches = "1.5.0" 78 | serial_test = "3.2.0" 79 | reqwest = "0.12.9" 80 | itertools = "0.12.1" 81 | mpc-contract = { path = "../libs/chain-signatures/contract/", features = [ 82 | "test-utils", 83 | ] } 84 | -------------------------------------------------------------------------------- /node/src/async_testing.rs: -------------------------------------------------------------------------------- 1 | use futures::FutureExt; 2 | use std::future::Future; 3 | use std::pin::Pin; 4 | use std::task::{Context, Poll}; 5 | 6 | pub enum MaybeReady<'a, T> { 7 | Ready(T), 8 | Future(Pin + 'a>>), 9 | } 10 | 11 | /// Runs the future once, without allowing it to await for anything. 12 | /// Returns either the result, or a future representing the remaining work. 
13 | pub fn run_future_once<'a, T>(future: impl Future + 'a) -> MaybeReady<'a, T> { 14 | let mut pinned = Box::pin(future); 15 | match pinned.poll_unpin(&mut Context::from_waker(futures::task::noop_waker_ref())) { 16 | Poll::Ready(output) => MaybeReady::Ready(output), 17 | Poll::Pending => MaybeReady::Future(pinned), 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /node/src/background.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::AtomicUsize; 2 | use std::sync::Arc; 3 | 4 | /// Tracks number of in-flight generations so we don't generate too many at the same time. 5 | pub struct InFlightGenerationTracker { 6 | generations_in_flight: Arc, 7 | } 8 | 9 | impl InFlightGenerationTracker { 10 | pub fn new() -> Self { 11 | Self { 12 | generations_in_flight: Arc::new(AtomicUsize::new(0)), 13 | } 14 | } 15 | 16 | pub fn in_flight(&self, count: usize) -> InFlightGenerations { 17 | InFlightGenerations::new(self.generations_in_flight.clone(), count) 18 | } 19 | 20 | pub fn num_in_flight(&self) -> usize { 21 | self.generations_in_flight 22 | .load(std::sync::atomic::Ordering::Relaxed) 23 | } 24 | 25 | pub fn num_in_flight_atomic(&self) -> Arc { 26 | self.generations_in_flight.clone() 27 | } 28 | } 29 | 30 | /// Drop guard to increment and decrement number of generations in flight. 
31 | pub struct InFlightGenerations { 32 | generations_in_flight: Arc, 33 | count: usize, 34 | } 35 | 36 | impl InFlightGenerations { 37 | pub fn new(generations_in_flight: Arc, count: usize) -> Self { 38 | generations_in_flight.fetch_add(count, std::sync::atomic::Ordering::Relaxed); 39 | Self { 40 | generations_in_flight, 41 | count, 42 | } 43 | } 44 | } 45 | 46 | impl Drop for InFlightGenerations { 47 | fn drop(&mut self) { 48 | self.generations_in_flight 49 | .fetch_sub(self.count, std::sync::atomic::Ordering::Relaxed); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /node/src/indexer/configs.rs: -------------------------------------------------------------------------------- 1 | use crate::config::{IndexerConfig, SyncMode}; 2 | use near_indexer::near_primitives::types::Gas; 3 | 4 | impl IndexerConfig { 5 | pub(crate) fn to_near_indexer_config( 6 | &self, 7 | home_dir: std::path::PathBuf, 8 | ) -> near_indexer::IndexerConfig { 9 | near_indexer::IndexerConfig { 10 | home_dir, 11 | sync_mode: self.sync_mode.clone().into(), 12 | await_for_node_synced: near_indexer::AwaitForNodeSyncedEnum::WaitForFullSync, 13 | finality: self.finality.clone(), 14 | validate_genesis: self.validate_genesis, 15 | } 16 | } 17 | } 18 | 19 | impl From for near_indexer::SyncModeEnum { 20 | fn from(sync_mode: SyncMode) -> Self { 21 | match sync_mode { 22 | SyncMode::Interruption => Self::FromInterruption, 23 | SyncMode::Latest => Self::LatestSynced, 24 | SyncMode::Block(args) => Self::BlockHeight(args.height), 25 | } 26 | } 27 | } 28 | 29 | #[derive(clap::Parser, Debug)] 30 | pub(crate) struct InitConfigArgs { 31 | /// The directory in which to write the config files 32 | #[clap(long)] 33 | pub home_dir: String, 34 | /// chain/network id (localnet, testnet, devnet, betanet) 35 | #[clap(short, long)] 36 | pub chain_id: Option, 37 | /// Account ID for the validator key 38 | #[clap(long)] 39 | pub account_id: Option, 40 | /// Specify private 
key generated from seed (TESTING ONLY) 41 | #[clap(long)] 42 | pub test_seed: Option, 43 | /// Number of shards to initialize the chain with 44 | #[clap(short, long, default_value = "1")] 45 | pub num_shards: u64, 46 | /// Makes block production fast (TESTING ONLY) 47 | #[clap(short, long)] 48 | pub fast: bool, 49 | /// Genesis file to use when initializing testnet (including downloading) 50 | #[clap(short, long)] 51 | pub genesis: Option, 52 | #[clap(long)] 53 | /// Download the verified NEAR genesis file automatically. 54 | pub download_genesis: bool, 55 | /// Specify a custom download URL for the genesis-file. 56 | #[clap(long)] 57 | pub download_genesis_url: Option, 58 | /// Specify a custom download URL for the records-file. 59 | #[clap(long)] 60 | pub download_records_url: Option, 61 | #[clap(long)] 62 | /// Download the verified NEAR config file automatically. 63 | pub download_config: bool, 64 | /// Specify a custom download URL for the config file. 65 | #[clap(long)] 66 | pub download_config_url: Option, 67 | /// Specify the boot nodes to bootstrap the network 68 | #[clap(long)] pub boot_nodes: Option, 69 | /// Specify a custom max_gas_burnt_view limit. 
70 | #[clap(long)] 71 | pub max_gas_burnt_view: Option, 72 | } 73 | 74 | impl From for near_indexer::InitConfigArgs { 75 | fn from(config_args: InitConfigArgs) -> Self { 76 | Self { 77 | chain_id: config_args.chain_id, 78 | account_id: config_args.account_id, 79 | test_seed: config_args.test_seed, 80 | num_shards: config_args.num_shards, 81 | fast: config_args.fast, 82 | genesis: config_args.genesis, 83 | download_genesis: config_args.download_genesis, 84 | download_genesis_url: config_args.download_genesis_url, 85 | download_records_url: config_args.download_records_url, 86 | download_config: if config_args.download_config { 87 | Some(near_config_utils::DownloadConfigType::RPC) 88 | } else { 89 | None 90 | }, 91 | download_config_url: config_args.download_config_url, 92 | boot_nodes: config_args.boot_nodes, 93 | max_gas_burnt_view: config_args.max_gas_burnt_view, 94 | } 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /node/src/indexer/lib.rs: -------------------------------------------------------------------------------- 1 | use actix::Addr; 2 | use anyhow::bail; 3 | use mpc_contract::state::ProtocolContractState; 4 | use near_client::ClientActor; 5 | use near_client::Status; 6 | use near_indexer_primitives::types; 7 | use near_indexer_primitives::types::AccountId; 8 | use near_indexer_primitives::views::QueryRequest; 9 | use near_indexer_primitives::views::QueryResponseKind::CallResult; 10 | use near_o11y::WithSpanContextExt; 11 | use std::time::Duration; 12 | use tokio::time; 13 | 14 | const INTERVAL: Duration = Duration::from_millis(500); 15 | 16 | pub(crate) async fn wait_for_full_sync(client: &Addr) { 17 | loop { 18 | time::sleep(INTERVAL).await; 19 | 20 | let Ok(Ok(status)) = client 21 | .send( 22 | Status { 23 | is_health_check: false, 24 | detailed: false, 25 | } 26 | .with_span_context(), 27 | ) 28 | .await 29 | else { 30 | continue; 31 | }; 32 | 33 | if !status.sync_info.syncing { 34 | return; 35 | } 
36 | } 37 | } 38 | 39 | pub(crate) async fn get_mpc_contract_state( 40 | mpc_contract_id: AccountId, 41 | client: &actix::Addr, 42 | ) -> anyhow::Result<(u64, ProtocolContractState)> { 43 | let request = QueryRequest::CallFunction { 44 | account_id: mpc_contract_id, 45 | method_name: "state".to_string(), 46 | args: vec![].into(), 47 | }; 48 | let query = near_client::Query { 49 | block_reference: types::BlockReference::Finality(types::Finality::Final), 50 | request, 51 | }; 52 | let response = client.send(query.with_span_context()).await??; 53 | match response.kind { 54 | CallResult(result) => Ok(( 55 | response.block_height, 56 | serde_json::from_slice(&result.result)?, 57 | )), 58 | _ => { 59 | bail!("got unexpected response querying mpc contract state") 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /node/src/indexer/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod configs; 2 | #[cfg(test)] 3 | pub mod fake; 4 | pub mod handler; 5 | pub mod lib; 6 | pub mod participants; 7 | pub mod real; 8 | pub mod stats; 9 | pub mod tx_sender; 10 | pub mod tx_signer; 11 | pub mod types; 12 | 13 | use handler::ChainBlockUpdate; 14 | use near_indexer_primitives::types::AccountId; 15 | use participants::ContractState; 16 | use std::sync::Arc; 17 | use tokio::sync::{mpsc, watch}; 18 | use types::ChainSendTransactionRequest; 19 | 20 | pub(crate) struct IndexerState { 21 | /// For querying blockchain state. 22 | view_client: actix::Addr, 23 | /// For querying blockchain sync status. 24 | client: actix::Addr, 25 | /// For sending txs to the chain. 26 | tx_processor: actix::Addr, 27 | /// AccountId for the mpc contract. 
28 | mpc_contract_id: AccountId, 29 | } 30 | 31 | impl IndexerState { 32 | pub fn new( 33 | view_client: actix::Addr, 34 | client: actix::Addr, 35 | tx_processor: actix::Addr, 36 | mpc_contract_id: AccountId, 37 | ) -> Self { 38 | Self { 39 | view_client, 40 | client, 41 | tx_processor, 42 | mpc_contract_id, 43 | } 44 | } 45 | } 46 | 47 | /// API to interact with the indexer. Can be replaced by a dummy implementation. 48 | /// The MPC node implementation needs this and only this to be able to interact 49 | /// with the indexer. 50 | /// TODO(#155): This would be the interface to abstract away having an indexer 51 | /// running in a separate process. 52 | pub struct IndexerAPI { 53 | /// Provides the current contract state as well as updates to it. 54 | pub contract_state_receiver: watch::Receiver, 55 | /// Provides block updates (signature requests and other relevant receipts). 56 | /// It is in a mutex, because the logical "owner" of this receiver can 57 | /// change over time (specifically, when we transition from the Running 58 | /// state to a Resharing state to the Running state again, two different 59 | /// tasks would successively "own" the receiver). 60 | /// We do not want to re-create the channel, because while resharing is 61 | /// happening we want to buffer the signature requests. 62 | pub block_update_receiver: Arc>>, 63 | /// Sender to request transactions be signed (by a TransactionSigner that 64 | /// the indexer is initialized with) and sent to the chain. 
65 | pub txn_sender: mpsc::Sender, 66 | } 67 | -------------------------------------------------------------------------------- /node/src/indexer/real.rs: -------------------------------------------------------------------------------- 1 | use super::handler::listen_blocks; 2 | use super::participants::{monitor_contract_state, ContractState}; 3 | use super::stats::{indexer_logger, IndexerStats}; 4 | use super::tx_sender::handle_txn_requests; 5 | use super::{IndexerAPI, IndexerState}; 6 | use crate::config::{load_respond_config_file, IndexerConfig, RespondConfigFile}; 7 | use mpc_contract::state::ProtocolContractState; 8 | use near_crypto::SecretKey; 9 | use near_sdk::AccountId; 10 | use std::path::PathBuf; 11 | use std::sync::Arc; 12 | use tokio::sync::{mpsc, oneshot, watch, Mutex}; 13 | 14 | /// Spawns a real indexer, returning a handle to the indexer, [`IndexerApi`]. 15 | /// 16 | /// If an unrecoverable error occurs, the spawned indexer will terminate, and the provided [`oneshot::Sender`] 17 | /// will be used to propagate the error. 
18 | pub fn spawn_real_indexer( 19 | home_dir: PathBuf, 20 | indexer_config: IndexerConfig, 21 | my_near_account_id: AccountId, 22 | account_secret_key: SecretKey, 23 | protocol_state_sender: watch::Sender, 24 | indexer_exit_sender: oneshot::Sender>, 25 | ) -> IndexerAPI { 26 | let (chain_config_sender, chain_config_receiver) = 27 | tokio::sync::watch::channel::(ContractState::WaitingForSync); 28 | let (block_update_sender, block_update_receiver) = mpsc::unbounded_channel(); 29 | let (chain_txn_sender, chain_txn_receiver) = mpsc::channel(10000); 30 | 31 | // TODO(#156): replace actix with tokio 32 | std::thread::spawn(move || { 33 | actix::System::new().block_on(async { 34 | let indexer = 35 | near_indexer::Indexer::new(indexer_config.to_near_indexer_config(home_dir.clone())) 36 | .expect("Failed to initialize the Indexer"); 37 | let respond_config = match load_respond_config_file(&home_dir) { 38 | Ok(Some(respond_config)) => respond_config, 39 | Ok(None) => { 40 | tracing::warn!("No respond.yaml provided. 
Using the node's main account to send respond transactions."); 41 | RespondConfigFile { 42 | account_id: my_near_account_id.clone(), 43 | access_keys: vec![account_secret_key.clone()], 44 | } 45 | } 46 | Err(err) => { 47 | panic!("respond.yaml is provided but failed to parse: {err:?}"); 48 | } 49 | }; 50 | let stream = indexer.streamer(); 51 | let (view_client, client, tx_processor) = indexer.client_actors(); 52 | let indexer_state = Arc::new(IndexerState::new( 53 | view_client, 54 | client, 55 | tx_processor, 56 | indexer_config.mpc_contract_id.clone(), 57 | )); 58 | // TODO: migrate this into IndexerState 59 | let stats: Arc> = Arc::new(Mutex::new(IndexerStats::new())); 60 | actix::spawn(monitor_contract_state( 61 | indexer_state.clone(), 62 | indexer_config.port_override, 63 | chain_config_sender, 64 | protocol_state_sender, 65 | )); 66 | actix::spawn(indexer_logger(Arc::clone(&stats), indexer_state.view_client.clone())); 67 | actix::spawn(handle_txn_requests( 68 | chain_txn_receiver, 69 | my_near_account_id, 70 | account_secret_key.clone(), 71 | respond_config, 72 | indexer_state.clone(), 73 | )); 74 | let indexer_result = listen_blocks( 75 | stream, 76 | indexer_config.concurrency, 77 | Arc::clone(&stats), 78 | indexer_config.mpc_contract_id, 79 | block_update_sender, 80 | ) 81 | .await; 82 | 83 | if indexer_exit_sender.send(indexer_result).is_err() { 84 | tracing::error!("Indexer thread could not send result back to main driver.") 85 | }; 86 | }); 87 | }); 88 | 89 | IndexerAPI { 90 | contract_state_receiver: chain_config_receiver, 91 | block_update_receiver: Arc::new(Mutex::new(block_update_receiver)), 92 | txn_sender: chain_txn_sender, 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /node/src/indexer/stats.rs: -------------------------------------------------------------------------------- 1 | use near_indexer_primitives::types; 2 | use near_o11y::WithSpanContextExt; 3 | use std::sync::Arc; 4 | use 
tokio::sync::Mutex; 5 | 6 | #[derive(Debug, Clone)] 7 | pub(crate) struct IndexerStats { 8 | pub block_heights_processing: std::collections::BTreeSet, 9 | pub blocks_processed_count: u64, 10 | pub last_processed_block_height: u64, 11 | } 12 | 13 | impl IndexerStats { 14 | pub fn new() -> Self { 15 | Self { 16 | block_heights_processing: std::collections::BTreeSet::new(), 17 | blocks_processed_count: 0, 18 | last_processed_block_height: 0, 19 | } 20 | } 21 | } 22 | 23 | pub(crate) async fn indexer_logger( 24 | stats: Arc>, 25 | view_client: actix::Addr, 26 | ) { 27 | let interval_secs = 10; 28 | let mut prev_blocks_processed_count: u64 = 0; 29 | 30 | loop { 31 | tokio::time::sleep(std::time::Duration::from_secs(interval_secs)).await; 32 | let stats_lock = stats.lock().await; 33 | let stats_copy = stats_lock.clone(); 34 | drop(stats_lock); 35 | 36 | let block_processing_speed: f64 = ((stats_copy.blocks_processed_count 37 | - prev_blocks_processed_count) as f64) 38 | / (interval_secs as f64); 39 | 40 | let time_to_catch_the_tip_duration = if block_processing_speed > 0.0 { 41 | if let Ok(block_height) = fetch_latest_block(&view_client).await { 42 | let blocks_behind = if block_height > stats_copy.last_processed_block_height { 43 | block_height - stats_copy.last_processed_block_height 44 | } else { 45 | 0 // We're ahead of the chain tip, no catching up needed 46 | }; 47 | 48 | Some(std::time::Duration::from_millis( 49 | ((blocks_behind as f64 / block_processing_speed) * 1000f64) as u64, 50 | )) 51 | } else { 52 | None 53 | } 54 | } else { 55 | None 56 | }; 57 | 58 | tracing::info!( 59 | target: "indexer", 60 | "# {} | Blocks processing: {}| Blocks done: {}. 
Bps {:.2} b/s {}", 61 | stats_copy.last_processed_block_height, 62 | stats_copy.block_heights_processing.len(), 63 | stats_copy.blocks_processed_count, 64 | block_processing_speed, 65 | if let Some(duration) = time_to_catch_the_tip_duration.filter(|d| d.as_secs() > 0) { 66 | format!( 67 | " | {} to catch up the tip", 68 | humantime::format_duration(duration) 69 | ) 70 | } else { 71 | "".to_string() 72 | } 73 | ); 74 | prev_blocks_processed_count = stats_copy.blocks_processed_count; 75 | } 76 | } 77 | 78 | async fn fetch_latest_block( 79 | client: &actix::Addr, 80 | ) -> anyhow::Result { 81 | let block = client 82 | .send( 83 | near_client::GetBlock(types::BlockReference::Finality(types::Finality::Final)) 84 | .with_span_context(), 85 | ) 86 | .await??; 87 | Ok(block.header.height) 88 | } 89 | -------------------------------------------------------------------------------- /node/src/indexer/tx_signer.rs: -------------------------------------------------------------------------------- 1 | use crate::config::RespondConfigFile; 2 | use crate::indexer::types::ChainSendTransactionRequest; 3 | use near_crypto::{InMemorySigner, PublicKey, SecretKey, Signer}; 4 | use near_indexer::near_primitives::account::AccessKey; 5 | use near_indexer_primitives::near_primitives::transaction::{ 6 | FunctionCallAction, SignedTransaction, Transaction, TransactionV0, 7 | }; 8 | use near_indexer_primitives::types::{AccountId, Gas}; 9 | use near_indexer_primitives::CryptoHash; 10 | use std::sync::Arc; 11 | use std::sync::Mutex; 12 | 13 | pub(crate) struct TransactionSigner { 14 | signer: Signer, 15 | nonce: Mutex, 16 | } 17 | 18 | impl TransactionSigner { 19 | pub(crate) fn from_key(account_id: AccountId, key: SecretKey) -> Self { 20 | TransactionSigner { 21 | signer: InMemorySigner::from_secret_key(account_id, key), 22 | nonce: Mutex::new(0), 23 | } 24 | } 25 | 26 | /// Computes the next nonce under the mutex and returns the new value: strictly greater than the previously issued nonce, and at least `ACCESS_KEY_NONCE_RANGE_MULTIPLIER * last_known_block_height` so the nonce stays within the range expected for the current block height. 27 | fn make_nonce(&self, last_known_block_height: u64) -> 
u64 { 28 | let min_nonce = AccessKey::ACCESS_KEY_NONCE_RANGE_MULTIPLIER * last_known_block_height; 29 | let mut nonce = self.nonce.lock().unwrap(); 30 | let new_nonce = std::cmp::max(min_nonce, *nonce + 1); 31 | *nonce = new_nonce; 32 | new_nonce 33 | } 34 | 35 | pub(crate) fn create_and_sign_function_call_tx( 36 | &self, 37 | receiver_id: AccountId, 38 | method_name: String, 39 | args: Vec, 40 | gas: Gas, 41 | block_hash: CryptoHash, 42 | block_height: u64, 43 | ) -> SignedTransaction { 44 | let action = FunctionCallAction { 45 | method_name, 46 | args, 47 | gas, 48 | deposit: 0, 49 | }; 50 | let signer_id = match &self.signer { 51 | Signer::InMemory(InMemorySigner { account_id, .. }) => account_id.clone(), 52 | _ => unreachable!(), 53 | }; 54 | let transaction = Transaction::V0(TransactionV0 { 55 | signer_id, 56 | public_key: self.signer.public_key().clone(), 57 | nonce: self.make_nonce(block_height), 58 | receiver_id, 59 | block_hash, 60 | actions: vec![action.into()], 61 | }); 62 | 63 | let tx_hash = transaction.get_hash_and_size().0; 64 | let signature = self.signer.sign(tx_hash.as_ref()); 65 | 66 | SignedTransaction::new(signature, transaction.clone()) 67 | } 68 | 69 | pub(crate) fn public_key(&self) -> PublicKey { 70 | self.signer.public_key() 71 | } 72 | } 73 | 74 | pub(crate) struct TransactionSigners { 75 | /// Signers that we cycle through for responding to signature requests. 76 | /// These can correspond to arbitrary near accounts. 77 | respond_signers: Vec>, 78 | /// Signer we use for signing vote_pk, vote_reshared, etc., which must 79 | /// correspond to the account that this node runs under. 80 | owner_signer: Arc, 81 | /// next respond signer to use. 
82 | next_id: usize, 83 | } 84 | 85 | impl TransactionSigners { 86 | pub fn new( 87 | respond_config: RespondConfigFile, 88 | owner_account_id: AccountId, 89 | owner_secret_key: SecretKey, 90 | ) -> anyhow::Result { 91 | let respond_signers = respond_config 92 | .access_keys 93 | .iter() 94 | .map(|key| { 95 | Arc::new(TransactionSigner::from_key( 96 | respond_config.account_id.clone(), 97 | key.clone(), 98 | )) 99 | }) 100 | .collect::>(); 101 | let owner_signer = Arc::new(TransactionSigner::from_key( 102 | owner_account_id, 103 | owner_secret_key, 104 | )); 105 | anyhow::ensure!( 106 | !respond_signers.is_empty(), 107 | "At least one responding access key must be provided", 108 | ); 109 | Ok(TransactionSigners { 110 | respond_signers, 111 | owner_signer, 112 | next_id: 0, 113 | }) 114 | } 115 | 116 | fn next_respond_signer(&mut self) -> Arc { 117 | let signer = self.respond_signers[self.next_id].clone(); 118 | self.next_id = (self.next_id + 1) % self.respond_signers.len(); 119 | signer 120 | } 121 | 122 | fn owner_signer(&self) -> Arc { 123 | self.owner_signer.clone() 124 | } 125 | 126 | pub fn signer_for(&mut self, req: &ChainSendTransactionRequest) -> Arc { 127 | match req { 128 | ChainSendTransactionRequest::Respond(_) => self.next_respond_signer(), 129 | _ => self.owner_signer(), 130 | } 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /node/src/keyshare/compat.rs: -------------------------------------------------------------------------------- 1 | use super::permanent::LegacyRootKeyshareData; 2 | use super::{Keyshare, KeyshareData}; 3 | use cait_sith::ecdsa::KeygenOutput; 4 | use cait_sith::frost_core::keys::SigningShare; 5 | use cait_sith::frost_secp256k1::VerifyingKey; 6 | use mpc_contract::primitives::domain::DomainId; 7 | use mpc_contract::primitives::key_state::{AttemptId, EpochId, KeyEventId}; 8 | 9 | /// For compatibility while we perform the refactoring. 
10 | /// Converts the new format keyshares array to the old format. 11 | pub fn legacy_ecdsa_key_from_keyshares( 12 | keyshares: &[Keyshare], 13 | ) -> anyhow::Result { 14 | if keyshares.len() != 1 { 15 | anyhow::bail!("Expected exactly one keyshare, got {}", keyshares.len()); 16 | } 17 | let keyshare = &keyshares[0]; 18 | if keyshare.key_id.domain_id != DomainId::legacy_ecdsa_id() { 19 | anyhow::bail!( 20 | "Expected keyshare for legacy ECDSA domain, got {:?}", 21 | keyshare.key_id.domain_id 22 | ); 23 | } 24 | let KeyshareData::Secp256k1(secp256k1_data) = &keyshare.data else { 25 | anyhow::bail!( 26 | "Expected keyshare for legacy ECDSA domain, got {:?}", 27 | keyshare.key_id.domain_id 28 | ); 29 | }; 30 | Ok(LegacyRootKeyshareData { 31 | epoch: keyshare.key_id.epoch_id.get(), 32 | private_share: secp256k1_data.private_share.to_scalar(), 33 | public_key: secp256k1_data.public_key.to_element().to_affine(), 34 | }) 35 | } 36 | 37 | impl Keyshare { 38 | /// Converts the legacy keyshare to a keyshare in the new format. 
39 | pub fn from_legacy(legacy: &LegacyRootKeyshareData) -> Self { 40 | Self { 41 | key_id: KeyEventId::new( 42 | EpochId::new(legacy.epoch), 43 | DomainId::legacy_ecdsa_id(), 44 | AttemptId::legacy_attempt_id(), 45 | ), 46 | data: KeyshareData::Secp256k1(KeygenOutput { 47 | private_share: SigningShare::new(legacy.private_share), 48 | public_key: VerifyingKey::new(legacy.public_key.into()), 49 | }), 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /node/src/keyshare/gcp.rs: -------------------------------------------------------------------------------- 1 | use super::permanent::PermanentKeyStorageBackend; 2 | use anyhow::Context; 3 | use gcloud_sdk::google::cloud::secretmanager::v1::secret_manager_service_client::SecretManagerServiceClient; 4 | use gcloud_sdk::google::cloud::secretmanager::v1::secret_version::State; 5 | use gcloud_sdk::google::cloud::secretmanager::v1::{ 6 | AccessSecretVersionRequest, AddSecretVersionRequest, ListSecretVersionsRequest, 7 | }; 8 | use gcloud_sdk::proto_ext::secretmanager::SecretPayload; 9 | use gcloud_sdk::{GoogleApi, GoogleAuthMiddleware, SecretValue}; 10 | 11 | /// Keyshare storage that loads and stores the key from Google Secret Manager. 
12 | pub struct GcpPermanentKeyStorageBackend { 13 | secrets_client: GoogleApi>, 14 | project_id: String, 15 | secret_id: String, 16 | } 17 | 18 | impl GcpPermanentKeyStorageBackend { 19 | pub async fn new(project_id: String, secret_id: String) -> anyhow::Result { 20 | let secrets_client = GoogleApi::from_function( 21 | SecretManagerServiceClient::new, 22 | "https://secretmanager.googleapis.com", 23 | None, 24 | ) 25 | .await 26 | .context("Failed to create SecretManagerServiceClient")?; 27 | 28 | Ok(Self { 29 | secrets_client, 30 | project_id, 31 | secret_id, 32 | }) 33 | } 34 | } 35 | 36 | #[async_trait::async_trait] 37 | impl PermanentKeyStorageBackend for GcpPermanentKeyStorageBackend { 38 | async fn load(&self) -> anyhow::Result>> { 39 | let secret_name = format!("projects/{}/secrets/{}", self.project_id, self.secret_id); 40 | let versions = self 41 | .secrets_client 42 | .get() 43 | .list_secret_versions(ListSecretVersionsRequest { 44 | parent: secret_name, 45 | ..Default::default() 46 | }) 47 | .await 48 | .context("Failed to list secret versions")? 49 | .into_inner(); 50 | 51 | let Some(latest_version) = versions 52 | .versions 53 | .into_iter() 54 | .find(|version| version.state() == State::Enabled) 55 | else { 56 | return Ok(None); 57 | }; 58 | let secret = self 59 | .secrets_client 60 | .get() 61 | .access_secret_version(AccessSecretVersionRequest { 62 | name: latest_version.name, 63 | }) 64 | .await 65 | .context("Failed to access secret version")? 
66 | .into_inner() 67 | .payload 68 | .ok_or_else(|| anyhow::anyhow!("Secret version has no payload"))?; 69 | 70 | Ok(Some(secret.data.as_sensitive_bytes().to_vec())) 71 | } 72 | 73 | async fn store(&self, data: &[u8], _identifier: &str) -> anyhow::Result<()> { 74 | let secret_name = format!("projects/{}/secrets/{}", self.project_id, self.secret_id); 75 | self.secrets_client 76 | .get() 77 | .add_secret_version(AddSecretVersionRequest { 78 | parent: secret_name, 79 | payload: Some(SecretPayload { 80 | data: SecretValue::new(data.to_vec()), 81 | ..Default::default() 82 | }), 83 | }) 84 | .await 85 | .context("Failed to create secret version")?; 86 | Ok(()) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /node/src/keyshare/local.rs: -------------------------------------------------------------------------------- 1 | use super::permanent::PermanentKeyStorageBackend; 2 | use crate::db; 3 | use aes_gcm::{Aes128Gcm, KeyInit}; 4 | use anyhow::Context; 5 | use sha3::digest::generic_array::GenericArray; 6 | use std::path::PathBuf; 7 | use tokio::io::AsyncWriteExt; 8 | 9 | /// Stores the permanent keyshare in a local encrypted file. 
10 | pub struct LocalPermanentKeyStorageBackend { 11 | home_dir: PathBuf, 12 | permanent_key_dir: PathBuf, 13 | encryption_key: [u8; 16], 14 | } 15 | 16 | impl LocalPermanentKeyStorageBackend { 17 | pub async fn new(home_dir: PathBuf, key: [u8; 16]) -> anyhow::Result { 18 | let permanent_key_dir = home_dir.join("permanent_keys"); 19 | tokio::fs::create_dir_all(&permanent_key_dir).await?; 20 | Ok(Self { 21 | home_dir, 22 | permanent_key_dir, 23 | encryption_key: key, 24 | }) 25 | } 26 | } 27 | 28 | #[async_trait::async_trait] 29 | impl PermanentKeyStorageBackend for LocalPermanentKeyStorageBackend { 30 | async fn load(&self) -> anyhow::Result>> { 31 | let keyfile = self.home_dir.join("key"); 32 | if !keyfile.exists() { 33 | return Ok(None); 34 | } 35 | let data = tokio::fs::read(&keyfile) 36 | .await 37 | .context("Failed to read key file")?; 38 | let cipher = Aes128Gcm::new(GenericArray::from_slice(&self.encryption_key)); 39 | let decrypted = db::decrypt(&cipher, &data).context("Failed to decrypt key file")?; 40 | Ok(Some(decrypted)) 41 | } 42 | 43 | async fn store(&self, data: &[u8], identifier: &str) -> anyhow::Result<()> { 44 | let cipher = Aes128Gcm::new(GenericArray::from_slice(&self.encryption_key)); 45 | let encrypted = db::encrypt(&cipher, data); 46 | // Write the new permanent keyshare to a separate file, and then create a link to it. 47 | // That way, we don't lose any keys if we somehow mess up. 
48 | let keyfile_for_epoch = self.permanent_key_dir.join(identifier); 49 | let mut file = tokio::fs::File::create_new(&keyfile_for_epoch) 50 | .await 51 | .context("Failed to create PermanentKeyshareData file")?; 52 | file.write_all(&encrypted) 53 | .await 54 | .context("Failed to write PermanentKeyshareData file")?; 55 | file.sync_all() 56 | .await 57 | .context("Failed to sync PermanentKeyshareData file")?; 58 | drop(file); 59 | 60 | let keyfile = self.home_dir.join("key"); 61 | tokio::fs::remove_file(&keyfile).await.ok(); 62 | tokio::fs::hard_link(&keyfile_for_epoch, &keyfile) 63 | .await 64 | .context("Failed to link PermanentKeyshareData file")?; 65 | Ok(()) 66 | } 67 | } 68 | 69 | #[cfg(test)] 70 | mod tests { 71 | use crate::keyshare::local::LocalPermanentKeyStorageBackend; 72 | use crate::keyshare::permanent::PermanentKeyStorageBackend; 73 | 74 | #[tokio::test] 75 | async fn test_local_keyshare_storage_backend() { 76 | let dir = tempfile::tempdir().unwrap(); 77 | let encryption_key = [1; 16]; 78 | let storage = 79 | LocalPermanentKeyStorageBackend::new(dir.path().to_path_buf(), encryption_key) 80 | .await 81 | .unwrap(); 82 | assert!(storage.load().await.unwrap().is_none()); 83 | storage.store(b"123", "id1").await.unwrap(); 84 | assert_eq!(storage.load().await.unwrap().unwrap(), b"123"); 85 | storage.store(b"456", "id2").await.unwrap(); 86 | assert_eq!(storage.load().await.unwrap().unwrap(), b"456"); 87 | assert!(std::fs::exists(dir.path().join("permanent_keys/id1")).unwrap()); 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /node/src/keyshare/test_utils.rs: -------------------------------------------------------------------------------- 1 | use super::permanent::PermanentKeyshareData; 2 | use super::{Keyshare, KeyshareData}; 3 | use crate::tests::TestGenerators; 4 | use cait_sith::ecdsa::KeygenOutput; 5 | use mpc_contract::primitives::domain::DomainId; 6 | use 
mpc_contract::primitives::key_state::{EpochId, KeyEventId, KeyForDomain, Keyset}; 7 | 8 | pub fn generate_dummy_keyshare(epoch_id: u64, domain_id: u64, attempt_id: u64) -> Keyshare { 9 | let key = TestGenerators::new(2, 2) 10 | .make_ecdsa_keygens() 11 | .into_iter() 12 | .next() 13 | .unwrap() 14 | .1; 15 | Keyshare { 16 | key_id: KeyEventId::new( 17 | EpochId::new(epoch_id), 18 | DomainId(domain_id), 19 | serde_json::from_str(&format!("{}", attempt_id)).unwrap(), 20 | ), 21 | data: KeyshareData::Secp256k1(KeygenOutput { 22 | private_share: key.private_share, 23 | public_key: key.public_key, 24 | }), 25 | } 26 | } 27 | 28 | fn permanent_keyshare_from_keyshares( 29 | epoch_id: u64, 30 | keyshares: &[Keyshare], 31 | ) -> PermanentKeyshareData { 32 | PermanentKeyshareData { 33 | epoch_id: EpochId::new(epoch_id), 34 | keyshares: keyshares.to_vec(), 35 | } 36 | } 37 | 38 | fn keyset_from_permanent_keyshare(permanent: &PermanentKeyshareData) -> Keyset { 39 | let keys = permanent 40 | .keyshares 41 | .iter() 42 | .map(|keyshare| KeyForDomain { 43 | domain_id: keyshare.key_id.domain_id, 44 | key: keyshare.public_key().unwrap().try_into().unwrap(), 45 | attempt: keyshare.key_id.attempt_id, 46 | }) 47 | .collect(); 48 | Keyset::new(permanent.epoch_id, keys) 49 | } 50 | 51 | pub struct KeysetBuilder { 52 | epoch_id: u64, 53 | keys: Vec, 54 | } 55 | 56 | impl KeysetBuilder { 57 | pub fn new(epoch_id: u64) -> Self { 58 | Self { 59 | epoch_id, 60 | keys: Vec::new(), 61 | } 62 | } 63 | 64 | pub fn from_keyshares(epoch_id: u64, keyshares: &[Keyshare]) -> Self { 65 | Self { 66 | epoch_id, 67 | keys: keyshares.to_vec(), 68 | } 69 | } 70 | 71 | pub fn keyshares(&self) -> &[Keyshare] { 72 | &self.keys 73 | } 74 | 75 | pub fn add_keyshare(&mut self, keyshare: Keyshare) -> &mut Self { 76 | self.keys.push(keyshare); 77 | self 78 | } 79 | 80 | pub fn keyset(&self) -> Keyset { 81 | keyset_from_permanent_keyshare(&self.permanent_key_data()) 82 | } 83 | 84 | pub fn permanent_key_data(&self) 
-> PermanentKeyshareData { 85 | permanent_keyshare_from_keyshares(self.epoch_id, &self.keys) 86 | } 87 | 88 | pub fn generated(&self) -> Vec { 89 | self.keyset().domains 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /node/src/main.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use tracing::init_logging; 3 | 4 | mod assets; 5 | #[cfg(test)] 6 | mod async_testing; 7 | mod background; 8 | mod cli; 9 | mod config; 10 | mod coordinator; 11 | mod db; 12 | mod indexer; 13 | mod key_events; 14 | mod keyshare; 15 | mod metrics; 16 | mod mpc_client; 17 | mod network; 18 | mod p2p; 19 | mod primitives; 20 | mod protocol; 21 | mod protocol_version; 22 | mod providers; 23 | mod runtime; 24 | mod sign_request; 25 | pub mod signing; 26 | mod tee; 27 | #[cfg(test)] 28 | mod tests; 29 | mod tracing; 30 | mod tracking; 31 | mod web; 32 | 33 | fn main() -> anyhow::Result<()> { 34 | init_logging(); 35 | futures::executor::block_on(cli::Cli::parse().run()) 36 | } 37 | -------------------------------------------------------------------------------- /node/src/network/computation.rs: -------------------------------------------------------------------------------- 1 | use tracing::info; 2 | 3 | use super::NetworkTaskChannel; 4 | use crate::tracking; 5 | use std::future::Future; 6 | 7 | /// Interface for a computation that is leader-centric: 8 | /// - If any follower's computation returns error, it automatically sends an Abort message to 9 | /// the leader, causing the leader to fail as well. 10 | /// - If the leader's computation returns error, it automatically sends an Abort message to 11 | /// all followers, causing their computation to fail as well. 12 | /// 13 | /// If leader_waits_for_success returns true, then additionally: 14 | /// - Followers who succeed send a Success message to the leader. 15 | /// - The leader will wait for all Success messages before returning. 
16 | /// 17 | /// The leader_waits_for_success is for asset generation, where the owner of the asset wants 18 | /// to only mark it as completed when all followers have persisted their share of the asset. 19 | #[async_trait::async_trait] 20 | pub trait MpcLeaderCentricComputation: Sized + 'static { 21 | /// Performs the computation itself, without worrying about failure propagation or 22 | /// waiting for success of followers. 23 | async fn compute(self, channel: &mut NetworkTaskChannel) -> anyhow::Result; 24 | fn leader_waits_for_success(&self) -> bool; 25 | 26 | /// Performs the computation. DO NOT override this function. 27 | fn perform_leader_centric_computation( 28 | self, 29 | mut channel: NetworkTaskChannel, 30 | timeout: std::time::Duration, 31 | ) -> impl Future> + 'static { 32 | info!("Performing leader centric computation."); 33 | let leader_waits_for_success = self.leader_waits_for_success(); 34 | let sender = channel.sender(); 35 | let sender_clone = sender.clone(); 36 | 37 | // We'll wrap the following future in a timeout below. 
38 | let fut = async move { 39 | if !sender.is_leader() { 40 | sender.initialize_all_participants_connections().await?; 41 | } 42 | let result = self.compute(&mut channel).await; 43 | let result = match result { 44 | Ok(result) => result, 45 | Err(err) => { 46 | sender.communicate_failure(&err); 47 | return Err(err); 48 | } 49 | }; 50 | if leader_waits_for_success && sender.is_leader() { 51 | if let Err(err) = channel.wait_for_followers_to_succeed().await { 52 | sender.communicate_failure(&err); 53 | return Err(err); 54 | } 55 | } 56 | Ok(result) 57 | }; 58 | 59 | async move { 60 | let sender = sender_clone; 61 | let result = tokio::time::timeout(timeout, fut).await; 62 | let result = match result { 63 | Ok(result) => result, 64 | Err(_) => { 65 | let err = anyhow::anyhow!("Timeout"); 66 | sender.communicate_failure(&err); 67 | return Err(err); 68 | } 69 | }; 70 | if result.is_ok() { 71 | if !sender.is_leader() && leader_waits_for_success { 72 | sender.communicate_success()?; 73 | } 74 | tracking::set_progress("Computation complete"); 75 | } 76 | result 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /node/src/network/constants.rs: -------------------------------------------------------------------------------- 1 | /// Maximum length of a single network message. This is a security measure 2 | /// to prevent a malicious node from sending a huge message that would 3 | /// cause OOM errors. 4 | pub const MAX_MESSAGE_LEN: u32 = 100 * 1024 * 1024; 5 | /// Timeout in seconds for reading messages from the network peers. 6 | /// This is to prevent hanging peer connections. 
7 | pub const MESSAGE_READ_TIMEOUT_SECS: u64 = 30; 8 | -------------------------------------------------------------------------------- /node/src/network/handshake.rs: -------------------------------------------------------------------------------- 1 | use crate::protocol_version::MPC_PROTOCOL_VERSION; 2 | use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; 3 | 4 | /// Arbitrary magic byte to distinguish from older protocol where we didn't send 5 | /// the protocol version at all. 6 | const MAGIC_BYTE: u8 = 0xcc; 7 | 8 | /// Performs a p2p handshake with the other side of the connection; this is done the first thing 9 | /// for each connection. Fails if the handshake result is unexpected. 10 | pub async fn p2p_handshake( 11 | conn: &mut T, 12 | timeout: std::time::Duration, 13 | ) -> anyhow::Result<()> { 14 | tokio::time::timeout(timeout, async move { 15 | let mut handshake_buf = [0u8; 5]; 16 | handshake_buf[0] = MAGIC_BYTE; 17 | handshake_buf[1..].copy_from_slice(&MPC_PROTOCOL_VERSION.to_be_bytes()); 18 | conn.write_all(&handshake_buf).await?; 19 | 20 | let mut other_handshake = [0u8; 5]; 21 | conn.read_exact(&mut other_handshake).await?; 22 | if other_handshake[0] != MAGIC_BYTE { 23 | anyhow::bail!("Invalid magic byte in handshake"); 24 | } 25 | 26 | let other_protocol_version = u32::from_be_bytes(other_handshake[1..].try_into().unwrap()); 27 | if other_protocol_version != MPC_PROTOCOL_VERSION { 28 | anyhow::bail!( 29 | "Incompatible protocol version; we have {}, they have {}", 30 | MPC_PROTOCOL_VERSION, 31 | other_protocol_version 32 | ); 33 | } 34 | anyhow::Ok(()) 35 | }) 36 | .await? 
37 | } 38 | 39 | #[cfg(test)] 40 | mod tests { 41 | use super::p2p_handshake; 42 | use crate::network::handshake::MAGIC_BYTE; 43 | use crate::protocol_version::MPC_PROTOCOL_VERSION; 44 | use std::future::Future; 45 | use tokio::io::{AsyncReadExt, AsyncWriteExt}; 46 | 47 | const TIMEOUT: std::time::Duration = std::time::Duration::from_secs(1); 48 | 49 | fn do_handshake(mut a: tokio::io::DuplexStream) -> impl Future> { 50 | let handle = tokio::spawn(async move { p2p_handshake(&mut a, TIMEOUT).await }); 51 | async move { handle.await? } 52 | } 53 | 54 | #[tokio::test] 55 | async fn test_p2p_handshake_same_version() { 56 | let (a, b) = tokio::io::duplex(1024); 57 | let a_result = do_handshake(a); 58 | let b_result = do_handshake(b); 59 | a_result.await.unwrap(); 60 | b_result.await.unwrap(); 61 | } 62 | 63 | #[tokio::test] 64 | async fn test_p2p_handshake_different_version() { 65 | let (a, mut b) = tokio::io::duplex(1024); 66 | let a_result = do_handshake(a); 67 | let mut buf = [0u8; 5]; 68 | b.read_exact(&mut buf).await.unwrap(); 69 | buf[1..].copy_from_slice(&(MPC_PROTOCOL_VERSION + 1).to_be_bytes()); 70 | b.write_all(&buf).await.unwrap(); 71 | let err = a_result.await.unwrap_err(); 72 | assert_eq!( 73 | err.to_string(), 74 | format!( 75 | "Incompatible protocol version; we have {}, they have {}", 76 | MPC_PROTOCOL_VERSION, 77 | MPC_PROTOCOL_VERSION + 1 78 | ) 79 | ); 80 | } 81 | 82 | #[tokio::test] 83 | async fn test_p2p_handshake_invalid_magic_byte() { 84 | let (a, mut b) = tokio::io::duplex(1024); 85 | let a_result = do_handshake(a); 86 | let buf = [0u8; 10]; 87 | b.write_all(&buf).await.unwrap(); 88 | let err = a_result.await.unwrap_err(); 89 | assert_eq!(err.to_string(), "Invalid magic byte in handshake"); 90 | } 91 | 92 | #[tokio::test] 93 | async fn test_p2p_handshake_response_too_short_and_hangs() { 94 | for i in 0..=4 { 95 | let (a, mut b) = tokio::io::duplex(1024); 96 | let a_result = do_handshake(a); 97 | let buf = vec![MAGIC_BYTE; i]; 98 | 
b.write_all(&buf).await.unwrap(); 99 | let err = a_result.await.unwrap_err(); 100 | assert_eq!(err.to_string(), "deadline has elapsed"); 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /node/src/network/indexer_heights.rs: -------------------------------------------------------------------------------- 1 | use crate::primitives::ParticipantId; 2 | use std::collections::HashMap; 3 | use std::sync::atomic::AtomicU64; 4 | 5 | /// Tracks the block height of the indexer of each participant. 6 | pub struct IndexerHeightTracker { 7 | /// Keys are readonly; values are updated atomically. 8 | pub heights: HashMap, 9 | } 10 | 11 | impl IndexerHeightTracker { 12 | pub fn new(participants: &[ParticipantId]) -> Self { 13 | let mut heights = HashMap::new(); 14 | for participant in participants { 15 | heights.insert(*participant, AtomicU64::new(0)); 16 | } 17 | Self { heights } 18 | } 19 | 20 | pub fn set_height(&self, participant: ParticipantId, height: u64) { 21 | let atomic = self.heights.get(&participant).unwrap(); 22 | let current = atomic.load(std::sync::atomic::Ordering::Relaxed); 23 | if height > current { 24 | atomic.store(height, std::sync::atomic::Ordering::Relaxed); 25 | } 26 | } 27 | 28 | pub fn get_heights(&self) -> HashMap { 29 | self.heights 30 | .iter() 31 | .map(|(participant, height)| { 32 | ( 33 | *participant, 34 | height.load(std::sync::atomic::Ordering::Relaxed), 35 | ) 36 | }) 37 | .collect() 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /node/src/network/signing.rs: -------------------------------------------------------------------------------- 1 | use super::MeshNetworkClient; 2 | use crate::primitives::ParticipantId; 3 | use crate::signing::queue::NetworkAPIForSigning; 4 | use std::collections::{HashMap, HashSet}; 5 | 6 | impl NetworkAPIForSigning for MeshNetworkClient { 7 | fn alive_participants(&self) -> HashSet { 8 | 
self.all_alive_participant_ids().into_iter().collect() 9 | } 10 | 11 | fn indexer_heights(&self) -> HashMap { 12 | self.get_indexer_heights() 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /node/src/protocol_version.rs: -------------------------------------------------------------------------------- 1 | /// This must be incremented every time we introduce an incompatible protocol 2 | /// change. 3 | /// 4 | /// A change is an incompatible protocol change UNLESS a binary compiled with 5 | /// the code before the change is compatible with a binary compiled after the 6 | /// change. 7 | /// 8 | /// A binary A is compatible with binary B if a cluster of nodes where some are 9 | /// running A and some are running B can still function normally, even if 10 | /// neither group have threshold number of nodes. 11 | /// 12 | /// The effect of this protocol version is that nodes with different protocol 13 | /// versions will refuse to connect to each other. That way, when we introduce 14 | /// an incompatible protocol change, we are effectively creating a new network 15 | /// that is separate from the old network, thus requiring only threshold number 16 | /// of nodes to upgrade. 
pub const MPC_PROTOCOL_VERSION: u32 = 5;
--------------------------------------------------------------------------------
/node/src/providers/ecdsa/key_generation.rs:
--------------------------------------------------------------------------------
use crate::network::computation::MpcLeaderCentricComputation;
use crate::network::NetworkTaskChannel;
use crate::protocol::run_protocol;
use crate::providers::ecdsa::{EcdsaSignatureProvider, KeygenOutput};
use cait_sith::protocol::Participant;

impl EcdsaSignatureProvider {
    pub(super) async fn run_key_generation_client_internal(
        threshold: usize,
        channel: NetworkTaskChannel,
    ) -> anyhow::Result<KeygenOutput> {
        let key = KeyGenerationComputation { threshold }
            .perform_leader_centric_computation(
                channel,
                // TODO(#195): Move timeout here instead of in Coordinator.
                std::time::Duration::from_secs(60),
            )
            .await?;
        tracing::info!("Ecdsa secp256k1 key generation completed");

        Ok(key)
    }
}

/// Runs the key generation protocol, returning the key generated.
/// This protocol is identical for the leader and the followers.
pub struct KeyGenerationComputation {
    threshold: usize,
}

#[async_trait::async_trait]
impl MpcLeaderCentricComputation<KeygenOutput> for KeyGenerationComputation {
    async fn compute(self, channel: &mut NetworkTaskChannel) -> anyhow::Result<KeygenOutput> {
        let cs_participants = channel
            .participants()
            .iter()
            .copied()
            .map(Participant::from)
            .collect::<Vec<_>>();
        let me = channel.my_participant_id();
        let protocol =
            cait_sith::ecdsa::dkg_ecdsa::keygen(&cs_participants, me.into(), self.threshold)?;
        run_protocol("ecdsa key generation", channel, protocol).await
    }

    fn leader_waits_for_success(&self) -> bool {
        false
    }
}

#[cfg(test)]
mod tests {
    use crate::network::computation::MpcLeaderCentricComputation;
    use crate::network::testing::run_test_clients;
    use crate::network::{MeshNetworkClient, NetworkTaskChannel};
    use crate::providers::ecdsa::key_generation::KeyGenerationComputation;
    use crate::providers::ecdsa::{EcdsaTaskId, KeygenOutput};
    use crate::tests::TestGenerators;
    use crate::tracking::testing::start_root_task_with_periodic_dump;
    use mpc_contract::primitives::domain::DomainId;
    use mpc_contract::primitives::key_state::{AttemptId, EpochId, KeyEventId};
    use std::sync::Arc;
    use tokio::sync::mpsc;

    #[tokio::test]
    async fn test_key_generation() {
        start_root_task_with_periodic_dump(async move {
            let results = run_test_clients(
                TestGenerators::new(4, 3).participant_ids(),
                run_keygen_client,
            )
            .await
            .unwrap();
            println!("{:?}", results);
        })
        .await;
    }

    async fn run_keygen_client(
        client: Arc<MeshNetworkClient>,
        mut channel_receiver: mpsc::UnboundedReceiver<NetworkTaskChannel>,
    ) -> anyhow::Result<KeygenOutput> {
        let participant_id = client.my_participant_id();
        let all_participant_ids = client.all_participant_ids();
        // We'll have the first participant be the leader.
        let channel = if participant_id == all_participant_ids[0] {
            client.new_channel_for_task(
                EcdsaTaskId::KeyGeneration {
                    key_event: KeyEventId::new(
                        EpochId::new(42),
                        DomainId::legacy_ecdsa_id(),
                        AttemptId::legacy_attempt_id(),
                    ),
                },
                client.all_participant_ids(),
            )?
        } else {
            channel_receiver
                .recv()
                .await
                .ok_or_else(|| anyhow::anyhow!("No channel"))?
        };
        let key = KeyGenerationComputation { threshold: 3 }
            .perform_leader_centric_computation(channel, std::time::Duration::from_secs(60))
            .await?;

        Ok(key)
    }
}
--------------------------------------------------------------------------------
/node/src/providers/eddsa/kdf.rs:
--------------------------------------------------------------------------------
//! Key Derivation Function for eddsa keys.
use cait_sith::eddsa::KeygenOutput;
use cait_sith::frost_ed25519::keys::SigningShare;
use cait_sith::frost_ed25519::{Ed25519Group, Group, VerifyingKey};
use curve25519_dalek::Scalar;

/// Additively tweaks a keygen output: the tweak scalar is added to the private
/// share, and the matching tweak*G is added to the public key, keeping the
/// pair consistent.
pub(crate) fn derive_keygen_output(keygen_output: &KeygenOutput, tweak: [u8; 32]) -> KeygenOutput {
    let tweak = Scalar::from_bytes_mod_order(tweak);
    let private_share = SigningShare::new(keygen_output.private_share.to_scalar() + tweak);
    let public_key = VerifyingKey::new(
        keygen_output.public_key.to_element() + Ed25519Group::generator() * tweak,
    );
    KeygenOutput {
        private_share,
        public_key,
    }
}

#[cfg(test)]
mod test {
    use rand::Rng;

    use super::*;

    #[test]
    fn test_private_key_derivation() {
        let random_bytes: [u8; 32] = rand::thread_rng().gen();

        let scalar = Scalar::from_bytes_mod_order(random_bytes);
        let private_share = SigningShare::new(scalar);

        let public_key_element = Ed25519Group::generator() * scalar;
        let public_key = VerifyingKey::new(public_key_element);
35 | let keygen_output = KeygenOutput { 36 | private_share, 37 | public_key, 38 | }; 39 | 40 | let tweak: [u8; 32] = rand::thread_rng().gen(); 41 | let derived_keygen_output = derive_keygen_output(&keygen_output, tweak); 42 | 43 | assert_eq!( 44 | derived_keygen_output.public_key.to_element(), 45 | derived_keygen_output.private_share.to_scalar() * Ed25519Group::generator(), 46 | ); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /node/src/providers/eddsa/key_generation.rs: -------------------------------------------------------------------------------- 1 | use crate::network::computation::MpcLeaderCentricComputation; 2 | use crate::network::NetworkTaskChannel; 3 | use crate::protocol::run_protocol; 4 | use crate::providers::eddsa::EddsaSignatureProvider; 5 | use cait_sith::eddsa::KeygenOutput; 6 | use cait_sith::protocol::Participant; 7 | 8 | impl EddsaSignatureProvider { 9 | pub(super) async fn run_key_generation_client_internal( 10 | threshold: usize, 11 | channel: NetworkTaskChannel, 12 | ) -> anyhow::Result { 13 | let key = KeyGenerationComputation { threshold } 14 | .perform_leader_centric_computation( 15 | channel, 16 | // TODO(#195): Move timeout here instead of in Coordinator. 17 | std::time::Duration::from_secs(60), 18 | ) 19 | .await?; 20 | tracing::info!("Eddsa key generation completed"); 21 | 22 | Ok(key) 23 | } 24 | } 25 | 26 | /// Runs the key generation protocol, returning the key generated. 27 | /// This protocol is identical for the leader and the followers. 
28 | pub struct KeyGenerationComputation { 29 | threshold: usize, 30 | } 31 | 32 | #[async_trait::async_trait] 33 | impl MpcLeaderCentricComputation for KeyGenerationComputation { 34 | async fn compute(self, channel: &mut NetworkTaskChannel) -> anyhow::Result { 35 | let cs_participants = channel 36 | .participants() 37 | .iter() 38 | .copied() 39 | .map(Participant::from) 40 | .collect::>(); 41 | let me = channel.my_participant_id(); 42 | let protocol = 43 | cait_sith::eddsa::dkg_ed25519::keygen(&cs_participants, me.into(), self.threshold)?; 44 | run_protocol("eddsa key generation", channel, protocol).await 45 | } 46 | 47 | fn leader_waits_for_success(&self) -> bool { 48 | false 49 | } 50 | } 51 | 52 | #[cfg(test)] 53 | mod tests { 54 | use crate::network::computation::MpcLeaderCentricComputation; 55 | use crate::network::testing::run_test_clients; 56 | use crate::network::{MeshNetworkClient, NetworkTaskChannel}; 57 | use crate::providers::eddsa::key_generation::KeyGenerationComputation; 58 | use crate::providers::eddsa::EddsaTaskId; 59 | use crate::tests::TestGenerators; 60 | use crate::tracking::testing::start_root_task_with_periodic_dump; 61 | use cait_sith::eddsa::KeygenOutput; 62 | use mpc_contract::primitives::domain::DomainId; 63 | use mpc_contract::primitives::key_state::{AttemptId, EpochId, KeyEventId}; 64 | use std::sync::Arc; 65 | use tokio::sync::mpsc; 66 | 67 | #[tokio::test] 68 | async fn eddsa_test_key_generation() { 69 | start_root_task_with_periodic_dump(async move { 70 | let results = run_test_clients( 71 | TestGenerators::new(4, 3).participant_ids(), 72 | run_keygen_client, 73 | ) 74 | .await 75 | .unwrap(); 76 | println!("{:?}", results); 77 | }) 78 | .await; 79 | } 80 | 81 | async fn run_keygen_client( 82 | client: Arc, 83 | mut channel_receiver: mpsc::UnboundedReceiver, 84 | ) -> anyhow::Result { 85 | let participant_id = client.my_participant_id(); 86 | let all_participant_ids = client.all_participant_ids(); 87 | // We'll have the first 
participant be the leader. 88 | let channel = if participant_id == all_participant_ids[0] { 89 | client.new_channel_for_task( 90 | EddsaTaskId::KeyGeneration { 91 | key_event: KeyEventId::new( 92 | EpochId::new(42), 93 | DomainId::legacy_ecdsa_id(), 94 | AttemptId::legacy_attempt_id(), 95 | ), 96 | }, 97 | client.all_participant_ids(), 98 | )? 99 | } else { 100 | channel_receiver 101 | .recv() 102 | .await 103 | .ok_or_else(|| anyhow::anyhow!("No channel"))? 104 | }; 105 | let key = KeyGenerationComputation { threshold: 3 } 106 | .perform_leader_centric_computation(channel, std::time::Duration::from_secs(60)) 107 | .await?; 108 | 109 | Ok(key) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /node/src/providers/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module abstracts the Signature Schema, 2 | //! i.e., we might want to use ECDSA over the Secp256k1 curve, EdDSA over Ed25519, or something else. 3 | //! `SignatureProvider` exposes an interface for such add-ons. Alongside it, helper functions 4 | //! (like `RegisterMpcTask`) are exposed, which somewhat guarantees that if the code compiles, 5 | //! you don’t need to add anything more internally for it to work. 6 | //! 7 | //! As a reference, check the existing implementations. 8 | 9 | pub mod ecdsa; 10 | pub mod eddsa; 11 | 12 | use crate::config::ParticipantsConfig; 13 | use crate::network::NetworkTaskChannel; 14 | use crate::primitives::{MpcTaskId, ParticipantId}; 15 | use crate::sign_request::SignatureId; 16 | pub use ecdsa::EcdsaSignatureProvider; 17 | pub use ecdsa::EcdsaTaskId; 18 | use std::str::FromStr; 19 | use std::sync::Arc; 20 | 21 | /// The interface that defines the requirements for a signing schema to be correctly used in the code. 
22 | pub trait SignatureProvider { 23 | type PublicKey: PublicKeyConversion; 24 | type SecretShare; 25 | type KeygenOutput; 26 | 27 | type Signature; 28 | 29 | /// Trait bound `Into` serves as a way to see what logic needs to be added, 30 | /// when introducing new `TaskId`. Implementation of the trait should be trivial. 31 | type TaskId: Into; 32 | 33 | /// Generates a signature. 34 | /// The implementation should handle the key derivation function (KDF) if needed. 35 | /// Only the leader should call this function. 36 | async fn make_signature( 37 | self: Arc, 38 | id: SignatureId, 39 | ) -> anyhow::Result<(Self::Signature, Self::PublicKey)>; 40 | 41 | /// Executes the key generation protocol. 42 | /// Returns once key generation is complete or encounters an error. 43 | /// This should only succeed if all participants are online and running this function. 44 | /// 45 | /// Both leaders and followers call this function. 46 | /// 47 | /// It drains `channel_receiver` until the required task is found, meaning these clients must not be run in parallel. 48 | async fn run_key_generation_client( 49 | threshold: usize, 50 | channel: NetworkTaskChannel, 51 | ) -> anyhow::Result; 52 | 53 | /// Executes the key resharing protocol. This can only succeed if all new participants are online. 54 | /// Both leaders and followers call this function. 55 | /// It drains `channel_receiver` until the required task is found, meaning these clients must not be run in parallel. 56 | async fn run_key_resharing_client( 57 | new_threshold: usize, 58 | key_share: Option, 59 | public_key: Self::PublicKey, 60 | old_participants: &ParticipantsConfig, 61 | channel: NetworkTaskChannel, 62 | ) -> anyhow::Result; 63 | 64 | /// Expected to be called in a common loop that handles received channels and redirects them 65 | /// to the respective `SignatureProvider`. 
66 | /// This function is called during the "normal MPC run", 67 | /// i.e., it should fail if it receives messages from the `KeyGeneration` or `KeyResharing` stage. 68 | async fn process_channel(self: Arc, channel: NetworkTaskChannel) -> anyhow::Result<()>; 69 | 70 | /// Spawns any auxiliary logic that performs pre-computation (typically meant to optimize signature delay). 71 | async fn spawn_background_tasks(self: Arc) -> anyhow::Result<()>; 72 | } 73 | 74 | /// A resource might be generated with a set of some participants `A`. 75 | /// This trait helps check whether the current set of participants contains `A`. 76 | pub trait HasParticipants { 77 | fn is_subset_of_active_participants(&self, active_participants: &[ParticipantId]) -> bool; 78 | } 79 | 80 | /// Helper functions to convert back and forth public key types 81 | pub trait PublicKeyConversion: Sized { 82 | fn to_near_public_key(&self) -> anyhow::Result; 83 | fn from_near_crypto(public_key: &near_crypto::PublicKey) -> anyhow::Result; 84 | 85 | // Don't implement it 86 | fn from_near_sdk(public_key: &near_sdk::PublicKey) -> anyhow::Result { 87 | let near_crypto = near_crypto::PublicKey::from_str(&String::from(public_key))?; 88 | Self::from_near_crypto(&near_crypto) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /node/src/runtime.rs: -------------------------------------------------------------------------------- 1 | use std::ops::Deref; 2 | 3 | /// Tokio Runtime cannot be dropped in an asynchronous context (for good reason). 4 | /// However, we need to be able to drop it in two scenarios: 5 | /// - Integration tests, where we want to start up and shut down the CLI 6 | /// multiple times. 7 | /// - When the contract transitions in and out of the Running state (such as 8 | /// for key resharing), we need to tear down the existing tasks (including 9 | /// network) and restart with a new configuration. 
We need to ensure that 10 | /// all existing tasks have terminated before starting the new configuration. 11 | /// The only way to do that reliably is by dropping the runtime. If we cannot 12 | /// drop the runtime in an async context, we'd have to rely on std::thread, 13 | /// but that itself is difficult to deal with (mostly that we cannot easily 14 | /// abort it and would have to rely on additional notifications). 15 | /// 16 | /// Yes, this is an ugly workaround. But in our use case, the async task that 17 | /// would be dropping a runtime is always on a thread that blocks on that task 18 | /// and that task only. 19 | pub struct AsyncDroppableRuntime(Option); 20 | 21 | impl AsyncDroppableRuntime { 22 | pub fn new(runtime: tokio::runtime::Runtime) -> Self { 23 | Self(Some(runtime)) 24 | } 25 | } 26 | 27 | impl Deref for AsyncDroppableRuntime { 28 | type Target = tokio::runtime::Runtime; 29 | 30 | fn deref(&self) -> &Self::Target { 31 | self.0.as_ref().unwrap() 32 | } 33 | } 34 | 35 | impl Drop for AsyncDroppableRuntime { 36 | fn drop(&mut self) { 37 | if let Some(runtime) = self.0.take() { 38 | std::thread::scope(|s| { 39 | s.spawn(|| drop(runtime)); 40 | }); 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /node/src/sign_request.rs: -------------------------------------------------------------------------------- 1 | use crate::db::{DBCol, SecretDB}; 2 | use crate::metrics; 3 | use mpc_contract::primitives::domain::DomainId; 4 | use mpc_contract::primitives::signature::{Payload, Tweak}; 5 | use near_indexer_primitives::CryptoHash; 6 | use serde::{Deserialize, Serialize}; 7 | use std::sync::Arc; 8 | use tokio::sync::broadcast; 9 | 10 | pub type SignatureId = CryptoHash; 11 | 12 | #[derive(Serialize, Deserialize, Debug, Clone)] 13 | pub struct SignatureRequest { 14 | /// The unique ID that identifies the signature, and can also uniquely identify the response. 
15 | pub id: SignatureId, 16 | /// The receipt that generated the signature request, which can be used to look up on chain. 17 | pub receipt_id: CryptoHash, 18 | pub payload: Payload, 19 | pub tweak: Tweak, 20 | pub entropy: [u8; 32], 21 | pub timestamp_nanosec: u64, 22 | pub domain: DomainId, 23 | } 24 | 25 | pub struct SignRequestStorage { 26 | db: Arc, 27 | add_sender: broadcast::Sender, 28 | } 29 | 30 | impl SignRequestStorage { 31 | pub fn new(db: Arc) -> anyhow::Result { 32 | let (tx, _) = tokio::sync::broadcast::channel(500); 33 | Ok(Self { db, add_sender: tx }) 34 | } 35 | 36 | /// If given request is already in the database, returns false. 37 | /// Otherwise, inserts the request and returns true. 38 | pub fn add(&self, request: &SignatureRequest) -> bool { 39 | let key = borsh::to_vec(&request.id).unwrap(); 40 | if self 41 | .db 42 | .get(DBCol::SignRequest, &key) 43 | .expect("Unrecoverable error reading from database") 44 | .is_some() 45 | { 46 | return false; 47 | } 48 | let value_ser = serde_json::to_vec(&request).unwrap(); 49 | let mut update = self.db.update(); 50 | update.put(DBCol::SignRequest, &key, &value_ser); 51 | update 52 | .commit() 53 | .expect("Unrecoverable error writing to database"); 54 | let _ = self.add_sender.send(request.id); 55 | true 56 | } 57 | 58 | /// Blocks until a signature request with given id is present, then returns it. 59 | /// This behavior is necessary because a peer might initiate computation for a signature 60 | /// request before our indexer has caught up to the request. We need proof of the request 61 | /// from on-chain in order to participate in the computation. 62 | pub async fn get(&self, id: SignatureId) -> Result { 63 | let key = borsh::to_vec(&id)?; 64 | let mut rx = self.add_sender.subscribe(); 65 | if let Some(request_ser) = self.db.get(DBCol::SignRequest, &key)? 
{ 66 | return Ok(serde_json::from_slice(&request_ser)?); 67 | } 68 | loop { 69 | let added_id = match rx.recv().await { 70 | Ok(added_id) => added_id, 71 | Err(e) => { 72 | metrics::SIGN_REQUEST_CHANNEL_FAILED.inc(); 73 | return Err(anyhow::anyhow!("Error in sign_request channel recv, {}", e)); 74 | } 75 | }; 76 | if added_id == id { 77 | break; 78 | } 79 | } 80 | let request_ser = self.db.get(DBCol::SignRequest, &key)?.unwrap(); 81 | Ok(serde_json::from_slice(&request_ser)?) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /node/src/signing/metrics.rs: -------------------------------------------------------------------------------- 1 | use lazy_static::lazy_static; 2 | use prometheus::{exponential_buckets, linear_buckets}; 3 | 4 | lazy_static! { 5 | pub static ref MPC_PENDING_SIGNATURES_QUEUE_SIZE: prometheus::IntGauge = 6 | prometheus::register_int_gauge!( 7 | "mpc_pending_signatures_queue_size", 8 | "Number of pending signature requests in the queue" 9 | ) 10 | .unwrap(); 11 | pub static ref MPC_PENDING_SIGNATURES_QUEUE_BLOCKS_INDEXED: prometheus::IntCounter = 12 | prometheus::register_int_counter!( 13 | "mpc_pending_signatures_queue_blocks_indexed", 14 | "Number of blocks indexed by the pending signatures queue" 15 | ) 16 | .unwrap(); 17 | pub static ref MPC_PENDING_SIGNATURES_QUEUE_FINALIZED_BLOCKS_INDEXED: prometheus::IntCounter = 18 | prometheus::register_int_counter!( 19 | "mpc_pending_signatures_queue_finalized_blocks_indexed", 20 | "Number of finalized blocks indexed by the pending signatures queue" 21 | ) 22 | .unwrap(); 23 | pub static ref MPC_PENDING_SIGNATURES_QUEUE_REQUESTS_INDEXED: prometheus::IntCounter = 24 | prometheus::register_int_counter!( 25 | "mpc_pending_signatures_queue_requests_indexed", 26 | "Number of signature requests indexed by the pending signatures queue" 27 | ) 28 | .unwrap(); 29 | pub static ref MPC_PENDING_SIGNATURES_QUEUE_RESPONSES_INDEXED: prometheus::IntCounter = 30 | 
prometheus::register_int_counter!( 31 | "mpc_pending_signatures_queue_responses_indexed", 32 | "Number of signature responses indexed by the pending signatures queue" 33 | ) 34 | .unwrap(); 35 | pub static ref MPC_PENDING_SIGNATURES_QUEUE_MATCHING_RESPONSES_INDEXED: prometheus::IntCounter = 36 | prometheus::register_int_counter!( 37 | "mpc_pending_signatures_queue_matching_responses_indexed", 38 | "Number of signature responses that match previously indexed signature requests, 39 | indexed by the pending signatures queue" 40 | ) 41 | .unwrap(); 42 | 43 | pub static ref MPC_PENDING_SIGNATURES_QUEUE_ATTEMPTS_GENERATED: prometheus::IntCounter = 44 | prometheus::register_int_counter!( 45 | "mpc_pending_signatures_queue_attempts_generated", 46 | "Number of attempts generated by the pending signatures queue to compute a signature as the leader" 47 | ) 48 | .unwrap(); 49 | 50 | pub static ref SIGNATURE_REQUEST_RESPONSE_LATENCY_BLOCKS: prometheus::Histogram = { 51 | // High resolution for 1-9 blocks 52 | let mut buckets = linear_buckets(1.0, 1.0, 9).unwrap(); 53 | // 10, 15, 22.5, 33.75, 50, ..., 256, 384 54 | let exponential_buckets = exponential_buckets(10.0, 1.5, 91).unwrap(); 55 | buckets.extend(exponential_buckets); 56 | 57 | prometheus::register_histogram!( 58 | "mpc_signature_request_response_latency_blocks", 59 | "The number of blocks between when a signature request is seen and when the corresponding response is seen.", 60 | buckets). 61 | unwrap() 62 | 63 | }; 64 | 65 | pub static ref SIGNATURE_REQUEST_RESPONSE_LATENCY_SECONDS: prometheus::Histogram = 66 | prometheus::register_histogram!( 67 | "mpc_signature_request_response_latency_seconds", 68 | "The duration, in seconds, between when a signature request is seen and when the corresponding response is seen.", 69 | // 2s - 3s - 4.5s .... 
- 115s 70 | exponential_buckets(2.0, 1.5, 10).unwrap() 71 | ).unwrap(); 72 | } 73 | -------------------------------------------------------------------------------- /node/src/signing/mod.rs: -------------------------------------------------------------------------------- 1 | mod debug; 2 | mod metrics; 3 | pub mod queue; 4 | pub mod recent_blocks_tracker; 5 | -------------------------------------------------------------------------------- /node/src/tee/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use anyhow::{bail, Context}; 4 | use backon::{BackoffBuilder, ExponentialBuilder}; 5 | use dstack_sdk::dstack_client::{DstackClient, TcbInfo}; 6 | use hex::ToHex; 7 | use http::status::StatusCode; 8 | use reqwest::multipart::Form; 9 | use serde::{Deserialize, Serialize}; 10 | use sha3::{Digest, Sha3_384}; 11 | use std::time::Duration; 12 | use tracing::{error, info}; 13 | 14 | /// Endpoint to contact dstack service. 15 | /// Set to [`None`] which defaults to `/var/run/dstack.sock` 16 | const ENDPOINT: Option<&str> = None; 17 | /// URL for usbmission of tdx quote. Returns collateral to be used for verification. 18 | const PHALA_TDX_QUOTE_UPLOAD_URL: &str = "https://proof.t16z.com/api/upload"; 19 | /// Expected HTTP [`StatusCode`] for a successful submission. 20 | const PHALA_SUCCESS_STATUS_CODE: StatusCode = StatusCode::OK; 21 | /// The maximum duration to wait for retrying request to Phala's endpoint, [`PHALA_TDX_QUOTE_UPLOAD_URL`]. 22 | const MAX_BACKOFF_DURATION: Duration = Duration::from_secs(60); 23 | /// Number of bytes for the report data. 
24 | /// report_data: [u8; 64] = [version(1 byte) || sha384(TLS pub key || account public key ) || zero padding] 25 | const REPORT_DATA_SIZE: usize = 64; 26 | 27 | #[derive(Serialize, Deserialize)] 28 | pub struct TeeAttestation { 29 | tcb_info: TcbInfo, 30 | tdx_quote: String, 31 | collateral: String, 32 | } 33 | 34 | #[derive(Deserialize)] 35 | struct UploadResponse { 36 | quote_collateral: String, 37 | #[serde(rename = "checksum")] 38 | _checksum: String, 39 | } 40 | 41 | pub struct BinaryVersion(u8); 42 | 43 | /// Generates a [`TeeAttestation`] for this node, which can be used to send to the contract to prove that 44 | /// the node is running in a `TEE` context. 45 | /// 46 | /// Returns an [`anyhow::Error`] if a non-transient error occurs, that prevents the node 47 | /// from generating the attestation. 48 | pub async fn create_remote_attestation_info( 49 | binary_version: BinaryVersion, 50 | tls_public_key: near_crypto::ED25519PublicKey, 51 | account_public_key: near_crypto::ED25519PublicKey, 52 | ) -> anyhow::Result { 53 | let client = DstackClient::new(ENDPOINT); 54 | 55 | let client_info_response = client.info().await?; 56 | let tcb_info = client_info_response.tcb_info; 57 | 58 | let report_data = { 59 | let mut hasher = Sha3_384::new(); 60 | hasher.update(tls_public_key.0); 61 | hasher.update(account_public_key.0); 62 | 63 | let public_keys_hash = hasher.finalize(); 64 | 65 | let mut report_data = [0_u8; REPORT_DATA_SIZE]; 66 | 67 | report_data[0] = binary_version.0; 68 | report_data[1..33].copy_from_slice(&public_keys_hash); 69 | 70 | report_data 71 | }; 72 | 73 | let tdx_quote: String = client 74 | .get_quote(report_data.into()) 75 | .await? 
76 | .quote 77 | .encode_hex(); 78 | 79 | let quote_upload_response = { 80 | let reqwest_client = reqwest::Client::new(); 81 | let tdx_quote = tdx_quote.clone(); 82 | 83 | let upload_tdx_quote = async move || { 84 | let form = Form::new().text("hex", tdx_quote.clone()); 85 | 86 | let response = reqwest_client 87 | .post(PHALA_TDX_QUOTE_UPLOAD_URL) 88 | .multipart(form) 89 | .send() 90 | .await?; 91 | 92 | let status = response.status(); 93 | 94 | if status != PHALA_SUCCESS_STATUS_CODE { 95 | bail!("Got unexpected HTTP status code: response from phala http_endpoint: {:?}, expected: {:?}", status, PHALA_SUCCESS_STATUS_CODE); 96 | } 97 | 98 | response 99 | .json::() 100 | .await 101 | .context("Failed to deserialize response from Phala.") 102 | }; 103 | 104 | let mut backoff = ExponentialBuilder::default() 105 | .with_max_delay(MAX_BACKOFF_DURATION) 106 | .without_max_times() 107 | .with_jitter() 108 | .build(); 109 | 110 | // Loop until we have a response. 111 | loop { 112 | match upload_tdx_quote().await { 113 | Err(err) => { 114 | let duration = backoff.next().unwrap_or(MAX_BACKOFF_DURATION); 115 | error!("Failed to upload tdx_quote to Phala due to: {:?}", err); 116 | info!("Retrying tdx_quote upload to Phala in {:?}", duration); 117 | continue; 118 | } 119 | Ok(response) => break response, 120 | } 121 | } 122 | }; 123 | 124 | let collateral = quote_upload_response.quote_collateral; 125 | Ok(TeeAttestation { 126 | tdx_quote, 127 | tcb_info, 128 | collateral, 129 | }) 130 | } 131 | -------------------------------------------------------------------------------- /node/src/tests/basic_cluster.rs: -------------------------------------------------------------------------------- 1 | use crate::p2p::testing::PortSeed; 2 | use crate::tests::{request_signature_and_await_response, IntegrationTestSetup}; 3 | use crate::tracking::AutoAbortTask; 4 | use mpc_contract::primitives::domain::{DomainConfig, DomainId, SignatureScheme}; 5 | use 
near_o11y::testonly::init_integration_logger; 6 | use near_time::Clock; 7 | use serial_test::serial; 8 | 9 | // Make a cluster of four nodes, test that we can generate keyshares 10 | // and then produce signatures. 11 | #[tokio::test] 12 | #[serial] 13 | async fn test_basic_cluster() { 14 | init_integration_logger(); 15 | const NUM_PARTICIPANTS: usize = 4; 16 | const THRESHOLD: usize = 3; 17 | const TXN_DELAY_BLOCKS: u64 = 1; 18 | let temp_dir = tempfile::tempdir().unwrap(); 19 | let mut setup = IntegrationTestSetup::new( 20 | Clock::real(), 21 | temp_dir.path(), 22 | (0..NUM_PARTICIPANTS) 23 | .map(|i| format!("test{}", i).parse().unwrap()) 24 | .collect(), 25 | THRESHOLD, 26 | TXN_DELAY_BLOCKS, 27 | PortSeed::BASIC_CLUSTER_TEST, 28 | ); 29 | 30 | let domain = DomainConfig { 31 | id: DomainId(0), 32 | scheme: SignatureScheme::Secp256k1, 33 | }; 34 | 35 | { 36 | let mut contract = setup.indexer.contract_mut().await; 37 | contract.initialize(setup.participants.clone()); 38 | contract.add_domains(vec![domain.clone()]); 39 | } 40 | 41 | let _runs = setup 42 | .configs 43 | .into_iter() 44 | .map(|config| AutoAbortTask::from(tokio::spawn(config.run()))) 45 | .collect::>(); 46 | 47 | assert!(request_signature_and_await_response( 48 | &mut setup.indexer, 49 | "user0", 50 | &domain, 51 | std::time::Duration::from_secs(60) 52 | ) 53 | .await 54 | .is_some()); 55 | } 56 | -------------------------------------------------------------------------------- /node/src/tests/benchmark.rs: -------------------------------------------------------------------------------- 1 | use super::TestGenerators; 2 | use k256::elliptic_curve::Field; 3 | use k256::Scalar; 4 | 5 | #[test] 6 | fn benchmark_single_threaded_presignature_generation() { 7 | let generator = TestGenerators::new(10, 7); 8 | let keygens = generator.make_ecdsa_keygens(); 9 | let triple0s = generator.make_triples(); 10 | let triple1s = generator.make_triples(); 11 | 12 | let start_time = std::time::Instant::now(); 13 | const 
COUNT: usize = 1000; 14 | for _ in 0..COUNT { 15 | let _ = generator.make_presignatures(&triple0s, &triple1s, &keygens); 16 | } 17 | let end_time = std::time::Instant::now(); 18 | println!( 19 | "Time taken per presignature: {:?}", 20 | (end_time - start_time) / COUNT as u32 21 | ); 22 | } 23 | 24 | #[test] 25 | fn benchmark_single_threaded_signature_generation() { 26 | let generator = TestGenerators::new(10, 7); 27 | let keygens = generator.make_ecdsa_keygens(); 28 | let triple0s = generator.make_triples(); 29 | let triple1s = generator.make_triples(); 30 | let presignatures = generator.make_presignatures(&triple0s, &triple1s, &keygens); 31 | 32 | let start_time = std::time::Instant::now(); 33 | const COUNT: usize = 1000; 34 | for _ in 0..COUNT { 35 | let _ = generator.make_signature( 36 | &presignatures, 37 | keygens 38 | .iter() 39 | .next() 40 | .unwrap() 41 | .1 42 | .public_key 43 | .to_element() 44 | .to_affine(), 45 | Scalar::random(&mut rand::thread_rng()), 46 | ); 47 | } 48 | let end_time = std::time::Instant::now(); 49 | println!( 50 | "Time taken per signature: {:?}", 51 | (end_time - start_time) / COUNT as u32 52 | ); 53 | } 54 | -------------------------------------------------------------------------------- /node/src/tests/faulty.rs: -------------------------------------------------------------------------------- 1 | use crate::indexer::participants::ContractState; 2 | use crate::p2p::testing::PortSeed; 3 | use crate::tests::{request_signature_and_await_response, IntegrationTestSetup}; 4 | use crate::tracking::AutoAbortTask; 5 | use mpc_contract::primitives::domain::{DomainConfig, DomainId, SignatureScheme}; 6 | use near_o11y::testonly::init_integration_logger; 7 | use near_sdk::AccountId; 8 | use near_time::Clock; 9 | use rand::Rng; 10 | use serial_test::serial; 11 | 12 | // Make a cluster of four nodes. Test the following: 13 | // 1. Shut down one node and confirms that signatures can still be generated. 14 | // 2. 
Stop another node and assert that no signatures can be generated. 15 | // 3. Restart the node that was later shutdown and assert that signatures can be generated again 16 | #[tokio::test] 17 | #[serial] 18 | async fn test_faulty_cluster() { 19 | init_integration_logger(); 20 | const NUM_PARTICIPANTS: usize = 4; 21 | const THRESHOLD: usize = 3; 22 | const TXN_DELAY_BLOCKS: u64 = 1; 23 | let temp_dir = tempfile::tempdir().unwrap(); 24 | let accounts = (0..NUM_PARTICIPANTS) 25 | .map(|i| format!("test{}", i).parse().unwrap()) 26 | .collect::>(); 27 | let mut setup = IntegrationTestSetup::new( 28 | Clock::real(), 29 | temp_dir.path(), 30 | accounts.clone(), 31 | THRESHOLD, 32 | TXN_DELAY_BLOCKS, 33 | PortSeed::FAULTY_CLUSTER_TEST, 34 | ); 35 | 36 | let domain = DomainConfig { 37 | id: DomainId(0), 38 | scheme: SignatureScheme::Secp256k1, 39 | }; 40 | 41 | { 42 | let mut contract = setup.indexer.contract_mut().await; 43 | contract.initialize(setup.participants); 44 | contract.add_domains(vec![domain.clone()]); 45 | } 46 | 47 | let _runs = setup 48 | .configs 49 | .into_iter() 50 | .map(|config| AutoAbortTask::from(tokio::spawn(config.run()))) 51 | .collect::>(); 52 | 53 | tracing::info!("Waiting for key generation to complete"); 54 | setup 55 | .indexer 56 | .wait_for_contract_state(|state| matches!(state, ContractState::Running(_))) 57 | .await; 58 | tracing::info!("Key generation complete"); 59 | 60 | let Some(signature_delay) = request_signature_and_await_response( 61 | &mut setup.indexer, 62 | "user0", 63 | &domain, 64 | std::time::Duration::from_secs(60), 65 | ) 66 | .await 67 | else { 68 | panic!("Timed out generating the first signature"); 69 | }; 70 | 71 | // first step: drop one node, and make sure signatures can still be generated 72 | let mut rng = rand::thread_rng(); 73 | let to_drop: usize = rng.gen_range(0..NUM_PARTICIPANTS); 74 | tracing::info!("Bringing down one node #{}", to_drop); 75 | let disabled1 = 
setup.indexer.disable(accounts[to_drop].clone()).await; 76 | 77 | assert!(request_signature_and_await_response( 78 | &mut setup.indexer, 79 | "user1", 80 | &domain, 81 | signature_delay * 2 82 | ) 83 | .await 84 | .is_some()); 85 | tracing::info!("Step 1 complete"); 86 | 87 | // Second step: drop another node, and make sure signatures cannot be generated 88 | let another_to_drop = loop { 89 | let i = rng.gen_range(0..NUM_PARTICIPANTS); 90 | if i != to_drop { 91 | break i; 92 | } 93 | }; 94 | tracing::info!("Bringing down another node #{}", another_to_drop); 95 | let disabled2 = setup 96 | .indexer 97 | .disable(accounts[another_to_drop].clone()) 98 | .await; 99 | assert!(request_signature_and_await_response( 100 | &mut setup.indexer, 101 | "user2", 102 | &domain, 103 | signature_delay * 2 104 | ) 105 | .await 106 | .is_none()); 107 | tracing::info!("Step 2 complete"); 108 | 109 | // Third step: bring up the dropped node in step 2, and make sure signatures can be generated again 110 | disabled2.reenable_and_wait_till_running().await; 111 | assert!(request_signature_and_await_response( 112 | &mut setup.indexer, 113 | "user3", 114 | &domain, 115 | signature_delay * 2 116 | ) 117 | .await 118 | .is_some()); 119 | tracing::info!("Step 3 complete"); 120 | 121 | drop(disabled1); 122 | } 123 | -------------------------------------------------------------------------------- /node/src/tests/multidomain.rs: -------------------------------------------------------------------------------- 1 | use crate::p2p::testing::PortSeed; 2 | use crate::tests::{request_signature_and_await_response, IntegrationTestSetup}; 3 | use crate::tracking::AutoAbortTask; 4 | use mpc_contract::primitives::domain::{DomainConfig, DomainId, SignatureScheme}; 5 | use near_o11y::testonly::init_integration_logger; 6 | use near_time::Clock; 7 | use serial_test::serial; 8 | 9 | // Make a cluster of four nodes, test that we can generate keyshares 10 | // and then produce signatures. 
11 | #[tokio::test] 12 | #[serial] 13 | async fn test_basic_multidomain() { 14 | init_integration_logger(); 15 | const NUM_PARTICIPANTS: usize = 4; 16 | const THRESHOLD: usize = 3; 17 | const TXN_DELAY_BLOCKS: u64 = 1; 18 | let temp_dir = tempfile::tempdir().unwrap(); 19 | let mut setup = IntegrationTestSetup::new( 20 | Clock::real(), 21 | temp_dir.path(), 22 | (0..NUM_PARTICIPANTS) 23 | .map(|i| format!("test{}", i).parse().unwrap()) 24 | .collect(), 25 | THRESHOLD, 26 | TXN_DELAY_BLOCKS, 27 | PortSeed::BASIC_MULTIDOMAIN_TEST, 28 | ); 29 | 30 | let mut domains = vec![ 31 | DomainConfig { 32 | id: DomainId(0), 33 | scheme: SignatureScheme::Secp256k1, 34 | }, 35 | DomainConfig { 36 | id: DomainId(1), 37 | scheme: SignatureScheme::Ed25519, 38 | }, 39 | ]; 40 | 41 | { 42 | let mut contract = setup.indexer.contract_mut().await; 43 | contract.initialize(setup.participants.clone()); 44 | contract.add_domains(domains.clone()); 45 | } 46 | 47 | let _runs = setup 48 | .configs 49 | .into_iter() 50 | .map(|config| AutoAbortTask::from(tokio::spawn(config.run()))) 51 | .collect::>(); 52 | 53 | for domain in &domains { 54 | assert!(request_signature_and_await_response( 55 | &mut setup.indexer, 56 | &format!("user{}", domain.id.0), 57 | domain, 58 | std::time::Duration::from_secs(60) 59 | ) 60 | .await 61 | .is_some()); 62 | } 63 | 64 | { 65 | let new_domains = vec![ 66 | DomainConfig { 67 | id: DomainId(2), 68 | scheme: SignatureScheme::Ed25519, 69 | }, 70 | DomainConfig { 71 | id: DomainId(3), 72 | scheme: SignatureScheme::Secp256k1, 73 | }, 74 | ]; 75 | let mut contract = setup.indexer.contract_mut().await; 76 | contract.add_domains(new_domains.clone()); 77 | domains.extend(new_domains); 78 | } 79 | 80 | for domain in &domains { 81 | assert!(request_signature_and_await_response( 82 | &mut setup.indexer, 83 | &format!("user{}", domain.id.0), 84 | domain, 85 | std::time::Duration::from_secs(60) 86 | ) 87 | .await 88 | .is_some()); 89 | } 90 | 91 | { 92 | let mut contract = 
setup.indexer.contract_mut().await; 93 | contract.start_resharing(setup.participants); 94 | } 95 | 96 | for domain in &domains { 97 | assert!(request_signature_and_await_response( 98 | &mut setup.indexer, 99 | &format!("user{}", domain.id.0), 100 | domain, 101 | std::time::Duration::from_secs(60) 102 | ) 103 | .await 104 | .is_some()); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /node/src/tracing.rs: -------------------------------------------------------------------------------- 1 | pub fn init_logging() { 2 | tracing_subscriber::fmt() 3 | .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) 4 | .try_init() 5 | .ok(); 6 | } 7 | -------------------------------------------------------------------------------- /pytest/common_lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/near/mpc/da7a5a072e7adb11948898ef81cb46bf7a7d87af/pytest/common_lib/__init__.py -------------------------------------------------------------------------------- /pytest/common_lib/constants.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import os 3 | 4 | MPC_REPO_DIR = pathlib.Path(__file__).resolve().parents[2] 5 | MPC_BINARY_PATH = os.path.join(MPC_REPO_DIR / 'target' / 'release', 'mpc-node') 6 | CONFIG_PATH = os.path.join(MPC_REPO_DIR / 'pytest' / 'config.json') 7 | 8 | TIMEOUT = 60 9 | SHORT_TIMEOUT = 10 10 | NEAR_BASE = 10**24 11 | TGAS = 10**12 12 | # Tgas required by the contract for a sign call. 13 | GAS_FOR_SIGN_CALL = 10 14 | # Deposit in Yoctonear required for a sign call. 
15 | SIGNATURE_DEPOSIT = 1 16 | -------------------------------------------------------------------------------- /pytest/common_lib/contracts.py: -------------------------------------------------------------------------------- 1 | import json 2 | import base64 3 | from utils import load_binary_file, requests 4 | from enum import Enum 5 | from borsh_construct import Vec, U8, CStruct, U64, Option, U32 6 | from .constants import MPC_REPO_DIR 7 | 8 | COMPILED_CONTRACT_PATH = MPC_REPO_DIR / 'libs' / 'chain-signatures' / 'res' / 'mpc_contract.wasm' 9 | MIGRATE_CURRENT_CONTRACT_PATH = MPC_REPO_DIR / 'pytest' / 'tests' / 'test_contracts' / 'migration' / 'migration_contract.wasm' 10 | TESTNET_ACCOUNT_ID = "v1.signer-prod.testnet" 11 | MAINNET_ACCOUNT_ID = "v1.signer" 12 | 13 | 14 | def build_view_code_request(account_id: str) -> dict: 15 | return { 16 | "jsonrpc": "2.0", 17 | "id": "dontcare", 18 | "method": "query", 19 | "params": { 20 | "request_type": "view_code", 21 | "finality": "final", 22 | "account_id": account_id 23 | } 24 | } 25 | 26 | 27 | class NetworkRpc(Enum): 28 | MAINNET = "https://rpc.mainnet.near.org" 29 | TESTNET = "https://rpc.testnet.near.org" 30 | 31 | 32 | def fetch_contract_code(account_id: str, network: NetworkRpc) -> bytearray: 33 | print(f"fetching contract at {account_id} from {network}\n") 34 | request_payload = build_view_code_request(account_id) 35 | response = requests.post(network.value, json=request_payload) 36 | response.raise_for_status() 37 | 38 | result = response.json() 39 | try: 40 | code_base64 = result["result"]["code_base64"] 41 | return bytearray(base64.b64decode(code_base64)) 42 | except KeyError: 43 | raise RuntimeError("Invalid RPC response or contract code not found.") 44 | 45 | 46 | def fetch_testnet_contract() -> bytearray: 47 | # add a sanity check that the hash of this code matches a specific hash 48 | return fetch_contract_code(TESTNET_ACCOUNT_ID, NetworkRpc.TESTNET) 49 | 50 | 51 | def fetch_mainnet_contract() -> 
bytearray: 52 | # add a sanity check that the hash of this code matches a specific hash 53 | return fetch_contract_code(MAINNET_ACCOUNT_ID, NetworkRpc.MAINNET) 54 | 55 | 56 | def load_mpc_contract() -> bytearray: 57 | """ 58 | Returns the current contract. 59 | """ 60 | return load_binary_file(COMPILED_CONTRACT_PATH) 61 | 62 | 63 | class ConfigV2: 64 | 65 | def __init__(self, max_num_requests_to_remove, request_timeout_blocks): 66 | self.max_num_requests_to_remove = max_num_requests_to_remove 67 | self.request_timeout_blocks = request_timeout_blocks 68 | 69 | def dump_json(self): 70 | return json.dumps({ 71 | "request_timeout_blocks": 72 | self.max_num_requests_to_remove, 73 | "key_event_timeout_blocks": 74 | self.request_timeout_blocks 75 | }) 76 | 77 | def get(self): 78 | return { 79 | "max_num_requests_to_remove": self.max_num_requests_to_remove, 80 | "request_timeout_blocks": self.request_timeout_blocks 81 | } 82 | 83 | 84 | ConfigV2Borsh = CStruct("key_event_timeout_blocks" / U64) 85 | ProposeUpdateArgsV2 = CStruct("code" / Option(Vec(U8)), 86 | "config" / Option(ConfigV2Borsh)) 87 | 88 | 89 | class UpdateArgsV2: 90 | 91 | def __init__(self, code_path=None, config=None): 92 | self.code_path = code_path 93 | self.config = config 94 | self._code = None 95 | 96 | def borsh_serialize(self): 97 | return ProposeUpdateArgsV2.build({ 98 | 'code': 99 | self.code(), 100 | 'config': 101 | self.config.get() if self.config is not None else None 102 | }) 103 | 104 | def code(self): 105 | if self.code_path == None: 106 | return None 107 | if self._code is None: 108 | self._code = load_binary_file(self.code_path) 109 | return self._code 110 | 111 | def dump_json(self): 112 | assert self.config is not None 113 | return self.config.dump_json() 114 | -------------------------------------------------------------------------------- /pytest/common_lib/signature.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 
import random 4 | import json 5 | import base64 6 | 7 | from typing import Dict, Literal, Optional 8 | 9 | from common_lib.contract_state import Domain, SignatureScheme 10 | 11 | sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) 12 | 13 | PayloadType = Literal["Ecdsa", "Eddsa"] 14 | 15 | signature_scheme_to_payload: Dict[SignatureScheme, PayloadType] = { 16 | 'Secp256k1': 'Ecdsa', 17 | 'Ed25519': 'Eddsa', 18 | } 19 | 20 | 21 | def generate_payload(payload_type: PayloadType) -> dict[PayloadType, str]: 22 | return {payload_type: random.getrandbits(256).to_bytes(32, 'big').hex()} 23 | 24 | 25 | def generate_sign_args( 26 | domain: Domain, 27 | path: str = 'test', 28 | payload: Optional[dict[PayloadType, str]] = None) -> dict: 29 | if payload is None: 30 | payload = generate_payload(signature_scheme_to_payload[domain.scheme]) 31 | return { 32 | 'request': { 33 | 'domain_id': domain.id, 34 | 'path': path, 35 | 'payload_v2': payload, 36 | } 37 | } 38 | 39 | 40 | def assert_signature_success(res): 41 | try: 42 | signature_base64 = res['result']['status']['SuccessValue'] 43 | except KeyError: 44 | raise AssertionError(json.dumps(res, indent=1)) 45 | 46 | signature_base64 += '=' * ((4 - len(signature_base64) % 4) % 4) 47 | signature = json.loads(base64.b64decode(signature_base64)) 48 | print("\033[96mSign Response ✓\033[0m") 49 | return signature 50 | -------------------------------------------------------------------------------- /pytest/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "local": "True", 3 | "near_root": "../libs/nearcore/target/release", 4 | "binary_name": "neard", 5 | "release": "False" 6 | } -------------------------------------------------------------------------------- /pytest/nearcore_pytest/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | # Dynamically add the nearcore/pytest/lib folder 
to sys.path 5 | LIB_PATH = Path( 6 | __file__).resolve().parents[2] / "libs" / "nearcore" / "pytest" / "lib" 7 | if str(LIB_PATH) not in sys.path: 8 | sys.path.insert(0, str(LIB_PATH)) 9 | 10 | # Debugging output 11 | print(f"LIB_PATH added to sys.path: {LIB_PATH}") 12 | print(f"sys.path contents:\n{sys.path}") 13 | -------------------------------------------------------------------------------- /pytest/nearcore_pytest/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=60.0", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "nearcore_pytest" 7 | version = "0.1.0" 8 | description = "A wrapper package for nearcore pytest" 9 | requires-python = ">=3.6" 10 | 11 | [tool.setuptools] 12 | package-dir = {"" = "../../libs/nearcore/pytest/lib"} 13 | include-package-data = true 14 | 15 | -------------------------------------------------------------------------------- /pytest/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | slow: marks tests as slow (deselect with '-m "not slow"') 4 | ci_excluded: marks tests that should be excluded from ci 5 | -------------------------------------------------------------------------------- /pytest/readme.md: -------------------------------------------------------------------------------- 1 | # How to run the python test 2 | 3 | Simply run `exec_pytest.sh` (optinal flag `--verbose` and `--reset-submodules`) or execute the steps below with the current directory at root of the mpc git repo. 4 | 5 | ## Run tests with pytest 6 | 1. Ensure submodules are clean and point to the correct commit. Use the following commands at your own risk: 7 | ```bash 8 | git submodule foreach --recursive git reset --hard 9 | git submodule foreach --recursive git clean -fdx 10 | git submodule update --init --recursive --force 11 | ``` 12 | 13 | 2. 
Build nearcore and main node: 14 | ```bash 15 | # build nearcore: 16 | cd libs/nearcore && cargo build -p neard --release 17 | 18 | # build the main node 19 | cd ../.. && cargo build -p mpc-node --release 20 | ``` 21 | 22 | 3. Set up virtualenv (optional, but recommended): 23 | ```bash 24 | cd pytest && python3 -m venv venv 25 | 26 | # activate virtualenv: 27 | source venv/bin/activate 28 | 29 | # install requirements: 30 | pip install -r requirements.txt 31 | ``` 32 | 33 | 4. Run pytest: 34 | ```bash 35 | pytest # -v -s optional flags for verbosity and -m "not slow" to skip slow tests 36 | ``` 37 | 38 | Run individual tests with e.g. `pytest tests/test_contract_update.py::test_code_update` 39 | -------------------------------------------------------------------------------- /pytest/requirements.txt: -------------------------------------------------------------------------------- 1 | -r ../libs/nearcore/pytest/requirements.txt 2 | -e ./nearcore_pytest 3 | borsh-construct 4 | pytest==8.3.4 5 | gitpython 6 | pytest-rerunfailures 7 | ruamel.yaml 8 | -------------------------------------------------------------------------------- /pytest/tests/conftest.py: -------------------------------------------------------------------------------- 1 | # conftest.py 2 | """ 3 | Fixtures for pytest 4 | """ 5 | import pytest 6 | import atexit 7 | import subprocess 8 | import git 9 | import sys 10 | import shutil 11 | from pathlib import Path 12 | import os 13 | 14 | from cluster import CONFIG_ENV_VAR 15 | 16 | sys.path.append(str(Path(__file__).resolve().parents[1])) 17 | from common_lib import constants, contracts 18 | 19 | 20 | @pytest.fixture(autouse=True, scope="function") 21 | def run_atexit_cleanup(): 22 | """ 23 | Runs atexit BEFORE the pytest concludes. 24 | Without the -s flag, pytest redirects the output of stdout and stderr, 25 | but closes those pipes BEFORE executing atexit, 26 | resulting in a failed test in case atexit attempts to write to stdout or stderr. 
27 | """ 28 | yield 29 | atexit._run_exitfuncs() 30 | 31 | 32 | @pytest.fixture(autouse=True, scope="session") 33 | def set_config(): 34 | """ 35 | Sets the environment variable for the nearcore config if not already set. 36 | """ 37 | if CONFIG_ENV_VAR not in os.environ: 38 | os.environ[CONFIG_ENV_VAR] = constants.CONFIG_PATH 39 | 40 | 41 | @pytest.fixture(scope="session", autouse=True) 42 | def compile_contract(): 43 | """ 44 | This function navigates to the chain-signatures directory, compiles the mpc-contract and moves it in the res folder. 45 | This ensures that the pytests will always use the source code inside chain-signatures/contract. 46 | """ 47 | print("compiling contract") 48 | git_repo = git.Repo('.', search_parent_directories=True) 49 | git_root = Path(git_repo.git.rev_parse("--show-toplevel")) 50 | chain_signatures = git_root / "libs" / "chain-signatures" 51 | 52 | subprocess.run([ 53 | "cargo", "build", "-p", "mpc-contract", 54 | "--target=wasm32-unknown-unknown", "--release" 55 | ], 56 | cwd=chain_signatures, 57 | check=True, 58 | stdout=sys.stdout, 59 | stderr=sys.stderr) 60 | 61 | subprocess.run(["wasm-opt", "-Oz", "target/wasm32-unknown-unknown/release/mpc_contract.wasm", "-o", "target/wasm32-unknown-unknown/release/mpc_contract.wasm"], 62 | cwd=chain_signatures, 63 | check=True, 64 | stdout=sys.stdout, 65 | stderr=sys.stderr) 66 | 67 | compiled_contract = chain_signatures / "target" / "wasm32-unknown-unknown" / "release" / "mpc_contract.wasm" 68 | os.makedirs(os.path.dirname(contracts.COMPILED_CONTRACT_PATH), 69 | exist_ok=True) 70 | shutil.copy(compiled_contract, contracts.COMPILED_CONTRACT_PATH) 71 | -------------------------------------------------------------------------------- /pytest/tests/test_contract_update.py: -------------------------------------------------------------------------------- 1 | #/usr/bin/env python3 2 | """ 3 | Starts 2 near validators and 2 mpc nodes. 4 | Deploys v0 mpc contract. 5 | Proposes a contract update (v1). 
6 | votes on the contract update. 7 | Verifies that the update was executed. 8 | """ 9 | 10 | import sys 11 | import time 12 | import pathlib 13 | import pytest 14 | from utils import load_binary_file 15 | 16 | from common_lib import contracts 17 | 18 | sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) 19 | from common_lib import shared 20 | from common_lib.contracts import COMPILED_CONTRACT_PATH, MIGRATE_CURRENT_CONTRACT_PATH, UpdateArgsV2, fetch_mainnet_contract, fetch_testnet_contract, load_mpc_contract 21 | 22 | 23 | def deploy_and_init_v2(domains=['Secp256k1', 'Ed25519']): 24 | cluster, mpc_nodes = shared.start_cluster_with_mpc( 25 | 2, 4, 1, contracts.load_mpc_contract()) 26 | cluster.init_cluster(participants=mpc_nodes[:2], 27 | threshold=2, 28 | domains=domains) 29 | cluster.contract_state().print() 30 | return cluster, mpc_nodes 31 | 32 | 33 | def test_update_from_current(): 34 | cluster, mpc_nodes = shared.start_cluster_with_mpc(2, 3, 1, 35 | load_mpc_contract()) 36 | cluster.init_cluster(mpc_nodes, 2) 37 | cluster.send_and_await_signature_requests(1) 38 | new_contract = UpdateArgsV2(MIGRATE_CURRENT_CONTRACT_PATH) 39 | cluster.propose_update(new_contract.borsh_serialize()) 40 | for node in cluster.get_voters()[0:2]: 41 | cluster.vote_update(node, 0) 42 | cluster.assert_is_deployed(new_contract.code()) 43 | 44 | 45 | @pytest.mark.parametrize("fetch_contract", 46 | [fetch_mainnet_contract, fetch_testnet_contract]) 47 | def test_update_to_current(fetch_contract): 48 | current = fetch_contract() 49 | cluster, mpc_nodes = shared.start_cluster_with_mpc(4, 4, 1, current) 50 | cluster.define_candidate_set(mpc_nodes) 51 | cluster.update_participant_status(assert_contract=False) 52 | cluster.init_contract(threshold=3) 53 | cluster.add_domains(signature_schemes=['Secp256k1', 'Ed25519'], 54 | ignore_vote_errors=True) 55 | cluster.send_and_await_signature_requests(1) 56 | 57 | time.sleep(1) 58 | # introduce some state: 59 | args = { 60 | 
'prospective_epoch_id': 1, 61 | 'proposal': cluster.make_threshold_parameters(3) 62 | } 63 | for node in cluster.mpc_nodes[0:2]: 64 | tx = node.sign_tx(cluster.mpc_contract_account(), 65 | 'vote_new_parameters', args) 66 | node.send_txn_and_check_success(tx) 67 | cluster.contract_state().print() 68 | new_contract = UpdateArgsV2(COMPILED_CONTRACT_PATH) 69 | cluster.propose_update(new_contract.borsh_serialize()) 70 | for node in cluster.get_voters()[0:3]: 71 | cluster.vote_update(node, 0) 72 | time.sleep(2) 73 | cluster.assert_is_deployed(new_contract.code()) 74 | cluster.wait_for_state("Running") 75 | cluster.contract_state().print() 76 | cluster.send_and_await_signature_requests(1) 77 | -------------------------------------------------------------------------------- /pytest/tests/test_contracts/migration/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | -------------------------------------------------------------------------------- /pytest/tests/test_contracts/migration/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "migration" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | [lib] 8 | crate-type = ["cdylib"] 9 | 10 | [dependencies] 11 | near-sdk = { version = "5.11.0"} 12 | borsh = "1.5.0" 13 | 14 | [profile.release] 15 | codegen-units = 1 16 | opt-level = "z" 17 | lto = true 18 | -------------------------------------------------------------------------------- /pytest/tests/test_contracts/migration/migration_contract.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/near/mpc/da7a5a072e7adb11948898ef81cb46bf7a7d87af/pytest/tests/test_contracts/migration/migration_contract.wasm -------------------------------------------------------------------------------- /pytest/tests/test_contracts/migration/src/lib.rs: 
-------------------------------------------------------------------------------- 1 | use near_sdk::borsh::{BorshDeserialize, BorshSerialize}; 2 | use near_sdk::env::log_str; 3 | use near_sdk::near_bindgen; 4 | 5 | #[derive(BorshDeserialize, BorshSerialize, Debug)] 6 | #[near_bindgen] 7 | pub struct Contract {} 8 | 9 | #[near_bindgen] 10 | impl Contract { 11 | #[private] 12 | #[init(ignore_state)] 13 | #[handle_result] 14 | pub fn migrate() -> Result { 15 | log_str("Migration called"); 16 | Ok(Self {}) 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /pytest/tests/test_contracts/parallel/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "parallel-contract" 3 | version = "0.2.0" 4 | edition = "2021" 5 | license = "MIT" 6 | 7 | [lib] 8 | crate-type = ["cdylib", "lib"] 9 | 10 | [dependencies] 11 | borsh = "1.5.0" 12 | near-sdk = { version = "5.2.1", features = ["legacy", "unit-testing"] } 13 | serde = { version = "1", features = ["derive"] } 14 | sha2 = "0.10" 15 | hex = "0.4" 16 | -------------------------------------------------------------------------------- /pytest/tests/test_contracts/parallel/res/contract.wasm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/near/mpc/da7a5a072e7adb11948898ef81cb46bf7a7d87af/pytest/tests/test_contracts/parallel/res/contract.wasm -------------------------------------------------------------------------------- /pytest/tests/test_contracts/parallel/rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.81.0" # seems to be required by near runtime (for now) 3 | targets = ["wasm32-unknown-unknown"] 4 | -------------------------------------------------------------------------------- /pytest/tests/test_contracts/parallel/src/lib.rs: 
-------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | use near_sdk::borsh::{BorshDeserialize, BorshSerialize}; 4 | use near_sdk::serde::Serialize; 5 | use near_sdk::{env, near_bindgen, serde_json, AccountId, Gas, NearToken, Promise, PromiseResult}; 6 | use sha2::{Digest, Sha256}; 7 | 8 | #[derive(Serialize)] 9 | #[serde(rename_all = "PascalCase")] 10 | pub enum Payload { 11 | Ecdsa(String), 12 | Eddsa(String), 13 | } 14 | #[derive(Serialize)] 15 | pub struct SignRequest { 16 | pub path: String, 17 | pub payload_v2: Option, 18 | pub domain_id: Option, 19 | } 20 | 21 | #[derive(Serialize)] 22 | pub struct SignArgs { 23 | pub request: SignRequest, 24 | } 25 | 26 | #[near_bindgen] 27 | #[derive(BorshDeserialize, BorshSerialize, Default)] 28 | pub struct TestContract; 29 | 30 | #[near_bindgen] 31 | impl TestContract { 32 | pub fn make_parallel_sign_calls( 33 | &self, 34 | target_contract: AccountId, 35 | ecdsa_calls_by_domain: BTreeMap, 36 | eddsa_calls_by_domain: BTreeMap, 37 | seed: u64, 38 | ) -> Promise { 39 | fn build_calls( 40 | target_contract: &AccountId, 41 | domain_map: &BTreeMap, 42 | seed: u64, 43 | payload_builder: &F, 44 | ) -> Vec 45 | where 46 | F: Fn(String) -> Payload, 47 | { 48 | domain_map 49 | .iter() 50 | .flat_map(|(domain_id, num_calls)| { 51 | (0..*num_calls).map(move |i| { 52 | let mut hasher = Sha256::new(); 53 | hasher.update(format!("{seed}-{i}").as_str()); 54 | let hex_payload = hex::encode(hasher.finalize()); 55 | 56 | let args = SignArgs { 57 | request: SignRequest { 58 | payload_v2: Some(payload_builder(hex_payload)), 59 | path: "".to_string(), 60 | domain_id: Some(*domain_id), // assuming DomainId is Copy 61 | }, 62 | }; 63 | 64 | Promise::new(target_contract.clone()).function_call( 65 | "sign".to_string(), 66 | serde_json::to_vec(&args).unwrap(), 67 | NearToken::from_yoctonear(1), 68 | Gas::from_tgas(30), 69 | ) 70 | }) 71 | }) 72 | .collect() 73 | } 74 | 75 | let 
mut promises = Vec::new(); 76 | promises.extend(build_calls( 77 | &target_contract, 78 | &ecdsa_calls_by_domain, 79 | seed, 80 | &|hex| Payload::Ecdsa(hex), 81 | )); 82 | promises.extend(build_calls( 83 | &target_contract, 84 | &eddsa_calls_by_domain, 85 | seed + 1_000_000, // tweak seed offset to avoid collision if needed 86 | &|hex| Payload::Eddsa(hex), 87 | )); 88 | 89 | // Combine the calls using promise::and 90 | promises.reverse(); 91 | let mut combined_promise = promises.pop().unwrap(); 92 | while !promises.is_empty() { 93 | combined_promise = combined_promise.and(promises.pop().unwrap()); 94 | } 95 | 96 | // Attach a callback to log the final results 97 | combined_promise.then(Promise::new(env::current_account_id()).function_call( 98 | "handle_results".to_string(), 99 | vec![], 100 | NearToken::from_near(0), 101 | Gas::from_tgas(10), 102 | )) 103 | } 104 | 105 | #[private] 106 | pub fn handle_results(&self) -> u64 { 107 | let num_calls = env::promise_results_count(); 108 | env::log_str(format!("{num_calls} parallel calls completed!").as_str()); 109 | for i in 0..num_calls { 110 | let result = env::promise_result(i); 111 | env::log_str(&format!("sign #{i}: {:?}", result)); 112 | assert!(matches!(result, PromiseResult::Successful(_))); 113 | } 114 | num_calls 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /pytest/tests/test_key_event.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Tests key resharing (adding and removing nodes). 4 | Starts 2 nodes, have node #3 join, then #4 join, 5 | then #1 leaves, and finally #2 leaves. 6 | At every step we check that signatures can still be produced. 
7 | """ 8 | 9 | import pathlib 10 | import sys 11 | import time 12 | 13 | import pytest 14 | 15 | sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) 16 | from common_lib import shared 17 | from common_lib.contracts import load_mpc_contract 18 | 19 | 20 | def test_single_domain(): 21 | """ 22 | Tests single-domain key generation and resharing. 23 | 24 | The test starts with 2 nodes and one domain, performs key generation, and verifies 25 | that the attempt ID is incremented correctly. 26 | 27 | It performs multiple rounds of resharing while changing the participant set. 28 | 29 | Signature requests are sent after each resharing to verify liveness. 30 | """ 31 | cluster, mpc_nodes = shared.start_cluster_with_mpc(2, 4, 1, load_mpc_contract()) 32 | mpc_nodes[0].reserve_key_event_attempt(0, 0, 0) 33 | mpc_nodes[0].reserve_key_event_attempt(0, 0, 1) 34 | # start with 2 nodes 35 | cluster.init_cluster(participants=mpc_nodes[:2], threshold=2) 36 | assert cluster.contract_state().keyset().keyset[0].attempt_id == 2 37 | cluster.send_and_await_signature_requests(1) 38 | 39 | # two new nodes join, increase threshold 40 | cluster.do_resharing( 41 | new_participants=mpc_nodes[:4], new_threshold=3, prospective_epoch_id=1 42 | ) 43 | cluster.update_participant_status() 44 | cluster.send_and_await_signature_requests(1) 45 | 46 | cluster.do_resharing( 47 | new_participants=mpc_nodes[1:4], new_threshold=3, prospective_epoch_id=2 48 | ) 49 | 50 | cluster.update_participant_status() 51 | cluster.send_and_await_signature_requests(1) 52 | cluster.do_resharing( 53 | new_participants=mpc_nodes[0:4], 54 | new_threshold=3, 55 | prospective_epoch_id=3, 56 | wait_for_running=False, 57 | ) 58 | 59 | assert cluster.wait_for_state("Running"), "failed to start running" 60 | cluster.update_participant_status() 61 | cluster.send_and_await_signature_requests(1) 62 | 63 | # test for multiple attemps: 64 | 65 | mpc_nodes[0].reserve_key_event_attempt(4, 0, 0) 66 | 
mpc_nodes[0].reserve_key_event_attempt(4, 0, 1) 67 | cluster.do_resharing( 68 | new_participants=mpc_nodes[0:4], new_threshold=4, prospective_epoch_id=4 69 | ) 70 | cluster.update_participant_status() 71 | assert cluster.contract_state().keyset().keyset[0].attempt_id == 2 72 | cluster.send_and_await_signature_requests(1) 73 | 74 | 75 | def test_multi_domain(): 76 | """ 77 | Tests multi-domain key generation and resharing. 78 | 79 | The test starts with 2 nodes and one domain, then adds four more domains. 80 | It performs a resharing of the five domains from 2 to 4 nodes with an increased threshold. 81 | 82 | Afterwards, it adds another domain but cancels the key generation before completion. 83 | """ 84 | cluster, mpc_nodes = shared.start_cluster_with_mpc(2, 4, 1, load_mpc_contract()) 85 | 86 | # start with 2 nodes 87 | cluster.init_cluster(participants=mpc_nodes[:2], threshold=2) 88 | cluster.send_and_await_signature_requests(1) 89 | 90 | cluster.add_domains( 91 | ["Secp256k1", "Ed25519", "Secp256k1", "Ed25519"], wait_for_running=False 92 | ) 93 | cluster.wait_for_state("Running") 94 | ## two new nodes join, increase threshold 95 | cluster.do_resharing( 96 | new_participants=mpc_nodes[:4], new_threshold=3, prospective_epoch_id=1 97 | ) 98 | cluster.update_participant_status() 99 | 100 | mpc_nodes[1].reserve_key_event_attempt(1, 5, 0) 101 | mpc_nodes[1].reserve_key_event_attempt(1, 5, 1) 102 | cluster.add_domains(["Secp256k1"], wait_for_running=False) 103 | mpc_nodes[0].kill(False) 104 | for node in mpc_nodes[1:4]: 105 | print(f"{node.print()} voting to cancel domain") 106 | args = { 107 | "next_domain_id": 7, 108 | } 109 | tx = node.sign_tx(cluster.mpc_contract_account(), "vote_cancel_keygen", args) 110 | node.send_txn_and_check_success(tx) 111 | cluster.wait_for_state("Running") 112 | with pytest.raises(KeyError): 113 | cluster.contract_state().keyset().get_key(6) 114 | assert cluster.contract_state().protocol_state.next_domain_id() == 7 115 | 
-------------------------------------------------------------------------------- /pytest/tests/test_parallel_sign_calls.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Starts 2 near validators and 2 mpc nodes. 4 | Deploys mpc contract. 5 | Deploys a test contract with a function that makes parallel sign calls. 6 | Calls the test function and ensures a successful response. 7 | """ 8 | 9 | import json 10 | import sys 11 | import base64 12 | import pytest 13 | import pathlib 14 | import time 15 | from utils import load_binary_file 16 | 17 | sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) 18 | from common_lib import shared, contracts, constants 19 | 20 | 21 | def load_parallel_sign_contract() -> bytearray: 22 | """ 23 | returns test contract for parallel sign 24 | """ 25 | path = constants.MPC_REPO_DIR / 'pytest/tests/test_contracts/parallel/res/contract.wasm' 26 | return load_binary_file(path) 27 | 28 | 29 | @pytest.mark.parametrize("num_parallel_signatures", [6]) 30 | def test_parallel_sign_calls(num_parallel_signatures): 31 | assert num_parallel_signatures % 2 == 0, "expected even number" 32 | # start cluster and deploy mpc contract 33 | mpc_contract = contracts.load_mpc_contract() 34 | cluster, mpc_nodes = shared.start_cluster_with_mpc(2, 2, 1, mpc_contract) 35 | cluster.init_cluster(mpc_nodes, 2) 36 | contract = load_parallel_sign_contract() 37 | cluster.deploy_secondary_contract(contract) 38 | 39 | # call `parallel_sign` and verify that it returns successfully 40 | res = cluster.make_function_call_on_secondary_contract( 41 | function_name='make_parallel_sign_calls', 42 | args={ 43 | 'target_contract': cluster.mpc_contract_account(), 44 | 'ecdsa_calls_by_domain': { 45 | 0: int(num_parallel_signatures / 2) 46 | }, 47 | 'eddsa_calls_by_domain': { 48 | 1: int(num_parallel_signatures / 2) 49 | }, 50 | 'seed': 23, 51 | }) 52 | 53 | # check the return value 54 | encoded_value = 
res['result']['status']['SuccessValue'] 55 | decoded_value = base64.b64decode(encoded_value).decode("utf-8") 56 | assert int(decoded_value) == num_parallel_signatures 57 | 58 | # check metrics to make sure signature requests are handled properly. 59 | started = time.time() 60 | while True: 61 | assert time.time( 62 | ) - started < constants.SHORT_TIMEOUT, "Waiting for metrics" 63 | metrics_good = True 64 | for i in range(len(cluster.mpc_nodes)): 65 | queue_size = cluster.get_int_metric_value_for_node( 66 | "mpc_pending_signatures_queue_size", i) 67 | requests_indexed = cluster.get_int_metric_value_for_node( 68 | "mpc_pending_signatures_queue_requests_indexed", i) 69 | responses_indexed = cluster.get_int_metric_value_for_node( 70 | "mpc_pending_signatures_queue_responses_indexed", i) 71 | matching_responses_indexed = cluster.get_int_metric_value_for_node( 72 | "mpc_pending_signatures_queue_matching_responses_indexed", i) 73 | print( 74 | f"Node {i}: queue_size={queue_size}, requests_indexed={requests_indexed}, responses_indexed={responses_indexed}, matching_responses_indexed={matching_responses_indexed}" 75 | ) 76 | if not (queue_size == 0 77 | and requests_indexed == num_parallel_signatures 78 | and responses_indexed == num_parallel_signatures 79 | and matching_responses_indexed == num_parallel_signatures): 80 | metrics_good = False 81 | led_signatures = cluster.get_int_metric_value( 82 | "mpc_pending_signatures_queue_attempts_generated") 83 | print(f"led_signatures={led_signatures}") 84 | if sum(led_signatures) != num_parallel_signatures: 85 | metrics_good = False 86 | if metrics_good: 87 | break 88 | time.sleep(1) 89 | print( 90 | "All requests and responses indexed, all signatures had exactly one leader, and signature queue is empty on all nodes. All Done." 
91 | ) 92 | -------------------------------------------------------------------------------- /pytest/tests/test_signature_request.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Starts 2 near validators and 2 mpc nodes. 4 | Deploys mpc contract in 'libs/chain-signatures/res/mpc_contract.wasm' 5 | Sends signature requests. 6 | Verifies that the mpc nodes index the signature request. 7 | Waits for the signature responses. Fails if timeout is reached. 8 | """ 9 | 10 | import sys 11 | import pathlib 12 | import argparse 13 | import pytest 14 | 15 | sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) 16 | from common_lib import shared 17 | from common_lib.contracts import load_mpc_contract 18 | 19 | 20 | @pytest.mark.parametrize("num_requests, num_respond_access_keys", [(10, 1)]) 21 | def test_signature_lifecycle(num_requests, num_respond_access_keys): 22 | cluster, mpc_nodes = shared.start_cluster_with_mpc( 23 | 2, 3, num_respond_access_keys, load_mpc_contract()) 24 | cluster.init_cluster(mpc_nodes, 2) 25 | # removing one node should not be a problem. 
26 | mpc_nodes[0].kill(False) 27 | cluster.send_and_await_signature_requests(num_requests) 28 | 29 | 30 | if __name__ == '__main__': 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument("--num-requests", 33 | type=int, 34 | default=10, 35 | help="Number of signature requests to make") 36 | parser.add_argument( 37 | "--num-respond-access-keys", 38 | type=int, 39 | default=1, 40 | help="Number of access keys to provision for the respond signer account" 41 | ) 42 | args = parser.parse_args() 43 | 44 | test_signature_lifecycle(args.num_requests, args.num_respond_access_keys) 45 | -------------------------------------------------------------------------------- /pytest/tests/test_signature_request_during_resharing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import pathlib 3 | import sys 4 | 5 | sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) 6 | from common_lib import shared 7 | from common_lib.contracts import load_mpc_contract 8 | 9 | 10 | def test_single_domain(): 11 | """ 12 | Tests that signature requests are still processed while performing key resharing. 13 | 14 | Test scenario: 15 | 1. Start with 2 nodes and 1 domain for Ecdsa as well as Eddsa. 16 | 2. Add 2 more nodes and start a key resharing. 17 | 3. While in resharing, kill one node such that the nodes are stuck in resharing. 18 | 4. Send a signature request to assert that it is processed while the network is in resharing state. 
19 | """ 20 | # Have the nodes disabled 21 | cluster, mpc_nodes = shared.start_cluster_with_mpc(2, 22 | 4, 23 | 1, 24 | load_mpc_contract(), 25 | start_mpc_nodes=True) 26 | 27 | cluster.init_cluster(participants=mpc_nodes[:2], threshold=2) 28 | 29 | # Two new nodes join, increase threshold 30 | cluster.do_resharing( 31 | new_participants=mpc_nodes[:4], 32 | new_threshold=3, 33 | prospective_epoch_id=1, 34 | wait_for_running=False, 35 | ) 36 | 37 | # Kill one node such that resharing does not finish. 38 | mpc_nodes[3].kill() 39 | 40 | # sanity check 41 | assert cluster.wait_for_state( 42 | "Resharing" 43 | ), "State should still be in resharing. 4th node was killed." 44 | 45 | cluster.send_and_await_signature_requests(3) 46 | 47 | # sanity check 48 | assert cluster.wait_for_state( 49 | "Resharing" 50 | ), "State should still be in resharing. 4th node was killed." 51 | -------------------------------------------------------------------------------- /pytest/tests/test_web_endpoints.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Sanity checks that all web endpoints are properly served. 4 | """ 5 | 6 | import sys 7 | import pathlib 8 | import requests 9 | 10 | sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) 11 | from common_lib import shared 12 | from common_lib.contracts import load_mpc_contract 13 | 14 | 15 | def test_web_endpoints(): 16 | cluster, mpc_nodes = shared.start_cluster_with_mpc(2, 2, 1, 17 | load_mpc_contract()) 18 | cluster.init_cluster(participants=mpc_nodes, threshold=2) 19 | cluster.send_and_await_signature_requests(1) 20 | 21 | # ports are hardcoded... 
they come from PortSeed::CLI_FOR_PYTEST.web_port(i) 22 | for port in [20000, 20001]: 23 | response = requests.get(f'http://localhost:{port}/health') 24 | assert response.status_code == 200, response.status_code 25 | assert 'OK' in response.text, response.text 26 | 27 | response = requests.get(f'http://localhost:{port}/metrics') 28 | assert 'mpc_num_signature_requests_indexed' in response.text, response.text 29 | 30 | response = requests.get(f'http://localhost:{port}/debug/tasks') 31 | assert 'root:' in response.text, response.text 32 | 33 | response = requests.get(f'http://localhost:{port}/debug/blocks') 34 | assert 'Recent blocks:' in response.text, response.text 35 | assert '2 sign reqs:' in response.text, response.text 36 | 37 | response = requests.get(f'http://localhost:{port}/debug/signatures') 38 | assert 'Recent Signatures:' in response.text, response.text 39 | assert 'id:' in response.text, response.text 40 | 41 | response = requests.get(f'http://localhost:{port}/debug/contract') 42 | assert 'Contract is in Running state' in response.text, response.text 43 | -------------------------------------------------------------------------------- /pytest/tests/test_without_respond_yaml.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Starts an mpc cluster without respond.yaml configs. 4 | Verifies that signature requests are handled successfully. 
5 | """ 6 | 7 | import os 8 | import sys 9 | import pathlib 10 | import pytest 11 | 12 | sys.path.append(str(pathlib.Path(__file__).resolve().parents[1])) 13 | from common_lib import shared 14 | from common_lib.contracts import load_mpc_contract 15 | 16 | 17 | @pytest.mark.parametrize("num_requests", [(10)]) 18 | def test_without_respond_yaml(num_requests): 19 | cluster, mpc_nodes = shared.start_cluster_with_mpc(2, 2, 0, 20 | load_mpc_contract()) 21 | cluster.init_cluster(participants=mpc_nodes, threshold=2) 22 | 23 | for node in cluster.mpc_nodes: 24 | home_dir_fnames = os.listdir(node.home_dir) 25 | assert 'config.yaml' in home_dir_fnames 26 | assert 'respond.yaml' not in home_dir_fnames 27 | 28 | cluster.send_and_await_signature_requests(num_requests) 29 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | # This needs to be at least as high as the nearcore's toolchain version. 
3 | channel = "1.86.0" 4 | components = ["rustfmt", "clippy", "rust-analyzer", "rust-src"] 5 | -------------------------------------------------------------------------------- /scripts/.env_example: -------------------------------------------------------------------------------- 1 | GCP_PROJECT_ID=$MY_GCP_PROJECT 2 | GCP_KEYSHARE_SECRET_ID=$multichain-sk-share-testnet-0 3 | GCP_LOCAL_ENCRYPTION_KEY_SECRET_ID=$multichain-local-encryption-key-0 4 | GCP_P2P_PRIVATE_KEY_SECRET_ID=$multichain-sign-sk-testnet-0 5 | GCP_ACCOUNT_SK_SECRET_ID=$multichain-account-sk-testnet-0 6 | MPC_ACCOUNT_ID=$MY_MPC_NEAR_ACCOUNT_ID 7 | MPC_LOCAL_ADDRESS=$MY_NODE_URL_OR_PUBLIC_IP 8 | NEAR_BOOT_NODES=ed25519:DwTtb2rs9hx7o67b2dPi4ZEXyrA51FnkPffXfj6TzMVR@5.9.115.9:33567,ed25519:GwttFJu9DA35CktWicx3BZz4NN4egY1BrKr7KmhkqNxA@23.88.7.159:24567,ed25519:84VeANmEVv7dZxAGTJ8dLbTaPTt2iu9e3GqmVoMN6gzR@34.105.102.48:24567,ed25519:GG6ocHjh5R2NGuuoMHrq6baeHnkzXMZGBZDeU2FFin46@136.243.144.110:24567,ed25519:3mRKbmCcBHrWwrowAvLfsbGjzmww7k2XRAw91UePTDRw@45.77.219.166:24567,ed25519:76RF2dPwSZPqFhXbE6Q8t6CMziYg7KvKaTDgViEExCgF@34.34.32.139:24567,ed25519:9AdZkwBaDqHX15P6exjyFrd9Rsrshxpu1bWDPhvAm7gK@3.34.219.176:24567,ed25519:9yftjQdZiaVJUzDFEiUnsKuK4KE4zs9svk1r1wDuiJKb@148.251.195.69:24567,ed25519:BLSfBESa7PZ7iGr5h2bfBtyw6fCz5vzLVMEYdR17c5CL@54.82.200.23:24567,ed25519:E319a9GQ3VmnQsNjtzDQ2XggkddnYiThmf1RUFQVQoZD@135.181.59.45:24568,ed25519:CSbAaNkPeZ26QENwrLGCNUgcZmcAgLRF7sprf5imzHoM@34.147.72.149:24567,ed25519:4TXpEEuzcri3w2qPFmuGmjuHGnGjVNiBANpiuKD6sfh1@34.53.3.180:24567 9 | MPC_CONTRACT_ID=v1.signer-prod.testnet 10 | MPC_ENV=testnet 11 | MPC_HOME_DIR=/data 12 | RUST_BACKTRACE=full 13 | RUST_LOG=mpc=debug,info 14 | -------------------------------------------------------------------------------- /scripts/update-mpc-node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CONTAINER_NAME="mpc-node" 4 | IMAGE_NAME="nearone/mpc-node-gcp:testnet-release" 5 | 
ENV_FILE=".env" 6 | VOLUME_PATH="/home/mpc/data:/data" 7 | 8 | # Get currently running container image ID (if exists) 9 | RUNNING_IMAGE_ID=$(docker inspect --format "{{.Image}}" "$CONTAINER_NAME" 2>/dev/null || echo "") 10 | 11 | # Pull latest image 12 | echo "📥 Pulling latest image: $IMAGE_NAME..." 13 | docker pull "$IMAGE_NAME" 14 | 15 | # Get the latest image ID 16 | LATEST_IMAGE_ID=$(docker inspect --format "{{.Id}}" "$IMAGE_NAME") 17 | 18 | # Compare the running container’s image with the latest pulled image 19 | if [ "$RUNNING_IMAGE_ID" == "$LATEST_IMAGE_ID" ]; then 20 | echo "✅ No update needed. The running container is already using the latest image." 21 | exit 0 22 | fi 23 | 24 | echo "🔄 New image detected. Proceeding with update..." 25 | 26 | # Stop and remove existing container if such exist 27 | echo "🛑 Stopping existing container..." 28 | docker stop "$CONTAINER_NAME" 29 | 30 | echo "🗑 Removing existing container..." 31 | docker rm "$CONTAINER_NAME" 32 | 33 | 34 | # Run the new container 35 | echo "🚀 Starting new container..." 36 | docker run -d --name "$CONTAINER_NAME" -p 8080:8080 -p 80:80 -p 3000:3030 --restart always -v "$VOLUME_PATH" --env-file "$ENV_FILE" "$IMAGE_NAME" 37 | 38 | # Check if the container is running 39 | if docker ps -q -f name="$CONTAINER_NAME"; then 40 | echo "✅ $CONTAINER_NAME is running successfully!" 41 | docker logs --tail 20 "$CONTAINER_NAME" 42 | else 43 | echo "❌ Failed to start $CONTAINER_NAME. Check logs for details." 44 | exit 1 45 | fi 46 | -------------------------------------------------------------------------------- /third-party-licenses/README.md: -------------------------------------------------------------------------------- 1 | # Third party license notices 2 | This folder holds third party license notices generated with `cargo about`. 3 | 4 | ### Why? 5 | Many open source licenses require us to include their license text and copyright notices when we distribute software that uses their code. 
6 | 7 | ### How to re-generate: 8 | 1. Install [cargo about](https://github.com/EmbarkStudios/cargo-about). 9 | 2. From this folder, run `cargo about generate -m ../Cargo.toml about.hbs > licenses.html`. 10 | 11 | ### Web server integration 12 | The `licenses.html` file is automatically served by the MPC node web server at the `/licenses` endpoint. This ensures that third-party license information is accessible to users for compliance purposes when running the node. 13 | -------------------------------------------------------------------------------- /third-party-licenses/about.hbs: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 36 | 37 | 38 | 39 |
40 |
41 |

Third Party Licenses

42 |

This page lists the licenses of the projects used in the NEAR MPC node.

43 |
44 | 45 |

Overview of licenses:

46 |
    47 | {{#each overview}} 48 |
  • {{name}} ({{count}})
  • 49 | {{/each}} 50 |
51 | 52 |

All license text:

53 |
    54 | {{#each licenses}} 55 |
  • 56 |

    {{name}}

    57 |

    Used by:

    58 | 63 |
    {{text}}
    64 |
  • 65 | {{/each}} 66 |
67 |
68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /third-party-licenses/about.toml: -------------------------------------------------------------------------------- 1 | accepted = [ 2 | "Apache-2.0", 3 | "Apache-2.0 WITH LLVM-exception", 4 | "BSD-2-Clause", 5 | "BSD-3-Clause", 6 | "CC0-1.0", 7 | "ISC", 8 | "MIT", 9 | "MPL-2.0", 10 | "Unicode-3.0", 11 | "Zlib", 12 | "OpenSSL", 13 | ] 14 | 15 | ignore-build-dependencies = true 16 | ignore-dev-dependencies = true 17 | 18 | workarounds = [ 19 | "ring", 20 | "rustls", 21 | ] 22 | --------------------------------------------------------------------------------