├── bin └── .gitkeep ├── .github ├── CODEOWNERS └── workflows │ ├── format.yaml │ ├── test.yaml │ ├── lint.yaml │ ├── build-latest-docker-image.yaml │ ├── release.yaml │ └── build-docker-image-and-binaries.yaml ├── .gitignore ├── diagrams ├── protocol.pdf └── keygen.svg ├── src ├── encrypted_sled │ ├── testdata │ │ ├── chacha20poly1305_kdf_known_vector.golden │ │ └── encrypt_with_nonce_known_vector.golden │ ├── constants.rs │ ├── mod.rs │ ├── result.rs │ ├── record.rs │ ├── password.rs │ ├── tests.rs │ └── kv.rs ├── multisig │ ├── mod.rs │ ├── keygen.rs │ ├── key_presence.rs │ ├── keypair.rs │ ├── sign.rs │ ├── service.rs │ └── tests.rs ├── mnemonic │ ├── testdata │ │ └── seed_known_vector.golden │ ├── types.rs │ ├── mod.rs │ ├── file_io.rs │ ├── results.rs │ ├── bip39_bindings.rs │ └── cmd_handler.rs ├── tests │ ├── socket_address.rs │ ├── mod.rs │ ├── mnemonic.rs │ └── tofnd_party.rs ├── kv_manager │ ├── mod.rs │ ├── value.rs │ ├── types.rs │ ├── error.rs │ ├── sled_bindings.rs │ ├── tests.rs │ └── kv.rs ├── main.rs └── config │ └── mod.rs ├── .gitmodules ├── scripts └── copy-binaries-from-image.sh ├── docker-compose.yml ├── docker-compose.test.yml ├── Makefile ├── LICENSE-MIT ├── Dockerfile ├── Cargo.toml ├── entrypoint.sh ├── README.md └── LICENSE-APACHE /bin/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @axelarnetwork/protocol 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /.vscode 3 | bin/tofnd* 4 | /.tofnd 5 | -------------------------------------------------------------------------------- /diagrams/protocol.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/axelarnetwork/tofnd/HEAD/diagrams/protocol.pdf -------------------------------------------------------------------------------- /src/encrypted_sled/testdata/chacha20poly1305_kdf_known_vector.golden: -------------------------------------------------------------------------------- 1 | "63799ab67f9e015ca29694e3df2d1ae66f5ac0aa886974378b9c424a9c7df885" -------------------------------------------------------------------------------- /src/multisig/mod.rs: -------------------------------------------------------------------------------- 1 | mod key_presence; 2 | mod keygen; 3 | mod keypair; 4 | pub mod service; 5 | mod sign; 6 | 7 | #[cfg(test)] 8 | mod tests; 9 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "proto"] 2 | path = proto 3 | url = https://github.com/axelarnetwork/grpc-protobuf.git 4 | branch = main 5 | fetchRecurseSubmodules = true 6 | -------------------------------------------------------------------------------- /src/mnemonic/testdata/seed_known_vector.golden: -------------------------------------------------------------------------------- 1 | "0bde96f14c35a66235478e0c16c152fcaf6301e4d9a81d3febc50879fe7e5438e6a8dd3e39bdf3ab7b12d6b44218710e17d7a2844ee9633fab0e03d9a6c8569b" -------------------------------------------------------------------------------- /scripts/copy-binaries-from-image.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | container_id=$(docker create axelar/tofnd:latest) 4 | docker cp "$container_id":/usr/local/bin/tofnd ./bin/ 5 | docker rm -v "$container_id" 6 | -------------------------------------------------------------------------------- /src/encrypted_sled/constants.rs: -------------------------------------------------------------------------------- 1 | //! Constants for [encrypted_sled] 2 | pub(super) const PASSWORD_VERIFICATION_KEY: &str = "verification_key"; 3 | pub(super) const PASSWORD_VERIFICATION_VALUE: &str = "verification_value"; 4 | pub(super) const PASSWORD_SALT_KEY: &[u8] = b"password_salt_key"; 5 | pub(super) const UNSAFE_PASSWORD: &str = "tofnd_unsafe_password"; 6 | -------------------------------------------------------------------------------- /src/mnemonic/types.rs: -------------------------------------------------------------------------------- 1 | //! Mnemonic types 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use zeroize::Zeroize; 5 | 6 | /// Mnemonic type needs to be known globaly to create/access the mnemonic kv store 7 | #[derive(Zeroize, Debug, Clone, Serialize, Deserialize)] 8 | #[zeroize(drop)] 9 | pub struct Entropy(pub Vec); 10 | 11 | #[derive(Zeroize, Clone)] 12 | #[zeroize(drop)] 13 | pub struct Password(pub String); 14 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # usage: 2 | # $ docker-compose up 3 | # or 4 | # $ docker-compose run -e MNEMONIC_CMD= tofnd 5 | 6 | volumes: 7 | tofnd: 8 | 9 | services: 10 | tofnd: 11 | build: . 12 | container_name: tofnd 13 | hostname: tofnd 14 | image: axelar/tofnd 15 | volumes: 16 | - tofnd:/.tofnd 17 | environment: 18 | # available cmds: auto, create, existing (default), import, export 19 | - MNEMONIC_CMD=auto 20 | -------------------------------------------------------------------------------- /src/tests/socket_address.rs: -------------------------------------------------------------------------------- 1 | //! socket address convertion tests 2 | 3 | use crate::addr; 4 | 5 | #[test] 6 | fn test_ips() { 7 | let valid_ips = ["0.0.0.0", "127.0.0.1"]; 8 | let invalid_ips = ["256.0.0.0"]; 9 | let ports = [0, 65535]; // no need to check for invalid ports because 0 <= u16 <= 65535 10 | 11 | valid_ips.map(|a| ports.map(|p| assert!(addr(a, p).is_ok()))); 12 | invalid_ips.map(|a| ports.map(|p| assert!(addr(a, p).is_err()))); 13 | } 14 | -------------------------------------------------------------------------------- /docker-compose.test.yml: -------------------------------------------------------------------------------- 1 | # usage: 2 | # $ docker-compose up 3 | # or 4 | # $ docker-compose -f docker-compose.test.yml run -e MNEMONIC_CMD= tofnd 5 | 6 | volumes: 7 | tofnd: 8 | 9 | services: 10 | tofnd: 11 | build: . 12 | container_name: tofnd-test 13 | hostname: tofnd 14 | image: axelar/tofnd 15 | volumes: 16 | - tofnd:/.tofnd 17 | environment: 18 | - NOPASSWORD=true 19 | - MNEMONIC_CMD=auto 20 | - TOFND_HOME=.tofnd 21 | -------------------------------------------------------------------------------- /src/tests/mod.rs: -------------------------------------------------------------------------------- 1 | mod mnemonic; 2 | mod socket_address; 3 | mod tofnd_party; 4 | 5 | lazy_static::lazy_static! 
{ 6 | static ref MSG_TO_SIGN: Vec = vec![42; 32]; 7 | } 8 | 9 | const SLEEP_TIME: u64 = 1; 10 | const MAX_TRIES: u32 = 3; 11 | pub const DEFAULT_TEST_IP: &str = "0.0.0.0"; 12 | pub const DEFAULT_TEST_PORT: u16 = 0; // use port 0 and let the OS decide 13 | 14 | // Struct to pass in TofndParty constructor. 15 | struct InitParty { 16 | party_index: usize, 17 | } 18 | 19 | impl InitParty { 20 | fn new(party_index: usize) -> Self { 21 | Self { party_index } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /.github/workflows/format.yaml: -------------------------------------------------------------------------------- 1 | name: Formatting 2 | 3 | on: 4 | - pull_request 5 | 6 | jobs: 7 | format: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout code 11 | uses: actions/checkout@v4 12 | with: 13 | submodules: recursive 14 | 15 | - name: Install stable toolchain 16 | uses: actions-rs/toolchain@v1 17 | with: 18 | profile: minimal 19 | toolchain: 1.78.0 20 | override: true 21 | components: rustfmt 22 | 23 | - name: Run cargo fmt 24 | uses: actions-rs/cargo@v1 25 | with: 26 | command: fmt 27 | args: --all -- --check 28 | -------------------------------------------------------------------------------- /src/encrypted_sled/mod.rs: -------------------------------------------------------------------------------- 1 | //! Wrap a layer of encryption around [sled]. We use [chacha20poly1305] to encrypt/decrypt values. 2 | //! Specifically, use [chacha20poly1305::XChaCha20] because the nonces are generated randomly. 3 | //! To create an new [Db], an [Entropy] needs to be provided. 4 | 5 | mod constants; 6 | mod kv; 7 | mod password; 8 | mod record; 9 | mod result; 10 | 11 | // match the API of sled 12 | pub use kv::EncryptedDb as Db; 13 | pub use password::{Password, PasswordMethod}; 14 | pub use result::EncryptedDbError as Error; 15 | pub use result::EncryptedDbResult as Result; 16 | 17 | #[cfg(test)] 18 | mod tests; 19 | 20 | #[cfg(test)] 21 | pub use tests::get_test_password; 22 | -------------------------------------------------------------------------------- /src/multisig/keygen.rs: -------------------------------------------------------------------------------- 1 | use super::{keypair::KeyPair, service::MultisigService}; 2 | use crate::{ 3 | proto::{Algorithm, KeygenRequest}, 4 | TofndResult, 5 | }; 6 | use anyhow::anyhow; 7 | 8 | impl MultisigService { 9 | pub(super) async fn handle_keygen(&self, request: &KeygenRequest) -> TofndResult> { 10 | let algorithm = Algorithm::try_from(request.algorithm) 11 | .map_err(|_| anyhow!("Invalid algorithm: {}", request.algorithm))?; 12 | let secret_recovery_key = self.kv_manager.seed().await?; 13 | 14 | Ok( 15 | KeyPair::new(&secret_recovery_key, request.key_uid.as_bytes(), algorithm)? 
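// key derivation is deterministic in the mnemonic seed and key_uid, so only the encoded public
// key is returned here; the private key is re-derived from the same seed at signing time (see sign.rs)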
16 | .encoded_verifying_key(), 17 | ) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/encrypted_sled/testdata/encrypt_with_nonce_known_vector.golden: -------------------------------------------------------------------------------- 1 | { 2 | "encrypted_value": [ 3 | 98, 4 | 227, 5 | 247, 6 | 125, 7 | 18, 8 | 128, 9 | 210, 10 | 249, 11 | 235, 12 | 248, 13 | 0, 14 | 112, 15 | 6, 16 | 3, 17 | 156, 18 | 239, 19 | 40, 20 | 46, 21 | 208, 22 | 108, 23 | 246, 24 | 75, 25 | 59, 26 | 68, 27 | 48, 28 | 2 29 | ], 30 | "nonce": [ 31 | 1, 32 | 1, 33 | 1, 34 | 1, 35 | 1, 36 | 1, 37 | 1, 38 | 1, 39 | 1, 40 | 1, 41 | 1, 42 | 1, 43 | 1, 44 | 1, 45 | 1, 46 | 1, 47 | 1, 48 | 1, 49 | 1, 50 | 1, 51 | 1, 52 | 1, 53 | 1, 54 | 1 55 | ] 56 | } -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Testing 2 | 3 | on: 4 | - pull_request 5 | 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout code and submodule 11 | uses: actions/checkout@v4 12 | with: 13 | submodules: 'recursive' 14 | 15 | - name: Install protoc 16 | uses: arduino/setup-protoc@v3 17 | with: 18 | repo-token: ${{ secrets.GITHUB_TOKEN }} 19 | 20 | - name: Install stable toolchain 21 | uses: actions-rs/toolchain@v1 22 | with: 23 | profile: minimal 24 | toolchain: 1.78.0 25 | override: true 26 | components: rustfmt 27 | 28 | - name: Run cargo test 29 | run: cargo test --release --all-features 30 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: docker-image 2 | docker-image: git-submodule-setup 3 | @DOCKER_BUILDKIT=1 docker build --ssh default -t axelar/tofnd . 
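# requires BuildKit and a running ssh-agent: the Dockerfile's `--mount=type=ssh` steps forward the agent to `cargo build` and `cargo install`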
4 | 5 | .PHONY: copy-binary 6 | copy-binary-from-image: guard-SEMVER 7 | ./scripts/copy-binaries-from-image.sh 8 | mv bin/tofnd bin/tofnd-linux-amd64-${SEMVER} 9 | cd bin && sha256sum * > SHA256SUMS 10 | 11 | .PHONY: upload-binaries-to-s3 12 | upload-binaries-to-s3: guard-S3_PATH 13 | aws s3 cp ./bin ${S3_PATH}/ --recursive 14 | 15 | .PHONY: docker-image-all 16 | docker-image-all: git-submodule-setup 17 | make docker-image 18 | 19 | .PHONY: git-submodule-setup 20 | git-submodule-setup: 21 | git submodule init 22 | git submodule update 23 | 24 | guard-%: 25 | @ if [ -z '${${*}}' ]; then echo 'Environment variable $* not set' && exit 1; fi 26 | -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: Linting 2 | 3 | on: 4 | - pull_request 5 | 6 | jobs: 7 | lint: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout code and submodule 11 | uses: actions/checkout@v4 12 | with: 13 | submodules: 'recursive' 14 | 15 | - name: Install protoc 16 | uses: arduino/setup-protoc@v3 17 | with: 18 | repo-token: ${{ secrets.GITHUB_TOKEN }} 19 | 20 | - name: Install stable toolchain 21 | uses: actions-rs/toolchain@v1 22 | with: 23 | profile: minimal 24 | toolchain: 1.78.0 25 | override: true 26 | components: rustfmt, clippy 27 | 28 | - name: Run cargo clippy with all features 29 | uses: actions-rs/cargo@v1 30 | with: 31 | command: clippy 32 | args: --all-features --all-targets -- -D warnings 33 | -------------------------------------------------------------------------------- /src/kv_manager/mod.rs: -------------------------------------------------------------------------------- 1 | //! Key-Value Store service. We use [sled] for the underlying db implementation. 2 | //! For every kvstore initialized, a daemon is spawned that serves basic 3 | //! database functionality using the "actor" pattern ([kv::Kv] is the "handle"): https://ryhl.io/blog/actors-with-tokio/ 4 | //! See https://tokio.rs/tokio/tutorial/channels for tokio channels 5 | //! See [kv] module for the public API. 6 | 7 | /// Custom error types for [kv] and [sled_bindings] 8 | pub mod error; 9 | /// public API of kv manager 10 | mod kv; 11 | /// sled bindings for basic kv operations 12 | mod sled_bindings; 13 | /// definition of kv_manager types and default paths 14 | mod types; 15 | /// wrapers for values stored by tofnd services 16 | mod value; 17 | 18 | pub use types::KeyReservation; 19 | pub use value::KvManager; 20 | 21 | // tests for low-level operations 22 | #[cfg(test)] 23 | mod tests; 24 | -------------------------------------------------------------------------------- /src/mnemonic/mod.rs: -------------------------------------------------------------------------------- 1 | //! This module handles mnemonic-related commands. A kv-store is used to insert and retrieve an [crate::gg20::Entropy]. 2 | //! 3 | //! Currently, the API supports the following [Cmd] commands: 4 | //! [Cmd::Existing]: Starts the gRPC daemon existing mnemonic; Fails if mnemonic does not exist. 5 | //! [Cmd::Create]: Creates a new mnemonic, inserts it in the kv-store, exports it to a file and exits; Fails if a mnemonic exists. 6 | //! [Cmd::Import]: Prompts user to give a new mnemonic, inserts it in the kv-store and exits; Fails if a mnemonic exists or if the provided string is not a valid bip39 mnemonic. 7 | //! 
[Cmd::Export]: Writes the existing mnemonic to a file and exits; Succeeds when there is an existing mnemonic, fails otherwise. 8 | 9 | mod bip39_bindings; 10 | mod cmd_handler; 11 | mod file_io; 12 | mod results; 13 | mod types; 14 | 15 | pub use cmd_handler::Cmd; 16 | pub use file_io::FileIo; 17 | pub use types::Entropy; 18 | -------------------------------------------------------------------------------- /src/tests/mnemonic.rs: -------------------------------------------------------------------------------- 1 | //! mnemonic tests at the TofndParty level 2 | 3 | use crate::mnemonic::Cmd; 4 | use testdir::testdir; 5 | 6 | use super::{tofnd_party::TofndParty, InitParty}; 7 | 8 | fn dummy_init_party() -> InitParty { 9 | InitParty::new(0) 10 | } 11 | 12 | #[should_panic] 13 | #[tokio::test] 14 | async fn mnemonic_existing() { 15 | let dir = testdir!(); 16 | // dummy init data 17 | let init_party = dummy_init_party(); 18 | // Existing should panic 19 | let _ = TofndParty::new(init_party, Cmd::Existing, &dir).await; 20 | } 21 | 22 | #[tokio::test] 23 | async fn mnemonic_create() { 24 | let dir = testdir!(); 25 | // dummy init data 26 | let init_party = dummy_init_party(); 27 | // Create should succeed 28 | let _ = TofndParty::new(init_party, Cmd::Create, &dir).await; 29 | } 30 | 31 | #[should_panic] 32 | #[tokio::test] 33 | async fn mnemonic_export_panic() { 34 | let dir = testdir!(); 35 | // dummy init data 36 | let init_party = dummy_init_party(); 37 | // Export should fail 38 | let _ = TofndParty::new(init_party, Cmd::Export, &dir).await; 39 | } 40 | -------------------------------------------------------------------------------- /.github/workflows/build-latest-docker-image.yaml: -------------------------------------------------------------------------------- 1 | name: Build and push latest docker image 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | workflow_dispatch: 8 | 9 | jobs: 10 | build-and-push-latest-docker-image: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout code and submodule 14 | uses: actions/checkout@v4 15 | with: 16 | submodules: recursive 17 | 18 | - uses: nelonoel/branch-name@v1.0.1 19 | 20 | - name: fetch tags 21 | run: | 22 | git fetch --unshallow 23 | 24 | - name: Install SSH key 25 | uses: webfactory/ssh-agent@v0.4.1 26 | with: 27 | ssh-private-key: ${{ secrets.CICD_RSA_KEY }} 28 | 29 | - name: Build docker image 30 | run: | 31 | make docker-image 32 | 33 | - name: Push to ECR 34 | uses: jwalton/gh-ecr-push@v1 35 | with: 36 | access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 37 | secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 38 | region: us-east-2 39 | local-image: axelar/tofnd 40 | image: tofnd:${{ github.sha }} 41 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2021 Axelar Foundation 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 
16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /src/encrypted_sled/result.rs: -------------------------------------------------------------------------------- 1 | //! Custom error handling 2 | 3 | #[derive(thiserror::Error, Debug)] 4 | pub enum EncryptedDbError { 5 | #[error("Your kv store may be corrupted. Sled error: {0}")] 6 | CorruptedKv(sled::Error), 7 | #[error("Password read error: {0}")] 8 | PasswordRead(#[from] std::io::Error), // rpassword::read_password() Error 9 | #[error("Password scrypt params error: {0}")] 10 | PasswordScryptParams(#[from] scrypt::errors::InvalidParams), 11 | #[error("Password scrypt error: {0}")] 12 | PasswordScryptError(#[from] scrypt::errors::InvalidOutputLen), 13 | #[error("Sled error: {0}")] 14 | SledError(#[from] sled::Error), 15 | #[error("Serialization error: failed to serialize the encrypted record")] 16 | Serialization, 17 | #[error("Deserialization error: failed to deserialize encrypted record bytes")] 18 | Deserialization, 19 | #[error("ChaCha20 encryption error: {0}")] 20 | Encryption(String), 21 | #[error("ChaCha20 decryption error: {0}")] 22 | Decryption(String), 23 | #[error("Wrong password")] 24 | WrongPassword, 25 | #[error("Missing password salt")] 26 | MissingPasswordSalt, 27 | #[error("Malformed password salt: {0}")] 28 | MalformedPasswordSalt(#[from] std::array::TryFromSliceError), 29 | } 30 | pub type EncryptedDbResult = Result; 31 | -------------------------------------------------------------------------------- /src/encrypted_sled/record.rs: -------------------------------------------------------------------------------- 1 | //! The value of [super::Db]. 2 | 3 | use chacha20poly1305::XNonce; 4 | use serde::{Deserialize, Serialize}; 5 | use sled::IVec; 6 | 7 | use tofn::sdk::api::{deserialize, serialize}; 8 | 9 | use super::result::{ 10 | EncryptedDbError::{Deserialization, Serialization}, 11 | EncryptedDbResult, 12 | }; 13 | 14 | /// The value of [super::Db]. 15 | #[derive(Serialize, Deserialize, Debug)] 16 | pub(super) struct EncryptedRecord { 17 | encrypted_value: Vec, 18 | nonce: [u8; 24], 19 | } 20 | 21 | impl EncryptedRecord { 22 | pub(super) fn new(encrypted_value: Vec, nonce: XNonce) -> Self { 23 | EncryptedRecord { 24 | encrypted_value, 25 | nonce: nonce.into(), 26 | } 27 | } 28 | 29 | /// Convert a [EncryptedRecord] to bytes using serde. 30 | pub(super) fn to_bytes(&self) -> EncryptedDbResult> { 31 | serialize(&self).map_err(|_| Serialization) 32 | } 33 | 34 | /// Convert bytes to a [EncryptedRecord] using serde. 
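/// Returns [Deserialization] if the stored bytes cannot be decoded into a record.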
35 | pub(super) fn from_bytes(bytes: &IVec) -> EncryptedDbResult { 36 | deserialize(bytes).ok_or(Deserialization) 37 | } 38 | } 39 | 40 | impl From for (Vec, XNonce) { 41 | fn from(record: EncryptedRecord) -> Self { 42 | (record.encrypted_value, record.nonce.into()) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/multisig/key_presence.rs: -------------------------------------------------------------------------------- 1 | //! This module handles the key_presence gRPC. 2 | //! Request includes [proto::message_in::Data::KeyPresenceRequest] struct and encrypted recovery info. 3 | 4 | use super::service::MultisigService; 5 | 6 | // logging 7 | use tracing::debug; 8 | 9 | // error handling 10 | use crate::{ 11 | proto::{self, Algorithm}, 12 | TofndResult, 13 | }; 14 | use anyhow::anyhow; 15 | 16 | impl MultisigService { 17 | pub(super) async fn handle_key_presence( 18 | &self, 19 | request: proto::KeyPresenceRequest, 20 | ) -> TofndResult { 21 | let algorithm = Algorithm::try_from(request.algorithm) 22 | .map_err(|_| anyhow!("Invalid algorithm: {}", request.algorithm))?; 23 | 24 | // check if mnemonic is available 25 | let _ = self 26 | .find_matching_seed(&request.key_uid, &request.pub_key, algorithm) 27 | .await?; 28 | 29 | // key presence for multisig always returns `Present`. 30 | // this is done in order to not break compatibility with axelar-core 31 | // TODO: better handling for multisig key presence. 32 | debug!( 33 | "[{}] key presence check for multisig always return Present", 34 | request.key_uid 35 | ); 36 | Ok(proto::key_presence_response::Response::Present) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/kv_manager/value.rs: -------------------------------------------------------------------------------- 1 | use std::{convert::TryFrom, path::PathBuf}; 2 | use tofn::sdk::api::{deserialize, serialize}; 3 | 4 | use crate::{ 5 | encrypted_sled::Password, 6 | mnemonic::{Entropy, FileIo}, 7 | }; 8 | 9 | use super::{ 10 | error::{InnerKvError, KvResult}, 11 | kv::Kv, 12 | }; 13 | 14 | /// Kv manager for grpc services 15 | #[derive(Clone)] 16 | pub struct KvManager { 17 | kv: Kv, 18 | io: FileIo, 19 | } 20 | 21 | impl KvManager { 22 | pub fn new(root: PathBuf, password: Password) -> KvResult { 23 | Ok(KvManager { 24 | kv: Kv::::new(root.clone(), password)?, 25 | io: FileIo::new(root), 26 | }) 27 | } 28 | pub fn kv(&self) -> &Kv { 29 | &self.kv 30 | } 31 | pub fn io(&self) -> &FileIo { 32 | &self.io 33 | } 34 | } 35 | 36 | /// Value type stored in the kv-store 37 | type KvValue = Vec; 38 | 39 | /// Create Entropy from KvValue 40 | impl TryFrom for Entropy { 41 | type Error = InnerKvError; 42 | fn try_from(v: KvValue) -> Result { 43 | deserialize(&v).ok_or(InnerKvError::DeserializationErr) 44 | } 45 | } 46 | 47 | /// Create KvValue from Entropy 48 | impl TryFrom for KvValue { 49 | type Error = InnerKvError; 50 | fn try_from(v: Entropy) -> Result { 51 | serialize(&v).map_err(|_| InnerKvError::SerializationErr) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:experimental 2 | 3 | FROM rust:1.78.0-bullseye as builder 4 | 5 | RUN set -ex \ 6 | && apt-get update \ 7 | && apt-get install -qq --no-install-recommends ca-certificates openssh-client git make protobuf-compiler 8 | 9 | WORKDIR /tofnd 10 | 11 | COPY 
./Cargo.toml . 12 | COPY ./Cargo.lock . 13 | 14 | # build dependencies separately 15 | RUN mkdir src && echo 'fn main() {}' > src/main.rs 16 | RUN --mount=type=ssh cargo build --release 17 | 18 | COPY src ./src 19 | COPY proto ./proto 20 | COPY build.rs ./build.rs 21 | 22 | RUN rustup component add rustfmt 23 | 24 | # read features argument. Use "default" because [ -z "$features" ] doesn't work 25 | ARG features="default" 26 | RUN echo "installing with features: ["$features"]" 27 | 28 | # install tofnd 29 | # use --locked for CI builds: https://doc.rust-lang.org/cargo/commands/cargo-install.html#manifest-options 30 | RUN --mount=type=ssh if [ "$features" = "default" ]; then \ 31 | cargo install --locked --path .; \ 32 | else \ 33 | cargo install --locked --features ${features} --path .; \ 34 | fi 35 | 36 | FROM debian:bullseye-slim as runner 37 | RUN addgroup --system --gid 1001 axelard && adduser --system --uid 1000 --ingroup axelard axelard 38 | RUN mkdir /.tofnd && chown axelard /.tofnd 39 | USER axelard 40 | COPY --from=builder /tofnd/target/release/tofnd /usr/local/bin 41 | 42 | COPY ./entrypoint.sh / 43 | 44 | VOLUME [ "/.tofnd" ] 45 | 46 | ENV MNEMONIC_CMD "" 47 | ENV NOPASSWORD "" 48 | ENV TOFND_HOME "" 49 | ENTRYPOINT ["/entrypoint.sh"] 50 | -------------------------------------------------------------------------------- /src/kv_manager/types.rs: -------------------------------------------------------------------------------- 1 | //! useful types and default paths for the kv_manager 2 | 3 | use std::fmt::Debug; 4 | 5 | // default KV store names 6 | pub const DEFAULT_KV_NAME: &str = "kv"; 7 | 8 | /// default path of kv store 9 | /// the full name of the kv store is "DEFAULT_KV_PATH/kv_name" 10 | pub(super) const DEFAULT_KV_PATH: &str = "kvstore"; 11 | 12 | /// default value for reserved key 13 | pub(super) const DEFAULT_RESERVE: &str = ""; 14 | 15 | /// Returned from a successful `ReserveKey` command 16 | #[derive(Debug)] // disallow derive Clone, Copy 17 | pub struct KeyReservation { 18 | pub(super) key: String, 19 | } 20 | /// kv store needs PartialEq to complare values 21 | impl PartialEq for KeyReservation { 22 | fn eq(&self, other: &Self) -> bool { 23 | self.key == other.key 24 | } 25 | } 26 | 27 | // Provided by the requester and used by the manager task to send the command response back to the requester. 28 | type Responder = tokio::sync::oneshot::Sender>; 29 | 30 | #[derive(Debug)] 31 | pub(super) enum Command { 32 | ReserveKey { 33 | key: String, 34 | resp: Responder, 35 | }, 36 | UnreserveKey { 37 | reservation: KeyReservation, 38 | }, 39 | Put { 40 | reservation: KeyReservation, 41 | value: V, 42 | resp: Responder<()>, 43 | }, 44 | Get { 45 | key: String, // TODO should be &str except lifetimes... 46 | resp: Responder, 47 | }, 48 | Exists { 49 | key: String, // TODO should be &str except lifetimes... 
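// replies with whether the key is present in the kv store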
50 | resp: Responder, 51 | }, 52 | Delete { 53 | key: String, 54 | resp: Responder<()>, 55 | }, 56 | } 57 | -------------------------------------------------------------------------------- /src/multisig/keypair.rs: -------------------------------------------------------------------------------- 1 | use crate::{proto::Algorithm, TofndResult}; 2 | use anyhow::anyhow; 3 | use tofn::{ 4 | ecdsa, ed25519, 5 | sdk::api::{MessageDigest, SecretRecoveryKey}, 6 | }; 7 | 8 | pub enum KeyPair { 9 | Ecdsa(ecdsa::KeyPair), 10 | Ed25519(ed25519::KeyPair), 11 | } 12 | 13 | impl KeyPair { 14 | /// Create a new `KeyPair` from the provided `SecretRecoveryKey` and `session_nonce` deterministically, for the given `algorithm`. 15 | pub fn new( 16 | secret_recovery_key: &SecretRecoveryKey, 17 | session_nonce: &[u8], 18 | algorithm: Algorithm, 19 | ) -> TofndResult { 20 | Ok(match algorithm { 21 | Algorithm::Ecdsa => { 22 | let key_pair = ecdsa::keygen(secret_recovery_key, session_nonce) 23 | .map_err(|_| anyhow!("Cannot generate keypair"))?; 24 | 25 | Self::Ecdsa(key_pair) 26 | } 27 | 28 | Algorithm::Ed25519 => { 29 | let key_pair = ed25519::keygen(secret_recovery_key, session_nonce) 30 | .map_err(|_| anyhow!("Cannot generate keypair"))?; 31 | 32 | Self::Ed25519(key_pair) 33 | } 34 | }) 35 | } 36 | 37 | pub fn encoded_verifying_key(&self) -> Vec { 38 | match self { 39 | Self::Ecdsa(key_pair) => key_pair.encoded_verifying_key().into(), 40 | Self::Ed25519(key_pair) => key_pair.encoded_verifying_key().into(), 41 | } 42 | } 43 | 44 | pub fn sign(&self, msg_to_sign: &MessageDigest) -> TofndResult> { 45 | match self { 46 | Self::Ecdsa(key_pair) => ecdsa::sign(key_pair.signing_key(), msg_to_sign), 47 | Self::Ed25519(key_pair) => ed25519::sign(key_pair, msg_to_sign), 48 | } 49 | .map_err(|_| anyhow!("signing failed")) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/encrypted_sled/password.rs: -------------------------------------------------------------------------------- 1 | //! Handles the generation of an [Entropy] from user's password using [scrypt] pbkdf. 2 | use std::convert::{TryFrom, TryInto}; 3 | 4 | use super::{constants::UNSAFE_PASSWORD, result::EncryptedDbResult}; 5 | 6 | use sled::IVec; 7 | use zeroize::Zeroize; 8 | 9 | /// Safely store strings 10 | // TODO use https://docs.rs/secrecy ? 11 | #[derive(Zeroize, Clone)] 12 | #[zeroize(drop)] 13 | pub struct Password(String); 14 | 15 | impl AsRef<[u8]> for Password { 16 | fn as_ref(&self) -> &[u8] { 17 | self.0.as_bytes() 18 | } 19 | } 20 | 21 | pub struct PasswordSalt([u8; 32]); 22 | 23 | impl AsRef<[u8]> for PasswordSalt { 24 | fn as_ref(&self) -> &[u8] { 25 | &self.0 26 | } 27 | } 28 | 29 | impl From<[u8; 32]> for PasswordSalt { 30 | fn from(bytes: [u8; 32]) -> Self { 31 | Self(bytes) 32 | } 33 | } 34 | 35 | impl TryFrom for PasswordSalt { 36 | type Error = std::array::TryFromSliceError; 37 | 38 | fn try_from(value: IVec) -> Result { 39 | Ok(Self(value.as_ref().try_into()?)) 40 | } 41 | } 42 | 43 | use rpassword::read_password; 44 | 45 | /// Specifies how [password] will be retrieved 46 | #[derive(Clone, Debug)] 47 | pub enum PasswordMethod { 48 | NoPassword, 49 | Prompt, 50 | } 51 | impl PasswordMethod { 52 | /// Execute the password method to retrieve a password 53 | pub fn execute(&self) -> EncryptedDbResult { 54 | Ok(match self { 55 | Self::NoPassword => Password(UNSAFE_PASSWORD.to_string()), 56 | Self::Prompt => { 57 | println!("Please type your tofnd password:"); 58 | Password(read_password()?) 
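// wrap the reply in Password right away so it is zeroized on drop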
59 | } 60 | }) 61 | } 62 | } 63 | 64 | #[cfg(test)] 65 | impl From<&str> for Password { 66 | fn from(value: &str) -> Self { 67 | Self(value.to_string()) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/kv_manager/error.rs: -------------------------------------------------------------------------------- 1 | //! Custom error types for [kv_manager]. 2 | 3 | /// Note: While tofnd generally uses the [anyhow] crate for error handling, we 4 | /// use the [thiserror] crate here for two reasons: 5 | /// 1. [crate::gg20::mnemonic] errors can be potentially consumed by the caller 6 | /// of tofnd, so an analytical display of errors might be helpful in the future. 7 | /// One of the errors that are propagated to [crate::gg20::mnemonic] are 8 | /// [crate::kv_manager::error]s 9 | /// 2. This can be used as an example on how analytical error handling can be 10 | /// incorporated in other modules 11 | /// For more info, see discussion in https://github.com/axelarnetwork/tofnd/issues/28 12 | use crate::encrypted_sled; 13 | 14 | #[allow(clippy::enum_variant_names)] // allow Err postfix 15 | #[derive(thiserror::Error, Debug)] 16 | pub enum KvError { 17 | #[error("Kv initialization Error: {0}")] 18 | InitErr(#[from] encrypted_sled::Error), 19 | #[error("Recv Error: {0}")] // errors receiving from "actor pattern"'s channels 20 | RecvErr(#[from] tokio::sync::oneshot::error::RecvError), 21 | #[error("Send Error: {0}")] // errors sending to "actor pattern"'s channels 22 | SendErr(String), 23 | #[error("Reserve Error: {0}")] 24 | ReserveErr(InnerKvError), 25 | #[error("Put Error: {0}")] 26 | PutErr(InnerKvError), 27 | #[error("Get Error: {0}")] 28 | GetErr(InnerKvError), 29 | #[error("Delete Error: {0}")] 30 | DeleteErr(InnerKvError), 31 | #[error("Exits Error: {0}")] 32 | ExistsErr(InnerKvError), 33 | } 34 | pub type KvResult = Result; 35 | 36 | #[allow(clippy::enum_variant_names)] // allow Err postfix 37 | #[derive(thiserror::Error, Debug)] 38 | pub enum InnerKvError { 39 | #[error("Sled Error: {0}")] // Delegate Sled's errors 40 | SledErr(#[from] encrypted_sled::Error), 41 | #[error("Logical Error: {0}")] // Logical errors (eg double deletion) 42 | LogicalErr(String), 43 | #[error("Serialization Error: failed to serialize value")] 44 | SerializationErr, 45 | #[error("Deserialization Error: failed to deserialize kvstore bytes")] 46 | DeserializationErr, 47 | } 48 | pub(super) type InnerKvResult = Result; 49 | -------------------------------------------------------------------------------- /src/multisig/sign.rs: -------------------------------------------------------------------------------- 1 | use super::{keypair::KeyPair, service::MultisigService}; 2 | use crate::{ 3 | proto::{Algorithm, SignRequest}, 4 | TofndResult, 5 | }; 6 | use anyhow::anyhow; 7 | use std::convert::TryInto; 8 | use tofn::sdk::api::SecretRecoveryKey; 9 | 10 | impl MultisigService { 11 | pub(super) async fn handle_sign(&self, request: &SignRequest) -> TofndResult> { 12 | let algorithm = Algorithm::try_from(request.algorithm) 13 | .map_err(|_| anyhow!("Invalid algorithm: {}", request.algorithm))?; 14 | 15 | // re-generate secret key from seed, then sign 16 | let secret_recovery_key = self 17 | .find_matching_seed(&request.key_uid, &request.pub_key, algorithm) 18 | .await?; 19 | 20 | let key_pair = KeyPair::new(&secret_recovery_key, request.key_uid.as_bytes(), algorithm) 21 | .map_err(|_| anyhow!("key re-generation failed"))?; 22 | 23 | let signature = key_pair 24 | 
.sign(&request.msg_to_sign.as_slice().try_into()?) 25 | .map_err(|_| anyhow!("sign failed"))?; 26 | 27 | Ok(signature) 28 | } 29 | 30 | /// Given a `key_uid` and `pub_key`, find the matching mnemonic. 31 | /// If `pub_key` is [None], use the currently active mnemonic. 32 | pub(super) async fn find_matching_seed( 33 | &self, 34 | key_uid: &str, 35 | pub_key: &[u8], 36 | algorithm: Algorithm, 37 | ) -> TofndResult { 38 | if pub_key.is_empty() { 39 | return self 40 | .kv_manager 41 | .seed() 42 | .await 43 | .map_err(|_| anyhow!("could not find current mnemonic")); 44 | } 45 | 46 | let seed_key_iter = self 47 | .kv_manager 48 | .seed_key_iter() 49 | .await 50 | .map_err(|_e| anyhow!("could not iterate over mnemonic keys"))?; 51 | 52 | for seed_key in seed_key_iter { 53 | let secret_recovery_key = self.kv_manager.get_seed(&seed_key).await?; 54 | 55 | let key_pair = KeyPair::new(&secret_recovery_key, key_uid.as_bytes(), algorithm) 56 | .map_err(|_| anyhow!("key re-generation failed"))?; 57 | 58 | if pub_key == key_pair.encoded_verifying_key() { 59 | return Ok(secret_recovery_key); 60 | } 61 | } 62 | 63 | Err(anyhow!( 64 | "could not find a matching mnemonic for key {:?}", 65 | key_uid 66 | )) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "tofnd" 3 | version = "1.0.1" 4 | authors = ["Interoplabs Eng "] 5 | edition = "2021" 6 | license = "MIT OR Apache-2.0" 7 | description = "A cryptographic signing service, used by the Axelar network" 8 | keywords = ["cryptography", "blockchain", "axelar", "ecdsa", "ed25519"] 9 | 10 | [dependencies] 11 | tofn = { version = "1.1" } 12 | 13 | # logging 14 | log = { version = "0.4",default-features = false } 15 | tracing = { version = "0.1", default-features = false } 16 | tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } 17 | atty = { version = "0.2", default-features = false } 18 | 19 | # CLI args 20 | clap = { version = "4.5", default-features = false, features = ["std", "cargo", "env", "string", "help"] } 21 | 22 | # kv store 23 | sled = { version = "0.34", default-features = false } 24 | serde = { version = "1.0", features = ["derive"], default-features = false } 25 | dirs = { version = "5.0", default-features = false } 26 | 27 | # kv store encryption 28 | chacha20poly1305 = { version = "0.10", features = ["alloc"], default-features = false } 29 | rand = { version = "0.8", default-features = false } 30 | rpassword = { version = "5.0", default-features = false } # future versions don't support reading both from stdin and tty at the same time 31 | scrypt = { version = "0.11", default-features = false, features = ["std"] } 32 | 33 | # gRPC server 34 | tonic = { version = "0.12" } # ensure tonic-build version matches this 35 | prost = { version = "0.13" } 36 | 37 | # async runtime 38 | tokio = { version = "1.38", features = ["rt-multi-thread", "macros", "signal", "net", "sync"], default-features = false } 39 | tokio-stream = { version = "0.1.15", features = ["net"], default-features = false } 40 | futures-util = { version = "0.3", default-features = false } 41 | 42 | # mnemonic 43 | tiny-bip39 = { version = "1.0.0", default-features = false} 44 | zeroize = { version = "1.8", features = ["zeroize_derive"], default-features = false} 45 | 46 | # error handling 47 | thiserror = { version = "1.0", default-features = false } 48 | anyhow = { version = "1.0", default-features 
= false } 49 | 50 | [build-dependencies] 51 | tonic-build = { version = "0.12" } 52 | 53 | [dev-dependencies] 54 | lazy_static = { version = "1.5", default-features = false} 55 | tracing-test = { version = "0.2", default-features = false } 56 | testdir = { version = "0.9", default-features = false } 57 | goldie = { version = "0.5" } 58 | hex = { version = "0.4" } 59 | 60 | # Don't abort in case there is a panic to clean up data 61 | [profile.dev] 62 | panic = "unwind" 63 | 64 | [profile.release] 65 | panic = "unwind" 66 | 67 | [features] 68 | -------------------------------------------------------------------------------- /src/mnemonic/file_io.rs: -------------------------------------------------------------------------------- 1 | //! This module handles file IO. 2 | 3 | use std::{io::Write, path::PathBuf}; 4 | 5 | use tracing::info; 6 | 7 | use super::types::Entropy; 8 | use super::{bip39_bindings::bip39_from_entropy, results::file_io::FileIoError::Exists}; 9 | 10 | /// name of export file 11 | const EXPORT_FILE: &str = "export"; 12 | 13 | use super::results::file_io::FileIoResult; 14 | 15 | /// FileIO wraps all IO functionality 16 | #[derive(Clone)] 17 | pub struct FileIo { 18 | export_path: PathBuf, 19 | } 20 | 21 | impl FileIo { 22 | /// FileIO constructor 23 | pub fn new(mut export_path: PathBuf) -> FileIo { 24 | export_path.push(EXPORT_FILE); 25 | FileIo { export_path } 26 | } 27 | 28 | /// Get the path of export file 29 | pub fn export_path(&self) -> &PathBuf { 30 | &self.export_path 31 | } 32 | 33 | /// Check if an exported file exists in the expected path 34 | /// Succeeds if no exported file exists, returns an error otherwise. 35 | pub fn check_if_not_exported(&self) -> FileIoResult<()> { 36 | if std::path::Path::new(&self.export_path()).exists() { 37 | return Err(Exists(self.export_path().clone())); 38 | } 39 | Ok(()) 40 | } 41 | 42 | /// Creates a file that contains an entropy in it's human-readable form 43 | pub(super) fn entropy_to_file(&self, entropy: Entropy) -> FileIoResult<()> { 44 | // delegate zeroization for entropy; no need to worry about mnemonic, it is cleaned automatically 45 | let mnemonic = bip39_from_entropy(entropy)?; 46 | let phrase = mnemonic.phrase(); 47 | 48 | // if there is an existing exported file raise an error 49 | self.check_if_not_exported()?; 50 | 51 | let mut file = std::fs::File::create(self.export_path())?; 52 | file.write_all(phrase.as_bytes())?; 53 | file.sync_all()?; 54 | 55 | info!("Mnemonic written in file {:?}", &self.export_path()); 56 | Ok(()) 57 | } 58 | } 59 | 60 | #[cfg(test)] 61 | mod tests { 62 | use super::*; 63 | use crate::mnemonic::bip39_bindings::{bip39_new_w24, tests::bip39_to_phrase}; 64 | use std::io::Read; 65 | use testdir::testdir; 66 | use tracing_test::traced_test; 67 | 68 | #[traced_test] 69 | #[test] 70 | fn test_write() { 71 | let entropy = bip39_new_w24(); 72 | 73 | let io = FileIo::new(testdir!()); 74 | let filepath = io.export_path(); 75 | io.entropy_to_file(entropy.clone()).unwrap(); 76 | let expected_content = bip39_to_phrase(entropy).unwrap(); 77 | 78 | let mut file = std::fs::File::open(filepath).unwrap(); 79 | let mut file_phrase = String::new(); 80 | file.read_to_string(&mut file_phrase).unwrap(); 81 | let file_content = file_phrase; 82 | 83 | assert_eq!(file_content, expected_content.0); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/mnemonic/results.rs: -------------------------------------------------------------------------------- 1 | //! 
Custom error types for [mnemonic]. 2 | 3 | /// Note: While tofnd generally uses the [anyhow] crate for error handling, we 4 | /// use the [thiserror] crate here for two reasons: 5 | /// 1. Mnemonic errors can be potentially consumed by the caller of tofnd, so an 6 | /// analytical display of errors might be helpful in the future 7 | /// 2. This can be used as an example on how analytical error handling can be 8 | /// incorporated in other modules 9 | /// For more info, see discussion in https://github.com/axelarnetwork/tofnd/issues/28 10 | 11 | pub(super) mod bip39 { 12 | #[derive(thiserror::Error, Debug)] 13 | pub enum Bip39Error { 14 | #[error("invalid entropy")] 15 | FromEntropy, 16 | #[error("invalid phrase")] 17 | FromPhrase, 18 | } 19 | pub type Bip39Result = Result; 20 | } 21 | 22 | pub(super) mod file_io { 23 | #[derive(thiserror::Error, Debug)] 24 | pub enum FileIoError { 25 | #[error("Bip39 error: {0}")] 26 | Bip39(#[from] super::bip39::Bip39Error), 27 | #[error("File IO error {0}")] 28 | FileIo(#[from] std::io::Error), 29 | #[error( 30 | "File {0} already exists. Remove file to use `-m existing` or `-m export` commands." 31 | )] 32 | Exists(std::path::PathBuf), 33 | } 34 | pub type FileIoResult = Result; 35 | } 36 | 37 | pub(super) mod mnemonic { 38 | #[derive(thiserror::Error, Debug)] 39 | pub enum InnerMnemonicError { 40 | #[error("File IO error: {0}")] 41 | FileIoErr(#[from] super::file_io::FileIoError), 42 | #[error("KvStore error: {0}")] 43 | KvErr(#[from] crate::kv_manager::error::KvError), 44 | #[error("Invalid mnemonic. See https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki. Bip39 error: {0}")] 45 | Bip39Error(#[from] super::bip39::Bip39Error), 46 | #[error("Failed to convert to SecretRecoveryKey")] 47 | IntoSecretRecoveryKey(#[from] std::array::TryFromSliceError), 48 | #[error("Password error: {0}")] 49 | PasswordErr(String), 50 | } 51 | pub type InnerMnemonicResult = Result; 52 | 53 | #[derive(thiserror::Error, Debug)] 54 | pub enum MnemonicError { 55 | #[error("Command not found: {0}")] 56 | WrongCommand(String), 57 | #[error("Cannot not use existing mnemonic: {0}")] 58 | ExistingErr(InnerMnemonicError), 59 | #[error("Cannot create mnemonic: {0}")] 60 | CreateErr(InnerMnemonicError), 61 | #[error("Cannot import mnemonic: {0}")] 62 | ImportErr(InnerMnemonicError), 63 | #[error("Cannot export mnemonic: {0}")] 64 | ExportErr(InnerMnemonicError), 65 | #[error("Cannot rotate mnemonic: {0}")] 66 | RotateErr(InnerMnemonicError), 67 | } 68 | pub type MnemonicResult = Result; 69 | pub type SeedResult = Result; 70 | } 71 | -------------------------------------------------------------------------------- /src/encrypted_sled/tests.rs: -------------------------------------------------------------------------------- 1 | use super::{kv::EncryptedDb, Password}; 2 | use testdir::testdir; 3 | 4 | #[test] 5 | fn test_encrypted_sled() { 6 | let db_path = testdir!("encrypted_sled"); 7 | let db = EncryptedDb::open(db_path, get_test_password()).unwrap(); 8 | 9 | // insert -> returns None 10 | let res = db.insert("key", "value").unwrap(); 11 | assert!(res.is_none()); 12 | 13 | // get -> returns 14 | let res = db.get("key").unwrap(); 15 | assert_eq!(res, Some(sled::IVec::from("value"))); 16 | 17 | // insert -> returns old value 18 | let res = db.insert("key", "value2").unwrap(); 19 | assert!(res.is_some()); 20 | 21 | // get -> returns new value 22 | let res = db.get("key").unwrap(); 23 | assert_eq!(res, Some(sled::IVec::from("value2"))); 24 | 25 | // get -> returns None because 
key1 does not exist 26 | let res = db.get("key1").unwrap(); 27 | assert!(res.is_none()); 28 | 29 | // contains -> returns Some(true) because key exists 30 | let res = db.contains_key("key").unwrap(); 31 | assert!(res); 32 | 33 | // contains -> returns None because key1 does not exist 34 | let res = db.contains_key("key1").unwrap(); 35 | assert!(!res); 36 | 37 | // remove -> returns because key exists 38 | let res = db.remove("key").unwrap(); 39 | assert_eq!(res, Some(sled::IVec::from("value2"))); 40 | 41 | // remove again -> returns None because key does not exist 42 | let res = db.remove("key").unwrap(); 43 | assert_eq!(res, None); 44 | } 45 | 46 | #[test] 47 | fn test_use_existing_salt() { 48 | let db_path = testdir!("use_existing_salt"); 49 | let db = EncryptedDb::open(&db_path, get_test_password()).unwrap(); 50 | drop(db); 51 | // open existing db 52 | assert!(EncryptedDb::open(&db_path, get_test_password()).is_ok()); 53 | } 54 | 55 | #[test] 56 | fn test_password() { 57 | let db_path = testdir!("test_password"); 58 | 59 | let db = EncryptedDb::open(&db_path, Password::from("super-secret password.")); 60 | assert!(db.is_ok()); 61 | drop(db); 62 | 63 | // try to open the kv store using a different password 64 | let db = EncryptedDb::open( 65 | &db_path, 66 | Password::from("super-secret password!"), // replace '.' with '!' 67 | ); 68 | assert!(matches!( 69 | db, 70 | Err(super::result::EncryptedDbError::WrongPassword) 71 | )); 72 | } 73 | 74 | #[test] 75 | fn test_large_input() { 76 | let db_path = testdir!("large_input"); 77 | 78 | let db = EncryptedDb::open(db_path, get_test_password()).unwrap(); 79 | 80 | let large_value = vec![0; 100000]; 81 | let res = db.insert("key", large_value.clone()).unwrap(); 82 | assert!(res.is_none()); 83 | 84 | let res = db.get("key").unwrap(); 85 | assert_eq!(res, Some(sled::IVec::from(large_value))); 86 | } 87 | 88 | pub fn get_test_password() -> Password { 89 | crate::encrypted_sled::PasswordMethod::NoPassword 90 | .execute() 91 | .unwrap() 92 | } 93 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | bumpType: 7 | description: Semver bump type to use ("major"/"minor"/"patch") 8 | required: true 9 | default: patch 10 | 11 | jobs: 12 | release: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout code 16 | uses: actions/checkout@v4 17 | with: 18 | fetch-depth: '0' 19 | submodules: recursive 20 | 21 | - id: release 22 | uses: anothrNick/github-tag-action@1.39.0 23 | env: 24 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 25 | DEFAULT_BUMP: ${{ github.event.inputs.bumpType }} 26 | TAG_CONTEXT: branch 27 | RELEASE_BRANCHES: main,releases.* 28 | WITH_V: true 29 | DRY_RUN: true 30 | 31 | - id: read_toml 32 | uses: SebRollen/toml-action@v1.0.0 33 | with: 34 | file: 'Cargo.toml' 35 | field: 'package.version' 36 | 37 | - name: match toml with tag 38 | run: | 39 | new_tag=${{steps.release.outputs.new_tag}} 40 | new_tag="${new_tag:1}" 41 | if [ "${{steps.read_toml.outputs.value}}" != "$new_tag" ]; then 42 | echo "New Release Tag is $new_tag" 43 | echo "Cargo.toml Tag is ${{steps.read_toml.outputs.value}}" 44 | echo "Update Cargo.toml and Cargo.lock and try again" 45 | exit 1 46 | fi 47 | 48 | - id: parsed-release 49 | uses: booxmedialtd/ws-action-parse-semver@v1 50 | with: 51 | input_string: ${{ steps.release.outputs.tag }} 52 | 53 | - name: 
Generate release 54 | run: | 55 | new_release_branch="" 56 | 57 | if [ "${{ github.ref }}" = "refs/heads/main" ]; then 58 | if [ "${{ github.event.inputs.bumpType }}" = "major" ]; then 59 | let new_major=${{ steps.parsed-release.outputs.major }}+1 60 | new_release_branch="releases/$new_major.0.x" 61 | elif [ "${{ github.event.inputs.bumpType }}" = "minor" ]; then 62 | let new_minor=${{ steps.parsed-release.outputs.minor }}+1 63 | new_release_branch="releases/${{ steps.parsed-release.outputs.major }}.$new_minor.x" 64 | else 65 | echo "cannot make patch release from main branch" 66 | exit 1 67 | fi 68 | elif [[ "${{ github.ref }}" = refs/heads/releases/* ]]; then 69 | if [ "${{ github.event.inputs.bumpType }}" != "patch" ]; then 70 | echo "cannot make major/minor release from release branch" 71 | exit 1 72 | fi 73 | else 74 | echo "can only release from main or releases/* branches" 75 | exit 1 76 | fi 77 | 78 | git config --global user.email "cicd@axelar.network" 79 | git config --global user.name "axelar-cicd-bot" 80 | 81 | git commit --allow-empty -m "${{ steps.release.outputs.new_tag }}" 82 | git push 83 | 84 | if [ -n "$new_release_branch" ]; then 85 | git checkout -b $new_release_branch 86 | git push -u origin $new_release_branch 87 | fi 88 | 89 | - name: Bump version and push tag 90 | uses: anothrNick/github-tag-action@1.26.0 91 | env: 92 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 93 | DEFAULT_BUMP: ${{ github.event.inputs.bumpType }} 94 | TAG_CONTEXT: branch 95 | RELEASE_BRANCHES: main,releases.* 96 | WITH_V: true 97 | -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | OK=0 6 | ERR=1 7 | 8 | # create: create a new mnemonic and export it to $EXPORT_PATH 9 | create_mnemonic() { 10 | echo "Creating mnemonic ..." 11 | 12 | # check if mnemonic path exists 13 | if [ -f "$TOFND_HOME/kvstore/kv/db" ]; then 14 | echo "Skipping create because a kv-store was found at $TOFND_HOME" 15 | return $ERR 16 | fi 17 | 18 | (echo ${PASSWORD} | tofnd ${ARGS} -m create) && echo "... ok" && return $OK 19 | return $ERR 20 | } 21 | 22 | # import: import a mnemonic from $IMPORT_PATH 23 | import_mnemonic() { 24 | echo "Importing mnemonic ..." 25 | 26 | if [ -f "$TOFND_HOME/kvstore/kv/db" ]; then 27 | echo "Skipping import because a kv-store already exists at $TOFND_HOME" 28 | return $ERR 29 | fi 30 | 31 | if [ ! -f "$IMPORT_PATH" ]; then \ 32 | echo "No import file found at $IMPORT_PATH" 33 | return $ERR 34 | fi 35 | 36 | if [ -n "${NOPASSWORD}" ]; then \ 37 | echo "No password" 38 | ( cat $IMPORT_PATH | tofnd ${ARGS} -m import ) || return $ERR 39 | else 40 | echo "With password" 41 | ( (echo $PASSWORD && cat $IMPORT_PATH) | tofnd ${ARGS} -m import ) || return $ERR 42 | fi 43 | 44 | echo "... ok" 45 | return $OK 46 | } 47 | 48 | # export: export the mnemonic to $EXPORT_PATH 49 | export_mnemonic() { 50 | echo "Exporting mnemonic ..." 51 | echo ${PASSWORD} | tofnd ${ARGS} -m export || return $ERR 52 | echo "... ok" 53 | return $OK 54 | } 55 | 56 | # Get password from env var 57 | EMPTY_STRING="" 58 | PASSWORD="${PASSWORD:-$EMPTY_STRING}" 59 | 60 | # set tofnd root. TOFND_HOME can be set to a different path by the user. 
61 | TOFND_HOME=${TOFND_HOME:-"./.tofnd"} 62 | IMPORT_PATH=$TOFND_HOME/import 63 | EXPORT_PATH=$TOFND_HOME/export 64 | 65 | echo "Using tofnd root:" $TOFND_HOME 66 | 67 | # gather user's args 68 | 69 | # add '--no-password' flag to args if enabled 70 | ARGS=${NOPASSWORD:+"--no-password"} 71 | # add '--address' flag to args if enabled 72 | ARGS+=${ADDRESS:+" --address ${ADDRESS}"} 73 | # add '--port' flag to args if enabled 74 | ARGS+=${PORT:+" --port ${PORT}"} 75 | 76 | # check mnemonic arg 77 | if [ -n "${MNEMONIC_CMD}" ]; then \ 78 | 79 | case ${MNEMONIC_CMD} in 80 | # auto: try to set up tofnd and then spin up tofnd with the existing mnemonic. 81 | # Order of set up: 1) import mnemonic, 2) create mnemonic. 82 | # If 2) then move the mnemonic to $IMPORT_PATH so that tofnd will not complain 83 | auto) 84 | echo "Trying import" && import_mnemonic \ 85 | || (echo "... skipping. Trying to create" && create_mnemonic && mv $EXPORT_PATH $IMPORT_PATH) \ 86 | || echo "... skipping" 87 | ;; 88 | 89 | existing) 90 | ;; 91 | 92 | create) 93 | create_mnemonic || exit $ERR 94 | exit $OK 95 | ;; 96 | 97 | import) 98 | import_mnemonic || exit $ERR 99 | exit $OK 100 | ;; 101 | 102 | export) 103 | export_mnemonic || exit $ERR 104 | exit $OK 105 | ;; 106 | 107 | *) 108 | echo "Unknown command: ${MNEMONIC_CMD}" 109 | exit $ERR 110 | ;; 111 | esac 112 | 113 | echo "Using existing mnemonic ..." 114 | ARGS+=" -m existing" 115 | fi 116 | 117 | # execute tofnd daemon 118 | exec echo ${PASSWORD} | tofnd ${ARGS} "$@"; \ 119 | 120 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use multisig::service::MultisigService; 2 | use proto::multisig_server::MultisigServer; 3 | use std::net::SocketAddr; 4 | use tokio::net::TcpListener; 5 | use tokio_stream::wrappers::TcpListenerStream; 6 | 7 | mod encrypted_sled; 8 | mod kv_manager; 9 | mod mnemonic; 10 | mod multisig; 11 | 12 | // gather logs; need to set RUST_LOG=info 13 | use tracing::{info, span, Level}; 14 | 15 | // error handling 16 | pub type TofndResult = anyhow::Result; 17 | 18 | // protocol buffers via tonic: https://github.com/hyperium/tonic/blob/master/examples/helloworld-tutorial.md#writing-our-server 19 | pub mod proto { 20 | tonic::include_proto!("tofnd"); 21 | } 22 | 23 | mod config; 24 | use config::parse_args; 25 | 26 | use crate::kv_manager::KvManager; 27 | 28 | fn set_up_logs() { 29 | // enable only tofnd and tofn debug logs - disable serde, tonic, tokio, etc. 
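// logs are emitted as flattened JSON; ANSI colors are enabled only when stdout is a tty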
30 | tracing_subscriber::fmt() 31 | .with_env_filter("tofnd=debug,tofn=debug") 32 | .json() 33 | .with_ansi(atty::is(atty::Stream::Stdout)) 34 | .with_target(false) 35 | .with_current_span(false) 36 | .flatten_event(true) // make logs complient with datadog 37 | .init(); 38 | } 39 | 40 | /// worker_threads defaults to the number of cpus on the system 41 | /// https://docs.rs/tokio/1.2.0/tokio/attr.main.html#multi-threaded-runtime 42 | #[tokio::main(flavor = "multi_thread")] 43 | async fn main() -> TofndResult<()> { 44 | set_up_logs(); // can't print any logs until they're set up 45 | let cfg = parse_args()?; 46 | let socket_address = addr(&cfg.ip, cfg.port)?; 47 | 48 | // immediately read an encryption password from stdin 49 | let password = cfg.password_method.execute()?; 50 | 51 | // set up span for logs 52 | let main_span = span!(Level::INFO, "main"); 53 | let _enter = main_span.enter(); 54 | let cmd = cfg.mnemonic_cmd.clone(); 55 | 56 | // this step takes a long time due to password-based decryption 57 | let kv_manager = KvManager::new(cfg.tofnd_path.clone(), password)? 58 | .handle_mnemonic(&cfg.mnemonic_cmd) 59 | .await?; 60 | 61 | if cmd.exit_after_cmd() { 62 | info!("Tofnd exited after using command <{:?}>. Run `./tofnd -m existing` to execute gRPC daemon.", cmd); 63 | return Ok(()); 64 | } 65 | 66 | let service = MultisigServer::new(MultisigService::new(kv_manager)); 67 | 68 | let incoming = TcpListener::bind(socket_address).await?; 69 | info!( 70 | "tofnd listen addr {:?}, use ctrl+c to shutdown", 71 | incoming.local_addr()? 72 | ); 73 | 74 | tonic::transport::Server::builder() 75 | .add_service(service) 76 | .serve_with_incoming_shutdown(TcpListenerStream::new(incoming), shutdown_signal()) 77 | .await?; 78 | 79 | Ok(()) 80 | } 81 | 82 | fn addr(ip: &str, port: u16) -> TofndResult { 83 | let socket_addr = format!("{}:{}", ip, port); 84 | socket_addr 85 | .parse::() 86 | .map_err(|err| anyhow::anyhow!(err)) 87 | } 88 | 89 | // graceful shutdown https://hyper.rs/guides/server/graceful-shutdown/ 90 | // can't use Result<> here because `serve_with_incoming_shutdown` expects F: Future, 91 | async fn shutdown_signal() { 92 | // Wait for the CTRL+C signal 93 | tokio::signal::ctrl_c() 94 | .await 95 | .expect("failed to install CTRL+C signal handler"); 96 | info!("tofnd shutdown signal received"); 97 | } 98 | 99 | #[cfg(test)] 100 | mod tests; 101 | -------------------------------------------------------------------------------- /src/multisig/service.rs: -------------------------------------------------------------------------------- 1 | use tonic::Response; 2 | use tonic::Status; 3 | 4 | use crate::kv_manager::KvManager; 5 | use crate::proto; 6 | 7 | use tracing::{error, info}; 8 | 9 | /// `MultisigService` is a gRPC service wrapper around tofn's keygen and signing functions 10 | #[derive(Clone)] 11 | pub struct MultisigService { 12 | pub(super) kv_manager: KvManager, 13 | } 14 | 15 | /// Create a new Multisig gRPC server 16 | impl MultisigService { 17 | pub fn new(kv_manager: KvManager) -> Self { 18 | Self { kv_manager } 19 | } 20 | } 21 | 22 | #[tonic::async_trait] 23 | impl proto::multisig_server::Multisig for MultisigService { 24 | async fn key_presence( 25 | &self, 26 | request: tonic::Request, 27 | ) -> Result, Status> { 28 | let request = request.into_inner(); 29 | 30 | let response = match self.handle_key_presence(request).await { 31 | Ok(res) => { 32 | info!("Key presence check completed succesfully"); 33 | res 34 | } 35 | Err(err) => { 36 | error!("Unable to complete key 
presence check: {}", err); 37 | proto::key_presence_response::Response::Fail 38 | } 39 | }; 40 | 41 | Ok(Response::new(proto::KeyPresenceResponse { 42 | response: response as i32, 43 | })) 44 | } 45 | 46 | async fn keygen( 47 | &self, 48 | request: tonic::Request, 49 | ) -> Result, Status> { 50 | let request = request.into_inner(); 51 | let result = match self.handle_keygen(&request).await { 52 | Ok(pub_key) => { 53 | info!( 54 | "[{}] Multisig Keygen with key id [{}] completed", 55 | request.party_uid, request.key_uid 56 | ); 57 | proto::keygen_response::KeygenResponse::PubKey(pub_key) 58 | } 59 | Err(err) => { 60 | error!( 61 | "[{}] Multisig Keygen with key id [{}] failed: {}", 62 | request.party_uid, 63 | request.key_uid, 64 | err.to_string() 65 | ); 66 | proto::keygen_response::KeygenResponse::Error(err.to_string()) 67 | } 68 | }; 69 | 70 | Ok(Response::new(proto::KeygenResponse { 71 | keygen_response: Some(result), 72 | })) 73 | } 74 | 75 | async fn sign( 76 | &self, 77 | request: tonic::Request, 78 | ) -> Result, Status> { 79 | let request = request.into_inner(); 80 | let result = match self.handle_sign(&request).await { 81 | Ok(pub_key) => { 82 | info!( 83 | "[{}] Multisig Sign with key id [{}] and message [{:?}] completed", 84 | request.party_uid, request.key_uid, request.msg_to_sign, 85 | ); 86 | proto::sign_response::SignResponse::Signature(pub_key) 87 | } 88 | Err(err) => { 89 | error!( 90 | "[{}] Multisig sign with key id [{}] and message [{:?}] failed: {}", 91 | request.party_uid, 92 | request.key_uid, 93 | request.msg_to_sign, 94 | err.to_string() 95 | ); 96 | proto::sign_response::SignResponse::Error(err.to_string()) 97 | } 98 | }; 99 | 100 | Ok(Response::new(proto::SignResponse { 101 | sign_response: Some(result), 102 | })) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/kv_manager/sled_bindings.rs: -------------------------------------------------------------------------------- 1 | //! Bindings for [sled::Db] operations. Errors are mapped to [super::error::InnerKvError]. 2 | 3 | use serde::{de::DeserializeOwned, Serialize}; 4 | use tofn::sdk::api::{deserialize, serialize}; 5 | 6 | use super::error::{InnerKvError::*, InnerKvResult}; 7 | use super::types::{KeyReservation, DEFAULT_RESERVE}; 8 | 9 | use crate::encrypted_sled; 10 | 11 | /// Reserves a key. New key's value is [DEFAULT_RESERVE]. 12 | /// Returns [SledErr] of [LogicalErr] on failure. 13 | pub(super) fn handle_reserve( 14 | kv: &encrypted_sled::Db, 15 | key: String, 16 | ) -> InnerKvResult { 17 | // search key in kv store. 18 | // If reserve key already exists inside our database, return an error 19 | if kv.contains_key(&key)? { 20 | return Err(LogicalErr(format!( 21 | "kv_manager key <{}> already reserved.", 22 | key 23 | ))); 24 | } 25 | 26 | // try to insert the new key with default value 27 | kv.insert(&key, DEFAULT_RESERVE)?; 28 | 29 | // return key reservation 30 | Ok(KeyReservation { key }) 31 | } 32 | 33 | /// Deletes an unreserved key if it exists. 34 | /// Returns [SledErr] of [LogicalErr] on failure. 35 | pub(super) fn handle_delete(kv: &encrypted_sled::Db, key: String) -> InnerKvResult<()> { 36 | if !kv.contains_key(&key)? { 37 | return Ok(()); 38 | } 39 | 40 | // check if key holds the default reserve value. If yes, can't delete it. 41 | if kv.get(&key)? 
== Some(sled::IVec::from(DEFAULT_RESERVE)) { 42 | return Err(LogicalErr(format!( 43 | "can't delete reserved key <{}> in kv store.", 44 | key 45 | ))); 46 | } 47 | 48 | kv.remove(&key)?; 49 | 50 | Ok(()) 51 | } 52 | 53 | /// Inserts a value to an existing key. 54 | /// Returns [SledErr] of [LogicalErr] on failure. 55 | pub(super) fn handle_put( 56 | kv: &encrypted_sled::Db, 57 | reservation: KeyReservation, 58 | value: V, 59 | ) -> InnerKvResult<()> 60 | where 61 | V: Serialize, 62 | { 63 | // check if key holds the default reserve value. If not, send an error. 64 | // Explanation of code ugliness: that's the standard way to compare a 65 | // sled retrieved value with a local value: 66 | // https://docs.rs/sled/0.34.6/sled/struct.Tree.html#examples-4 67 | if kv.get(&reservation.key)? != Some(sled::IVec::from(DEFAULT_RESERVE)) { 68 | return Err(LogicalErr(format!( 69 | "did not find reservation for key <{}> in kv store.", 70 | reservation.key 71 | ))); 72 | } 73 | 74 | // convert value into bytes 75 | let bytes = serialize(&value).map_err(|_| SerializationErr)?; 76 | 77 | // insert new value 78 | kv.insert(&reservation.key, bytes)?; 79 | 80 | Ok(()) 81 | } 82 | 83 | /// Get the value of an existing key. 84 | /// Returns [SledErr] of [LogicalErr] on failure. 85 | pub(super) fn handle_get(kv: &encrypted_sled::Db, key: String) -> InnerKvResult 86 | where 87 | V: DeserializeOwned, 88 | { 89 | // try to get value of 'key' 90 | let value = match kv.get(&key)? { 91 | Some(bytes) => deserialize(&bytes).ok_or(DeserializationErr)?, 92 | None => { 93 | return Err(LogicalErr(format!("key <{}> does not have a value.", key))); 94 | } 95 | }; 96 | 97 | // return value 98 | Ok(value) 99 | } 100 | 101 | /// Checks if a key exists in the kvstore. 102 | /// Returns [SledErr] of [LogicalErr] on failure. 103 | pub(super) fn handle_exists(kv: &encrypted_sled::Db, key: &str) -> InnerKvResult { 104 | kv.contains_key(key).map_err(|err| { 105 | LogicalErr(format!( 106 | "Could not perform 'contains_key' for key <{}> due to error: {}", 107 | key, err 108 | )) 109 | }) 110 | } 111 | -------------------------------------------------------------------------------- /src/mnemonic/bip39_bindings.rs: -------------------------------------------------------------------------------- 1 | //! This module provides wrappers for mnemonic creation, validation and seed 2 | //! extraction using the tiny-bip39 https://crates.io/crates/tiny-bip39 library. 3 | //! 4 | //! Default (and only) language is English. More languages to be added in the future. 5 | //! 6 | //! Zeroization: 7 | //! All functions that accept and/or return structs that implement zeroization: 8 | //! 
[crate::gg20::Password], [crate::gg20::Entropy], [bip39::Mnemonic], [bip39::Seed] 9 | 10 | use super::results::bip39::{Bip39Error::*, Bip39Result}; 11 | use super::types::{Entropy, Password}; 12 | use bip39::{Language, Mnemonic, Seed}; 13 | 14 | // TODO: we can enrich the API so that users can decide which language they want to use 15 | const DEFAUT_LANG: Language = Language::English; 16 | 17 | /// create a new 24 word mnemonic 18 | pub(super) fn bip39_new_w24() -> Entropy { 19 | let mnemonic = Mnemonic::new(bip39::MnemonicType::Words24, DEFAUT_LANG); 20 | Entropy(mnemonic.entropy().to_owned()) 21 | } 22 | 23 | /// create a [Mnemonic] from [Entropy]; takes ownership of entropy and zeroizes it before exit 24 | pub(super) fn bip39_from_entropy(entropy: Entropy) -> Bip39Result { 25 | // try to get mnemonic from entropy 26 | Mnemonic::from_entropy(&entropy.0, DEFAUT_LANG).map_err(|_| FromEntropy) 27 | } 28 | 29 | /// create an [Entropy] from [Mnemonic]; takes ownership of phrase and zeroizes it before exit 30 | pub(super) fn bip39_from_phrase(phrase: Password) -> Bip39Result { 31 | // matching feels better than map_err() here 32 | match Mnemonic::from_phrase(&phrase.0, DEFAUT_LANG) { 33 | Ok(mnemonic) => Ok(Entropy(mnemonic.entropy().to_owned())), 34 | Err(_) => Err(FromPhrase), 35 | } 36 | } 37 | 38 | /// extract [Seed] from [Mnemonic]; takes ownership of entropy and password and zeroizes them before exit 39 | pub(super) fn bip39_seed(entropy: Entropy, password: Password) -> Bip39Result { 40 | // matching feels better than map_err() here 41 | match bip39_from_entropy(entropy) { 42 | Ok(mnemonic) => Ok(Seed::new(&mnemonic, &password.0)), 43 | Err(_) => Err(FromEntropy), 44 | } 45 | } 46 | 47 | #[cfg(test)] 48 | pub mod tests { 49 | 50 | use super::*; 51 | use tracing::info; 52 | use tracing_test::traced_test; 53 | 54 | /// create a mnemonic from entropy; takes ownership of entropy and zeroizes it after 55 | pub fn bip39_to_phrase(entropy: Entropy) -> Bip39Result { 56 | match Mnemonic::from_entropy(&entropy.0, DEFAUT_LANG) { 57 | Ok(mnemonic) => Ok(Password(mnemonic.phrase().to_owned())), 58 | Err(_) => Err(FromEntropy), 59 | } 60 | } 61 | 62 | #[traced_test] 63 | #[test] 64 | fn create() { 65 | let entropy = bip39_new_w24(); 66 | let mnemonic = Mnemonic::from_entropy(&entropy.0, DEFAUT_LANG).unwrap(); 67 | let passphrase = mnemonic.phrase(); 68 | info!( 69 | "created passphrase [{}] from entropy [{:?}]", 70 | passphrase, &entropy 71 | ); 72 | } 73 | 74 | #[traced_test] 75 | #[test] 76 | fn from_entropy() { 77 | let ok_entropy = Entropy(vec![42; 16]); 78 | let err_entropy = Entropy(vec![42; 15]); 79 | 80 | assert!(bip39_from_entropy(ok_entropy).is_ok()); 81 | assert!(bip39_from_entropy(err_entropy).is_err()); 82 | } 83 | 84 | #[traced_test] 85 | #[test] 86 | fn seed_known_vector() { 87 | // Expected output: https://github.com/maciejhirsz/tiny-bip39/blob/master/src/seed.rs#L102 88 | let entropy = vec![ 89 | 0x33, 0xE4, 0x6B, 0xB1, 0x3A, 0x74, 0x6E, 0xA4, 0x1C, 0xDD, 0xE4, 0x5C, 0x90, 0x84, 90 | 0x6A, 0x79, 91 | ]; 92 | 93 | let output = 94 | hex::encode(bip39_seed(Entropy(entropy), Password("password".to_owned())).unwrap()); 95 | 96 | goldie::assert_json!(output); 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /src/config/mod.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use clap::{builder::PossibleValuesParser, crate_version, value_parser, Arg, ArgAction, Command}; 4 | 
5 | // error handling 6 | use crate::{encrypted_sled::PasswordMethod, mnemonic::Cmd, TofndResult}; 7 | use anyhow::anyhow; 8 | 9 | // TODO: move these into constants.rs 10 | const DEFAULT_PATH_ROOT: &str = ".tofnd"; 11 | const TOFND_HOME_ENV_VAR: &str = "TOFND_HOME"; 12 | const DEFAULT_MNEMONIC_CMD: &str = "existing"; 13 | const DEFAULT_IP: &str = "127.0.0.1"; 14 | const DEFAULT_PORT: &str = "50051"; 15 | const AVAILABLE_MNEMONIC_CMDS: &[&str] = &["existing", "create", "import", "export", "rotate"]; 16 | 17 | // default path is ~/.tofnd 18 | fn default_tofnd_dir() -> TofndResult { 19 | Ok(dirs::home_dir() 20 | .ok_or_else(|| anyhow!("no home dir"))? 21 | .join(DEFAULT_PATH_ROOT) 22 | .to_str() 23 | .ok_or_else(|| anyhow!("can't convert default dir to str"))? 24 | .into()) 25 | } 26 | 27 | // TODO: move to types.rs 28 | #[derive(Clone, Debug)] 29 | pub struct Config { 30 | pub ip: String, 31 | pub port: u16, 32 | pub mnemonic_cmd: Cmd, 33 | pub tofnd_path: PathBuf, 34 | pub password_method: PasswordMethod, 35 | } 36 | 37 | pub fn parse_args() -> TofndResult { 38 | let app = Command::new("tofnd") 39 | .about("A cryptographic signing service") 40 | .version(crate_version!()) 41 | .arg( 42 | Arg::new("ip") 43 | .long("address") 44 | .short('a') 45 | .required(false) 46 | .default_value(DEFAULT_IP), 47 | ) 48 | .arg( 49 | Arg::new("port") 50 | .long("port") 51 | .short('p') 52 | .required(false) 53 | .value_parser(value_parser!(u16)) 54 | .default_value(DEFAULT_PORT), 55 | ) 56 | .arg( 57 | Arg::new("no-password") 58 | .help( 59 | "Skip providing a password. (default: disabled) **Security warning:** If this option is set then on-disk storage is encrypted with a default (and insecure) password.", 60 | ) 61 | .long("no-password") 62 | .required(false) 63 | .action(ArgAction::SetTrue) 64 | .display_order(0), 65 | ) 66 | .arg( 67 | Arg::new("mnemonic") 68 | .long("mnemonic") 69 | .short('m') 70 | .required(false) 71 | .default_value(DEFAULT_MNEMONIC_CMD) 72 | .value_parser(PossibleValuesParser::new(AVAILABLE_MNEMONIC_CMDS)) 73 | ) 74 | .arg( 75 | Arg::new("directory") 76 | .long("directory") 77 | .short('d') 78 | .required(false) 79 | .env(TOFND_HOME_ENV_VAR) 80 | .default_value(default_tofnd_dir()?), 81 | ); 82 | 83 | let matches = app.get_matches(); 84 | 85 | let ip = matches 86 | .get_one::("ip") 87 | .ok_or_else(|| anyhow!("ip value"))? 88 | .clone(); 89 | let port = *matches 90 | .get_one::("port") 91 | .ok_or_else(|| anyhow!("port value"))?; 92 | let mnemonic_cmd = Cmd::from_string( 93 | matches 94 | .get_one::("mnemonic") 95 | .ok_or_else(|| anyhow!("cmd value"))?, 96 | )?; 97 | let tofnd_path = matches 98 | .get_one::("directory") 99 | .ok_or_else(|| anyhow!("directory value"))? 
100 | .into(); 101 | let password_method = if matches.get_flag("no-password") { 102 | PasswordMethod::NoPassword 103 | } else { 104 | PasswordMethod::Prompt 105 | }; 106 | 107 | Ok(Config { 108 | ip, 109 | port, 110 | mnemonic_cmd, 111 | tofnd_path, 112 | password_method, 113 | }) 114 | } 115 | -------------------------------------------------------------------------------- /src/tests/tofnd_party.rs: -------------------------------------------------------------------------------- 1 | use super::{InitParty, DEFAULT_TEST_IP, DEFAULT_TEST_PORT, MAX_TRIES}; 2 | use crate::{ 3 | addr, 4 | config::Config, 5 | encrypted_sled::{get_test_password, PasswordMethod}, 6 | kv_manager::KvManager, 7 | mnemonic::Cmd, 8 | multisig::service::MultisigService, 9 | proto::{self, multisig_server::MultisigServer}, 10 | tests::SLEEP_TIME, 11 | }; 12 | 13 | use std::path::Path; 14 | use std::path::PathBuf; 15 | use tokio::time::{sleep, Duration}; 16 | use tokio::{net::TcpListener, sync::oneshot, task::JoinHandle}; 17 | use tokio_stream::wrappers::TcpListenerStream; 18 | 19 | use tracing::{info, warn}; 20 | 21 | // I tried to keep this struct private and return `impl Party` from new() but ran into so many problems with the Rust compiler 22 | // I also tried using Box but ran into this: https://github.com/rust-lang/rust/issues/63033 23 | #[allow(dead_code)] 24 | pub(super) struct TofndParty { 25 | tofnd_path: PathBuf, 26 | client: proto::multisig_client::MultisigClient, 27 | server_handle: JoinHandle<()>, 28 | server_shutdown_sender: oneshot::Sender<()>, 29 | server_port: u16, 30 | } 31 | 32 | impl TofndParty { 33 | pub(super) async fn new(init_party: InitParty, mnemonic_cmd: Cmd, testdir: &Path) -> Self { 34 | let tofnd_path = format!("test-key-{:02}", init_party.party_index); 35 | let tofnd_path = testdir.join(tofnd_path); 36 | 37 | // start server 38 | let (server_shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); 39 | 40 | let incoming = TcpListener::bind(addr(DEFAULT_TEST_IP, DEFAULT_TEST_PORT).unwrap()) 41 | .await 42 | .unwrap(); 43 | let server_addr = incoming.local_addr().unwrap(); 44 | let server_ip = server_addr.ip(); 45 | let server_port = server_addr.port(); 46 | info!("new party bound to port [{:?}]", server_port); 47 | 48 | let cfg = Config { 49 | mnemonic_cmd, 50 | ip: server_ip.to_string(), 51 | port: server_port, 52 | tofnd_path, 53 | password_method: PasswordMethod::NoPassword, 54 | }; 55 | 56 | // start service 57 | // sled does not support to rapidly open/close databases. 58 | // Unfortunately, for our restarts/recover tests we need to open 59 | // a database right after it is closed. We get around with that by 60 | // attempting to open the kv with some artificial delay. 
61 | // https://github.com/spacejam/sled/issues/1234#issuecomment-754769425 62 | let mut tries = 0; 63 | let kv_manager = loop { 64 | match KvManager::new(cfg.tofnd_path.clone(), get_test_password()) { 65 | Ok(kv_manager) => break kv_manager, 66 | Err(err) => { 67 | tries += 1; 68 | warn!("({}/3) unable to start kv manager: {}", tries, err); 69 | } 70 | }; 71 | sleep(Duration::from_secs(SLEEP_TIME)).await; 72 | if tries == MAX_TRIES { 73 | panic!("could not start kv manager"); 74 | } 75 | }; 76 | let kv_manager = kv_manager.handle_mnemonic(&cfg.mnemonic_cmd).await.unwrap(); 77 | 78 | let service = MultisigServer::new(MultisigService::new(kv_manager)); 79 | 80 | // let (startup_sender, startup_receiver) = tokio::sync::oneshot::channel::<()>(); 81 | let server_handle = tokio::spawn(async move { 82 | tonic::transport::Server::builder() 83 | .add_service(service) 84 | .serve_with_incoming_shutdown(TcpListenerStream::new(incoming), async { 85 | shutdown_receiver.await.unwrap(); 86 | }) 87 | .await 88 | .unwrap(); 89 | // startup_sender.send(()).unwrap(); 90 | }); 91 | 92 | // TODO get the server to notify us after it's started, or perhaps just "yield" here 93 | // println!( 94 | // "new party [{}] TODO sleep waiting for server to start...", 95 | // server_port 96 | // ); 97 | // tokio::time::delay_for(std::time::Duration::from_millis(100)).await; 98 | // startup_receiver.await.unwrap(); 99 | // println!("party [{}] server started!", init.party_uids[my_id_index]); 100 | 101 | info!("new party [{}] connect to server...", server_port); 102 | let client = 103 | proto::multisig_client::MultisigClient::connect(format!("http://{}", server_addr)) 104 | .await 105 | .unwrap(); 106 | 107 | TofndParty { 108 | tofnd_path: cfg.tofnd_path, 109 | client, 110 | server_handle, 111 | server_shutdown_sender, 112 | server_port, 113 | } 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /src/kv_manager/tests.rs: -------------------------------------------------------------------------------- 1 | //! [sled_bindings] tests 2 | 3 | use super::{ 4 | error::InnerKvError::LogicalErr, 5 | sled_bindings::{handle_exists, handle_get, handle_put, handle_reserve}, 6 | types::{KeyReservation, DEFAULT_RESERVE}, 7 | }; 8 | use crate::encrypted_sled; 9 | 10 | // testdir creates a test directory at $TMPDIR. 11 | // Mac: /var/folders/v4/x_j3jj7d6ql4gjdf7b7jvjhm0000gn/T/testdir-of-$(USER) 12 | // Linux: /tmp 13 | // Windows: /data/local/tmp 14 | // https://doc.rust-lang.org/std/env/fn.temp_dir.html#unix 15 | use testdir::testdir; 16 | use tofn::sdk::api::deserialize; 17 | 18 | fn clean_up(kv_name: &str, kv: encrypted_sled::Db) { 19 | assert!(kv.flush().is_ok()); 20 | std::fs::remove_dir_all(kv_name).unwrap(); 21 | } 22 | 23 | pub fn open_with_test_password
<P>
(db_name: P) -> encrypted_sled::Result 24 | where 25 | P: AsRef, 26 | { 27 | encrypted_sled::Db::open(db_name, encrypted_sled::get_test_password()) 28 | } 29 | 30 | #[test] 31 | fn reserve_success() { 32 | let kv_name = testdir!("reserve_success"); 33 | let kv = open_with_test_password(&kv_name).unwrap(); 34 | 35 | let key: String = "key".to_string(); 36 | assert_eq!( 37 | handle_reserve(&kv, key.clone()).unwrap(), 38 | KeyReservation { key: key.clone() } 39 | ); 40 | 41 | // check if default value was stored 42 | // get bytes 43 | let default_reserv = kv.get(&key).unwrap().unwrap(); 44 | // convert to value type 45 | assert!(default_reserv == DEFAULT_RESERVE); 46 | 47 | clean_up(kv_name.to_str().unwrap(), kv); 48 | } 49 | 50 | #[test] 51 | fn reserve_failure() { 52 | let kv_name = testdir!(); 53 | let kv = open_with_test_password(&kv_name).unwrap(); 54 | 55 | let key: String = "key".to_string(); 56 | handle_reserve(&kv, key.clone()).unwrap(); 57 | // try reserving twice 58 | let err = handle_reserve(&kv, key).err().unwrap(); 59 | assert!(matches!(err, LogicalErr(_))); 60 | clean_up(kv_name.to_str().unwrap(), kv); 61 | } 62 | 63 | #[test] 64 | fn put_success() { 65 | let kv_name = testdir!(); 66 | let kv = open_with_test_password(&kv_name).unwrap(); 67 | 68 | let key: String = "key".to_string(); 69 | handle_reserve(&kv, key.clone()).unwrap(); 70 | 71 | let value: String = "value".to_string(); 72 | assert!(handle_put(&kv, KeyReservation { key }, value).is_ok()); 73 | 74 | clean_up(kv_name.to_str().unwrap(), kv); 75 | } 76 | 77 | #[test] 78 | fn put_failure_no_reservation() { 79 | let kv_name = testdir!(); 80 | let kv = open_with_test_password(&kv_name).unwrap(); 81 | 82 | let key: String = "key".to_string(); 83 | 84 | let value: String = "value".to_string(); 85 | // try to add put a key without reservation and get an error 86 | let err = handle_put(&kv, KeyReservation { key: key.clone() }, value) 87 | .err() 88 | .unwrap(); 89 | assert!(matches!(err, LogicalErr(_))); 90 | // check if key was inserted 91 | assert!(!kv.contains_key(&key).unwrap()); 92 | 93 | clean_up(kv_name.to_str().unwrap(), kv); 94 | } 95 | 96 | #[test] 97 | fn put_failure_put_twice() { 98 | let kv_name = testdir!(); 99 | let kv = open_with_test_password(&kv_name).unwrap(); 100 | 101 | let key: String = "key".to_string(); 102 | let value = "value".to_string(); 103 | let value2 = "value2".to_string(); 104 | 105 | handle_reserve(&kv, key.clone()).unwrap(); 106 | handle_put(&kv, KeyReservation { key: key.clone() }, value.clone()).unwrap(); 107 | 108 | let err = handle_put(&kv, KeyReservation { key: key.clone() }, value2) 109 | .err() 110 | .unwrap(); 111 | assert!(matches!(err, LogicalErr(_))); 112 | 113 | // check if value was changed 114 | // get bytes 115 | let bytes = kv.get(&key).unwrap().unwrap(); 116 | // convert to value type 117 | let v: String = deserialize(&bytes).unwrap(); 118 | // check current value with first assigned value 119 | assert!(v == value); 120 | 121 | clean_up(kv_name.to_str().unwrap(), kv); 122 | } 123 | 124 | #[test] 125 | fn get_success() { 126 | let kv_name = testdir!(); 127 | let kv = open_with_test_password(&kv_name).unwrap(); 128 | 129 | let key: String = "key".to_string(); 130 | let value = "value"; 131 | handle_reserve(&kv, key.clone()).unwrap(); 132 | handle_put(&kv, KeyReservation { key: key.clone() }, value).unwrap(); 133 | let res = handle_get::(&kv, key); 134 | assert!(res.is_ok()); 135 | let res = res.unwrap(); 136 | assert_eq!(res, value); 137 | 138 | 
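// flush the store and delete the temporary directory created by testdir!()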
clean_up(kv_name.to_str().unwrap(), kv); 139 | } 140 | 141 | #[test] 142 | fn get_failure() { 143 | let kv_name = testdir!(); 144 | let kv = open_with_test_password(&kv_name).unwrap(); 145 | 146 | let key: String = "key".to_string(); 147 | let err = handle_get::(&kv, key).err().unwrap(); 148 | assert!(matches!(err, LogicalErr(_))); 149 | 150 | clean_up(kv_name.to_str().unwrap(), kv); 151 | } 152 | 153 | #[test] 154 | fn test_exists() { 155 | let kv_name = testdir!(); 156 | let kv = open_with_test_password(kv_name).unwrap(); 157 | let key: String = "key".to_string(); 158 | let value: String = "value".to_string(); 159 | 160 | // exists should fail 161 | let exists = handle_exists(&kv, &key); 162 | assert!(exists.is_ok()); 163 | assert!(!exists.unwrap()); // assert that the result is false 164 | 165 | // reserve key 166 | let reservation = handle_reserve(&kv, key.clone()).unwrap(); 167 | 168 | // exists should succeed 169 | let exists = handle_exists(&kv, &key); 170 | assert!(exists.is_ok()); 171 | assert!(exists.unwrap()); // check that the result is true 172 | 173 | // put key 174 | handle_put(&kv, reservation, value).unwrap(); 175 | 176 | // exists should succeed 177 | let exists = handle_exists(&kv, &key); 178 | assert!(exists.is_ok()); 179 | assert!(exists.unwrap()); // check that the result is true 180 | 181 | // remove key 182 | let remove = kv.remove(key.clone()); 183 | assert!(remove.is_ok()); 184 | 185 | // exists should succeed 186 | let exists = handle_exists(&kv, &key); 187 | assert!(exists.is_ok()); 188 | assert!(!exists.unwrap()); // check that the result is false 189 | } 190 | -------------------------------------------------------------------------------- /.github/workflows/build-docker-image-and-binaries.yaml: -------------------------------------------------------------------------------- 1 | name: Upload Binaries and Docker Image 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | tag: 7 | description: Github tag to release binaries for (reusing an existing tag will make the pipeline fail) 8 | required: true 9 | default: latest 10 | 11 | jobs: 12 | release-binaries: 13 | runs-on: ${{ matrix.os }} 14 | strategy: 15 | matrix: 16 | os: [ubuntu-latest, macos-latest] 17 | arch: [amd64, arm64] 18 | exclude: 19 | - {os: "ubuntu-latest", arch: "arm64"} 20 | 21 | permissions: 22 | contents: write 23 | packages: write 24 | id-token: write 25 | 26 | steps: 27 | 28 | - name: Configure AWS credentials 29 | uses: aws-actions/configure-aws-credentials@v1 30 | with: 31 | aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} 32 | aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 33 | aws-region: us-east-2 34 | 35 | - name: Validate tag for binaries build 36 | env: 37 | SEMVER: ${{ github.event.inputs.tag }} 38 | run: | 39 | if [[ $SEMVER =~ v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} ]]; then echo "Tag is okay" && exit 0; else echo "invalid tag" && exit 1; fi 40 | aws s3 ls s3://axelar-releases/tofnd/"$SEMVER" && echo "tag already exists, use a new one" && exit 1 41 | 42 | - name: Checkout code 43 | uses: actions/checkout@v4 44 | with: 45 | fetch-depth: '0' 46 | ref: ${{ github.event.inputs.tag }} 47 | submodules: recursive 48 | 49 | - name: Install protoc 50 | uses: arduino/setup-protoc@v3 51 | with: 52 | repo-token: ${{ secrets.GITHUB_TOKEN }} 53 | 54 | - name: Install stable toolchain 55 | uses: actions-rs/toolchain@v1 56 | with: 57 | profile: minimal 58 | toolchain: 1.78.0 59 | override: true 60 | components: rustfmt, clippy 61 | 62 | - name: Build Mac OS binaries 63 | env: 64 | SEMVER: 
${{ github.event.inputs.tag }} 65 | if: matrix.os == 'macos-latest' 66 | run: | 67 | OS="darwin" 68 | ARCH="${{ matrix.arch }}" 69 | mkdir tofndbin 70 | if [ "$ARCH" == "arm64" ] 71 | then 72 | rustup target add aarch64-apple-darwin 73 | cargo build --release --locked --target aarch64-apple-darwin 74 | mv /Users/runner/work/tofnd/tofnd/target/aarch64-apple-darwin/release/tofnd "./tofndbin/tofnd-$OS-$ARCH-$SEMVER" 75 | else 76 | cargo install --locked --path . 77 | mv "/Users/runner/work/tofnd/tofnd/target/release/tofnd" "./tofndbin/tofnd-$OS-$ARCH-$SEMVER" 78 | fi 79 | 80 | - name: Build Linux binaries 81 | env: 82 | SEMVER: ${{ github.event.inputs.tag }} 83 | if: matrix.os == 'ubuntu-latest' 84 | run: | 85 | OS="linux" 86 | ARCH="${{ matrix.arch }}" 87 | cargo install --locked --path . 88 | mkdir tofndbin 89 | mv "/home/runner/work/tofnd/tofnd/target/release/tofnd" "./tofndbin/tofnd-$OS-$ARCH-$SEMVER" 90 | 91 | - name: Test tofnd 92 | working-directory: ./tofndbin 93 | run: | 94 | file ./tofnd-* 95 | 96 | - name: Import GPG key 97 | id: import_gpg 98 | uses: crazy-max/ghaction-import-gpg@v4 99 | with: 100 | gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} 101 | passphrase: ${{ secrets.GPG_PASSPHRASE }} 102 | 103 | - name: Sign binaries 104 | working-directory: ./tofndbin 105 | env: 106 | SEMVER: ${{ github.event.inputs.tag }} 107 | run: | 108 | if [ "$RUNNER_OS" == "Linux" ] 109 | then 110 | OS="linux" 111 | else 112 | OS="darwin" 113 | fi 114 | ARCH="${{ matrix.arch }}" 115 | gpg --armor --detach-sign tofnd-"$OS"-"$ARCH"-"$SEMVER" 116 | 117 | - name: Create zip and sha256 files 118 | working-directory: ./tofndbin 119 | run: | 120 | for i in `ls | grep -v .asc` 121 | do 122 | shasum -a 256 $i | awk '{print $1}' > $i.sha256 123 | zip $i.zip $i 124 | shasum -a 256 $i.zip | awk '{print $1}' > $i.zip.sha256 125 | done 126 | 127 | - name: Upload binaries to release 128 | uses: svenstaro/upload-release-action@v2 129 | with: 130 | repo_token: ${{ secrets.GITHUB_TOKEN }} 131 | file: ./tofndbin/* 132 | tag: ${{ github.event.inputs.tag }} 133 | overwrite: true 134 | file_glob: true 135 | 136 | - name: Upload binaries to S3 137 | env: 138 | S3_PATH: s3://axelar-releases/tofnd/${{ github.event.inputs.tag }} 139 | run: | 140 | aws s3 cp ./tofndbin ${S3_PATH}/ --recursive 141 | 142 | release-docker: 143 | 144 | runs-on: ${{ matrix.os }} 145 | strategy: 146 | matrix: 147 | os: [ubuntu-latest] 148 | 149 | permissions: 150 | contents: write 151 | packages: write 152 | id-token: write 153 | 154 | steps: 155 | 156 | - name: Checkout code for docker image build 157 | uses: actions/checkout@v4 158 | with: 159 | fetch-depth: '0' 160 | ref: ${{ github.event.inputs.tag }} 161 | submodules: recursive 162 | 163 | - name: Install Cosign 164 | if: matrix.os == 'ubuntu-latest' 165 | uses: sigstore/cosign-installer@v3.3.0 166 | with: 167 | cosign-release: 'v2.2.2' 168 | 169 | - name: Install SSH key 170 | if: matrix.os == 'ubuntu-latest' 171 | uses: webfactory/ssh-agent@v0.4.1 172 | with: 173 | ssh-private-key: ${{ secrets.CICD_RSA_KEY }} 174 | 175 | - name: Build docker image 176 | if: matrix.os == 'ubuntu-latest' 177 | run: | 178 | make docker-image 179 | 180 | - name: Login to DockerHub 181 | if: matrix.os == 'ubuntu-latest' 182 | uses: docker/login-action@v1 183 | with: 184 | username: ${{ secrets.DOCKER_HUB_USERNAME }} 185 | password: ${{ secrets.DOCKER_HUB_TOKEN }} 186 | 187 | - name: Push to DockerHub (release) 188 | if: matrix.os == 'ubuntu-latest' 189 | run: | 190 | docker tag axelar/tofnd:latest 
axelarnet/tofnd:${{ github.event.inputs.tag }} 191 | docker push axelarnet/tofnd:${{ github.event.inputs.tag }} 192 | 193 | - name: Sign the images with GitHub OIDC 194 | if: matrix.os == 'ubuntu-latest' 195 | run: cosign sign -y --oidc-issuer https://token.actions.githubusercontent.com ${TAGS} 196 | env: 197 | TAGS: axelarnet/tofnd:${{ github.event.inputs.tag }} 198 | COSIGN_EXPERIMENTAL: 1 199 | -------------------------------------------------------------------------------- /src/kv_manager/kv.rs: -------------------------------------------------------------------------------- 1 | //! Public API for kvstore operations 2 | //! Errors are mapped to [super::error::KvError] 3 | 4 | use crate::encrypted_sled::{self, Password}; 5 | 6 | use super::{ 7 | error::{KvError::*, KvResult}, 8 | sled_bindings::{handle_delete, handle_exists, handle_get, handle_put, handle_reserve}, 9 | types::{ 10 | Command::{self, *}, 11 | KeyReservation, DEFAULT_KV_NAME, DEFAULT_KV_PATH, 12 | }, 13 | }; 14 | use serde::{de::DeserializeOwned, Serialize}; 15 | use std::{fmt::Debug, path::PathBuf}; 16 | use tokio::sync::{mpsc, oneshot}; 17 | 18 | // logging 19 | use tracing::{info, warn}; 20 | 21 | #[derive(Clone)] 22 | pub struct Kv { 23 | sender: mpsc::UnboundedSender>, 24 | } 25 | 26 | // database functionality using the "actor" pattern (Kv is the "handle"): https://ryhl.io/blog/actors-with-tokio/ 27 | // see also https://tokio.rs/tokio/tutorial/channels 28 | impl Kv 29 | where 30 | V: Debug + Send + Sync + Serialize + DeserializeOwned, 31 | { 32 | /// Creates a new kv service. Returns [InitErr] on failure. 33 | /// the path of the kvstore is `root_path` + "/kvstore/" + `kv_name` 34 | pub fn new(root_path: PathBuf, password: Password) -> KvResult { 35 | let kv_path = root_path.join(DEFAULT_KV_PATH).join(DEFAULT_KV_NAME); 36 | // use to_string_lossy() instead of to_str() to avoid handling Option<&str> 37 | let kv_path = kv_path.to_string_lossy().to_string(); 38 | Self::with_db_name(kv_path, password) 39 | } 40 | 41 | /// Creates a kvstore at `full_db_name` and spawns a new kv_manager. Returns [InitErr] on failure. 42 | /// `full_db_name` is the name of the path of the kvstrore + its name 43 | /// Example: ~/tofnd/kvstore/database_1 44 | pub fn with_db_name(full_db_name: String, password: Password) -> KvResult { 45 | let (sender, rx) = mpsc::unbounded_channel(); 46 | 47 | // get kv store from db name before entering the kv_cmd_handler because 48 | // it's more convenient to return an error from outside of a tokio::span 49 | let kv = get_kv_store(&full_db_name, password)?; 50 | 51 | tokio::spawn(kv_cmd_handler(rx, kv)); 52 | Ok(Self { sender }) 53 | } 54 | 55 | /// Reserves a key in the kvstore with [super::types::DEFAULT_RESERV] value. 56 | /// Returns [ReserveErr] or [SendErr] on failure. 57 | pub async fn reserve_key(&self, key: String) -> KvResult { 58 | let (resp_tx, resp_rx) = oneshot::channel(); 59 | self.sender 60 | .send(ReserveKey { key, resp: resp_tx }) 61 | .map_err(|err| SendErr(err.to_string()))?; 62 | resp_rx.await?.map_err(ReserveErr) 63 | } 64 | 65 | /// Unreserves an existing reservation 66 | #[allow(dead_code)] 67 | pub async fn unreserve_key(&self, reservation: KeyReservation) { 68 | let _ = self.sender.send(UnreserveKey { reservation }); 69 | } 70 | 71 | /// Puts a new value given a [super::types::KeyReservation] 72 | /// Returns [PutErr] or [SendErr] on failure. 
73 | pub async fn put(&self, reservation: KeyReservation, value: V) -> KvResult<()> { 74 | let (resp_tx, resp_rx) = oneshot::channel(); 75 | self.sender 76 | .send(Put { 77 | reservation, 78 | value, 79 | resp: resp_tx, 80 | }) 81 | .map_err(|e| SendErr(e.to_string()))?; 82 | resp_rx.await?.map_err(PutErr) 83 | } 84 | 85 | /// Gets a value given a key 86 | /// Returns [GetErr] or [SendErr] on failure. 87 | pub async fn get(&self, key: &str) -> KvResult { 88 | let (resp_tx, resp_rx) = oneshot::channel(); 89 | self.sender 90 | .send(Get { 91 | key: key.to_string(), 92 | resp: resp_tx, 93 | }) 94 | .map_err(|e| SendErr(e.to_string()))?; 95 | resp_rx.await?.map_err(GetErr) 96 | } 97 | 98 | /// Deletes an unreserved key 99 | /// Returns [DeleteErr] or [SendErr] on failure. 100 | pub async fn delete(&self, key: &str) -> KvResult<()> { 101 | let (resp_tx, resp_rx) = oneshot::channel(); 102 | self.sender 103 | .send(Delete { 104 | key: key.to_string(), 105 | resp: resp_tx, 106 | }) 107 | .map_err(|e| SendErr(e.to_string()))?; 108 | resp_rx.await?.map_err(DeleteErr) 109 | } 110 | 111 | /// Checks if a key exists in the kvstore 112 | /// Returns [ExistsErr] or [SendErr] on failure. 113 | pub async fn exists(&self, key: &str) -> KvResult { 114 | let (resp_tx, resp_rx) = oneshot::channel(); 115 | self.sender 116 | .send(Exists { 117 | key: key.to_string(), 118 | resp: resp_tx, 119 | }) 120 | .map_err(|e| SendErr(e.to_string()))?; 121 | resp_rx.await?.map_err(ExistsErr) 122 | } 123 | } 124 | 125 | /// Returns the db with name `db_name`, or creates a new if such DB does not exist 126 | /// Returns [sled::Error] on failure. 127 | /// Default path DB path is the executable's directory; The caller can specify a 128 | /// full path followed by the name of the DB 129 | /// Usage: 130 | /// let my_db = get_kv_store(&"my_current_dir_db")?; 131 | /// let my_db = get_kv_store(&"/tmp/my_tmp_bd")?; 132 | pub fn get_kv_store( 133 | db_name: &str, 134 | password: Password, 135 | ) -> encrypted_sled::Result { 136 | // create/open DB 137 | info!("START: decrypt kvstore"); 138 | let kv = encrypted_sled::Db::open(db_name, password)?; 139 | info!("DONE: decrypt kvstore"); 140 | 141 | // log whether the DB was newly created or not 142 | if kv.was_recovered() { 143 | info!("kv_manager found existing db [{}]", db_name); 144 | } else { 145 | info!( 146 | "kv_manager cannot open existing db [{}]. 
creating new db", 147 | db_name 148 | ); 149 | } 150 | Ok(kv) 151 | } 152 | 153 | // private handler function to process commands as per the "actor" pattern (see above) 154 | async fn kv_cmd_handler( 155 | mut rx: mpsc::UnboundedReceiver>, 156 | kv: encrypted_sled::Db, 157 | ) { 158 | // if resp.send() fails then log a warning and continue 159 | // see discussion https://github.com/axelarnetwork/tofnd/pull/15#discussion_r595426775 160 | while let Some(cmd) = rx.recv().await { 161 | // TODO better error handling and logging: we should log when `handle_*` fails 162 | // TODO refactor repeated code 163 | match cmd { 164 | ReserveKey { key, resp } => { 165 | if resp.send(handle_reserve(&kv, key)).is_err() { 166 | warn!("receiver dropped"); 167 | } 168 | } 169 | UnreserveKey { reservation } => { 170 | let _ = kv.remove(&reservation.key); 171 | } 172 | Put { 173 | reservation, 174 | value, 175 | resp, 176 | } => { 177 | if resp.send(handle_put(&kv, reservation, value)).is_err() { 178 | warn!("receiver dropped"); 179 | } 180 | } 181 | Get { key, resp } => { 182 | if resp.send(handle_get(&kv, key)).is_err() { 183 | warn!("receiver dropped"); 184 | } 185 | } 186 | Exists { key, resp } => { 187 | if resp.send(handle_exists(&kv, &key)).is_err() { 188 | warn!("receiver dropped"); 189 | } 190 | } 191 | Delete { key, resp } => { 192 | if resp.send(handle_delete(&kv, key)).is_err() { 193 | warn!("receiver dropped"); 194 | } 195 | } 196 | } 197 | } 198 | info!("kv_manager stop"); 199 | } 200 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # tofnd: A cryptographic signing service 2 | 3 | Tofnd is a [gRPC](https://grpc.io/) server written in Rust that wraps the [tofn](https://github.com/axelarnetwork/tofn) cryptography library. 4 | 5 | ## Install 6 | 7 | Install `protoc` 8 | 9 | ```bash 10 | # Ubuntu 11 | sudo apt install protobuf-compiler 12 | 13 | # MacOS 14 | brew install protobuf 15 | ``` 16 | 17 | ### Cargo 18 | 19 | ```bash 20 | cargo install tofnd 21 | ``` 22 | 23 | ### Releases 24 | 25 | Pre-built releases can be found [here](https://github.com/axelarnetwork/tofnd/releases) 26 | 27 | ### Source 28 | 29 | ```bash 30 | git clone git@github.com:axelarnetwork/tofnd.git --recursive 31 | ``` 32 | 33 | ```bash 34 | cargo install --locked --path . 35 | ``` 36 | 37 | ```bash 38 | ./target/release/tofnd --version 39 | ``` 40 | 41 | ## Development 42 | 43 | Run tests: 44 | 45 | ```bash 46 | cargo test --release 47 | ``` 48 | 49 | Generate golden files for relevant tests: 50 | 51 | ```bash 52 | GOLDIE_UPDATE=1 cargo test --release 53 | ``` 54 | 55 | ## Running the server 56 | 57 | ```bash 58 | # Initialize tofnd 59 | ./tofnd -m create 60 | 61 | # IMPORTANT: store the content of ./.tofnd/export file at a safe, offline place, and then delete the file 62 | rm ./.tofnd/export 63 | 64 | # start tofnd daemon 65 | ./tofnd 66 | ``` 67 | 68 | Terminate the server with `ctrl+C`. 69 | 70 | ## Password 71 | 72 | By default, `tofnd` prompts for a password from stdin immediately upon launch. This password is used to encrypt on-disk storage. It is the responsibility of the user to keep this password safe. 73 | 74 | Users may automate password entry as they see fit. Some examples follow. These examples are not necessarily secure as written---it's the responsibility of the user to secure password entry. 
75 | 76 | ```bash 77 | # feed password from MacOS keyring 78 | security find-generic-password -a $(whoami) -s "tofnd" -w | ./tofnd 79 | 80 | # feed password from 1password-cli 81 | op get item tofnd --fields password | ./tofnd 82 | 83 | # feed password from Pass 84 | pass show tofnd | ./tofnd 85 | 86 | # feed password from environment variable `PASSWORD` 87 | echo $PASSWORD | ./tofnd 88 | 89 | # feed password from a file `password.txt` 90 | cat ./password.txt | ./tofnd 91 | ``` 92 | 93 | Sophisticated users may explicitly opt out of password entry via the `--no-password` terminal argument (see below). In this case, on-disk storage is not secure---it is the responsibility of the user to take additional steps to secure on-disk storage. 94 | 95 | ## Command line arguments 96 | 97 | We use [clap](https://clap.rs/) to manage command line arguments. 98 | 99 | Users can specify: 100 | 101 | 1. Tofnd's root folder. Use `--directory` or `-d` to specify a full or a relative path. If no argument is provided, then the environment variable `TOFND_HOME` is used. If no environment variable is set either, the default `./tofnd` directory is used. 102 | 2. The port number of the gRPC server (default is 50051). 103 | 3. `mnemonic` operations for their `tofnd` instance (default is `Existing`). 104 | For more information, see on mnemonic options, see [Mnemonic](#mnemonic). 105 | 4. By default, `tofnd` expects a password from the standard input. Users that don't want to use passwords can use the `--no-password` flag. **Attention: Use `--no-password` only for testing .** 106 | 107 | ```text 108 | A cryptographic signing service 109 | 110 | USAGE: 111 | tofnd [FLAGS] [OPTIONS] 112 | 113 | FLAGS: 114 | --no-password Skip providing a password. Disabled by default. **Important note** If --no-password is set, the 115 | a default (and public) password is used to encrypt. 116 | -h, --help Prints help information 117 | -V, --version Prints version information 118 | 119 | OPTIONS: 120 | -a, --address [default: 0.0.0.0] 121 | -d, --directory [env: TOFND_HOME=] [default: .tofnd] 122 | -m, --mnemonic [default: existing] [possible values: existing, create, import, export] 123 | -p, --port [default: 50051] 124 | ``` 125 | 126 | ## Docker 127 | 128 | ### Docker Setup 129 | 130 | To setup a `tofnd` container, use the `create` mnemonic command: 131 | 132 | ```bash 133 | docker-compose run -e MNEMONIC_CMD=create tofnd 134 | ``` 135 | 136 | This will initialize `tofnd`, and then exit. 137 | 138 | ### Execution 139 | 140 | To run a `tofnd` daemon inside a container, run: 141 | 142 | ```bash 143 | docker-compose up 144 | ``` 145 | 146 | ### Storage 147 | 148 | We use [data containers](https://docs.docker.com/engine/reference/commandline/volume_create/) to persist data across restarts. To clean up storage, remove all `tofnd` containers, and run 149 | 150 | ```bash 151 | docker volume rm tofnd_tofnd 152 | ``` 153 | 154 | ### Testing 155 | 156 | For testing purposes, `docker-compose.test.yml` is available, which is equivelent to `./tofnd --no-password`. To spin up a test `tofnd` container, run 157 | 158 | ```bash 159 | docker-compose -f docker-compose.test.yml up 160 | ``` 161 | 162 | ### The `auto` command 163 | 164 | In containerized environments the `auto` mnemonic command can be used. This command is implemented in `entrypoint.sh` and does the following: 165 | 166 | 1. Try to use existing mnemonic. If successful then launch `tofnd` server. 167 | 2. Try to import a mnemonic from file. If successful then launch `tofnd` server. 
168 | 3. Create a new mnemonic. The newly created mnemonic is automatically written to the file `TOFND_HOME/export`---rename this file to `TOFND_HOME/import` so as to unblock future executions of tofnd. Then launch `tofnd` server. 169 | 170 | The rationale behind `auto` is that users can frictionlessly launch and restart their tofnd nodes without the need to execute multiple commands. 171 | `auto` is currently the default command only in `docker-compose.test.yml`, but users can edit the `docker-compose.yml` to use it at their own discretion. 172 | 173 | **Attention:** `auto` leaves the mnemonic on plain text on disk. You should remove the `TOFND_HOME/import` file and store the mnemonic at a safe, offline place. 174 | 175 | ## Mnemonic 176 | 177 | `Tofnd` uses the [tiny-bip39](https://docs.rs/crate/tiny-bip39) crate to enable users manage mnemonic passphrases. Currently, each party can use only one passphrase. 178 | 179 | ### Mnemonic options 180 | 181 | The command line API supports the following commands: 182 | 183 | * `Existing` Starts the gRPC daemon using an existing mnemonic; Fails if no mnemonic exist. 184 | 185 | * `Create` Creates a new mnemonic, inserts it in the kv-store, exports it to a file and exits; Fails if a mnemonic already exists. 186 | 187 | * `Import` Prompts user to give a new mnemonic from standard input, inserts it in the kv-store and exits; Fails if a mnemonic exists or if the provided string is not a valid bip39 mnemonic. 188 | 189 | * `Export` Writes the existing mnemonic to _/.tofnd/export_ and exits; Succeeds when there is an existing mnemonic. Fails if no mnemonic is stored, or the export file already exists. 190 | 191 | ## Zeroization 192 | 193 | We use the [zeroize](https://docs.rs/zeroize/1.1.1/zeroize/) crate to clear sensitive info for memory as a good practice. The data we clean are related to the mnemonic: 194 | 195 | 1. entropy 196 | 2. passwords 197 | 198 | Note that, [tiny-bip39](https://docs.rs/crate/tiny-bip39) also uses `zeroize` internally. 199 | 200 | ## KV Store 201 | 202 | To persist information between different gRPCs (i.e. _keygen_ and _sign_), we use a key-value storage based on [sled](https://sled.rs/). 203 | 204 | `Tofnd` uses an encrypted mnemonic KV Store which stores the entropy of a mnemonic passphrase. This entropy is used to derive user's keys. The KV Store is encrypted with a password provided by the user. The password is used to derive a key that encrypts the KV Store. 205 | 206 | ## Threshold cryptography 207 | 208 | For an implementation of the [GG20](https://eprint.iacr.org/2020/540.pdf) threshold-ECDSA protocol, 209 | see this version of [tofnd](https://github.com/axelarnetwork/tofnd/tree/v0.10.1). The GG20 protocol implementation should not be considered ready for production since it doesn't protect against recently discovered attacks on the protocol implementation. This was removed from `tofnd` as it is not being used in the Axelar protocol. 210 | 211 | ## License 212 | 213 | All crates licensed under either of 214 | 215 | * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) 216 | * [MIT license](http://opensource.org/licenses/MIT) 217 | 218 | at your option. 219 | 220 | ## Contribution 221 | 222 | Unless you explicitly state otherwise, any contribution intentionally submitted 223 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 224 | dual licensed as above, without any additional terms or conditions. 
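As a concrete companion to the KV Store section above, the sketch below shows the general shape of the password-based encryption it describes: derive a symmetric key from the user's password with scrypt, then seal each value with XChaCha20-Poly1305 under a fresh random nonce. This is a simplified illustration rather than tofnd's actual code (the real logic lives in `src/encrypted_sled`), and the `scrypt`, `chacha20poly1305`, and `rand` crate calls and parameters are assumptions made for the example.

```rust
use chacha20poly1305::aead::{Aead, KeyInit};
use chacha20poly1305::{Key, XChaCha20Poly1305, XNonce};
use rand::RngCore;

/// Derive a key from `password` and `salt`, then encrypt `value`.
/// The random nonce is returned so it can be stored next to the ciphertext for decryption.
fn seal(password: &[u8], salt: &[u8], value: &[u8]) -> anyhow::Result<(Vec<u8>, XNonce)> {
    // password-based key derivation (illustrative scrypt parameters)
    let mut key = Key::default();
    let params = scrypt::Params::new(15, 8, 1, 32).map_err(|e| anyhow::anyhow!(e))?;
    scrypt::scrypt(password, salt, &params, key.as_mut_slice()).map_err(|e| anyhow::anyhow!(e))?;

    // encrypt under a fresh random nonce
    // (a production implementation would also zeroize `key` once the cipher is built)
    let cipher = XChaCha20Poly1305::new(&key);
    let mut nonce = XNonce::default();
    rand::thread_rng().fill_bytes(nonce.as_mut_slice());
    let ciphertext = cipher
        .encrypt(&nonce, value)
        .map_err(|e| anyhow::anyhow!(e))?;

    Ok((ciphertext, nonce))
}
```

Decryption reverses the process: re-derive the key from the password and the stored salt, then open the ciphertext with the nonce that was saved alongside it.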
225 | -------------------------------------------------------------------------------- /src/encrypted_sled/kv.rs: -------------------------------------------------------------------------------- 1 | //! Wrap [sled] with [chacha20poly1305] encryption. An [XChaCha20Entropy] is 2 | //! used as [XChaCha20Poly1305] cipher key to create an [EncryptedDb]. 3 | //! A new random [XChaCha20Nonce] is created every time a new value needs to be 4 | //! inserted, forming a [EncryptedRecord]:. The nonce is later 5 | //! used to decrypt and retrieve the originally inserted value. 6 | 7 | use std::convert::TryInto; 8 | 9 | use chacha20poly1305::aead::{AeadInPlace, KeyInit}; 10 | use chacha20poly1305::{self, XChaCha20Poly1305}; 11 | use rand::RngCore; 12 | 13 | use sled::IVec; 14 | use zeroize::Zeroize; 15 | 16 | use super::constants::*; 17 | use super::password::{Password, PasswordSalt}; 18 | use super::record::EncryptedRecord; 19 | use super::result::{EncryptedDbError::*, EncryptedDbResult}; 20 | 21 | /// A [sled] kv store with [XChaCha20Poly1305] value encryption. 22 | pub struct EncryptedDb { 23 | kv: sled::Db, 24 | cipher: XChaCha20Poly1305, 25 | } 26 | 27 | impl EncryptedDb { 28 | /// create a new [EncryptedDb] that wraps sled::open(db_name). 29 | /// Retrieves [XChaCha20Entropy] from a password-based-key-derivation-function and 30 | /// verifies that the password is valid. 31 | /// See [crate::password] for more info on pdkdf. 32 | pub fn open
<P>
(db_name: P, password: Password) -> EncryptedDbResult 33 | where 34 | P: AsRef, 35 | { 36 | let kv = sled::open(db_name).map_err(CorruptedKv)?; 37 | 38 | let password_salt: PasswordSalt = if kv.was_recovered() { 39 | // existing kv: get the existing password salt 40 | kv.get(PASSWORD_SALT_KEY)? 41 | .ok_or(MissingPasswordSalt)? 42 | .try_into()? 43 | } else { 44 | // new kv: choose a new password salt and store it 45 | let mut password_salt = [0u8; 32]; 46 | rand::thread_rng().fill_bytes(&mut password_salt); 47 | kv.insert(PASSWORD_SALT_KEY, &password_salt)?; 48 | password_salt.into() 49 | }; 50 | 51 | // zeroize key since we are no longer using it after creating cipher 52 | let mut key = Self::chacha20poly1305_kdf(password, password_salt)?; 53 | let cipher = XChaCha20Poly1305::new(&key); 54 | key.zeroize(); 55 | 56 | let encrypted_db = EncryptedDb { kv, cipher }; 57 | 58 | // verify that [password] is correct 59 | if encrypted_db.kv.was_recovered() { 60 | // existing kv: can we decrypt the verification value? 61 | encrypted_db 62 | .get(PASSWORD_VERIFICATION_KEY) 63 | .map_err(|_| WrongPassword)?; 64 | } else { 65 | // new kv: encrypt the verification value 66 | encrypted_db.insert(PASSWORD_VERIFICATION_KEY, PASSWORD_VERIFICATION_VALUE)?; 67 | } 68 | 69 | Ok(encrypted_db) 70 | } 71 | 72 | /// Recommended default params. Should NOT be changed without a migration of the kvstore. 73 | /// See [scrypt::Params] for more info. These are fixed instead of using [scrypt::Params::default()] 74 | /// to avoid regression if the default recommendation changes. 75 | fn scrypt_params() -> EncryptedDbResult { 76 | scrypt::Params::new(15, 8, 1, 32).map_err(PasswordScryptParams) 77 | } 78 | 79 | fn chacha20poly1305_kdf( 80 | password: Password, 81 | salt: PasswordSalt, 82 | ) -> EncryptedDbResult { 83 | let mut output = chacha20poly1305::Key::default(); 84 | 85 | scrypt::scrypt( 86 | password.as_ref(), 87 | salt.as_ref(), 88 | &Self::scrypt_params()?, 89 | output.as_mut_slice(), 90 | )?; 91 | 92 | Ok(output) 93 | } 94 | 95 | /// get a new random nonce to use for value encryption using [rand::thread_rng] 96 | fn generate_nonce() -> chacha20poly1305::XNonce { 97 | let mut bytes = chacha20poly1305::XNonce::default(); 98 | rand::thread_rng().fill_bytes(bytes.as_mut_slice()); 99 | bytes 100 | } 101 | 102 | /// create a new [EncryptedRecord] containing an encrypted value and a newly derived random nonce 103 | fn encrypt(&self, value: V) -> EncryptedDbResult 104 | where 105 | V: Into, 106 | { 107 | let nonce = Self::generate_nonce(); 108 | 109 | self.encrypt_with_nonce(value, nonce) 110 | } 111 | 112 | /// create a new [EncryptedRecord] containing an encrypted value and a given nonce. 
113 | fn encrypt_with_nonce( 114 | &self, 115 | value: V, 116 | nonce: chacha20poly1305::XNonce, 117 | ) -> EncryptedDbResult 118 | where 119 | V: Into, 120 | { 121 | let mut value = value.into().to_vec(); 122 | 123 | // encrypt value 124 | self.cipher 125 | .encrypt_in_place(&nonce, b"", &mut value) 126 | .map_err(|e| Encryption(e.to_string()))?; 127 | 128 | // return record 129 | Ok(EncryptedRecord::new(value, nonce)) 130 | } 131 | 132 | /// derive a decrypted value from a [EncryptedRecord] containing an encrypted value and a random nonce 133 | fn decrypt_record_value(&self, record: EncryptedRecord) -> EncryptedDbResult { 134 | let (mut value, nonce) = record.into(); 135 | 136 | // decrypt value 137 | self.cipher 138 | .decrypt_in_place(&nonce, b"", &mut value) 139 | .map_err(|e| Decryption(e.to_string()))?; 140 | 141 | // return decrypted value 142 | Ok(value.into()) 143 | } 144 | 145 | /// derive a decrypted value from [EncryptedRecord] bytes 146 | fn decrypt(&self, record_bytes: Option) -> EncryptedDbResult> { 147 | let res = match record_bytes { 148 | Some(record_bytes) => { 149 | let record = EncryptedRecord::from_bytes(&record_bytes)?; 150 | let decrypted_value_bytes = self.decrypt_record_value(record)?; 151 | Some(decrypted_value_bytes) 152 | } 153 | None => None, 154 | }; 155 | Ok(res) 156 | } 157 | 158 | /// Insert a key to a new encrypted value, returning and decrypting the last value if it was set. 159 | pub fn insert(&self, key: K, value: V) -> EncryptedDbResult> 160 | where 161 | K: AsRef<[u8]>, 162 | V: Into, 163 | { 164 | let record = self.encrypt(value)?; 165 | let prev_record_bytes_opt = self.kv.insert(&key, record.to_bytes()?)?; 166 | self.decrypt(prev_record_bytes_opt) 167 | } 168 | 169 | /// Retrieve and decrypt a value from the `Tree` if it exists. 170 | pub fn get(&self, key: K) -> EncryptedDbResult> 171 | where 172 | K: AsRef<[u8]>, 173 | { 174 | let bytes_opt = self.kv.get(&key)?; 175 | self.decrypt(bytes_opt) 176 | } 177 | 178 | /// Returns `true` if the `Tree` contains a value for the specified key. 179 | pub fn contains_key(&self, key: K) -> EncryptedDbResult 180 | where 181 | K: AsRef<[u8]>, 182 | { 183 | Ok(self.kv.contains_key(&key)?) 184 | } 185 | 186 | /// Delete a value, decrypting and returning the old value if it existed. 187 | pub fn remove(&self, key: K) -> EncryptedDbResult> 188 | where 189 | K: AsRef<[u8]>, 190 | { 191 | let prev_val = self.kv.remove(&key)?; 192 | self.decrypt(prev_val) 193 | } 194 | 195 | /// Returns true if the database was recovered from a previous process. 196 | pub fn was_recovered(&self) -> bool { 197 | self.kv.was_recovered() 198 | } 199 | 200 | #[cfg(test)] 201 | pub fn flush(&self) -> EncryptedDbResult { 202 | Ok(self.kv.flush()?) 
203 | } 204 | } 205 | 206 | #[cfg(test)] 207 | mod tests { 208 | use chacha20poly1305::{KeyInit, XChaCha20Poly1305, XNonce}; 209 | 210 | use super::EncryptedDb; 211 | use crate::encrypted_sled::{password::PasswordSalt, Password}; 212 | 213 | #[test] 214 | fn chacha20poly1305_kdf_known_vector() { 215 | let password = Password::from("test_password"); 216 | let salt = PasswordSalt::from([2; 32]); 217 | 218 | let key = hex::encode(EncryptedDb::chacha20poly1305_kdf(password, salt).unwrap()); 219 | 220 | goldie::assert_json!(key); 221 | } 222 | 223 | #[test] 224 | fn encrypt_with_nonce_known_vector() { 225 | // Create a mock EncryptedDb with a deterministic cipher 226 | let mock_db = EncryptedDb { 227 | kv: sled::Config::new().temporary(true).open().unwrap(), 228 | cipher: XChaCha20Poly1305::new(&chacha20poly1305::Key::from([5u8; 32])), 229 | }; 230 | 231 | let value = b"test_value"; 232 | let nonce = XNonce::from([1u8; 24]); 233 | 234 | let encrypted_record = mock_db.encrypt_with_nonce(value, nonce).unwrap(); 235 | 236 | goldie::assert_json!(&encrypted_record); 237 | 238 | let decrypted_value = mock_db.decrypt_record_value(encrypted_record).unwrap(); 239 | assert_eq!(decrypted_value.as_ref(), value); 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /src/multisig/tests.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | addr, 3 | encrypted_sled::get_test_password, 4 | kv_manager::KvManager, 5 | proto::Algorithm, 6 | tests::{DEFAULT_TEST_IP, DEFAULT_TEST_PORT}, 7 | }; 8 | use tokio::{ 9 | self, 10 | net::TcpListener, 11 | sync::oneshot::{channel, Sender}, 12 | }; 13 | use tokio_stream::wrappers::TcpListenerStream; 14 | use tonic::transport::Channel; 15 | 16 | use super::service::MultisigService; 17 | 18 | use testdir::testdir; 19 | use tracing::error; 20 | use tracing_test::traced_test; 21 | 22 | use std::convert::TryInto; 23 | 24 | use crate::proto::{ 25 | key_presence_response::Response::Present, keygen_response::KeygenResponse, 26 | multisig_client::MultisigClient, multisig_server::MultisigServer, sign_response::SignResponse, 27 | KeyPresenceRequest, KeygenRequest, SignRequest, 28 | }; 29 | 30 | // set up tests 31 | async fn spin_test_service_and_client() -> (MultisigClient, Sender<()>) { 32 | // create root directory for service 33 | let root = testdir!(); 34 | 35 | // create a kv_manager 36 | let kv_manager = KvManager::new(root, get_test_password()) 37 | .unwrap() 38 | .handle_mnemonic(&crate::mnemonic::Cmd::Create) 39 | .await 40 | .unwrap(); 41 | 42 | // create service 43 | let service = MultisigServer::new(MultisigService::new(kv_manager)); 44 | 45 | // create incoming tcp server for service 46 | let incoming = TcpListener::bind(addr(DEFAULT_TEST_IP, DEFAULT_TEST_PORT).unwrap()) 47 | .await 48 | .unwrap(); 49 | 50 | // create shutdown channels 51 | let (shutdown_sender, shutdown_receiver) = channel::<()>(); 52 | 53 | // get server's address 54 | let server_addr = incoming.local_addr().unwrap(); 55 | 56 | // spin up multisig gRPC server with incoming shutdown 57 | tokio::spawn(async move { 58 | tonic::transport::Server::builder() 59 | .add_service(service) 60 | .serve_with_incoming_shutdown(TcpListenerStream::new(incoming), async { 61 | shutdown_receiver.await.unwrap(); 62 | }) 63 | .await 64 | .unwrap(); 65 | }); 66 | 67 | // create a client to multisig service 68 | let client = MultisigClient::connect(format!("http://{}", server_addr)) 69 | .await 70 | .unwrap(); 71 | 72 | // 
return the client and the shutdown channel for the service 73 | (client, shutdown_sender) 74 | } 75 | 76 | // dummy ctor for KeygenResult 77 | impl KeygenRequest { 78 | fn new(key_uid: &str, algorithm: Algorithm) -> KeygenRequest { 79 | KeygenRequest { 80 | key_uid: key_uid.to_string(), 81 | party_uid: String::default(), 82 | algorithm: algorithm as i32, 83 | } 84 | } 85 | } 86 | 87 | // dummy ctor for KeygenResult 88 | impl SignRequest { 89 | fn new(key_uid: &str, algorithm: Algorithm) -> SignRequest { 90 | SignRequest { 91 | key_uid: key_uid.to_string(), 92 | msg_to_sign: vec![32; 32], 93 | party_uid: String::default(), 94 | pub_key: vec![], 95 | algorithm: algorithm as i32, 96 | } 97 | } 98 | } 99 | 100 | // vec to array 101 | fn to_array(v: Vec) -> [T; N] { 102 | v.try_into() 103 | .unwrap_or_else(|v: Vec| panic!("Expected a Vec of length {} but it was {}", N, v.len())) 104 | } 105 | 106 | #[traced_test] 107 | #[tokio::test] 108 | async fn test_multisig_ecdsa_keygen_sign() { 109 | let key = "multisig key"; 110 | let (mut client, shutdown_sender) = spin_test_service_and_client().await; 111 | 112 | let request = KeygenRequest::new(key, Algorithm::Ecdsa); 113 | 114 | let response = client.keygen(request).await.unwrap().into_inner(); 115 | let pub_key = match response.keygen_response.unwrap() { 116 | KeygenResponse::PubKey(pub_key) => pub_key, 117 | KeygenResponse::Error(err) => { 118 | panic!("Got error from keygen: {}", err); 119 | } 120 | }; 121 | 122 | let request = SignRequest::new(key, Algorithm::Ecdsa); 123 | let msg_digest = request.msg_to_sign.as_slice().try_into().unwrap(); 124 | let response = client.sign(request).await.unwrap().into_inner(); 125 | let signature = match response.sign_response.unwrap() { 126 | SignResponse::Signature(signature) => signature, 127 | SignResponse::Error(err) => { 128 | panic!("Got error from sign: {}", err) 129 | } 130 | }; 131 | 132 | shutdown_sender.send(()).unwrap(); 133 | 134 | assert!(tofn::ecdsa::verify(&to_array(pub_key), &msg_digest, &signature,).unwrap()); 135 | } 136 | 137 | #[traced_test] 138 | #[tokio::test] 139 | async fn test_multisig_ed25519_keygen_sign() { 140 | let key = "multisig key"; 141 | let (mut client, shutdown_sender) = spin_test_service_and_client().await; 142 | 143 | let request = KeygenRequest::new(key, Algorithm::Ed25519); 144 | 145 | let response = client.keygen(request).await.unwrap().into_inner(); 146 | let pub_key = match response.keygen_response.unwrap() { 147 | KeygenResponse::PubKey(pub_key) => pub_key, 148 | KeygenResponse::Error(err) => { 149 | panic!("Got error from keygen: {}", err); 150 | } 151 | }; 152 | 153 | let request = SignRequest::new(key, Algorithm::Ed25519); 154 | let msg_digest = request.msg_to_sign.as_slice().try_into().unwrap(); 155 | let response = client.sign(request).await.unwrap().into_inner(); 156 | let signature = match response.sign_response.unwrap() { 157 | SignResponse::Signature(signature) => signature, 158 | SignResponse::Error(err) => { 159 | panic!("Got error from sign: {}", err) 160 | } 161 | }; 162 | 163 | shutdown_sender.send(()).unwrap(); 164 | 165 | assert!(tofn::ed25519::verify(&to_array(pub_key), &msg_digest, &signature,).unwrap()); 166 | } 167 | 168 | #[traced_test] 169 | #[tokio::test] 170 | async fn test_multisig_keygen_deterministic_and_unique_keys() { 171 | let key = "multisig key"; 172 | let (mut client, shutdown_sender) = spin_test_service_and_client().await; 173 | 174 | let mut seen_pub_keys = std::collections::HashSet::new(); 175 | 176 | for algorithm in 
[Algorithm::Ecdsa, Algorithm::Ed25519] { 177 | let request = KeygenRequest::new(key, algorithm); 178 | 179 | let response = client.keygen(request.clone()).await.unwrap().into_inner(); 180 | let pub_key1 = match response.keygen_response.unwrap() { 181 | KeygenResponse::PubKey(pub_key) => pub_key, 182 | KeygenResponse::Error(err) => { 183 | panic!("Got error from keygen: {}", err); 184 | } 185 | }; 186 | 187 | let response = client.keygen(request).await.unwrap().into_inner(); 188 | let pub_key2 = match response.keygen_response.unwrap() { 189 | KeygenResponse::PubKey(pub_key) => pub_key, 190 | KeygenResponse::Error(err) => { 191 | panic!("Got error from keygen: {}", err); 192 | } 193 | }; 194 | 195 | assert_eq!(pub_key1, pub_key2); 196 | 197 | assert!(seen_pub_keys.insert(pub_key1)); 198 | } 199 | 200 | shutdown_sender.send(()).unwrap(); 201 | } 202 | 203 | #[traced_test] 204 | #[tokio::test] 205 | async fn test_multisig_only_sign() { 206 | let key = "multisig key"; 207 | let (mut client, shutdown_sender) = spin_test_service_and_client().await; 208 | 209 | for algorithm in [Algorithm::Ecdsa, Algorithm::Ed25519] { 210 | let request = SignRequest::new(key, algorithm); 211 | let response = client.sign(request).await.unwrap().into_inner(); 212 | let _ = match response.sign_response.unwrap() { 213 | SignResponse::Signature(signature) => signature, 214 | SignResponse::Error(err) => { 215 | panic!("Got error from sign: {}", err) 216 | } 217 | }; 218 | } 219 | 220 | shutdown_sender.send(()).unwrap(); 221 | } 222 | 223 | #[traced_test] 224 | #[tokio::test] 225 | async fn test_multisig_short_key_fail() { 226 | let key = "k"; // too short key 227 | let (mut client, shutdown_sender) = spin_test_service_and_client().await; 228 | 229 | for algorithm in [Algorithm::Ecdsa, Algorithm::Ed25519] { 230 | let keygen_request = KeygenRequest::new(key, algorithm); 231 | let keygen_response = client.keygen(keygen_request).await.unwrap().into_inner(); 232 | 233 | if let KeygenResponse::Error(err) = keygen_response.clone().keygen_response.unwrap() { 234 | error!("{}", err); 235 | } 236 | assert!(matches!( 237 | keygen_response.keygen_response.unwrap(), 238 | KeygenResponse::Error(_) 239 | )); 240 | 241 | let sign_request = SignRequest::new(key, algorithm); 242 | let sign_response = client.sign(sign_request).await.unwrap().into_inner(); 243 | 244 | if let SignResponse::Error(err) = sign_response.clone().sign_response.unwrap() { 245 | error!("{}", err); 246 | } 247 | assert!(matches!( 248 | sign_response.sign_response.unwrap(), 249 | SignResponse::Error(_) 250 | )); 251 | } 252 | 253 | shutdown_sender.send(()).unwrap(); 254 | } 255 | 256 | #[traced_test] 257 | #[tokio::test] 258 | async fn test_multisig_truncated_msg_fail() { 259 | let key = "key-uid"; 260 | let (mut client, shutdown_sender) = spin_test_service_and_client().await; 261 | 262 | for algorithm in [Algorithm::Ecdsa, Algorithm::Ed25519] { 263 | // attempt sign with truncated msg digest 264 | let mut request = SignRequest::new(key, algorithm); 265 | request.msg_to_sign = vec![32; 31]; 266 | let response = client.sign(request.clone()).await.unwrap().into_inner(); 267 | if let SignResponse::Error(err) = response.clone().sign_response.unwrap() { 268 | error!("{}", err); 269 | } 270 | assert!(matches!( 271 | response.sign_response.unwrap(), 272 | SignResponse::Error(_) 273 | )); 274 | } 275 | 276 | shutdown_sender.send(()).unwrap(); 277 | } 278 | 279 | #[traced_test] 280 | #[tokio::test] 281 | async fn test_key_presence() { 282 | let (mut client, shutdown_sender) = 
spin_test_service_and_client().await; 283 | 284 | for algorithm in [Algorithm::Ecdsa, Algorithm::Ed25519] { 285 | let presence_request = KeyPresenceRequest { 286 | key_uid: "key_uid".to_string(), 287 | pub_key: vec![], 288 | algorithm: algorithm as i32, 289 | }; 290 | 291 | let response = client 292 | .key_presence(presence_request) 293 | .await 294 | .unwrap() 295 | .into_inner(); 296 | assert_eq!(response.response, Present as i32); 297 | } 298 | 299 | shutdown_sender.send(()).unwrap(); 300 | } 301 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2021 Axelar Foundation 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /src/mnemonic/cmd_handler.rs: -------------------------------------------------------------------------------- 1 | // TODO: consider moving cmd_handler in KvManager 2 | 3 | use super::{ 4 | bip39_bindings::{bip39_from_phrase, bip39_new_w24, bip39_seed}, 5 | results::mnemonic::{ 6 | InnerMnemonicError::*, InnerMnemonicResult, MnemonicError::*, MnemonicResult, SeedResult, 7 | }, 8 | types::{Entropy, Password}, 9 | }; 10 | use crate::kv_manager::{ 11 | error::{InnerKvError, KvError}, 12 | KeyReservation, KvManager, 13 | }; 14 | use tofn::sdk::api::{deserialize, serialize, SecretRecoveryKey}; 15 | 16 | use rpassword::read_password; 17 | use std::convert::TryInto; 18 | use tracing::{error, info}; 19 | 20 | // default key to store mnemonic 21 | const MNEMONIC_KEY: &str = "mnemonic"; 22 | 23 | // key to store mnemonic count 24 | const MNEMONIC_COUNT_KEY: &str = "mnemonic_count"; 25 | 26 | // A user may decide to protect their mnemonic with a passphrase. 27 | // We pass an empty password since the mnemonic has sufficient entropy and will be backed up. 28 | // https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki#from-mnemonic-to-seed 29 | const MNEMONIC_PASSWORD: &str = ""; 30 | 31 | #[derive(Clone, Debug)] 32 | pub enum Cmd { 33 | Existing, 34 | Create, 35 | Import, 36 | Export, 37 | Rotate, 38 | } 39 | 40 | impl Cmd { 41 | pub fn from_string(cmd_str: &str) -> MnemonicResult { 42 | let cmd = match cmd_str { 43 | "existing" => Self::Existing, 44 | "create" => Self::Create, 45 | "import" => Self::Import, 46 | "export" => Self::Export, 47 | "rotate" => Self::Rotate, 48 | _ => return Err(WrongCommand(cmd_str.to_string())), 49 | }; 50 | Ok(cmd) 51 | } 52 | /// On [Cmd::Existing], continue tofnd. 53 | /// On [Cmd::Create], [Cmd::Import] or [Cmd::Export], exit tofnd. 54 | pub fn exit_after_cmd(&self) -> bool { 55 | match &self { 56 | Cmd::Existing => false, 57 | Cmd::Create => true, 58 | Cmd::Import => true, 59 | Cmd::Export => true, 60 | Cmd::Rotate => true, 61 | } 62 | } 63 | } 64 | 65 | /// implement mnemonic-specific functions for KvManager 66 | impl KvManager { 67 | /// get mnemonic seed from kv-store 68 | pub async fn seed(&self) -> SeedResult { 69 | self.get_seed(MNEMONIC_KEY).await 70 | } 71 | 72 | /// Get mnemonic seed under key 73 | pub async fn get_seed(&self, key: &str) -> SeedResult { 74 | let mnemonic = self 75 | .kv() 76 | .get(key) 77 | .await? 78 | .try_into() 79 | .map_err(KvError::GetErr)?; 80 | 81 | Ok( 82 | bip39_seed(mnemonic, Password(MNEMONIC_PASSWORD.to_owned()))? 
83 | .as_bytes() 84 | .try_into()?, 85 | ) 86 | } 87 | 88 | pub async fn seed_key_iter(&self) -> InnerMnemonicResult<Vec<String>> { 89 | let count = self.seed_count().await?; 90 | if count == 0 { 91 | return Err(KvErr(KvError::GetErr(InnerKvError::LogicalErr( 92 | "no mnemonic found".to_owned(), 93 | )))); 94 | } 95 | 96 | // To optimize performance, iterate from the latest mnemonic to the oldest 97 | // Latest mnemonic is stored under 'mnemonic' 98 | // Second latest mnemonic is stored under 'mnemonic_x' where x is the seed count 99 | // Older mnemonics are stored in decreasing order of index 100 | let mut keys = vec![String::from(MNEMONIC_KEY)]; 101 | 102 | for i in (1..count).rev() { 103 | keys.push(format!("{}_{}", MNEMONIC_KEY, i)) 104 | } 105 | 106 | Ok(keys) 107 | } 108 | 109 | /// async function that handles all mnemonic commands 110 | pub async fn handle_mnemonic(self, cmd: &Cmd) -> MnemonicResult<Self> { 111 | match cmd { 112 | Cmd::Existing => self.handle_existing().await.map_err(ExistingErr)?, 113 | Cmd::Create => self.handle_create().await.map_err(CreateErr)?, 114 | Cmd::Import => self.handle_import().await.map_err(ImportErr)?, 115 | Cmd::Export => self.handle_export().await.map_err(ExportErr)?, 116 | Cmd::Rotate => self.handle_rotate().await.map_err(RotateErr)?, 117 | }; 118 | Ok(self) 119 | } 120 | 121 | /// use the existing mnemonic to spin up a tofnd daemon. 122 | /// if an export file exists in the default path, returns an error. 123 | /// if no mnemonic record exists in the kv-store, returns an error. 124 | async fn handle_existing(&self) -> InnerMnemonicResult<()> { 125 | // if there is an exported mnemonic, raise an error and don't start the daemon. 126 | // we do this to prevent users from accidentally leaving their mnemonic on disk in plain text 127 | self.io().check_if_not_exported()?; 128 | 129 | // try to get mnemonic from kv-store 130 | match self.kv().exists(MNEMONIC_KEY).await? { 131 | true => Ok(()), 132 | false => Err(KvErr(KvError::ExistsErr(InnerKvError::LogicalErr( 133 | "Mnemonic not found".to_string(), 134 | )))), 135 | } 136 | } 137 | 138 | /// Get the mnemonic count in the kv store. 139 | pub async fn seed_count(&self) -> InnerMnemonicResult<u32> { 140 | match self.kv().get(MNEMONIC_COUNT_KEY).await { 141 | Ok(encoded_count) => Ok(deserialize(&encoded_count) 142 | .ok_or(KvErr(KvError::GetErr(InnerKvError::DeserializationErr)))?), 143 | // if MNEMONIC_COUNT_KEY does not exist then mnemonic count is either 0 or 1 144 | Err(KvError::GetErr(_)) => Ok(match self.kv().exists(MNEMONIC_KEY).await? { 145 | true => 1, 146 | false => 0, 147 | }), 148 | Err(_) => { 149 | error!(""); 150 | Err(PasswordErr(String::from(""))) 151 | } 152 | } 153 | } 154 | 155 | /// Get the next mnemonic key id. 156 | async fn get_next_key(&self) -> InnerMnemonicResult<(String, u32)> { 157 | let count = self.seed_count().await?; 158 | 159 | let key = match count { 160 | 0 => String::from(MNEMONIC_KEY), // latest mnemonic is preserved in the original key 161 | _ => std::format!("{}_{}", MNEMONIC_KEY, count), // count is 0-indexed 162 | }; 163 | 164 | Ok((key, count)) 165 | } 166 | 167 | /// inserts entropy to the kv-store 168 | /// takes ownership of entropy to delegate zeroization. 169 | async fn put_entropy( 170 | &self, 171 | reservation: KeyReservation, 172 | entropy: Entropy, 173 | ) -> InnerMnemonicResult<()> { 174 | match self 175 | .kv() 176 | .put(reservation, entropy.try_into().map_err(KvError::PutErr)?)
177 | .await 178 | { 179 | // if put is ok, write the phrase to a file 180 | Ok(()) => { 181 | info!("Mnemonic successfully added in kv store. Use the `-m export` command to retrieve it."); 182 | Ok(()) 183 | } 184 | // else return failure 185 | Err(err) => { 186 | error!("Cannot put mnemonic in kv store: {:?}", err); 187 | Err(KvErr(err)) 188 | } 189 | } 190 | } 191 | 192 | /// inserts entropy to the kv-store 193 | /// takes ownership of entropy to delegate zeroization. 194 | async fn handle_insert(&self, entropy: Entropy) -> InnerMnemonicResult<()> { 195 | let (key, count) = self.get_next_key().await?; 196 | 197 | info!( 198 | "Inserting mnemonic under key '{}' with total count '{}'", 199 | key, count 200 | ); 201 | 202 | let reservation = self.kv().reserve_key(key).await.map_err(|err| { 203 | error!("Cannot reserve mnemonic key: {:?}", err); 204 | KvErr(err) 205 | })?; 206 | 207 | // Insert before updating the count to minimize state corruption if it fails in the middle 208 | self.put_entropy(reservation, entropy).await?; 209 | 210 | // If delete isn't successful, the previous mnemonic count will still allow tofnd to work 211 | self.kv().delete(MNEMONIC_COUNT_KEY).await.map_err(|err| { 212 | error!("could not delete mnemonic count: {:?}", err); 213 | KvErr(err) 214 | })?; 215 | 216 | let count_reservation = self 217 | .kv() 218 | .reserve_key(MNEMONIC_COUNT_KEY.to_owned()) 219 | .await 220 | .map_err(|err| { 221 | error!("Cannot reserve mnemonic count key: {:?}", err); 222 | KvErr(err) 223 | })?; 224 | 225 | let encoded_count = serialize(&(count + 1)) 226 | .map_err(|_| KvErr(KvError::PutErr(InnerKvError::SerializationErr)))?; 227 | 228 | // If the new count isn't written, tofnd will still work with the latest mnemonic 229 | self.kv() 230 | .put(count_reservation, encoded_count) 231 | .await 232 | .map_err(|err| { 233 | error!("Could not update the mnemonic count in kv store: {:?}", err); 234 | KvErr(err) 235 | }) 236 | } 237 | 238 | /// Creates a new entropy, inserts the entropy in the kv-store and exports it to a file 239 | /// If a mnemonic already exists in the kv store or an exported file already exists in 240 | /// the default path, an error is produced 241 | async fn handle_create(&self) -> InnerMnemonicResult<()> { 242 | info!("Creating mnemonic"); 243 | 244 | if self.kv().exists(MNEMONIC_KEY).await? { 245 | error!("Mnemonic was already created"); 246 | return Err(KvErr(KvError::ReserveErr(InnerKvError::LogicalErr( 247 | "mnemonic was already present".to_owned(), 248 | )))); 249 | } 250 | 251 | // create a new entropy 252 | let new_entropy = bip39_new_w24(); 253 | 254 | self.handle_insert(new_entropy.clone()).await?; 255 | 256 | Ok(self.io().entropy_to_file(new_entropy)?) 257 | } 258 | 259 | /// Inserts a new mnemonic to the kv-store. 260 | /// If a mnemonic already exists in the kv store, a new entry is created 261 | /// storing it as a rotated out mnemonic. 
262 | async fn handle_import(&self) -> InnerMnemonicResult<()> { 263 | info!("Importing mnemonic"); 264 | let imported_phrase = Password(read_password().map_err(|e| PasswordErr(e.to_string()))?); 265 | let imported_entropy = bip39_from_phrase(imported_phrase)?; 266 | self.handle_insert(imported_entropy).await 267 | } 268 | 269 | /// Exports the current mnemonic to a file 270 | async fn handle_export(&self) -> InnerMnemonicResult<()> { 271 | info!("Exporting mnemonic"); 272 | 273 | // try to get mnemonic from kv-store 274 | let entropy = self 275 | .kv() 276 | .get(MNEMONIC_KEY) 277 | .await 278 | .map_err(|err| { 279 | error!("Did not find mnemonic in kv store {:?}", err); 280 | err 281 | })? 282 | .try_into() 283 | .map_err(KvError::GetErr)?; 284 | 285 | // write to file 286 | info!("Mnemonic found in kv store"); 287 | Ok(self.io().entropy_to_file(entropy)?) 288 | } 289 | 290 | /// Rotates out existing mnemonic for new one in the kv-store and exports it to a file 291 | /// If an exported file already exists in the default path, an error is produced 292 | async fn handle_rotate(&self) -> InnerMnemonicResult<()> { 293 | info!("Rotating mnemonic"); 294 | // create a new entropy 295 | let new_entropy = bip39_new_w24(); 296 | 297 | // export right away in case of intermediate failures 298 | self.io().entropy_to_file(new_entropy.clone())?; 299 | 300 | let current_entropy: Entropy = self 301 | .kv() 302 | .get(MNEMONIC_KEY) 303 | .await? 304 | .try_into() 305 | .map_err(KvError::GetErr)?; 306 | 307 | self.handle_insert(current_entropy.clone()).await?; 308 | 309 | info!("reserving mnemonic"); 310 | 311 | self.kv().delete(MNEMONIC_KEY).await.map_err(|err| { 312 | error!("could not delete mnemonic being rotated out: {:?}", err); 313 | KvErr(err) 314 | })?; 315 | 316 | let reservation = self 317 | .kv() 318 | .reserve_key(MNEMONIC_KEY.to_owned()) 319 | .await 320 | .map_err(|err| { 321 | error!("Cannot reserve mnemonic key: {:?}", err); 322 | KvErr(err) 323 | })?; 324 | 325 | self.put_entropy(reservation, new_entropy).await?; 326 | 327 | Ok(()) 328 | } 329 | } 330 | 331 | #[cfg(test)] 332 | mod tests { 333 | use std::{io::Read, path::PathBuf}; 334 | use testdir::testdir; 335 | 336 | use crate::{ 337 | encrypted_sled::get_test_password, 338 | kv_manager::{ 339 | error::{InnerKvError, KvError}, 340 | KvManager, 341 | }, 342 | mnemonic::results::{file_io::FileIoError, mnemonic::InnerMnemonicError}, 343 | }; 344 | 345 | use super::*; 346 | use tracing_test::traced_test; 347 | 348 | // create a service 349 | fn get_kv_manager(testdir: PathBuf) -> KvManager { 350 | // create test dirs 351 | KvManager::new(testdir, get_test_password()).unwrap() 352 | } 353 | 354 | #[traced_test] 355 | #[tokio::test] 356 | async fn test_create() { 357 | let testdir = testdir!(); 358 | // create a service 359 | let kv = get_kv_manager(testdir); 360 | // first attempt should succeed 361 | assert!(kv.handle_create().await.is_ok()); 362 | // second attempt should fail 363 | assert!(matches!( 364 | kv.handle_create().await, 365 | Err(InnerMnemonicError::KvErr(KvError::ReserveErr( 366 | InnerKvError::LogicalErr(_) 367 | ))) 368 | )); 369 | } 370 | 371 | #[traced_test] 372 | #[tokio::test] 373 | async fn test_insert() { 374 | let testdir = testdir!(); 375 | // create a service 376 | let kv = get_kv_manager(testdir.clone()); 377 | // insert should succeed 378 | assert!(kv.handle_insert(bip39_new_w24()).await.is_ok()); 379 | // insert should succeed again 380 | assert!(kv.handle_insert(bip39_new_w24()).await.is_ok()); 381 | } 382 | 
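// --- Editor's illustrative aside (not part of the original cmd_handler.rs) ---------------
// The bookkeeping above stores the active mnemonic under MNEMONIC_KEY ("mnemonic"), each
// rotated-out mnemonic under "mnemonic_<i>", and the running total under MNEMONIC_COUNT_KEY.
// The standalone sketch below reproduces only the newest-first ordering that seed_key_iter()
// documents; the function name and the hard-coded "mnemonic" literal are assumptions made
// purely for illustration.
fn ordered_seed_keys(count: u32) -> Vec<String> {
    // the active mnemonic always lives under the plain key
    let mut keys = vec!["mnemonic".to_string()];
    // rotated-out mnemonics follow, newest first: mnemonic_<count-1>, ..., mnemonic_1
    for i in (1..count).rev() {
        keys.push(format!("mnemonic_{}", i));
    }
    keys
}

#[test]
fn ordered_seed_keys_example() {
    // after one create and two rotations (count == 3), iteration starts from the active key
    assert_eq!(
        ordered_seed_keys(3),
        vec!["mnemonic", "mnemonic_2", "mnemonic_1"]
    );
}
// ------------------------------------------------------------------------------------------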
383 | #[traced_test] 384 | #[tokio::test] 385 | async fn test_export() { 386 | let testdir = testdir!(); 387 | // create a service 388 | let kv = get_kv_manager(testdir.clone()); 389 | // handle existing should fail 390 | assert!(matches!( 391 | kv.handle_existing().await, 392 | Err(InnerMnemonicError::KvErr(KvError::ExistsErr( 393 | InnerKvError::LogicalErr(_) 394 | ))) 395 | )); 396 | // mnemonic should not be exported 397 | assert!(kv.io().check_if_not_exported().is_ok()); 398 | // create a new mnemonic 399 | assert!(kv.handle_create().await.is_ok()); 400 | // mnemonic should now be exported 401 | assert!(kv.io().check_if_not_exported().is_err()); 402 | // export should fail because create also exports 403 | assert!(matches!( 404 | kv.handle_export().await, 405 | Err(InnerMnemonicError::FileIoErr(FileIoError::Exists(_))) 406 | )); 407 | // handle existing should fail because export file exists 408 | assert!(matches!( 409 | kv.handle_existing().await, 410 | Err(InnerMnemonicError::FileIoErr(FileIoError::Exists(_))) 411 | )); 412 | } 413 | 414 | #[traced_test] 415 | #[tokio::test] 416 | async fn test_existing() { 417 | let testdir = testdir!(); 418 | // create a service 419 | let kv = get_kv_manager(testdir.clone()); 420 | // create a new mnemonic 421 | assert!(kv.handle_create().await.is_ok()); 422 | // handle_existing should fail because export file exists 423 | assert!(matches!( 424 | kv.handle_existing().await, 425 | Err(InnerMnemonicError::FileIoErr(FileIoError::Exists(_))) 426 | )); 427 | // export should fail because export file exists 428 | assert!(matches!( 429 | kv.handle_export().await, 430 | Err(InnerMnemonicError::FileIoErr(FileIoError::Exists(_))) 431 | )); 432 | } 433 | 434 | #[traced_test] 435 | #[tokio::test] 436 | async fn test_rotate() { 437 | let testdir = testdir!(); 438 | let rotations = 5; 439 | 440 | // create a service 441 | let kv = get_kv_manager(testdir); 442 | 443 | let path = std::path::Path::new(kv.io().export_path()); 444 | 445 | let mut seeds: Vec = vec![]; 446 | 447 | for i in 0..rotations { 448 | if i == 0 { 449 | assert!(kv.handle_create().await.is_ok()); 450 | } else { 451 | assert!(kv.handle_rotate().await.is_ok()); 452 | } 453 | 454 | assert!(kv.io().check_if_not_exported().is_err()); 455 | 456 | let mut file = std::fs::File::open(path).expect("can't read exported mnemonic"); 457 | 458 | let mut phrase = String::new(); 459 | assert!(file.read_to_string(&mut phrase).is_ok()); 460 | 461 | assert!(std::fs::remove_file(path).is_ok()); 462 | 463 | seeds.push( 464 | bip39_seed( 465 | bip39_from_phrase(Password(phrase)).unwrap(), 466 | Password(MNEMONIC_PASSWORD.to_owned()), 467 | ) 468 | .unwrap() 469 | .as_bytes() 470 | .try_into() 471 | .unwrap(), 472 | ); 473 | } 474 | 475 | let mut ordered_keys = vec![MNEMONIC_KEY.into()]; 476 | 477 | for i in (1..rotations).rev() { 478 | ordered_keys.push(format!("{}_{}", MNEMONIC_KEY, i)); 479 | } 480 | 481 | assert_eq!( 482 | ordered_keys, 483 | kv.seed_key_iter().await.expect("failed to get seed keys") 484 | ); 485 | 486 | for (i, key) in kv.seed_key_iter().await.unwrap().iter().enumerate() { 487 | let seed = kv.get_seed(key).await.expect("failed to retrieve seed"); 488 | 489 | assert_eq!( 490 | format!("{:?}", seeds[rotations - i - 1]), 491 | format!("{:?}", seed), 492 | "seed {} with key {}", 493 | i, 494 | key 495 | ); 496 | } 497 | } 498 | } 499 | -------------------------------------------------------------------------------- /diagrams/keygen.svg: 
--------------------------------------------------------------------------------
[keygen.svg: draw.io sequence diagram "Keygen messages flow (gg20::keygen)". Recoverable labels: participants gRPC Client, gRPC Server, KV Store; steps "Validate and sanitize KeygenInit arguments", "spawn protocol threads", "first Traffic In", "subsequent Traffic In messages", "execute protocol thread", "broadcast", "Traffic Out", "thread result", "return Keygen result". Client note: "1. creates the first Traffic In message to initiate Keygen; 2. receives all Traffic Out of every round from a participant and forwards it to all other participants as Traffic In." Server note: "After the execution of the protocol is completed from every worker, process the results: 1. Aggregate results from all threads; 2.1. If the results are valid, send protocol result; 2.2. If the results are not valid, send an error message and close the stream." Source modules referenced: mod.rs, init.rs, execute.rs, result.rs, broadcast.rs; legend distinguishes gRPC channels from internal tofnd channels.]
--------------------------------------------------------------------------------
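The keygen diagram above reduces to a simple relay rule on the client side: the gRPC client creates the first Traffic In message to initiate keygen, and every Traffic Out it later receives from one participant is forwarded to all other participants as a Traffic In. The sketch below illustrates only that fan-out rule; the TrafficIn/TrafficOut structs and their fields are hypothetical stand-ins, not the actual tofnd/gg20 protobuf types, and the point-to-point routing the real protocol also supports is omitted.

// Hypothetical stand-ins for the protobuf traffic messages (illustration only).
struct TrafficOut { from_party: String, payload: Vec<u8>, is_broadcast: bool }
struct TrafficIn  { from_party: String, payload: Vec<u8>, is_broadcast: bool }

// One relay step: a Traffic Out produced by `out.from_party` becomes a Traffic In
// for every other participant in the session.
fn relay(out: &TrafficOut, all_parties: &[String]) -> Vec<(String, TrafficIn)> {
    all_parties
        .iter()
        .filter(|party| **party != out.from_party) // never echo a message back to its producer
        .map(|party| {
            (
                party.clone(),
                TrafficIn {
                    from_party: out.from_party.clone(),
                    payload: out.payload.clone(),
                    is_broadcast: out.is_broadcast,
                },
            )
        })
        .collect()
}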