├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── cmd ├── Cargo.toml └── src │ ├── args.rs │ └── main.rs ├── node ├── network │ ├── Cargo.toml │ └── src │ │ ├── behaviour.rs │ │ ├── broadcast.rs │ │ ├── config.rs │ │ ├── discovery.rs │ │ ├── error.rs │ │ ├── lib.rs │ │ ├── messages.rs │ │ └── service.rs ├── rpc-api │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── rpc │ ├── Cargo.toml │ └── src │ │ ├── handler.rs │ │ ├── lib.rs │ │ └── server.rs └── runtime │ ├── Cargo.toml │ └── src │ ├── coordination.rs │ ├── echo.rs │ ├── error.rs │ ├── execution.rs │ ├── lib.rs │ ├── negotiation.rs │ ├── network_proxy.rs │ ├── peerset.rs │ ├── peerset_cacher.rs │ ├── runtime.rs │ └── traits.rs ├── peer_config.example.json └── tss ├── Cargo.toml └── src ├── config.rs ├── factory.rs ├── keygen.rs ├── keysign.rs ├── lib.rs └── round_based.rs /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | party/Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | .idea 13 | .DS_Store 14 | 15 | data 16 | 17 | config_peer*.json 18 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "cmd", 5 | "tss", 6 | "node/network", 7 | "node/runtime", 8 | "node/rpc", 9 | "node/rpc-api", 10 | ] 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Timofey Luin 4 | 5 | Permission is hereby granted, free of charge, 
to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Threshold Signature Scheme over P2P transport 2 | This project implements [`rust-libp2p`](https://github.com/libp2p/rust-libp2p) transport for {t,n}-threshold signature schemes. 3 | 4 | Namely, the protocol used here is [Gennaro, Goldfeder 20 (GG20)](https://eprint.iacr.org/2020/540), which is a full multi-party computation (MPC) algorithm for threshold ECDSA with support for identifying malicious parties. Library that implements it and used here is [`ZenGo-X/multi-party-ecdsa`](https://github.com/ZenGo-X/multi-party-ecdsa). 
5 | 6 | This codebase aims to follow the modular design of `rust-libp2p`, so it could be repurposed to support other elliptic curves and signing schemes, assuming they are intended to be run in the [round-based](https://docs.rs/round-based/latest/round_based/index.html) flow of MPC. 7 | 8 | ## Project structure 9 | - `node`: MPC node daemon 10 | - `network`: libp2p networking stack (broadcast, discovery). 11 | - `runtime`: engine between network and application layers responsible for orchestrating MPC communication and pre-computation coordination. 12 | - `rpc`: [`jsonrpc`](https://github.com/paritytech/jsonrpc) server, client, and API trait. 13 | - `rpc-api`: implements API trait. 14 | - `tss`: application layer implementing two MPC protocols 15 | - `keygen`: GG20 distributed key generation (DKG) 16 | - `keysign`: GG20 threshold signing 17 | - `cmd`: helpful CLI for deploying node and interacting with it over JsonRPC. 18 | 19 | ## Design principles 20 | 21 | ### The "room" abstraction 22 | The underlying networking protocol is structured around the "room" abstraction. Each room is a separate Req-Resp channel over which parties synchronously run MPC (one at a time). To join a room, users have to know its name and at least one other user to bootstrap from if Kademlia discovery is used. 23 | 24 | ### Single Proposer; Multiple Joiners 25 | Pre-computation coordination is encoded as session types (see [blog post](https://cathieyun.medium.com/bulletproof-multi-party-computation-in-rust-with-session-types-b3da6e928d5d)) and follows a predefined flow where one party broadcasts a computation proposal and other parties in the room can answer. 26 | 27 | Along with the proposal specifying the protocol by its id, the Proposer can include an arbitrary challenge serving as a means of negotiation (e.g. asking parties to prove they hold a key share). 
28 | 29 | After the Proposer has sourced enough Joiners, she broadcasts a start message specifying the chosen parties and arbitrary arguments relevant for the MPC (e.g. the message to be signed). 30 | 31 | ### Echo broadcast 32 | To ensure reliable broadcast during computation, messages on the wire are echoed, i.e. echo broadcast: once messages from all known parties are received, relayers hash a vector containing these messages along with their own and send it as an acknowledgment. Assuming relayers sort vectors in the same way (e.g. by party indexes) and all of them received consistent sets of messages, the hashes will end up identical and broadcast reliability will be proven. 33 | 34 | ## Instructions 35 | 36 | ### Setup a new node 37 | The following command will set up a new node by generating a new keypair into path `-p` (the `:id` pattern will be replaced with the peer_id) and create a peer config file in path `-c`, setting the peer address as `-m` and its RPC address as `-r`: 38 | ```bash 39 | cargo run -- setup -c ./peer_config0.json -r 127.0.0.1:8080 -m /ip4/127.0.0.1/tcp/4000 -p ./data/:id/ 40 | ``` 41 | Make sure to populate the `boot_peers` array in each created config file for parties to be able to find each other through peer discovery. 42 | 43 | ### Deploy node 44 | The following command will deploy a node with the specified config and setup path (resolved by default using the `:id` pattern mentioned above). For peer discovery, either Kademlia, MDNS, both, or none of these can be used. 
45 | ```bash 46 | cargo run -- deploy -c ./config_peer0.json --kademlia 47 | ``` 48 | 49 | ### Run DKG 50 | The following command will propose to compute a new shared key to `-n` parties in the room `-r` with threshold parameter `-t`, on behalf of a node with the specified RPC address `-a`: 51 | ```bash 52 | cargo run -- keygen -a ws://127.0.0.1:8080 -r tss/0 -t 2 -n 3 53 | ``` 54 | 55 | ### Run threshold signing 56 | The following command will propose to jointly sign message `-m` to `-t`+1 parties in the room `-r` on behalf of a node with the specified RPC address `-a`: 57 | ```bash 58 | cargo run -- sign -a ws://127.0.0.1:8080 -r tss/0 -t 2 -m "hello room!" 59 | ``` 60 | 61 | ## Limitations (future features) 62 | - Rooms need to be known in advance when deploying a node. 63 | - The future goal is to have them activated inside the node dynamically. 64 | - All rooms source from a single Kad-DHT, which isn't practical. 65 | - Either DHT-per-room or internal separation needs to be implemented. 66 | - No key refresh/rotation protocol support available yet. 67 | - [ZenGo-X/fs-dkr](https://github.com/ZenGo-X/fs-dkr) will be added as soon as it is updated to the newer version of curv-kzen (v0.9). 68 | 69 | ## Warning 70 | **Do not use this in production.** Code here hasn't been audited and is likely not stable. 71 | It is no more than a prototype for learning and having fun doing it. 
72 | -------------------------------------------------------------------------------- /cmd/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-ecdsa" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | log = "0.4" 8 | anyhow = "1.0.56" 9 | gumdrop = "0.8" 10 | futures = { version = "0.3.2", features = ["compat"] } 11 | futures-util = "0.3" 12 | tokio = { version = "1", default-features = false, features = ["macros", "rt-multi-thread"] } 13 | async-std = { version = "1.10.0", features = ["attributes", "tokio1"] } 14 | async-trait = "0.1" 15 | libp2p = "0.40.0" 16 | serde = {version = "1.0", features = ["derive"] } 17 | serde_json = "1" 18 | serde_ipld_dagcbor = "0.1.2" 19 | round-based = { version = "0.1.4" } 20 | multi-party-ecdsa = {git = "https://github.com/ZenGo-X/multi-party-ecdsa"} 21 | curv-kzen = "0.9" 22 | sha3 = "0.10" 23 | pretty_env_logger = "0.4.0" 24 | 25 | mpc-p2p = { path = "../node/network" } 26 | mpc-runtime = { path = "../node/runtime" } 27 | mpc-rpc = { path = "../node/rpc" } 28 | mpc-api = { path = "../node/rpc-api" } 29 | mpc-tss = { path = "../tss" } 30 | 31 | [dev-dependencies] 32 | round-based = { version = "0.1.4", features = ["dev"] } 33 | -------------------------------------------------------------------------------- /cmd/src/args.rs: -------------------------------------------------------------------------------- 1 | use gumdrop::Options; 2 | 3 | #[derive(Debug, Options, Clone)] 4 | pub struct MPCArgs { 5 | help: bool, 6 | #[options(command)] 7 | pub command: Option, 8 | } 9 | 10 | #[derive(Debug, Options, Clone)] 11 | pub enum Command { 12 | #[options(help = "Deploy MPC daemon")] 13 | Deploy(DeployArgs), 14 | 15 | #[options(help = "Keygen args")] 16 | Keygen(KeygenArgs), 17 | 18 | #[options(help = "Sign args")] 19 | Sign(SignArgs), 20 | 21 | #[options(help = "Setup args")] 22 | Setup(SetupArgs), 23 | } 24 | 25 | #[derive(Debug, Options, Clone)] 26 
| pub struct DeployArgs { 27 | help: bool, 28 | 29 | #[options(help = "path to parties config")] 30 | pub config_path: String, 31 | 32 | #[options( 33 | help = "path to setup directory (where secret key saved)", 34 | default = "./data/:id/" 35 | )] 36 | pub path: String, 37 | 38 | #[options(help = "peer discovery with Kad-DHT")] 39 | pub kademlia: bool, 40 | 41 | #[options(help = "peer discovery with mdns")] 42 | pub mdns: bool, 43 | } 44 | 45 | #[derive(Debug, Options, Clone)] 46 | pub struct SetupArgs { 47 | help: bool, 48 | 49 | #[options(help = "libp2p multi address", default = "/ip4/127.0.0.1/tcp/4000")] 50 | pub multiaddr: String, 51 | 52 | #[options(help = "rpc address", default = "127.0.0.1:8080")] 53 | pub rpc_address: String, 54 | 55 | #[options(help = "path to configuration", default = "./config.json")] 56 | pub config_path: String, 57 | 58 | #[options(help = "path to setup artifacts", default = "./data/:id/")] 59 | pub path: String, 60 | } 61 | 62 | #[derive(Debug, Options, Clone)] 63 | pub struct KeygenArgs { 64 | help: bool, 65 | 66 | #[options(help = "mpc room")] 67 | pub room: String, 68 | 69 | #[options(help = "json rpc addresses")] 70 | pub address: String, 71 | 72 | #[options(help = "threshold number (T)")] 73 | pub threshold: u16, 74 | 75 | #[options(help = "number of parties (N)")] 76 | pub number_of_parties: u16, 77 | } 78 | 79 | #[derive(Debug, Options, Clone)] 80 | pub struct SignArgs { 81 | help: bool, 82 | 83 | #[options(help = "mpc room")] 84 | pub room: String, 85 | 86 | #[options(help = "json rpc addresses")] 87 | pub address: String, 88 | 89 | #[options(help = "threshold needed sign the messages (T)")] 90 | pub threshold: u16, 91 | 92 | #[options(help = "messages to sign")] 93 | pub messages: String, 94 | } 95 | -------------------------------------------------------------------------------- /cmd/src/main.rs: -------------------------------------------------------------------------------- 1 | #![feature(async_closure)] 2 | 3 | mod 
args; 4 | 5 | use crate::args::{Command, DeployArgs, KeygenArgs, MPCArgs, SetupArgs, SignArgs}; 6 | use anyhow::anyhow; 7 | use async_std::task; 8 | use futures::future::{FutureExt, TryFutureExt}; 9 | use futures::StreamExt; 10 | use gumdrop::Options; 11 | 12 | use mpc_api::RpcApi; 13 | use mpc_p2p::{NetworkWorker, NodeKeyConfig, Params, RoomArgs, Secret}; 14 | use mpc_rpc::server::JsonRPCServer; 15 | use mpc_runtime::{PersistentCacher, RuntimeDaemon}; 16 | use mpc_tss::{generate_config, Config, TssFactory}; 17 | use sha3::Digest; 18 | use std::error::Error; 19 | use std::path::Path; 20 | use std::{iter, process}; 21 | 22 | #[tokio::main] 23 | async fn main() -> Result<(), Box> { 24 | pretty_env_logger::init(); 25 | 26 | let args: MPCArgs = MPCArgs::parse_args_default_or_exit(); 27 | let command = args.command.unwrap_or_else(|| { 28 | eprintln!("[command] is required"); 29 | eprintln!("{}", MPCArgs::usage()); 30 | process::exit(2) 31 | }); 32 | 33 | match command { 34 | Command::Deploy(args) => deploy(args).await?, 35 | Command::Setup(args) => setup(args)?, 36 | Command::Keygen(args) => keygen(args).await?, 37 | Command::Sign(args) => sign(args).await?, 38 | } 39 | 40 | Ok(()) 41 | } 42 | 43 | fn setup(args: SetupArgs) -> Result<(), anyhow::Error> { 44 | generate_config( 45 | args.config_path, 46 | args.path, 47 | args.multiaddr, 48 | args.rpc_address, 49 | ) 50 | .map(|_| ()) 51 | } 52 | 53 | async fn deploy(args: DeployArgs) -> Result<(), anyhow::Error> { 54 | let config = Config::load_config(&args.config_path)?; 55 | let local_party = config.local.clone(); 56 | let local_peer_id = local_party.network_peer.peer_id; 57 | let path_str = args 58 | .path 59 | .to_string() 60 | .replace(":id", &*local_peer_id.to_base58()); 61 | let base_path = Path::new(&path_str); 62 | let node_key = NodeKeyConfig::Ed25519(Secret::File(base_path.join("secret.key")).into()); 63 | 64 | let boot_peers: Vec<_> = config.boot_peers.iter().map(|p| p.clone()).collect(); 65 | 66 | let 
(room_id, room_cfg, room_rx) = RoomArgs::new_full( 67 | "tss/0".to_string(), 68 | boot_peers.into_iter(), 69 | config.boot_peers.len(), 70 | ); 71 | 72 | let (net_worker, net_service) = { 73 | let cfg = Params { 74 | listen_address: local_party.network_peer.multiaddr.clone(), 75 | rooms: vec![room_cfg], 76 | mdns: args.mdns, 77 | kademlia: args.kademlia, 78 | }; 79 | 80 | NetworkWorker::new(node_key, cfg)? 81 | }; 82 | 83 | let net_task = task::spawn(async { 84 | net_worker.run().await; 85 | }); 86 | 87 | let local_peer_id = net_service.local_peer_id(); 88 | 89 | let (rt_worker, rt_service) = RuntimeDaemon::new( 90 | net_service, 91 | iter::once((room_id, room_rx)), 92 | TssFactory::new(format!("data/{}/key.share", local_peer_id.to_base58())), 93 | PersistentCacher::new(base_path.join("peerset"), local_peer_id.clone()), 94 | ); 95 | 96 | let rt_task = task::spawn(async { 97 | rt_worker.run().await; 98 | }); 99 | 100 | let rpc_server = { 101 | let handler = RpcApi::new(rt_service); 102 | JsonRPCServer::new( 103 | mpc_rpc::server::Config { 104 | host_address: local_party.rpc_addr, 105 | }, 106 | handler, 107 | ) 108 | .map_err(|e| anyhow!("json rpc server terminated with err: {}", e))? 109 | }; 110 | 111 | rpc_server.run().await.expect("expected RPC server to run"); 112 | 113 | let _ = rt_task.cancel().await; 114 | let _ = net_task.cancel().await; 115 | 116 | println!("bye"); 117 | 118 | Ok(()) 119 | } 120 | 121 | async fn keygen(args: KeygenArgs) -> anyhow::Result<()> { 122 | let res = mpc_rpc::new_client(args.address) 123 | .await? 124 | .keygen(args.room, args.number_of_parties, args.threshold) 125 | .await; 126 | 127 | let pub_key_hash = match res { 128 | Ok(pub_key) => sha3::Keccak256::digest(pub_key.to_bytes(true).to_vec()), 129 | Err(e) => { 130 | return Err(anyhow!("received error: {}", e)); 131 | } 132 | }; 133 | 134 | println!("Keygen finished! 
Keccak256 address => 0x{:x}", pub_key_hash); 135 | 136 | Ok(()) 137 | } 138 | 139 | async fn sign(args: SignArgs) -> anyhow::Result<()> { 140 | let res = mpc_rpc::new_client(args.address) 141 | .await? 142 | .sign(args.room, args.threshold, args.messages.as_bytes().to_vec()) 143 | .await 144 | .map_err(|e| anyhow!("error signing: {e}"))?; 145 | 146 | let signature = 147 | serde_json::to_string(&res).map_err(|e| anyhow!("error encoding signature: {e}"))?; 148 | println!("{}", signature); 149 | 150 | Ok(()) 151 | } 152 | -------------------------------------------------------------------------------- /node/network/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-p2p" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | log = "0.4" 10 | anyhow = "1.0.56" 11 | derive_more = "0.99.16" 12 | libp2p = { version = "0.40.0" } 13 | serde = {version = "1.0", features = ["derive"] } 14 | serde_ipld_dagcbor = "0.1.2" 15 | parity-scale-codec = "3.1.2" 16 | hex = "0.4.0" 17 | async-std = { version = "1.9", features = ["unstable"]} 18 | futures = "0.3" 19 | futures-util = "0.3" 20 | async-trait = "0.1.53" 21 | parking_lot = "0.12.0" 22 | thiserror = "1" 23 | zeroize = "1.5.4" 24 | unsigned-varint = { version = "0.6.0", features = [ 25 | "futures", 26 | "asynchronous_codec", 27 | ] } 28 | smallvec = "1.8.0" 29 | arrayvec = "0.7" 30 | blake3 = "1.3" 31 | 32 | [dev-dependencies] 33 | async-std = { version = "1.9", features = ["attributes"] } 34 | -------------------------------------------------------------------------------- /node/network/src/behaviour.rs: -------------------------------------------------------------------------------- 1 | use crate::discovery::{DiscoveryBehaviour, DiscoveryOut}; 2 | use crate::{broadcast, MessageContext, Params, RoomId}; 3 | use futures::channel::mpsc; 4 | 
use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; 5 | use libp2p::identity::Keypair; 6 | use libp2p::kad::QueryId; 7 | use libp2p::ping::{Ping, PingEvent, PingFailure, PingSuccess}; 8 | use libp2p::swarm::NetworkBehaviourEventProcess; 9 | use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; 10 | use libp2p::NetworkBehaviour; 11 | use libp2p::PeerId; 12 | use log::{debug, trace}; 13 | use std::borrow::Cow; 14 | use std::collections::VecDeque; 15 | use std::task::{Context, Poll}; 16 | 17 | const MPC_PROTOCOL_ID: &str = "/mpc/0.1.0"; 18 | 19 | #[derive(NetworkBehaviour)] 20 | #[behaviour(out_event = "BehaviourOut", poll_method = "poll", event_process = true)] 21 | pub(crate) struct Behaviour { 22 | ping: Ping, 23 | identify: Identify, 24 | discovery: DiscoveryBehaviour, 25 | /// Handles multiple communication of multiple generic protocols. 26 | broadcast: broadcast::Broadcast, 27 | 28 | #[behaviour(ignore)] 29 | events: VecDeque, 30 | } 31 | 32 | pub(crate) enum BehaviourOut { 33 | InboundMessage { 34 | /// Peer which sent us a message. 35 | peer: PeerId, 36 | /// Protocol name of the request. 37 | protocol: Cow<'static, str>, 38 | }, 39 | } 40 | 41 | impl Behaviour { 42 | pub fn new( 43 | local_key: &Keypair, 44 | broadcast_protocols: Vec, 45 | params: Params, 46 | ) -> Result { 47 | Ok(Behaviour { 48 | broadcast: broadcast::Broadcast::new(broadcast_protocols.into_iter())?, 49 | discovery: DiscoveryBehaviour::new(local_key.public(), params), 50 | identify: Identify::new(IdentifyConfig::new( 51 | MPC_PROTOCOL_ID.into(), 52 | local_key.public(), 53 | )), 54 | ping: Ping::default(), 55 | events: VecDeque::new(), 56 | }) 57 | } 58 | 59 | /// Initiates direct sending of a message. 
60 | pub fn send_message( 61 | &mut self, 62 | peer: &PeerId, 63 | message: Vec, 64 | room_id: RoomId, 65 | ctx: MessageContext, 66 | pending_response: mpsc::Sender), broadcast::RequestFailure>>, 67 | connect: broadcast::IfDisconnected, 68 | ) { 69 | self.broadcast.send_message( 70 | peer, 71 | &room_id.as_protocol_id(), 72 | ctx, 73 | message, 74 | pending_response, 75 | connect, 76 | ) 77 | } 78 | 79 | /// Initiates broadcasting of a message. 80 | pub fn broadcast_message( 81 | &mut self, 82 | peer_ids: impl Iterator, 83 | message: Vec, 84 | room_id: RoomId, 85 | ctx: MessageContext, 86 | pending_response: Option< 87 | mpsc::Sender), broadcast::RequestFailure>>, 88 | >, 89 | connect: broadcast::IfDisconnected, 90 | ) { 91 | self.broadcast.broadcast_message( 92 | peer_ids, 93 | &room_id.as_protocol_id(), 94 | ctx, 95 | message, 96 | pending_response, 97 | connect, 98 | ); 99 | } 100 | 101 | /// Bootstrap Kademlia network. 102 | pub fn bootstrap(&mut self) -> Result { 103 | self.discovery.bootstrap() 104 | } 105 | 106 | /// Known peers. 107 | pub fn peers(&self, _room_id: RoomId) -> impl Iterator { 108 | self.discovery.peers().clone().into_iter() 109 | } 110 | 111 | /// Consumes the events list when polled. 
112 | fn poll( 113 | &mut self, 114 | _: &mut Context, 115 | _: &mut impl PollParameters, 116 | ) -> Poll::ProtocolsHandler>> 117 | { 118 | if let Some(event) = self.events.pop_front() { 119 | return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); 120 | } 121 | 122 | Poll::Pending 123 | } 124 | } 125 | 126 | impl NetworkBehaviourEventProcess for Behaviour { 127 | fn inject_event(&mut self, event: broadcast::BroadcastOut) { 128 | match event { 129 | broadcast::BroadcastOut::InboundMessage { 130 | peer, 131 | protocol, 132 | result: _, 133 | } => { 134 | self.events 135 | .push_back(BehaviourOut::InboundMessage { peer, protocol }); 136 | } 137 | broadcast::BroadcastOut::BroadcastFinished { 138 | peer, 139 | protocol, 140 | duration, 141 | result, 142 | } => { 143 | debug!( 144 | "broadcast for protocol {:?} finished with {:?} peer: {:?} took: {:?}", 145 | protocol.to_string(), 146 | result, 147 | peer, 148 | duration 149 | ); 150 | } 151 | } 152 | } 153 | } 154 | 155 | impl NetworkBehaviourEventProcess for Behaviour { 156 | fn inject_event(&mut self, event: DiscoveryOut) { 157 | match event { 158 | DiscoveryOut::Connected(..) => {} 159 | DiscoveryOut::Disconnected(..) => {} 160 | } 161 | } 162 | } 163 | 164 | impl NetworkBehaviourEventProcess for Behaviour { 165 | fn inject_event(&mut self, event: IdentifyEvent) { 166 | match event { 167 | IdentifyEvent::Received { peer_id, info } => { 168 | trace!("identified peer {:?}", peer_id); 169 | trace!("protocol_version {:?}", info.protocol_version); 170 | trace!("agent_version {:?}", info.agent_version); 171 | trace!("listen_addresses {:?}", info.listen_addrs); 172 | trace!("observed_address {:?}", info.observed_addr); 173 | trace!("protocols {:?}", info.protocols); 174 | } 175 | IdentifyEvent::Sent { .. } => (), 176 | IdentifyEvent::Pushed { .. } => (), 177 | IdentifyEvent::Error { .. 
} => (), 178 | } 179 | } 180 | } 181 | 182 | impl NetworkBehaviourEventProcess for Behaviour { 183 | fn inject_event(&mut self, event: PingEvent) { 184 | match event.result { 185 | Ok(PingSuccess::Ping { rtt }) => { 186 | trace!( 187 | "PingSuccess::Ping rtt to {} is {} ms", 188 | event.peer.to_base58(), 189 | rtt.as_millis() 190 | ); 191 | } 192 | Ok(PingSuccess::Pong) => { 193 | trace!("PingSuccess::Pong from {}", event.peer.to_base58()); 194 | } 195 | Err(PingFailure::Timeout) => { 196 | debug!("PingFailure::Timeout {}", event.peer.to_base58()); 197 | } 198 | Err(PingFailure::Other { error }) => { 199 | debug!("PingFailure::Other {}: {}", event.peer.to_base58(), error); 200 | } 201 | Err(PingFailure::Unsupported) => { 202 | debug!("PingFailure::Unsupported {}", event.peer.to_base58()); 203 | } 204 | } 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /node/network/src/broadcast.rs: -------------------------------------------------------------------------------- 1 | // This file was a part of Substrate. 2 | // broadcast.rc <> request_response.rc 3 | 4 | // Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. 5 | // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 6 | 7 | // This program is free software: you can redistribute it and/or modify 8 | // it under the terms of the GNU General Public License as published by 9 | // the Free Software Foundation, either version 3 of the License, or 10 | // (at your option) any later version. 11 | 12 | // This program is distributed in the hope that it will be useful, 13 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 14 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 | // GNU General Public License for more details. 16 | 17 | // You should have received a copy of the GNU General Public License 18 | // along with this program. If not, see . 
19 | 20 | use futures::{ 21 | channel::{mpsc, oneshot}, 22 | prelude::*, 23 | }; 24 | 25 | use libp2p::{ 26 | core::{ 27 | connection::{ConnectionId, ListenerId}, 28 | ConnectedPoint, Multiaddr, PeerId, 29 | }, 30 | request_response::{ 31 | handler::RequestResponseHandler, ProtocolSupport, RequestResponse, RequestResponseConfig, 32 | RequestResponseEvent, RequestResponseMessage, ResponseChannel, 33 | }, 34 | swarm::{ 35 | protocols_handler::multi::MultiHandler, IntoProtocolsHandler, NetworkBehaviour, 36 | NetworkBehaviourAction, PollParameters, ProtocolsHandler, 37 | }, 38 | }; 39 | 40 | use std::ops::Add; 41 | use std::{ 42 | borrow::Cow, 43 | collections::{hash_map::Entry, HashMap}, 44 | io, iter, 45 | pin::Pin, 46 | task::{Context, Poll}, 47 | time::{Duration, Instant}, 48 | }; 49 | 50 | use crate::messages::{GenericCodec, MessageContext, WireMessage}; 51 | 52 | pub use libp2p::request_response::{InboundFailure, OutboundFailure, RequestId}; 53 | use log::{error, info}; 54 | 55 | /// Configuration for a single request-response protocol. 56 | #[derive(Debug, Clone)] 57 | pub struct ProtocolConfig { 58 | /// Name of the protocol on the wire. Should be something like `/foo/bar`. 59 | pub name: Cow<'static, str>, 60 | 61 | /// Maximum allowed size, in bytes, of a request. 62 | /// 63 | /// Any request larger than this value will be declined as a way to avoid allocating too 64 | /// much memory for it. 65 | pub max_request_size: u64, 66 | 67 | /// Maximum allowed size, in bytes, of a response. 68 | /// 69 | /// Any response larger than this value will be declined as a way to avoid allocating too 70 | /// much memory for it. 71 | pub max_response_size: u64, 72 | 73 | /// Duration after which emitted requests are considered timed out. 74 | /// 75 | /// If you expect the response to come back quickly, you should set this to a smaller duration. 76 | pub request_timeout: Duration, 77 | 78 | /// Channel on which the networking service will send incoming messages. 
79 | pub inbound_queue: Option>, 80 | } 81 | 82 | impl ProtocolConfig { 83 | pub fn new( 84 | name: Cow<'static, str>, 85 | inbound_queue: Option>, 86 | ) -> Self { 87 | Self { 88 | name, 89 | max_request_size: 8 * 1024 * 1024, 90 | max_response_size: 10 * 1024, 91 | request_timeout: Duration::from_secs(20), 92 | inbound_queue, 93 | } 94 | } 95 | } 96 | 97 | /// A single request received by a peer on a request-response protocol. 98 | #[derive(Debug)] 99 | pub struct IncomingMessage { 100 | /// Who sent the request. 101 | pub peer_id: PeerId, 102 | 103 | /// Who sent the request. 104 | pub peer_index: u16, 105 | 106 | /// Message sent by the remote. Will always be smaller than 107 | /// [`ProtocolConfig::max_request_size`]. 108 | pub payload: Vec, 109 | 110 | /// Message send to all peers in the room. 111 | pub is_broadcast: bool, 112 | 113 | /// Channel to send back the response. 114 | pub pending_response: oneshot::Sender, 115 | 116 | /// Protocol execution context. 117 | pub context: MessageContext, 118 | } 119 | 120 | /// Response for an incoming request to be send by a request protocol handler. 121 | #[derive(Debug)] 122 | pub struct OutgoingResponse { 123 | /// The payload of the response. 124 | /// 125 | /// `Err(())` if none is available e.g. due an error while handling the request. 126 | pub result: Result, ()>, 127 | 128 | /// If provided, the `oneshot::Sender` will be notified when the request has been sent to the 129 | /// peer. 130 | pub sent_feedback: Option>, 131 | } 132 | 133 | /// Event generated by the [`GenericBroadcast`]. 134 | #[derive(Debug)] 135 | pub enum BroadcastOut { 136 | /// A remote sent a request and either we have successfully answered it or an error happened. 137 | /// 138 | /// This event is generated for statistics purposes. 139 | InboundMessage { 140 | /// Peer which has emitted the request. 141 | peer: PeerId, 142 | /// Name of the protocol in question. 
143 | protocol: Cow<'static, str>, 144 | /// Whether handling the request was successful or unsuccessful. 145 | result: Result, 146 | }, 147 | 148 | /// A request initiated using [`RequestResponsesBehaviour::send_request`] has succeeded or 149 | /// failed. 150 | /// 151 | /// This event is generated for statistics purposes. 152 | BroadcastFinished { 153 | /// Peer that we send a request to. 154 | peer: PeerId, 155 | /// Name of the protocol in question. 156 | protocol: Cow<'static, str>, 157 | /// Duration the request took. 158 | duration: Duration, 159 | /// Result of the request. 160 | result: Result<(), RequestFailure>, 161 | }, 162 | } 163 | 164 | /// Combination of a protocol name and a request id. 165 | /// 166 | /// Uniquely identifies an inbound or outbound request among all handled protocols. Note however 167 | /// that uniqueness is only guaranteed between two inbound and likewise between two outbound 168 | /// requests. There is no uniqueness guarantee in a set of both inbound and outbound 169 | /// [`ProtocolRequestId`]s. 170 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] 171 | struct ProtocolRequestId { 172 | protocol: Cow<'static, str>, 173 | request_id: RequestId, 174 | } 175 | 176 | impl From<(Cow<'static, str>, RequestId)> for ProtocolRequestId { 177 | fn from((protocol, request_id): (Cow<'static, str>, RequestId)) -> Self { 178 | Self { 179 | protocol, 180 | request_id, 181 | } 182 | } 183 | } 184 | 185 | /// When sending a request, what to do on a disconnected recipient. 186 | #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] 187 | pub enum IfDisconnected { 188 | /// Try to connect to the peer. 189 | TryConnect, 190 | /// Just fail if the destination is not yet connected. 191 | ImmediateError, 192 | } 193 | 194 | /// Convenience functions for `IfDisconnected`. 195 | impl IfDisconnected { 196 | /// Shall we connect to a disconnected peer? 
197 | pub fn should_connect(self) -> bool { 198 | match self { 199 | Self::TryConnect => true, 200 | Self::ImmediateError => false, 201 | } 202 | } 203 | } 204 | 205 | // This is a state of processing incoming request Message. 206 | // The main reason of this struct is to hold `get_peer_reputation` as a Future state. 207 | struct BroadcastMessage { 208 | peer: PeerId, 209 | request_id: RequestId, 210 | request: WireMessage, 211 | channel: ResponseChannel, ()>>, 212 | protocol: String, 213 | resp_builder: Option>, 214 | get_peer_index: Pin> + Send>>, 215 | } 216 | 217 | /// Generated by the response builder and waiting to be processed. 218 | struct RequestProcessingOutcome { 219 | peer: PeerId, 220 | request_id: RequestId, 221 | protocol: Cow<'static, str>, 222 | inner_channel: ResponseChannel, ()>>, 223 | response: OutgoingResponse, 224 | } 225 | 226 | /// Implementation of `NetworkBehaviour` that provides support for broadcast protocols. 227 | pub struct Broadcast { 228 | /// The multiple sub-protocols, by name. 229 | /// Contains the underlying libp2p `RequestResponse` behaviour, plus an optional 230 | /// "response builder" used to build responses for incoming requests. 231 | protocols: HashMap< 232 | Cow<'static, str>, 233 | ( 234 | RequestResponse, 235 | Option>, 236 | ), 237 | >, 238 | 239 | /// Pending requests, passed down to a [`RequestResponse`] behaviour, awaiting a reply. 240 | pending_requests: HashMap< 241 | ProtocolRequestId, 242 | ( 243 | Instant, 244 | Option), RequestFailure>>>, 245 | ), 246 | >, 247 | 248 | /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the 249 | /// start time and the response to send back to the remote. 250 | pending_responses: stream::FuturesUnordered< 251 | Pin> + Send>>, 252 | >, 253 | 254 | /// Whenever an incoming request arrives, the arrival [`Instant`] is recorded here. 
255 | pending_responses_arrival_time: HashMap, 256 | 257 | /// Whenever a response is received on `pending_responses`, insert a channel to be notified 258 | /// when the request has been sent out. 259 | send_feedback: HashMap>, 260 | } 261 | 262 | impl Broadcast { 263 | /// Creates a new behaviour. Must be passed a list of supported protocols. Returns an error if 264 | /// the same protocol is passed twice. 265 | pub fn new(list: impl Iterator) -> Result { 266 | let mut protocols = HashMap::new(); 267 | for protocol in list { 268 | let mut cfg = RequestResponseConfig::default(); 269 | cfg.set_connection_keep_alive(Duration::from_secs(20)); 270 | cfg.set_request_timeout(protocol.request_timeout); 271 | cfg.set_connection_keep_alive(protocol.request_timeout.add(Duration::from_secs(10))); 272 | 273 | let protocol_support = if protocol.inbound_queue.is_some() { 274 | ProtocolSupport::Full 275 | } else { 276 | ProtocolSupport::Outbound 277 | }; 278 | 279 | let rq_rp = RequestResponse::new( 280 | GenericCodec { 281 | max_request_size: protocol.max_request_size, 282 | max_response_size: protocol.max_response_size, 283 | }, 284 | iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), 285 | cfg, 286 | ); 287 | 288 | match protocols.entry(protocol.name) { 289 | Entry::Vacant(e) => e.insert((rq_rp, protocol.inbound_queue)), 290 | Entry::Occupied(e) => { 291 | return Err(RegisterError::DuplicateProtocol(e.key().clone())) 292 | } 293 | }; 294 | } 295 | 296 | Ok(Self { 297 | protocols, 298 | pending_requests: Default::default(), 299 | pending_responses: Default::default(), 300 | pending_responses_arrival_time: Default::default(), 301 | send_feedback: Default::default(), 302 | }) 303 | } 304 | 305 | /// Initiates sending a request. 306 | /// 307 | /// If there is no established connection to the target peer, the behavior is determined by the 308 | /// choice of `connect`. 
309 | /// 310 | /// An error is returned if the protocol doesn't match one that has been registered. 311 | pub fn send_message( 312 | &mut self, 313 | target: &PeerId, 314 | protocol_id: &str, 315 | ctx: MessageContext, 316 | payload: Vec, 317 | pending_response: mpsc::Sender), RequestFailure>>, 318 | connect: IfDisconnected, 319 | ) { 320 | self.send_wire_message( 321 | target, 322 | protocol_id, 323 | WireMessage { 324 | is_broadcast: false, 325 | payload, 326 | context: ctx, 327 | }, 328 | Some(pending_response), 329 | connect, 330 | ); 331 | } 332 | 333 | pub fn broadcast_message( 334 | &mut self, 335 | targets: impl Iterator, 336 | protocol_id: &str, 337 | ctx: MessageContext, 338 | payload: Vec, 339 | pending_response: Option), RequestFailure>>>, 340 | connect: IfDisconnected, 341 | ) { 342 | for target in targets { 343 | self.send_wire_message( 344 | &target, 345 | protocol_id, 346 | WireMessage { 347 | is_broadcast: true, 348 | payload: payload.clone(), 349 | context: ctx, 350 | }, 351 | pending_response.clone(), 352 | connect, 353 | ); 354 | } 355 | } 356 | 357 | fn send_wire_message( 358 | &mut self, 359 | target: &PeerId, 360 | protocol_id: &str, 361 | message: WireMessage, 362 | pending_response: Option), RequestFailure>>>, 363 | connect: IfDisconnected, 364 | ) { 365 | if let Some((protocol, _)) = self.protocols.get_mut(protocol_id) { 366 | if protocol.is_connected(target) || connect.should_connect() { 367 | let request_id = protocol.send_request(target, message); 368 | let prev_req_id = self.pending_requests.insert( 369 | (protocol_id.to_string().into(), request_id).into(), 370 | (Instant::now(), pending_response), 371 | ); 372 | debug_assert!(prev_req_id.is_none(), "Expect request id to be unique."); 373 | } else { 374 | if let Some(mut tx) = pending_response { 375 | if tx.try_send(Err(RequestFailure::NotConnected)).is_err() { 376 | log::debug!( 377 | target: "sub-libp2p", 378 | "Not connected to peer {:?}. 
At the same time local \ 379 | node is no longer interested in the result.", 380 | target, 381 | ); 382 | }; 383 | } 384 | } 385 | } else { 386 | if let Some(mut tx) = pending_response { 387 | if tx.try_send(Err(RequestFailure::UnknownProtocol)).is_err() { 388 | log::debug!( 389 | target: "sub-libp2p", 390 | "Unknown protocol {:?}. At the same time local \ 391 | node is no longer interested in the result.", 392 | protocol_id, 393 | ); 394 | }; 395 | } 396 | } 397 | } 398 | 399 | fn new_handler_with_replacement( 400 | &mut self, 401 | protocol: String, 402 | handler: RequestResponseHandler, 403 | ) -> ::ProtocolsHandler { 404 | let mut handlers: HashMap<_, _> = self 405 | .protocols 406 | .iter_mut() 407 | .map(|(p, (r, _))| (p.to_string(), NetworkBehaviour::new_handler(r))) 408 | .collect(); 409 | 410 | if let Some(h) = handlers.get_mut(&protocol) { 411 | *h = handler 412 | } 413 | 414 | MultiHandler::try_from_iter(handlers).expect( 415 | "Protocols are in a HashMap and there can be at most one handler per protocol name, \ 416 | which is the only possible error; qed", 417 | ) 418 | } 419 | } 420 | 421 | impl NetworkBehaviour for Broadcast { 422 | type ProtocolsHandler = 423 | MultiHandler as NetworkBehaviour>::ProtocolsHandler>; 424 | type OutEvent = BroadcastOut; 425 | 426 | fn new_handler(&mut self) -> Self::ProtocolsHandler { 427 | let iter = self 428 | .protocols 429 | .iter_mut() 430 | .map(|(p, (r, _))| (p.to_string(), NetworkBehaviour::new_handler(r))); 431 | 432 | MultiHandler::try_from_iter(iter).expect( 433 | "Protocols are in a HashMap and there can be at most one handler per protocol name, \ 434 | which is the only possible error; qed", 435 | ) 436 | } 437 | 438 | fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { 439 | Vec::new() 440 | } 441 | 442 | fn inject_connected(&mut self, peer_id: &PeerId) { 443 | for (p, _) in self.protocols.values_mut() { 444 | NetworkBehaviour::inject_connected(p, peer_id) 445 | } 446 | } 447 | 448 | fn 
inject_disconnected(&mut self, peer_id: &PeerId) { 449 | for (p, _) in self.protocols.values_mut() { 450 | NetworkBehaviour::inject_disconnected(p, peer_id) 451 | } 452 | } 453 | 454 | fn inject_connection_established( 455 | &mut self, 456 | peer_id: &PeerId, 457 | conn: &ConnectionId, 458 | endpoint: &ConnectedPoint, 459 | failed_addresses: Option<&Vec>, 460 | ) { 461 | for (p, _) in self.protocols.values_mut() { 462 | NetworkBehaviour::inject_connection_established( 463 | p, 464 | peer_id, 465 | conn, 466 | endpoint, 467 | failed_addresses, 468 | ) 469 | } 470 | } 471 | 472 | fn inject_connection_closed( 473 | &mut self, 474 | peer_id: &PeerId, 475 | conn: &ConnectionId, 476 | endpoint: &ConnectedPoint, 477 | _handler: ::Handler, 478 | ) { 479 | for (p, _) in self.protocols.values_mut() { 480 | let handler = p.new_handler(); 481 | NetworkBehaviour::inject_connection_closed(p, peer_id, conn, endpoint, handler); 482 | } 483 | } 484 | 485 | fn inject_event( 486 | &mut self, 487 | peer_id: PeerId, 488 | connection: ConnectionId, 489 | (p_name, event): ::OutEvent, 490 | ) { 491 | if let Some((proto, _)) = self.protocols.get_mut(&*p_name) { 492 | return proto.inject_event(peer_id, connection, event); 493 | } 494 | 495 | log::warn!(target: "sub-libp2p", 496 | "inject_node_event: no request-response instance registered for protocol {:?}", p_name) 497 | } 498 | 499 | fn inject_dial_failure( 500 | &mut self, 501 | peer_id: Option, 502 | _: Self::ProtocolsHandler, 503 | error: &libp2p::swarm::DialError, 504 | ) { 505 | for (p, _) in self.protocols.values_mut() { 506 | let handler = p.new_handler(); 507 | NetworkBehaviour::inject_dial_failure(p, peer_id, handler, error) 508 | } 509 | } 510 | 511 | fn inject_new_listener(&mut self, id: ListenerId) { 512 | for (p, _) in self.protocols.values_mut() { 513 | NetworkBehaviour::inject_new_listener(p, id) 514 | } 515 | } 516 | 517 | fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { 518 | for (p, _) in 
self.protocols.values_mut() { 519 | NetworkBehaviour::inject_new_listen_addr(p, id, addr) 520 | } 521 | } 522 | 523 | fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { 524 | for (p, _) in self.protocols.values_mut() { 525 | NetworkBehaviour::inject_expired_listen_addr(p, id, addr) 526 | } 527 | } 528 | 529 | fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { 530 | for (p, _) in self.protocols.values_mut() { 531 | NetworkBehaviour::inject_listener_error(p, id, err) 532 | } 533 | } 534 | 535 | fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { 536 | for (p, _) in self.protocols.values_mut() { 537 | NetworkBehaviour::inject_listener_closed(p, id, reason) 538 | } 539 | } 540 | 541 | fn inject_new_external_addr(&mut self, addr: &Multiaddr) { 542 | for (p, _) in self.protocols.values_mut() { 543 | NetworkBehaviour::inject_new_external_addr(p, addr) 544 | } 545 | } 546 | 547 | fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { 548 | for (p, _) in self.protocols.values_mut() { 549 | NetworkBehaviour::inject_expired_external_addr(p, addr) 550 | } 551 | } 552 | 553 | fn poll( 554 | &mut self, 555 | cx: &mut Context, 556 | params: &mut impl PollParameters, 557 | ) -> Poll> { 558 | loop { 559 | // Poll to see if any response is ready to be sent back. 
560 | while let Poll::Ready(Some(outcome)) = self.pending_responses.poll_next_unpin(cx) { 561 | let RequestProcessingOutcome { 562 | peer: _, 563 | request_id, 564 | protocol: protocol_name, 565 | inner_channel, 566 | response: 567 | OutgoingResponse { 568 | result, 569 | sent_feedback, 570 | }, 571 | } = match outcome { 572 | Some(outcome) => outcome, 573 | None => continue, 574 | }; 575 | 576 | if let Ok(payload) = result { 577 | if let Some((protocol, _)) = self.protocols.get_mut(&*protocol_name) { 578 | if let Err(_) = protocol.send_response(inner_channel, Ok(payload)) { 579 | // Note: Failure is handled further below when receiving 580 | // `InboundFailure` event from `RequestResponse` behaviour. 581 | log::debug!( 582 | target: "sub-libp2p", 583 | "Failed to send response for {:?} on protocol {:?} due to a \ 584 | timeout or due to the connection to the peer being closed. \ 585 | Dropping response", 586 | request_id, protocol_name, 587 | ); 588 | } else { 589 | if let Some(sent_feedback) = sent_feedback { 590 | self.send_feedback 591 | .insert((protocol_name, request_id).into(), sent_feedback); 592 | } 593 | } 594 | } 595 | } 596 | } 597 | 598 | // Poll request-responses protocols. 599 | for (protocol, (behaviour, resp_builder)) in &mut self.protocols { 600 | while let Poll::Ready(ev) = behaviour.poll(cx, params) { 601 | let ev = match ev { 602 | // Main events we are interested in. 603 | NetworkBehaviourAction::GenerateEvent(ev) => ev, 604 | 605 | // Other events generated by the underlying behaviour are transparently 606 | // passed through. 
607 | NetworkBehaviourAction::DialAddress { address, handler } => { 608 | log::error!( 609 | "The request-response isn't supposed to start dialing peers" 610 | ); 611 | let protocol = protocol.to_string(); 612 | let handler = self.new_handler_with_replacement(protocol, handler); 613 | return Poll::Ready(NetworkBehaviourAction::DialAddress { 614 | address, 615 | handler, 616 | }); 617 | } 618 | NetworkBehaviourAction::DialPeer { 619 | peer_id, 620 | condition, 621 | handler, 622 | } => { 623 | let protocol = protocol.to_string(); 624 | let handler = self.new_handler_with_replacement(protocol, handler); 625 | return Poll::Ready(NetworkBehaviourAction::DialPeer { 626 | peer_id, 627 | condition, 628 | handler, 629 | }); 630 | } 631 | NetworkBehaviourAction::NotifyHandler { 632 | peer_id, 633 | handler, 634 | event, 635 | } => { 636 | return Poll::Ready(NetworkBehaviourAction::NotifyHandler { 637 | peer_id, 638 | handler, 639 | event: ((*protocol).to_string(), event), 640 | }) 641 | } 642 | NetworkBehaviourAction::ReportObservedAddr { address, score } => { 643 | return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { 644 | address, 645 | score, 646 | }) 647 | } 648 | NetworkBehaviourAction::CloseConnection { 649 | peer_id, 650 | connection, 651 | } => { 652 | return Poll::Ready(NetworkBehaviourAction::CloseConnection { 653 | peer_id, 654 | connection, 655 | }) 656 | } 657 | }; 658 | 659 | match ev { 660 | // Received a request from a remote. 661 | RequestResponseEvent::Message { 662 | peer, 663 | message: 664 | RequestResponseMessage::Request { 665 | request_id, 666 | request, 667 | channel, 668 | .. 669 | }, 670 | } => { 671 | self.pending_responses_arrival_time.insert( 672 | (protocol.clone(), request_id.clone()).into(), 673 | Instant::now(), 674 | ); 675 | 676 | let (tx, rx) = oneshot::channel(); 677 | 678 | // Submit the request to the "response builder" passed by the user at 679 | // initialization. 
680 | if let Some(mut resp_builder) = resp_builder.clone() { 681 | let _ = resp_builder.try_send(IncomingMessage { 682 | peer_id: peer, 683 | peer_index: 0, 684 | is_broadcast: request.is_broadcast, 685 | payload: request.payload, 686 | context: request.context, 687 | pending_response: tx, 688 | }); 689 | } else { 690 | debug_assert!(false, "Received message on outbound-only protocol."); 691 | } 692 | 693 | let protocol = protocol.to_owned(); 694 | self.pending_responses.push(Box::pin(async move { 695 | if let Ok(response) = rx.await { 696 | Some(RequestProcessingOutcome { 697 | peer, 698 | request_id, 699 | protocol, 700 | inner_channel: channel, 701 | response, 702 | }) 703 | } else { 704 | None 705 | } 706 | })); 707 | } 708 | 709 | // Received a response from a remote to one of our requests. 710 | RequestResponseEvent::Message { 711 | peer, 712 | message: 713 | RequestResponseMessage::Response { 714 | request_id, 715 | response, 716 | }, 717 | .. 718 | } => { 719 | let (started, delivered) = match self 720 | .pending_requests 721 | .remove(&(protocol.clone(), request_id).into()) 722 | { 723 | Some((started, pending_response)) => { 724 | let delivered = match pending_response { 725 | Some(mut tx) => tx 726 | .try_send( 727 | response 728 | .map(|r| (peer.clone(), r)) 729 | .map_err(|()| RequestFailure::Refused), 730 | ) 731 | .map_err(|_| RequestFailure::Obsolete), 732 | None => Ok(()), 733 | }; 734 | 735 | (started, delivered) 736 | } 737 | None => { 738 | log::warn!( 739 | target: "sub-libp2p", 740 | "Received `RequestResponseEvent::Message` with unexpected request id {:?}", 741 | request_id, 742 | ); 743 | debug_assert!(false); 744 | continue; 745 | } 746 | }; 747 | 748 | let out = BroadcastOut::BroadcastFinished { 749 | peer, 750 | protocol: protocol.clone(), 751 | duration: started.elapsed(), 752 | result: delivered, 753 | }; 754 | 755 | return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); 756 | } 757 | 758 | // One of our requests has failed. 
759 | RequestResponseEvent::OutboundFailure { 760 | peer, 761 | request_id, 762 | error, 763 | .. 764 | } => { 765 | let started = match self 766 | .pending_requests 767 | .remove(&(protocol.clone(), request_id).into()) 768 | { 769 | Some((started, pending_response)) => { 770 | if let Some(mut tx) = pending_response { 771 | if tx 772 | .try_send(Err(RequestFailure::Network(error.clone()))) 773 | .is_err() 774 | { 775 | log::debug!( 776 | target: "sub-libp2p", 777 | "Request with id {:?} failed. At the same time local \ 778 | node is no longer interested in the result.", 779 | request_id, 780 | ); 781 | } 782 | } 783 | 784 | started 785 | } 786 | None => { 787 | log::warn!( 788 | target: "sub-libp2p", 789 | "Received `RequestResponseEvent::Message` with unexpected request id {:?}", 790 | request_id, 791 | ); 792 | debug_assert!(false); 793 | continue; 794 | } 795 | }; 796 | 797 | let out = BroadcastOut::BroadcastFinished { 798 | peer, 799 | protocol: protocol.clone(), 800 | duration: started.elapsed(), 801 | result: Err(RequestFailure::Network(error)), 802 | }; 803 | 804 | return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); 805 | } 806 | 807 | // An inbound request failed, either while reading the request or due to 808 | // failing to send a response. 809 | RequestResponseEvent::InboundFailure { 810 | request_id, 811 | peer, 812 | error, 813 | .. 814 | } => { 815 | self.pending_responses_arrival_time 816 | .remove(&(protocol.clone(), request_id).into()); 817 | self.send_feedback 818 | .remove(&(protocol.clone(), request_id).into()); 819 | let out = BroadcastOut::InboundMessage { 820 | peer, 821 | protocol: protocol.clone(), 822 | result: Err(ResponseFailure::Network(error)), 823 | }; 824 | return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); 825 | } 826 | 827 | // A response to an inbound request has been sent. 
828 | RequestResponseEvent::ResponseSent { request_id, peer } => { 829 | let arrival_time = self 830 | .pending_responses_arrival_time 831 | .remove(&(protocol.clone(), request_id).into()) 832 | .map(|t| t.elapsed()) 833 | .expect( 834 | "Time is added for each inbound request on arrival and only \ 835 | removed on success (`ResponseSent`) or failure \ 836 | (`InboundFailure`). One can not receive a success event for a \ 837 | request that either never arrived, or that has previously \ 838 | failed; qed.", 839 | ); 840 | 841 | if let Some(send_feedback) = self 842 | .send_feedback 843 | .remove(&(protocol.clone(), request_id).into()) 844 | { 845 | let _ = send_feedback.send(()); 846 | } 847 | 848 | let out = BroadcastOut::InboundMessage { 849 | peer, 850 | protocol: protocol.clone(), 851 | result: Ok(arrival_time), 852 | }; 853 | 854 | return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); 855 | } 856 | }; 857 | } 858 | } 859 | 860 | break Poll::Pending; 861 | } 862 | } 863 | } 864 | 865 | /// Error when registering a protocol. 866 | #[derive(Debug, thiserror::Error)] 867 | pub enum RegisterError { 868 | /// A protocol has been specified multiple times. 869 | #[error("{0}")] 870 | DuplicateProtocol(Cow<'static, str>), 871 | } 872 | 873 | /// Error in a request. 874 | #[derive(Debug, thiserror::Error)] 875 | #[allow(missing_docs)] 876 | pub enum RequestFailure { 877 | #[error("We are not currently connected to the requested peer.")] 878 | NotConnected, 879 | #[error("Given protocol hasn't been registered.")] 880 | UnknownProtocol, 881 | #[error("Remote has closed the substream before answering, thereby signaling that it considers the request as valid, but refused to answer it.")] 882 | Refused, 883 | #[error("The remote replied, but the local node is no longer interested in the response.")] 884 | Obsolete, 885 | /// Problem on the network. 
886 | #[error("Problem on the network: {0}")] 887 | Network(OutboundFailure), 888 | } 889 | 890 | /// Error when processing a request sent by a remote. 891 | #[derive(Debug, thiserror::Error)] 892 | pub enum ResponseFailure { 893 | /// Problem on the network. 894 | #[error("Problem on the network: {0}")] 895 | Network(InboundFailure), 896 | } 897 | -------------------------------------------------------------------------------- /node/network/src/config.rs: -------------------------------------------------------------------------------- 1 | use crate::{broadcast, RoomId}; 2 | use anyhow::anyhow; 3 | use futures::channel::mpsc; 4 | use libp2p::identity::{ed25519, Keypair}; 5 | use libp2p::{multiaddr, Multiaddr, PeerId}; 6 | use serde::{Deserialize, Serialize}; 7 | use std::error::Error; 8 | use std::io::Write; 9 | use std::path::{Path, PathBuf}; 10 | use std::str::FromStr; 11 | use std::{fmt, fs, io}; 12 | use zeroize::Zeroize; 13 | 14 | #[derive(Clone)] 15 | pub struct Params { 16 | /// Multi-addresses to listen for incoming connections. 17 | pub listen_address: Multiaddr, 18 | /// Mdns discovery enabled. 19 | pub mdns: bool, 20 | /// Kademlia discovery enabled. 21 | pub kademlia: bool, 22 | /// Rooms 23 | pub rooms: Vec, 24 | } 25 | 26 | #[derive(Clone)] 27 | pub struct RoomArgs { 28 | pub id: RoomId, 29 | 30 | pub max_size: usize, 31 | 32 | /// Configuration for the default set of nodes that participate in computation. 33 | pub boot_peers: Vec, 34 | 35 | /// Channel on which the networking service will send incoming messages. 
36 | pub inbound_queue: Option>, 37 | } 38 | 39 | impl RoomArgs { 40 | pub fn new_full( 41 | name: String, 42 | boot_peers: impl Iterator, 43 | max_size: usize, 44 | ) -> (RoomId, Self, mpsc::Receiver) { 45 | let id = RoomId::from(name); 46 | let (tx, rx) = mpsc::channel(max_size); 47 | let cfg = Self { 48 | id, 49 | max_size, 50 | boot_peers: boot_peers.collect(), 51 | inbound_queue: Some(tx), 52 | }; 53 | 54 | (id, cfg, rx) 55 | } 56 | } 57 | 58 | /// The configuration of a node's secret key, describing the type of key 59 | /// and how it is obtained. A node's identity keypair is the result of 60 | /// the evaluation of the node key configuration. 61 | #[derive(Clone)] 62 | pub enum NodeKeyConfig { 63 | /// A Ed25519 secret key configuration. 64 | Ed25519(Secret), 65 | } 66 | 67 | impl Default for NodeKeyConfig { 68 | fn default() -> NodeKeyConfig { 69 | Self::Ed25519(Secret::New) 70 | } 71 | } 72 | 73 | /// The configuration options for obtaining a secret key `K`. 74 | #[derive(Clone)] 75 | pub enum Secret { 76 | /// Use the given secret key `K`. 77 | Input(K), 78 | /// Read the secret key from a file. If the file does not exist, 79 | /// it is created with a newly generated secret key `K`. The format 80 | /// of the file is determined by `K`: 81 | /// 82 | /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. 83 | File(PathBuf), 84 | /// Always generate a new secret key `K`. 85 | New, 86 | } 87 | 88 | impl NodeKeyConfig { 89 | /// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`. 
90 | pub fn into_keypair(self) -> io::Result { 91 | use NodeKeyConfig::*; 92 | match self { 93 | Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()), 94 | Ed25519(Secret::Input(k)) => Ok(Keypair::Ed25519(k.into())), 95 | Ed25519(Secret::File(f)) => get_secret( 96 | f, 97 | |mut b| match String::from_utf8(b.to_vec()).ok().and_then(|s| { 98 | if s.len() == 64 { 99 | hex::decode(&s).ok() 100 | } else { 101 | None 102 | } 103 | }) { 104 | Some(s) => ed25519::SecretKey::from_bytes(s), 105 | _ => ed25519::SecretKey::from_bytes(&mut b), 106 | }, 107 | ed25519::SecretKey::generate, 108 | |b| b.as_ref().to_vec(), 109 | ) 110 | .map(ed25519::Keypair::from) 111 | .map(Keypair::Ed25519), 112 | } 113 | } 114 | 115 | pub fn persist>(k: Keypair, path: P) -> io::Result<()> { 116 | match k { 117 | Keypair::Ed25519(k) => { 118 | let sk = ed25519::SecretKey::from(k); 119 | let mut sk_vec = sk.as_ref().to_vec(); 120 | write_secret_file(path, &sk_vec)?; 121 | sk_vec.zeroize(); 122 | Ok(()) 123 | } 124 | _ => { 125 | panic!("unsupported curve"); 126 | } 127 | } 128 | } 129 | } 130 | 131 | /// Address of a node, including its identity. 132 | /// 133 | /// This struct represents a decoded version of a multiaddress that ends with `/p2p/`. 134 | #[derive(Debug, Clone, Serialize, Deserialize)] 135 | #[serde(try_from = "String", into = "String")] 136 | pub struct MultiaddrWithPeerId { 137 | /// Address of the node. 138 | pub multiaddr: Multiaddr, 139 | /// Its identity. 140 | pub peer_id: PeerId, 141 | } 142 | 143 | impl MultiaddrWithPeerId { 144 | /// Concatenates the multiaddress and peer ID into one multiaddress containing both. 
145 | pub fn concat(&self) -> Multiaddr { 146 | let proto = multiaddr::Protocol::P2p(From::from(self.peer_id)); 147 | self.multiaddr.clone().with(proto) 148 | } 149 | } 150 | 151 | impl fmt::Display for MultiaddrWithPeerId { 152 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 153 | fmt::Display::fmt(&self.concat(), f) 154 | } 155 | } 156 | 157 | impl FromStr for MultiaddrWithPeerId { 158 | type Err = anyhow::Error; 159 | 160 | fn from_str(s: &str) -> Result { 161 | let (peer_id, multiaddr) = parse_str_addr(s)?; 162 | Ok(Self { peer_id, multiaddr }) 163 | } 164 | } 165 | 166 | impl From for String { 167 | fn from(ma: MultiaddrWithPeerId) -> String { 168 | format!("{}", ma) 169 | } 170 | } 171 | 172 | impl TryFrom for MultiaddrWithPeerId { 173 | type Error = anyhow::Error; 174 | fn try_from(string: String) -> Result { 175 | string 176 | .parse() 177 | .map_err(|e| anyhow!("parsing multiaddr_peer_id terminated with err: {}", e)) 178 | } 179 | } 180 | 181 | /// Parses a string address and splits it into Multiaddress and PeerId, if 182 | /// valid. 183 | pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), anyhow::Error> { 184 | let addr: Multiaddr = addr_str.parse()?; 185 | parse_addr(addr) 186 | } 187 | 188 | /// Splits a Multiaddress into a Multiaddress and PeerId. 189 | pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), anyhow::Error> { 190 | let who = match addr.pop() { 191 | Some(multiaddr::Protocol::P2p(key)) => { 192 | PeerId::from_multihash(key).map_err(|_| anyhow!("invalid peer id"))? 
193 | } 194 | _ => return Err(anyhow!("peer id missing")), 195 | }; 196 | 197 | Ok((who, addr)) 198 | } 199 | 200 | // impl Eq for MultiaddrWithPeerId {} 201 | // 202 | // impl PartialEq for MultiaddrWithPeerId { 203 | // fn eq(&self, other: &Self) -> bool { 204 | // self.peer_id == other.peer_id 205 | // } 206 | // } 207 | // 208 | // impl PartialOrd for MultiaddrWithPeerId { 209 | // fn partial_cmp(&self, other: &Self) -> Option { 210 | // todo!() 211 | // } 212 | // } 213 | // 214 | // impl Ord for MultiaddrWithPeerId { 215 | // fn cmp(&self, other:&Self) -> Ordering { 216 | // let size1 = self.peer_id; 217 | // let size2 = other.value; 218 | // if self > size2 { 219 | // Ordering::Less 220 | // } 221 | // if size1 < size2 { 222 | // Ordering::Greater 223 | // } 224 | // Ordering::Equal 225 | // } 226 | // } 227 | 228 | /// Load a secret key from a file, if it exists, or generate a 229 | /// new secret key and write it to that file. In either case, 230 | /// the secret key is returned. 231 | fn get_secret(file: P, parse: F, generate: G, serialize: W) -> io::Result 232 | where 233 | P: AsRef, 234 | F: for<'r> FnOnce(&'r mut [u8]) -> Result, 235 | G: FnOnce() -> K, 236 | E: Error + Send + Sync + 'static, 237 | W: Fn(&K) -> Vec, 238 | { 239 | fs::read(&file) 240 | .and_then(|mut sk_bytes| { 241 | parse(&mut sk_bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) 242 | }) 243 | .or_else(|e| { 244 | if e.kind() == io::ErrorKind::NotFound { 245 | file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; 246 | let sk = generate(); 247 | let mut sk_vec = serialize(&sk); 248 | write_secret_file(file, &sk_vec)?; 249 | sk_vec.zeroize(); 250 | Ok(sk) 251 | } else { 252 | Err(e) 253 | } 254 | }) 255 | } 256 | 257 | /// Write secret bytes to a file. 258 | fn write_secret_file

(path: P, sk_bytes: &[u8]) -> io::Result<()> 259 | where 260 | P: AsRef, 261 | { 262 | let mut file = open_secret_file(&path)?; 263 | file.write_all(sk_bytes) 264 | } 265 | 266 | /// Opens a file containing a secret key in write mode. 267 | #[cfg(unix)] 268 | fn open_secret_file

(path: P) -> io::Result 269 | where 270 | P: AsRef, 271 | { 272 | use std::os::unix::fs::OpenOptionsExt; 273 | fs::OpenOptions::new() 274 | .write(true) 275 | .create_new(true) 276 | .mode(0o600) 277 | .open(path) 278 | } 279 | -------------------------------------------------------------------------------- /node/network/src/discovery.rs: -------------------------------------------------------------------------------- 1 | use crate::Params; 2 | use async_std::task; 3 | use futures::prelude::*; 4 | 5 | use libp2p::swarm::DialError; 6 | use libp2p::{ 7 | core::{ 8 | connection::{ConnectionId, ListenerId}, 9 | ConnectedPoint, Multiaddr, PeerId, PublicKey, 10 | }, 11 | kad::{handler::KademliaHandlerProto, Kademlia, KademliaConfig, KademliaEvent, QueryId}, 12 | mdns::MdnsEvent, 13 | swarm::{ 14 | toggle::{Toggle, ToggleIntoProtoHandler}, 15 | IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, 16 | ProtocolsHandler, 17 | }, 18 | }; 19 | use libp2p::{kad::record::store::MemoryStore, mdns::Mdns}; 20 | use log::{debug, error, info, trace, warn}; 21 | 22 | use std::collections::HashMap; 23 | use std::{ 24 | collections::{HashSet, VecDeque}, 25 | io, 26 | task::{Context, Poll}, 27 | }; 28 | 29 | /// Event generated by the `DiscoveryBehaviour`. 30 | #[derive(Debug)] 31 | pub enum DiscoveryOut { 32 | /// Event that notifies that we connected to the node with the given peer id. 33 | Connected(PeerId), 34 | 35 | /// Event that notifies that we disconnected with the node with the given peer id. 36 | Disconnected(PeerId), 37 | } 38 | 39 | /// Implementation of `NetworkBehaviour` that discovers the nodes on the network. 40 | pub struct DiscoveryBehaviour { 41 | /// User-defined list of nodes and their addresses. Typically includes bootstrap nodes and 42 | /// reserved nodes. 43 | user_defined: Vec<(PeerId, Multiaddr)>, 44 | /// Kademlia discovery. 45 | kademlia: Toggle>, 46 | /// Discovers nodes on the local network. 
47 | mdns: Toggle, 48 | /// Events to return in priority when polled. 49 | pending_events: VecDeque, 50 | /// Number of nodes we're currently connected to. 51 | num_connections: u64, 52 | /// Keeps hash set of peers connected. 53 | peers: HashSet, 54 | /// Keeps hash map of peers and their multiaddresses 55 | peer_addresses: HashMap>, 56 | } 57 | 58 | impl DiscoveryBehaviour { 59 | pub fn new(local_public_key: PublicKey, params: Params) -> Self { 60 | let local_peer_id = local_public_key.to_peer_id(); 61 | let mut peers = HashSet::new(); 62 | let peer_addresses = HashMap::new(); 63 | 64 | let user_defined: Vec<_> = params 65 | .rooms 66 | .iter() 67 | .flat_map(|ra| ra.boot_peers.clone()) 68 | .map(|mwp| (mwp.peer_id, mwp.multiaddr)) 69 | .collect(); 70 | 71 | let kademlia_opt = { 72 | // Kademlia config 73 | let store = MemoryStore::new(local_peer_id.to_owned()); 74 | let kad_config = KademliaConfig::default(); 75 | 76 | if params.kademlia { 77 | let mut kademlia = Kademlia::with_config(local_peer_id, store, kad_config); 78 | for (peer_id, addr) in user_defined.iter() { 79 | kademlia.add_address(peer_id, addr.clone()); 80 | peers.insert(*peer_id); 81 | } 82 | info!("kademlia peers: {:?}", peers); 83 | if let Err(e) = kademlia.bootstrap() { 84 | warn!("Kademlia bootstrap failed: {}", e); 85 | } 86 | Some(kademlia) 87 | } else { 88 | None 89 | } 90 | }; 91 | 92 | let mdns_opt = if params.mdns { 93 | Some(task::block_on(async { 94 | Mdns::new(Default::default()) 95 | .await 96 | .expect("Could not start mDNS") 97 | })) 98 | } else { 99 | None 100 | }; 101 | 102 | DiscoveryBehaviour { 103 | user_defined, 104 | kademlia: kademlia_opt.into(), 105 | pending_events: VecDeque::new(), 106 | num_connections: 0, 107 | mdns: mdns_opt.into(), 108 | peers, 109 | peer_addresses, 110 | } 111 | } 112 | 113 | /// Returns reference to peer set. 
114 | pub fn peers(&self) -> &HashSet { 115 | &self.peers 116 | } 117 | 118 | /// Returns a map of peer ids and their multiaddresses 119 | pub fn peer_addresses(&self) -> &HashMap> { 120 | &self.peer_addresses 121 | } 122 | 123 | /// Bootstrap Kademlia network 124 | pub fn bootstrap(&mut self) -> Result { 125 | if let Some(active_kad) = self.kademlia.as_mut() { 126 | active_kad.bootstrap().map_err(|e| e.to_string()) 127 | } else { 128 | Err("Kademlia is not activated".to_string()) 129 | } 130 | } 131 | } 132 | 133 | impl NetworkBehaviour for DiscoveryBehaviour { 134 | type ProtocolsHandler = ToggleIntoProtoHandler>; 135 | type OutEvent = DiscoveryOut; 136 | 137 | fn new_handler(&mut self) -> Self::ProtocolsHandler { 138 | self.kademlia.new_handler() 139 | } 140 | 141 | fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { 142 | let mut list = self 143 | .user_defined 144 | .iter() 145 | .filter_map(|(p, a)| if p == peer_id { Some(a.clone()) } else { None }) 146 | .collect::>(); 147 | 148 | { 149 | let mut list_to_filter = Vec::new(); 150 | if let Some(k) = self.kademlia.as_mut() { 151 | list_to_filter.extend(k.addresses_of_peer(peer_id)) 152 | } 153 | 154 | list_to_filter.extend(self.mdns.addresses_of_peer(peer_id)); 155 | 156 | list.extend(list_to_filter); 157 | } 158 | 159 | trace!("Addresses of {:?}: {:?}", peer_id, list); 160 | 161 | list 162 | } 163 | 164 | fn inject_connected(&mut self, peer_id: &PeerId) { 165 | let multiaddr = self.addresses_of_peer(peer_id); 166 | self.peer_addresses.insert(*peer_id, multiaddr); 167 | self.peers.insert(*peer_id); 168 | self.pending_events 169 | .push_back(DiscoveryOut::Connected(*peer_id)); 170 | 171 | self.kademlia.inject_connected(peer_id) 172 | } 173 | 174 | fn inject_disconnected(&mut self, peer_id: &PeerId) { 175 | self.pending_events 176 | .push_back(DiscoveryOut::Disconnected(*peer_id)); 177 | 178 | self.kademlia.inject_disconnected(peer_id) 179 | } 180 | 181 | fn inject_connection_established( 182 | &mut self, 
183 | peer_id: &PeerId, 184 | conn: &ConnectionId, 185 | endpoint: &ConnectedPoint, 186 | failed_addresses: Option<&Vec>, 187 | ) { 188 | self.num_connections += 1; 189 | 190 | self.kademlia 191 | .inject_connection_established(peer_id, conn, endpoint, failed_addresses) 192 | } 193 | 194 | fn inject_connection_closed( 195 | &mut self, 196 | peer_id: &PeerId, 197 | conn: &ConnectionId, 198 | endpoint: &ConnectedPoint, 199 | handler: ::Handler, 200 | ) { 201 | self.num_connections -= 1; 202 | 203 | self.kademlia 204 | .inject_connection_closed(peer_id, conn, endpoint, handler) 205 | } 206 | 207 | fn inject_event( 208 | &mut self, 209 | peer_id: PeerId, 210 | connection: ConnectionId, 211 | event: <::Handler as ProtocolsHandler>::OutEvent, 212 | ) { 213 | if let Some(kad) = self.kademlia.as_mut() { 214 | return kad.inject_event(peer_id, connection, event); 215 | } 216 | error!("inject_node_event: no kademlia instance registered for protocol") 217 | } 218 | 219 | fn inject_dial_failure( 220 | &mut self, 221 | peer_id: Option, 222 | handler: Self::ProtocolsHandler, 223 | err: &DialError, 224 | ) { 225 | self.kademlia.inject_dial_failure(peer_id, handler, err) 226 | } 227 | 228 | fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { 229 | self.kademlia.inject_new_listen_addr(id, addr) 230 | } 231 | 232 | fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { 233 | self.kademlia.inject_expired_listen_addr(id, addr); 234 | } 235 | 236 | fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { 237 | self.kademlia.inject_listener_error(id, err) 238 | } 239 | 240 | fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { 241 | self.kademlia.inject_listener_closed(id, reason) 242 | } 243 | 244 | fn inject_new_external_addr(&mut self, addr: &Multiaddr) { 245 | self.kademlia.inject_new_external_addr(addr) 246 | } 247 | 248 | #[allow(clippy::type_complexity)] 249 | fn 
poll( 250 | &mut self, 251 | cx: &mut Context, 252 | params: &mut impl PollParameters, 253 | ) -> Poll> { 254 | // Immediately process the content of `discovered`. 255 | if let Some(ev) = self.pending_events.pop_front() { 256 | return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); 257 | } 258 | 259 | // Poll Kademlia. 260 | while let Poll::Ready(ev) = self.kademlia.poll(cx, params) { 261 | match ev { 262 | NetworkBehaviourAction::GenerateEvent(ev) => match ev { 263 | KademliaEvent::RoutingUpdated { .. } => {} 264 | KademliaEvent::RoutablePeer { .. } => {} 265 | KademliaEvent::PendingRoutablePeer { .. } => {} 266 | other => { 267 | debug!("Kademlia event: {:?}", other) 268 | } 269 | }, 270 | NetworkBehaviourAction::DialAddress { address, handler } => { 271 | return Poll::Ready(NetworkBehaviourAction::DialAddress { address, handler }) 272 | } 273 | NetworkBehaviourAction::DialPeer { 274 | peer_id, 275 | condition, 276 | handler, 277 | } => { 278 | return Poll::Ready(NetworkBehaviourAction::DialPeer { 279 | peer_id, 280 | condition, 281 | handler, 282 | }) 283 | } 284 | NetworkBehaviourAction::NotifyHandler { 285 | peer_id, 286 | handler, 287 | event, 288 | } => { 289 | return Poll::Ready(NetworkBehaviourAction::NotifyHandler { 290 | peer_id, 291 | handler, 292 | event, 293 | }) 294 | } 295 | NetworkBehaviourAction::ReportObservedAddr { address, score } => { 296 | return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { 297 | address, 298 | score, 299 | }) 300 | } 301 | NetworkBehaviourAction::CloseConnection { 302 | peer_id, 303 | connection, 304 | } => { 305 | return Poll::Ready(NetworkBehaviourAction::CloseConnection { 306 | peer_id, 307 | connection, 308 | }) 309 | } 310 | } 311 | } 312 | 313 | // Poll mdns. 
314 | while let Poll::Ready(ev) = self.mdns.poll(cx, params) { 315 | match ev { 316 | NetworkBehaviourAction::GenerateEvent(event) => match event { 317 | MdnsEvent::Discovered(list) => { 318 | // Add any discovered peers to Kademlia 319 | for (peer_id, multiaddr) in list { 320 | if let Some(kad) = self.kademlia.as_mut() { 321 | kad.add_address(&peer_id, multiaddr); 322 | } 323 | } 324 | } 325 | MdnsEvent::Expired(_) => {} 326 | }, 327 | NetworkBehaviourAction::DialAddress { .. } => {} 328 | NetworkBehaviourAction::DialPeer { .. } => {} 329 | // Nothing to notify handler 330 | NetworkBehaviourAction::NotifyHandler { event, .. } => match event { 331 | _ => {} 332 | }, 333 | NetworkBehaviourAction::ReportObservedAddr { address, score } => { 334 | return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { 335 | address, 336 | score, 337 | }) 338 | } 339 | NetworkBehaviourAction::CloseConnection { 340 | peer_id, 341 | connection, 342 | } => { 343 | return Poll::Ready(NetworkBehaviourAction::CloseConnection { 344 | peer_id, 345 | connection, 346 | }) 347 | } 348 | } 349 | } 350 | 351 | // Poll pending events 352 | if let Some(ev) = self.pending_events.pop_front() { 353 | return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); 354 | } 355 | 356 | Poll::Pending 357 | } 358 | } 359 | -------------------------------------------------------------------------------- /node/network/src/error.rs: -------------------------------------------------------------------------------- 1 | use libp2p::{Multiaddr, PeerId}; 2 | use std::borrow::Cow; 3 | use std::fmt; 4 | 5 | /// Error type for the network. 6 | #[derive(derive_more::Display, derive_more::From)] 7 | pub enum Error { 8 | /// Io error 9 | Io(std::io::Error), 10 | /// The same node (based on address) is registered with two different peer ids. 
11 | #[display( 12 | fmt = "The same node (`{}`) is registered with two different peer ids: `{}` and `{}`", 13 | address, 14 | first_id, 15 | second_id 16 | )] 17 | DuplicateNode { 18 | /// The address of the node. 19 | address: Multiaddr, 20 | /// The first peer id that was found for the node. 21 | first_id: PeerId, 22 | /// The second peer id that was found for the node. 23 | second_id: PeerId, 24 | }, 25 | /// The same request-response protocol has been registered multiple times. 26 | #[display(fmt = "Broadcast protocol registered multiple times: {}", protocol)] 27 | DuplicateBroadcastProtocol { 28 | /// Name of the protocol registered multiple times. 29 | protocol: Cow<'static, str>, 30 | }, 31 | } 32 | 33 | // Make `Debug` use the `Display` implementation. 34 | impl fmt::Debug for Error { 35 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 36 | fmt::Display::fmt(self, f) 37 | } 38 | } 39 | 40 | impl std::error::Error for Error { 41 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 42 | match self { 43 | Self::Io(ref err) => Some(err), 44 | Self::DuplicateNode { .. } | Self::DuplicateBroadcastProtocol { .. } => None, 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /node/network/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(inherent_associated_types)] 2 | 3 | extern crate core; 4 | 5 | mod behaviour; 6 | pub mod broadcast; 7 | mod config; 8 | mod discovery; 9 | mod error; 10 | mod messages; 11 | mod service; 12 | 13 | pub use self::config::*; 14 | pub use self::messages::*; 15 | pub use self::service::*; 16 | use std::borrow::Cow; 17 | 18 | use arrayvec::ArrayString; 19 | 20 | /// The maximum allowed number of established connections per peer. 21 | const MAX_CONNECTIONS_PER_PEER: usize = 2; 22 | 23 | /// Identifier of a room in the peerset. 
24 | #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] 25 | pub struct RoomId(ArrayString<64>); 26 | 27 | impl RoomId { 28 | pub fn from(id: String) -> Self { 29 | Self(blake3::hash(id.as_bytes()).to_hex()) 30 | } 31 | 32 | pub fn as_str(&self) -> &str { 33 | self.0.as_str() 34 | } 35 | 36 | pub fn as_protocol_id(&self) -> Cow<'static, str> { 37 | Cow::Owned(format!("/room/{}", self.0.to_string())) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /node/network/src/messages.rs: -------------------------------------------------------------------------------- 1 | use futures::prelude::*; 2 | use libp2p::request_response::RequestResponseCodec; 3 | use std::io; 4 | 5 | pub struct WireMessage { 6 | pub context: MessageContext, 7 | pub payload: Vec, 8 | pub is_broadcast: bool, 9 | } 10 | 11 | #[derive(Clone, Copy, Debug)] 12 | pub struct MessageContext { 13 | pub message_type: MessageType, 14 | pub session_id: u64, 15 | pub protocol_id: u64, 16 | } 17 | 18 | #[derive(Clone, Copy, Debug)] 19 | pub enum MessageType { 20 | Coordination = 0, 21 | Computation, 22 | } 23 | 24 | /// Implements the libp2p [`RequestResponseCodec`] trait. Defines how streams of bytes are turned 25 | /// into requests and responses and vice-versa. 26 | #[derive(Debug, Clone)] 27 | #[doc(hidden)] // Needs to be public in order to satisfy the Rust compiler. 
28 | pub struct GenericCodec { 29 | pub max_request_size: u64, 30 | pub max_response_size: u64, 31 | } 32 | 33 | #[async_trait::async_trait] 34 | impl RequestResponseCodec for GenericCodec { 35 | type Protocol = Vec; 36 | type Request = WireMessage; 37 | type Response = Result, ()>; 38 | 39 | async fn read_request( 40 | &mut self, 41 | _: &Self::Protocol, 42 | mut io: &mut T, 43 | ) -> io::Result 44 | where 45 | T: AsyncRead + Unpin + Send, 46 | { 47 | let message_type = match unsigned_varint::aio::read_u8(&mut io) 48 | .await 49 | .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))? 50 | { 51 | 0 => MessageType::Coordination, 52 | 1 => MessageType::Computation, 53 | _ => { 54 | panic!("unknown messages type"); 55 | } 56 | }; 57 | 58 | let is_broadcast = unsigned_varint::aio::read_u8(&mut io) 59 | .await 60 | .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; 61 | 62 | let session_id = unsigned_varint::aio::read_u64(&mut io) 63 | .await 64 | .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; 65 | 66 | let protocol_id = unsigned_varint::aio::read_u64(&mut io) 67 | .await 68 | .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; 69 | 70 | // Read the length. 71 | let length = unsigned_varint::aio::read_usize(&mut io) 72 | .await 73 | .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; 74 | 75 | if length > usize::try_from(self.max_request_size).unwrap_or(usize::MAX) { 76 | return Err(io::Error::new( 77 | io::ErrorKind::InvalidInput, 78 | format!( 79 | "Request size exceeds limit: {} > {}", 80 | length, self.max_request_size 81 | ), 82 | )); 83 | } 84 | 85 | // Read the payload. 
86 | let mut buffer = vec![0; length]; 87 | io.read_exact(&mut buffer).await?; 88 | 89 | Ok(WireMessage { 90 | context: MessageContext { 91 | message_type, 92 | session_id, 93 | protocol_id, 94 | }, 95 | payload: buffer, 96 | is_broadcast: is_broadcast != 0, 97 | }) 98 | } 99 | 100 | async fn read_response( 101 | &mut self, 102 | _: &Self::Protocol, 103 | mut io: &mut T, 104 | ) -> io::Result 105 | where 106 | T: AsyncRead + Unpin + Send, 107 | { 108 | // Read the length. 109 | let length = match unsigned_varint::aio::read_usize(&mut io).await { 110 | Ok(l) => l, 111 | Err(unsigned_varint::io::ReadError::Io(err)) 112 | if matches!(err.kind(), io::ErrorKind::UnexpectedEof) => 113 | { 114 | return Ok(Err(())); 115 | } 116 | Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidInput, err)), 117 | }; 118 | 119 | if length > usize::try_from(self.max_response_size).unwrap_or(usize::MAX) { 120 | return Err(io::Error::new( 121 | io::ErrorKind::InvalidInput, 122 | format!( 123 | "Response size exceeds limit: {} > {}", 124 | length, self.max_response_size 125 | ), 126 | )); 127 | } 128 | 129 | // Read the payload. 
130 | let mut buffer = vec![0; length]; 131 | io.read_exact(&mut buffer).await?; 132 | Ok(Ok(buffer)) 133 | } 134 | 135 | async fn write_request( 136 | &mut self, 137 | _: &Self::Protocol, 138 | io: &mut T, 139 | req: Self::Request, 140 | ) -> io::Result<()> 141 | where 142 | T: AsyncWrite + Unpin + Send, 143 | { 144 | // Write message type 145 | { 146 | let mut buffer = unsigned_varint::encode::u8_buffer(); 147 | io.write_all(unsigned_varint::encode::u8( 148 | req.context.message_type as u8, 149 | &mut buffer, 150 | )) 151 | .await?; 152 | } 153 | 154 | // Write broadcast marker 155 | { 156 | let mut buffer = unsigned_varint::encode::u8_buffer(); 157 | io.write_all(unsigned_varint::encode::u8( 158 | req.is_broadcast as u8, 159 | &mut buffer, 160 | )) 161 | .await?; 162 | } 163 | 164 | // Write session_id 165 | { 166 | let mut buffer = unsigned_varint::encode::u64_buffer(); 167 | io.write_all(unsigned_varint::encode::u64( 168 | req.context.session_id, 169 | &mut buffer, 170 | )) 171 | .await?; 172 | } 173 | 174 | // Write protocol_id 175 | { 176 | let mut buffer = unsigned_varint::encode::u64_buffer(); 177 | io.write_all(unsigned_varint::encode::u64( 178 | req.context.protocol_id, 179 | &mut buffer, 180 | )) 181 | .await?; 182 | } 183 | 184 | // Write the length. 185 | { 186 | let mut buffer = unsigned_varint::encode::usize_buffer(); 187 | io.write_all(unsigned_varint::encode::usize( 188 | req.payload.len(), 189 | &mut buffer, 190 | )) 191 | .await?; 192 | } 193 | 194 | // Write the payload. 195 | io.write_all(&req.payload).await?; 196 | 197 | io.close().await?; 198 | Ok(()) 199 | } 200 | 201 | async fn write_response( 202 | &mut self, 203 | _: &Self::Protocol, 204 | io: &mut T, 205 | res: Self::Response, 206 | ) -> io::Result<()> 207 | where 208 | T: AsyncWrite + Unpin + Send, 209 | { 210 | // If `res` is an `Err`, we jump to closing the substream without writing anything on it. 211 | if let Ok(res) = res { 212 | // TODO: check the length? 
213 | // Write the length. 214 | { 215 | let mut buffer = unsigned_varint::encode::usize_buffer(); 216 | io.write_all(unsigned_varint::encode::usize(res.len(), &mut buffer)) 217 | .await?; 218 | } 219 | 220 | // Write the payload. 221 | io.write_all(&res).await?; 222 | } 223 | 224 | io.close().await?; 225 | Ok(()) 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /node/network/src/service.rs: -------------------------------------------------------------------------------- 1 | use crate::broadcast::IfDisconnected; 2 | use crate::error::Error; 3 | use crate::{ 4 | behaviour::{Behaviour, BehaviourOut}, 5 | broadcast, MessageContext, NodeKeyConfig, RoomId, 6 | }; 7 | use async_std::channel::{unbounded, Receiver, Sender}; 8 | use futures::channel::mpsc; 9 | use futures::select; 10 | use futures_util::stream::StreamExt; 11 | use libp2p::core::transport::upgrade; 12 | use libp2p::noise::NoiseConfig; 13 | use libp2p::swarm::SwarmEvent; 14 | use libp2p::tcp::TcpConfig; 15 | use libp2p::{mplex, noise, PeerId, Swarm, Transport}; 16 | use log::{info, warn}; 17 | use std::borrow::Cow; 18 | 19 | /// Events emitted by this Service. 20 | #[allow(clippy::large_enum_variant)] 21 | #[derive(Debug)] 22 | pub enum NetworkEvent { 23 | BroadcastMessage(PeerId, Cow<'static, str>), 24 | } 25 | 26 | /// Messages into the service to handle. 27 | #[derive(Debug)] 28 | pub enum NetworkMessage { 29 | RequestResponse { 30 | room_id: RoomId, 31 | context: MessageContext, 32 | message: MessageRouting, 33 | }, 34 | } 35 | 36 | #[derive(Debug)] 37 | pub enum MessageRouting { 38 | Broadcast( 39 | Vec, 40 | Option), broadcast::RequestFailure>>>, 41 | ), 42 | Multicast( 43 | Vec, 44 | Vec, 45 | Option), broadcast::RequestFailure>>>, 46 | ), 47 | SendDirect( 48 | PeerId, 49 | Vec, 50 | mpsc::Sender), broadcast::RequestFailure>>, 51 | ), 52 | } 53 | 54 | /// The Libp2pService listens to events from the Libp2p swarm. 
55 | pub struct NetworkWorker { 56 | swarm: Swarm, 57 | from_service: Receiver, 58 | local_peer_id: PeerId, 59 | } 60 | 61 | #[derive(Clone)] 62 | pub struct NetworkService { 63 | /// Local copy of the `PeerId` of the local node. 64 | local_peer_id: PeerId, 65 | /// Channel for sending requests to worker. 66 | to_worker: Sender, 67 | } 68 | 69 | impl NetworkWorker { 70 | pub fn new( 71 | node_key: NodeKeyConfig, 72 | params: crate::Params, 73 | ) -> Result<(NetworkWorker, NetworkService), Error> { 74 | let keypair = node_key.into_keypair().map_err(|e| Error::Io(e))?; 75 | let local_peer_id = PeerId::from(keypair.public()); 76 | info!( 77 | target: "sub-libp2p", 78 | "🏷 Local node identity is: {}", 79 | local_peer_id.to_base58(), 80 | ); 81 | 82 | let transport = { 83 | let dh_keys = noise::Keypair::::new() 84 | .into_authentic(&keypair) 85 | .expect("Noise key generation failed"); 86 | 87 | TcpConfig::new() 88 | .upgrade(upgrade::Version::V1) 89 | .authenticate(NoiseConfig::xx(dh_keys).into_authenticated()) 90 | .multiplex(mplex::MplexConfig::new()) 91 | .boxed() 92 | }; 93 | 94 | let mut broadcast_protocols = vec![]; 95 | 96 | for rc in params.rooms.clone() { 97 | let protocol_id = Cow::Owned(rc.id.as_protocol_id().to_string()); 98 | let proto_cfg = broadcast::ProtocolConfig::new(protocol_id, rc.inbound_queue); 99 | 100 | broadcast_protocols.push(proto_cfg); 101 | } 102 | 103 | let behaviour = { 104 | match Behaviour::new(&keypair, broadcast_protocols, params.clone()) { 105 | Ok(b) => b, 106 | Err(broadcast::RegisterError::DuplicateProtocol(proto)) => { 107 | return Err(Error::DuplicateBroadcastProtocol { protocol: proto }); 108 | } 109 | } 110 | }; 111 | 112 | let mut swarm = Swarm::new(transport, behaviour, local_peer_id); 113 | 114 | // Listen on the addresses. 
115 | if let Err(err) = swarm.listen_on(params.listen_address) { 116 | warn!(target: "sub-libp2p", "Can't listen on 'listen_address' because: {:?}", err) 117 | } 118 | 119 | let (network_sender_in, network_receiver_in) = unbounded(); 120 | 121 | let worker = NetworkWorker { 122 | local_peer_id, 123 | swarm, 124 | from_service: network_receiver_in, 125 | }; 126 | 127 | let service = NetworkService { 128 | local_peer_id, 129 | to_worker: network_sender_in, 130 | }; 131 | 132 | Ok((worker, service)) 133 | } 134 | 135 | /// Starts the libp2p service networking stack. 136 | pub async fn run(mut self) { 137 | // Bootstrap with Kademlia 138 | if let Err(e) = self.swarm.behaviour_mut().bootstrap() { 139 | warn!("Failed to bootstrap with Kademlia: {}", e); 140 | } 141 | 142 | let mut swarm_stream = self.swarm.fuse(); 143 | let mut network_stream = self.from_service.fuse(); 144 | 145 | loop { 146 | select! { 147 | swarm_event = swarm_stream.next() => match swarm_event { 148 | // Outbound events 149 | Some(event) => match event { 150 | SwarmEvent::Behaviour(BehaviourOut::InboundMessage{peer, protocol}) => { 151 | info!("Inbound message from {:?} related to {:?} protocol", peer, protocol); 152 | }, 153 | SwarmEvent::NewListenAddr { address, .. } => info!("Listening on {:?}", address), 154 | SwarmEvent::ConnectionEstablished { peer_id: _, .. } => { 155 | 156 | }, 157 | SwarmEvent::ConnectionClosed { peer_id: _, .. 
} => { } 158 | _ => continue 159 | } 160 | None => { break; } 161 | }, 162 | rpc_message = network_stream.next() => match rpc_message { 163 | // Inbound requests 164 | Some(request) => { 165 | let behaviour = swarm_stream.get_mut().behaviour_mut(); 166 | 167 | match request { 168 | NetworkMessage::RequestResponse { 169 | room_id, 170 | context, 171 | message 172 | } => { 173 | 174 | match message { 175 | MessageRouting::Broadcast(payload, response_sender) => { 176 | behaviour.broadcast_message( 177 | behaviour.peers(room_id), 178 | payload, 179 | room_id, 180 | context, 181 | response_sender, 182 | IfDisconnected::TryConnect, 183 | ) 184 | } 185 | MessageRouting::Multicast(peer_ids, payload, response_sender) => { 186 | behaviour.broadcast_message( 187 | peer_ids.into_iter(), 188 | payload, 189 | room_id, 190 | context, 191 | response_sender, 192 | IfDisconnected::TryConnect, 193 | ) 194 | } 195 | MessageRouting::SendDirect(peer_id, payload, response_sender) => { 196 | behaviour.send_message( 197 | &peer_id, 198 | payload, 199 | room_id, 200 | context, 201 | response_sender, 202 | IfDisconnected::TryConnect, 203 | ) 204 | } 205 | } 206 | } 207 | } 208 | } 209 | None => { break; } 210 | } 211 | }; 212 | } 213 | } 214 | } 215 | 216 | impl NetworkService { 217 | pub async fn broadcast_message( 218 | &self, 219 | room_id: &RoomId, 220 | context: MessageContext, 221 | payload: Vec, 222 | response_sender: Option), broadcast::RequestFailure>>>, 223 | ) { 224 | self.to_worker 225 | .send(NetworkMessage::RequestResponse { 226 | room_id: room_id.clone(), 227 | context, 228 | message: MessageRouting::Broadcast(payload, response_sender), 229 | }) 230 | .await 231 | .expect("expected worker worker channel to not be full"); 232 | } 233 | 234 | pub async fn broadcast_message_owned( 235 | self, 236 | room_id: RoomId, 237 | context: MessageContext, 238 | payload: Vec, 239 | response_sender: Option), broadcast::RequestFailure>>>, 240 | ) { 241 | self.to_worker 242 | 
.send(NetworkMessage::RequestResponse { 243 | room_id, 244 | context, 245 | message: MessageRouting::Broadcast(payload, response_sender), 246 | }) 247 | .await 248 | .expect("expected worker worker channel to not be full"); 249 | } 250 | 251 | pub async fn multicast_message( 252 | &self, 253 | room_id: &RoomId, 254 | peer_ids: impl Iterator, 255 | context: MessageContext, 256 | payload: Vec, 257 | response_sender: Option), broadcast::RequestFailure>>>, 258 | ) { 259 | self.to_worker 260 | .send(NetworkMessage::RequestResponse { 261 | room_id: room_id.clone(), 262 | context, 263 | message: MessageRouting::Multicast(peer_ids.collect(), payload, response_sender), 264 | }) 265 | .await 266 | .expect("expected worker worker channel to not be full"); 267 | } 268 | 269 | pub async fn multicast_message_owned( 270 | self, 271 | room_id: RoomId, 272 | peer_ids: impl Iterator, 273 | context: MessageContext, 274 | payload: Vec, 275 | response_sender: Option), broadcast::RequestFailure>>>, 276 | ) { 277 | self.to_worker 278 | .send(NetworkMessage::RequestResponse { 279 | room_id, 280 | context, 281 | message: MessageRouting::Multicast(peer_ids.collect(), payload, response_sender), 282 | }) 283 | .await 284 | .expect("expected worker worker channel to not be full"); 285 | } 286 | 287 | pub async fn send_message( 288 | &self, 289 | room_id: &RoomId, 290 | peer_id: PeerId, 291 | context: MessageContext, 292 | payload: Vec, 293 | response_sender: mpsc::Sender), broadcast::RequestFailure>>, 294 | ) { 295 | self.to_worker 296 | .send(NetworkMessage::RequestResponse { 297 | room_id: room_id.clone(), 298 | context, 299 | message: MessageRouting::SendDirect(peer_id, payload, response_sender), 300 | }) 301 | .await 302 | .expect("expected worker worker channel to not be full"); 303 | } 304 | 305 | pub async fn send_message_owned( 306 | self, 307 | room_id: RoomId, 308 | peer_id: PeerId, 309 | context: MessageContext, 310 | payload: Vec, 311 | response_sender: mpsc::Sender), 
broadcast::RequestFailure>>, 312 | ) { 313 | self.to_worker 314 | .send(NetworkMessage::RequestResponse { 315 | room_id, 316 | context, 317 | message: MessageRouting::SendDirect(peer_id, payload, response_sender), 318 | }) 319 | .await 320 | .expect("expected worker worker channel to not be full"); 321 | } 322 | 323 | pub fn local_peer_id(&self) -> PeerId { 324 | self.local_peer_id.clone() 325 | } 326 | } 327 | -------------------------------------------------------------------------------- /node/rpc-api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-api" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | log = "0.4" 10 | anyhow = "1.0.56" 11 | futures = { version = "0.3.2" } 12 | futures-util = "0.3" 13 | tokio = { version = "1" } 14 | async-std = { version = "1.10.0", features = ["attributes", "tokio1"] } 15 | async-trait = "0.1" 16 | libp2p = "0.40.0" 17 | serde = {version = "1.0", features = ["derive"] } 18 | serde_json = "1" 19 | serde_ipld_dagcbor = "0.1.2" 20 | round-based = { version = "0.1.4" } 21 | curv-kzen = "0.9" 22 | multi-party-ecdsa = {git = "https://github.com/ZenGo-X/multi-party-ecdsa"} 23 | pretty_env_logger = "0.4.0" 24 | unsigned-varint = { version = "0.6.0"} 25 | 26 | mpc-p2p = { path = "../network" } 27 | mpc-rpc = { path = "../rpc" } 28 | mpc-runtime = { path = "../runtime" } 29 | mpc-tss = { path = "../../tss" } 30 | 31 | [dev-dependencies] 32 | round-based = { version = "0.1.4", features = ["dev"] } 33 | -------------------------------------------------------------------------------- /node/rpc-api/src/lib.rs: -------------------------------------------------------------------------------- 1 | use async_std::task; 2 | use curv::elliptic::curves::{Point, Secp256k1}; 3 | use futures::channel::oneshot; 4 | 5 | use futures::future::TryFutureExt; 6 
| 7 | use mpc_p2p::RoomId; 8 | use mpc_rpc::{RpcError, RpcErrorCode, RpcFuture, RpcResult}; 9 | 10 | use serde::de::DeserializeOwned; 11 | 12 | use multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::party_i::SignatureRecid; 13 | use std::fmt::{Debug, Display}; 14 | use std::future::Future; 15 | use std::io::{BufWriter, Write}; 16 | use std::marker::PhantomData; 17 | use std::pin::Pin; 18 | use std::task::{Context, Poll}; 19 | 20 | pub struct RpcApi { 21 | rt_service: mpc_runtime::RuntimeService, 22 | } 23 | 24 | impl RpcApi { 25 | pub fn new(rt_service: mpc_runtime::RuntimeService) -> Self { 26 | Self { rt_service } 27 | } 28 | } 29 | 30 | impl mpc_rpc::JsonRPCHandler for RpcApi { 31 | fn keygen(&self, room: String, n: u16, t: u16) -> RpcFuture>> { 32 | let mut rt_service = self.rt_service.clone(); 33 | 34 | let (tx, rx) = oneshot::channel(); 35 | 36 | let mut io = BufWriter::new(vec![]); 37 | let mut buffer = unsigned_varint::encode::u16_buffer(); 38 | let _ = io.write_all(unsigned_varint::encode::u16(t, &mut buffer)); 39 | 40 | task::spawn(async move { 41 | rt_service 42 | .request_computation(RoomId::from(room), n, 0, io.buffer().to_vec(), tx) 43 | .await; 44 | }); 45 | 46 | AsyncResult::new_boxed(rx) 47 | } 48 | 49 | fn sign(&self, room: String, t: u16, msg: Vec) -> RpcFuture> { 50 | let mut rt_service = self.rt_service.clone(); 51 | 52 | let (tx, rx) = oneshot::channel(); 53 | task::spawn(async move { 54 | rt_service 55 | .request_computation(RoomId::from(room), t + 1, 1, msg, tx) 56 | .await; 57 | }); 58 | 59 | AsyncResult::new_boxed(rx) 60 | } 61 | } 62 | 63 | #[derive(Debug)] 64 | struct AsyncResult { 65 | rx: oneshot::Receiver, E>>, 66 | v: PhantomData, 67 | } 68 | 69 | impl Future for AsyncResult { 70 | type Output = RpcResult; 71 | 72 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 73 | return match self.rx.try_recv() { 74 | Ok(Some(Ok(value))) => Poll::Ready(serde_ipld_dagcbor::from_slice(&*value).map_err( 75 | |e| RpcError { 
76 | code: RpcErrorCode::InternalError, 77 | message: format!( 78 | "computation finished successfully but resulted an unexpected output: {e}" 79 | ), 80 | data: None, 81 | }, 82 | )), 83 | Ok(Some(Err(e))) => Poll::Ready(Err(RpcError { 84 | code: RpcErrorCode::InternalError, 85 | message: format!("computation terminated with err: {e}"), 86 | data: None, 87 | })), 88 | Ok(None) => { 89 | cx.waker().wake_by_ref(); 90 | Poll::Pending 91 | } 92 | Err(e) => Poll::Ready(Err(RpcError { 93 | code: RpcErrorCode::InternalError, 94 | message: format!("computation terminated with err: {e}"), 95 | data: None, 96 | })), 97 | }; 98 | } 99 | } 100 | 101 | impl AsyncResult { 102 | pub fn new_boxed(rx: oneshot::Receiver, E>>) -> Pin> { 103 | Box::pin(Self { 104 | rx, 105 | v: Default::default(), 106 | }) 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /node/rpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-rpc" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | anyhow = "1" 10 | async-std = "1.11.0" 11 | futures = { version = "0.3" } 12 | jsonrpc-core = "18" 13 | jsonrpc-derive = "18" 14 | jsonrpc-ws-server = "18" 15 | jsonrpc-core-client = { version = "18", features = ["ws"] } 16 | curv-kzen = "0.9" 17 | multi-party-ecdsa = {git = "https://github.com/ZenGo-X/multi-party-ecdsa"} 18 | -------------------------------------------------------------------------------- /node/rpc/src/handler.rs: -------------------------------------------------------------------------------- 1 | use crate::RpcResult; 2 | use anyhow::anyhow; 3 | use curv::elliptic::curves::{Point, Secp256k1}; 4 | use jsonrpc_core::BoxFuture; 5 | use jsonrpc_core_client::transports::ws; 6 | use jsonrpc_derive::rpc; 7 | use 
multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::party_i::SignatureRecid; 8 | 9 | #[rpc] 10 | pub trait JsonRPCHandler { 11 | #[rpc(name = "keygen")] 12 | fn keygen(&self, room: String, n: u16, t: u16) -> BoxFuture>>; 13 | 14 | #[rpc(name = "sign")] 15 | fn sign(&self, room: String, t: u16, msg: Vec) -> BoxFuture>; 16 | } 17 | 18 | pub async fn new_client(url: String) -> Result { 19 | let cl = ws::connect::(&url.parse().unwrap()) 20 | .await 21 | .map_err(|e| anyhow!("node connection terminated w/ err: {:?}", e))?; 22 | 23 | Ok(cl) 24 | } 25 | -------------------------------------------------------------------------------- /node/rpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod handler; 2 | pub mod server; 3 | 4 | pub use handler::*; 5 | 6 | pub use jsonrpc_ws_server::jsonrpc_core::{ 7 | BoxFuture as RpcFuture, Error as RpcError, ErrorCode as RpcErrorCode, Result as RpcResult, 8 | }; 9 | -------------------------------------------------------------------------------- /node/rpc/src/server.rs: -------------------------------------------------------------------------------- 1 | use crate::JsonRPCHandler; 2 | use anyhow::anyhow; 3 | use jsonrpc_core::IoHandler; 4 | use jsonrpc_ws_server::{Server, ServerBuilder}; 5 | 6 | pub struct JsonRPCServer { 7 | server: Server, 8 | } 9 | 10 | impl JsonRPCServer { 11 | pub fn new(config: Config, handler: T) -> Result 12 | where 13 | T: JsonRPCHandler, 14 | { 15 | let mut io = IoHandler::new(); 16 | io.extend_with(handler.to_delegate()); 17 | 18 | let server = ServerBuilder::new(io) 19 | .start(&config.host_address.parse().unwrap()) 20 | .map_err(|e| anyhow!("json rpc server start terminated with err: {:?}", e))?; 21 | 22 | Ok(Self { server }) 23 | } 24 | 25 | pub async fn run(self) -> Result<(), anyhow::Error> { 26 | self.server 27 | .wait() 28 | .map_err(|e| anyhow!("running json rpc server terminated with err: {:?}", e)) 29 | } 30 | } 31 | 32 | pub struct Config { 33 
| pub host_address: String, 34 | } 35 | 36 | impl Default for Config { 37 | fn default() -> Self { 38 | Self { 39 | host_address: "127.0.0.1:8080".to_string(), 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /node/runtime/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-runtime" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | log = "0.4" 10 | anyhow = "1.0.56" 11 | futures = "0.3.21" 12 | futures-util = "0.3" 13 | async-channel = "1.6.1" 14 | tokio = { version = "1", default-features = false, features = ["macros", "rt-multi-thread"] } 15 | async-std = { version = "1.10.0", features = ["attributes", "tokio1", "unstable"] } 16 | libp2p = "0.40.0" 17 | serde = {version = "1.0", features = ["derive"] } 18 | serde_json = "1" 19 | serde_ipld_dagcbor = "0.1.2" 20 | pretty_env_logger = "0.4.0" 21 | blake2 = "0.10" 22 | itertools = "0.10" 23 | async-trait = "0.1.53" 24 | unsigned-varint = { version = "0.6.0"} 25 | mpc-p2p = {path = "../network" } 26 | 27 | [dev-dependencies] 28 | round-based = { version = "0.1.4", features = ["dev"] } 29 | -------------------------------------------------------------------------------- /node/runtime/src/coordination.rs: -------------------------------------------------------------------------------- 1 | use crate::negotiation::{NegotiationChan, StartMsg}; 2 | use crate::network_proxy::ReceiverProxy; 3 | use crate::peerset::Peerset; 4 | use crate::{ComputeAgentAsync, PeersetMsg}; 5 | use async_std::stream; 6 | use async_std::stream::Interval; 7 | use futures::channel::{mpsc, oneshot}; 8 | use futures::Stream; 9 | use libp2p::PeerId; 10 | use log::info; 11 | use mpc_p2p::broadcast::OutgoingResponse; 12 | use mpc_p2p::{broadcast, MessageType, NetworkService, RoomId}; 13 | use 
std::future::Future; 14 | use std::pin::Pin; 15 | use std::task::{Context, Poll}; 16 | use std::time::Duration; 17 | 18 | pub(crate) struct Phase1Channel { 19 | id: RoomId, 20 | rx: Option>, 21 | on_local_rpc: oneshot::Receiver, 22 | service: NetworkService, 23 | } 24 | 25 | impl Phase1Channel { 26 | pub fn new( 27 | room_id: RoomId, 28 | room_rx: mpsc::Receiver, 29 | service: NetworkService, 30 | ) -> (Self, oneshot::Sender) { 31 | let (tx, rx) = oneshot::channel(); 32 | ( 33 | Self { 34 | id: room_id, 35 | rx: Some(room_rx), 36 | on_local_rpc: rx, 37 | service, 38 | }, 39 | tx, 40 | ) 41 | } 42 | } 43 | 44 | impl Future for Phase1Channel { 45 | type Output = Phase1Msg; 46 | 47 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 48 | match self.rx.as_mut().unwrap().try_next() { 49 | Ok(Some(msg)) => match msg.context.message_type { 50 | MessageType::Coordination => { 51 | return Poll::Ready(Phase1Msg::FromRemote { 52 | peer_id: msg.peer_id, 53 | session_id: msg.context.session_id, 54 | protocol_id: msg.context.protocol_id, 55 | payload: msg.payload, 56 | response_tx: msg.pending_response, 57 | channel: Phase2Chan { 58 | id: self.id.clone(), 59 | rx: self.rx.take(), 60 | timeout: stream::interval(Duration::from_secs(15)), 61 | service: self.service.clone(), 62 | }, 63 | }); 64 | } 65 | MessageType::Computation => { 66 | panic!("unexpected message type") 67 | } 68 | }, 69 | _ => {} 70 | } 71 | 72 | if let Some(LocalRpcMsg { 73 | n, 74 | args, 75 | agent, 76 | on_done, 77 | }) = self.on_local_rpc.try_recv().unwrap() 78 | { 79 | return Poll::Ready(Phase1Msg::FromLocal { 80 | id: self.id.clone(), 81 | n, 82 | negotiation: NegotiationChan::new( 83 | self.id.clone(), 84 | self.rx.take().unwrap(), 85 | n, 86 | args, 87 | self.service.clone(), 88 | agent, 89 | on_done, 90 | ), 91 | }); 92 | } 93 | 94 | // Wake this task to be polled again. 
95 | cx.waker().wake_by_ref(); 96 | Poll::Pending 97 | } 98 | } 99 | 100 | pub(crate) enum Phase1Msg { 101 | FromRemote { 102 | peer_id: PeerId, 103 | protocol_id: u64, 104 | session_id: u64, 105 | payload: Vec, // for negotiation and stuff 106 | response_tx: oneshot::Sender, // respond if negotiation is fine 107 | channel: Phase2Chan, // listens after we respond 108 | }, 109 | FromLocal { 110 | id: RoomId, 111 | n: u16, 112 | negotiation: NegotiationChan, 113 | }, 114 | } 115 | 116 | pub(crate) struct Phase2Chan { 117 | id: RoomId, 118 | rx: Option>, 119 | timeout: Interval, 120 | service: NetworkService, 121 | } 122 | 123 | impl Phase2Chan { 124 | pub fn abort(mut self) -> (RoomId, Phase1Channel, oneshot::Sender) { 125 | let (ch, tx) = Phase1Channel::new(self.id.clone(), self.rx.take().unwrap(), self.service); 126 | return (self.id, ch, tx); 127 | } 128 | } 129 | 130 | impl Future for Phase2Chan { 131 | type Output = Phase2Msg; 132 | 133 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 134 | match self.rx.as_mut().unwrap().try_next() { 135 | Ok(Some(msg)) => match msg.context.message_type { 136 | MessageType::Coordination => { 137 | let (start_msg, peerset_rx) = 138 | match StartMsg::from_bytes(&*msg.payload, self.service.local_peer_id()) { 139 | Ok(res) => res, 140 | Err(_) => { 141 | let (ch, tx) = Phase1Channel::new( 142 | self.id.clone(), 143 | self.rx.take().unwrap(), 144 | self.service.clone(), 145 | ); 146 | return Poll::Ready(Phase2Msg::Abort(self.id.clone(), ch, tx)); 147 | } 148 | }; 149 | let parties = start_msg.parties; // todo: check with cache 150 | let (proxy, rx) = ReceiverProxy::new( 151 | self.id.clone(), 152 | self.rx.take().unwrap(), 153 | self.service.clone(), 154 | parties.clone(), 155 | ); 156 | return Poll::Ready(Phase2Msg::Start { 157 | room_id: self.id.clone(), 158 | room_receiver: rx, 159 | receiver_proxy: proxy, 160 | parties, 161 | peerset_rx, 162 | init_body: start_msg.body, 163 | }); 164 | } 165 | 
MessageType::Computation => { 166 | panic!("unexpected message type") 167 | } 168 | }, 169 | _ => {} 170 | } 171 | 172 | // Remote peer gone offline or refused taking in us in set - returning to Phase 1 173 | if let Poll::Ready(Some(_)) = Stream::poll_next(Pin::new(&mut self.timeout), cx) { 174 | let (ch, tx) = Phase1Channel::new( 175 | self.id.clone(), 176 | self.rx.take().unwrap(), 177 | self.service.clone(), 178 | ); 179 | return Poll::Ready(Phase2Msg::Abort(self.id.clone(), ch, tx)); 180 | } 181 | 182 | // Wake this task to be polled again. 183 | cx.waker().wake_by_ref(); 184 | Poll::Pending 185 | } 186 | } 187 | 188 | pub(crate) enum Phase2Msg { 189 | Start { 190 | room_id: RoomId, 191 | room_receiver: mpsc::Receiver, 192 | receiver_proxy: ReceiverProxy, 193 | parties: Peerset, 194 | peerset_rx: mpsc::Receiver, 195 | init_body: Vec, 196 | }, 197 | Abort(RoomId, Phase1Channel, oneshot::Sender), 198 | } 199 | 200 | pub(crate) struct LocalRpcMsg { 201 | pub n: u16, 202 | pub args: Vec, 203 | pub agent: Box, 204 | pub on_done: oneshot::Sender>>, 205 | } 206 | -------------------------------------------------------------------------------- /node/runtime/src/echo.rs: -------------------------------------------------------------------------------- 1 | use blake2::{Blake2s256, Digest}; 2 | use futures::channel::{mpsc, oneshot}; 3 | use futures_util::{pin_mut, select, FutureExt, SinkExt, StreamExt}; 4 | use libp2p::PeerId; 5 | use mpc_p2p::broadcast; 6 | use std::cmp::Ordering; 7 | use std::collections::BinaryHeap; 8 | use std::future::Future; 9 | use std::io::Write; 10 | 11 | pub(crate) struct EchoGadget { 12 | r: u16, 13 | n: usize, 14 | msgs: BinaryHeap, 15 | rx: mpsc::Receiver, 16 | } 17 | 18 | impl EchoGadget { 19 | pub fn new(n: usize) -> (Self, mpsc::Sender) { 20 | let (tx, rx) = mpsc::channel(n); 21 | 22 | let gadget = EchoGadget { 23 | r: 0, 24 | n, 25 | msgs: Default::default(), 26 | rx, 27 | }; 28 | 29 | (gadget, tx) 30 | } 31 | 32 | pub async fn 
wrap_execution( 33 | mut self, 34 | computation_fut: impl Future>, 35 | ) -> crate::Result<()> { 36 | let mut echo = Box::pin(self.proceed_round().fuse()); 37 | let future = computation_fut.fuse(); 38 | pin_mut!(future); 39 | 40 | loop { 41 | select! { 42 | echo_res = echo => match echo_res { 43 | Ok(s) => { 44 | echo = Box::pin(s.proceed_round().fuse()); 45 | }, 46 | Err(e) => { 47 | return Err(e); // TODO: forgot to notify agent about error 48 | } 49 | }, 50 | _comp_res = future => { 51 | return Ok(()); 52 | } 53 | } 54 | } 55 | } 56 | 57 | async fn proceed_round(&mut self) -> crate::Result<&mut Self> { 58 | loop { 59 | let msg = self.rx.select_next_some().await; 60 | self.msgs.push(msg); 61 | if self.msgs.len() == self.n { 62 | break; 63 | } 64 | } 65 | 66 | let mut hasher = Blake2s256::new(); 67 | let mut incoming_acks = vec![]; 68 | let mut outgoing_resp_rx = None; 69 | 70 | while let Some(echo_msg) = self.msgs.pop() { 71 | let _ = hasher.write(&*echo_msg.payload); 72 | match echo_msg.response { 73 | EchoResponse::Incoming(tx) => incoming_acks.push(tx), 74 | EchoResponse::Outgoing(resp_rx) => { 75 | let _ = outgoing_resp_rx.insert(resp_rx); 76 | } 77 | } 78 | } 79 | 80 | let mut outgoing_resp_rx = outgoing_resp_rx.expect("outgoing message was expected"); 81 | 82 | let echo_hash = hasher.finalize().to_vec(); 83 | for tx in incoming_acks.into_iter() { 84 | tx.send(broadcast::OutgoingResponse { 85 | result: Ok(echo_hash.clone()), 86 | sent_feedback: None, 87 | }) 88 | .expect("expected to be able to send acknowledgment with echoing module"); 89 | } 90 | 91 | let mut echo_hashes = vec![]; 92 | 93 | loop { 94 | echo_hashes.push(outgoing_resp_rx.select_next_some().await); 95 | 96 | if echo_hashes.len() == self.n - 1 { 97 | break; // todo: add timeout handling 98 | } 99 | } 100 | 101 | // there's a stupid bug below, todo: this index is not peer_index 102 | for (index, remote_echo) in echo_hashes.into_iter().enumerate() { 103 | match remote_echo { 104 | Ok((_peer_id, 
hash)) => { 105 | if hash != echo_hash { 106 | return Err(crate::Error::InconsistentEcho(index as u16)); 107 | } 108 | } 109 | Err(e) => { 110 | return Err(crate::Error::EchoFailed(e)); 111 | } 112 | } 113 | } 114 | 115 | self.r += 1; 116 | 117 | return Ok(self); 118 | } 119 | } 120 | 121 | pub(crate) struct EchoMessage { 122 | pub sender: u16, 123 | pub payload: Vec, 124 | pub response: EchoResponse, 125 | } 126 | 127 | impl Eq for EchoMessage {} 128 | 129 | impl PartialEq for EchoMessage { 130 | fn eq(&self, other: &Self) -> bool { 131 | self.sender == other.sender 132 | } 133 | } 134 | 135 | impl PartialOrd for EchoMessage { 136 | fn partial_cmp(&self, other: &Self) -> Option { 137 | Some(self.sender.cmp(&other.sender)) 138 | } 139 | } 140 | 141 | impl Ord for EchoMessage { 142 | fn cmp(&self, other: &Self) -> Ordering { 143 | self.sender.cmp(&other.sender) 144 | } 145 | } 146 | 147 | pub(crate) enum EchoResponse { 148 | Incoming(oneshot::Sender), 149 | Outgoing(mpsc::Receiver), broadcast::RequestFailure>>), 150 | } 151 | -------------------------------------------------------------------------------- /node/runtime/src/error.rs: -------------------------------------------------------------------------------- 1 | use mpc_p2p::broadcast::RequestFailure; 2 | use std::fmt::{Display, Formatter}; 3 | 4 | pub enum Error { 5 | Busy, 6 | InconsistentEcho(u16), 7 | EchoFailed(RequestFailure), 8 | UnknownProtocol(u64), 9 | InternalError(anyhow::Error), 10 | } 11 | 12 | pub type Result = std::result::Result; 13 | 14 | impl Display for Error { 15 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 16 | match self { 17 | Error::Busy => write!(f, "protocol is already in computation"), 18 | Error::InconsistentEcho(i) => { 19 | write!(f, "inconsistent echo broadcast caused by party: {i}") 20 | } 21 | Error::EchoFailed(e) => write!(f, "echo broadcast terminated with error: {e}"), 22 | Error::UnknownProtocol(protocol_id) => { 23 | write!(f, "unknown protocol with id: 
{protocol_id}") 24 | } 25 | Error::InternalError(e) => { 26 | write!(f, "internal error occurred: {e}") 27 | } 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /node/runtime/src/execution.rs: -------------------------------------------------------------------------------- 1 | use crate::echo::{EchoMessage, EchoResponse}; 2 | use crate::peerset::Peerset; 3 | use crate::{ComputeAgentAsync, MessageRouting, PeersetCacher, PeersetMsg, PersistentCacher}; 4 | use anyhow::anyhow; 5 | use async_std::task; 6 | use futures::channel::{mpsc, oneshot}; 7 | use futures::Stream; 8 | use futures_util::stream::FuturesOrdered; 9 | use futures_util::{FutureExt, StreamExt}; 10 | use libp2p::PeerId; 11 | use log::{error, info, warn}; 12 | use mpc_p2p::broadcast::OutgoingResponse; 13 | use mpc_p2p::{broadcast, MessageContext, MessageType, NetworkService, RoomId}; 14 | 15 | use std::future::Future; 16 | use std::pin::Pin; 17 | use std::task::{Context, Poll}; 18 | 19 | pub(crate) struct ProtocolExecution { 20 | state: Option, 21 | } 22 | 23 | struct ProtocolExecState { 24 | room_id: RoomId, 25 | local_peer_id: PeerId, 26 | protocol_id: u64, 27 | session_id: u64, 28 | network_service: NetworkService, 29 | parties: Peerset, 30 | peerset_rx: mpsc::Receiver, 31 | from_network: mpsc::Receiver, 32 | to_protocol: async_channel::Sender, 33 | from_protocol: async_channel::Receiver, 34 | echo_tx: mpsc::Sender, 35 | agent_future: Pin>> + Send>>, 36 | pending_futures: FuturesOrdered + Send>>>, 37 | cacher: PersistentCacher, 38 | on_done: Option>>>, 39 | i: u16, 40 | n: u16, 41 | } 42 | 43 | impl ProtocolExecution { 44 | pub fn new( 45 | room_id: RoomId, 46 | args: Vec, 47 | agent: Box, 48 | network_service: NetworkService, 49 | parties: Peerset, 50 | peerset_rx: mpsc::Receiver, 51 | cacher: PersistentCacher, 52 | from_network: mpsc::Receiver, 53 | echo_tx: mpsc::Sender, 54 | on_done: Option>>>, 55 | ) -> Self { 56 | let n = parties.size() as 
u16; 57 | let i = parties.index_of(parties.local_peer_id()).unwrap(); 58 | let protocol_id = agent.protocol_id(); 59 | let (to_protocol, from_runtime) = async_channel::bounded((n - 1) as usize); 60 | let (to_runtime, from_protocol) = async_channel::bounded((n - 1) as usize); 61 | 62 | let agent_future = agent.compute(parties.clone(), args, from_runtime, to_runtime); 63 | 64 | Self { 65 | state: Some(ProtocolExecState { 66 | room_id, 67 | local_peer_id: network_service.local_peer_id(), 68 | protocol_id, 69 | session_id: 0, 70 | network_service, 71 | parties, 72 | peerset_rx, 73 | from_network, 74 | to_protocol, 75 | from_protocol, 76 | echo_tx, 77 | agent_future, 78 | pending_futures: FuturesOrdered::new(), 79 | cacher, 80 | on_done, 81 | i, 82 | n, 83 | }), 84 | } 85 | } 86 | } 87 | 88 | impl Future for ProtocolExecution { 89 | type Output = crate::Result<()>; 90 | 91 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 92 | let ProtocolExecState { 93 | room_id, 94 | local_peer_id, 95 | protocol_id, 96 | session_id, 97 | network_service, 98 | parties, 99 | peerset_rx: mut from_peerset, 100 | mut from_network, 101 | to_protocol, 102 | mut from_protocol, 103 | mut echo_tx, 104 | mut agent_future, 105 | mut pending_futures, 106 | mut cacher, 107 | on_done, 108 | i, 109 | n, 110 | } = self.state.take().unwrap(); 111 | 112 | if let Poll::Ready(Some(message)) = Stream::poll_next(Pin::new(&mut from_peerset), cx) { 113 | match message { 114 | PeersetMsg::ReadFromCache(tx) => { 115 | let _ = tx.send(cacher.read_peerset(&room_id)); 116 | } 117 | PeersetMsg::WriteToCache(peerset, tx) => { 118 | let _ = tx.send(cacher.write_peerset(&room_id, peerset)); 119 | } 120 | } 121 | } 122 | 123 | if let Poll::Ready(Some(message)) = Stream::poll_next(Pin::new(&mut from_protocol), cx) { 124 | info!("outgoing message to {:?}", message.to); 125 | 126 | match message.to { 127 | MessageRouting::PointToPoint(remote_index) => { 128 | let (res_tx, mut res_rx) = mpsc::channel(1); 
129 | 130 | pending_futures.push( 131 | network_service 132 | .clone() 133 | .send_message_owned( 134 | room_id.clone(), 135 | parties[remote_index - 1], 136 | MessageContext { 137 | message_type: MessageType::Computation, 138 | session_id, 139 | protocol_id, 140 | }, 141 | message.body, 142 | res_tx, 143 | ) 144 | .boxed(), 145 | ); 146 | 147 | // todo: handle in same Future::poll 148 | task::spawn(async move { 149 | if let Err(e) = res_rx.select_next_some().await { 150 | error!("party responded with error: {e}"); 151 | } 152 | }); 153 | 154 | if let Some(tx) = message.sent { 155 | let _ = tx.send(()); 156 | } 157 | } 158 | MessageRouting::Broadcast => { 159 | let (res_tx, res_rx) = mpsc::channel((n - 1) as usize); 160 | 161 | pending_futures.push( 162 | network_service 163 | .clone() 164 | .multicast_message_owned( 165 | room_id.clone(), 166 | parties.clone().remotes_iter(), 167 | MessageContext { 168 | message_type: MessageType::Coordination, 169 | session_id, 170 | protocol_id, 171 | }, 172 | message.body.clone(), 173 | Some(res_tx), 174 | ) 175 | .boxed(), 176 | ); 177 | 178 | echo_tx 179 | .try_send(EchoMessage { 180 | sender: i + 1, 181 | payload: message.body, 182 | response: EchoResponse::Outgoing(res_rx), 183 | }) 184 | .expect("echo channel is expected to be open"); 185 | } 186 | } 187 | } 188 | 189 | loop { 190 | if let Poll::Ready(None) = 191 | Stream::poll_next(Pin::new(&mut pending_futures).as_mut(), cx) 192 | { 193 | break; 194 | } 195 | } 196 | 197 | if let Poll::Ready(Some(message)) = Stream::poll_next(Pin::new(&mut from_network), cx) { 198 | info!("incoming message from {}", message.peer_id.to_base58()); 199 | 200 | if message.is_broadcast { 201 | echo_tx 202 | .try_send(EchoMessage { 203 | sender: message.peer_index + 1, 204 | payload: message.payload.clone(), 205 | response: EchoResponse::Incoming(message.pending_response), 206 | }) 207 | .expect("echo channel is expected to be open"); 208 | } else { 209 | if let Err(_) = 
message.pending_response.send(OutgoingResponse { 210 | result: Ok(vec![]), 211 | sent_feedback: None, 212 | }) { 213 | warn!("failed sending acknowledgement to remote"); 214 | } 215 | } 216 | 217 | to_protocol 218 | .try_send(crate::IncomingMessage { 219 | from: message.peer_index + 1, 220 | to: if message.is_broadcast { 221 | MessageRouting::Broadcast 222 | } else { 223 | MessageRouting::PointToPoint(i + 1) 224 | }, 225 | body: message.payload, 226 | }) 227 | .expect("application channel is expected to be open"); 228 | } 229 | 230 | match Future::poll(Pin::new(&mut agent_future), cx) { 231 | Poll::Ready(Ok(res)) => { 232 | if let Some(tx) = on_done { 233 | let _ = tx.send(Ok(res)); 234 | } 235 | Poll::Ready(Ok(())) 236 | } 237 | Poll::Ready(Err(e)) => { 238 | let err = anyhow!("{e}"); 239 | if let Some(tx) = on_done { 240 | let _ = tx.send(Err(e)); 241 | } 242 | Poll::Ready(Err(crate::Error::InternalError(err))) 243 | } 244 | Poll::Pending => { 245 | let _ = self.state.insert(ProtocolExecState { 246 | room_id, 247 | local_peer_id, 248 | protocol_id, 249 | session_id, 250 | network_service, 251 | parties, 252 | peerset_rx: from_peerset, 253 | from_network, 254 | to_protocol, 255 | from_protocol, 256 | echo_tx, 257 | agent_future, 258 | pending_futures, 259 | cacher, 260 | on_done, 261 | i, 262 | n, 263 | }); 264 | 265 | // Wake this task to be polled again. 
266 | cx.waker().wake_by_ref(); 267 | Poll::Pending 268 | } 269 | } 270 | } 271 | } 272 | -------------------------------------------------------------------------------- /node/runtime/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(associated_type_defaults)] 2 | #![feature(async_closure)] 3 | 4 | mod coordination; 5 | mod echo; 6 | mod error; 7 | mod execution; 8 | mod negotiation; 9 | mod network_proxy; 10 | mod peerset; 11 | mod peerset_cacher; 12 | mod runtime; 13 | mod traits; 14 | 15 | pub use error::*; 16 | pub use peerset::*; 17 | pub use peerset_cacher::*; 18 | pub use runtime::*; 19 | pub use traits::*; 20 | -------------------------------------------------------------------------------- /node/runtime/src/negotiation.rs: -------------------------------------------------------------------------------- 1 | use crate::coordination::{LocalRpcMsg, Phase1Channel}; 2 | use crate::network_proxy::ReceiverProxy; 3 | use crate::peerset::Peerset; 4 | use crate::{ComputeAgentAsync, PeersetMsg}; 5 | use async_std::stream; 6 | use async_std::stream::Interval; 7 | use futures::channel::{mpsc, oneshot}; 8 | use futures::Stream; 9 | use futures_util::stream::FuturesOrdered; 10 | use futures_util::FutureExt; 11 | use libp2p::PeerId; 12 | use log::info; 13 | use mpc_p2p::{broadcast, MessageContext, MessageType, NetworkService, RoomId}; 14 | use std::borrow::BorrowMut; 15 | use std::collections::HashSet; 16 | use std::future::Future; 17 | use std::io::{BufReader, BufWriter, Read, Write}; 18 | use std::pin::Pin; 19 | use std::task::{Context, Poll}; 20 | use std::time::Duration; 21 | use std::{io, iter}; 22 | 23 | pub(crate) struct NegotiationChan { 24 | rx: Option>, 25 | timeout: Interval, 26 | agent: Option>, 27 | state: Option, 28 | } 29 | 30 | struct NegotiationState { 31 | id: RoomId, 32 | n: u16, 33 | args: Vec, 34 | service: NetworkService, 35 | peers: HashSet, 36 | responses: Option), 
broadcast::RequestFailure>>>, 37 | pending_futures: FuturesOrdered + Send>>>, 38 | on_done: oneshot::Sender>>, 39 | } 40 | 41 | impl NegotiationChan { 42 | pub fn new( 43 | room_id: RoomId, 44 | room_rx: mpsc::Receiver, 45 | n: u16, 46 | args: Vec, 47 | service: NetworkService, 48 | agent: Box, 49 | on_done: oneshot::Sender>>, 50 | ) -> Self { 51 | let local_peer_id = service.local_peer_id(); 52 | Self { 53 | rx: Some(room_rx), 54 | timeout: stream::interval(Duration::from_secs(15)), 55 | agent: Some(agent), 56 | state: Some(NegotiationState { 57 | id: room_id, 58 | n, 59 | args, 60 | service, 61 | peers: iter::once(local_peer_id).collect(), 62 | responses: None, 63 | pending_futures: Default::default(), 64 | on_done, 65 | }), 66 | } 67 | } 68 | } 69 | 70 | impl Future for NegotiationChan { 71 | type Output = NegotiationMsg; 72 | 73 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 74 | let NegotiationState { 75 | id, 76 | n, 77 | args, 78 | service, 79 | mut peers, 80 | mut responses, 81 | mut pending_futures, 82 | on_done, 83 | } = self.state.take().unwrap(); 84 | 85 | loop { 86 | if let Poll::Ready(None) = 87 | Stream::poll_next(Pin::new(&mut pending_futures).as_mut(), cx) 88 | { 89 | break; 90 | } 91 | } 92 | 93 | if let Some(rx) = responses.borrow_mut() { 94 | match rx.try_next() { 95 | Ok(Some(Ok((peer_id, _)))) => { 96 | peers.insert(peer_id); 97 | if peers.len() == n as usize { 98 | let agent = self.agent.take().unwrap(); 99 | let peers_iter = peers.clone().into_iter(); 100 | let (parties, peerset_rx) = 101 | Peerset::new(peers_iter, service.local_peer_id()); 102 | let start_msg = StartMsg { 103 | parties: parties.clone(), 104 | body: args.clone(), 105 | }; 106 | pending_futures.push( 107 | service 108 | .clone() 109 | .multicast_message_owned( 110 | id.clone(), 111 | peers.clone().into_iter(), 112 | MessageContext { 113 | message_type: MessageType::Coordination, 114 | session_id: agent.session_id().into(), 115 | protocol_id: 
agent.protocol_id(), 116 | }, 117 | start_msg.to_bytes().unwrap(), 118 | None, 119 | ) 120 | .boxed(), 121 | ); 122 | 123 | loop { 124 | if let Poll::Ready(None) = 125 | Stream::poll_next(Pin::new(&mut pending_futures).as_mut(), cx) 126 | { 127 | break; 128 | } 129 | } 130 | 131 | let (receiver_proxy, room_receiver) = ReceiverProxy::new( 132 | id.clone(), 133 | self.rx.take().unwrap(), 134 | service.clone(), 135 | parties.clone(), 136 | ); 137 | return Poll::Ready(NegotiationMsg::Start { 138 | agent, 139 | on_done, 140 | room_receiver, 141 | receiver_proxy, 142 | parties, 143 | peerset_rx, 144 | args, 145 | }); 146 | } 147 | } 148 | _ => {} 149 | } 150 | } else { 151 | let agent = self.agent.as_ref().unwrap(); 152 | let (tx, rx) = mpsc::channel((n - 1) as usize); 153 | pending_futures.push( 154 | service 155 | .clone() 156 | .broadcast_message_owned( 157 | id.clone(), 158 | MessageContext { 159 | message_type: MessageType::Coordination, 160 | session_id: agent.session_id(), 161 | protocol_id: agent.protocol_id(), 162 | }, 163 | vec![], 164 | Some(tx), 165 | ) 166 | .boxed(), 167 | ); 168 | let _ = responses.insert(rx); 169 | } 170 | 171 | // It took too long for peerset to be assembled - reset to Phase 1. 172 | if let Poll::Ready(Some(())) = Stream::poll_next(Pin::new(&mut self.timeout), cx) { 173 | let (ch, tx) = Phase1Channel::new(id.clone(), self.rx.take().unwrap(), service.clone()); 174 | return Poll::Ready(NegotiationMsg::Abort(id.clone(), ch, tx)); 175 | } 176 | 177 | let _ = self.state.insert(NegotiationState { 178 | id, 179 | n, 180 | args, 181 | service, 182 | peers, 183 | responses, 184 | pending_futures, 185 | on_done, 186 | }); 187 | 188 | // Wake this task to be polled again. 
189 | cx.waker().wake_by_ref(); 190 | Poll::Pending 191 | } 192 | } 193 | 194 | pub(crate) enum NegotiationMsg { 195 | Start { 196 | agent: Box, 197 | on_done: oneshot::Sender>>, 198 | room_receiver: mpsc::Receiver, 199 | receiver_proxy: ReceiverProxy, 200 | parties: Peerset, 201 | peerset_rx: mpsc::Receiver, 202 | args: Vec, 203 | }, 204 | Abort(RoomId, Phase1Channel, oneshot::Sender), 205 | } 206 | 207 | pub(crate) struct StartMsg { 208 | pub parties: Peerset, 209 | pub body: Vec, 210 | } 211 | 212 | impl StartMsg { 213 | pub(crate) fn from_bytes( 214 | b: &[u8], 215 | local_peer_id: PeerId, 216 | ) -> io::Result<(Self, mpsc::Receiver)> { 217 | let mut io = BufReader::new(b); 218 | 219 | // Read the peerset payload length. 220 | let peerset_len = unsigned_varint::io::read_usize(&mut io) 221 | .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; 222 | 223 | let mut peerset_buffer = vec![0; peerset_len]; 224 | io.read_exact(&mut peerset_buffer)?; 225 | 226 | // Read the body payload length. 227 | let length = unsigned_varint::io::read_usize(&mut io) 228 | .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; 229 | 230 | // Read the init message body. 231 | let mut body = vec![0; length]; 232 | io.read_exact(&mut body)?; 233 | 234 | let (parties, rx) = Peerset::from_bytes(&*peerset_buffer, local_peer_id); 235 | Ok((Self { parties, body }, rx)) 236 | } 237 | 238 | fn to_bytes(self) -> io::Result> { 239 | let b = vec![]; 240 | let mut io = BufWriter::new(b); 241 | 242 | let peerset_bytes = self.parties.to_bytes(); 243 | 244 | // Write the peerset payload size. 245 | { 246 | let mut buffer = unsigned_varint::encode::usize_buffer(); 247 | io.write_all(unsigned_varint::encode::usize( 248 | peerset_bytes.len(), 249 | &mut buffer, 250 | ))?; 251 | } 252 | 253 | io.write_all(&*peerset_bytes)?; 254 | 255 | // Write the body payload length. 
256 | { 257 | let mut buffer = unsigned_varint::encode::usize_buffer(); 258 | io.write_all(unsigned_varint::encode::usize(self.body.len(), &mut buffer))?; 259 | } 260 | 261 | // Write the init message. 262 | io.write_all(&self.body)?; 263 | 264 | Ok(io.buffer().to_vec()) 265 | } 266 | } 267 | 268 | #[cfg(test)] 269 | mod tests { 270 | use crate::negotiation::StartMsg; 271 | use crate::peerset::Peerset; 272 | use libp2p::PeerId; 273 | use std::str::FromStr; 274 | 275 | #[test] 276 | fn start_msg_encoding() { 277 | let peer_ids = vec![ 278 | PeerId::from_str("12D3KooWMQmcJA5raTtuxqAguM5CiXRhEDumLNmZQ7PmKZizjFBX").unwrap(), 279 | PeerId::from_str("12D3KooWS4jk2BXKgyqygNEZScHSzntTKQCdHYiHRrZXiNE9mNHi").unwrap(), 280 | PeerId::from_str("12D3KooWHYG3YsVs9hTwbgPKVrTrPQBKc8FnDhV6bsJ4W37eds8p").unwrap(), 281 | ]; 282 | let local_peer_id = peer_ids[0]; 283 | let (mut peerset, _) = Peerset::new(peer_ids.into_iter(), local_peer_id); 284 | peerset.parties_indexes = vec![1, 2]; 285 | let start_msg = StartMsg { 286 | parties: peerset.clone(), 287 | body: vec![1, 2, 3], 288 | }; 289 | let encoded = StartMsg { 290 | parties: peerset, 291 | body: vec![1, 2, 3], 292 | } 293 | .to_bytes() 294 | .unwrap(); 295 | let (decoded, _) = StartMsg::from_bytes(&*encoded, local_peer_id).unwrap(); 296 | 297 | println!( 298 | "original: {:?}, {:?}", 299 | start_msg.parties.parties_indexes, 300 | start_msg.parties.clone().remotes_iter().collect::>() 301 | ); 302 | println!( 303 | "decoded: {:?}, {:?}", 304 | decoded.parties.parties_indexes, 305 | decoded.parties.clone().remotes_iter().collect::>() 306 | ); 307 | 308 | assert_eq!( 309 | start_msg.parties.parties_indexes, 310 | decoded.parties.parties_indexes 311 | ); 312 | } 313 | } 314 | -------------------------------------------------------------------------------- /node/runtime/src/network_proxy.rs: -------------------------------------------------------------------------------- 1 | use crate::coordination::{LocalRpcMsg, Phase1Channel}; 2 | use 
crate::peerset::Peerset; 3 | use futures::channel::{mpsc, oneshot}; 4 | use mpc_p2p::broadcast::IncomingMessage; 5 | use mpc_p2p::{broadcast, NetworkService, RoomId}; 6 | use std::future::Future; 7 | use std::pin::Pin; 8 | use std::task::{Context, Poll}; 9 | 10 | pub(crate) struct ReceiverProxy { 11 | id: RoomId, 12 | rx: Option>, 13 | tx: mpsc::Sender, 14 | service: NetworkService, 15 | parties: Peerset, 16 | } 17 | 18 | impl ReceiverProxy { 19 | pub fn new( 20 | room_id: RoomId, 21 | room_rx: mpsc::Receiver, 22 | service: NetworkService, 23 | parties: Peerset, 24 | ) -> (Self, mpsc::Receiver) { 25 | let (tx, rx) = mpsc::channel(parties.size() - 1); 26 | ( 27 | Self { 28 | id: room_id, 29 | rx: Some(room_rx), 30 | tx, 31 | service, 32 | parties, 33 | }, 34 | rx, 35 | ) 36 | } 37 | } 38 | 39 | impl Future for ReceiverProxy { 40 | type Output = (RoomId, Phase1Channel, oneshot::Sender); 41 | 42 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 43 | if self.tx.is_closed() { 44 | let (ch, tx) = Phase1Channel::new( 45 | self.id.clone(), 46 | self.rx.take().unwrap(), 47 | self.service.clone(), 48 | ); 49 | return Poll::Ready((self.id.clone(), ch, tx)); 50 | } 51 | 52 | match self.rx.as_mut().unwrap().try_next() { 53 | Ok(Some(mut msg)) => match self.parties.index_of(&msg.peer_id) { 54 | Some(i) => { 55 | msg.peer_index = i; 56 | let _ = self.tx.try_send(msg); 57 | } 58 | None => { 59 | panic!("received message from unknown peer"); 60 | } 61 | }, 62 | _ => {} 63 | } 64 | 65 | // Wake this task to be polled again. 
66 | cx.waker().wake_by_ref(); 67 | Poll::Pending 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /node/runtime/src/peerset.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::{mpsc, oneshot}; 2 | use futures_util::{SinkExt, StreamExt}; 3 | use itertools::Itertools; 4 | use libp2p::PeerId; 5 | use log::{info, warn}; 6 | 7 | use std::io::{BufReader, Read}; 8 | use std::ops::Index; 9 | 10 | #[derive(Clone)] 11 | pub struct Peerset { 12 | local_peer_id: PeerId, 13 | session_peers: Vec, 14 | pub parties_indexes: Vec, 15 | to_runtime: mpsc::Sender, 16 | } 17 | 18 | pub(crate) enum PeersetMsg { 19 | ReadFromCache(oneshot::Sender>), 20 | WriteToCache(Peerset, oneshot::Sender>), 21 | } 22 | 23 | impl Peerset { 24 | pub(crate) fn new( 25 | peers: impl Iterator, 26 | local_peer_id: PeerId, 27 | ) -> (Self, mpsc::Receiver) { 28 | let (tx, rx) = mpsc::channel(1); 29 | let peers: Vec<_> = peers.sorted_by_key(|p| p.to_bytes()).collect(); 30 | 31 | ( 32 | Self { 33 | local_peer_id, 34 | parties_indexes: (0..peers.len()).collect(), 35 | session_peers: peers, 36 | to_runtime: tx, 37 | }, 38 | rx, 39 | ) 40 | } 41 | 42 | pub(crate) fn from_bytes( 43 | bytes: &[u8], 44 | local_peer_id: PeerId, 45 | ) -> (Self, mpsc::Receiver) { 46 | let mut peers = vec![]; 47 | let mut active_indexes = vec![]; 48 | let mut reader = BufReader::new(bytes); 49 | 50 | loop { 51 | let mut buf = [0; 38]; 52 | if matches!(reader.read(&mut buf), Ok(n) if n == 38) { 53 | peers.push(PeerId::from_bytes(&buf).unwrap()) 54 | } else { 55 | break; 56 | } 57 | 58 | let mut buf = [0; 1]; 59 | reader.read(&mut buf).unwrap(); 60 | active_indexes.push(buf[0] as usize); 61 | } 62 | 63 | let peers: Vec<_> = peers.into_iter().sorted_by_key(|p| p.to_bytes()).collect(); 64 | 65 | let (tx, rx) = mpsc::channel(1); 66 | ( 67 | Self { 68 | local_peer_id, 69 | session_peers: peers, 70 | parties_indexes: active_indexes, 
71 | to_runtime: tx, 72 | }, 73 | rx, 74 | ) 75 | } 76 | 77 | pub async fn recover_from_cache(&mut self) -> anyhow::Result<()> { 78 | let (tx, rx) = oneshot::channel(); 79 | let _ = self.to_runtime.send(PeersetMsg::ReadFromCache(tx)).await; 80 | let cache = rx.await.expect("runtime expected to serve protocol")?; 81 | let mut parties_indexes = vec![]; 82 | for peer_id in self.session_peers.iter().sorted_by_key(|p| p.to_bytes()) { 83 | match cache.index_of(peer_id) { 84 | Some(i) => { 85 | parties_indexes.push(cache.parties_indexes[i as usize]); 86 | } 87 | None => { 88 | warn!( 89 | "Peer {} does not appear in the peerset cache, skipping.", 90 | peer_id.to_base58() 91 | ) 92 | } 93 | } 94 | } 95 | 96 | self.parties_indexes = parties_indexes; 97 | Ok(()) 98 | } 99 | 100 | pub async fn save_to_cache(&mut self) -> anyhow::Result<()> { 101 | let (tx, rx) = oneshot::channel(); 102 | let _ = self 103 | .to_runtime 104 | .send(PeersetMsg::WriteToCache(self.clone(), tx)) 105 | .await; 106 | rx.await.expect("runtime expected to serve protocol") 107 | } 108 | 109 | pub fn index_of(&self, peer_id: &PeerId) -> Option { 110 | self.session_peers 111 | .iter() 112 | .position(|elem| *elem == *peer_id) 113 | .map(|i| i as u16) 114 | } 115 | 116 | pub fn size(&self) -> usize { 117 | self.session_peers.len() 118 | } 119 | 120 | pub fn to_bytes(&self) -> Vec { 121 | let mut buf = vec![]; 122 | 123 | for (i, peer_id) in self.session_peers.iter().enumerate() { 124 | buf.append(&mut peer_id.to_bytes()); 125 | buf.push(self.parties_indexes[i] as u8); 126 | } 127 | 128 | buf 129 | } 130 | 131 | pub fn remotes_iter(self) -> impl Iterator { 132 | self.session_peers 133 | .into_iter() 134 | .enumerate() 135 | .filter(move |(_i, p)| *p != self.local_peer_id) 136 | .map(|(_i, p)| p.clone()) 137 | } 138 | 139 | pub fn local_peer_id(&self) -> &PeerId { 140 | return &self.local_peer_id; 141 | } 142 | 143 | pub fn len(&self) -> usize { 144 | self.session_peers.len() 145 | } 146 | } 147 | 148 | impl 
Index for Peerset { 149 | type Output = PeerId; 150 | 151 | fn index(&self, index: u16) -> &Self::Output { 152 | &self.session_peers[index as usize] 153 | } 154 | } 155 | 156 | impl IntoIterator for Peerset { 157 | type Item = PeerId; 158 | type IntoIter = std::vec::IntoIter; 159 | 160 | fn into_iter(self) -> Self::IntoIter { 161 | self.session_peers.into_iter() 162 | } 163 | } 164 | 165 | #[cfg(test)] 166 | mod tests { 167 | use crate::peerset::Peerset; 168 | use libp2p::PeerId; 169 | use std::str::FromStr; 170 | 171 | #[test] 172 | fn peerset_encoding() { 173 | let peer_ids = vec![ 174 | PeerId::from_str("12D3KooWMQmcJA5raTtuxqAguM5CiXRhEDumLNmZQ7PmKZizjFBX").unwrap(), 175 | PeerId::from_str("12D3KooWHYG3YsVs9hTwbgPKVrTrPQBKc8FnDhV6bsJ4W37eds8p").unwrap(), 176 | ]; 177 | let local_peer_id = peer_ids[0]; 178 | let (mut peerset, _) = Peerset::new(peer_ids.into_iter(), local_peer_id); 179 | peerset.parties_indexes = vec![0, 2]; 180 | let encoded = peerset.to_bytes(); 181 | let (decoded, _) = Peerset::from_bytes(&*encoded, local_peer_id); 182 | 183 | println!( 184 | "original: {:?}, {:?}", 185 | peerset.parties_indexes, peerset.session_peers 186 | ); 187 | println!( 188 | "decoded: {:?}, {:?}", 189 | decoded.parties_indexes, decoded.session_peers 190 | ); 191 | 192 | assert_eq!(peerset.parties_indexes, decoded.parties_indexes); 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /node/runtime/src/peerset_cacher.rs: -------------------------------------------------------------------------------- 1 | use crate::peerset::Peerset; 2 | use crate::PeersetCacher; 3 | use anyhow::anyhow; 4 | use async_std::path::{Path, PathBuf}; 5 | use libp2p::PeerId; 6 | use mpc_p2p::RoomId; 7 | use std::collections::HashMap; 8 | use std::fs; 9 | 10 | #[derive(Default)] 11 | pub struct EphemeralCacher { 12 | store: HashMap, 13 | } 14 | 15 | impl PeersetCacher for EphemeralCacher { 16 | fn read_peerset(&self, room_id: &RoomId) -> 
anyhow::Result { 17 | match self.store.get(room_id) { 18 | Some(p) => Ok(p.clone()), 19 | None => Err(anyhow!("no cache exists for room")), 20 | } 21 | } 22 | 23 | fn write_peerset(&mut self, room_id: &RoomId, peerset: Peerset) -> anyhow::Result<()> { 24 | self.store 25 | .entry(room_id.clone()) 26 | .and_modify(|e| *e = peerset.clone()) 27 | .or_insert(peerset); 28 | Ok(()) 29 | } 30 | } 31 | 32 | #[derive(Clone)] 33 | pub struct PersistentCacher { 34 | local_peer_id: PeerId, 35 | path: PathBuf, 36 | } 37 | 38 | impl PeersetCacher for PersistentCacher { 39 | fn read_peerset(&self, room_id: &RoomId) -> anyhow::Result { 40 | let buf = fs::read(self.path.join(room_id.as_str())) 41 | .map_err(|e| anyhow!("error reading peerset cache file: {e}"))?; 42 | 43 | let (peerset, _) = Peerset::from_bytes(&*buf, self.local_peer_id); 44 | 45 | Ok(peerset) 46 | } 47 | 48 | /// Persists the peerset to `<path>/<room_id>`, creating parent dirs as needed.
    /// Returns an error (instead of panicking) on any filesystem failure, matching
    /// the `anyhow::Result` contract of this trait method.
    fn write_peerset(&mut self, room_id: &RoomId, peerset: Peerset) -> anyhow::Result<()> { 49 | let path = self.path.join(room_id.as_str()); 50 | let dir = path
            .parent()
            .ok_or_else(|| anyhow!("cache path {:?} has no parent directory", path))?; 51 | fs::create_dir_all(dir).map_err(|e| anyhow!("error creating cache directory: {e}"))?; 52 | fs::write(path, peerset.to_bytes()).map_err(|e| anyhow!("error writing to file: {e}"))?; 53 | 54 | Ok(()) 55 | } 56 | } 57 | 58 | impl PersistentCacher { 59 | pub fn new>(p: P, local_peer_id: PeerId) -> Self { 60 | Self { 61 | path: PathBuf::from(p.as_ref()), 62 | local_peer_id, 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /node/runtime/src/runtime.rs: -------------------------------------------------------------------------------- 1 | use crate::coordination::LocalRpcMsg; 2 | use crate::coordination::Phase2Msg; 3 | use crate::echo::EchoGadget; 4 | use crate::execution::ProtocolExecution; 5 | use crate::negotiation::NegotiationMsg; 6 | 7 | use crate::{coordination, PersistentCacher, ProtocolAgentFactory}; 8 | use anyhow::anyhow; 9 | use blake2::Digest; 10 | use futures::channel::{mpsc, oneshot}; 11 | use futures::StreamExt; 12 | use
futures_util::stream::FuturesUnordered; 13 | use futures_util::{select, FutureExt, SinkExt}; 14 | use log::error; 15 | use mpc_p2p::broadcast::OutgoingResponse; 16 | use mpc_p2p::{broadcast, NetworkService, RoomId}; 17 | use std::collections::hash_map::Entry; 18 | use std::collections::HashMap; 19 | 20 | pub enum RuntimeMessage { 21 | RequestComputation { 22 | room_id: RoomId, 23 | n: u16, 24 | protocol_id: u64, 25 | args: Vec, 26 | on_done: oneshot::Sender>>, 27 | }, 28 | } 29 | 30 | #[derive(Clone)] 31 | pub struct RuntimeService { 32 | to_runtime: mpsc::Sender, 33 | } 34 | 35 | impl RuntimeService { 36 | pub async fn request_computation( 37 | &mut self, 38 | room_id: RoomId, 39 | n: u16, 40 | protocol_id: u64, 41 | args: Vec, 42 | on_done: oneshot::Sender>>, 43 | ) { 44 | self.to_runtime 45 | .send(RuntimeMessage::RequestComputation { 46 | room_id, 47 | n, 48 | protocol_id, 49 | args, 50 | on_done, 51 | }) 52 | .await 53 | .expect("request computation expected"); 54 | } 55 | } 56 | 57 | pub struct RuntimeDaemon { 58 | network_service: NetworkService, 59 | rooms: HashMap>, 60 | agents_factory: TFactory, 61 | from_service: mpsc::Receiver, 62 | peerset_cacher: PersistentCacher, 63 | } 64 | 65 | impl RuntimeDaemon { 66 | pub fn new( 67 | network_service: NetworkService, 68 | rooms: impl Iterator)>, 69 | agents_factory: TFactory, 70 | peerset_cacher: PersistentCacher, 71 | ) -> (Self, RuntimeService) { 72 | let (tx, rx) = mpsc::channel(2); 73 | 74 | let worker = Self { 75 | network_service, 76 | rooms: rooms.collect(), 77 | from_service: rx, 78 | agents_factory, 79 | peerset_cacher, 80 | }; 81 | 82 | let service = RuntimeService { to_runtime: tx }; 83 | 84 | (worker, service) 85 | } 86 | 87 | pub async fn run(self) { 88 | let mut protocol_executions = FuturesUnordered::new(); 89 | let mut network_proxies = FuturesUnordered::new(); 90 | let mut rooms_coordination = FuturesUnordered::new(); 91 | let mut rooms_rpc = HashMap::new(); 92 | 93 | let Self { 94 | 
network_service, 95 | rooms, 96 | agents_factory, 97 | from_service, 98 | peerset_cacher, 99 | } = self; 100 | 101 | for (room_id, rx) in rooms.into_iter() { 102 | let (ch, tx) = 103 | coordination::Phase1Channel::new(room_id.clone(), rx, network_service.clone()); 104 | rooms_coordination.push(ch); 105 | rooms_rpc.insert(room_id, tx); 106 | } 107 | 108 | let mut service_messages = from_service.fuse(); 109 | 110 | // loop { 111 | // match rooms_coordination.select_next_some().await 112 | // } 113 | 114 | loop { 115 | select! { 116 | srv_msg = service_messages.select_next_some() => { 117 | match srv_msg { 118 | RuntimeMessage::RequestComputation{ 119 | room_id, 120 | n, 121 | protocol_id, 122 | args, 123 | on_done, 124 | } => { 125 | match rooms_rpc.entry(room_id) { 126 | Entry::Occupied(e) => { 127 | let agent = match agents_factory.make(protocol_id) { 128 | Ok(a) => a, 129 | Err(_) => { 130 | on_done.send(Err(anyhow!("unknown protocol"))); 131 | continue; 132 | } 133 | }; 134 | let on_rpc = e.remove(); 135 | 136 | if on_rpc.is_canceled() { 137 | on_done.send(Err(anyhow!("protocol is busy"))); 138 | } else { 139 | on_rpc.send(LocalRpcMsg{n, args, agent, on_done}); 140 | } 141 | } 142 | Entry::Vacant(_) => { 143 | error!("{:?}", on_done.send(Err(anyhow!("protocol is busy")))); 144 | } 145 | } 146 | }, 147 | } 148 | }, 149 | coord_msg = rooms_coordination.select_next_some() => match coord_msg { 150 | coordination::Phase1Msg::FromRemote { 151 | peer_id: _, 152 | protocol_id, 153 | session_id: _, 154 | payload: _, 155 | response_tx, 156 | channel, 157 | } => { 158 | let agent = match agents_factory.make(protocol_id) { 159 | Ok(a) => a, 160 | Err(_) => { 161 | let (id, ch, tx) = channel.abort(); 162 | rooms_coordination.push(ch); 163 | rooms_rpc.insert(id, tx); 164 | continue; 165 | } 166 | }; 167 | 168 | response_tx.send(OutgoingResponse { 169 | result: Ok(vec![]), // todo: real negotiation logic 170 | sent_feedback: None, 171 | }); 172 | 173 | match channel.await { 174 
| Phase2Msg::Start { 175 | room_id, 176 | room_receiver, 177 | receiver_proxy, 178 | parties, 179 | peerset_rx, 180 | init_body, 181 | } => { 182 | network_proxies.push(receiver_proxy); 183 | let (echo, echo_tx) = EchoGadget::new(parties.size()); 184 | protocol_executions.push(echo.wrap_execution(ProtocolExecution::new( 185 | room_id, 186 | init_body, 187 | agent, 188 | network_service.clone(), 189 | parties, 190 | peerset_rx, 191 | peerset_cacher.clone(), 192 | room_receiver, 193 | echo_tx, 194 | None, 195 | ))); 196 | } 197 | Phase2Msg::Abort(room_id, ch, tx) => { 198 | // Re-register the room's RPC channel unconditionally. The previous
                            // `entry(..).and_modify(|e| *e = tx)` dropped `tx` when the entry was
                            // vacant — which it always is here, since `RequestComputation` removes
                            // the entry (`e.remove()`) before negotiation starts — leaving the room
                            // permanently answering "protocol is busy". The other abort/recovery
                            // paths below already use `insert`.
                            rooms_rpc.insert(room_id, tx); 199 | rooms_coordination.push(ch); 200 | } 201 | } 202 | } 203 | coordination::Phase1Msg::FromLocal { 204 | id, 205 | n, 206 | negotiation, 207 | } => { 208 | match negotiation.await { 209 | NegotiationMsg::Start { 210 | agent, 211 | on_done, 212 | room_receiver, 213 | receiver_proxy, 214 | parties, 215 | peerset_rx, 216 | args, 217 | } => { 218 | network_proxies.push(receiver_proxy); 219 | let (echo, echo_tx) = EchoGadget::new(n as usize); 220 | protocol_executions.push(echo.wrap_execution(ProtocolExecution::new( 221 | id, 222 | args, 223 | agent, 224 | network_service.clone(), 225 | parties, 226 | peerset_rx, 227 | peerset_cacher.clone(), 228 | room_receiver, 229 | echo_tx, 230 | Some(on_done), 231 | ))); 232 | } 233 | NegotiationMsg::Abort(room_id, phase1, rpc_tx) => { 234 | rooms_coordination.push(phase1); 235 | rooms_rpc.insert(room_id, rpc_tx); 236 | continue; 237 | } 238 | }; 239 | } 240 | }, 241 | exec_res = protocol_executions.select_next_some() => match exec_res { 242 | Ok(_) => {} 243 | Err(e) => {error!("error during computation: {e}")} 244 | }, 245 | (room_id, phase1, rpc_tx) = network_proxies.select_next_some() => { 246 | rooms_coordination.push(phase1); 247 | rooms_rpc.insert(room_id, rpc_tx); 248 | } 249 | } 250 | 251 | // if let Ok(Some(srv_msg)) = self.from_service.try_next() { 252 | // let daemon = service_messages.get_ref(); 253
| // 254 | // match srv_msg { 255 | // 256 | // } 257 | // } 258 | } 259 | } 260 | } 261 | -------------------------------------------------------------------------------- /node/runtime/src/traits.rs: -------------------------------------------------------------------------------- 1 | use crate::peerset::Peerset; 2 | 3 | use futures::channel::oneshot; 4 | use mpc_p2p::RoomId; 5 | 6 | pub struct IncomingMessage { 7 | /// Index of party who sent the message. 8 | pub from: u16, 9 | 10 | /// Message sent by the remote. 11 | pub body: Vec, 12 | 13 | pub to: MessageRouting, 14 | } 15 | 16 | pub struct OutgoingMessage { 17 | /// Message sent by the remote. 18 | pub body: Vec, 19 | 20 | pub to: MessageRouting, 21 | 22 | pub sent: Option>, 23 | } 24 | 25 | #[derive(Copy, Clone, Debug)] 26 | pub enum MessageRouting { 27 | Broadcast, 28 | PointToPoint(u16), 29 | } 30 | 31 | pub trait ProtocolAgentFactory { 32 | fn make(&self, protocol_id: u64) -> crate::Result>; 33 | } 34 | 35 | #[async_trait::async_trait] 36 | pub trait ComputeAgentAsync: Send + Sync { 37 | fn session_id(&self) -> u64; 38 | 39 | fn protocol_id(&self) -> u64; 40 | 41 | async fn compute( 42 | self: Box, 43 | parties: Peerset, 44 | args: Vec, 45 | incoming: async_channel::Receiver, 46 | outgoing: async_channel::Sender, 47 | ) -> anyhow::Result>; 48 | } 49 | 50 | pub trait PeersetCacher { 51 | fn read_peerset(&self, room_id: &RoomId) -> anyhow::Result; 52 | 53 | fn write_peerset(&mut self, room_id: &RoomId, peerset: Peerset) -> anyhow::Result<()>; 54 | } 55 | -------------------------------------------------------------------------------- /peer_config.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "local": { 3 | "network_peer": "/ip4/127.0.0.1/tcp/4000/p2p/12D3KooWMQmcJA5raTtuxqAguM5CiXRhEDumLNmZQ7PmKZizjFBX", 4 | "rpc_addr": "127.0.0.1:8080" 5 | }, 6 | "boot_peers": [ 7 | "/ip4/127.0.0.1/tcp/4001/p2p/12D3KooWS4jk2BXKgyqygNEZScHSzntTKQCdHYiHRrZXiNE9mNHi", 
8 | "/ip4/127.0.0.1/tcp/4002/p2p/12D3KooWHYG3YsVs9hTwbgPKVrTrPQBKc8FnDhV6bsJ4W37eds8p" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /tss/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mpc-tss" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | log = "0.4" 10 | anyhow = "1.0.56" 11 | futures = "0.3.21" 12 | futures-util = "0.3" 13 | async-channel = "1.6.1" 14 | tokio = { version = "1", default-features = false, features = ["macros", "rt-multi-thread"] } 15 | async-std = { version = "1.10.0", features = ["attributes", "tokio1"] } 16 | libp2p = "0.40.0" 17 | serde = {version = "1.0", features = ["derive"] } 18 | serde_json = "1" 19 | serde_ipld_dagcbor = "0.1.2" 20 | round-based = { version = "0.1.4" } 21 | multi-party-ecdsa = {git = "https://github.com/ZenGo-X/multi-party-ecdsa"} 22 | curv-kzen = "0.9" 23 | pretty_env_logger = "0.4.0" 24 | twox-hash = "1.6" 25 | blake2 = "0.10" 26 | async-trait = "0.1.53" 27 | unsigned-varint = { version = "0.6.0"} 28 | 29 | mpc-p2p = { path = "../node/network" } 30 | mpc-runtime = { path = "../node/runtime" } 31 | 32 | [dev-dependencies] 33 | round-based = { version = "0.1.4", features = ["dev"] } 34 | -------------------------------------------------------------------------------- /tss/src/config.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use libp2p::{Multiaddr, PeerId}; 3 | use mpc_p2p::{MultiaddrWithPeerId, NodeKeyConfig, Secret}; 4 | use serde::{Deserialize, Serialize}; 5 | use std::fs; 6 | use std::path::Path; 7 | use std::str::FromStr; 8 | 9 | #[derive(Clone, Serialize, Deserialize)] 10 | pub struct Config { 11 | pub local: PartyConfig, 12 | pub boot_peers: Vec, 13 | } 14 | 15 | impl Config { 16 | pub fn 
load_config>(path: P) -> Result { 17 | let file = fs::read_to_string(path) 18 | .map_err(|e| anyhow!("reading config terminated with err: {}", e))?; 19 | serde_json::from_str(file.as_str()) 20 | .map_err(|e| anyhow!("decoding config terminated with err: {}", e)) 21 | } 22 | } 23 | 24 | #[derive(Clone, Serialize, Deserialize)] 25 | pub struct PartyConfig { 26 | pub network_peer: MultiaddrWithPeerId, 27 | pub rpc_addr: String, 28 | } 29 | 30 | pub fn generate_config, S: AsRef>( 31 | cfg_path: P, 32 | setup_path: S, 33 | libp2p_addr: S, 34 | rpc_addr: S, 35 | ) -> Result 36 | where 37 | String: From, 38 | { 39 | let node_key = NodeKeyConfig::Ed25519(Secret::New); 40 | let keypair = node_key 41 | .into_keypair() 42 | .map_err(|e| anyhow!("keypair generating err: {}", e))?; 43 | let peer_id = PeerId::from(keypair.public()); 44 | let path = setup_path.as_ref().replace(":id", &*peer_id.to_base58()); 45 | let path = Path::new(&path); 46 | let dir = path.parent().unwrap(); 47 | fs::create_dir_all(dir).unwrap(); 48 | NodeKeyConfig::persist(keypair, path) 49 | .map_err(|e| anyhow!("secret key backup failed with err: {}", e))?; 50 | let multiaddr = Multiaddr::from_str(libp2p_addr.as_ref()) 51 | .map_err(|e| anyhow!("multiaddr parse err: {}", e))?; 52 | let network_peer = MultiaddrWithPeerId { multiaddr, peer_id }; 53 | 54 | let config = Config { 55 | local: PartyConfig { 56 | network_peer, 57 | rpc_addr: rpc_addr.into(), 58 | }, 59 | boot_peers: vec![], 60 | }; 61 | 62 | let json_bytes = serde_json::to_vec(&config) 63 | .map_err(|e| anyhow!("config encoding terminated with err: {}", e))?; 64 | 65 | fs::write(cfg_path, json_bytes.as_slice()).map_err(|e| anyhow!("writing config err: {}", e))?; 66 | 67 | Ok(config) 68 | } 69 | -------------------------------------------------------------------------------- /tss/src/factory.rs: -------------------------------------------------------------------------------- 1 | use crate::keysign::KeySign; 2 | use crate::KeyGen; 3 | use 
mpc_runtime::ComputeAgentAsync; 4 | 5 | pub struct TssFactory { 6 | key_path: String, 7 | } 8 | 9 | impl TssFactory { 10 | pub fn new(key_path: String) -> Self { 11 | Self { key_path } 12 | } 13 | } 14 | 15 | impl mpc_runtime::ProtocolAgentFactory for TssFactory { 16 | fn make(&self, protocol_id: u64) -> mpc_runtime::Result> { 17 | match protocol_id { 18 | 0 => Ok(Box::new(KeyGen::new(&self.key_path))), 19 | 1 => Ok(Box::new(KeySign::new(&self.key_path))), 20 | _ => Err(mpc_runtime::Error::UnknownProtocol(protocol_id)), 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /tss/src/keygen.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use curv::elliptic::curves::{Point, Secp256k1}; 3 | 4 | use futures::future::TryFutureExt; 5 | use futures::StreamExt; 6 | use futures_util::{pin_mut, FutureExt}; 7 | use log::info; 8 | use mpc_runtime::{IncomingMessage, OutgoingMessage, Peerset}; 9 | use multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::state_machine::keygen::{ 10 | Keygen, LocalKey, 11 | }; 12 | use round_based::AsyncProtocol; 13 | use std::fs::File; 14 | use std::hash::Hasher; 15 | use std::io::{BufReader, Write}; 16 | use std::path::Path; 17 | 18 | pub struct KeyGen { 19 | path: String, 20 | } 21 | 22 | #[async_trait::async_trait] 23 | impl mpc_runtime::ComputeAgentAsync for KeyGen { 24 | fn session_id(&self) -> u64 { 25 | 0 26 | } 27 | 28 | fn protocol_id(&self) -> u64 { 29 | 0 30 | } 31 | 32 | async fn compute( 33 | mut self: Box, 34 | mut parties: Peerset, 35 | args: Vec, 36 | incoming: async_channel::Receiver, 37 | outgoing: async_channel::Sender, 38 | ) -> anyhow::Result> { 39 | let n = parties.len() as u16; 40 | let i = parties.index_of(parties.local_peer_id()).unwrap() + 1; 41 | let mut io = BufReader::new(&*args); 42 | let t = unsigned_varint::io::read_u16(&mut io).unwrap(); 43 | 44 | let state_machine = 45 | Keygen::new(i, t, 
n).map_err(|e| anyhow!("failed building state {e}"))?; 46 | 47 | let (incoming, outgoing) = crate::round_based::state_replication(incoming, outgoing); 48 | 49 | let incoming = incoming.fuse(); 50 | pin_mut!(incoming, outgoing); 51 | 52 | let res = AsyncProtocol::new(state_machine, incoming, outgoing) 53 | .run() 54 | .await 55 | .map_err(|e| anyhow!("protocol execution terminated with error: {e}"))?; 56 | 57 | let pk = self.save_local_key(res)?; 58 | parties.save_to_cache().await?; 59 | 60 | let pk_bytes = serde_ipld_dagcbor::to_vec(&pk) 61 | .map_err(|e| anyhow!("error encoding public key {e}"))?; 62 | 63 | Ok(pk_bytes) 64 | } 65 | } 66 | 67 | impl KeyGen { 68 | pub fn new(p: &str) -> Self { 69 | Self { path: p.to_owned() } 70 | } 71 | 72 | fn save_local_key(&self, local_key: LocalKey) -> anyhow::Result> { 73 | let path = Path::new(self.path.as_str()); 74 | let dir = path.parent().unwrap(); 75 | std::fs::create_dir_all(dir).unwrap(); 76 | 77 | let mut file = File::create(path) 78 | .map_err(|e| anyhow!("writing share to disk terminated with error: {e}"))?; 79 | 80 | let share_bytes = serde_json::to_vec(&local_key) 81 | .map_err(|e| anyhow!("share serialization terminated with error: {e}"))?; 82 | 83 | file.write(&share_bytes) 84 | .map_err(|e| anyhow!("error writing local key to file: {e}"))?; 85 | 86 | Ok(local_key.y_sum_s) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /tss/src/keysign.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | 3 | use std::io::Read; 4 | 5 | use anyhow::anyhow; 6 | use curv::arithmetic::Converter; 7 | use curv::elliptic::curves::Secp256k1; 8 | use curv::BigInt; 9 | 10 | use futures::future::TryFutureExt; 11 | use futures::StreamExt; 12 | use futures_util::{pin_mut, FutureExt, SinkExt, TryStreamExt}; 13 | 14 | use multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::state_machine::keygen::LocalKey; 15 | use 
multi_party_ecdsa::protocols::multi_party_ecdsa::gg_2020::state_machine::sign::{ 16 | OfflineStage, SignManual, 17 | }; 18 | use round_based::{AsyncProtocol, Msg}; 19 | 20 | use mpc_runtime::{IncomingMessage, OutgoingMessage, Peerset}; 21 | 22 | pub struct KeySign { 23 | path: String, 24 | } 25 | 26 | #[async_trait::async_trait] 27 | impl mpc_runtime::ComputeAgentAsync for KeySign { 28 | fn session_id(&self) -> u64 { 29 | 0 30 | } 31 | 32 | fn protocol_id(&self) -> u64 { 33 | 1 34 | } 35 | 36 | async fn compute( 37 | mut self: Box, 38 | mut parties: Peerset, 39 | args: Vec, 40 | rt_incoming: async_channel::Receiver, 41 | rt_outgoing: async_channel::Sender, 42 | ) -> anyhow::Result> { 43 | parties.recover_from_cache().await?; 44 | let i = parties.index_of(parties.local_peer_id()).unwrap() + 1; 45 | let n = parties.len(); 46 | let s_l = parties 47 | .parties_indexes 48 | .iter() 49 | .map(|i| (*i + 1) as u16) 50 | .collect(); 51 | let local_key = self.read_local_key()?; 52 | 53 | let state_machine = OfflineStage::new(i, s_l, local_key) 54 | .map_err(|e| anyhow!("failed building state {e}"))?; 55 | 56 | let (incoming, outgoing) = 57 | crate::round_based::state_replication(rt_incoming.clone(), rt_outgoing.clone()); 58 | 59 | let incoming = incoming.fuse(); 60 | pin_mut!(incoming, outgoing); 61 | 62 | let completed_offline_stage = AsyncProtocol::new(state_machine, incoming, outgoing) 63 | .run() 64 | .await 65 | .map_err(|e| anyhow!("protocol execution terminated with error: {e}"))?; 66 | 67 | let (incoming, outgoing) = crate::round_based::state_replication(rt_incoming, rt_outgoing); 68 | pin_mut!(incoming, outgoing); 69 | 70 | let (signing, partial_signature) = 71 | SignManual::new(BigInt::from_bytes(&*args), completed_offline_stage)?; 72 | 73 | outgoing 74 | .send(Msg { 75 | sender: i, 76 | receiver: None, 77 | body: partial_signature, 78 | }) 79 | .await 80 | .map_err(|_e| anyhow!("error sending partial signature"))?; 81 | 82 | let partial_signatures: Vec<_> = 
incoming 83 | .take(n - 1) 84 | .map_ok(|msg| msg.body) 85 | .try_collect() 86 | .await?; 87 | 88 | let sig = signing.complete(&partial_signatures)?; 89 | 90 | let signature_bytes = serde_ipld_dagcbor::to_vec(&sig) 91 | .map_err(|e| anyhow!("error encoding signature {e}"))?; 92 | 93 | Ok(signature_bytes) 94 | } 95 | } 96 | 97 | impl KeySign { 98 | pub fn new(p: &str) -> Self { 99 | Self { path: p.to_owned() } 100 | } 101 | 102 | fn read_local_key(&self) -> anyhow::Result> { 103 | let share_bytes = 104 | fs::read(self.path.as_str()).map_err(|e| anyhow!("error reading local key: {e}"))?; 105 | 106 | serde_json::from_slice(&share_bytes) 107 | .map_err(|e| anyhow!("error deserializing local key: {e}")) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /tss/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(inherent_associated_types)] 2 | extern crate core; 3 | 4 | mod config; 5 | mod factory; 6 | mod keygen; 7 | mod keysign; 8 | mod round_based; 9 | 10 | pub use config::*; 11 | pub use factory::*; 12 | pub use keygen::*; 13 | -------------------------------------------------------------------------------- /tss/src/round_based.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use futures::channel::oneshot; 3 | use futures::{Sink, Stream}; 4 | use futures_util::{SinkExt, StreamExt}; 5 | use log::info; 6 | use mpc_runtime::{IncomingMessage, MessageRouting, OutgoingMessage}; 7 | use round_based::Msg; 8 | use serde::de::DeserializeOwned; 9 | use serde::Serialize; 10 | use std::fmt::Debug; 11 | 12 | pub(crate) fn state_replication( 13 | incoming: async_channel::Receiver, 14 | outgoing: async_channel::Sender, 15 | ) -> ( 16 | impl Stream, anyhow::Error>>, 17 | impl Sink, Error = anyhow::Error>, 18 | ) 19 | where 20 | M: Serialize + DeserializeOwned + Debug, 21 | { 22 | let incoming = incoming.map(move 
|msg: IncomingMessage| { 23 | // Decode errors are surfaced through the stream (its item type is already
        // `Result<Msg<M>, anyhow::Error>`) instead of `.unwrap()`: one malformed
        // message from a remote peer must not panic the whole protocol runtime.
        let body: M = serde_ipld_dagcbor::from_slice(&*msg.body)
            .map_err(|e| anyhow!("failed decoding incoming message: {e}"))?; 24 | 25 | Ok(Msg:: { 26 | sender: msg.from, 27 | receiver: match msg.to { 28 | MessageRouting::Broadcast => None, 29 | MessageRouting::PointToPoint(i) => Some(i), 30 | }, 31 | body, 32 | }) 33 | }); 34 | 35 | let outgoing = futures::sink::unfold(outgoing, move |outgoing, message: Msg| async move { 36 | let payload = serde_ipld_dagcbor::to_vec(&message.body).map_err(|e| anyhow!("{e}"))?; 37 | let (tx, rx) = oneshot::channel(); 38 | outgoing 39 | .send(OutgoingMessage { 40 | body: payload, 41 | to: match message.receiver { 42 | Some(remote_index) => MessageRouting::PointToPoint(remote_index), 43 | None => MessageRouting::Broadcast, 44 | }, 45 | sent: Some(tx), 46 | }) 47 | .await 48 | .expect("channel is expected to be open"); 49 | 50 | let _ = rx.await; 51 | 52 | Ok::<_, anyhow::Error>(outgoing) 53 | }); 54 | 55 | (incoming, outgoing) 56 | } 57 | --------------------------------------------------------------------------------