├── .github └── workflows │ └── main.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── examples ├── handshake.rs ├── local_connect.rs └── sentry.rs └── src ├── disc.rs ├── disc ├── dns.rs ├── v4.rs └── v5.rs ├── ecies.rs ├── ecies ├── algorithm.rs └── proto.rs ├── errors.rs ├── lib.rs ├── mac.rs ├── node_filter.rs ├── peer.rs ├── rlpx.rs ├── transport.rs ├── types.rs └── util.rs /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | name: Continuous integration 4 | 5 | jobs: 6 | ci: 7 | runs-on: ${{ matrix.os }} 8 | strategy: 9 | matrix: 10 | os: [ubuntu-latest, windows-latest, macOS-latest] 11 | 12 | steps: 13 | - uses: actions/checkout@v2 14 | 15 | - uses: actions-rs/toolchain@v1 16 | with: 17 | profile: minimal 18 | toolchain: stable 19 | override: true 20 | components: rustfmt, clippy 21 | 22 | - uses: actions-rs/cargo@v1 23 | with: 24 | command: fmt 25 | args: --all -- --check 26 | 27 | - uses: actions-rs/cargo@v1 28 | with: 29 | command: install 30 | args: cargo-hack 31 | 32 | - uses: actions-rs/cargo@v1 33 | with: 34 | command: hack 35 | args: check --all --ignore-private --each-feature --no-dev-deps 36 | 37 | - uses: actions-rs/cargo@v1 38 | with: 39 | command: check 40 | args: --all --all-targets --all-features 41 | 42 | - uses: actions-rs/cargo@v1 43 | with: 44 | command: test 45 | 46 | - uses: actions-rs/cargo@v1 47 | with: 48 | command: clippy 49 | args: -- -D warnings 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | **/target 4 | 5 | # Generated by Nix 6 | **/result 7 | 8 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 9 | # More information here http://doc.crates.io/guide.html#cargotoml-vs-cargolock 10 | Cargo.lock 11 | 12 | # These are backup files generated by rustfmt 13 | **/*.rs.bk 14 | 15 | # IDEA files 16 | .idea/ -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "devp2p" 3 | version = "0.3.1" 4 | license = "Apache-2.0" 5 | authors = [ 6 | "Artem Vorotnikov ", 7 | "Wei Tang ", 8 | ] 9 | description = "Ethereum's devp2p protocol suite implementation in Rust." 
10 | repository = "https://github.com/rust-ethereum/devp2p" 11 | edition = "2018" 12 | 13 | [dependencies] 14 | aes = "0.6" 15 | aes-ctr = "0.6" 16 | anyhow = "1" 17 | arrayvec = "0.5" 18 | async-stream = "0.3" 19 | async-trait = "0.1" 20 | auto_impl = "0.4" 21 | block-modes = "0.7" 22 | byteorder = "1" 23 | bytes = "1" 24 | cidr = "0.1" 25 | derive_more = "0.99" 26 | digest = "0.9" 27 | discv4 = { git = "https://github.com/rust-ethereum/discv4", optional = true } 28 | discv5 = { git = "https://github.com/vorot93/discv5", branch = "dev", optional = true } 29 | dnsdisc = { git = "https://github.com/rust-ethereum/dnsdisc", optional = true } 30 | educe = { version = "0.4", default-features = false, features = ["Clone", "Debug", "Default"] } 31 | enr = { git = "https://github.com/rust-ethereum/enr", default-features = false, features = ["rust-secp256k1"] } 32 | enum-primitive-derive = "0.2" 33 | ethereum-types = { version = "0.11", default-features = false, features = ["std", "rlp"] } 34 | funty = "<1.2" 35 | futures = "0.3" 36 | futures-intrusive = "0.4" 37 | generic-array = "0.14" 38 | hex = "0.4" 39 | hmac = "0.10" 40 | maplit = "1" 41 | num-traits = "0.2" 42 | parking_lot = "0.11" 43 | rand = "0.8" 44 | rlp = "0.5" 45 | rlp-derive = "0.1" 46 | secp256k1 = { version = "0.20", features = ["recovery"] } 47 | sha2 = "0.9" 48 | sha3 = "0.9" 49 | snap = "1" 50 | task-group = { git = "https://github.com/vorot93/task-group" } 51 | thiserror = "1" 52 | tokio = { version = "1", features = ["macros", "net", "sync", "time"] } 53 | tokio-stream = "0.1" 54 | tokio-util = { version = "0.6", features = ["codec"] } 55 | tracing = "0.1" 56 | tracing-futures = "0.2" 57 | uuid = { version = "0.8", features = ["v4"] } 58 | 59 | [dev-dependencies] 60 | hex-literal = "0.3" 61 | sha3 = "0.9" 62 | tokio = { version = "1", features = ["full"] } 63 | tracing-subscriber = "0.2" 64 | trust-dns-resolver = "0.20" 65 | 66 | [[example]] 67 | name = "sentry" 68 | required-features = ["discv4"] 69 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 
175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Moved to [rust-ethereum/sentry](https://github.com/rust-ethereum/sentry) repo 2 | -------------------------------------------------------------------------------- /examples/handshake.rs: -------------------------------------------------------------------------------- 1 | use devp2p::{ecies::ECIESStream, PeerId}; 2 | use hex_literal::hex; 3 | use secp256k1::SecretKey; 4 | use tokio::net::TcpStream; 5 | use tracing_subscriber::EnvFilter; 6 | 7 | const REMOTE_ID: PeerId = PeerId(hex!("d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666 ")); 8 | 9 | #[tokio::main] 10 | async fn main() { 11 | tracing_subscriber::fmt() 12 | .with_env_filter(EnvFilter::from_default_env()) 13 | .init(); 14 | 15 | ECIESStream::connect( 16 | TcpStream::connect("18.138.108.67:30303").await.unwrap(), 17 | SecretKey::new(&mut secp256k1::rand::thread_rng()), 18 | REMOTE_ID, 19 | ) 20 | .await 21 | .unwrap(); 22 | } 23 | -------------------------------------------------------------------------------- /examples/local_connect.rs: -------------------------------------------------------------------------------- 1 | use arrayvec::ArrayString; 2 | use async_trait::async_trait; 3 | use devp2p::*; 4 | use maplit::btreemap; 5 | use rand::{seq::SliceRandom, thread_rng}; 6 | use secp256k1::SecretKey; 7 | use std::{collections::HashMap, sync::Arc, time::Duration}; 8 | use tokio::time::sleep; 9 | use tracing::*; 10 | use tracing_subscriber::EnvFilter; 11 | 12 | #[derive(Debug)] 13 | struct DummyServer; 14 | 15 | #[async_trait] 16 | impl CapabilityServer for DummyServer { 17 | #[instrument(skip(self, peer), fields(peer=&*peer.to_string()))] 18 | fn on_peer_connect(&self, peer: PeerId, _: HashMap) { 19 | info!("Peer connected") 20 | } 21 | 22 | #[instrument(skip(self, peer, event), fields(peer=&*peer.to_string(), event=&*event.to_string()))] 23 | async fn on_peer_event(&self, peer: PeerId, event: InboundEvent) { 24 | info!("Received event"); 25 | } 26 | 27 | #[instrument(skip(self, peer), fields(peer=&*peer.to_string()))] 28 | async fn next(&self, peer: PeerId) -> OutboundEvent { 29 | futures::future::pending().await 30 | } 31 | } 32 | 33 | #[tokio::main] 34 | async fn main() { 35 | tracing_subscriber::fmt() 36 | .with_env_filter(EnvFilter::from_default_env()) 37 | .init(); 38 | 39 | // Bootnodes as used in OpenEthereum 40 | let bootnodes = vec![ 41 | "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", 42 | "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", 43 | "enode://ca6de62fce278f96aea6ec5a2daadb877e51651247cb96ee310a318def462913b653963c155a0ef6c7d50048bba6e6cea881130857413d9f50a621546b590758@34.255.23.113:30303", 44 | "enode://279944d8dcd428dffaa7436f25ca0ca43ae19e7bcf94a8fb7d1641651f92d121e972ac2e8f381414b80cc8e5555811c2ec6e1a99bb009b3f53c4c69923e11bd8@35.158.244.151:30303", 45 | "enode://8499da03c47d637b20eee24eec3c356c9a2e6148d6fe25ca195c7949ab8ec2c03e3556126b0d7ed644675e78c4318b08691b7b57de10e5f0d40d05b09238fa0a@52.187.207.27:30303", 46 | 
"enode://103858bdb88756c71f15e9b5e09b56dc1be52f0a5021d46301dbbfb7e130029cc9d0d6f73f693bc29b665770fff7da4d34f3c6379fe12721b5d7a0bcb5ca1fc1@191.234.162.198:30303", 47 | "enode://715171f50508aba88aecd1250af392a45a330af91d7b90701c436b618c86aaa1589c9184561907bebbb56439b8f8787bc01f49a7c77276c58c1b09822d75e8e8@52.231.165.108:30303", 48 | "enode://5d6d7cd20d6da4bb83a1d28cadb5d409b64edf314c0335df658c1a54e32c7c4a7ab7823d57c39b6a757556e68ff1df17c748b698544a55cb488b52479a92b60f@104.42.217.25:30303", 49 | "enode://68f46370191198b71a1595dd453c489bbfe28036a9951fc0397fabd1b77462930b3c5a5359b20e99677855939be47b39fc8edcf1e9ff2522a922b86d233bf2df@144.217.153.76:30303", 50 | "enode://ffed6382e05ee42854d862f08e4e39b8452c50a5a5d399072c40f9a0b2d4ad34b0eb5312455ad8bcf0dcb4ce969dc89a9a9fd00183eaf8abf46bbcc59dc6e9d5@51.195.3.238:30303", 51 | "enode://b47b197244c054d385f25d7740b33cc7e2a74d6f715befad2b789fd3e3594bb1c8dd2ca2faf1a3bf6b4c9ec03e53b52301f722a2316b78976be03ccbe703c581@54.37.94.238:30303", 52 | "enode://5f7d0794c464b2fcd514d41e16e4b535a98ac792a71ca9667c7cef35595dc34c9a1b793c0622554cf87f34006942abb526af7d2e37d715ac32ed02170556cce2@51.161.101.207:30303", 53 | ]; 54 | 55 | let node = bootnodes 56 | .choose(&mut thread_rng()) 57 | .unwrap() 58 | .parse::() 59 | .unwrap(); 60 | 61 | debug!("Connecting to {}", node.addr); 62 | 63 | let swarm = Swarm::new( 64 | btreemap! { CapabilityId { name: CapabilityName(ArrayString::from("eth").unwrap()), version: 63 } => 15 }, 65 | Arc::new(DummyServer), 66 | SecretKey::new(&mut secp256k1::rand::thread_rng()), 67 | ) 68 | .await 69 | .unwrap(); 70 | 71 | swarm.add_peer(node).await.unwrap(); 72 | 73 | let timeout = 5; 74 | sleep(Duration::from_secs(timeout)).await; 75 | } 76 | -------------------------------------------------------------------------------- /examples/sentry.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use arrayvec::ArrayString; 4 | use async_stream::stream; 5 | use async_trait::async_trait; 6 | use devp2p::*; 7 | use ethereum_types::*; 8 | use futures::stream::BoxStream; 9 | use hex_literal::hex; 10 | use maplit::btreemap; 11 | use parking_lot::RwLock; 12 | use rlp_derive::{RlpDecodable, RlpEncodable}; 13 | use secp256k1::SecretKey; 14 | use std::{ 15 | collections::HashMap, 16 | sync::{ 17 | atomic::{AtomicUsize, Ordering}, 18 | Arc, 19 | }, 20 | }; 21 | use task_group::*; 22 | use tokio::{ 23 | sync::{ 24 | mpsc::{channel, Sender}, 25 | Mutex as AsyncMutex, 26 | }, 27 | time::sleep, 28 | }; 29 | use tokio_stream::{StreamExt, StreamMap}; 30 | use tracing::*; 31 | use tracing_subscriber::EnvFilter; 32 | 33 | const DISCV4_BOOTNODES: &[&str] = &[ 34 | "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", 35 | "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", 36 | "enode://ca6de62fce278f96aea6ec5a2daadb877e51651247cb96ee310a318def462913b653963c155a0ef6c7d50048bba6e6cea881130857413d9f50a621546b590758@34.255.23.113:30303", 37 | "enode://279944d8dcd428dffaa7436f25ca0ca43ae19e7bcf94a8fb7d1641651f92d121e972ac2e8f381414b80cc8e5555811c2ec6e1a99bb009b3f53c4c69923e11bd8@35.158.244.151:30303", 38 | "enode://8499da03c47d637b20eee24eec3c356c9a2e6148d6fe25ca195c7949ab8ec2c03e3556126b0d7ed644675e78c4318b08691b7b57de10e5f0d40d05b09238fa0a@52.187.207.27:30303", 39 | 
"enode://103858bdb88756c71f15e9b5e09b56dc1be52f0a5021d46301dbbfb7e130029cc9d0d6f73f693bc29b665770fff7da4d34f3c6379fe12721b5d7a0bcb5ca1fc1@191.234.162.198:30303", 40 | "enode://715171f50508aba88aecd1250af392a45a330af91d7b90701c436b618c86aaa1589c9184561907bebbb56439b8f8787bc01f49a7c77276c58c1b09822d75e8e8@52.231.165.108:30303", 41 | "enode://5d6d7cd20d6da4bb83a1d28cadb5d409b64edf314c0335df658c1a54e32c7c4a7ab7823d57c39b6a757556e68ff1df17c748b698544a55cb488b52479a92b60f@104.42.217.25:30303", 42 | "enode://68f46370191198b71a1595dd453c489bbfe28036a9951fc0397fabd1b77462930b3c5a5359b20e99677855939be47b39fc8edcf1e9ff2522a922b86d233bf2df@144.217.153.76:30303", 43 | "enode://ffed6382e05ee42854d862f08e4e39b8452c50a5a5d399072c40f9a0b2d4ad34b0eb5312455ad8bcf0dcb4ce969dc89a9a9fd00183eaf8abf46bbcc59dc6e9d5@51.195.3.238:30303", 44 | "enode://b47b197244c054d385f25d7740b33cc7e2a74d6f715befad2b789fd3e3594bb1c8dd2ca2faf1a3bf6b4c9ec03e53b52301f722a2316b78976be03ccbe703c581@54.37.94.238:30303", 45 | "enode://5f7d0794c464b2fcd514d41e16e4b535a98ac792a71ca9667c7cef35595dc34c9a1b793c0622554cf87f34006942abb526af7d2e37d715ac32ed02170556cce2@51.161.101.207:30303", 46 | ]; 47 | 48 | fn eth() -> CapabilityName { 49 | CapabilityName(ArrayString::from("eth").unwrap()) 50 | } 51 | 52 | #[derive(Debug, Default)] 53 | struct TaskMetrics { 54 | count: AtomicUsize, 55 | } 56 | 57 | impl task_group::Metrics for TaskMetrics { 58 | fn task_started(&self, id: TaskId, name: String) { 59 | let c = self.count.fetch_add(1, Ordering::Relaxed); 60 | trace!("TASK+ | {} total | {} | id: {}", c + 1, name, id) 61 | } 62 | 63 | fn task_stopped(&self, id: TaskId, name: String) { 64 | let c = self.count.fetch_sub(1, Ordering::Relaxed); 65 | trace!("TASK- | {} total | {} | id: {}", c - 1, name, id) 66 | } 67 | } 68 | 69 | #[derive(Debug, RlpEncodable, RlpDecodable)] 70 | struct StatusMessage { 71 | protocol_version: usize, 72 | network_id: usize, 73 | total_difficulty: U256, 74 | best_hash: H256, 75 | genesis_hash: H256, 76 | } 77 | 78 | #[derive(Clone)] 79 | struct Pipes { 80 | sender: Sender, 81 | receiver: Arc>>, 82 | } 83 | 84 | #[derive(Default)] 85 | struct CapabilityServerImpl { 86 | peer_pipes: Arc>>, 87 | } 88 | 89 | impl CapabilityServerImpl { 90 | fn setup_pipes(&self, peer: PeerId, pipes: Pipes) { 91 | assert!(self.peer_pipes.write().insert(peer, pipes).is_none()); 92 | } 93 | fn get_pipes(&self, peer: PeerId) -> Pipes { 94 | self.peer_pipes.read().get(&peer).unwrap().clone() 95 | } 96 | fn teardown(&self, peer: PeerId) { 97 | self.peer_pipes.write().remove(&peer); 98 | } 99 | fn connected_peers(&self) -> usize { 100 | self.peer_pipes.read().len() 101 | } 102 | } 103 | 104 | #[async_trait] 105 | impl CapabilityServer for CapabilityServerImpl { 106 | #[instrument(skip(self, peer), fields(peer=&*peer.to_string()))] 107 | fn on_peer_connect(&self, peer: PeerId, caps: HashMap) { 108 | info!("Settting up peer state"); 109 | let status_message = StatusMessage { 110 | protocol_version: *caps.get(ð()).unwrap(), 111 | network_id: 1, 112 | total_difficulty: 17608636743620256866935_u128.into(), 113 | best_hash: H256::from(hex!( 114 | "28042e7e4d35a3482bf5f0d862501868b04c1734f483ceae3bf1393561951829" 115 | )), 116 | genesis_hash: H256::from(hex!( 117 | "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" 118 | )), 119 | }; 120 | 121 | let first_message = OutboundEvent::Message { 122 | capability_name: eth(), 123 | message: Message { 124 | id: 0, 125 | data: rlp::encode(&status_message).into(), 126 | }, 127 | }; 128 | 129 | let 
(sender, mut receiver) = channel(1); 130 | 131 | self.setup_pipes( 132 | peer, 133 | Pipes { 134 | sender, 135 | receiver: Arc::new(AsyncMutex::new(Box::pin(stream! { 136 | yield first_message; 137 | 138 | while let Some(message) = receiver.recv().await { 139 | yield message; 140 | } 141 | }))), 142 | }, 143 | ); 144 | } 145 | #[instrument(skip(self, peer, event), fields(peer=&*peer.to_string(), event=&*event.to_string()))] 146 | async fn on_peer_event(&self, peer: PeerId, event: InboundEvent) { 147 | match event { 148 | InboundEvent::Disconnect { .. } => { 149 | self.teardown(peer); 150 | } 151 | InboundEvent::Message { message, .. } => { 152 | info!( 153 | "Received message with id {}, data {}", 154 | message.id, 155 | hex::encode(&message.data) 156 | ); 157 | 158 | if message.id == 0 { 159 | match rlp::decode::(&message.data) { 160 | Ok(v) => { 161 | info!("Decoded status message: {:?}", v); 162 | } 163 | Err(e) => { 164 | info!("Failed to decode status message: {}! Kicking peer.", e); 165 | let _ = self 166 | .get_pipes(peer) 167 | .sender 168 | .send(OutboundEvent::Disconnect { 169 | reason: DisconnectReason::ProtocolBreach, 170 | }) 171 | .await; 172 | 173 | return; 174 | } 175 | } 176 | } 177 | 178 | let out_id = match message.id { 179 | 3 => Some(4), 180 | 5 => Some(6), 181 | _ => None, 182 | }; 183 | 184 | if let Some(id) = out_id { 185 | let _ = self 186 | .get_pipes(peer) 187 | .sender 188 | .send(OutboundEvent::Message { 189 | capability_name: eth(), 190 | message: Message { 191 | id, 192 | data: rlp::encode_list::(&[]).into(), 193 | }, 194 | }) 195 | .await; 196 | } 197 | } 198 | } 199 | } 200 | #[instrument(skip(self, peer), fields(peer=&*peer.to_string()))] 201 | async fn next(&self, peer: PeerId) -> OutboundEvent { 202 | let outbound = self 203 | .get_pipes(peer) 204 | .receiver 205 | .lock() 206 | .await 207 | .next() 208 | .await 209 | .unwrap_or(OutboundEvent::Disconnect { 210 | reason: DisconnectReason::DisconnectRequested, 211 | }); 212 | 213 | info!("Sending outbound event {:?}", outbound); 214 | 215 | outbound 216 | } 217 | } 218 | 219 | #[tokio::main] 220 | async fn main() { 221 | tracing_subscriber::fmt() 222 | .with_env_filter(EnvFilter::from_default_env()) 223 | .init(); 224 | 225 | let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); 226 | 227 | let task_metrics = Arc::new(TaskMetrics::default()); 228 | let task_group = Arc::new(TaskGroup::new_with_metrics(task_metrics.clone())); 229 | 230 | let port = 30303; 231 | 232 | let mut discovery_tasks = StreamMap::new(); 233 | discovery_tasks.insert( 234 | "discv4".to_string(), 235 | Box::pin( 236 | Discv4Builder::default() 237 | .with_cache(20) 238 | .with_concurrent_lookups(50) 239 | .build( 240 | discv4::Node::new( 241 | format!("0.0.0.0:{}", port).parse().unwrap(), 242 | SecretKey::new(&mut secp256k1::rand::thread_rng()), 243 | DISCV4_BOOTNODES 244 | .iter() 245 | .map(|v| v.parse().unwrap()) 246 | .collect(), 247 | None, 248 | true, 249 | port, 250 | ) 251 | .await 252 | .unwrap(), 253 | ), 254 | ) as Discovery, 255 | ); 256 | 257 | let capability_server = Arc::new(CapabilityServerImpl::default()); 258 | 259 | let swarm = Swarm::builder() 260 | .with_task_group(task_group.clone()) 261 | .with_listen_options(ListenOptions { 262 | discovery_tasks, 263 | max_peers: 50, 264 | addr: format!("0.0.0.0:{}", port).parse().unwrap(), 265 | cidr: None, 266 | }) 267 | .build( 268 | btreemap! 
{ CapabilityId { 269 | name: eth(), 270 | version: 63, 271 | } => 17 }, 272 | capability_server, 273 | secret_key, 274 | ) 275 | .await 276 | .unwrap(); 277 | 278 | loop { 279 | sleep(std::time::Duration::from_secs(5)).await; 280 | info!("Peers: {}.", swarm.connected_peers()); 281 | } 282 | } 283 | -------------------------------------------------------------------------------- /src/disc.rs: -------------------------------------------------------------------------------- 1 | use crate::types::*; 2 | use derive_more::From; 3 | use futures::stream::BoxStream; 4 | use std::{collections::HashMap, net::SocketAddr, task::Poll}; 5 | use tokio_stream::Stream; 6 | 7 | #[cfg(feature = "discv4")] 8 | mod v4; 9 | 10 | #[cfg(feature = "discv4")] 11 | pub use self::v4::{Discv4, Discv4Builder}; 12 | #[cfg(feature = "discv4")] 13 | pub use discv4; 14 | 15 | #[cfg(feature = "discv5")] 16 | mod v5; 17 | 18 | #[cfg(feature = "discv5")] 19 | pub use self::v5::Discv5; 20 | #[cfg(feature = "discv5")] 21 | pub use discv5; 22 | 23 | #[cfg(feature = "dnsdisc")] 24 | mod dns; 25 | 26 | #[cfg(feature = "dnsdisc")] 27 | pub use self::dns::DnsDiscovery; 28 | #[cfg(feature = "dnsdisc")] 29 | pub use dnsdisc; 30 | 31 | pub type Discovery = BoxStream<'static, anyhow::Result>; 32 | 33 | #[derive(Clone, Debug, From)] 34 | pub struct Bootnodes(pub HashMap); 35 | 36 | impl Stream for Bootnodes { 37 | type Item = anyhow::Result; 38 | 39 | fn poll_next( 40 | self: std::pin::Pin<&mut Self>, 41 | _: &mut std::task::Context<'_>, 42 | ) -> std::task::Poll> { 43 | if let Some((&addr, &id)) = self.0.iter().next() { 44 | Poll::Ready(Some(Ok(NodeRecord { id, addr }))) 45 | } else { 46 | Poll::Ready(None) 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/disc/dns.rs: -------------------------------------------------------------------------------- 1 | use crate::{types::*, util::*}; 2 | use dnsdisc::{Backend, Resolver}; 3 | use secp256k1::{PublicKey, SecretKey}; 4 | use std::{pin::Pin, sync::Arc, time::Duration}; 5 | use task_group::TaskGroup; 6 | use tokio::sync::mpsc::{channel, Receiver}; 7 | use tokio_stream::{Stream, StreamExt}; 8 | use tracing::*; 9 | 10 | const MAX_SINGLE_RESOLUTION: u64 = 10; 11 | const MAX_RESOLUTION_DURATION: u64 = 1800; 12 | 13 | pub struct DnsDiscovery { 14 | #[allow(unused)] 15 | tasks: TaskGroup, 16 | receiver: Receiver>, 17 | } 18 | 19 | impl DnsDiscovery { 20 | #[must_use] 21 | pub fn new( 22 | discovery: Arc>, 23 | domain: String, 24 | public_key: Option, 25 | ) -> Self { 26 | let tasks = TaskGroup::default(); 27 | 28 | let (tx, receiver) = channel(1); 29 | tasks.spawn_with_name("DNS discovery pump", async move { 30 | loop { 31 | let mut query = discovery.query(domain.clone(), public_key); 32 | let restart_at = 33 | std::time::Instant::now() + Duration::from_secs(MAX_RESOLUTION_DURATION); 34 | 35 | loop { 36 | match tokio::time::timeout( 37 | Duration::from_secs(MAX_SINGLE_RESOLUTION), 38 | query.next(), 39 | ) 40 | .await 41 | { 42 | Ok(Some(Err(e))) => { 43 | if tx.send(Err(e)).await.is_err() { 44 | return; 45 | } 46 | break; 47 | } 48 | Ok(Some(Ok(v))) => { 49 | if let Some(addr) = v.tcp_socket() { 50 | if tx 51 | .send(Ok(NodeRecord { 52 | addr, 53 | id: pk2id(&v.public_key()), 54 | })) 55 | .await 56 | .is_err() 57 | { 58 | return; 59 | } 60 | } 61 | } 62 | Ok(None) => { 63 | break; 64 | } 65 | Err(_) => {} 66 | } 67 | 68 | if std::time::Instant::now() > restart_at { 69 | trace!("Restarting DNS resolution"); 70 | break; 71 | } 72 | } 73 | 
} 74 | }); 75 | 76 | Self { tasks, receiver } 77 | } 78 | } 79 | 80 | impl Stream for DnsDiscovery { 81 | type Item = anyhow::Result; 82 | 83 | fn poll_next( 84 | mut self: std::pin::Pin<&mut Self>, 85 | cx: &mut std::task::Context<'_>, 86 | ) -> std::task::Poll> { 87 | Pin::new(&mut self.receiver).poll_recv(cx) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/disc/v4.rs: -------------------------------------------------------------------------------- 1 | use crate::types::*; 2 | use discv4::Node; 3 | use educe::Educe; 4 | use std::{pin::Pin, sync::Arc}; 5 | use task_group::TaskGroup; 6 | use tokio::sync::mpsc::{channel, Receiver}; 7 | use tokio_stream::Stream; 8 | 9 | #[derive(Educe)] 10 | #[educe(Default)] 11 | pub struct Discv4Builder { 12 | #[educe(Default(1))] 13 | concurrent_lookups: usize, 14 | #[educe(Default(20))] 15 | cache: usize, 16 | } 17 | 18 | impl Discv4Builder { 19 | pub fn with_concurrent_lookups(mut self, concurrent_lookups: usize) -> Self { 20 | self.concurrent_lookups = concurrent_lookups; 21 | self 22 | } 23 | 24 | pub fn with_cache(mut self, cache: usize) -> Self { 25 | self.cache = cache; 26 | self 27 | } 28 | 29 | pub fn build(self, node: Arc) -> Discv4 { 30 | Discv4::new(node, self.concurrent_lookups, self.cache) 31 | } 32 | } 33 | 34 | pub struct Discv4 { 35 | #[allow(unused)] 36 | tasks: TaskGroup, 37 | receiver: Receiver, 38 | } 39 | 40 | impl Discv4 { 41 | #[must_use] 42 | fn new(node: Arc, concurrent_lookups: usize, cache: usize) -> Self { 43 | let tasks = TaskGroup::default(); 44 | 45 | let (tx, receiver) = channel(cache); 46 | 47 | for i in 0..concurrent_lookups { 48 | let node = node.clone(); 49 | let tx = tx.clone(); 50 | tasks.spawn_with_name(format!("discv4 lookup #{}", i), { 51 | async move { 52 | let node = node.clone(); 53 | let tx = tx.clone(); 54 | loop { 55 | for record in node.lookup(rand::random()).await { 56 | let _ = tx 57 | .send(NodeRecord { 58 | addr: record.tcp_addr(), 59 | id: record.id, 60 | }) 61 | .await; 62 | } 63 | } 64 | } 65 | }); 66 | } 67 | 68 | Self { tasks, receiver } 69 | } 70 | } 71 | 72 | impl Stream for Discv4 { 73 | type Item = anyhow::Result; 74 | 75 | fn poll_next( 76 | mut self: std::pin::Pin<&mut Self>, 77 | cx: &mut std::task::Context<'_>, 78 | ) -> std::task::Poll> { 79 | Pin::new(&mut self.receiver) 80 | .poll_recv(cx) 81 | .map(|opt| opt.map(Ok)) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/disc/v5.rs: -------------------------------------------------------------------------------- 1 | use crate::{types::*, util::*}; 2 | use anyhow::anyhow; 3 | use async_stream::stream; 4 | use futures::stream::BoxStream; 5 | use futures_intrusive::channel::UnbufferedChannel; 6 | use secp256k1::PublicKey; 7 | use std::{pin::Pin, sync::Arc}; 8 | use task_group::TaskGroup; 9 | use tokio::{select, sync::mpsc::channel}; 10 | use tokio_stream::Stream; 11 | use tracing::*; 12 | 13 | pub struct Discv5 { 14 | #[allow(unused)] 15 | tasks: TaskGroup, 16 | receiver: BoxStream<'static, anyhow::Result>, 17 | } 18 | 19 | impl Discv5 { 20 | pub fn new(mut disc: discv5::Discv5, cache: usize) -> Self { 21 | let tasks = TaskGroup::default(); 22 | 23 | let errors = Arc::new(UnbufferedChannel::new()); 24 | let (tx, mut nodes) = channel(cache); 25 | 26 | tasks.spawn_with_name("discv5 pump", { 27 | let errors = errors.clone(); 28 | async move { 29 | async { 30 | loop { 31 | match disc.find_node(discv5::enr::NodeId::random()).await { 32 | 
Err(e) => { 33 | if errors 34 | .send(anyhow!("Discovery error: {}", e)) 35 | .await 36 | .is_err() 37 | { 38 | return; 39 | } 40 | } 41 | Ok(nodes) => { 42 | for node in nodes { 43 | if let Some(ip) = node.ip() { 44 | if let Some(port) = node.tcp() { 45 | if let discv5::enr::CombinedPublicKey::Secp256k1(pk) = 46 | node.public_key() 47 | { 48 | if tx 49 | .send(NodeRecord { 50 | addr: (ip, port).into(), 51 | id: pk2id( 52 | &PublicKey::from_slice(&pk.to_bytes()) 53 | .unwrap(), 54 | ), 55 | }) 56 | .await 57 | .is_err() 58 | { 59 | return; 60 | } 61 | } 62 | } 63 | } 64 | } 65 | } 66 | } 67 | } 68 | } 69 | .await; 70 | 71 | debug!("Discovery receivers dropped, shutting down"); 72 | } 73 | }); 74 | 75 | let (tx, mut receiver) = channel(1); 76 | tasks.spawn_with_name("discv4 pump 2", async move { 77 | loop { 78 | let err_fut = errors.receive(); 79 | let node_fut = nodes.recv(); 80 | 81 | select! { 82 | Some(error) = err_fut => { 83 | if tx.send(Err(error)).await.is_err() { 84 | return; 85 | } 86 | } 87 | Some(node) = node_fut => { 88 | if tx.send(Ok(node)).await.is_err() { 89 | return; 90 | } 91 | } 92 | else => { 93 | return; 94 | } 95 | } 96 | } 97 | }); 98 | 99 | Self { 100 | tasks, 101 | receiver: Box::pin(stream! { 102 | while let Some(v) = receiver.recv().await { 103 | yield v; 104 | } 105 | }), 106 | } 107 | } 108 | } 109 | 110 | impl Stream for Discv5 { 111 | type Item = anyhow::Result; 112 | 113 | fn poll_next( 114 | mut self: std::pin::Pin<&mut Self>, 115 | cx: &mut std::task::Context<'_>, 116 | ) -> std::task::Poll> { 117 | Pin::new(&mut self.receiver).poll_next(cx) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /src/ecies.rs: -------------------------------------------------------------------------------- 1 | //! ECIES protocol implementation 2 | 3 | mod algorithm; 4 | mod proto; 5 | 6 | pub use self::proto::{ECIESCodec, ECIESState, ECIESStream, EgressECIESValue, IngressECIESValue}; 7 | -------------------------------------------------------------------------------- /src/ecies/algorithm.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | errors::ECIESError, 3 | mac::*, 4 | types::*, 5 | util::{hmac_sha256, id2pk, pk2id, sha256}, 6 | }; 7 | use aes_ctr::{ 8 | cipher::{NewStreamCipher, StreamCipher}, 9 | Aes128Ctr, Aes256Ctr, 10 | }; 11 | use anyhow::Context; 12 | use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; 13 | use bytes::{BufMut, Bytes, BytesMut}; 14 | use digest::Digest; 15 | use educe::Educe; 16 | use ethereum_types::{H128, H256}; 17 | use rand::{thread_rng, Rng}; 18 | use rlp::{Rlp, RlpStream}; 19 | use secp256k1::{ 20 | recovery::{RecoverableSignature, RecoveryId}, 21 | PublicKey, SecretKey, SECP256K1, 22 | }; 23 | use sha2::Sha256; 24 | use sha3::Keccak256; 25 | use std::convert::TryFrom; 26 | 27 | const PROTOCOL_VERSION: usize = 4; 28 | 29 | fn ecdh_x(public_key: &PublicKey, secret_key: &SecretKey) -> H256 { 30 | H256::from_slice( 31 | &secp256k1::ecdh::SharedSecret::new_with_hash(&public_key, &secret_key, |x, _| x.into()) 32 | [0..32], 33 | ) 34 | } 35 | 36 | fn kdf(secret: H256, s1: &[u8], dest: &mut [u8]) { 37 | // SEC/ISO/Shoup specify counter size SHOULD be equivalent 38 | // to size of hash output, however, it also notes that 39 | // the 4 bytes is okay. NIST specifies 4 bytes. 
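    // In other words, this is the concatenation-style KDF used by ECIES here: each
    // 32-byte output block is SHA-256(be32(counter) || shared_secret || s1), with the
    // counter starting at 1; for the 32-byte keys derived below a single iteration
    // fills `dest`.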
40 | let mut ctr = 1_u32; 41 | let mut written = 0_usize; 42 | while written < dest.len() { 43 | let mut hasher = Sha256::default(); 44 | let ctrs = [ 45 | (ctr >> 24) as u8, 46 | (ctr >> 16) as u8, 47 | (ctr >> 8) as u8, 48 | ctr as u8, 49 | ]; 50 | hasher.update(&ctrs); 51 | hasher.update(secret.as_bytes()); 52 | hasher.update(s1); 53 | let d = hasher.finalize(); 54 | dest[written..(written + 32)].copy_from_slice(&d); 55 | written += 32; 56 | ctr += 1; 57 | } 58 | } 59 | 60 | #[derive(Educe)] 61 | #[educe(Debug)] 62 | pub struct ECIES { 63 | #[educe(Debug(ignore))] 64 | secret_key: SecretKey, 65 | public_key: PublicKey, 66 | remote_public_key: Option, 67 | 68 | pub(crate) remote_id: Option, 69 | 70 | #[educe(Debug(ignore))] 71 | ephemeral_secret_key: SecretKey, 72 | ephemeral_public_key: PublicKey, 73 | ephemeral_shared_secret: Option, 74 | remote_ephemeral_public_key: Option, 75 | 76 | nonce: H256, 77 | remote_nonce: Option, 78 | 79 | ingress_aes: Option, 80 | egress_aes: Option, 81 | ingress_mac: Option, 82 | egress_mac: Option, 83 | 84 | init_msg: Option, 85 | remote_init_msg: Option, 86 | 87 | body_size: Option, 88 | } 89 | 90 | impl ECIES { 91 | fn new_static_client( 92 | secret_key: SecretKey, 93 | remote_id: PeerId, 94 | nonce: H256, 95 | ephemeral_secret_key: SecretKey, 96 | ) -> Result { 97 | let public_key = PublicKey::from_secret_key(SECP256K1, &secret_key); 98 | let remote_public_key = id2pk(remote_id)?; 99 | let ephemeral_public_key = PublicKey::from_secret_key(SECP256K1, &ephemeral_secret_key); 100 | 101 | Ok(Self { 102 | secret_key, 103 | public_key, 104 | ephemeral_secret_key, 105 | ephemeral_public_key, 106 | nonce, 107 | 108 | remote_public_key: Some(remote_public_key), 109 | remote_ephemeral_public_key: None, 110 | remote_nonce: None, 111 | ephemeral_shared_secret: None, 112 | init_msg: None, 113 | remote_init_msg: None, 114 | 115 | remote_id: Some(remote_id), 116 | 117 | body_size: None, 118 | egress_aes: None, 119 | ingress_aes: None, 120 | egress_mac: None, 121 | ingress_mac: None, 122 | }) 123 | } 124 | 125 | pub fn new_client(secret_key: SecretKey, remote_id: PeerId) -> Result { 126 | let nonce = H256::random(); 127 | let ephemeral_secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); 128 | 129 | Self::new_static_client(secret_key, remote_id, nonce, ephemeral_secret_key) 130 | } 131 | 132 | pub fn new_static_server( 133 | secret_key: SecretKey, 134 | nonce: H256, 135 | ephemeral_secret_key: SecretKey, 136 | ) -> Result { 137 | let public_key = PublicKey::from_secret_key(SECP256K1, &secret_key); 138 | let ephemeral_public_key = PublicKey::from_secret_key(SECP256K1, &ephemeral_secret_key); 139 | 140 | Ok(Self { 141 | secret_key, 142 | public_key, 143 | ephemeral_secret_key, 144 | ephemeral_public_key, 145 | nonce, 146 | 147 | remote_public_key: None, 148 | remote_ephemeral_public_key: None, 149 | remote_nonce: None, 150 | ephemeral_shared_secret: None, 151 | init_msg: None, 152 | remote_init_msg: None, 153 | 154 | remote_id: None, 155 | 156 | body_size: None, 157 | egress_aes: None, 158 | ingress_aes: None, 159 | egress_mac: None, 160 | ingress_mac: None, 161 | }) 162 | } 163 | 164 | pub fn new_server(secret_key: SecretKey) -> Result { 165 | let nonce = H256::random(); 166 | let ephemeral_secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); 167 | 168 | Self::new_static_server(secret_key, nonce, ephemeral_secret_key) 169 | } 170 | 171 | pub fn remote_id(&self) -> PeerId { 172 | self.remote_id.unwrap() 173 | } 174 | 175 | fn 
encrypt_message(&self, data: &[u8], out: &mut BytesMut) { 176 | out.reserve(secp256k1::constants::UNCOMPRESSED_PUBLIC_KEY_SIZE + 16 + data.len() + 32); 177 | 178 | let secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); 179 | out.extend_from_slice( 180 | &PublicKey::from_secret_key(SECP256K1, &secret_key).serialize_uncompressed(), 181 | ); 182 | 183 | let x = ecdh_x(&self.remote_public_key.unwrap(), &secret_key); 184 | let mut key = [0_u8; 32]; 185 | kdf(x, &[], &mut key); 186 | 187 | let enc_key = H128::from_slice(&key[0..16]); 188 | let mac_key = sha256(&key[16..32]); 189 | 190 | let iv = H128::random(); 191 | let mut encryptor = Aes128Ctr::new(enc_key.as_ref().into(), iv.as_ref().into()); 192 | 193 | let mut encrypted = data.to_vec(); 194 | encryptor.encrypt(&mut encrypted); 195 | 196 | let total_size: u16 = u16::try_from(65 + 16 + data.len() + 32).unwrap(); 197 | 198 | let tag = hmac_sha256( 199 | mac_key.as_ref(), 200 | &[iv.as_bytes(), &encrypted], 201 | &total_size.to_be_bytes(), 202 | ); 203 | 204 | out.extend_from_slice(iv.as_bytes()); 205 | out.extend_from_slice(&encrypted); 206 | out.extend_from_slice(tag.as_ref()); 207 | } 208 | 209 | fn decrypt_message<'a>(&self, data: &'a mut [u8]) -> Result<&'a mut [u8], ECIESError> { 210 | let (auth_data, encrypted) = data.split_at_mut(2); 211 | let (pubkey_bytes, encrypted) = encrypted.split_at_mut(65); 212 | let public_key = PublicKey::from_slice(&pubkey_bytes) 213 | .with_context(|| format!("bad public key {}", hex::encode(pubkey_bytes)))?; 214 | let (data_iv, tag_bytes) = encrypted.split_at_mut(encrypted.len() - 32); 215 | let (iv, encrypted_data) = data_iv.split_at_mut(16); 216 | let tag = H256::from_slice(tag_bytes); 217 | 218 | let x = ecdh_x(&public_key, &self.secret_key); 219 | let mut key = [0_u8; 32]; 220 | kdf(x, &[], &mut key); 221 | let enc_key = H128::from_slice(&key[0..16]); 222 | let mac_key = sha256(&key[16..32]); 223 | 224 | let check_tag = hmac_sha256(mac_key.as_ref(), &[iv, encrypted_data], auth_data); 225 | if check_tag != tag { 226 | return Err(ECIESError::TagCheckFailed); 227 | } 228 | 229 | let mut decrypted_data = encrypted_data; 230 | 231 | let mut decryptor = Aes128Ctr::new(enc_key.as_ref().into(), (&*iv).into()); 232 | decryptor.decrypt(&mut decrypted_data); 233 | 234 | Ok(decrypted_data) 235 | } 236 | 237 | fn create_auth_unencrypted(&self) -> BytesMut { 238 | let x = ecdh_x(&self.remote_public_key.unwrap(), &self.secret_key); 239 | let msg = x ^ self.nonce; 240 | let (rec_id, sig) = SECP256K1 241 | .sign_recoverable( 242 | &secp256k1::Message::from_slice(msg.as_bytes()).unwrap(), 243 | &self.ephemeral_secret_key, 244 | ) 245 | .serialize_compact(); 246 | 247 | let mut sig_bytes = [0_u8; 65]; 248 | sig_bytes[..64].copy_from_slice(&sig); 249 | sig_bytes[64] = rec_id.to_i32() as u8; 250 | let mut out = RlpStream::new_list(4); 251 | out.append(&(&sig_bytes as &[u8])); 252 | out.append(&pk2id(&self.public_key)); 253 | out.append(&self.nonce); 254 | out.append(&PROTOCOL_VERSION); 255 | 256 | let mut out = out.out(); 257 | out.resize(out.len() + thread_rng().gen_range(100..=300), 0); 258 | out 259 | } 260 | 261 | #[cfg(test)] 262 | fn create_auth(&mut self) -> BytesMut { 263 | let mut buf = BytesMut::new(); 264 | self.write_auth(&mut buf); 265 | buf 266 | } 267 | 268 | pub fn write_auth(&mut self, buf: &mut BytesMut) { 269 | let unencrypted = self.create_auth_unencrypted(); 270 | 271 | let mut out = buf.split_off(buf.len()); 272 | out.put_u16(0); 273 | 274 | let mut encrypted = out.split_off(out.len()); 
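        // Auth framing: the 2-byte big-endian length placeholder reserved above is
        // followed by the ECIES ciphertext of the RLP-encoded, randomly padded auth
        // body; the placeholder is backfilled below once the ciphertext length is
        // known.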
275 | self.encrypt_message(&unencrypted, &mut encrypted); 276 | 277 | let len_bytes = u16::try_from(encrypted.len()).unwrap().to_be_bytes(); 278 | out[..len_bytes.len()].copy_from_slice(&len_bytes); 279 | 280 | out.unsplit(encrypted); 281 | 282 | self.init_msg = Some(Bytes::copy_from_slice(&out)); 283 | 284 | buf.unsplit(out); 285 | } 286 | 287 | fn parse_auth_unencrypted(&mut self, data: &[u8]) -> Result<(), ECIESError> { 288 | let rlp = Rlp::new(data); 289 | let mut rlp = rlp.into_iter(); 290 | 291 | let sigdata = rlp 292 | .next() 293 | .ok_or(rlp::DecoderError::RlpInvalidLength)? 294 | .data()?; 295 | if sigdata.len() != 65 { 296 | return Err(ECIESError::InvalidAuthData); 297 | } 298 | let signature = RecoverableSignature::from_compact( 299 | &sigdata[0..64], 300 | RecoveryId::from_i32(sigdata[64] as i32)?, 301 | )?; 302 | let remote_id = rlp 303 | .next() 304 | .ok_or(rlp::DecoderError::RlpInvalidLength)? 305 | .as_val()?; 306 | self.remote_id = Some(remote_id); 307 | self.remote_public_key = Some(id2pk(remote_id).context("failed to parse peer id")?); 308 | self.remote_nonce = Some( 309 | rlp.next() 310 | .ok_or(rlp::DecoderError::RlpInvalidLength)? 311 | .as_val()?, 312 | ); 313 | 314 | let x = ecdh_x(&self.remote_public_key.unwrap(), &self.secret_key); 315 | self.remote_ephemeral_public_key = Some(SECP256K1.recover( 316 | &secp256k1::Message::from_slice((x ^ self.remote_nonce.unwrap()).as_ref()).unwrap(), 317 | &signature, 318 | )?); 319 | self.ephemeral_shared_secret = Some(ecdh_x( 320 | &self.remote_ephemeral_public_key.unwrap(), 321 | &self.ephemeral_secret_key, 322 | )); 323 | 324 | Ok(()) 325 | } 326 | 327 | pub fn read_auth(&mut self, data: &mut [u8]) -> Result<(), ECIESError> { 328 | self.remote_init_msg = Some(Bytes::copy_from_slice(data)); 329 | let unencrypted = self.decrypt_message(data)?; 330 | self.parse_auth_unencrypted(&unencrypted) 331 | } 332 | 333 | fn create_ack_unencrypted(&self) -> BytesMut { 334 | let mut out = RlpStream::new_list(3); 335 | out.append(&pk2id(&self.ephemeral_public_key)); 336 | out.append(&self.nonce); 337 | out.append(&PROTOCOL_VERSION); 338 | out.out() 339 | } 340 | 341 | #[cfg(test)] 342 | pub fn create_ack(&mut self) -> BytesMut { 343 | let mut buf = BytesMut::new(); 344 | self.write_ack(&mut buf); 345 | buf 346 | } 347 | 348 | pub fn write_ack(&mut self, out: &mut BytesMut) { 349 | let unencrypted = self.create_ack_unencrypted(); 350 | 351 | let mut buf = out.split_off(out.len()); 352 | 353 | // reserve space for length 354 | buf.put_u16(0); 355 | 356 | // encrypt and append 357 | let mut encrypted = buf.split_off(buf.len()); 358 | self.encrypt_message(&unencrypted, &mut encrypted); 359 | let len_bytes = u16::try_from(encrypted.len()).unwrap().to_be_bytes(); 360 | buf.unsplit(encrypted); 361 | 362 | // write length 363 | buf[..len_bytes.len()].copy_from_slice(&len_bytes[..]); 364 | 365 | self.init_msg = Some(buf.clone().freeze()); 366 | out.unsplit(buf); 367 | 368 | self.setup_frame(true); 369 | } 370 | 371 | fn parse_ack_unencrypted(&mut self, data: &[u8]) -> Result<(), ECIESError> { 372 | let rlp = Rlp::new(data); 373 | let mut rlp = rlp.into_iter(); 374 | self.remote_ephemeral_public_key = Some(id2pk( 375 | rlp.next() 376 | .ok_or(rlp::DecoderError::RlpInvalidLength)? 377 | .as_val()?, 378 | )?); 379 | self.remote_nonce = Some( 380 | rlp.next() 381 | .ok_or(rlp::DecoderError::RlpInvalidLength)? 
382 | .as_val()?, 383 | ); 384 | 385 | self.ephemeral_shared_secret = Some(ecdh_x( 386 | &self.remote_ephemeral_public_key.unwrap(), 387 | &self.ephemeral_secret_key, 388 | )); 389 | Ok(()) 390 | } 391 | 392 | pub fn read_ack(&mut self, data: &mut [u8]) -> Result<(), ECIESError> { 393 | self.remote_init_msg = Some(Bytes::copy_from_slice(data)); 394 | let unencrypted = self.decrypt_message(data)?; 395 | self.parse_ack_unencrypted(&unencrypted)?; 396 | self.setup_frame(false); 397 | Ok(()) 398 | } 399 | 400 | fn setup_frame(&mut self, incoming: bool) { 401 | let h_nonce: H256 = if incoming { 402 | let mut hasher = Keccak256::new(); 403 | hasher.update(self.nonce.as_ref()); 404 | hasher.update(self.remote_nonce.unwrap().as_ref()); 405 | H256::from(hasher.finalize().as_ref()) 406 | } else { 407 | let mut hasher = Keccak256::new(); 408 | hasher.update(self.remote_nonce.unwrap().as_ref()); 409 | hasher.update(self.nonce.as_ref()); 410 | H256::from(hasher.finalize().as_ref()) 411 | }; 412 | 413 | let iv = H128::default(); 414 | let shared_secret: H256 = { 415 | let mut hasher = Keccak256::new(); 416 | hasher.update(self.ephemeral_shared_secret.unwrap().as_ref()); 417 | hasher.update(h_nonce.as_ref()); 418 | H256::from(hasher.finalize().as_ref()) 419 | }; 420 | 421 | let aes_secret: H256 = { 422 | let mut hasher = Keccak256::new(); 423 | hasher.update(self.ephemeral_shared_secret.unwrap().as_ref()); 424 | hasher.update(shared_secret.as_ref()); 425 | H256::from(hasher.finalize().as_ref()) 426 | }; 427 | self.ingress_aes = Some(Aes256Ctr::new( 428 | aes_secret.as_ref().into(), 429 | iv.as_ref().into(), 430 | )); 431 | self.egress_aes = Some(Aes256Ctr::new( 432 | aes_secret.as_ref().into(), 433 | iv.as_ref().into(), 434 | )); 435 | 436 | let mac_secret: H256 = { 437 | let mut hasher = Keccak256::new(); 438 | hasher.update(self.ephemeral_shared_secret.unwrap().as_ref()); 439 | hasher.update(aes_secret.as_ref()); 440 | H256::from(hasher.finalize().as_ref()) 441 | }; 442 | self.ingress_mac = Some(MAC::new(mac_secret)); 443 | self.ingress_mac 444 | .as_mut() 445 | .unwrap() 446 | .update((mac_secret ^ self.nonce).as_ref()); 447 | self.ingress_mac 448 | .as_mut() 449 | .unwrap() 450 | .update(self.remote_init_msg.as_ref().unwrap()); 451 | self.egress_mac = Some(MAC::new(mac_secret)); 452 | self.egress_mac 453 | .as_mut() 454 | .unwrap() 455 | .update((mac_secret ^ self.remote_nonce.unwrap()).as_ref()); 456 | self.egress_mac 457 | .as_mut() 458 | .unwrap() 459 | .update(self.init_msg.as_ref().unwrap()); 460 | } 461 | 462 | #[cfg(test)] 463 | fn create_header(&mut self, size: usize) -> BytesMut { 464 | let mut out = BytesMut::new(); 465 | self.write_header(&mut out, size); 466 | out 467 | } 468 | 469 | pub fn write_header(&mut self, out: &mut BytesMut, size: usize) { 470 | let mut buf = [0; 8]; 471 | BigEndian::write_uint(&mut buf, size as u64, 3); 472 | let mut header = [0_u8; 16]; 473 | header[0..3].copy_from_slice(&buf[0..3]); 474 | header[3..6].copy_from_slice(&[194, 128, 128]); 475 | 476 | let mut header = HeaderBytes::from(header); 477 | self.egress_aes.as_mut().unwrap().encrypt(&mut header); 478 | self.egress_mac.as_mut().unwrap().update_header(&header); 479 | let tag = self.egress_mac.as_mut().unwrap().digest(); 480 | 481 | out.reserve(ECIES::header_len()); 482 | out.extend_from_slice(&header); 483 | out.extend_from_slice(tag.as_bytes()); 484 | } 485 | 486 | pub fn read_header(&mut self, data: &mut [u8]) -> Result { 487 | let (header_bytes, mac_bytes) = data.split_at_mut(16); 488 | let mut header = 
HeaderBytes::from_mut_slice(header_bytes); 489 | let mac = H128::from_slice(&mac_bytes[..16]); 490 | 491 | self.ingress_mac.as_mut().unwrap().update_header(&header); 492 | let check_mac = self.ingress_mac.as_mut().unwrap().digest(); 493 | if check_mac != mac { 494 | return Err(ECIESError::TagCheckFailed); 495 | } 496 | 497 | self.ingress_aes.as_mut().unwrap().decrypt(&mut header); 498 | self.body_size = Some( 499 | usize::try_from(header.as_slice().read_uint::(3)?) 500 | .context("excessive body len")?, 501 | ); 502 | 503 | Ok(self.body_size.unwrap()) 504 | } 505 | 506 | pub const fn header_len() -> usize { 507 | 32 508 | } 509 | 510 | pub fn body_len(&self) -> usize { 511 | let len = self.body_size.unwrap(); 512 | (if len % 16 == 0 { 513 | len 514 | } else { 515 | (len / 16 + 1) * 16 516 | }) + 16 517 | } 518 | 519 | #[cfg(test)] 520 | fn create_body(&mut self, data: &[u8]) -> BytesMut { 521 | let mut out = BytesMut::new(); 522 | self.write_body(&mut out, data); 523 | out 524 | } 525 | 526 | pub fn write_body(&mut self, out: &mut BytesMut, data: &[u8]) { 527 | let len = if data.len() % 16 == 0 { 528 | data.len() 529 | } else { 530 | (data.len() / 16 + 1) * 16 531 | }; 532 | let old_len = out.len(); 533 | out.resize(old_len + len, 0); 534 | 535 | let mut encrypted = &mut out[old_len..old_len + len]; 536 | encrypted[..data.len()].copy_from_slice(data); 537 | 538 | self.egress_aes.as_mut().unwrap().encrypt(&mut encrypted); 539 | self.egress_mac.as_mut().unwrap().update_body(&encrypted); 540 | let tag = self.egress_mac.as_mut().unwrap().digest(); 541 | 542 | out.extend_from_slice(tag.as_bytes()); 543 | } 544 | 545 | pub fn read_body<'a>(&mut self, data: &'a mut [u8]) -> Result<&'a mut [u8], ECIESError> { 546 | let (body, mac_bytes) = data.split_at_mut(data.len() - 16); 547 | let mac = H128::from_slice(mac_bytes); 548 | self.ingress_mac.as_mut().unwrap().update_body(body); 549 | let check_mac = self.ingress_mac.as_mut().unwrap().digest(); 550 | if check_mac != mac { 551 | return Err(ECIESError::TagCheckFailed); 552 | } 553 | 554 | let size = self.body_size.unwrap(); 555 | self.body_size = None; 556 | let mut ret = body; 557 | self.ingress_aes.as_mut().unwrap().decrypt(&mut ret); 558 | Ok(ret.split_at_mut(size).0) 559 | } 560 | } 561 | 562 | #[cfg(test)] 563 | mod tests { 564 | use super::*; 565 | use hex_literal::hex; 566 | 567 | #[test] 568 | fn ecdh() { 569 | let our_secret_key = SecretKey::from_slice(&hex!( 570 | "202a36e24c3eb39513335ec99a7619bad0e7dc68d69401b016253c7d26dc92f8" 571 | )) 572 | .unwrap(); 573 | let remote_public_key = id2pk(hex!("d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666").into()).unwrap(); 574 | 575 | assert_eq!( 576 | ecdh_x(&remote_public_key, &our_secret_key), 577 | hex!("821ce7e01ea11b111a52b2dafae8a3031a372d83bdf1a78109fa0783c2b9d5d3").into() 578 | ) 579 | } 580 | 581 | #[test] 582 | fn communicate() { 583 | let server_secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); 584 | let server_public_key = PublicKey::from_secret_key(SECP256K1, &server_secret_key); 585 | let client_secret_key = SecretKey::new(&mut secp256k1::rand::thread_rng()); 586 | 587 | let mut server_ecies = ECIES::new_server(server_secret_key).unwrap(); 588 | let mut client_ecies = 589 | ECIES::new_client(client_secret_key, pk2id(&server_public_key)).unwrap(); 590 | 591 | // Handshake 592 | let mut auth = client_ecies.create_auth(); 593 | server_ecies.read_auth(&mut auth).unwrap(); 594 | let mut ack = 
server_ecies.create_ack(); 595 | client_ecies.read_ack(&mut ack).unwrap(); 596 | 597 | let server_to_client_data = [0_u8, 1_u8, 2_u8, 3_u8, 4_u8]; 598 | let client_to_server_data = [5_u8, 6_u8, 7_u8]; 599 | 600 | // Test server to client 1 601 | let mut header = server_ecies.create_header(server_to_client_data.len()); 602 | assert_eq!(header.len(), ECIES::header_len()); 603 | client_ecies.read_header(&mut *header).unwrap(); 604 | let mut body = server_ecies.create_body(&server_to_client_data); 605 | assert_eq!(body.len(), client_ecies.body_len()); 606 | let ret = client_ecies.read_body(&mut *body).unwrap(); 607 | assert_eq!(ret, server_to_client_data); 608 | 609 | // Test client to server 1 610 | server_ecies 611 | .read_header(&mut *client_ecies.create_header(client_to_server_data.len())) 612 | .unwrap(); 613 | let mut b = client_ecies.create_body(&client_to_server_data); 614 | let ret = server_ecies.read_body(&mut b).unwrap(); 615 | assert_eq!(ret, client_to_server_data); 616 | 617 | // Test server to client 2 618 | client_ecies 619 | .read_header(&mut *server_ecies.create_header(server_to_client_data.len())) 620 | .unwrap(); 621 | let mut b = server_ecies.create_body(&server_to_client_data); 622 | let ret = client_ecies.read_body(&mut b).unwrap(); 623 | assert_eq!(ret, server_to_client_data); 624 | 625 | // Test server to client 3 626 | client_ecies 627 | .read_header(&mut *server_ecies.create_header(server_to_client_data.len())) 628 | .unwrap(); 629 | let mut b = server_ecies.create_body(&server_to_client_data); 630 | let ret = client_ecies.read_body(&mut b).unwrap(); 631 | assert_eq!(ret, server_to_client_data); 632 | 633 | // Test client to server 2 634 | server_ecies 635 | .read_header(&mut *client_ecies.create_header(client_to_server_data.len())) 636 | .unwrap(); 637 | let mut b = client_ecies.create_body(&client_to_server_data); 638 | let ret = server_ecies.read_body(&mut b).unwrap(); 639 | assert_eq!(ret, client_to_server_data); 640 | 641 | // Test client to server 3 642 | server_ecies 643 | .read_header(&mut *client_ecies.create_header(client_to_server_data.len())) 644 | .unwrap(); 645 | let mut b = client_ecies.create_body(&client_to_server_data); 646 | let ret = server_ecies.read_body(&mut b).unwrap(); 647 | assert_eq!(ret, client_to_server_data); 648 | } 649 | 650 | fn eip8_test_server_key() -> SecretKey { 651 | SecretKey::from_slice(&hex!( 652 | "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291" 653 | )) 654 | .unwrap() 655 | } 656 | 657 | fn eip8_test_client() -> ECIES { 658 | let client_static_key = SecretKey::from_slice(&hex!( 659 | "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee" 660 | )) 661 | .unwrap(); 662 | 663 | let client_ephemeral_key = SecretKey::from_slice(&hex!( 664 | "869d6ecf5211f1cc60418a13b9d870b22959d0c16f02bec714c960dd2298a32d" 665 | )) 666 | .unwrap(); 667 | 668 | let client_nonce = H256(hex!( 669 | "7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6" 670 | )); 671 | 672 | let server_id = pk2id(&PublicKey::from_secret_key( 673 | SECP256K1, 674 | &eip8_test_server_key(), 675 | )); 676 | 677 | ECIES::new_static_client( 678 | client_static_key, 679 | server_id, 680 | client_nonce, 681 | client_ephemeral_key, 682 | ) 683 | .unwrap() 684 | } 685 | 686 | fn eip8_test_server() -> ECIES { 687 | let server_ephemeral_key = SecretKey::from_slice(&hex!( 688 | "e238eb8e04fee6511ab04c6dd3c89ce097b11f25d584863ac2b6d5b35b1847e4" 689 | )) 690 | .unwrap(); 691 | 692 | let server_nonce = H256(hex!( 693 | 
"559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd" 694 | )); 695 | 696 | ECIES::new_static_server(eip8_test_server_key(), server_nonce, server_ephemeral_key) 697 | .unwrap() 698 | } 699 | 700 | #[test] 701 | /// Test vectors from https://eips.ethereum.org/EIPS/eip-8 702 | fn eip8_test() { 703 | // EIP-8 format with version 4 and no additional list elements 704 | let auth2 = hex!( 705 | " 706 | 01b304ab7578555167be8154d5cc456f567d5ba302662433674222360f08d5f1534499d3678b513b 707 | 0fca474f3a514b18e75683032eb63fccb16c156dc6eb2c0b1593f0d84ac74f6e475f1b8d56116b84 708 | 9634a8c458705bf83a626ea0384d4d7341aae591fae42ce6bd5c850bfe0b999a694a49bbbaf3ef6c 709 | da61110601d3b4c02ab6c30437257a6e0117792631a4b47c1d52fc0f8f89caadeb7d02770bf999cc 710 | 147d2df3b62e1ffb2c9d8c125a3984865356266bca11ce7d3a688663a51d82defaa8aad69da39ab6 711 | d5470e81ec5f2a7a47fb865ff7cca21516f9299a07b1bc63ba56c7a1a892112841ca44b6e0034dee 712 | 70c9adabc15d76a54f443593fafdc3b27af8059703f88928e199cb122362a4b35f62386da7caad09 713 | c001edaeb5f8a06d2b26fb6cb93c52a9fca51853b68193916982358fe1e5369e249875bb8d0d0ec3 714 | 6f917bc5e1eafd5896d46bd61ff23f1a863a8a8dcd54c7b109b771c8e61ec9c8908c733c0263440e 715 | 2aa067241aaa433f0bb053c7b31a838504b148f570c0ad62837129e547678c5190341e4f1693956c 716 | 3bf7678318e2d5b5340c9e488eefea198576344afbdf66db5f51204a6961a63ce072c8926c 717 | " 718 | ); 719 | 720 | // EIP-8 format with version 56 and 3 additional list elements (sent from A to B) 721 | let auth3 = hex!( 722 | " 723 | 01b8044c6c312173685d1edd268aa95e1d495474c6959bcdd10067ba4c9013df9e40ff45f5bfd6f7 724 | 2471f93a91b493f8e00abc4b80f682973de715d77ba3a005a242eb859f9a211d93a347fa64b597bf 725 | 280a6b88e26299cf263b01b8dfdb712278464fd1c25840b995e84d367d743f66c0e54a586725b7bb 726 | f12acca27170ae3283c1073adda4b6d79f27656993aefccf16e0d0409fe07db2dc398a1b7e8ee93b 727 | cd181485fd332f381d6a050fba4c7641a5112ac1b0b61168d20f01b479e19adf7fdbfa0905f63352 728 | bfc7e23cf3357657455119d879c78d3cf8c8c06375f3f7d4861aa02a122467e069acaf513025ff19 729 | 6641f6d2810ce493f51bee9c966b15c5043505350392b57645385a18c78f14669cc4d960446c1757 730 | 1b7c5d725021babbcd786957f3d17089c084907bda22c2b2675b4378b114c601d858802a55345a15 731 | 116bc61da4193996187ed70d16730e9ae6b3bb8787ebcaea1871d850997ddc08b4f4ea668fbf3740 732 | 7ac044b55be0908ecb94d4ed172ece66fd31bfdadf2b97a8bc690163ee11f5b575a4b44e36e2bfb2 733 | f0fce91676fd64c7773bac6a003f481fddd0bae0a1f31aa27504e2a533af4cef3b623f4791b2cca6 734 | d490 735 | " 736 | ); 737 | 738 | // EIP-8 format with version 4 and no additional list elements (sent from B to A) 739 | let ack2 = hex!( 740 | " 741 | 01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217c9b917788989470 742 | b0e330cc6e4fb383c0340ed85fab836ec9fb8a49672712aeabbdfd1e837c1ff4cace34311cd7f4de 743 | 05d59279e3524ab26ef753a0095637ac88f2b499b9914b5f64e143eae548a1066e14cd2f4bd7f814 744 | c4652f11b254f8a2d0191e2f5546fae6055694aed14d906df79ad3b407d94692694e259191cde171 745 | ad542fc588fa2b7333313d82a9f887332f1dfc36cea03f831cb9a23fea05b33deb999e85489e645f 746 | 6aab1872475d488d7bd6c7c120caf28dbfc5d6833888155ed69d34dbdc39c1f299be1057810f34fb 747 | e754d021bfca14dc989753d61c413d261934e1a9c67ee060a25eefb54e81a4d14baff922180c395d 748 | 3f998d70f46f6b58306f969627ae364497e73fc27f6d17ae45a413d322cb8814276be6ddd13b885b 749 | 201b943213656cde498fa0e9ddc8e0b8f8a53824fbd82254f3e2c17e8eaea009c38b4aa0a3f306e8 750 | 797db43c25d68e86f262e564086f59a2fc60511c42abfb3057c247a8a8fe4fb3ccbadde17514b7ac 751 | 
8000cdb6a912778426260c47f38919a91f25f4b5ffb455d6aaaf150f7e5529c100ce62d6d92826a7 752 | 1778d809bdf60232ae21ce8a437eca8223f45ac37f6487452ce626f549b3b5fdee26afd2072e4bc7 753 | 5833c2464c805246155289f4 754 | " 755 | ); 756 | 757 | // EIP-8 format with version 57 and 3 additional list elements (sent from B to A) 758 | let ack3 = hex!( 759 | " 760 | 01f004076e58aae772bb101ab1a8e64e01ee96e64857ce82b1113817c6cdd52c09d26f7b90981cd7 761 | ae835aeac72e1573b8a0225dd56d157a010846d888dac7464baf53f2ad4e3d584531fa203658fab0 762 | 3a06c9fd5e35737e417bc28c1cbf5e5dfc666de7090f69c3b29754725f84f75382891c561040ea1d 763 | dc0d8f381ed1b9d0d4ad2a0ec021421d847820d6fa0ba66eaf58175f1b235e851c7e2124069fbc20 764 | 2888ddb3ac4d56bcbd1b9b7eab59e78f2e2d400905050f4a92dec1c4bdf797b3fc9b2f8e84a482f3 765 | d800386186712dae00d5c386ec9387a5e9c9a1aca5a573ca91082c7d68421f388e79127a5177d4f8 766 | 590237364fd348c9611fa39f78dcdceee3f390f07991b7b47e1daa3ebcb6ccc9607811cb17ce51f1 767 | c8c2c5098dbdd28fca547b3f58c01a424ac05f869f49c6a34672ea2cbbc558428aa1fe48bbfd6115 768 | 8b1b735a65d99f21e70dbc020bfdface9f724a0d1fb5895db971cc81aa7608baa0920abb0a565c9c 769 | 436e2fd13323428296c86385f2384e408a31e104670df0791d93e743a3a5194ee6b076fb6323ca59 770 | 3011b7348c16cf58f66b9633906ba54a2ee803187344b394f75dd2e663a57b956cb830dd7a908d4f 771 | 39a2336a61ef9fda549180d4ccde21514d117b6c6fd07a9102b5efe710a32af4eeacae2cb3b1dec0 772 | 35b9593b48b9d3ca4c13d245d5f04169b0b1 773 | " 774 | ); 775 | 776 | eip8_test_server().read_auth(&mut auth2.to_vec()).unwrap(); 777 | eip8_test_server().read_auth(&mut auth3.to_vec()).unwrap(); 778 | 779 | let mut test_client = eip8_test_client(); 780 | let mut test_server = eip8_test_server(); 781 | 782 | test_server 783 | .read_auth(&mut test_client.create_auth()) 784 | .unwrap(); 785 | 786 | test_client.read_ack(&mut test_server.create_ack()).unwrap(); 787 | 788 | test_client.read_ack(&mut ack2.to_vec()).unwrap(); 789 | test_client.read_ack(&mut ack3.to_vec()).unwrap(); 790 | } 791 | } 792 | -------------------------------------------------------------------------------- /src/ecies/proto.rs: -------------------------------------------------------------------------------- 1 | use super::algorithm::ECIES; 2 | use crate::{errors::ECIESError, transport::Transport, types::PeerId}; 3 | use anyhow::{bail, Context as _}; 4 | use bytes::{Bytes, BytesMut}; 5 | use futures::{ready, Sink, SinkExt}; 6 | use secp256k1::SecretKey; 7 | use std::{ 8 | fmt::Debug, 9 | io, 10 | pin::Pin, 11 | task::{Context, Poll}, 12 | }; 13 | use tokio_stream::*; 14 | use tokio_util::codec::*; 15 | use tracing::*; 16 | 17 | #[derive(Clone, Copy, Debug, PartialEq, Eq)] 18 | /// Current ECIES state of a connection 19 | pub enum ECIESState { 20 | Auth, 21 | Ack, 22 | Header, 23 | Body, 24 | } 25 | 26 | #[derive(Clone, Debug, PartialEq, Eq)] 27 | /// Raw egress values for an ECIES protocol 28 | pub enum EgressECIESValue { 29 | Auth, 30 | Ack, 31 | Message(Bytes), 32 | } 33 | 34 | #[derive(Clone, Debug, PartialEq, Eq)] 35 | /// Raw ingress values for an ECIES protocol 36 | pub enum IngressECIESValue { 37 | AuthReceive(PeerId), 38 | Ack, 39 | Message(Bytes), 40 | } 41 | 42 | /// Tokio codec for ECIES 43 | #[derive(Debug)] 44 | pub struct ECIESCodec { 45 | ecies: ECIES, 46 | state: ECIESState, 47 | } 48 | 49 | impl ECIESCodec { 50 | /// Create a new server codec using the given secret key 51 | pub fn new_server(secret_key: SecretKey) -> Result { 52 | Ok(Self { 53 | ecies: ECIES::new_server(secret_key)?, 54 | state: ECIESState::Auth, 55 | }) 56 | } 57 | 58 | /// 
Create a new client codec using the given secret key and the server's public id 59 | pub fn new_client(secret_key: SecretKey, remote_id: PeerId) -> Result { 60 | Ok(Self { 61 | ecies: ECIES::new_client(secret_key, remote_id)?, 62 | state: ECIESState::Auth, 63 | }) 64 | } 65 | } 66 | 67 | impl Decoder for ECIESCodec { 68 | type Item = IngressECIESValue; 69 | type Error = io::Error; 70 | 71 | #[instrument(level = "trace", skip(self, buf), fields(peer=&*format!("{:?}", self.ecies.remote_id.map(|s| s.to_string())), state=&*format!("{:?}", self.state)))] 72 | fn decode(&mut self, buf: &mut BytesMut) -> Result, Self::Error> { 73 | loop { 74 | match self.state { 75 | ECIESState::Auth => { 76 | trace!("parsing auth"); 77 | if buf.len() < 2 { 78 | return Ok(None); 79 | } 80 | 81 | let payload_size = u16::from_be_bytes([buf[0], buf[1]]) as usize; 82 | let total_size = payload_size + 2; 83 | 84 | if buf.len() < total_size { 85 | trace!("current len {}, need {}", buf.len(), total_size); 86 | return Ok(None); 87 | } 88 | 89 | self.ecies.read_auth(&mut *buf.split_to(total_size))?; 90 | 91 | self.state = ECIESState::Header; 92 | return Ok(Some(IngressECIESValue::AuthReceive(self.ecies.remote_id()))); 93 | } 94 | ECIESState::Ack => { 95 | trace!("parsing ack with len {}", buf.len()); 96 | if buf.len() < 2 { 97 | return Ok(None); 98 | } 99 | 100 | let payload_size = u16::from_be_bytes([buf[0], buf[1]]) as usize; 101 | let total_size = payload_size + 2; 102 | 103 | if buf.len() < total_size { 104 | trace!("current len {}, need {}", buf.len(), total_size); 105 | return Ok(None); 106 | } 107 | 108 | self.ecies.read_ack(&mut *buf.split_to(total_size))?; 109 | 110 | self.state = ECIESState::Header; 111 | return Ok(Some(IngressECIESValue::Ack)); 112 | } 113 | ECIESState::Header => { 114 | if buf.len() < ECIES::header_len() { 115 | return Ok(None); 116 | } 117 | 118 | self.ecies 119 | .read_header(&mut *buf.split_to(ECIES::header_len()))?; 120 | 121 | self.state = ECIESState::Body; 122 | } 123 | ECIESState::Body => { 124 | if buf.len() < self.ecies.body_len() { 125 | return Ok(None); 126 | } 127 | 128 | let mut data = buf.split_to(self.ecies.body_len()); 129 | let ret = Bytes::copy_from_slice(&self.ecies.read_body(&mut *data)?); 130 | 131 | self.state = ECIESState::Header; 132 | return Ok(Some(IngressECIESValue::Message(ret))); 133 | } 134 | } 135 | } 136 | } 137 | } 138 | 139 | impl Encoder for ECIESCodec { 140 | type Error = io::Error; 141 | 142 | #[instrument(level = "trace", skip(self, buf), fields(peer=&*format!("{:?}", self.ecies.remote_id.map(|s| s.to_string())), state=&*format!("{:?}", self.state)))] 143 | fn encode(&mut self, item: EgressECIESValue, buf: &mut BytesMut) -> Result<(), Self::Error> { 144 | match item { 145 | EgressECIESValue::Auth => { 146 | self.state = ECIESState::Ack; 147 | self.ecies.write_auth(buf); 148 | Ok(()) 149 | } 150 | EgressECIESValue::Ack => { 151 | self.state = ECIESState::Header; 152 | self.ecies.write_ack(buf); 153 | Ok(()) 154 | } 155 | EgressECIESValue::Message(data) => { 156 | self.ecies.write_header(buf, data.len()); 157 | self.ecies.write_body(buf, &data); 158 | Ok(()) 159 | } 160 | } 161 | } 162 | } 163 | 164 | /// `ECIES` stream over TCP exchanging raw bytes 165 | #[derive(Debug)] 166 | pub struct ECIESStream { 167 | stream: Framed, 168 | remote_id: PeerId, 169 | } 170 | 171 | impl ECIESStream 172 | where 173 | Io: Transport, 174 | { 175 | /// Connect to an `ECIES` server 176 | #[instrument(skip(transport, secret_key), fields(peer=&*format!("{:?}", 
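// Illustrative sketch (not part of the crate sources): the auth and ack frames that the
// decoder above waits for are prefixed with a 2-byte big-endian payload length (EIP-8
// framing), so the total number of bytes to take off the wire is simply the prefix value
// plus the two prefix bytes themselves.
fn eip8_frame_size(prefix: [u8; 2]) -> usize {
    usize::from(u16::from_be_bytes(prefix)) + 2
}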
transport.remote_addr())))] 177 | pub async fn connect( 178 | transport: Io, 179 | secret_key: SecretKey, 180 | remote_id: PeerId, 181 | ) -> anyhow::Result { 182 | let ecies = ECIESCodec::new_client(secret_key, remote_id) 183 | .map_err(|_| io::Error::new(io::ErrorKind::Other, "invalid handshake"))?; 184 | 185 | let mut transport = ecies.framed(transport); 186 | 187 | trace!("sending ecies auth ..."); 188 | transport.send(EgressECIESValue::Auth).await?; 189 | 190 | trace!("waiting for ecies ack ..."); 191 | let ack = transport.try_next().await?; 192 | 193 | trace!("parsing ecies ack ..."); 194 | if matches!(ack, Some(IngressECIESValue::Ack)) { 195 | Ok(Self { 196 | stream: transport, 197 | remote_id, 198 | }) 199 | } else { 200 | bail!("invalid handshake: expected ack, got {:?} instead", ack) 201 | } 202 | } 203 | 204 | /// Listen on a just connected ECIES client 205 | #[instrument(skip(transport, secret_key), fields(peer=&*format!("{:?}", transport.remote_addr())))] 206 | pub async fn incoming(transport: Io, secret_key: SecretKey) -> anyhow::Result { 207 | let ecies = ECIESCodec::new_server(secret_key).context("handshake error")?; 208 | 209 | debug!("incoming ecies stream ..."); 210 | let mut transport = ecies.framed(transport); 211 | let ack = transport.try_next().await?; 212 | 213 | debug!("receiving ecies auth"); 214 | let remote_id = match ack { 215 | Some(IngressECIESValue::AuthReceive(remote_id)) => remote_id, 216 | other => { 217 | debug!("expected auth, got {:?} instead", other); 218 | bail!("invalid handshake"); 219 | } 220 | }; 221 | 222 | debug!("sending ecies ack ..."); 223 | transport 224 | .send(EgressECIESValue::Ack) 225 | .await 226 | .context("failed to send ECIES auth")?; 227 | 228 | Ok(Self { 229 | stream: transport, 230 | remote_id, 231 | }) 232 | } 233 | 234 | /// Get the remote id 235 | pub fn remote_id(&self) -> PeerId { 236 | self.remote_id 237 | } 238 | } 239 | 240 | impl Stream for ECIESStream 241 | where 242 | Io: Transport, 243 | { 244 | type Item = Result; 245 | 246 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 247 | match ready!(Pin::new(&mut self.get_mut().stream).poll_next(cx)) { 248 | Some(Ok(IngressECIESValue::Message(body))) => Poll::Ready(Some(Ok(body))), 249 | Some(other) => Poll::Ready(Some(Err(io::Error::new( 250 | io::ErrorKind::Other, 251 | format!( 252 | "ECIES stream protocol error: expected message, received {:?}", 253 | other 254 | ), 255 | )))), 256 | None => Poll::Ready(None), 257 | } 258 | } 259 | } 260 | 261 | impl Sink for ECIESStream 262 | where 263 | Io: Transport, 264 | { 265 | type Error = io::Error; 266 | 267 | fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 268 | Pin::new(&mut self.get_mut().stream).poll_ready(cx) 269 | } 270 | 271 | fn start_send(self: Pin<&mut Self>, item: Bytes) -> Result<(), Self::Error> { 272 | let this = self.get_mut(); 273 | Pin::new(&mut this.stream).start_send(EgressECIESValue::Message(item))?; 274 | 275 | Ok(()) 276 | } 277 | 278 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 279 | Pin::new(&mut self.get_mut().stream).poll_flush(cx) 280 | } 281 | 282 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 283 | Pin::new(&mut self.get_mut().stream).poll_close(cx) 284 | } 285 | } 286 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use thiserror::Error; 3 | 4 | #[derive(Debug, 
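// Illustrative sketch (not part of the crate sources): dialing a remote node with the
// `ECIESStream` defined above and exchanging raw frames. The import path assumes the
// `ecies` module re-exports `ECIESStream` (peer.rs relies on `crate::ecies::ECIESStream`);
// `addr`, `remote_id` and `secret_key` are caller-supplied placeholders.
async fn ecies_roundtrip(
    addr: std::net::SocketAddr,
    remote_id: devp2p::PeerId,
    secret_key: secp256k1::SecretKey,
) -> anyhow::Result<()> {
    use futures::SinkExt;
    use tokio_stream::StreamExt;

    let tcp = tokio::net::TcpStream::connect(addr).await?;
    // `connect` runs the auth/ack handshake before returning the framed stream.
    let mut stream = devp2p::ecies::ECIESStream::connect(tcp, secret_key, remote_id).await?;

    // Frames are opaque byte strings at this layer; RLPx assigns them meaning one layer up.
    stream
        .send(bytes::Bytes::from_static(&[0x01, 0x02, 0x03]))
        .await?;
    if let Some(frame) = stream.try_next().await? {
        println!("received a {}-byte frame", frame.len());
    }
    Ok(())
}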
Error)] 5 | pub enum ECIESError { 6 | #[error("IO error")] 7 | IO(#[from] io::Error), 8 | #[error("tag check failure")] 9 | TagCheckFailed, 10 | #[error("invalid auth data")] 11 | InvalidAuthData, 12 | #[error("invalid ack data")] 13 | InvalidAckData, 14 | #[error("other")] 15 | Other(#[from] anyhow::Error), 16 | } 17 | 18 | impl From for io::Error { 19 | fn from(error: ECIESError) -> Self { 20 | Self::new(io::ErrorKind::Other, format!("ECIES error: {:?}", error)) 21 | } 22 | } 23 | 24 | impl From for ECIESError { 25 | fn from(error: secp256k1::Error) -> Self { 26 | Self::Other(error.into()) 27 | } 28 | } 29 | 30 | impl From for ECIESError { 31 | fn from(error: rlp::DecoderError) -> Self { 32 | Self::Other(error.into()) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Ethereum devp2p protocol implementation 2 | //! 3 | //! It is layered in the following way: 4 | //! * `RLPxNode` which represents the whole pool of connected peers. It handles message routing and peer management. 5 | //! * `MuxServer` which provides a request-response API to otherwise stateless P2P protocol. 6 | //! * `EthIngressServer` which `MuxServer` calls into when new requests and gossip messages arrive. 7 | //! * `MuxServer` itself implements `EthProtocol` which is a simple gateway to abstract Ethereum network. 8 | 9 | #![allow(clippy::large_enum_variant, clippy::upper_case_acronyms)] 10 | 11 | mod disc; 12 | pub mod ecies; 13 | mod errors; 14 | mod mac; 15 | mod node_filter; 16 | mod peer; 17 | mod rlpx; 18 | pub mod transport; 19 | mod types; 20 | pub mod util; 21 | 22 | pub use disc::*; 23 | pub use peer::{DisconnectReason, PeerStream}; 24 | pub use rlpx::{ListenOptions, Swarm, SwarmBuilder}; 25 | pub use types::{ 26 | CapabilityId, CapabilityInfo, CapabilityName, CapabilityServer, CapabilityVersion, 27 | InboundEvent, Message, NodeRecord, OutboundEvent, PeerId, 28 | }; 29 | -------------------------------------------------------------------------------- /src/mac.rs: -------------------------------------------------------------------------------- 1 | use aes::*; 2 | use block_modes::{block_padding::NoPadding, BlockMode, Ecb}; 3 | use ethereum_types::{H128, H256}; 4 | use generic_array::{typenum::U16, GenericArray}; 5 | use sha3::{Digest, Keccak256}; 6 | 7 | pub type HeaderBytes = GenericArray; 8 | 9 | #[derive(Debug)] 10 | pub struct MAC { 11 | secret: H256, 12 | hasher: Keccak256, 13 | } 14 | 15 | impl MAC { 16 | pub fn new(secret: H256) -> Self { 17 | Self { 18 | secret, 19 | hasher: Keccak256::new(), 20 | } 21 | } 22 | 23 | pub fn update(&mut self, data: &[u8]) { 24 | self.hasher.update(data) 25 | } 26 | 27 | pub fn update_header(&mut self, data: &HeaderBytes) { 28 | let aes = Ecb::<_, NoPadding>::new( 29 | Aes256::new_varkey(self.secret.as_ref()).unwrap(), 30 | &Default::default(), 31 | ); 32 | let mut encrypted = self.digest().to_fixed_bytes(); 33 | aes.encrypt(&mut encrypted, H128::len_bytes()).unwrap(); 34 | for i in 0..data.len() { 35 | encrypted[i] ^= data[i]; 36 | } 37 | self.hasher.update(encrypted); 38 | } 39 | 40 | pub fn update_body(&mut self, data: &[u8]) { 41 | self.hasher.update(data); 42 | let prev = self.digest(); 43 | let aes = Ecb::<_, NoPadding>::new( 44 | Aes256::new_varkey(self.secret.as_ref()).unwrap(), 45 | &Default::default(), 46 | ); 47 | let mut encrypted = self.digest().to_fixed_bytes(); 48 | aes.encrypt(&mut encrypted, 
H128::len_bytes()).unwrap(); 49 | for i in 0..16 { 50 | encrypted[i] ^= prev[i]; 51 | } 52 | self.hasher.update(encrypted); 53 | } 54 | 55 | pub fn digest(&self) -> H128 { 56 | H128::from_slice(&self.hasher.clone().finalize()[0..16]) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/node_filter.rs: -------------------------------------------------------------------------------- 1 | use crate::types::PeerId; 2 | use std::{ 3 | collections::HashSet, 4 | fmt::Debug, 5 | sync::{ 6 | atomic::{AtomicUsize, Ordering}, 7 | Arc, 8 | }, 9 | }; 10 | 11 | pub trait NodeFilter: Debug + Send + 'static { 12 | fn max_peers(&self) -> usize; 13 | fn is_banned(&self, id: PeerId) -> bool; 14 | fn allow(&self, pool_size: usize, id: PeerId) -> bool { 15 | pool_size < self.max_peers() && !self.is_banned(id) 16 | } 17 | fn ban(&mut self, id: PeerId); 18 | } 19 | 20 | #[derive(Debug)] 21 | pub struct MemoryNodeFilter { 22 | peer_limiter: Arc, 23 | ban_list: HashSet, 24 | } 25 | 26 | impl MemoryNodeFilter { 27 | pub fn new(peer_limiter: Arc) -> Self { 28 | Self { 29 | peer_limiter, 30 | ban_list: Default::default(), 31 | } 32 | } 33 | } 34 | 35 | impl NodeFilter for MemoryNodeFilter { 36 | fn max_peers(&self) -> usize { 37 | self.peer_limiter.load(Ordering::Relaxed) 38 | } 39 | 40 | fn is_banned(&self, id: PeerId) -> bool { 41 | self.ban_list.contains(&id) 42 | } 43 | 44 | fn ban(&mut self, id: PeerId) { 45 | self.ban_list.insert(id); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/peer.rs: -------------------------------------------------------------------------------- 1 | use crate::{ecies::ECIESStream, transport::Transport, types::*, util::pk2id}; 2 | use anyhow::{anyhow, bail, Context as _}; 3 | use bytes::{Bytes, BytesMut}; 4 | use derive_more::Display; 5 | use enum_primitive_derive::Primitive; 6 | use futures::{ready, Sink, SinkExt}; 7 | use num_traits::*; 8 | use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; 9 | use secp256k1::{PublicKey, SecretKey, SECP256K1}; 10 | use std::{ 11 | fmt::Debug, 12 | io, 13 | pin::Pin, 14 | task::{Context, Poll}, 15 | }; 16 | use tokio_stream::{Stream, StreamExt}; 17 | use tracing::*; 18 | 19 | const MAX_PAYLOAD_SIZE: usize = 16 * 1024 * 1024; 20 | 21 | /// RLPx disconnect reason. 22 | #[derive(Clone, Copy, Debug, Display, Primitive)] 23 | pub enum DisconnectReason { 24 | #[display(fmt = "disconnect requested")] 25 | DisconnectRequested = 0x00, 26 | #[display(fmt = "TCP sub-system error")] 27 | TcpSubsystemError = 0x01, 28 | #[display(fmt = "breach of protocol, e.g. a malformed message, bad RLP, ...")] 29 | ProtocolBreach = 0x02, 30 | #[display(fmt = "useless peer")] 31 | UselessPeer = 0x03, 32 | #[display(fmt = "too many peers")] 33 | TooManyPeers = 0x04, 34 | #[display(fmt = "already connected")] 35 | AlreadyConnected = 0x05, 36 | #[display(fmt = "incompatible P2P protocol version")] 37 | IncompatibleP2PProtocolVersion = 0x06, 38 | #[display(fmt = "null node identity received - this is automatically invalid")] 39 | NullNodeIdentity = 0x07, 40 | #[display(fmt = "client quitting")] 41 | ClientQuitting = 0x08, 42 | #[display(fmt = "unexpected identity in handshake")] 43 | UnexpectedHandshakeIdentity = 0x09, 44 | #[display(fmt = "identity is the same as this node (i.e. 
connected to itself)")] 45 | ConnectedToSelf = 0x0a, 46 | #[display(fmt = "ping timeout")] 47 | PingTimeout = 0x0b, 48 | #[display(fmt = "some other reason specific to a subprotocol")] 49 | SubprotocolSpecific = 0x10, 50 | } 51 | 52 | /// RLPx protocol version. 53 | #[derive(Copy, Clone, Debug, Primitive)] 54 | pub enum ProtocolVersion { 55 | V4 = 4, 56 | V5 = 5, 57 | } 58 | 59 | #[derive(Clone, Debug, PartialEq, Eq)] 60 | pub struct CapabilityMessage { 61 | pub name: CapabilityName, 62 | pub version: usize, 63 | } 64 | 65 | impl Encodable for CapabilityMessage { 66 | fn rlp_append(&self, s: &mut RlpStream) { 67 | s.begin_list(2); 68 | s.append(&self.name); 69 | s.append(&self.version); 70 | } 71 | } 72 | 73 | impl Decodable for CapabilityMessage { 74 | fn decode(rlp: &Rlp) -> Result { 75 | Ok(Self { 76 | name: rlp.val_at(0)?, 77 | version: rlp.val_at(1)?, 78 | }) 79 | } 80 | } 81 | 82 | #[derive(Clone, Debug)] 83 | pub struct HelloMessage { 84 | pub protocol_version: usize, 85 | pub client_version: String, 86 | pub capabilities: Vec, 87 | pub port: u16, 88 | pub id: PeerId, 89 | } 90 | 91 | impl Encodable for HelloMessage { 92 | fn rlp_append(&self, s: &mut RlpStream) { 93 | s.begin_list(5); 94 | s.append(&self.protocol_version); 95 | s.append(&self.client_version); 96 | s.append_list(&self.capabilities); 97 | s.append(&self.port); 98 | s.append(&self.id); 99 | } 100 | } 101 | 102 | impl Decodable for HelloMessage { 103 | fn decode(rlp: &Rlp) -> Result { 104 | Ok(Self { 105 | protocol_version: rlp.val_at(0)?, 106 | client_version: rlp.val_at(1)?, 107 | capabilities: rlp.list_at(2)?, 108 | port: rlp.val_at(3)?, 109 | id: rlp.val_at(4)?, 110 | }) 111 | } 112 | } 113 | 114 | #[derive(Debug)] 115 | struct Snappy { 116 | encoder: snap::raw::Encoder, 117 | decoder: snap::raw::Decoder, 118 | } 119 | 120 | /// RLPx transport peer stream 121 | #[allow(unused)] 122 | #[derive(Debug)] 123 | pub struct PeerStream { 124 | stream: ECIESStream, 125 | client_version: String, 126 | shared_capabilities: Vec, 127 | port: u16, 128 | id: PeerId, 129 | remote_id: PeerId, 130 | 131 | snappy: Option, 132 | 133 | disconnected: bool, 134 | } 135 | 136 | impl PeerStream 137 | where 138 | Io: Transport, 139 | { 140 | /// Remote public id of this peer 141 | pub fn remote_id(&self) -> PeerId { 142 | self.remote_id 143 | } 144 | 145 | /// Get all capabilities of this peer stream 146 | pub fn capabilities(&self) -> &[CapabilityInfo] { 147 | &self.shared_capabilities 148 | } 149 | 150 | /// Connect to a peer over TCP 151 | #[instrument( 152 | skip( 153 | transport, 154 | secret_key, 155 | protocol_version, 156 | client_version, 157 | capabilities, 158 | port, 159 | remote_id 160 | ), 161 | fields() 162 | )] 163 | pub async fn connect( 164 | transport: Io, 165 | secret_key: SecretKey, 166 | remote_id: PeerId, 167 | protocol_version: ProtocolVersion, 168 | client_version: String, 169 | capabilities: Vec, 170 | port: u16, 171 | ) -> anyhow::Result { 172 | Ok(Self::new( 173 | ECIESStream::connect(transport, secret_key, remote_id).await?, 174 | secret_key, 175 | protocol_version, 176 | client_version, 177 | capabilities, 178 | port, 179 | ) 180 | .await?) 
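// Illustrative sketch (not part of the crate sources; it could live as a unit test in this
// module): round-tripping the `HelloMessage` defined above through RLP, mirroring the
// payload that `PeerStream::new` exchanges right after the ECIES handshake (the 0x00
// message id is prepended separately there). All field values are arbitrary.
fn hello_roundtrip(id: PeerId) {
    use arrayvec::ArrayString;

    let hello = HelloMessage {
        protocol_version: 5,
        client_version: "example-client/0.1.0".to_string(),
        capabilities: vec![CapabilityMessage {
            name: CapabilityName(ArrayString::from("eth").unwrap()),
            version: 66,
        }],
        port: 30303,
        id,
    };

    let encoded = rlp::encode(&hello);
    let decoded: HelloMessage = rlp::decode(&encoded).unwrap();
    assert_eq!(decoded.client_version, hello.client_version);
    assert_eq!(decoded.capabilities, hello.capabilities);
    assert_eq!(decoded.port, hello.port);
}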
181 | } 182 | 183 | /// Incoming peer stream over TCP 184 | #[instrument( 185 | skip( 186 | transport, 187 | secret_key, 188 | protocol_version, 189 | client_version, 190 | capabilities, 191 | port 192 | ), 193 | fields() 194 | )] 195 | pub async fn incoming( 196 | transport: Io, 197 | secret_key: SecretKey, 198 | protocol_version: ProtocolVersion, 199 | client_version: String, 200 | capabilities: Vec, 201 | port: u16, 202 | ) -> anyhow::Result { 203 | Ok(Self::new( 204 | ECIESStream::incoming(transport, secret_key).await?, 205 | secret_key, 206 | protocol_version, 207 | client_version, 208 | capabilities, 209 | port, 210 | ) 211 | .await?) 212 | } 213 | 214 | /// Create a new peer stream 215 | #[instrument(skip(transport, secret_key, protocol_version, client_version, capabilities, port), fields(id=&*transport.remote_id().to_string()))] 216 | pub async fn new( 217 | mut transport: ECIESStream, 218 | secret_key: SecretKey, 219 | protocol_version: ProtocolVersion, 220 | client_version: String, 221 | capabilities: Vec, 222 | port: u16, 223 | ) -> anyhow::Result { 224 | let public_key = PublicKey::from_secret_key(SECP256K1, &secret_key); 225 | let id = pk2id(&public_key); 226 | let nonhello_capabilities = capabilities.clone(); 227 | let nonhello_client_version = client_version.clone(); 228 | 229 | debug!("Connecting to RLPx peer {:02x}", transport.remote_id()); 230 | 231 | let hello = HelloMessage { 232 | port, 233 | id, 234 | protocol_version: protocol_version.to_usize().unwrap(), 235 | client_version, 236 | capabilities: { 237 | let mut caps = Vec::new(); 238 | for cap in capabilities { 239 | caps.push(CapabilityMessage { 240 | name: cap.name, 241 | version: cap.version, 242 | }); 243 | } 244 | caps 245 | }, 246 | }; 247 | trace!("Sending hello message: {:?}", hello); 248 | let mut outbound_hello = BytesMut::new(); 249 | outbound_hello = { 250 | let mut s = RlpStream::new_with_buffer(outbound_hello); 251 | s.append(&0_usize); 252 | s.out() 253 | }; 254 | 255 | outbound_hello = { 256 | let mut s = RlpStream::new_with_buffer(outbound_hello); 257 | s.append(&hello); 258 | s.out() 259 | }; 260 | trace!("Outbound hello: {}", hex::encode(&outbound_hello)); 261 | transport.send(outbound_hello.freeze()).await?; 262 | 263 | let hello = transport.try_next().await?; 264 | 265 | let hello = hello.ok_or_else(|| { 266 | debug!("Hello failed because of no value"); 267 | anyhow!("hello failed (no value)") 268 | })?; 269 | trace!("Receiving hello message: {:02x?}", hello); 270 | 271 | let message_id_rlp = Rlp::new(&hello[0..1]); 272 | let message_id = message_id_rlp 273 | .as_val::() 274 | .context("hello failed (message id)")?; 275 | let payload = &hello[1..]; 276 | match message_id { 277 | 0 => {} 278 | 1 => { 279 | let reason = Rlp::new(payload) 280 | .val_at::(0) 281 | .ok() 282 | .and_then(DisconnectReason::from_u8); 283 | bail!( 284 | "explicit disconnect: {}", 285 | reason 286 | .map(|r| r.to_string()) 287 | .unwrap_or_else(|| "(unknown)".to_string()) 288 | ); 289 | } 290 | _ => { 291 | bail!( 292 | "Hello failed because message id is not 0 but {}: {:02x?}", 293 | message_id, 294 | payload 295 | ); 296 | } 297 | } 298 | 299 | let val = Rlp::new(payload) 300 | .as_val::() 301 | .context("hello failed (rlp)")?; 302 | debug!("hello message: {:?}", val); 303 | let mut shared_capabilities: Vec = Vec::new(); 304 | 305 | for cap_info in nonhello_capabilities { 306 | let cap_match = val 307 | .capabilities 308 | .iter() 309 | .any(|v| v.name == cap_info.name && v.version == cap_info.version); 310 | 311 | if 
cap_match { 312 | shared_capabilities.push(cap_info); 313 | } 314 | } 315 | 316 | let shared_caps_original = shared_capabilities.clone(); 317 | 318 | for cap_info in shared_caps_original { 319 | shared_capabilities 320 | .retain(|v| v.name != cap_info.name || v.version >= cap_info.version); 321 | } 322 | 323 | shared_capabilities.sort_by_key(|v| v.name); 324 | 325 | let no_shared_caps = shared_capabilities.is_empty(); 326 | 327 | let snappy = match protocol_version { 328 | ProtocolVersion::V4 => None, 329 | ProtocolVersion::V5 => Some(Snappy { 330 | encoder: snap::raw::Encoder::new(), 331 | decoder: snap::raw::Decoder::new(), 332 | }), 333 | }; 334 | 335 | let mut this = Self { 336 | remote_id: transport.remote_id(), 337 | stream: transport, 338 | client_version: nonhello_client_version, 339 | port, 340 | id, 341 | shared_capabilities, 342 | snappy, 343 | disconnected: false, 344 | }; 345 | 346 | if no_shared_caps { 347 | debug!("No shared capabilities, disconnecting."); 348 | let _ = this 349 | .send(PeerMessage::Disconnect(DisconnectReason::UselessPeer)) 350 | .await; 351 | 352 | bail!("handshake failed - no shared capabilities"); 353 | } 354 | 355 | Ok(this) 356 | } 357 | } 358 | 359 | /// Sending message for RLPx 360 | #[derive(Clone, Debug)] 361 | pub struct SubprotocolMessage { 362 | pub cap_name: CapabilityName, 363 | pub message: Message, 364 | } 365 | 366 | #[derive(Clone, Debug)] 367 | pub enum PeerMessage { 368 | Disconnect(DisconnectReason), 369 | Ping, 370 | Pong, 371 | Subprotocol(SubprotocolMessage), 372 | } 373 | 374 | impl Stream for PeerStream 375 | where 376 | Io: Transport, 377 | { 378 | type Item = Result; 379 | 380 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 381 | let mut s = self.get_mut(); 382 | 383 | if s.disconnected { 384 | return Poll::Ready(None); 385 | } 386 | 387 | match ready!(Pin::new(&mut s.stream).poll_next(cx)) { 388 | Some(Ok(val)) => { 389 | trace!("Received peer message: {}", hex::encode(&val)); 390 | let message_id_rlp = Rlp::new(&val[0..1]); 391 | let message_id: Result = message_id_rlp.as_val(); 392 | 393 | let (cap, id, data) = match message_id { 394 | Ok(message_id) => { 395 | let data = if let Some(snappy) = &mut s.snappy { 396 | let input = &val[1..]; 397 | let payload_len = snap::raw::decompress_len(input)?; 398 | if payload_len > MAX_PAYLOAD_SIZE { 399 | return Poll::Ready(Some(Err(io::Error::new( 400 | io::ErrorKind::InvalidInput, 401 | format!( 402 | "payload size ({}) exceeds limit ({} bytes)", 403 | payload_len, MAX_PAYLOAD_SIZE 404 | ), 405 | )))); 406 | } 407 | let v = snappy.decoder.decompress_vec(input)?.into(); 408 | trace!("Decompressed raw message data: {}", hex::encode(&v)); 409 | v 410 | } else { 411 | Bytes::copy_from_slice(&val[1..]) 412 | }; 413 | 414 | if message_id < 0x10 { 415 | match message_id { 416 | 0x01 => { 417 | s.disconnected = true; 418 | if let Some(reason) = Rlp::new(&*data) 419 | .val_at::(0) 420 | .ok() 421 | .and_then(DisconnectReason::from_u8) 422 | { 423 | return Poll::Ready(Some(Ok(PeerMessage::Disconnect( 424 | reason, 425 | )))); 426 | } else { 427 | return Poll::Ready(Some(Err(io::Error::new( 428 | io::ErrorKind::Other, 429 | format!( 430 | "peer disconnected with malformed message: {}", 431 | hex::encode(data) 432 | ), 433 | )))); 434 | } 435 | } 436 | 0x02 => { 437 | debug!("received ping message data {:?}", data); 438 | return Poll::Ready(Some(Ok(PeerMessage::Ping))); 439 | } 440 | 0x03 => { 441 | debug!("received pong message"); 442 | return 
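// Illustrative sketch (not part of the crate sources): the capability negotiation in
// `PeerStream::new` above keeps only capabilities that the remote hello lists with the
// exact same name and version, then discards every version of a name except the highest
// shared one, and finally sorts by name. For example, offering eth/65 and eth/66 against
// a remote that advertises eth/66 and les/4 leaves just eth/66 in the shared set.
fn negotiate(local: Vec<CapabilityInfo>, remote: &[CapabilityMessage]) -> Vec<CapabilityInfo> {
    let mut shared: Vec<CapabilityInfo> = local
        .into_iter()
        .filter(|c| {
            remote
                .iter()
                .any(|r| r.name == c.name && r.version == c.version)
        })
        .collect();

    // Keep only the highest shared version for every capability name.
    for cap in shared.clone() {
        shared.retain(|v| v.name != cap.name || v.version >= cap.version);
    }
    shared.sort_by_key(|v| v.name);
    shared
}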
Poll::Ready(Some(Ok(PeerMessage::Pong))); 443 | } 444 | _ => { 445 | debug!("received unknown reserved message"); 446 | return Poll::Ready(Some(Err(io::Error::new( 447 | io::ErrorKind::Other, 448 | "unhandled reserved message", 449 | )))); 450 | } 451 | } 452 | } 453 | 454 | let mut message_id = message_id - 0x10; 455 | let mut index = 0; 456 | for cap in &s.shared_capabilities { 457 | if message_id > cap.length { 458 | message_id -= cap.length; 459 | index += 1; 460 | } 461 | } 462 | if index >= s.shared_capabilities.len() { 463 | return Poll::Ready(Some(Err(io::Error::new( 464 | io::ErrorKind::Other, 465 | "invalid message id (out of cap range)", 466 | )))); 467 | } 468 | (s.shared_capabilities[index], message_id, data) 469 | } 470 | Err(e) => { 471 | return Poll::Ready(Some(Err(io::Error::new( 472 | io::ErrorKind::Other, 473 | format!("message id parsing failed (invalid): {}", e), 474 | )))); 475 | } 476 | }; 477 | 478 | trace!( 479 | "Cap: {}, id: {}, data: {}", 480 | CapabilityId::from(cap), 481 | id, 482 | hex::encode(&data) 483 | ); 484 | 485 | Poll::Ready(Some(Ok(PeerMessage::Subprotocol(SubprotocolMessage { 486 | cap_name: cap.name, 487 | message: Message { id, data }, 488 | })))) 489 | } 490 | Some(Err(e)) => Poll::Ready(Some(Err(e))), 491 | None => Poll::Ready(None), 492 | } 493 | } 494 | } 495 | 496 | impl Sink for PeerStream 497 | where 498 | Io: Transport, 499 | { 500 | type Error = io::Error; 501 | 502 | fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 503 | Pin::new(&mut self.get_mut().stream).poll_ready(cx) 504 | } 505 | 506 | fn start_send(self: Pin<&mut Self>, message: PeerMessage) -> Result<(), Self::Error> { 507 | let this = self.get_mut(); 508 | 509 | if this.disconnected { 510 | return Err(io::Error::new( 511 | io::ErrorKind::BrokenPipe, 512 | "disconnection requested", 513 | )); 514 | } 515 | 516 | let (message_id, payload) = match message { 517 | PeerMessage::Disconnect(reason) => { 518 | this.disconnected = true; 519 | (0x01, rlp::encode(&reason.to_u8().unwrap()).into()) 520 | } 521 | PeerMessage::Ping => { 522 | debug!("sending ping message"); 523 | (0x02, rlp::EMPTY_LIST_RLP.to_vec().into()) 524 | } 525 | PeerMessage::Pong => { 526 | debug!("sending pong message"); 527 | (0x03, rlp::EMPTY_LIST_RLP.to_vec().into()) 528 | } 529 | PeerMessage::Subprotocol(SubprotocolMessage { cap_name, message }) => { 530 | let Message { id, data } = message; 531 | let cap = this 532 | .shared_capabilities 533 | .iter() 534 | .find(|cap| cap.name == cap_name); 535 | 536 | if cap.is_none() { 537 | debug!( 538 | "giving up sending cap {} of id {} to 0x{:x} because remote does not support.", 539 | cap_name.0, 540 | id, 541 | this.remote_id() 542 | ); 543 | return Ok(()); 544 | } 545 | 546 | let cap = *cap.unwrap(); 547 | 548 | if id >= cap.length { 549 | debug!( 550 | "giving up sending cap {} of id {} to 0x{:x} because it is too big.", 551 | cap_name.0, 552 | id, 553 | this.remote_id() 554 | ); 555 | return Ok(()); 556 | } 557 | 558 | let mut message_id = 0x10; 559 | for scap in &this.shared_capabilities { 560 | if scap == &cap { 561 | break; 562 | } 563 | 564 | message_id += scap.length; 565 | } 566 | message_id += id; 567 | 568 | (message_id, data) 569 | } 570 | }; 571 | 572 | let mut s = RlpStream::new_with_buffer(BytesMut::with_capacity(2 + payload.len())); 573 | s.append(&message_id); 574 | let mut msg = s.out(); 575 | 576 | if let Some(snappy) = &mut this.snappy { 577 | let mut buf = msg.split_off(msg.len()); 578 | 
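// Illustrative sketch (not part of the crate sources): how `start_send` above maps a
// subprotocol-local message id onto the shared RLPx message-id space. Ids 0x00-0x0f are
// reserved for the base protocol (hello, disconnect, ping, pong); each shared capability
// then owns a contiguous range of `length` ids, in the order of `shared_capabilities`.
// The capability list below is a made-up example.
fn wire_message_id(shared: &[(&str, usize)], cap_index: usize, local_id: usize) -> usize {
    assert!(local_id < shared[cap_index].1, "id outside the capability's range");
    let offset: usize = shared[..cap_index].iter().map(|(_, length)| *length).sum();
    0x10 + offset + local_id
}

// With shared = [("eth", 17), ("les", 21)], "eth" message 0 goes out as 0x10 and
// "les" message 2 as 0x10 + 17 + 2 = 0x23; `poll_next` above applies the inverse
// mapping to route an incoming id back to the right capability.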
buf.resize(snap::raw::max_compress_len(payload.len()), 0); 579 | 580 | let compressed_len = snappy.encoder.compress(&*payload, &mut buf).unwrap(); 581 | buf.truncate(compressed_len); 582 | 583 | msg.unsplit(buf); 584 | } else { 585 | msg.extend_from_slice(&*payload) 586 | } 587 | 588 | Pin::new(&mut this.stream).start_send(msg.freeze())?; 589 | 590 | Ok(()) 591 | } 592 | 593 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 594 | Pin::new(&mut self.get_mut().stream).poll_flush(cx) 595 | } 596 | 597 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 598 | Pin::new(&mut self.get_mut().stream).poll_close(cx) 599 | } 600 | } 601 | -------------------------------------------------------------------------------- /src/rlpx.rs: -------------------------------------------------------------------------------- 1 | //! RLPx protocol implementation in Rust 2 | 3 | use crate::{disc::Discovery, node_filter::*, peer::*, transport::Transport, types::*}; 4 | use anyhow::{anyhow, bail}; 5 | use cidr::{Cidr, IpCidr}; 6 | use educe::Educe; 7 | use futures::sink::SinkExt; 8 | use parking_lot::Mutex; 9 | use secp256k1::SecretKey; 10 | use std::{ 11 | collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, 12 | fmt::Debug, 13 | future::Future, 14 | net::SocketAddr, 15 | ops::Deref, 16 | sync::{ 17 | atomic::{AtomicBool, AtomicUsize, Ordering}, 18 | Arc, Weak, 19 | }, 20 | time::Duration, 21 | }; 22 | use task_group::TaskGroup; 23 | use tokio::{ 24 | net::{TcpListener, TcpStream}, 25 | sync::{ 26 | mpsc::{channel, unbounded_channel}, 27 | oneshot::{channel as oneshot, Sender as OneshotSender}, 28 | }, 29 | time::sleep, 30 | }; 31 | use tokio_stream::{StreamExt, StreamMap}; 32 | use tracing::*; 33 | use uuid::Uuid; 34 | 35 | const GRACE_PERIOD_SECS: u64 = 2; 36 | const HANDSHAKE_TIMEOUT_SECS: u64 = 10; 37 | const PING_TIMEOUT: Duration = Duration::from_secs(60); 38 | const DISCOVERY_TIMEOUT_SECS: u64 = 90; 39 | const DISCOVERY_CONNECT_TIMEOUT_SECS: u64 = 5; 40 | const DIAL_INTERVAL: Duration = Duration::from_millis(100); 41 | 42 | #[derive(Clone, Copy)] 43 | enum DisconnectInitiator { 44 | Local, 45 | LocalForceful, 46 | Remote, 47 | } 48 | 49 | struct DisconnectSignal { 50 | initiator: DisconnectInitiator, 51 | reason: DisconnectReason, 52 | } 53 | 54 | #[derive(Debug)] 55 | struct ConnectedPeerState { 56 | tasks: TaskGroup, 57 | } 58 | 59 | #[derive(Debug)] 60 | enum PeerState { 61 | Connecting { connection_id: Uuid }, 62 | Connected(ConnectedPeerState), 63 | } 64 | 65 | impl PeerState { 66 | const fn is_connected(&self) -> bool { 67 | matches!(self, Self::Connected(_)) 68 | } 69 | } 70 | 71 | #[derive(Debug)] 72 | struct PeerStreams { 73 | /// Mapping of remote IDs to streams in `StreamMap` 74 | mapping: HashMap, 75 | } 76 | 77 | impl PeerStreams { 78 | fn disconnect_peer(&mut self, remote_id: PeerId) -> bool { 79 | debug!("disconnecting peer {}", remote_id); 80 | 81 | self.mapping.remove(&remote_id).is_some() 82 | } 83 | } 84 | 85 | impl Default for PeerStreams { 86 | fn default() -> Self { 87 | Self { 88 | mapping: HashMap::new(), 89 | } 90 | } 91 | } 92 | 93 | #[derive(Educe)] 94 | #[educe(Clone)] 95 | struct PeerStreamHandshakeData { 96 | port: u16, 97 | protocol_version: ProtocolVersion, 98 | secret_key: SecretKey, 99 | client_version: String, 100 | capabilities: Arc, 101 | capability_server: Arc, 102 | } 103 | 104 | async fn handle_incoming( 105 | task_group: Weak, 106 | streams: Arc>, 107 | node_filter: Arc>, 108 | tcp_incoming: TcpListener, 109 | cidr: Option, 
110 | handshake_data: PeerStreamHandshakeData, 111 | ) where 112 | C: CapabilityServer, 113 | { 114 | let _: anyhow::Result<()> = async { 115 | loop { 116 | match tcp_incoming.accept().await { 117 | Err(e) => { 118 | bail!("failed to accept peer: {:?}, shutting down", e); 119 | } 120 | Ok((stream, remote_addr)) => { 121 | let tasks = task_group 122 | .upgrade() 123 | .ok_or_else(|| anyhow!("task group is down"))?; 124 | 125 | if let Some(cidr) = &cidr { 126 | if !cidr.contains(&remote_addr.ip()) { 127 | debug!( 128 | "Ignoring connection request: {} is not in range {}", 129 | remote_addr, cidr 130 | ); 131 | 132 | continue; 133 | } 134 | } 135 | 136 | let f = handle_incoming_request( 137 | streams.clone(), 138 | node_filter.clone(), 139 | stream, 140 | handshake_data.clone(), 141 | ); 142 | tasks.spawn_with_name(format!("Incoming connection setup: {}", remote_addr), f); 143 | } 144 | } 145 | } 146 | } 147 | .await; 148 | } 149 | 150 | /// Set up newly connected peer's state, start its tasks 151 | fn setup_peer_state( 152 | streams: Weak>, 153 | capability_server: Arc, 154 | remote_id: PeerId, 155 | peer: PeerStream, 156 | ) -> ConnectedPeerState 157 | where 158 | C: CapabilityServer, 159 | Io: Transport, 160 | { 161 | let capability_set = peer 162 | .capabilities() 163 | .iter() 164 | .copied() 165 | .map(|cap_info| (cap_info.name, cap_info.version)) 166 | .collect::>(); 167 | let (mut sink, mut stream) = futures::StreamExt::split(peer); 168 | let (peer_disconnect_tx, mut peer_disconnect_rx) = unbounded_channel(); 169 | let tasks = TaskGroup::default(); 170 | 171 | capability_server.on_peer_connect(remote_id, capability_set); 172 | 173 | let pinged = Arc::new(AtomicBool::default()); 174 | let (pings_tx, mut pings) = channel(1); 175 | let (pongs_tx, mut pongs) = channel(1); 176 | 177 | tasks.spawn_with_name(format!("peer {} ingress router", remote_id), { 178 | let peer_disconnect_tx = peer_disconnect_tx.clone(); 179 | let capability_server = capability_server.clone(); 180 | let pinged = pinged.clone(); 181 | async move { 182 | let disconnect_signal = { 183 | async move { 184 | while let Some(message) = stream.next().await { 185 | match message { 186 | Err(e) => { 187 | debug!("Peer incoming error: {}", e); 188 | break; 189 | } 190 | Ok(PeerMessage::Subprotocol(SubprotocolMessage { 191 | cap_name, 192 | message, 193 | })) => { 194 | // Actually handle the message 195 | capability_server 196 | .on_peer_event( 197 | remote_id, 198 | InboundEvent::Message { 199 | capability_name: cap_name, 200 | message, 201 | }, 202 | ) 203 | .await 204 | } 205 | Ok(PeerMessage::Disconnect(reason)) => { 206 | // Peer has requested disconnection. 207 | return DisconnectSignal { 208 | initiator: DisconnectInitiator::Remote, 209 | reason, 210 | }; 211 | } 212 | Ok(PeerMessage::Ping) => { 213 | let _ = pongs_tx.send(()).await; 214 | } 215 | Ok(PeerMessage::Pong) => { 216 | pinged.store(false, Ordering::Relaxed); 217 | } 218 | } 219 | } 220 | 221 | // Ingress stream is closed, force disconnect the peer. 
222 | DisconnectSignal { 223 | initiator: DisconnectInitiator::Remote, 224 | reason: DisconnectReason::DisconnectRequested, 225 | } 226 | } 227 | } 228 | .await; 229 | 230 | let _ = peer_disconnect_tx.send(disconnect_signal); 231 | } 232 | .instrument(span!(Level::DEBUG, "IN", "peer={}", remote_id.to_string(),)) 233 | }); 234 | 235 | tasks.spawn_with_name( 236 | format!("peer {} egress router & disconnector", remote_id), 237 | async move { 238 | let mut event_fut = capability_server.next(remote_id); 239 | loop { 240 | let mut disconnecting = None; 241 | let mut egress = None; 242 | let mut trigger: Option> = None; 243 | tokio::select! { 244 | // Event from capability server. 245 | msg = &mut event_fut => { 246 | // Invariant: CapabilityServer::next() will never be called after disconnect event 247 | match msg { 248 | OutboundEvent::Message { 249 | capability_name, message 250 | } => { 251 | event_fut = capability_server.next(remote_id); 252 | egress = Some(PeerMessage::Subprotocol(SubprotocolMessage { 253 | cap_name: capability_name, message 254 | })); 255 | } 256 | OutboundEvent::Disconnect { 257 | reason 258 | } => { 259 | egress = Some(PeerMessage::Disconnect(reason)); 260 | disconnecting = Some(DisconnectSignal { 261 | initiator: DisconnectInitiator::Local, reason 262 | }); 263 | } 264 | }; 265 | }, 266 | // We ping the peer. 267 | Some(tx) = pings.recv() => { 268 | egress = Some(PeerMessage::Ping); 269 | trigger = Some(tx); 270 | } 271 | // Peer has pinged us. 272 | Some(_) = pongs.recv() => { 273 | egress = Some(PeerMessage::Pong); 274 | } 275 | // Ping timeout or signal from ingress router. 276 | Some(DisconnectSignal { initiator, reason }) = peer_disconnect_rx.recv() => { 277 | if let DisconnectInitiator::Local = initiator { 278 | egress = Some(PeerMessage::Disconnect(reason)); 279 | } 280 | disconnecting = Some(DisconnectSignal { initiator, reason }) 281 | } 282 | }; 283 | 284 | if let Some(message) = egress { 285 | trace!("Sending message: {:?}", message); 286 | 287 | // Send egress message, force disconnect on error. 288 | if let Err(e) = sink.send(message).await { 289 | debug!("peer disconnected with error {:?}", e); 290 | disconnecting.get_or_insert(DisconnectSignal { 291 | initiator: DisconnectInitiator::LocalForceful, 292 | reason: DisconnectReason::TcpSubsystemError, 293 | }); 294 | } else if let Some(trigger) = trigger { 295 | let _ = trigger.send(()); 296 | } 297 | } 298 | 299 | if let Some(DisconnectSignal { initiator, reason }) = disconnecting { 300 | if let DisconnectInitiator::Local = initiator { 301 | // We have sent disconnect message, wait for grace period. 302 | sleep(Duration::from_secs(GRACE_PERIOD_SECS)).await; 303 | } 304 | capability_server 305 | .on_peer_event( 306 | remote_id, 307 | InboundEvent::Disconnect { 308 | reason: Some(reason), 309 | }, 310 | ) 311 | .await; 312 | break; 313 | } 314 | } 315 | 316 | // We are done, drop the peer state. 317 | if let Some(streams) = streams.upgrade() { 318 | // This is the last line that is guaranteed to be executed. 319 | // After this the peer's task group is dropped and any alive tasks are forcibly cancelled. 
320 | streams.lock().disconnect_peer(remote_id); 321 | } 322 | } 323 | .instrument(span!( 324 | Level::DEBUG, 325 | "OUT/DISC", 326 | "peer={}", 327 | remote_id.to_string(), 328 | )), 329 | ); 330 | 331 | tasks.spawn_with_name(format!("peer {} pinger", remote_id), async move { 332 | loop { 333 | pinged.store(true, Ordering::Relaxed); 334 | 335 | let (tx, rx) = oneshot(); 336 | if pings_tx.send(tx).await.is_ok() && rx.await.is_ok() { 337 | sleep(PING_TIMEOUT).await; 338 | 339 | if pinged.load(Ordering::Relaxed) { 340 | let _ = peer_disconnect_tx.send(DisconnectSignal { 341 | initiator: DisconnectInitiator::Local, 342 | reason: DisconnectReason::PingTimeout, 343 | }); 344 | 345 | return; 346 | } 347 | 348 | continue; 349 | } 350 | 351 | return; 352 | } 353 | }); 354 | ConnectedPeerState { tasks } 355 | } 356 | 357 | /// Establishes the connection with peer and adds them to internal state. 358 | async fn handle_incoming_request( 359 | streams: Arc>, 360 | node_filter: Arc>, 361 | stream: Io, 362 | handshake_data: PeerStreamHandshakeData, 363 | ) where 364 | C: CapabilityServer, 365 | Io: Transport, 366 | { 367 | let PeerStreamHandshakeData { 368 | secret_key, 369 | protocol_version, 370 | client_version, 371 | capabilities, 372 | capability_server, 373 | port, 374 | } = handshake_data; 375 | // Do handshake and convert incoming connection into stream. 376 | let peer_res = tokio::time::timeout( 377 | Duration::from_secs(HANDSHAKE_TIMEOUT_SECS), 378 | PeerStream::incoming( 379 | stream, 380 | secret_key, 381 | protocol_version, 382 | client_version, 383 | capabilities.get_capabilities().to_vec(), 384 | port, 385 | ), 386 | ) 387 | .await 388 | .unwrap_or_else(|_| Err(anyhow!("incoming connection timeout"))); 389 | 390 | match peer_res { 391 | Ok(peer) => { 392 | let remote_id = peer.remote_id(); 393 | let s = streams.clone(); 394 | let mut s = s.lock(); 395 | let node_filter = node_filter.clone(); 396 | let PeerStreams { mapping } = &mut *s; 397 | let total_connections = mapping.len(); 398 | 399 | match mapping.entry(remote_id) { 400 | Entry::Occupied(entry) => { 401 | debug!( 402 | "We are already {} to remote peer {}!", 403 | if entry.get().is_connected() { 404 | "connected" 405 | } else { 406 | "connecting" 407 | }, 408 | remote_id 409 | ); 410 | } 411 | Entry::Vacant(entry) => { 412 | if node_filter.lock().allow(total_connections, remote_id) { 413 | debug!("New incoming peer connected: {}", remote_id); 414 | entry.insert(PeerState::Connected(setup_peer_state( 415 | Arc::downgrade(&streams), 416 | capability_server, 417 | remote_id, 418 | peer, 419 | ))); 420 | } else { 421 | trace!("Node filter rejected peer {}, disconnecting", remote_id); 422 | } 423 | } 424 | } 425 | } 426 | Err(e) => { 427 | debug!("Peer disconnected with error {}", e); 428 | } 429 | } 430 | } 431 | 432 | #[derive(Debug, Default)] 433 | struct CapabilitySet { 434 | inner: BTreeMap, 435 | 436 | capability_cache: Vec, 437 | } 438 | 439 | impl CapabilitySet { 440 | fn get_capabilities(&self) -> &[CapabilityInfo] { 441 | &self.capability_cache 442 | } 443 | } 444 | 445 | impl From> for CapabilitySet { 446 | fn from(inner: BTreeMap) -> Self { 447 | let capability_cache = inner 448 | .iter() 449 | .map( 450 | |(&CapabilityId { name, version }, &length)| CapabilityInfo { 451 | name, 452 | version, 453 | length, 454 | }, 455 | ) 456 | .collect(); 457 | 458 | Self { 459 | inner, 460 | capability_cache, 461 | } 462 | } 463 | } 464 | 465 | /// This is an asynchronous RLPx server implementation. 
466 | /// 467 | /// `Swarm` is the representation of swarm of connected RLPx peers 468 | /// supports registration for capability servers. 469 | /// 470 | /// This implementation is based on the concept of structured concurrency. 471 | /// Internal state is managed by a multitude of workers that run in separate runtime tasks 472 | /// spawned on the running executor during the server creation and addition of new peers. 473 | /// All continuously running workers are inside the task scope owned by the server struct. 474 | #[derive(Educe)] 475 | #[educe(Debug)] 476 | pub struct Swarm { 477 | #[allow(unused)] 478 | tasks: Arc, 479 | 480 | streams: Arc>, 481 | 482 | currently_connecting: Arc, 483 | 484 | node_filter: Arc>, 485 | 486 | capabilities: Arc, 487 | #[educe(Debug(ignore))] 488 | capability_server: Arc, 489 | 490 | #[educe(Debug(ignore))] 491 | secret_key: SecretKey, 492 | protocol_version: ProtocolVersion, 493 | client_version: String, 494 | port: u16, 495 | } 496 | 497 | /// Builder for ergonomically creating a new `Server`. 498 | #[derive(Debug)] 499 | pub struct SwarmBuilder { 500 | task_group: Option>, 501 | listen_options: Option, 502 | client_version: String, 503 | } 504 | 505 | impl SwarmBuilder { 506 | pub fn with_task_group(mut self, task_group: Arc) -> Self { 507 | self.task_group = Some(task_group); 508 | self 509 | } 510 | 511 | pub fn with_listen_options(mut self, options: ListenOptions) -> Self { 512 | self.listen_options = Some(options); 513 | self 514 | } 515 | 516 | pub fn with_client_version(mut self, version: String) -> Self { 517 | self.client_version = version; 518 | self 519 | } 520 | 521 | /// Create a new RLPx node 522 | pub async fn build( 523 | self, 524 | capability_mask: BTreeMap, 525 | capability_server: Arc, 526 | secret_key: SecretKey, 527 | ) -> anyhow::Result>> { 528 | Swarm::new_inner( 529 | secret_key, 530 | self.client_version, 531 | self.task_group, 532 | capability_mask.into(), 533 | capability_server, 534 | self.listen_options, 535 | ) 536 | .await 537 | } 538 | } 539 | 540 | #[derive(Educe)] 541 | #[educe(Debug)] 542 | pub struct ListenOptions { 543 | #[educe(Debug(ignore))] 544 | pub discovery_tasks: StreamMap, 545 | pub max_peers: usize, 546 | pub addr: SocketAddr, 547 | pub cidr: Option, 548 | } 549 | 550 | impl Swarm<()> { 551 | pub fn builder() -> SwarmBuilder { 552 | SwarmBuilder { 553 | task_group: None, 554 | listen_options: None, 555 | client_version: format!("rust-devp2p/{}", env!("CARGO_PKG_VERSION")), 556 | } 557 | } 558 | } 559 | 560 | impl Swarm { 561 | pub async fn new( 562 | capability_mask: BTreeMap, 563 | capability_server: Arc, 564 | secret_key: SecretKey, 565 | ) -> anyhow::Result> { 566 | Swarm::builder() 567 | .build(capability_mask, capability_server, secret_key) 568 | .await 569 | } 570 | 571 | async fn new_inner( 572 | secret_key: SecretKey, 573 | client_version: String, 574 | task_group: Option>, 575 | capabilities: CapabilitySet, 576 | capability_server: Arc, 577 | listen_options: Option, 578 | ) -> anyhow::Result> { 579 | let tasks = task_group.unwrap_or_default(); 580 | 581 | let protocol_version = ProtocolVersion::V5; 582 | 583 | let port = listen_options 584 | .as_ref() 585 | .map_or(0, |options| options.addr.port()); 586 | 587 | let streams = Arc::new(Mutex::new(PeerStreams::default())); 588 | let node_filter = Arc::new(Mutex::new(MemoryNodeFilter::new(Arc::new( 589 | listen_options 590 | .as_ref() 591 | .map_or(0.into(), |options| options.max_peers.into()), 592 | )))); 593 | 594 | let capabilities = 
Arc::new(capabilities); 595 | 596 | if let Some(options) = &listen_options { 597 | let tcp_incoming = TcpListener::bind(options.addr).await?; 598 | let cidr = options.cidr.clone(); 599 | tasks.spawn_with_name( 600 | "incoming handler", 601 | handle_incoming( 602 | Arc::downgrade(&tasks), 603 | streams.clone(), 604 | node_filter.clone(), 605 | tcp_incoming, 606 | cidr, 607 | PeerStreamHandshakeData { 608 | port, 609 | protocol_version, 610 | secret_key, 611 | client_version: client_version.clone(), 612 | capabilities: capabilities.clone(), 613 | capability_server: capability_server.clone(), 614 | }, 615 | ), 616 | ); 617 | } 618 | 619 | let server = Arc::new(Self { 620 | tasks: tasks.clone(), 621 | streams, 622 | currently_connecting: Default::default(), 623 | node_filter, 624 | capabilities, 625 | capability_server, 626 | secret_key, 627 | protocol_version, 628 | client_version, 629 | port, 630 | }); 631 | 632 | if let Some(mut options) = listen_options { 633 | tasks.spawn_with_name("dialer", { 634 | let server = Arc::downgrade(&server); 635 | let tasks = Arc::downgrade(&tasks); 636 | async move { 637 | let current_peers = Arc::new(Mutex::new(HashSet::new())); 638 | loop { 639 | if let Some(server) = server.upgrade() { 640 | let streams_len = server.streams.lock().mapping.len(); 641 | let max_peers = server.node_filter.lock().max_peers(); 642 | 643 | if streams_len < max_peers { 644 | trace!("Discovering peers as our peer count is too low: {} < {}", streams_len, max_peers); 645 | match tokio::time::timeout( 646 | Duration::from_secs(DISCOVERY_TIMEOUT_SECS), 647 | options.discovery_tasks.next(), 648 | ) 649 | .await { 650 | Err(_) => { 651 | debug!("Failed to get new peer: timed out"); 652 | } 653 | Ok(None) => { 654 | debug!("Discoveries ended, dialer quitting"); 655 | return; 656 | } 657 | Ok(Some((disc_id, Ok(NodeRecord { addr, id: remote_id })))) => { 658 | if let Some(tasks) = tasks.upgrade() { 659 | if current_peers.lock().insert(remote_id) { 660 | debug!("Discovered peer: {:?} ({})", remote_id, disc_id); 661 | tasks.spawn_with_name(format!("add peer {} at {}", remote_id, addr), { 662 | let current_peers = current_peers.clone(); 663 | async move { 664 | if tokio::time::timeout( 665 | Duration::from_secs(DISCOVERY_CONNECT_TIMEOUT_SECS), 666 | server.add_peer_inner(addr, remote_id, true) 667 | ).await.is_err() { 668 | debug!("Timed out adding peer {}", remote_id); 669 | } 670 | current_peers.lock().remove(&remote_id) 671 | } 672 | }); 673 | } 674 | } 675 | } 676 | Ok(Some((disc_id, Err(e)))) => warn!("Failed to get new peer: {} ({})", e, disc_id) 677 | } 678 | 679 | sleep(DIAL_INTERVAL).await; 680 | } else { 681 | trace!("Skipping discovery as current number of peers is too high: {} >= {}", streams_len, max_peers); 682 | sleep(Duration::from_secs(2)).await; 683 | } 684 | } else { 685 | return; 686 | } 687 | } 688 | }.instrument(span!(Level::DEBUG, "dialer")) 689 | }); 690 | } 691 | 692 | Ok(server) 693 | } 694 | 695 | /// Add a new peer to this RLPx node. Returns `true` if it was added successfully (did not exist before, accepted by node filter). 
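// Illustrative sketch (not part of the crate sources): building a `Swarm` with the builder
// above and a minimal `CapabilityServer`. The "eth"/66 capability id and its message count
// of 17 are placeholder values, and the trait's `next` method is assumed to be declared as
// `async fn next(&self, peer: PeerId) -> OutboundEvent`, which is how the egress router in
// `setup_peer_state` uses it.
use async_trait::async_trait;
use std::{collections::{BTreeMap, HashMap}, sync::Arc};

#[derive(Debug, Default)]
struct NoopServer;

#[async_trait]
impl CapabilityServer for NoopServer {
    fn on_peer_connect(&self, peer: PeerId, caps: HashMap<CapabilityName, CapabilityVersion>) {
        info!("peer {} connected with caps {:?}", peer, caps);
    }

    async fn on_peer_event(&self, peer: PeerId, event: InboundEvent) {
        info!("peer {} sent event: {}", peer, event);
    }

    async fn next(&self, _peer: PeerId) -> OutboundEvent {
        // Never produce an egress message, which keeps the connection idle but open.
        futures::future::pending().await
    }
}

async fn start_swarm(secret_key: SecretKey) -> anyhow::Result<()> {
    let mut capability_mask = BTreeMap::new();
    capability_mask.insert(
        CapabilityId {
            name: CapabilityName(arrayvec::ArrayString::from("eth").unwrap()),
            version: 66,
        },
        17, // number of message ids reserved by the subprotocol
    );

    let swarm = Swarm::builder()
        .with_client_version("example-client/0.1.0".to_string())
        .with_listen_options(ListenOptions {
            discovery_tasks: StreamMap::new(), // no discovery: peers are added manually
            max_peers: 50,
            addr: "0.0.0.0:30303".parse().unwrap(),
            cidr: None,
        })
        .build(capability_mask, Arc::new(NoopServer), secret_key)
        .await?;

    // Boot nodes can then be dialed explicitly, e.g. via `swarm.add_peer(node_record)`.
    info!("swarm is up, currently dialing {} peers", swarm.dialing());
    Ok(())
}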
696 | pub fn add_peer( 697 | &self, 698 | node_record: NodeRecord, 699 | ) -> impl Future> + Send + 'static { 700 | self.add_peer_inner(node_record.addr, node_record.id, false) 701 | } 702 | 703 | fn add_peer_inner( 704 | &self, 705 | addr: SocketAddr, 706 | remote_id: PeerId, 707 | check_peer: bool, 708 | ) -> impl Future> + Send + 'static { 709 | let tasks = self.tasks.clone(); 710 | let streams = self.streams.clone(); 711 | let node_filter = self.node_filter.clone(); 712 | 713 | let capabilities = self.capabilities.clone(); 714 | let capability_set = capabilities.get_capabilities().to_vec(); 715 | let capability_server = self.capability_server.clone(); 716 | 717 | let secret_key = self.secret_key; 718 | let protocol_version = self.protocol_version; 719 | let client_version = self.client_version.clone(); 720 | let port = self.port; 721 | 722 | let (tx, rx) = tokio::sync::oneshot::channel(); 723 | let connection_id = Uuid::new_v4(); 724 | let currently_connecting = self.currently_connecting.clone(); 725 | 726 | // Start reaper task that will terminate this connection if connection future gets dropped. 727 | tasks.spawn_with_name(format!("connection {} reaper", connection_id), { 728 | let cid = connection_id; 729 | let streams = streams.clone(); 730 | let currently_connecting = currently_connecting.clone(); 731 | async move { 732 | if rx.await.is_err() { 733 | let mut s = streams.lock(); 734 | if let Entry::Occupied(entry) = s.mapping.entry(remote_id) { 735 | // If this is the same connection attempt, then remove. 736 | if let PeerState::Connecting { connection_id } = entry.get() { 737 | if *connection_id == cid { 738 | trace!("Reaping failed outbound connection: {}/{}", remote_id, cid); 739 | 740 | entry.remove(); 741 | } 742 | } 743 | } 744 | } 745 | currently_connecting.fetch_sub(1, Ordering::Relaxed); 746 | } 747 | }); 748 | 749 | async move { 750 | trace!("Received request to add peer {}", remote_id); 751 | let mut inserted = false; 752 | 753 | currently_connecting.fetch_add(1, Ordering::Relaxed); 754 | 755 | { 756 | let mut streams = streams.lock(); 757 | let node_filter = node_filter.lock(); 758 | 759 | let connection_num = streams.mapping.len(); 760 | 761 | match streams.mapping.entry(remote_id) { 762 | Entry::Occupied(key) => { 763 | debug!( 764 | "We are already {} to remote peer {}!", 765 | if key.get().is_connected() { 766 | "connected" 767 | } else { 768 | "connecting" 769 | }, 770 | remote_id 771 | ); 772 | } 773 | Entry::Vacant(vacant) => { 774 | if check_peer && !node_filter.allow(connection_num, remote_id) { 775 | trace!("rejecting peer {}", remote_id); 776 | } else { 777 | debug!("connecting to peer {} at {}", remote_id, addr); 778 | 779 | vacant.insert(PeerState::Connecting { connection_id }); 780 | inserted = true; 781 | } 782 | } 783 | } 784 | } 785 | 786 | if !inserted { 787 | return Ok(false); 788 | } 789 | 790 | // Connecting to peer is a long running operation so we have to break the mutex lock. 791 | let peer_res = async { 792 | let transport = TcpStream::connect(addr).await?; 793 | PeerStream::connect( 794 | transport, 795 | secret_key, 796 | remote_id, 797 | protocol_version, 798 | client_version, 799 | capability_set, 800 | port, 801 | ) 802 | .await 803 | } 804 | .await; 805 | 806 | let s = streams.clone(); 807 | let mut s = s.lock(); 808 | let PeerStreams { mapping } = &mut *s; 809 | 810 | // Adopt the new connection if the peer has not been dropped or superseded by incoming connection. 
811 |             if let Entry::Occupied(mut peer_state) = mapping.entry(remote_id) {
812 |                 if !peer_state.get().is_connected() {
813 |                     match peer_res {
814 |                         Ok(peer) => {
815 |                             assert_eq!(peer.remote_id(), remote_id);
816 |                             debug!("New peer connected: {}", remote_id);
817 | 
818 |                             *peer_state.get_mut() = PeerState::Connected(setup_peer_state(
819 |                                 Arc::downgrade(&streams),
820 |                                 capability_server,
821 |                                 remote_id,
822 |                                 peer,
823 |                             ));
824 | 
825 |                             let _ = tx.send(());
826 |                             return Ok(true);
827 |                         }
828 |                         Err(e) => {
829 |                             debug!("peer disconnected with error {}", e);
830 |                             peer_state.remove();
831 |                             return Err(e);
832 |                         }
833 |                     }
834 |                 }
835 |             }
836 | 
837 |             Ok(false)
838 |         }
839 |         .instrument(span!(Level::DEBUG, "add peer",))
840 |     }
841 | 
842 |     /// Returns the number of peers we're currently dialing.
843 |     pub fn dialing(&self) -> usize {
844 |         self.currently_connecting.load(Ordering::Relaxed)
845 |     }
846 | }
847 | 
848 | impl<C: CapabilityServer> Deref for Swarm<C> {
849 |     type Target = C;
850 | 
851 |     fn deref(&self) -> &Self::Target {
852 |         &*self.capability_server
853 |     }
854 | }
855 | 
--------------------------------------------------------------------------------
/src/transport.rs:
--------------------------------------------------------------------------------
 1 | use std::{fmt::Debug, net::SocketAddr};
 2 | use tokio::{
 3 |     io::{AsyncRead, AsyncWrite},
 4 |     net::TcpStream,
 5 | };
 6 | 
 7 | pub trait Transport: AsyncRead + AsyncWrite + Debug + Send + Unpin + 'static {
 8 |     fn remote_addr(&self) -> Option<SocketAddr>;
 9 | }
10 | 
11 | impl Transport for TcpStream {
12 |     fn remote_addr(&self) -> Option<SocketAddr> {
13 |         self.peer_addr().ok()
14 |     }
15 | }
16 | 
--------------------------------------------------------------------------------
/src/types.rs:
--------------------------------------------------------------------------------
 1 | use crate::{peer::DisconnectReason, util::*};
 2 | use arrayvec::ArrayString;
 3 | use async_trait::async_trait;
 4 | use auto_impl::auto_impl;
 5 | use bytes::Bytes;
 6 | use derive_more::Display;
 7 | use educe::Educe;
 8 | pub use ethereum_types::H512 as PeerId;
 9 | use rlp::{DecoderError, Rlp, RlpStream};
10 | use std::{collections::HashMap, fmt::Debug, net::SocketAddr, str::FromStr};
11 | 
12 | /// Record that specifies the information necessary to connect to an RLPx node
13 | #[derive(Clone, Copy, Debug)]
14 | pub struct NodeRecord {
15 |     /// Node ID.
16 |     pub id: PeerId,
17 |     /// Address of RLPx TCP server.
18 |     pub addr: SocketAddr,
19 | }
20 | 
21 | impl FromStr for NodeRecord {
22 |     type Err = Box<dyn std::error::Error + Send + Sync + 'static>;
23 | 
24 |     fn from_str(s: &str) -> Result<Self, Self::Err> {
25 |         const PREFIX: &str = "enode://";
26 | 
27 |         let (prefix, data) = s.split_at(PREFIX.len());
28 |         if prefix != PREFIX {
29 |             return Err("Not an enode".into());
30 |         }
31 | 
32 |         let mut parts = data.split('@');
33 |         let id = parts.next().ok_or("Failed to read remote ID")?.parse()?;
34 |         let addr = parts.next().ok_or("Failed to read address")?.parse()?;
35 | 
36 |         Ok(Self { id, addr })
37 |     }
38 | }
39 | 
40 | #[derive(Clone, Copy, Debug, Display, PartialEq, Eq, Hash, PartialOrd, Ord)]
41 | pub struct CapabilityName(pub ArrayString<[u8; 4]>);
42 | 
43 | impl rlp::Encodable for CapabilityName {
44 |     fn rlp_append(&self, s: &mut RlpStream) {
45 |         self.0.as_bytes().rlp_append(s);
46 |     }
47 | }
48 | 
49 | impl rlp::Decodable for CapabilityName {
50 |     fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
51 |         Ok(Self(
52 |             ArrayString::from(
53 |                 std::str::from_utf8(rlp.data()?)
54 |                     .map_err(|_| DecoderError::Custom("should be a UTF-8 string"))?,
55 |             )
56 |             .map_err(|_| DecoderError::RlpIsTooBig)?,
57 |         ))
58 |     }
59 | }
60 | 
61 | pub type CapabilityLength = usize;
62 | pub type CapabilityVersion = usize;
63 | 
64 | #[derive(Clone, Debug, Copy, PartialEq, Eq)]
65 | /// Capability information
66 | pub struct CapabilityInfo {
67 |     pub name: CapabilityName,
68 |     pub version: CapabilityVersion,
69 |     pub length: CapabilityLength,
70 | }
71 | 
72 | impl CapabilityInfo {
73 |     pub fn new(CapabilityId { name, version }: CapabilityId, length: CapabilityLength) -> Self {
74 |         Self {
75 |             name,
76 |             version,
77 |             length,
78 |         }
79 |     }
80 | }
81 | 
82 | #[derive(Clone, Debug, Display, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
83 | #[display(fmt = "{}/{}", name, version)]
84 | pub struct CapabilityId {
85 |     pub name: CapabilityName,
86 |     pub version: CapabilityVersion,
87 | }
88 | 
89 | impl From<CapabilityInfo> for CapabilityId {
90 |     fn from(CapabilityInfo { name, version, .. }: CapabilityInfo) -> Self {
91 |         Self { name, version }
92 |     }
93 | }
94 | 
95 | #[derive(Clone, Debug, Display)]
96 | pub enum InboundEvent {
97 |     #[display(
98 |         fmt = "disconnect/{}",
99 |         "reason.map(|r| r.to_string()).unwrap_or_else(|| \"(no reason)\".to_string())"
100 |     )]
101 |     Disconnect { reason: Option<DisconnectReason> },
102 |     #[display(fmt = "message/{}/{}", capability_name, "message.id.to_string()")]
103 |     Message {
104 |         capability_name: CapabilityName,
105 |         message: Message,
106 |     },
107 | }
108 | 
109 | #[derive(Clone, Debug)]
110 | pub enum OutboundEvent {
111 |     Disconnect {
112 |         reason: DisconnectReason,
113 |     },
114 |     Message {
115 |         capability_name: CapabilityName,
116 |         message: Message,
117 |     },
118 | }
119 | 
120 | #[async_trait]
121 | #[auto_impl(&, Box, Arc)]
122 | pub trait CapabilityServer: Send + Sync + 'static {
123 |     /// Should be used to set up relevant state for the peer.
124 |     fn on_peer_connect(&self, peer: PeerId, caps: HashMap<CapabilityName, CapabilityVersion>);
125 |     /// Called on the next event for the peer.
126 |     async fn on_peer_event(&self, peer: PeerId, event: InboundEvent);
127 |     /// Get the next event for the peer.
128 |     async fn next(&self, peer: PeerId) -> OutboundEvent;
129 | }
130 | 
131 | #[async_trait]
132 | impl CapabilityServer for () {
133 |     fn on_peer_connect(&self, _: PeerId, _: HashMap<CapabilityName, CapabilityVersion>) {}
134 | 
135 |     async fn on_peer_event(&self, _: PeerId, _: InboundEvent) {}
136 | 
137 |     async fn next(&self, _: PeerId) -> OutboundEvent {
138 |         futures::future::pending().await
139 |     }
140 | }
141 | 
142 | #[derive(Clone, Educe)]
143 | #[educe(Debug)]
144 | pub struct Message {
145 |     pub id: usize,
146 |     #[educe(Debug(method = "hex_debug"))]
147 |     pub data: Bytes,
148 | }
149 | 
--------------------------------------------------------------------------------
/src/util.rs:
--------------------------------------------------------------------------------
 1 | use crate::types::*;
 2 | use ethereum_types::H256;
 3 | use hmac::{Hmac, Mac, NewMac};
 4 | use secp256k1::PublicKey;
 5 | use sha2::Sha256;
 6 | use sha3::{Digest, Keccak256};
 7 | use std::fmt::{self, Formatter};
 8 | 
 9 | pub fn keccak256(data: &[u8]) -> H256 {
10 |     H256::from(Keccak256::digest(data).as_ref())
11 | }
12 | 
13 | pub fn sha256(data: &[u8]) -> H256 {
14 |     H256::from(Sha256::digest(data).as_ref())
15 | }
16 | 
17 | pub fn hmac_sha256(key: &[u8], input: &[&[u8]], auth_data: &[u8]) -> H256 {
18 |     let mut hmac = Hmac::<Sha256>::new_varkey(key).unwrap();
19 |     for input in input {
20 |         hmac.update(input);
21 |     }
22 |     hmac.update(auth_data);
23 |     H256::from_slice(&*hmac.finalize().into_bytes())
24 | }
25 | 
26 | pub fn pk2id(pk: &PublicKey) -> PeerId {
27 |     PeerId::from_slice(&pk.serialize_uncompressed()[1..])
28 | }
29 | 
30 | pub fn id2pk(id: PeerId) -> Result<PublicKey, secp256k1::Error> {
31 |     let mut s = [0_u8; 65];
32 |     s[0] = 4;
33 |     s[1..].copy_from_slice(&id.as_bytes());
34 |     PublicKey::from_slice(&s)
35 | }
36 | 
37 | pub fn hex_debug<T: AsRef<[u8]>>(s: &T, f: &mut Formatter) -> fmt::Result {
38 |     f.write_str(&hex::encode(&s))
39 | }
40 | 
41 | #[cfg(test)]
42 | mod tests {
43 |     use super::*;
44 |     use secp256k1::{SecretKey, SECP256K1};
45 | 
46 |     #[test]
47 |     fn pk2id2pk() {
48 |         let prikey = SecretKey::new(&mut secp256k1::rand::thread_rng());
49 |         let pubkey = PublicKey::from_secret_key(SECP256K1, &prikey);
50 |         assert_eq!(pubkey, id2pk(pk2id(&pubkey)).unwrap());
51 |     }
52 | }
53 | 
--------------------------------------------------------------------------------
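
Below is a minimal sketch of how a consumer of this crate might implement the `CapabilityServer` trait from src/types.rs, mirroring the no-op implementation for `()` above. The `NoopServer` type, its field names, and the crate-root import paths are illustrative assumptions, not part of this repository:

use async_trait::async_trait;
use devp2p::{CapabilityName, CapabilityServer, CapabilityVersion, InboundEvent, OutboundEvent, PeerId};
use parking_lot::Mutex;
use std::collections::{HashMap, HashSet};

/// Illustrative only: remembers connected peers and never sends anything.
#[derive(Default)]
struct NoopServer {
    peers: Mutex<HashSet<PeerId>>,
}

#[async_trait]
impl CapabilityServer for NoopServer {
    fn on_peer_connect(&self, peer: PeerId, _caps: HashMap<CapabilityName, CapabilityVersion>) {
        self.peers.lock().insert(peer);
    }

    async fn on_peer_event(&self, peer: PeerId, event: InboundEvent) {
        // A real server would dispatch `InboundEvent::Message` by capability;
        // here we only forget peers that disconnect.
        if let InboundEvent::Disconnect { .. } = event {
            self.peers.lock().remove(&peer);
        }
    }

    async fn next(&self, _peer: PeerId) -> OutboundEvent {
        // Like the `()` implementation above: never produce an outbound event.
        futures::future::pending().await
    }
}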
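
Similarly, a hedged sketch of parsing an enode URL via the `FromStr` impl on `NodeRecord` shown in src/types.rs; the node ID and socket address are made-up values, and the `devp2p::NodeRecord` path assumes a crate-root re-export:

use devp2p::NodeRecord; // assumes the type is re-exported at the crate root
use std::net::SocketAddr;

fn enode_example() {
    // Made-up 64-byte node ID, hex-encoded as 128 characters, plus a local address.
    let id_hex = "aa".repeat(64);
    let record: NodeRecord = format!("enode://{}@127.0.0.1:30303", id_hex)
        .parse()
        .unwrap();
    assert_eq!(record.addr, "127.0.0.1:30303".parse::<SocketAddr>().unwrap());
}

The node ID in an enode URL is the uncompressed secp256k1 public key with the leading 0x04 byte dropped, which is exactly the representation that `pk2id` and `id2pk` in src/util.rs convert to and from.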