├── examples ├── util │ ├── mod.rs │ └── global_config.rs ├── dht.rs ├── overlay_broadcast.rs ├── overlay_query.rs ├── rldp.rs ├── adnl.rs └── mainnet.json ├── rustfmt.toml ├── src ├── proto │ ├── mod.rs │ ├── rldp.rs │ ├── rpc.rs │ ├── overlay.rs │ └── dht.rs ├── lib.rs ├── util │ ├── updated_at.rs │ ├── mod.rs │ ├── address_list.rs │ ├── fast_rand.rs │ ├── network_builder.rs │ └── packets_history.rs ├── adnl │ ├── ping_subscriber.rs │ ├── queries_cache.rs │ ├── packet_view.rs │ ├── encryption.rs │ ├── socket.rs │ ├── transfer.rs │ ├── keystore.rs │ ├── node_id.rs │ ├── peer.rs │ └── handshake.rs ├── rldp │ ├── decoder.rs │ ├── compression.rs │ ├── encoder.rs │ ├── mod.rs │ ├── outgoing_transfer.rs │ ├── incoming_transfer.rs │ └── node.rs ├── overlay │ ├── broadcast_receiver.rs │ ├── mod.rs │ ├── overlay_id.rs │ └── node.rs ├── subscriber │ └── mod.rs ├── dht │ ├── peers_iter.rs │ ├── mod.rs │ ├── buckets.rs │ ├── futures.rs │ ├── streams.rs │ ├── entry.rs │ └── storage.rs └── scheme.tl ├── .gitignore ├── .github └── workflows │ └── master.yml ├── Cargo.toml ├── README.md └── LICENSE /examples/util/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod global_config; 2 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | format_code_in_doc_comments = true 2 | -------------------------------------------------------------------------------- /src/proto/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
# TL protocol models 2 | 3 | #![allow(clippy::enum_variant_names)] 4 | 5 | pub mod adnl; 6 | pub mod dht; 7 | pub mod overlay; 8 | pub mod rldp; 9 | pub mod rpc; 10 | 11 | pub type HashRef<'a> = &'a [u8; 32]; 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | 3 | # Generated by Cargo 4 | # will have compiled files and executables 5 | /target/ 6 | 7 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 8 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 9 | Cargo.lock 10 | 11 | # These are backup files generated by rustfmt 12 | **/*.rs.bk 13 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::too_many_arguments)] 2 | 3 | //! Implementation of the network part of the Everscale blockchain. 4 | 5 | macro_rules! 
ok { 6 | ($expr:expr) => { 7 | match $expr { 8 | Ok(val) => val, 9 | Err(err) => return Err(err), 10 | } 11 | }; 12 | } 13 | 14 | // Re-export TL-proto crate 15 | pub use everscale_crypto as crypto; 16 | pub use tl_proto as tl; 17 | 18 | pub use subscriber::{MessageSubscriber, QueryConsumingResult, QuerySubscriber, SubscriberContext}; 19 | pub use util::NetworkBuilder; 20 | 21 | pub mod adnl; 22 | #[cfg(feature = "dht")] 23 | pub mod dht; 24 | pub mod overlay; 25 | pub mod proto; 26 | #[cfg(feature = "rldp")] 27 | pub mod rldp; 28 | mod subscriber; 29 | pub mod util; 30 | -------------------------------------------------------------------------------- /src/util/updated_at.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicU64, Ordering}; 2 | use std::time::Instant; 3 | 4 | pub struct UpdatedAt { 5 | started_at: Instant, 6 | updated_at: AtomicU64, 7 | } 8 | 9 | impl UpdatedAt { 10 | pub fn new() -> Self { 11 | Self { 12 | started_at: Instant::now(), 13 | updated_at: Default::default(), 14 | } 15 | } 16 | 17 | pub fn refresh(&self) { 18 | self.updated_at 19 | .store(self.started_at.elapsed().as_secs(), Ordering::Release) 20 | } 21 | 22 | pub fn is_expired(&self, timeout: u64) -> bool { 23 | self.started_at.elapsed().as_secs() >= self.updated_at.load(Ordering::Acquire) + timeout 24 | } 25 | } 26 | 27 | impl Default for UpdatedAt { 28 | fn default() -> Self { 29 | Self::new() 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/adnl/ping_subscriber.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | 3 | use anyhow::Result; 4 | 5 | use crate::proto; 6 | use crate::subscriber::{QueryConsumingResult, QuerySubscriber, SubscriberContext}; 7 | 8 | pub struct PingSubscriber; 9 | 10 | #[async_trait::async_trait] 11 | impl QuerySubscriber for PingSubscriber { 12 | async fn try_consume_query<'a>( 13 
| &self, 14 | _: SubscriberContext<'a>, 15 | constructor: u32, 16 | query: Cow<'a, [u8]>, 17 | ) -> Result<QueryConsumingResult<'a>> { 18 | if constructor == proto::rpc::AdnlPing::TL_ID { 19 | let proto::rpc::AdnlPing { value } = tl_proto::deserialize(&query)?; 20 | QueryConsumingResult::consume(proto::adnl::Pong { value }) 21 | } else { 22 | Ok(QueryConsumingResult::Rejected(query)) 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /src/util/mod.rs: -------------------------------------------------------------------------------- 1 | //! # Basic primitives and helpers 2 | 3 | use std::collections::{HashMap, HashSet}; 4 | 5 | pub use self::network_builder::{ 6 | DeferredInitialization, DeferredInitializationList, NetworkBuilder, 7 | }; 8 | 9 | pub(crate) use self::address_list::*; 10 | pub(crate) use self::fast_rand::*; 11 | pub(crate) use self::packets_history::*; 12 | pub(crate) use self::updated_at::*; 13 | 14 | mod address_list; 15 | mod fast_rand; 16 | mod network_builder; 17 | mod packets_history; 18 | mod updated_at; 19 | 20 | pub(crate) type FastHashSet<K> = HashSet<K, FastHasherState>; 21 | pub(crate) type FastHashMap<K, V> = HashMap<K, V, FastHasherState>; 22 | pub(crate) type FastDashSet<K> = dashmap::DashSet<K, FastHasherState>; 23 | pub(crate) type FastDashMap<K, V> = dashmap::DashMap<K, V, FastHasherState>; 24 | pub(crate) type FastHasherState = ahash::RandomState; 25 | 26 | pub(crate) fn now() -> u32 { 27 | std::time::SystemTime::now() 28 | .duration_since(std::time::UNIX_EPOCH) 29 | .unwrap_or_default() 30 | .as_secs() as u32 31 | } 32 | -------------------------------------------------------------------------------- /src/rldp/decoder.rs: -------------------------------------------------------------------------------- 1 | use everscale_raptorq::{Decoder, EncodingPacket, ObjectTransmissionInformation, PayloadId}; 2 | 3 | use crate::proto::rldp::RaptorQFecType; 4 | 5 | pub struct RaptorQDecoder { 6 | engine: Decoder, 7 | params: RaptorQFecType, 8 | seqno: u32, 9 | } 10 | 11 | impl RaptorQDecoder { 12 | pub fn
with_params(params: RaptorQFecType) -> Self { 13 | Self { 14 | engine: Decoder::new(ObjectTransmissionInformation::with_defaults( 15 | params.total_len as u64, 16 | params.packet_len as u16, 17 | )), 18 | params, 19 | seqno: 0, 20 | } 21 | } 22 | 23 | pub fn decode(&mut self, seqno: u32, data: Vec<u8>) -> Option<Vec<u8>> { 24 | let packet = EncodingPacket::new(PayloadId::new(0, seqno), data); 25 | self.seqno = seqno; 26 | self.engine.decode(packet) 27 | } 28 | 29 | pub fn params(&self) -> &RaptorQFecType { 30 | &self.params 31 | } 32 | 33 | pub fn seqno(&self) -> u32 { 34 | self.seqno 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/util/address_list.rs: -------------------------------------------------------------------------------- 1 | use std::net::{Ipv4Addr, SocketAddrV4}; 2 | 3 | use super::now; 4 | use crate::proto; 5 | 6 | /// Validates address list and extracts socket address from it 7 | pub fn parse_address_list( 8 | list: &proto::adnl::AddressList, 9 | clock_tolerance: u32, 10 | ) -> Result<SocketAddrV4, AdnlAddressListError> { 11 | let address = list.address.ok_or(AdnlAddressListError::ListIsEmpty)?; 12 | 13 | let version = now(); 14 | if list.reinit_date > version + clock_tolerance { 15 | return Err(AdnlAddressListError::TooNewVersion); 16 | } 17 | 18 | if list.expire_at != 0 && list.expire_at < version { 19 | return Err(AdnlAddressListError::Expired); 20 | } 21 | 22 | Ok(SocketAddrV4::new( 23 | Ipv4Addr::from(address.ip), 24 | address.port as u16, 25 | )) 26 | } 27 | 28 | #[derive(thiserror::Error, Debug)] 29 | pub enum AdnlAddressListError { 30 | #[error("Address list is empty")] 31 | ListIsEmpty, 32 | #[error("Address list version is too new")] 33 | TooNewVersion, 34 | #[error("Address list is expired")] 35 | Expired, 36 | } 37 | 38 | #[cfg(test)] 39 | mod tests { 40 | use std::net::SocketAddrV4; 41 | 42 | #[test] 43 | fn correct_port_update() { 44 | let mut ip = SocketAddrV4::new(0x12345678.into(), 123); 45 | assert_eq!(ip.port(), 123); 46 |
47 | ip.set_port(4560); 48 | assert_eq!(ip.port(), 4560); 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/proto/rldp.rs: -------------------------------------------------------------------------------- 1 | use tl_proto::{TlRead, TlWrite}; 2 | 3 | use super::HashRef; 4 | 5 | #[derive(Debug, Copy, Clone, TlRead, TlWrite)] 6 | #[tl(boxed, scheme = "scheme.tl")] 7 | pub enum Message<'tl> { 8 | #[tl(id = "rldp.message")] 9 | Message { 10 | #[tl(size_hint = 32)] 11 | id: HashRef<'tl>, 12 | data: &'tl [u8], 13 | }, 14 | #[tl(id = "rldp.answer")] 15 | Answer { 16 | #[tl(size_hint = 32)] 17 | query_id: HashRef<'tl>, 18 | data: &'tl [u8], 19 | }, 20 | #[tl(id = "rldp.query")] 21 | Query { 22 | #[tl(size_hint = 32)] 23 | query_id: HashRef<'tl>, 24 | max_answer_size: u64, 25 | timeout: u32, 26 | data: &'tl [u8], 27 | }, 28 | } 29 | 30 | #[derive(Debug, Copy, Clone, TlRead, TlWrite)] 31 | #[tl(boxed, scheme = "scheme.tl")] 32 | pub enum MessagePart<'tl> { 33 | #[tl(id = "rldp.messagePart")] 34 | MessagePart { 35 | #[tl(size_hint = 32)] 36 | transfer_id: HashRef<'tl>, 37 | fec_type: RaptorQFecType, 38 | part: u32, 39 | total_size: u64, 40 | seqno: u32, 41 | data: &'tl [u8], 42 | }, 43 | #[tl(id = "rldp.confirm", size_hint = 40)] 44 | Confirm { 45 | transfer_id: HashRef<'tl>, 46 | part: u32, 47 | seqno: u32, 48 | }, 49 | #[tl(id = "rldp.complete", size_hint = 36)] 50 | Complete { 51 | transfer_id: HashRef<'tl>, 52 | part: u32, 53 | }, 54 | } 55 | 56 | #[derive(Debug, Copy, Clone, Eq, PartialEq, TlRead, TlWrite)] 57 | #[tl(boxed, id = "fec.raptorQ", size_hint = 12, scheme = "scheme.tl")] 58 | pub struct RaptorQFecType { 59 | pub total_len: u32, 60 | pub packet_len: u32, 61 | pub packet_count: u32, 62 | } 63 | -------------------------------------------------------------------------------- /.github/workflows/master.yml: -------------------------------------------------------------------------------- 1 | on: [push, 
pull_request] 2 | 3 | name: master 4 | 5 | jobs: 6 | check: 7 | name: Check 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout sources 11 | uses: actions/checkout@v2 12 | 13 | - name: Install stable toolchain 14 | uses: actions-rs/toolchain@v1 15 | with: 16 | profile: minimal 17 | toolchain: stable 18 | override: true 19 | 20 | - name: Run cargo check 21 | uses: actions-rs/cargo@v1 22 | with: 23 | command: check 24 | 25 | test: 26 | name: Test Suite 27 | runs-on: ubuntu-latest 28 | steps: 29 | - name: Checkout sources 30 | uses: actions/checkout@v2 31 | 32 | - name: Install stable toolchain 33 | uses: actions-rs/toolchain@v1 34 | with: 35 | profile: minimal 36 | toolchain: stable 37 | override: true 38 | 39 | - name: Run cargo test 40 | uses: actions-rs/cargo@v1 41 | with: 42 | command: test 43 | args: --all-targets 44 | 45 | lints: 46 | name: Lints 47 | runs-on: ubuntu-latest 48 | steps: 49 | - name: Checkout sources 50 | uses: actions/checkout@v2 51 | 52 | - name: Install stable toolchain 53 | uses: actions-rs/toolchain@v1 54 | with: 55 | profile: minimal 56 | toolchain: stable 57 | override: true 58 | components: rustfmt, clippy 59 | 60 | - name: Run cargo fmt 61 | uses: actions-rs/cargo@v1 62 | with: 63 | command: fmt 64 | args: --all -- --check 65 | 66 | - name: Run cargo clippy 67 | uses: actions-rs/cargo@v1 68 | with: 69 | command: clippy 70 | args: --all-targets -- -D warnings 71 | -------------------------------------------------------------------------------- /examples/dht.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{Context, Result}; 2 | use everscale_network::{adnl, NetworkBuilder}; 3 | use rand::Rng; 4 | use tl_proto::{TlRead, TlWrite}; 5 | 6 | use self::util::global_config; 7 | 8 | mod util; 9 | 10 | #[derive(TlWrite, TlRead)] 11 | #[tl(boxed, id = 0x11223344)] 12 | struct MyCustomData { 13 | counter: u32, 14 | } 15 | 16 | #[tokio::main] 17 | async fn main() -> Result<()> { 18 | 
tracing_subscriber::fmt::init(); 19 | 20 | const KEY_TAG: usize = 0; 21 | 22 | let global_config = 23 | serde_json::from_str::(include_str!("mainnet.json"))?; 24 | 25 | // Resolve public ip 26 | let my_ip = public_ip::addr_v4() 27 | .await 28 | .context("failed to resolve public ip address")?; 29 | 30 | // Create and fill keystore 31 | let keystore = adnl::Keystore::builder() 32 | .with_tagged_key(rand::thread_rng().gen(), KEY_TAG)? 33 | .build(); 34 | 35 | // Create basic network parts 36 | let (_adnl, dht) = NetworkBuilder::with_adnl((my_ip, 30000), keystore, Default::default()) 37 | .with_dht(KEY_TAG, Default::default()) 38 | .build()?; 39 | 40 | // Fill static nodes 41 | for global_config::DhtNode(peer) in global_config.dht_nodes { 42 | dht.add_dht_peer(peer)?; 43 | } 44 | 45 | tracing::info!("searching for DHT nodes"); 46 | let new_dht_nodes = dht.find_more_dht_nodes().await?; 47 | tracing::info!("found {new_dht_nodes} DHT nodes"); 48 | 49 | // Store some data in DHT 50 | let stored = dht 51 | .entry(dht.key().id(), "some_value") 52 | .with_data(MyCustomData { counter: 0 }) 53 | .with_ttl(3600) 54 | .sign_and_store(dht.key())? 
55 | .then_check(|_, MyCustomData { counter }| Ok(counter == 0)) 56 | .await?; 57 | assert!(stored); 58 | 59 | Ok(()) 60 | } 61 | -------------------------------------------------------------------------------- /src/proto/rpc.rs: -------------------------------------------------------------------------------- 1 | use tl_proto::{TlRead, TlWrite}; 2 | 3 | use super::{dht, overlay, HashRef}; 4 | 5 | #[derive(Copy, Clone, TlWrite, TlRead)] 6 | #[tl(boxed, id = "adnl.ping", size_hint = 8, scheme = "scheme.tl")] 7 | pub struct AdnlPing { 8 | pub value: u64, 9 | } 10 | 11 | #[derive(Copy, Clone, TlWrite, TlRead)] 12 | #[tl(boxed, id = "overlay.query", size_hint = 32, scheme = "scheme.tl")] 13 | pub struct OverlayQuery<'tl> { 14 | pub overlay: HashRef<'tl>, 15 | } 16 | 17 | #[derive(TlWrite, TlRead)] 18 | #[tl(boxed, id = "overlay.getRandomPeers", scheme = "scheme.tl")] 19 | pub struct OverlayGetRandomPeers<'tl> { 20 | pub peers: overlay::Nodes<'tl>, 21 | } 22 | 23 | #[derive(TlWrite, TlRead)] 24 | #[tl(boxed, id = "overlay.getRandomPeers", scheme = "scheme.tl")] 25 | pub struct OverlayGetRandomPeersOwned { 26 | pub peers: overlay::NodesOwned, 27 | } 28 | 29 | #[derive(TlWrite, TlRead)] 30 | #[tl(boxed, id = "dht.ping", size_hint = 8, scheme = "scheme.tl")] 31 | pub struct DhtPing { 32 | pub random_id: u64, 33 | } 34 | 35 | #[derive(TlWrite, TlRead)] 36 | #[tl(boxed, id = "dht.findNode", size_hint = 36, scheme = "scheme.tl")] 37 | pub struct DhtFindNode<'tl> { 38 | pub key: HashRef<'tl>, 39 | pub k: u32, 40 | } 41 | 42 | #[derive(TlWrite, TlRead)] 43 | #[tl(boxed, id = "dht.findValue", size_hint = 36, scheme = "scheme.tl")] 44 | pub struct DhtFindValue<'tl> { 45 | pub key: HashRef<'tl>, 46 | pub k: u32, 47 | } 48 | 49 | #[derive(TlWrite, TlRead)] 50 | #[tl(boxed, id = "dht.getSignedAddressList", scheme = "scheme.tl")] 51 | pub struct DhtGetSignedAddressList; 52 | 53 | #[derive(TlWrite, TlRead)] 54 | #[tl(boxed, id = "dht.store", scheme = "scheme.tl")] 55 | pub struct 
DhtStore<'tl> { 56 | pub value: dht::Value<'tl>, 57 | } 58 | 59 | #[derive(TlWrite, TlRead)] 60 | #[tl(boxed, id = "dht.query", scheme = "scheme.tl")] 61 | pub struct DhtQuery<'tl> { 62 | pub node: dht::Node<'tl>, 63 | } 64 | -------------------------------------------------------------------------------- /src/overlay/broadcast_receiver.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicU32, Ordering}; 2 | use std::sync::Arc; 3 | 4 | use crossbeam_queue::SegQueue; 5 | use tokio::sync::Barrier; 6 | 7 | pub struct BroadcastReceiver { 8 | data: SegQueue, 9 | barriers: SegQueue>, 10 | sync_lock: AtomicU32, 11 | } 12 | 13 | impl BroadcastReceiver { 14 | pub fn data_len(&self) -> usize { 15 | self.data.len() 16 | } 17 | 18 | pub fn barriers_len(&self) -> usize { 19 | self.barriers.len() 20 | } 21 | 22 | pub fn push(self: &Arc, data: T) { 23 | self.data.push(data); 24 | let receiver = self.clone(); 25 | tokio::spawn(async move { 26 | while receiver.sync_lock.load(Ordering::Acquire) > 0 { 27 | if let Some(barrier) = receiver.barriers.pop() { 28 | barrier.wait().await; 29 | break; 30 | } else { 31 | tokio::task::yield_now().await; 32 | } 33 | } 34 | }); 35 | } 36 | 37 | pub async fn pop(&self) -> T { 38 | self.sync_lock.fetch_add(1, Ordering::Release); 39 | loop { 40 | match self.data.pop() { 41 | Some(data) => { 42 | self.sync_lock.fetch_sub(1, Ordering::Release); 43 | return data; 44 | } 45 | None => { 46 | let barrier = Arc::new(Barrier::new(2)); 47 | self.barriers.push(barrier.clone()); 48 | barrier.wait().await; 49 | } 50 | } 51 | } 52 | } 53 | } 54 | 55 | impl Default for BroadcastReceiver { 56 | fn default() -> Self { 57 | Self { 58 | data: Default::default(), 59 | barriers: Default::default(), 60 | sync_lock: Default::default(), 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/adnl/queries_cache.rs: 
-------------------------------------------------------------------------------- 1 | use std::sync::{Arc, Weak}; 2 | 3 | use tokio::sync::oneshot; 4 | 5 | use crate::util::FastDashMap; 6 | 7 | pub type QueryId = [u8; 32]; 8 | 9 | #[derive(Default)] 10 | pub struct QueriesCache { 11 | queries: FastDashMap<QueryId, DataTx>, 12 | } 13 | 14 | impl QueriesCache { 15 | #[allow(unused)] 16 | pub fn is_empty(&self) -> bool { 17 | self.queries.is_empty() 18 | } 19 | 20 | pub fn len(&self) -> usize { 21 | self.queries.len() 22 | } 23 | 24 | pub fn add_query(self: &Arc<Self>, query_id: QueryId) -> PendingAdnlQuery { 25 | let (tx, rx) = oneshot::channel(); 26 | 27 | self.queries.insert(query_id, tx); 28 | 29 | PendingAdnlQuery { 30 | query_id, 31 | data_rx: Some(rx), 32 | cache: Arc::downgrade(self), 33 | finished: false, 34 | } 35 | } 36 | 37 | pub fn update_query(&self, query_id: &QueryId, answer: &[u8]) { 38 | if let Some((_, tx)) = self.queries.remove(query_id) { 39 | tx.send(answer.to_vec()).ok(); 40 | } 41 | } 42 | } 43 | 44 | pub struct PendingAdnlQuery { 45 | query_id: QueryId, 46 | data_rx: Option<DataRx>, 47 | cache: Weak<QueriesCache>, 48 | finished: bool, 49 | } 50 | 51 | impl PendingAdnlQuery { 52 | pub async fn wait(mut self) -> Option<Vec<u8>> { 53 | // SAFETY: `data_rx` is guaranteed to be `Some` 54 | let data_rx = unsafe { self.data_rx.take().unwrap_unchecked() }; 55 | let data = data_rx.await.ok(); 56 | self.finished = true; 57 | data 58 | } 59 | } 60 | 61 | impl Drop for PendingAdnlQuery { 62 | fn drop(&mut self) { 63 | if self.finished { 64 | return; 65 | } 66 | 67 | if let Some(cache) = self.cache.upgrade() { 68 | cache.queries.remove(&self.query_id); 69 | } 70 | } 71 | } 72 | 73 | type DataTx = oneshot::Sender<Vec<u8>>; 74 | type DataRx = oneshot::Receiver<Vec<u8>>; 75 | -------------------------------------------------------------------------------- /src/adnl/packet_view.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Index, IndexMut, Range, RangeFrom, RangeTo}; 2 |
3 | pub struct PacketView<'a> { 4 | bytes: &'a mut [u8], 5 | } 6 | 7 | impl<'a> PacketView<'a> { 8 | #[inline(always)] 9 | pub const fn as_ptr(&self) -> *const u8 { 10 | self.bytes.as_ptr() 11 | } 12 | 13 | #[inline(always)] 14 | pub const fn as_slice(&self) -> &[u8] { 15 | self.bytes 16 | } 17 | 18 | #[inline(always)] 19 | pub fn len(&self) -> usize { 20 | self.bytes.len() 21 | } 22 | 23 | pub fn remove_prefix(&mut self, prefix_len: usize) { 24 | let len = self.bytes.len(); 25 | let ptr = self.bytes.as_mut_ptr(); 26 | // SAFETY: `bytes` is already a reference bounded by a lifetime 27 | self.bytes = 28 | unsafe { std::slice::from_raw_parts_mut(ptr.add(prefix_len), len - prefix_len) }; 29 | } 30 | } 31 | 32 | impl Index> for PacketView<'_> { 33 | type Output = [u8]; 34 | 35 | fn index(&self, index: RangeTo) -> &Self::Output { 36 | self.bytes.index(index) 37 | } 38 | } 39 | 40 | impl Index> for PacketView<'_> { 41 | type Output = [u8]; 42 | 43 | fn index(&self, index: Range) -> &Self::Output { 44 | self.bytes.index(index) 45 | } 46 | } 47 | 48 | impl IndexMut> for PacketView<'_> { 49 | fn index_mut(&mut self, index: Range) -> &mut Self::Output { 50 | self.bytes.index_mut(index) 51 | } 52 | } 53 | 54 | impl Index> for PacketView<'_> { 55 | type Output = [u8]; 56 | 57 | fn index(&self, index: RangeFrom) -> &Self::Output { 58 | self.bytes.index(index) 59 | } 60 | } 61 | 62 | impl IndexMut> for PacketView<'_> { 63 | fn index_mut(&mut self, index: RangeFrom) -> &mut Self::Output { 64 | self.bytes.index_mut(index) 65 | } 66 | } 67 | 68 | impl<'a> From<&'a mut [u8]> for PacketView<'a> { 69 | fn from(bytes: &'a mut [u8]) -> Self { 70 | Self { bytes } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/rldp/compression.rs: -------------------------------------------------------------------------------- 1 | pub fn compress(data: &mut Vec) -> std::io::Result<()> { 2 | let uncompressed = data.len(); 3 | if uncompressed <= 
COMPRESSION_THRESHOLD { 4 | return Ok(()); 5 | } 6 | 7 | let mut result = Vec::with_capacity(data.len() + 1); 8 | ok!(zstd::stream::copy_encode( 9 | &mut data.as_slice(), 10 | &mut result, 11 | COMPRESSION_LEVEL 12 | )); 13 | ok!(zstd::stream::copy_encode( 14 | &mut (uncompressed as u32).to_be_bytes().as_slice(), 15 | &mut result, 16 | COMPRESSION_LEVEL, 17 | )); 18 | result.push(TAG_COMPRESSED); 19 | 20 | *data = result; 21 | Ok(()) 22 | } 23 | 24 | pub fn decompress(data: &[u8]) -> Option<Vec<u8>> { 25 | if data.last() != Some(&TAG_COMPRESSED) { 26 | return None; 27 | } 28 | 29 | let len = data.len(); 30 | match zstd::stream::decode_all(&mut &data[..len - 1]) { 31 | Ok(mut data) if data.len() >= 4 => { 32 | let len = data.len(); 33 | 34 | let src_len = ((data[len - 4] as usize) << 24) 35 | | ((data[len - 3] as usize) << 16) 36 | | ((data[len - 2] as usize) << 8) 37 | | (data[len - 1] as usize); 38 | 39 | if src_len != len - 4 { 40 | return None; 41 | } 42 | 43 | data.truncate(src_len); 44 | Some(data) 45 | } 46 | _ => None, 47 | } 48 | } 49 | 50 | const COMPRESSION_THRESHOLD: usize = 256; 51 | const COMPRESSION_LEVEL: i32 = 3; 52 | 53 | const TAG_COMPRESSED: u8 = 0x80; 54 | 55 | #[cfg(test)] 56 | mod tests { 57 | use super::*; 58 | use rand::Rng; 59 | 60 | #[test] 61 | fn correct_compression() { 62 | let data = std::iter::repeat_with(|| rand::thread_rng().gen()) 63 | .take(1000) 64 | .collect::<Vec<u8>>(); 65 | 66 | let mut compressed = data.clone(); 67 | compress(&mut compressed).unwrap(); 68 | 69 | let decompressed = decompress(&compressed).unwrap(); 70 | assert_eq!(decompressed, data); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/rldp/encoder.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use everscale_raptorq::{Encoder, EncodingPacket}; 3 | 4 | use crate::proto::rldp::RaptorQFecType; 5 | 6 | pub struct RaptorQEncoder { 7 | engine: Encoder, 8 | params:
RaptorQFecType, 9 | source_packets: Vec, 10 | encoder_index: usize, 11 | } 12 | 13 | impl RaptorQEncoder { 14 | pub fn with_data(data: &[u8]) -> Self { 15 | let engine = Encoder::with_defaults(data, MAX_TRANSMISSION_UNIT as u16); 16 | let source_packets = engine 17 | .get_block_encoders() 18 | .iter() 19 | .flat_map(|encoder| encoder.source_packets().into_iter().rev()) 20 | .collect::>(); 21 | 22 | Self { 23 | engine, 24 | params: RaptorQFecType { 25 | total_len: data.len() as u32, 26 | packet_len: MAX_TRANSMISSION_UNIT, 27 | packet_count: source_packets.len() as u32, 28 | }, 29 | source_packets, 30 | encoder_index: 0, 31 | } 32 | } 33 | 34 | pub fn encode(&mut self, seqno: &mut u32) -> Result> { 35 | let packet = if let Some(packet) = self.source_packets.pop() { 36 | packet 37 | } else { 38 | let encoders = self.engine.get_block_encoders(); 39 | let packet = match encoders[self.encoder_index].repair_packets(*seqno, 1).pop() { 40 | Some(packet) => packet, 41 | None => return Err(EncoderError::FailedToEncode.into()), 42 | }; 43 | self.encoder_index = (self.encoder_index + 1) % encoders.len(); 44 | packet 45 | }; 46 | 47 | let (payload_id, data) = packet.split(); 48 | *seqno = payload_id.encoding_symbol_id(); 49 | 50 | Ok(data) 51 | } 52 | 53 | #[inline(always)] 54 | pub fn params(&self) -> &RaptorQFecType { 55 | &self.params 56 | } 57 | } 58 | 59 | #[derive(thiserror::Error, Debug)] 60 | enum EncoderError { 61 | #[error("Failed to encode repair packet")] 62 | FailedToEncode, 63 | } 64 | 65 | pub const MAX_TRANSMISSION_UNIT: u32 = 768; 66 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "everscale-network" 3 | version = "0.5.5" 4 | description = "Implementation of the network part of the Everscale blockchain" 5 | repository = "https://github.com/broxus/everscale-network" 6 | authors = ["Ivan Kalinin "] 7 | 
rust-version = "1.64.0" 8 | edition = "2021" 9 | include = ["src/**/*.rs", "src/**/*.tl", "README.md"] 10 | license = "Apache-2.0" 11 | 12 | [package.metadata.docs.rs] 13 | all-features = true 14 | 15 | [[example]] 16 | name = "adnl" 17 | path = "examples/adnl.rs" 18 | 19 | [[example]] 20 | name = "rldp" 21 | path = "examples/rldp.rs" 22 | 23 | [[example]] 24 | name = "dht" 25 | path = "examples/dht.rs" 26 | 27 | [[example]] 28 | name = "overlay-broadcast" 29 | path = "examples/overlay_broadcast.rs" 30 | 31 | [[example]] 32 | name = "overlay-query" 33 | path = "examples/overlay_query.rs" 34 | 35 | [profile.release] 36 | debug = true 37 | 38 | [dependencies] 39 | aes = "0.8" 40 | ahash = "0.8" 41 | anyhow = "1.0" 42 | async-trait = "0.1" 43 | bytes = "1" 44 | crossbeam-queue = { version = "0.3", optional = true } 45 | ctr = "0.9" 46 | dashmap = "5.4" 47 | everscale-crypto = "0.2.0-pre.1" 48 | everscale-raptorq = { version = "1.7.0", optional = true } 49 | frunk_core = "0.4" 50 | futures-util = "0.3" 51 | generic-array = "0.14" 52 | hex = "0.4" 53 | libc = "0.2" 54 | once_cell = "1.13.0" 55 | parking_lot = { version = "0.12", features = ["hardware-lock-elision"] } 56 | rand = { version = "0.8", features = ["small_rng"] } 57 | serde = { version = "1.0", features = ["derive"] } 58 | sha2 = "0.10" 59 | smallvec = { version = "1.9.0", features = ["union", "const_generics"] } 60 | thiserror = "1.0" 61 | tl-proto = { version = "0.4", features = ["derive", "bytes"] } 62 | tokio = { version = "1", features = ["sync", "net", "rt", "time", "io-util", "macros"] } 63 | tokio-util = "0.7.0" 64 | tracing = "0.1" 65 | zstd = { version = "0.12", optional = true } 66 | 67 | [dev-dependencies] 68 | base64 = "0.21" 69 | serde_json = "1.0" 70 | public-ip = "0.2" 71 | tokio = { version = "1", features = ["rt-multi-thread", "parking_lot"] } 72 | tracing-subscriber = "0.3" 73 | 74 | [features] 75 | default = ["log", "rldp", "dht", "overlay"] 76 | log = ["tracing/log"] 77 | rldp = 
["dep:everscale-raptorq", "dep:zstd"] 78 | dht = [] 79 | overlay = ["rldp", "dep:crossbeam-queue"] 80 | -------------------------------------------------------------------------------- /src/util/fast_rand.rs: -------------------------------------------------------------------------------- 1 | use std::cell::UnsafeCell; 2 | use std::mem::MaybeUninit; 3 | use std::rc::Rc; 4 | use std::thread_local; 5 | 6 | use rand::rngs::SmallRng; 7 | use rand::{RngCore, SeedableRng}; 8 | 9 | thread_local!( 10 | static THREAD_RNG_KEY: Rc> = { 11 | Rc::new(UnsafeCell::new(SmallRng::from_rng(&mut rand::thread_rng()).unwrap())) 12 | } 13 | ); 14 | 15 | pub fn fast_thread_rng() -> SmallThreadRng { 16 | let rng = THREAD_RNG_KEY.with(|t| t.clone()); 17 | SmallThreadRng { rng } 18 | } 19 | 20 | pub fn gen_fast_bytes() -> [u8; N] { 21 | THREAD_RNG_KEY.with(|t| { 22 | unsafe { 23 | // SAFETY: We must make sure to stop using `rng` before anyone else 24 | // creates another mutable reference 25 | let rng = &mut *t.get(); 26 | 27 | let mut id = MaybeUninit::<[u8; N]>::uninit(); 28 | rng.fill_bytes(&mut *id.as_mut_ptr() as &mut [u8; N]); 29 | 30 | id.assume_init() 31 | } 32 | }) 33 | } 34 | 35 | pub struct SmallThreadRng { 36 | rng: Rc>, 37 | } 38 | 39 | impl RngCore for SmallThreadRng { 40 | #[inline(always)] 41 | fn next_u32(&mut self) -> u32 { 42 | // SAFETY: We must make sure to stop using `rng` before anyone else 43 | // creates another mutable reference 44 | let rng = unsafe { &mut *self.rng.get() }; 45 | rng.next_u32() 46 | } 47 | 48 | #[inline(always)] 49 | fn next_u64(&mut self) -> u64 { 50 | // SAFETY: We must make sure to stop using `rng` before anyone else 51 | // creates another mutable reference 52 | let rng = unsafe { &mut *self.rng.get() }; 53 | rng.next_u64() 54 | } 55 | 56 | fn fill_bytes(&mut self, dest: &mut [u8]) { 57 | // SAFETY: We must make sure to stop using `rng` before anyone else 58 | // creates another mutable reference 59 | let rng = unsafe { &mut *self.rng.get() }; 
60 | rng.fill_bytes(dest) 61 | } 62 | 63 | fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> { 64 | // SAFETY: We must make sure to stop using `rng` before anyone else 65 | // creates another mutable reference 66 | let rng = unsafe { &mut *self.rng.get() }; 67 | rng.try_fill_bytes(dest) 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/adnl/encryption.rs: -------------------------------------------------------------------------------- 1 | use sha2::Digest; 2 | 3 | pub fn build_packet_cipher(shared_secret: &[u8; 32], checksum: &[u8; 32]) -> Aes256Ctr { 4 | use aes::cipher::KeyIvInit; 5 | 6 | let mut aes_key_bytes: [u8; 32] = *shared_secret; 7 | aes_key_bytes[16..32].copy_from_slice(&checksum[16..32]); 8 | let mut aes_ctr_bytes: [u8; 16] = checksum[0..16].try_into().unwrap(); 9 | aes_ctr_bytes[4..16].copy_from_slice(&shared_secret[20..32]); 10 | 11 | Aes256Ctr::new( 12 | &generic_array::GenericArray::from(aes_key_bytes), 13 | &generic_array::GenericArray::from(aes_ctr_bytes), 14 | ) 15 | } 16 | 17 | pub fn compute_packet_data_hash(version: Option<u16>, data: &[u8]) -> [u8; 32] { 18 | match version { 19 | Some(version) => { 20 | let mut hash = sha2::Sha256::new(); 21 | hash.update(version.to_be_bytes()); 22 | hash.update(data); 23 | hash.finalize() 24 | } 25 | None => sha2::Sha256::digest(data), 26 | } 27 | .into() 28 | } 29 | 30 | pub fn decode_version<const LEN: usize>(prefix: &[u8; LEN]) -> Option<u16> { 31 | let end: usize = LEN - 32; 32 | let start: usize = end - 4; 33 | 34 | let mut xor: [u8; 4] = prefix[start..end].try_into().unwrap(); 35 | for (i, byte) in prefix[..start].iter().enumerate() { 36 | xor[i % 4] ^= *byte; 37 | } 38 | for (i, byte) in prefix[end..].iter().enumerate() { 39 | xor[i % 4] ^= *byte; 40 | } 41 | if xor[0] == xor[2] && xor[1] == xor[3] { 42 | Some(u16::from_be_bytes(xor[..2].try_into().unwrap())) 43 | } else { 44 | None 45 | } 46 | } 47 | 48 | pub type Aes256Ctr = ctr::Ctr64BE<aes::Aes256>; 49 | 50 |
#[cfg(test)] 51 | mod tests { 52 | use aes::cipher::{StreamCipher, StreamCipherSeek}; 53 | use rand::Rng; 54 | 55 | use super::*; 56 | 57 | #[test] 58 | fn double_encode() { 59 | let data: [u8; 32] = rand::thread_rng().gen(); 60 | 61 | let mut cipher = build_packet_cipher(&rand::thread_rng().gen(), &rand::thread_rng().gen()); 62 | 63 | let mut encoded_data = data; 64 | cipher.apply_keystream(&mut encoded_data); 65 | assert_ne!(encoded_data, data); 66 | 67 | cipher.seek(0); 68 | cipher.apply_keystream(&mut encoded_data); 69 | assert_eq!(encoded_data, data); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /examples/overlay_broadcast.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use anyhow::{Context, Result}; 4 | 5 | use everscale_network::{adnl, overlay, NetworkBuilder}; 6 | use rand::Rng; 7 | 8 | use self::util::global_config; 9 | 10 | mod util; 11 | 12 | #[tokio::main] 13 | async fn main() -> Result<()> { 14 | tracing_subscriber::fmt::init(); 15 | 16 | const KEY_TAG: usize = 0; 17 | 18 | let global_config = 19 | serde_json::from_str::(include_str!("mainnet.json"))?; 20 | 21 | // Resolve public ip 22 | let my_ip = public_ip::addr_v4() 23 | .await 24 | .context("failed to resolve public ip address")?; 25 | 26 | // Create and fill keystore 27 | let keystore = adnl::Keystore::builder() 28 | .with_tagged_key(rand::thread_rng().gen(), KEY_TAG)? 
29 | .build(); 30 | 31 | // Create basic network parts 32 | let (adnl, dht, _rldp, overlay) = 33 | NetworkBuilder::with_adnl((my_ip, 0), keystore, Default::default()) 34 | .with_dht(KEY_TAG, Default::default()) 35 | .with_rldp(Default::default()) 36 | .with_overlay(KEY_TAG) 37 | .build()?; 38 | 39 | // Fill static nodes 40 | for global_config::DhtNode(peer) in global_config.dht_nodes { 41 | dht.add_dht_peer(peer)?; 42 | } 43 | 44 | let new_dht_nodes = dht.find_more_dht_nodes().await?; 45 | tracing::info!("found {new_dht_nodes} DHT nodes"); 46 | 47 | // Add masterchain overlay 48 | let mc_overlay_id = 49 | overlay::IdFull::for_workchain_overlay(-1, &global_config.zero_state.file_hash) 50 | .compute_short_id(); 51 | let (workchain_overlay, _) = overlay.add_public_overlay(&mc_overlay_id, Default::default()); 52 | 53 | // Populate overlay with nodes 54 | let overlay_nodes = dht 55 | .find_overlay_nodes(&mc_overlay_id) 56 | .await 57 | .context("failed to find overlay nodes")?; 58 | tracing::info!("found {} overlay nodes", overlay_nodes.len()); 59 | 60 | for (ip, node) in overlay_nodes { 61 | workchain_overlay.add_public_peer(&adnl, ip, node.as_equivalent_ref())?; 62 | } 63 | 64 | // Broadcast something 65 | workchain_overlay.broadcast( 66 | &adnl, 67 | vec![0; 10], 68 | None, 69 | overlay::BroadcastTarget::RandomNeighbours, 70 | ); 71 | 72 | // NOTE: broadcast is just fire-and-forget, so wait a bit 73 | tokio::time::sleep(Duration::from_secs(1)).await; 74 | 75 | // Done 76 | Ok(()) 77 | } 78 | -------------------------------------------------------------------------------- /src/subscriber/mod.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::sync::Arc; 3 | 4 | use anyhow::Result; 5 | use tl_proto::TlRead; 6 | 7 | use crate::adnl; 8 | 9 | /// ADNL custom messages subscriber 10 | #[async_trait::async_trait] 11 | pub trait MessageSubscriber: Send + Sync { 12 | async fn try_consume_custom<'a>( 13 | 
&self, 14 | ctx: SubscriberContext<'a>, 15 | constructor: u32, 16 | data: &'a [u8], 17 | ) -> Result; 18 | } 19 | 20 | /// ADNL, RLDP or overlay queries subscriber 21 | #[async_trait::async_trait] 22 | pub trait QuerySubscriber: Send + Sync { 23 | async fn try_consume_query<'a>( 24 | &self, 25 | ctx: SubscriberContext<'a>, 26 | constructor: u32, 27 | query: Cow<'a, [u8]>, 28 | ) -> Result>; 29 | } 30 | 31 | /// Message or query context. 32 | /// 33 | /// See [`MessageSubscriber::try_consume_custom`] and [`QuerySubscriber::try_consume_query`] 34 | #[derive(Copy, Clone)] 35 | pub struct SubscriberContext<'a> { 36 | pub adnl: &'a Arc, 37 | pub local_id: &'a adnl::NodeIdShort, 38 | pub peer_id: &'a adnl::NodeIdShort, 39 | } 40 | 41 | /// Subscriber response for consumed query 42 | pub enum QueryConsumingResult<'a> { 43 | /// Query is accepted and processed 44 | Consumed(Option>), 45 | /// Query rejected and will be processed by the next subscriber 46 | Rejected(Cow<'a, [u8]>), 47 | } 48 | 49 | impl QueryConsumingResult<'_> { 50 | pub fn consume(answer: T) -> Result 51 | where 52 | T: tl_proto::TlWrite, 53 | { 54 | Ok(Self::Consumed(Some(tl_proto::serialize(answer)))) 55 | } 56 | } 57 | 58 | pub(crate) async fn process_query<'a>( 59 | ctx: SubscriberContext<'a>, 60 | subscribers: &[Arc], 61 | mut query: Cow<'_, [u8]>, 62 | ) -> Result>> { 63 | let constructor = u32::read_from(&query, &mut 0)?; 64 | 65 | for subscriber in subscribers { 66 | query = match subscriber 67 | .try_consume_query(ctx, constructor, query) 68 | .await? 
69 | { 70 | QueryConsumingResult::Consumed(answer) => { 71 | return Ok(QueryProcessingResult::Processed(answer)) 72 | } 73 | QueryConsumingResult::Rejected(query) => query, 74 | }; 75 | } 76 | 77 | Ok(QueryProcessingResult::Rejected) 78 | } 79 | 80 | pub(crate) enum QueryProcessingResult { 81 | Processed(Option), 82 | Rejected, 83 | } 84 | -------------------------------------------------------------------------------- /src/dht/peers_iter.rs: -------------------------------------------------------------------------------- 1 | use super::buckets::get_affinity; 2 | use super::node::Node; 3 | use super::storage::StorageKeyId; 4 | use crate::adnl; 5 | 6 | pub struct PeersIter { 7 | key_id: StorageKeyId, 8 | peer_ids: Vec<(u8, adnl::NodeIdShort)>, 9 | index: usize, 10 | } 11 | 12 | impl PeersIter { 13 | pub fn with_key_id(key_id: StorageKeyId) -> Self { 14 | Self { 15 | key_id, 16 | peer_ids: Default::default(), 17 | index: 0, 18 | } 19 | } 20 | 21 | pub fn next(&mut self) -> Option { 22 | self.peer_ids.pop().map(|(_, peer_id)| peer_id) 23 | } 24 | 25 | pub fn fill(&mut self, dht: &Node, batch_len: Option) { 26 | // Get next peer (skipping bad peers) and update the index 27 | while let Some(peer_id) = self.next_known_peer(dht) { 28 | let affinity = get_affinity(&self.key_id, peer_id.as_slice()); 29 | 30 | // Keep adding peer ids until max tasks is reached 31 | // or there are values with higher affinity 32 | let add = match (self.peer_ids.last(), batch_len) { 33 | (None, _) | (_, None) => true, 34 | (Some((top_affinity, _)), Some(batch_len)) => { 35 | *top_affinity <= affinity || self.peer_ids.len() < batch_len 36 | } 37 | }; 38 | 39 | if add { 40 | self.peer_ids.push((affinity, peer_id)) 41 | } 42 | } 43 | 44 | // Sort peer ids by descending affinity 45 | self.peer_ids 46 | .sort_unstable_by_key(|(affinity, _)| std::cmp::Reverse(*affinity)); 47 | 48 | if let Some(batch_len) = batch_len { 49 | if let Some(top_affinity) = self.peer_ids.first().map(|(affinity, _)| 
*affinity) { 50 | let mut offset = 0usize; 51 | tracing::trace!(top_affinity, batch_len, "clearing peer ids"); 52 | self.peer_ids.retain(|(affinity, _)| { 53 | let retain = offset < batch_len || *affinity >= top_affinity; 54 | offset += 1; 55 | retain 56 | }); 57 | } 58 | } 59 | } 60 | 61 | fn next_known_peer(&mut self, dht: &Node) -> Option { 62 | loop { 63 | let peer_id = dht.known_peers().get(self.index); 64 | self.index += 1; 65 | 66 | if let Some(peer) = &peer_id { 67 | if dht.is_bad_peer(peer) { 68 | continue; 69 | } 70 | } 71 | 72 | break peer_id; 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/dht/mod.rs: -------------------------------------------------------------------------------- 1 | //! ## DHT - Kademlia-like Distributed Hash Table 2 | //! 3 | //! TODO 4 | 5 | use std::sync::Arc; 6 | 7 | use anyhow::Result; 8 | use frunk_core::hlist::{HCons, HList, IntoTuple2, Selector}; 9 | use frunk_core::indices::There; 10 | 11 | pub use entry::Entry; 12 | pub use node::{Node, NodeMetrics, NodeOptions}; 13 | 14 | use crate::adnl; 15 | use crate::util::{DeferredInitialization, NetworkBuilder}; 16 | 17 | mod buckets; 18 | mod entry; 19 | mod node; 20 | mod peers_iter; 21 | mod storage; 22 | 23 | /// DHT helper futures 24 | pub mod futures; 25 | /// DHT helper streams 26 | pub mod streams; 27 | 28 | pub(crate) type Deferred = Result<(Arc, usize, NodeOptions)>; 29 | 30 | impl DeferredInitialization for Deferred { 31 | type Initialized = Arc; 32 | 33 | fn initialize(self) -> Result { 34 | let (adnl, key_tag, options) = self?; 35 | Node::new(adnl, key_tag, options) 36 | } 37 | } 38 | 39 | impl NetworkBuilder 40 | where 41 | L: HList + Selector, 42 | HCons: IntoTuple2, 43 | { 44 | /// Creates DHT network layer 45 | /// 46 | /// # Examples 47 | /// 48 | /// ``` 49 | /// # use std::error::Error; 50 | /// # use anyhow::Result; 51 | /// # use everscale_network::{adnl, dht, NetworkBuilder}; 52 | /// 53 | /// 
#[tokio::main] 54 | /// async fn main() -> Result<()> { 55 | /// const DHT_KEY_TAG: usize = 0; 56 | /// 57 | /// let keystore = adnl::Keystore::builder() 58 | /// .with_tagged_key([0; 32], DHT_KEY_TAG)? 59 | /// .build(); 60 | /// 61 | /// let adnl_options = adnl::NodeOptions::default(); 62 | /// let dht_options = dht::NodeOptions::default(); 63 | /// 64 | /// let (adnl, dht) = NetworkBuilder::with_adnl("127.0.0.1:10000", keystore, adnl_options) 65 | /// .with_dht(DHT_KEY_TAG, dht_options) 66 | /// .build()?; 67 | /// Ok(()) 68 | /// } 69 | /// ``` 70 | #[allow(clippy::type_complexity)] 71 | pub fn with_dht( 72 | self, 73 | key_tag: usize, 74 | options: NodeOptions, 75 | ) -> NetworkBuilder, (There, There)> { 76 | let deferred = match self.0.get() { 77 | Ok(adnl) => Ok((adnl.clone(), key_tag, options)), 78 | Err(_) => Err(anyhow::anyhow!("ADNL was not initialized")), 79 | }; 80 | NetworkBuilder(self.0.prepend(deferred), Default::default()) 81 | } 82 | } 83 | 84 | /// DHT key name used for storing nodes socket address 85 | pub const KEY_ADDRESS: &str = "address"; 86 | 87 | /// DHT key name used for storing overlay nodes 88 | pub const KEY_NODES: &str = "nodes"; 89 | 90 | /// Max allowed DHT peers in the network 91 | pub const MAX_DHT_PEERS: u32 = 65536; 92 | -------------------------------------------------------------------------------- /examples/util/global_config.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddrV4; 2 | 3 | use base64::Engine as _; 4 | use everscale_network::proto; 5 | use serde::{de::Error, Deserialize, Deserializer}; 6 | 7 | #[derive(Deserialize)] 8 | pub struct GlobalConfig { 9 | pub zero_state: ZeroState, 10 | pub dht_nodes: Vec, 11 | } 12 | 13 | pub struct ZeroState { 14 | pub file_hash: [u8; 32], 15 | } 16 | 17 | impl<'de> Deserialize<'de> for ZeroState { 18 | fn deserialize(deserializer: D) -> Result 19 | where 20 | D: Deserializer<'de>, 21 | { 22 | #[derive(Deserialize)] 23 | 
struct Entry { 24 | file_hash: String, 25 | } 26 | 27 | let entry = Entry::deserialize(deserializer)?; 28 | 29 | Ok(ZeroState { 30 | file_hash: base64::engine::general_purpose::STANDARD 31 | .decode(entry.file_hash) 32 | .map_err(Error::custom)? 33 | .try_into() 34 | .map_err(|_| Error::custom("invalid zerostate file hash"))?, 35 | }) 36 | } 37 | } 38 | 39 | pub struct DhtNode(pub proto::dht::NodeOwned); 40 | 41 | impl<'de> Deserialize<'de> for DhtNode { 42 | fn deserialize(deserializer: D) -> Result 43 | where 44 | D: Deserializer<'de>, 45 | { 46 | #[derive(Deserialize)] 47 | struct Entry { 48 | pubkey: String, 49 | addr_list: AddrList, 50 | version: u32, 51 | signature: String, 52 | } 53 | 54 | #[derive(Deserialize)] 55 | struct AddrList { 56 | address: Option, 57 | expire_at: u32, 58 | reinit_date: u32, 59 | version: u32, 60 | } 61 | 62 | let entry = Entry::deserialize(deserializer)?; 63 | 64 | let addr_list = proto::adnl::AddressList { 65 | address: entry 66 | .addr_list 67 | .address 68 | .map(|addr| proto::adnl::Address::from(&addr)), 69 | version: entry.addr_list.version, 70 | reinit_date: entry.addr_list.reinit_date, 71 | expire_at: entry.addr_list.expire_at, 72 | }; 73 | 74 | let node = proto::dht::NodeOwned { 75 | id: everscale_crypto::tl::PublicKeyOwned::Ed25519 { 76 | key: hex::decode(entry.pubkey) 77 | .map_err(Error::custom)? 78 | .try_into() 79 | .map_err(|_| Error::custom("invalid pubkey"))?, 80 | }, 81 | addr_list, 82 | version: entry.version, 83 | signature: base64::engine::general_purpose::STANDARD 84 | .decode(entry.signature) 85 | .map_err(|_| Error::custom("invalid signature"))? 
86 | .into(), 87 | }; 88 | 89 | Ok(Self(node)) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/util/network_builder.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use frunk_core::hlist::{HCons, HNil}; 3 | 4 | /// Convenient network layer builder 5 | pub struct NetworkBuilder(pub(crate) T, pub(crate) std::marker::PhantomData); 6 | 7 | impl NetworkBuilder 8 | where 9 | T: DeferredInitializationList, 10 | { 11 | /// Initializes all layers into a tuple of components 12 | pub fn build(self) -> Result { 13 | self.0.initialize() 14 | } 15 | } 16 | 17 | /// Lazy initialization when building layers with [`NetworkBuilder`] 18 | pub trait DeferredInitialization { 19 | type Initialized; 20 | 21 | fn initialize(self) -> Result; 22 | } 23 | 24 | /// List of lazy initializers for [`NetworkBuilder`] 25 | pub trait DeferredInitializationList { 26 | type Output; 27 | 28 | fn initialize(self) -> Result; 29 | } 30 | 31 | type BaseLayer = HCons; 32 | 33 | impl DeferredInitializationList for BaseLayer 34 | where 35 | T0: DeferredInitialization, 36 | { 37 | type Output = T0::Initialized; 38 | 39 | fn initialize(self) -> Result { 40 | self.head.initialize() 41 | } 42 | } 43 | 44 | impl DeferredInitializationList for HCons> 45 | where 46 | T0: DeferredInitialization, 47 | T1: DeferredInitialization, 48 | { 49 | type Output = (T0::Initialized, T1::Initialized); 50 | 51 | fn initialize(self) -> Result { 52 | let t1 = self.head.initialize()?; 53 | let t0 = self.tail.head.initialize()?; 54 | Ok((t0, t1)) 55 | } 56 | } 57 | 58 | impl DeferredInitializationList for HCons>> 59 | where 60 | T0: DeferredInitialization, 61 | T1: DeferredInitialization, 62 | T2: DeferredInitialization, 63 | { 64 | type Output = (T0::Initialized, T1::Initialized, T2::Initialized); 65 | 66 | fn initialize(self) -> Result { 67 | let t2 = self.head.initialize()?; 68 | let t1 = 
self.tail.head.initialize()?;
        let t0 = self.tail.tail.head.initialize()?;
        Ok((t0, t1, t2))
    }
}

// NOTE(review): the hlist type parameters were stripped during extraction;
// restored to the conventional frunk `HCons` nesting.
impl<T0, T1, T2, T3> DeferredInitializationList for HCons<T3, HCons<T2, HCons<T1, BaseLayer<T0>>>>
where
    T0: DeferredInitialization,
    T1: DeferredInitialization,
    T2: DeferredInitialization,
    T3: DeferredInitialization,
{
    type Output = (
        T0::Initialized,
        T1::Initialized,
        T2::Initialized,
        T3::Initialized,
    );

    // Layers are prepended during building, so the hlist is in reverse order
    fn initialize(self) -> Result<Self::Output> {
        let t3 = self.head.initialize()?;
        let t2 = self.tail.head.initialize()?;
        let t1 = self.tail.tail.head.initialize()?;
        let t0 = self.tail.tail.tail.head.initialize()?;
        Ok((t0, t1, t2, t3))
    }
}
--------------------------------------------------------------------------------
/src/adnl/socket.rs:
--------------------------------------------------------------------------------
use std::net::Ipv4Addr;
use std::sync::Arc;

use anyhow::Result;
use tokio::net::UdpSocket;

/// Creates a non-blocking UDP socket bound to `0.0.0.0:port`.
///
/// On unix the receive buffer is maximised and `SO_REUSEPORT` is enabled.
pub fn make_udp_socket(port: u16) -> Result<Arc<UdpSocket>> {
    let udp_socket = std::net::UdpSocket::bind((Ipv4Addr::UNSPECIFIED, port))?;
    udp_socket.set_nonblocking(true)?;

    #[cfg(unix)]
    {
        use std::os::unix::io::AsRawFd;

        let fd = udp_socket.as_raw_fd();
        maximise_recv_buffer(fd)?;
        set_reuse_port(fd, true)?;
    }

    Ok(Arc::new(UdpSocket::from_std(udp_socket)?))
}

#[cfg(unix)]
fn set_reuse_port(socket: libc::c_int, reuse: bool) -> Result<()> {
    unsafe {
        setsockopt(
            socket,
            libc::SOL_SOCKET,
            libc::SO_REUSEPORT,
            reuse as libc::c_int,
        )
    }
}

/// Binary-searches for the largest accepted `SO_RCVBUF` value, up to 16 MiB.
#[cfg(unix)]
fn maximise_recv_buffer(socket: libc::c_int) -> Result<()> {
    const MAX_UDP_RECV_BUFFER_SIZE: usize = 1 << 24;

    unsafe {
        let current_size: libc::c_int = getsockopt(socket, libc::SOL_SOCKET, libc::SO_RCVBUF)?;

        let mut min = current_size;
        let mut max = MAX_UDP_RECV_BUFFER_SIZE as
libc::c_int; 44 | while min <= max { 45 | let avg = min + (max - min) / 2; 46 | match setsockopt(socket, libc::SOL_SOCKET, libc::SO_RCVBUF, avg) { 47 | Ok(_) => { 48 | min = avg + 1; 49 | } 50 | Err(_) => { 51 | max = avg - 1; 52 | } 53 | } 54 | } 55 | } 56 | 57 | Ok(()) 58 | } 59 | 60 | #[cfg(unix)] 61 | unsafe fn getsockopt(socket: libc::c_int, level: libc::c_int, optname: libc::c_int) -> Result 62 | where 63 | T: Copy, 64 | { 65 | let mut slot: T = std::mem::zeroed(); 66 | let mut len = std::mem::size_of::() as libc::socklen_t; 67 | cvt(libc::getsockopt( 68 | socket, 69 | level, 70 | optname, 71 | &mut slot as *mut _ as *mut _, 72 | &mut len, 73 | ))?; 74 | debug_assert_eq!(len as usize, std::mem::size_of::()); 75 | Ok(slot) 76 | } 77 | 78 | #[cfg(unix)] 79 | unsafe fn setsockopt( 80 | socket: libc::c_int, 81 | level: libc::c_int, 82 | name: libc::c_int, 83 | value: T, 84 | ) -> Result<()> 85 | where 86 | T: Copy, 87 | { 88 | let value = &value as *const T as *const libc::c_void; 89 | cvt(libc::setsockopt( 90 | socket, 91 | level, 92 | name, 93 | value, 94 | std::mem::size_of::() as libc::socklen_t, 95 | ))?; 96 | Ok(()) 97 | } 98 | 99 | #[cfg(unix)] 100 | fn cvt(res: libc::c_int) -> std::io::Result<()> { 101 | if res == -1 { 102 | Err(std::io::Error::last_os_error()) 103 | } else { 104 | Ok(()) 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | # everscale-network   [![Latest Version]][crates.io] [![Workflow badge]][workflow] [![License Apache badge]][license apache] [![Docs badge]][docs] 8 | 9 | ## Table of Contents 10 | 11 | - [About](#about) 12 | - [Usage](#usage) 13 | - [Contributing](#contributing) 14 | - [License](#license) 15 | 16 | ## About 17 | 18 | Implementation of the network part of the Everscale blockchain. 
19 | 20 | ### Network stack 21 | 22 | ```text 23 | ┌─────────────────────┐ 24 | │ Overlay │ - Overlay: Virtual subnetwork 25 | ┌──────────┼──────────┐ │ - DHT: Kademlia-like Distributed Hash Table 26 | │ DHT │ RLDP │ │ - RLDP: Reliable Large Datagram Protocol 27 | ├──────────┴──────────┴──────────┤ 28 | │ ADNL │ - ADNL: Abstract Data Network Layer 29 | ├────────────────────────────────┤ 30 | │ UDP │ - underlying transport protocol 31 | └────────────────────────────────┘ 32 | ``` 33 | 34 | ## Usage 35 | 36 | ```bash 37 | cargo add everscale-network 38 | ``` 39 | 40 | ```rust 41 | use anyhow::Result; 42 | use everscale_network::{adnl, NetworkBuilder}; 43 | use tl_proto::{TlRead, TlWrite}; 44 | 45 | #[derive(TlWrite, TlRead)] 46 | #[tl(boxed, id = 0x11223344)] 47 | struct MyCustomData { 48 | counter: u32, 49 | } 50 | 51 | async fn example() -> Result<()> { 52 | const DHT_KEY_TAG: usize = 0; 53 | // Create and fill keystore 54 | let keystore = adnl::Keystore::builder() 55 | .with_tagged_key([1u8; 32], DHT_KEY_TAG)? 56 | .build(); 57 | 58 | // Create basic network parts 59 | // NOTE: our ip address must be accessible from other peers 60 | let (_adnl, dht) = NetworkBuilder::with_adnl("1.2.3.4:10000", keystore, Default::default()) 61 | .with_dht(DHT_KEY_TAG, Default::default()) 62 | .build()?; 63 | 64 | // Store some data in DHT 65 | let stored = dht 66 | .entry(dht.key().id(), "some_value") 67 | .with_data(MyCustomData { counter: 0 }) 68 | .with_ttl(3600) 69 | .sign_and_store(dht.key())? 70 | .then_check(|_, MyCustomData { counter }| Ok(counter == 0)) 71 | .await?; 72 | assert!(stored); 73 | 74 | Ok(()) 75 | } 76 | ``` 77 | 78 | For more information you can check the [docs](https://docs.rs/everscale-network) or the [examples](https://github.com/broxus/everscale-network/tree/master/examples). 79 | 80 | ### Minimum Rust version 81 | 82 | The current minimum required Rust version is `1.64.0`. 83 | 84 | ## Contributing 85 | 86 | We welcome contributions to the project! 
If you notice any issues or errors, feel free to open an issue or submit a pull request. 87 | 88 | ## License 89 | 90 | This project is licensed under the [License Apache]. 91 | 92 | [latest version]: https://img.shields.io/crates/v/everscale-network.svg 93 | [crates.io]: https://crates.io/crates/everscale-network 94 | [workflow badge]: https://img.shields.io/github/actions/workflow/status/broxus/everscale-network/master.yml?branch=master 95 | [workflow]: https://github.com/broxus/everscale-network/actions?query=workflow%3Amaster 96 | [license apache badge]: https://img.shields.io/github/license/broxus/everscale-network 97 | [license apache]: https://opensource.org/licenses/Apache-2.0 98 | [docs badge]: https://docs.rs/everscale-network/badge.svg 99 | [docs]: https://docs.rs/everscale-network 100 | -------------------------------------------------------------------------------- /src/overlay/mod.rs: -------------------------------------------------------------------------------- 1 | //! ## Overlay - virtual subnetwork 2 | //! 3 | //! An overlay (sub)network is simply a (virtual) network implemented inside some larger network. 4 | //! Only some nodes of the larger network participate in the overlay subnetwork, 5 | //! and only some "links" between these nodes, physical or virtual, are part of the overlay 6 | //! sub-network. 7 | //! 8 | //! 
TODO 9 | 10 | pub use overlay_id::{IdFull, IdShort}; 11 | 12 | mod overlay_id; 13 | 14 | #[cfg(feature = "overlay")] 15 | mod broadcast_receiver; 16 | #[cfg(feature = "overlay")] 17 | mod node; 18 | #[cfg(feature = "overlay")] 19 | #[allow(clippy::module_inception)] 20 | mod overlay; 21 | 22 | #[cfg(feature = "overlay")] 23 | mod node_impl { 24 | use std::sync::Arc; 25 | 26 | use anyhow::Result; 27 | use frunk_core::hlist::{HCons, HList, IntoTuple2, Selector}; 28 | use frunk_core::indices::There; 29 | 30 | pub use super::node::Node; 31 | pub use super::overlay::{ 32 | BroadcastTarget, ExistingPeersFilter, IncomingBroadcastInfo, OutgoingBroadcastInfo, 33 | Overlay, OverlayMetrics, OverlayOptions, ReceivedPeersMap, 34 | }; 35 | 36 | use crate::rldp; 37 | use crate::util::{DeferredInitialization, NetworkBuilder}; 38 | 39 | pub(crate) type Deferred = Result>; 40 | 41 | impl DeferredInitialization for Deferred { 42 | type Initialized = Arc; 43 | 44 | fn initialize(self) -> Result { 45 | self 46 | } 47 | } 48 | 49 | impl NetworkBuilder 50 | where 51 | L: HList + Selector, 52 | HCons: IntoTuple2, 53 | { 54 | /// Creates overlay network layer. 55 | /// 56 | /// NOTE: RLDP network layer must be present before calling this method. 57 | /// 58 | /// # Examples 59 | /// 60 | /// ``` 61 | /// # use anyhow::Result; 62 | /// # use everscale_network::{adnl, rldp, NetworkBuilder}; 63 | /// #[tokio::main] 64 | /// async fn main() -> Result<()> { 65 | /// const OVERLAY_KEY_TAG: usize = 0; 66 | /// 67 | /// let keystore = adnl::Keystore::builder() 68 | /// .with_tagged_key([0; 32], OVERLAY_KEY_TAG)? 
69 | /// .build(); 70 | /// 71 | /// let adnl_options = adnl::NodeOptions::default(); 72 | /// let rldp_options = rldp::NodeOptions::default(); 73 | /// 74 | /// let (adnl, rldp, overlay) = 75 | /// NetworkBuilder::with_adnl("127.0.0.1:10000", keystore, adnl_options) 76 | /// .with_rldp(rldp_options) 77 | /// .with_overlay(OVERLAY_KEY_TAG) 78 | /// .build()?; 79 | /// Ok(()) 80 | /// } 81 | /// ``` 82 | #[allow(clippy::type_complexity)] 83 | pub fn with_overlay( 84 | mut self, 85 | key_tag: usize, 86 | ) -> NetworkBuilder, (There, There)> { 87 | let deferred = match self.0.get_mut() { 88 | Ok((adnl, subscribers, _)) => { 89 | let overlay = Node::new(adnl.clone(), key_tag); 90 | if let Ok(overlay) = &overlay { 91 | subscribers.push(overlay.query_subscriber()); 92 | } 93 | overlay 94 | } 95 | Err(_) => Err(anyhow::anyhow!("ADNL was not initialized")), 96 | }; 97 | 98 | NetworkBuilder(self.0.prepend(deferred), Default::default()) 99 | } 100 | } 101 | 102 | /// Max allowed known peer count 103 | pub const MAX_OVERLAY_PEERS: u32 = 65536; 104 | } 105 | 106 | #[cfg(feature = "overlay")] 107 | pub use node_impl::*; 108 | -------------------------------------------------------------------------------- /src/overlay/overlay_id.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | use std::convert::TryFrom; 3 | 4 | use anyhow::Result; 5 | 6 | use crate::adnl; 7 | use crate::proto; 8 | 9 | /// Full overlay id 10 | /// 11 | /// See [`PublicKey::Overlay`] 12 | /// 13 | /// [`PublicKey::Overlay`]: everscale_crypto::tl::PublicKey::Overlay 14 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 15 | pub struct IdFull([u8; 32]); 16 | 17 | impl IdFull { 18 | /// Constructs full overlay id for the catchain overlay 19 | pub fn for_catchain_overlay<'tl, I>(unique_hash: &'tl [u8; 32], nodes: I) -> Self 20 | where 21 | I: Iterator + ExactSizeIterator + Clone, 22 | { 23 | Self(tl_proto::hash(proto::overlay::CatchainFirstBlock { 24 | 
unique_hash, 25 | nodes: tl_proto::IterRef(&nodes), 26 | })) 27 | } 28 | 29 | /// Constructs full overlay id for the workchain overlay 30 | pub fn for_workchain_overlay(workchain: i32, zero_state_file_hash: &[u8; 32]) -> Self { 31 | Self(tl_proto::hash(proto::overlay::ShardPublicOverlayId { 32 | workchain, 33 | shard: 1u64 << 63, 34 | zero_state_file_hash, 35 | })) 36 | } 37 | 38 | /// Returns inner bytes 39 | pub fn as_slice(&self) -> &[u8; 32] { 40 | &self.0 41 | } 42 | 43 | /// Hashes inner public key 44 | pub fn compute_short_id(&self) -> IdShort { 45 | let key = everscale_crypto::tl::PublicKey::Overlay { name: &self.0 }; 46 | IdShort(tl_proto::hash(key)) 47 | } 48 | } 49 | 50 | /// Short overlay id 51 | #[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] 52 | pub struct IdShort([u8; 32]); 53 | 54 | impl IdShort { 55 | /// Constructs short overlay id from public key hash 56 | #[inline(always)] 57 | pub const fn new(id: [u8; 32]) -> Self { 58 | Self(id) 59 | } 60 | 61 | /// Checks overlay node object (overlay id, signature) 62 | pub fn verify_overlay_node(&self, node: &proto::overlay::Node) -> Result<()> { 63 | if node.overlay != &self.0 { 64 | return Err(OverlayIdError::OverlayIdMismatch.into()); 65 | } 66 | 67 | let peer_id_full = adnl::NodeIdFull::try_from(node.id)?; 68 | let peer_id = peer_id_full.compute_short_id(); 69 | 70 | let node_to_sign = &proto::overlay::NodeToSign { 71 | id: peer_id.as_slice(), 72 | overlay: node.overlay, 73 | version: node.version, 74 | }; 75 | 76 | peer_id_full.verify(node_to_sign, node.signature)?; 77 | 78 | Ok(()) 79 | } 80 | 81 | /// Returns inner bytes 82 | #[inline(always)] 83 | pub const fn as_slice(&self) -> &[u8; 32] { 84 | &self.0 85 | } 86 | } 87 | 88 | impl PartialEq<[u8]> for IdShort { 89 | fn eq(&self, other: &[u8]) -> bool { 90 | self.0.eq(other) 91 | } 92 | } 93 | 94 | impl PartialEq<[u8; 32]> for IdShort { 95 | fn eq(&self, other: &[u8; 32]) -> bool { 96 | self.0.eq(other) 97 | } 98 | } 99 
| 100 | impl From for [u8; 32] { 101 | fn from(id: IdShort) -> Self { 102 | id.0 103 | } 104 | } 105 | 106 | impl From<&IdShort> for [u8; 32] { 107 | fn from(id: &IdShort) -> Self { 108 | id.0 109 | } 110 | } 111 | 112 | impl From<[u8; 32]> for IdShort { 113 | fn from(id: [u8; 32]) -> Self { 114 | Self(id) 115 | } 116 | } 117 | 118 | impl Borrow<[u8; 32]> for IdShort { 119 | fn borrow(&self) -> &[u8; 32] { 120 | &self.0 121 | } 122 | } 123 | 124 | impl<'a> Borrow<[u8; 32]> for &'a IdShort { 125 | fn borrow(&self) -> &[u8; 32] { 126 | &self.0 127 | } 128 | } 129 | 130 | impl std::fmt::Display for IdShort { 131 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 132 | f.write_str(&hex::encode(self.0)) 133 | } 134 | } 135 | 136 | #[derive(thiserror::Error, Debug)] 137 | enum OverlayIdError { 138 | #[error("Overlay id mismatch")] 139 | OverlayIdMismatch, 140 | } 141 | -------------------------------------------------------------------------------- /examples/overlay_query.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::net::{Ipv4Addr, SocketAddrV4}; 3 | use std::sync::Arc; 4 | 5 | use anyhow::{Context, Result}; 6 | use everscale_network::{ 7 | adnl, overlay, NetworkBuilder, QueryConsumingResult, QuerySubscriber, SubscriberContext, 8 | }; 9 | use rand::Rng; 10 | use tl_proto::{TlRead, TlWrite}; 11 | 12 | const KEY_TAG: usize = 0; 13 | 14 | #[tokio::main] 15 | async fn main() -> Result<()> { 16 | tracing_subscriber::fmt::init(); 17 | 18 | let overlay_id = Default::default(); 19 | 20 | let (adnl, _rldp, overlay) = NetworkBuilder::with_adnl( 21 | (Ipv4Addr::LOCALHOST, 0), 22 | make_keystore()?, 23 | Default::default(), 24 | ) 25 | .with_rldp(Default::default()) 26 | .with_overlay(KEY_TAG) 27 | .build()?; 28 | 29 | let (shard, _) = overlay.add_public_overlay(&overlay_id, Default::default()); 30 | 31 | let subscriber = Arc::new(OverlaySubscriber); 32 | 
overlay.add_overlay_subscriber(overlay_id, subscriber); 33 | 34 | send_query(overlay_id, shard.sign_local_node(), adnl.socket_addr()).await?; 35 | 36 | Ok(()) 37 | } 38 | 39 | async fn send_query( 40 | overlay_id: overlay::IdShort, 41 | other: everscale_network::proto::overlay::NodeOwned, 42 | addr: SocketAddrV4, 43 | ) -> Result<()> { 44 | let (adnl, _rldp, overlay) = NetworkBuilder::with_adnl( 45 | (Ipv4Addr::LOCALHOST, 0), 46 | make_keystore()?, 47 | Default::default(), 48 | ) 49 | .with_rldp(Default::default()) 50 | .with_overlay(KEY_TAG) 51 | .build()?; 52 | 53 | let (shard, _) = overlay.add_public_overlay(&overlay_id, Default::default()); 54 | let peer_id = shard 55 | .add_public_peer(&adnl, addr, other.as_equivalent_ref())? 56 | .context("failed to add overlay peer")?; 57 | 58 | let pong: everscale_network::proto::adnl::Pong = adnl 59 | .query( 60 | shard.overlay_key().id(), 61 | &peer_id, 62 | everscale_network::proto::rpc::AdnlPing { value: 123 }, 63 | None, 64 | ) 65 | .await? 66 | .context("no ping response")?; 67 | tracing::info!("PONG: {pong:?}"); 68 | 69 | let answer = shard 70 | .adnl_query(&adnl, &peer_id, RpcGetCapabilities, None) 71 | .await? 72 | .context("no answer")?; 73 | tracing::info!("response: {}", hex::encode(&answer)); 74 | 75 | let parsed = tl_proto::deserialize::(&answer)?; 76 | tracing::info!("answer: {parsed:?}"); 77 | 78 | Ok(()) 79 | } 80 | 81 | fn make_keystore() -> Result { 82 | Ok(adnl::Keystore::builder() 83 | .with_tagged_key(rand::thread_rng().gen(), KEY_TAG)? 
84 | .build()) 85 | } 86 | 87 | struct OverlaySubscriber; 88 | 89 | #[async_trait::async_trait] 90 | impl QuerySubscriber for OverlaySubscriber { 91 | async fn try_consume_query<'a>( 92 | &self, 93 | _: SubscriberContext<'a>, 94 | constructor: u32, 95 | query: Cow<'a, [u8]>, 96 | ) -> Result> { 97 | tracing::info!("query: {}", hex::encode(query)); 98 | if constructor == RpcGetCapabilities::TL_ID { 99 | QueryConsumingResult::consume(Capabilities { 100 | version: 2, 101 | capabilities: 1, 102 | }) 103 | } else { 104 | Ok(QueryConsumingResult::Consumed(None)) 105 | } 106 | } 107 | } 108 | 109 | #[derive(TlWrite, TlRead)] 110 | #[tl( 111 | boxed, 112 | id = "tonNode.getCapabilities", 113 | scheme_inline = "tonNode.getCapabilities = tonNode.Capabilities;" 114 | )] 115 | pub struct RpcGetCapabilities; 116 | 117 | #[derive(Debug, Copy, Clone, TlWrite, TlRead)] 118 | #[tl( 119 | boxed, 120 | id = "tonNode.capabilities", 121 | size_hint = 12, 122 | scheme_inline = "tonNode.capabilities version:int capabilities:long = tonNode.Capabilities;" 123 | )] 124 | pub struct Capabilities { 125 | pub version: u32, 126 | pub capabilities: u64, 127 | } 128 | -------------------------------------------------------------------------------- /src/dht/buckets.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | 3 | use crate::adnl; 4 | use crate::proto; 5 | use crate::util::*; 6 | 7 | /// DHT nodes, distributed by max equal bits 8 | pub struct Buckets { 9 | local_id: [u8; 32], 10 | buckets: Box<[FastDashMap; 256]>, 11 | } 12 | 13 | impl Buckets { 14 | pub fn new(local_id: &adnl::NodeIdShort) -> Self { 15 | Self { 16 | local_id: *local_id.as_slice(), 17 | buckets: Box::new([(); 256].map(|_| Default::default())), 18 | } 19 | } 20 | 21 | /// Returns iterator over all buckets, starting from the most distant 22 | pub fn iter(&self) -> std::slice::Iter> { 23 | self.buckets.iter() 24 | } 25 | 26 | /// Inserts DHT node into the bucket 
based on its distance 27 | pub fn insert(&self, peer_id: &adnl::NodeIdShort, peer: proto::dht::NodeOwned) { 28 | use dashmap::mapref::entry::Entry; 29 | 30 | let affinity = get_affinity(&self.local_id, peer_id.borrow()); 31 | match self.buckets[affinity as usize].entry(*peer_id) { 32 | Entry::Occupied(mut entry) => { 33 | if entry.get().version < peer.version { 34 | entry.insert(peer); 35 | } 36 | } 37 | Entry::Vacant(entry) => { 38 | entry.insert(peer); 39 | } 40 | } 41 | } 42 | 43 | /// Finds `k` closest DHT nodes for the given `peer_id` 44 | pub fn find(&self, peer_id: T, k: u32) -> proto::dht::NodesOwned 45 | where 46 | T: Borrow<[u8; 32]>, 47 | { 48 | let key1 = &self.local_id; 49 | let key2: &[u8; 32] = peer_id.borrow(); 50 | 51 | let mut nodes = Vec::new(); 52 | 53 | // Iterate over buckets 54 | 'outer: for i in 0..32 { 55 | let mut distance = i as u8 * 8; 56 | 57 | // Compare bytes 58 | let mut diff = key1[i] ^ key2[i]; 59 | 60 | // If they are not equal (otherwise we will just add 8 bits 61 | // to the distance and continue to the next byte) 62 | while diff != 0 { 63 | // Get equal bit count 64 | let equal_bits = diff.leading_zeros() as u8; // 0..=7 65 | distance += equal_bits; 66 | 67 | // Add all nodes from this distance to the result 68 | let bucket = &self.buckets[distance as usize]; 69 | for item in bucket.iter() { 70 | nodes.push(item.value().clone()); 71 | if nodes.len() >= k as usize { 72 | break 'outer; 73 | } 74 | } 75 | 76 | // Skip one different bit: 77 | if equal_bits < 7 { 78 | diff <<= equal_bits + 1; 79 | distance = distance.saturating_add(1); 80 | } else { 81 | continue 'outer; 82 | } 83 | } 84 | } 85 | 86 | // Done 87 | proto::dht::NodesOwned { nodes } 88 | } 89 | } 90 | 91 | impl<'a> IntoIterator for &'a Buckets { 92 | type Item = &'a FastDashMap; 93 | type IntoIter = std::slice::Iter<'a, FastDashMap>; 94 | 95 | fn into_iter(self) -> Self::IntoIter { 96 | self.iter() 97 | } 98 | } 99 | 100 | /// Returns the length of the longest common 
/// prefix of two keys (in bits); `255` when both keys are identical.
pub fn get_affinity(key1: &[u8; 32], key2: &[u8; 32]) -> u8 {
    key1.iter()
        .zip(key2.iter())
        .enumerate()
        .find_map(|(byte_idx, (a, b))| {
            let diff = a ^ b;
            // For a non-zero byte, `leading_zeros` (0..=7) is the index of
            // the first differing bit inside this byte.
            (diff != 0).then(|| (byte_idx * 8 + diff.leading_zeros() as usize) as u8)
        })
        // No differing byte found: all 256 bits are equal.
        .unwrap_or(255)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn same_affinity() {
        assert_eq!(get_affinity(&[0xaa; 32], &[0xaa; 32]), 255);
    }
}
-------------------------------------------------------------------------------- /examples/rldp.rs: --------------------------------------------------------------------------------
use std::borrow::Cow;
use std::net::Ipv4Addr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;

use anyhow::Result;
use everscale_crypto::ed25519;
use everscale_network::{adnl, rldp};
use everscale_network::{NetworkBuilder, QueryConsumingResult, QuerySubscriber, SubscriberContext};
use rand::Rng;
use tl_proto::{TlRead, TlWrite};

#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();

    let adnl_node_options = adnl::NodeOptions::default();
    let rldp_node_options = rldp::NodeOptions {
        max_peer_queries: 10000,
        force_compression: true,
        ..Default::default()
    };

    let build_node = |service| -> Result<(Arc, Arc)> {
        let key = ed25519::SecretKey::generate(&mut rand::thread_rng());
        let (adnl, rldp) = NetworkBuilder::with_adnl(
            (Ipv4Addr::LOCALHOST, 0),
            adnl::Keystore::builder()
                .with_tagged_key(key.to_bytes(), 0)?
31 | .build(), 32 | adnl_node_options, 33 | ) 34 | .with_rldp_ext(rldp_node_options, vec![service]) 35 | .build()?; 36 | 37 | Ok((adnl, rldp)) 38 | }; 39 | 40 | let (left_adnl, left_rldp) = build_node(Arc::new(Service))?; 41 | let (right_adnl, _right_rldp) = build_node(Arc::new(Service))?; 42 | 43 | let left_node_id = *left_adnl.key_by_tag(0)?.id(); 44 | 45 | let right_node_id_full = *right_adnl.key_by_tag(0)?.full_id(); 46 | let right_node_id = right_node_id_full.compute_short_id(); 47 | 48 | left_adnl.add_peer( 49 | adnl::NewPeerContext::AdnlPacket, 50 | &left_node_id, 51 | &right_node_id, 52 | right_adnl.socket_addr(), 53 | right_node_id_full, 54 | )?; 55 | 56 | let iterations = Arc::new(AtomicUsize::new(0)); 57 | let mut handles = Vec::new(); 58 | 59 | for _ in 0..200 { 60 | let left_rldp = left_rldp.clone(); 61 | let query = example_request(); 62 | let iterations = iterations.clone(); 63 | handles.push(tokio::spawn(async move { 64 | let e = loop { 65 | let query = tl_proto::serialize(query); 66 | match left_rldp 67 | .query(&left_node_id, &right_node_id, query, None) 68 | .await 69 | { 70 | Ok((Some(_), _)) => { 71 | iterations.fetch_add(1, Ordering::Relaxed); 72 | } 73 | Ok((None, _)) => println!("Packet lost"), 74 | Err(e) => break e, 75 | } 76 | }; 77 | println!("Error: {e:?}"); 78 | })); 79 | } 80 | 81 | tokio::select! 
{ 82 | _ = futures_util::future::join_all(handles) => {}, 83 | _ = tokio::time::sleep(Duration::from_secs(10)) => {}, 84 | } 85 | 86 | let throughput = (tl_proto::serialize(example_request()).len() 87 | + tl_proto::serialize(example_response()).len()) 88 | * iterations.load(Ordering::Relaxed); 89 | 90 | println!( 91 | "Total throughput: {} MB/s ({})", 92 | throughput as f64 / 10485760.0, 93 | iterations.load(Ordering::Relaxed) 94 | ); 95 | 96 | Ok(()) 97 | } 98 | 99 | struct Service; 100 | 101 | #[async_trait::async_trait] 102 | impl QuerySubscriber for Service { 103 | async fn try_consume_query<'a>( 104 | &self, 105 | _: SubscriberContext<'a>, 106 | _: u32, 107 | query: Cow<'a, [u8]>, 108 | ) -> Result> { 109 | let _req = tl_proto::deserialize::(query.as_ref())?; 110 | Ok(QueryConsumingResult::Consumed(Some(example_response()))) 111 | } 112 | } 113 | 114 | fn example_request() -> RpcGetArchiveSlice { 115 | RpcGetArchiveSlice { 116 | archive_id: 123123, 117 | offset: 0, 118 | max_size: 2 << 21, 119 | } 120 | } 121 | 122 | fn example_response() -> Vec { 123 | static DATA: once_cell::race::OnceBox> = once_cell::race::OnceBox::new(); 124 | DATA.get_or_init(|| { 125 | let mut rng = rand::thread_rng(); 126 | Box::new(std::iter::repeat_with(|| rng.gen()).take(2 << 21).collect()) 127 | }) 128 | .clone() 129 | } 130 | 131 | #[derive(Copy, Clone, Debug, TlRead, TlWrite)] 132 | #[tl(boxed, id = 0x203b5168, size_hint = 20)] 133 | pub struct RpcGetArchiveSlice { 134 | pub archive_id: u64, 135 | pub offset: u64, 136 | pub max_size: u32, 137 | } 138 | -------------------------------------------------------------------------------- /src/dht/futures.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::sync::Arc; 4 | use std::task::{Context, Poll}; 5 | 6 | use anyhow::Result; 7 | use bytes::Bytes; 8 | use futures_util::future::BoxFuture; 9 | use futures_util::stream::FuturesUnordered; 
10 | use futures_util::{FutureExt, StreamExt}; 11 | use tl_proto::TlRead; 12 | 13 | use super::streams::DhtValuesStream; 14 | use super::Node; 15 | use crate::proto; 16 | 17 | /// Future for the `DhtNode::store_value` method. 18 | #[must_use = "futures do nothing unless polled"] 19 | pub struct StoreValue { 20 | dht: Arc, 21 | key: proto::dht::KeyOwned, 22 | query: Bytes, 23 | futures: FuturesUnordered, 24 | started: bool, 25 | } 26 | 27 | impl StoreValue { 28 | pub(super) fn new(dht: Arc, value: proto::dht::Value<'_>) -> Result { 29 | dht.storage().insert(value)?; 30 | 31 | let key = value.key.key.as_equivalent_owned(); 32 | let query = tl_proto::serialize(proto::rpc::DhtStore { value }).into(); 33 | 34 | Ok(Self { 35 | dht, 36 | key, 37 | query, 38 | futures: Default::default(), 39 | started: false, 40 | }) 41 | } 42 | 43 | /// Wraps `DhtStoreValue` into future which verifies that value is stored in the DHT 44 | /// and passes the predicate test 45 | pub fn then_check(self, check_value: FV) -> DhtStoreValueWithCheck { 46 | DhtStoreValueWithCheck { 47 | store_value: self, 48 | find_value: None, 49 | check_value, 50 | check_all: false, 51 | _marker: Default::default(), 52 | } 53 | } 54 | 55 | /// Drops the future, causing the value to be stored only locally 56 | pub fn only_locally(self) {} 57 | } 58 | 59 | impl Future for StoreValue { 60 | type Output = (); 61 | 62 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 63 | if !self.started { 64 | for &peer_id in self.dht.iter_known_peers() { 65 | let dht = self.dht.clone(); 66 | let query = self.query.clone(); 67 | self.futures.push(Box::pin(async move { 68 | dht.query_raw(&peer_id, query).await.ok(); 69 | })); 70 | } 71 | self.started = true; 72 | } 73 | 74 | loop { 75 | match self.futures.poll_next_unpin(cx) { 76 | Poll::Ready(Some(_)) => continue, 77 | Poll::Ready(None) => break Poll::Ready(()), 78 | Poll::Pending => break Poll::Pending, 79 | } 80 | } 81 | } 82 | } 83 | 84 | /// Future for the 
`DhtStoreValue::ensure_stored` method. 85 | #[must_use = "futures do nothing unless polled"] 86 | pub struct DhtStoreValueWithCheck { 87 | store_value: StoreValue, 88 | find_value: Option>, 89 | check_value: FV, 90 | check_all: bool, 91 | _marker: std::marker::PhantomData, 92 | } 93 | 94 | impl DhtStoreValueWithCheck { 95 | /// Forces the future to check all stored values 96 | pub fn check_all(mut self) -> Self { 97 | self.check_all = true; 98 | self 99 | } 100 | } 101 | 102 | impl Unpin for DhtStoreValueWithCheck {} 103 | 104 | impl Future for DhtStoreValueWithCheck 105 | where 106 | FV: FnMut(proto::dht::KeyDescriptionOwned, T) -> Result, 107 | for<'a> T: TlRead<'a, Repr = tl_proto::Boxed> + Send + 'static, 108 | { 109 | type Output = Result; 110 | 111 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 112 | loop { 113 | match &mut self.find_value { 114 | None => { 115 | futures_util::ready!(self.store_value.poll_unpin(cx)); 116 | self.find_value = Some( 117 | DhtValuesStream::new( 118 | self.store_value.dht.clone(), 119 | self.store_value.key.as_equivalent_ref(), 120 | ) 121 | .use_new_peers(true), 122 | ); 123 | } 124 | Some(find_value) => match find_value.poll_next_unpin(cx) { 125 | Poll::Ready(Some((key, value))) => match (self.check_value)(key, value) { 126 | Ok(true) => break Poll::Ready(Ok(true)), 127 | Ok(false) => continue, 128 | Err(e) => break Poll::Ready(Err(e)), 129 | }, 130 | Poll::Ready(None) => break Poll::Ready(Ok(false)), 131 | Poll::Pending => break Poll::Pending, 132 | }, 133 | } 134 | } 135 | } 136 | } 137 | 138 | type StoreFuture = BoxFuture<'static, ()>; 139 | -------------------------------------------------------------------------------- /src/rldp/mod.rs: -------------------------------------------------------------------------------- 1 | //! ## RLDP - Reliable Large Datagram Protocol 2 | //! 3 | //! A reliable arbitrary-size datagram protocol built upon the ADNL, called RLDP, is used instead 4 | //! 
of a TCP-like protocol. This reliable datagram protocol can be employed, for instance, 5 | //! to send RPC queries to remote hosts and receive answers from them. 6 | //! 7 | //! TODO 8 | 9 | use std::sync::Arc; 10 | 11 | use anyhow::Result; 12 | use frunk_core::hlist::{HCons, HList, IntoTuple2, Selector}; 13 | use frunk_core::indices::{Here, There}; 14 | 15 | pub(crate) use decoder::RaptorQDecoder; 16 | pub(crate) use encoder::RaptorQEncoder; 17 | pub use node::{Node, NodeMetrics, NodeOptions}; 18 | 19 | use crate::adnl; 20 | use crate::subscriber::QuerySubscriber; 21 | use crate::util::{DeferredInitialization, NetworkBuilder}; 22 | 23 | pub(crate) mod compression; 24 | mod decoder; 25 | mod encoder; 26 | mod incoming_transfer; 27 | mod node; 28 | mod outgoing_transfer; 29 | mod transfers_cache; 30 | 31 | pub(crate) type Deferred = Result<(Arc, Vec>, NodeOptions)>; 32 | 33 | impl DeferredInitialization for Deferred { 34 | type Initialized = Arc; 35 | 36 | fn initialize(self) -> Result { 37 | let (adnl, subscribers, options) = self?; 38 | Node::new(adnl, subscribers, options) 39 | } 40 | } 41 | 42 | impl NetworkBuilder 43 | where 44 | L: HList + Selector, 45 | HCons: IntoTuple2, 46 | { 47 | /// Creates RLDP network layer 48 | /// 49 | /// See [`with_rldp_ext`] if you need an RLDP node with additional subscribers 50 | /// 51 | /// [`with_rldp_ext`]: fn@crate::util::NetworkBuilder::with_rldp_ext 52 | /// 53 | /// # Examples 54 | /// 55 | /// ``` 56 | /// # use anyhow::Result; 57 | /// # use everscale_network::{adnl, rldp, NetworkBuilder}; 58 | /// #[tokio::main] 59 | /// async fn main() -> Result<()> { 60 | /// let keystore = adnl::Keystore::builder() 61 | /// .with_tagged_key([0; 32], 0)? 
62 | /// .build(); 63 | /// 64 | /// let adnl_options = adnl::NodeOptions::default(); 65 | /// let rldp_options = rldp::NodeOptions::default(); 66 | /// 67 | /// let (adnl, rldp) = NetworkBuilder::with_adnl("127.0.0.1:10000", keystore, adnl_options) 68 | /// .with_rldp(rldp_options) 69 | /// .build()?; 70 | /// Ok(()) 71 | /// } 72 | /// ``` 73 | #[allow(clippy::type_complexity)] 74 | pub fn with_rldp( 75 | self, 76 | options: NodeOptions, 77 | ) -> NetworkBuilder, (There, Here)> { 78 | self.with_rldp_ext(options, Vec::new()) 79 | } 80 | 81 | /// Creates RLDP network layer with additional RLDP query subscribers 82 | /// 83 | /// # Examples 84 | /// 85 | /// ``` 86 | /// # use std::borrow::Cow; 87 | /// # use std::sync::Arc; 88 | /// # use anyhow::Result; 89 | /// # use everscale_network::{ 90 | /// # adnl, rldp, NetworkBuilder, QueryConsumingResult, QuerySubscriber, SubscriberContext, 91 | /// # }; 92 | /// struct LoggerSubscriber; 93 | /// 94 | /// #[async_trait::async_trait] 95 | /// impl QuerySubscriber for LoggerSubscriber { 96 | /// async fn try_consume_query<'a>( 97 | /// &self, 98 | /// ctx: SubscriberContext<'a>, 99 | /// constructor: u32, 100 | /// query: Cow<'a, [u8]>, 101 | /// ) -> Result> { 102 | /// println!("received {constructor}"); 103 | /// Ok(QueryConsumingResult::Rejected(query)) 104 | /// } 105 | /// } 106 | /// 107 | /// #[tokio::main] 108 | /// async fn main() -> Result<()> { 109 | /// let keystore = adnl::Keystore::builder() 110 | /// .with_tagged_key([0; 32], 0)? 
111 | /// .build(); 112 | /// 113 | /// let adnl_options = adnl::NodeOptions::default(); 114 | /// let rldp_options = rldp::NodeOptions::default(); 115 | /// 116 | /// let subscriber = Arc::new(LoggerSubscriber); 117 | /// 118 | /// let (adnl, rldp) = NetworkBuilder::with_adnl("127.0.0.1:10000", keystore, adnl_options) 119 | /// .with_rldp_ext(rldp_options, vec![subscriber]) 120 | /// .build()?; 121 | /// Ok(()) 122 | /// } 123 | /// ``` 124 | #[allow(clippy::type_complexity)] 125 | pub fn with_rldp_ext( 126 | self, 127 | options: NodeOptions, 128 | subscribers: Vec>, 129 | ) -> NetworkBuilder, (There, Here)> { 130 | let deferred = match self.0.get() { 131 | Ok(adnl) => Ok((adnl.clone(), subscribers, options)), 132 | Err(_) => Err(anyhow::anyhow!("ADNL was not initialized")), 133 | }; 134 | NetworkBuilder(self.0.prepend(deferred), Default::default()) 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /src/adnl/transfer.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | 3 | use sha2::Digest; 4 | 5 | use crate::util::*; 6 | 7 | pub type TransferId = [u8; 32]; 8 | 9 | /// Multipart transfer 10 | /// 11 | /// It is used to collect multiple values of ADNL `Part` messages. 
12 | /// 13 | /// See [crate::proto::adnl::Message] 14 | pub struct Transfer { 15 | /// Data parts labeled with offset 16 | parts: FastDashMap>, 17 | /// Received data length 18 | received_len: AtomicUsize, 19 | /// Total data length 20 | total_len: usize, 21 | /// Transfer timings used to check its validity 22 | timings: UpdatedAt, 23 | } 24 | 25 | impl Transfer { 26 | /// Creates new multipart transfer with target length in bytes 27 | pub fn new(total_len: usize) -> Self { 28 | Self { 29 | parts: FastDashMap::with_capacity_and_hasher(0, Default::default()), 30 | received_len: Default::default(), 31 | total_len, 32 | timings: Default::default(), 33 | } 34 | } 35 | 36 | /// Returns transfer timings info (when it was last updated) 37 | #[inline(always)] 38 | pub fn timings(&self) -> &UpdatedAt { 39 | &self.timings 40 | } 41 | 42 | /// Tries to add new part to the transfer at given offset 43 | /// 44 | /// Will do nothing if part at given offset already exists 45 | pub fn add_part( 46 | &self, 47 | offset: usize, 48 | data: Vec, 49 | transfer_id: &TransferId, 50 | ) -> Result>, TransferError> { 51 | let len = data.len(); 52 | if self.parts.insert(offset, data).is_some() { 53 | return Ok(None); 54 | } 55 | 56 | // Increase received length. 
57 | // This part heavily relies on ordering, so hope that it works as expected 58 | self.received_len.fetch_add(len, Ordering::Release); 59 | 60 | // Check if it is equal to the total length and make sure it will be big enough to fail 61 | // next check on success 62 | let mut received = self 63 | .received_len 64 | .compare_exchange( 65 | self.total_len, 66 | self.total_len * 2, 67 | Ordering::Acquire, 68 | Ordering::Acquire, 69 | ) 70 | .unwrap_or_else(std::convert::identity); 71 | 72 | // Handle part 73 | match received.cmp(&self.total_len) { 74 | std::cmp::Ordering::Equal => { 75 | tracing::debug!( 76 | received, 77 | total = self.total_len, 78 | transfer_id = %DisplayTransferId(transfer_id), 79 | "finished ADNL transfer" 80 | ); 81 | 82 | // Combine all parts 83 | received = 0; 84 | let mut buffer = Vec::with_capacity(self.total_len); 85 | while received < self.total_len { 86 | if let Some(data) = self.parts.get(&received) { 87 | let data = data.value(); 88 | received += data.len(); 89 | buffer.extend_from_slice(data); 90 | } else { 91 | return Err(TransferError::PartMissing); 92 | } 93 | } 94 | 95 | // Check hash 96 | let hash = sha2::Sha256::digest(&buffer); 97 | if hash.as_slice() != transfer_id { 98 | return Err(TransferError::InvalidHash); 99 | } 100 | 101 | // Done 102 | Ok(Some(buffer)) 103 | } 104 | std::cmp::Ordering::Greater => Err(TransferError::ReceivedTooMuch), 105 | std::cmp::Ordering::Less => { 106 | tracing::trace!( 107 | received, 108 | total = self.total_len, 109 | transfer_id = %DisplayTransferId(transfer_id), 110 | "received ADNL transfer part" 111 | ); 112 | Ok(None) 113 | } 114 | } 115 | } 116 | } 117 | 118 | #[derive(Copy, Clone)] 119 | pub struct DisplayTransferId<'a>(pub &'a TransferId); 120 | 121 | impl std::fmt::Display for DisplayTransferId<'_> { 122 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 123 | let mut output = [0u8; 64]; 124 | hex::encode_to_slice(self.0, &mut output).ok(); 125 | 126 | // SAFETY: 
output is guaranteed to contain only [0-9a-f] 127 | let output = unsafe { std::str::from_utf8_unchecked(&output) }; 128 | f.write_str(output) 129 | } 130 | } 131 | 132 | #[derive(thiserror::Error, Debug)] 133 | pub enum TransferError { 134 | #[error("Invalid transfer part (received too much)")] 135 | ReceivedTooMuch, 136 | #[error("Invalid transfer (part is missing)")] 137 | PartMissing, 138 | #[error("Invalid transfer data hash")] 139 | InvalidHash, 140 | } 141 | -------------------------------------------------------------------------------- /examples/adnl.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::net::{Ipv4Addr, SocketAddrV4}; 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | use std::sync::Arc; 5 | use std::time::Duration; 6 | 7 | use anyhow::Result; 8 | use everscale_crypto::ed25519; 9 | use everscale_network::adnl; 10 | use everscale_network::{QueryConsumingResult, QuerySubscriber, SubscriberContext}; 11 | use tl_proto::{TlRead, TlWrite}; 12 | 13 | #[tokio::main] 14 | async fn main() -> Result<()> { 15 | // tracing_subscriber::fmt::init(); 16 | 17 | let adnl_node_options = adnl::NodeOptions::default(); 18 | 19 | let left_key = ed25519::SecretKey::generate(&mut rand::thread_rng()); 20 | let left_node = adnl::Node::new( 21 | SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0), 22 | adnl::Keystore::builder() 23 | .with_tagged_keys([(left_key.to_bytes(), 0)])? 24 | .build(), 25 | adnl_node_options, 26 | None, 27 | )?; 28 | let left_node_id = *left_node.key_by_tag(0)?.id(); 29 | 30 | let right_key = ed25519::SecretKey::generate(&mut rand::thread_rng()); 31 | let right_node = adnl::Node::new( 32 | SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0), 33 | adnl::Keystore::builder() 34 | .with_tagged_keys([(right_key.to_bytes(), 0)])? 
35 | .build(), 36 | adnl_node_options, 37 | None, 38 | )?; 39 | right_node.add_query_subscriber(Arc::new(Service))?; 40 | 41 | let right_node_id_full = *right_node.key_by_tag(0)?.full_id(); 42 | let right_node_id = right_node_id_full.compute_short_id(); 43 | 44 | left_node.add_peer( 45 | adnl::NewPeerContext::AdnlPacket, 46 | &left_node_id, 47 | &right_node_id, 48 | right_node.socket_addr(), 49 | right_node_id_full, 50 | )?; 51 | 52 | left_node.start()?; 53 | right_node.start()?; 54 | 55 | let iterations = Arc::new(AtomicUsize::new(0)); 56 | let mut handles = Vec::new(); 57 | for _ in 0..200 { 58 | let left_node = left_node.clone(); 59 | let query = example_request(); 60 | let iterations = iterations.clone(); 61 | handles.push(tokio::spawn(async move { 62 | let e = loop { 63 | match query_data::<_, DataFull>(&left_node, &left_node_id, &right_node_id, query) 64 | .await 65 | { 66 | Ok(_) => { 67 | iterations.fetch_add(1, Ordering::Relaxed); 68 | } 69 | Err(e) => break e, 70 | } 71 | }; 72 | println!("Error: {e:?}"); 73 | })); 74 | } 75 | 76 | tokio::select! { 77 | _ = futures_util::future::join_all(handles) => {}, 78 | _ = tokio::time::sleep(Duration::from_secs(10)) => {}, 79 | } 80 | 81 | left_node.shutdown(); 82 | 83 | let throughput = (tl_proto::serialize(example_request()).len() 84 | + tl_proto::serialize(example_response()).len()) 85 | * iterations.load(Ordering::Relaxed); 86 | 87 | println!("Total throughput: {} MB/s", throughput as f64 / 10485760.0); 88 | 89 | Ok(()) 90 | } 91 | 92 | async fn query_data( 93 | left_node: &Arc, 94 | left_node_id: &adnl::NodeIdShort, 95 | right_node_id: &adnl::NodeIdShort, 96 | query: Q, 97 | ) -> Result<()> 98 | where 99 | Q: TlWrite, 100 | for<'a> A: TlRead<'a, Repr = tl_proto::Boxed> + 'static, 101 | { 102 | match left_node 103 | .query::(left_node_id, right_node_id, query, None) 104 | .await? 
105 | { 106 | Some(_) => {} 107 | None => println!("Packet lost"), 108 | }; 109 | Ok(()) 110 | } 111 | 112 | struct Service; 113 | 114 | #[async_trait::async_trait] 115 | impl QuerySubscriber for Service { 116 | async fn try_consume_query<'a>( 117 | &self, 118 | _: SubscriberContext<'a>, 119 | _: u32, 120 | _: Cow<'a, [u8]>, 121 | ) -> Result> { 122 | QueryConsumingResult::consume(example_response()) 123 | } 124 | } 125 | 126 | fn example_request() -> DownloadNextBlockFull { 127 | DownloadNextBlockFull { 128 | prev_block: Default::default(), 129 | } 130 | } 131 | 132 | fn example_response() -> DataFull { 133 | DataFull { 134 | id: Default::default(), 135 | proof: vec![1u8; 128], 136 | block: vec![1u8; 128], 137 | is_link: false, 138 | } 139 | } 140 | 141 | #[derive(Copy, Clone, TlRead, TlWrite)] 142 | #[tl(boxed, id = 0x6ea0374a)] 143 | struct DownloadNextBlockFull { 144 | prev_block: BlockId, 145 | } 146 | 147 | #[derive(Clone, TlRead, TlWrite)] 148 | #[tl(boxed, id = 0xbe589f93)] 149 | struct DataFull { 150 | id: BlockId, 151 | proof: Vec, 152 | block: Vec, 153 | is_link: bool, 154 | } 155 | 156 | #[derive(Default, Copy, Clone, TlRead, TlWrite)] 157 | struct BlockId { 158 | workchain: i32, 159 | shard: u64, 160 | seqno: u32, 161 | root_hash: [u8; 32], 162 | file_hash: [u8; 32], 163 | } 164 | -------------------------------------------------------------------------------- /src/adnl/keystore.rs: -------------------------------------------------------------------------------- 1 | use std::collections::hash_map; 2 | use std::sync::Arc; 3 | 4 | use anyhow::Result; 5 | use everscale_crypto::ed25519; 6 | 7 | use super::node_id::{ComputeNodeIds, NodeIdFull, NodeIdShort}; 8 | use crate::util::FastHashMap; 9 | 10 | /// Tagged keystore for ADNL keys 11 | #[derive(Default)] 12 | pub struct Keystore { 13 | keys: FastHashMap>, 14 | tags: FastHashMap, 15 | } 16 | 17 | impl Keystore { 18 | pub fn builder() -> KeystoreBuilder { 19 | KeystoreBuilder::default() 20 | } 21 | 22 | /// 
Searches key by its short id 23 | pub fn key_by_id(&self, id: &NodeIdShort) -> Result<&Arc, KeystoreError> { 24 | if let Some(key) = self.keys.get(id) { 25 | Ok(key) 26 | } else { 27 | Err(KeystoreError::KeyIdNotFound(*id)) 28 | } 29 | } 30 | 31 | /// Searches key by its tag 32 | pub fn key_by_tag(&self, tag: usize) -> Result<&Arc, KeystoreError> { 33 | if let Some(id) = self.tags.get(&tag) { 34 | self.key_by_id(id) 35 | } else { 36 | Err(KeystoreError::KeyTagNotFound(tag)) 37 | } 38 | } 39 | 40 | /// Returns inner keys table 41 | #[inline(always)] 42 | pub fn keys(&self) -> &FastHashMap> { 43 | &self.keys 44 | } 45 | 46 | /// Adds a new key with the specified tag 47 | /// 48 | /// NOTE: duplicate keys or tags will cause this method to fail 49 | pub fn add_key(&mut self, key: [u8; 32], tag: usize) -> Result { 50 | let secret_key = ed25519::SecretKey::from_bytes(key); 51 | let (_, short_id) = secret_key.compute_node_ids(); 52 | 53 | match self.tags.entry(tag) { 54 | hash_map::Entry::Vacant(entry) => { 55 | entry.insert(short_id); 56 | match self.keys.entry(short_id) { 57 | hash_map::Entry::Vacant(entry) => { 58 | entry.insert(Arc::new(secret_key.into())); 59 | Ok(short_id) 60 | } 61 | hash_map::Entry::Occupied(_) => Err(KeystoreError::DuplicatedKey(tag)), 62 | } 63 | } 64 | hash_map::Entry::Occupied(entry) => { 65 | if entry.get() == &short_id { 66 | Ok(short_id) 67 | } else { 68 | Err(KeystoreError::DuplicatedKeyTag(tag)) 69 | } 70 | } 71 | } 72 | } 73 | } 74 | 75 | #[derive(Default)] 76 | pub struct KeystoreBuilder { 77 | keystore: Keystore, 78 | } 79 | 80 | impl KeystoreBuilder { 81 | pub fn build(self) -> Keystore { 82 | self.keystore 83 | } 84 | 85 | /// Adds a new key with the specified tag 86 | /// 87 | /// NOTE: duplicate keys or tags will cause this method to fail 88 | pub fn with_tagged_key(mut self, key: [u8; 32], tag: usize) -> Result { 89 | self.keystore.add_key(key, tag)?; 90 | Ok(self) 91 | } 92 | 93 | /// Creates a new keystore from tagged secret 
keys 94 | pub fn with_tagged_keys(mut self, keys: I) -> Result 95 | where 96 | I: IntoIterator, 97 | { 98 | for (key, tag) in keys { 99 | self.keystore.add_key(key, tag)?; 100 | } 101 | Ok(self) 102 | } 103 | } 104 | 105 | /// ADNL key with precomputed node IDs 106 | pub struct Key { 107 | short_id: NodeIdShort, 108 | full_id: NodeIdFull, 109 | secret_key: ed25519::ExpandedSecretKey, 110 | } 111 | 112 | impl Key { 113 | /// Constructs new key from the secret key bytes 114 | pub fn from_bytes(secret_key: [u8; 32]) -> Self { 115 | ed25519::SecretKey::from_bytes(secret_key).into() 116 | } 117 | 118 | /// Returns short key id 119 | #[inline(always)] 120 | pub fn id(&self) -> &NodeIdShort { 121 | &self.short_id 122 | } 123 | 124 | /// Returns full key id 125 | #[inline(always)] 126 | pub fn full_id(&self) -> &NodeIdFull { 127 | &self.full_id 128 | } 129 | 130 | /// Returns inner secret key (as expanded) 131 | #[inline(always)] 132 | pub fn secret_key(&self) -> &ed25519::ExpandedSecretKey { 133 | &self.secret_key 134 | } 135 | 136 | /// Signs serializable boxed data 137 | #[inline(always)] 138 | pub fn sign>(&self, data: T) -> [u8; 64] { 139 | self.secret_key.sign(data, self.full_id.public_key()) 140 | } 141 | } 142 | 143 | impl From for Key { 144 | fn from(secret_key: ed25519::SecretKey) -> Self { 145 | let (full_id, short_id) = secret_key.compute_node_ids(); 146 | Self { 147 | short_id, 148 | full_id, 149 | secret_key: ed25519::ExpandedSecretKey::from(&secret_key), 150 | } 151 | } 152 | } 153 | 154 | #[derive(thiserror::Error, Debug)] 155 | pub enum KeystoreError { 156 | #[error("Duplicated key tag {0}")] 157 | DuplicatedKeyTag(usize), 158 | #[error("Duplicated secret key {0}")] 159 | DuplicatedKey(usize), 160 | #[error("Key is not found: {0}")] 161 | KeyIdNotFound(NodeIdShort), 162 | #[error("Key tag not found: {0}")] 163 | KeyTagNotFound(usize), 164 | #[error("Unexpected key")] 165 | UnexpectedKey, 166 | } 167 | 
-------------------------------------------------------------------------------- /src/dht/streams.rs: -------------------------------------------------------------------------------- 1 | use std::pin::Pin; 2 | use std::sync::Arc; 3 | use std::task::{Context, Poll}; 4 | 5 | use bytes::Bytes; 6 | use futures_util::future::BoxFuture; 7 | use futures_util::stream::FuturesUnordered; 8 | use futures_util::{Stream, StreamExt}; 9 | use tl_proto::TlRead; 10 | 11 | use super::node::Node; 12 | use super::peers_iter::PeersIter; 13 | use crate::proto; 14 | 15 | /// Stream for the `DhtNode::values` method. 16 | #[must_use = "streams do nothing unless polled"] 17 | pub struct DhtValuesStream { 18 | dht: Arc, 19 | query: Bytes, 20 | batch_len: Option, 21 | known_peers_version: u64, 22 | use_new_peers: bool, 23 | peers_iter: PeersIter, 24 | futures: FuturesUnordered>, 25 | future_count: usize, 26 | _marker: std::marker::PhantomData, 27 | } 28 | 29 | impl Unpin for DhtValuesStream {} 30 | 31 | impl DhtValuesStream 32 | where 33 | for<'a> T: TlRead<'a, Repr = tl_proto::Boxed> + Send + 'static, 34 | { 35 | pub(super) fn new(dht: Arc, key: proto::dht::Key<'_>) -> Self { 36 | let key_id = tl_proto::hash_as_boxed(key); 37 | let peers_iter = PeersIter::with_key_id(key_id); 38 | 39 | let batch_len = Some(dht.options().default_value_batch_len); 40 | let known_peers_version = dht.known_peers().version(); 41 | 42 | let query = tl_proto::serialize(proto::rpc::DhtFindValue { key: &key_id, k: 6 }).into(); 43 | 44 | Self { 45 | dht, 46 | query, 47 | batch_len, 48 | known_peers_version, 49 | use_new_peers: false, 50 | peers_iter, 51 | futures: Default::default(), 52 | future_count: usize::MAX, 53 | _marker: Default::default(), 54 | } 55 | } 56 | 57 | /// Use all DHT nodes in peers iterator 58 | pub fn use_full_batch(mut self) -> Self { 59 | self.batch_len = None; 60 | self 61 | } 62 | 63 | /// Whether stream should fill peers iterator when new nodes are found 64 | pub fn use_new_peers(mut self, 
enable: bool) -> Self { 65 | self.use_new_peers = enable; 66 | self 67 | } 68 | 69 | fn refill_futures(&mut self) { 70 | // Spawn at most `max_tasks` queries 71 | while let Some(peer_id) = self.peers_iter.next() { 72 | let dht = self.dht.clone(); 73 | let query = self.query.clone(); 74 | 75 | self.futures.push(Box::pin(async move { 76 | match dht.query_raw(&peer_id, query).await { 77 | Ok(Some(result)) => match dht.parse_value_result::(&result) { 78 | Ok(Some(value)) => Some(value), 79 | Ok(None) => None, 80 | Err(e) => { 81 | tracing::warn!("failed to parse queried value: {e}"); 82 | None 83 | } 84 | }, 85 | Ok(None) => None, 86 | Err(e) => { 87 | tracing::warn!("failed to query value: {e}"); 88 | None 89 | } 90 | } 91 | })); 92 | 93 | self.future_count += 1; 94 | if self.future_count > MAX_PARALLEL_FUTURES { 95 | break; 96 | } 97 | } 98 | } 99 | } 100 | 101 | impl Stream for DhtValuesStream 102 | where 103 | for<'a> T: TlRead<'a, Repr = tl_proto::Boxed> + Send + 'static, 104 | { 105 | type Item = ReceivedValue; 106 | 107 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 108 | let this = self.get_mut(); 109 | 110 | // Fill iterator during the first poll 111 | if this.future_count == usize::MAX { 112 | this.peers_iter.fill(&this.dht, this.batch_len); 113 | this.future_count = 0; 114 | } 115 | 116 | loop { 117 | // Keep starting new futures when we can 118 | if this.future_count < MAX_PARALLEL_FUTURES { 119 | this.refill_futures(); 120 | } 121 | 122 | match this.futures.poll_next_unpin(cx) { 123 | Poll::Ready(Some(value)) => { 124 | // Refill peers iterator when version has changed and `use_new_peers` is set 125 | match this.dht.known_peers().version() { 126 | version if this.use_new_peers && version != this.known_peers_version => { 127 | this.peers_iter.fill(&this.dht, this.batch_len); 128 | this.known_peers_version = version; 129 | } 130 | _ => {} 131 | } 132 | 133 | // Decrease the number of parallel futures on each new item from `futures` 134 
| this.future_count -= 1; 135 | 136 | if let Some(value) = value { 137 | break Poll::Ready(Some(value)); 138 | } 139 | } 140 | Poll::Ready(None) => break Poll::Ready(None), 141 | Poll::Pending => break Poll::Pending, 142 | } 143 | } 144 | } 145 | } 146 | 147 | type ValueFuture = BoxFuture<'static, Option>>; 148 | type ReceivedValue = (proto::dht::KeyDescriptionOwned, T); 149 | 150 | const MAX_PARALLEL_FUTURES: usize = 5; 151 | -------------------------------------------------------------------------------- /src/rldp/outgoing_transfer.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicBool, AtomicU32, Ordering}; 2 | use std::sync::Arc; 3 | 4 | use anyhow::Result; 5 | 6 | use super::encoder::*; 7 | use super::transfers_cache::TransferId; 8 | use crate::proto; 9 | use crate::util::*; 10 | 11 | pub struct OutgoingTransfer { 12 | buffer: Vec, 13 | transfer_id: TransferId, 14 | data: Vec, 15 | current_message_part: u32, 16 | encoder: Option, 17 | state: Arc, 18 | } 19 | 20 | impl OutgoingTransfer { 21 | pub fn new(data: Vec, transfer_id: Option) -> Self { 22 | let transfer_id = transfer_id.unwrap_or_else(gen_fast_bytes); 23 | 24 | Self { 25 | buffer: Vec::new(), 26 | transfer_id, 27 | data, 28 | current_message_part: 0, 29 | encoder: None, 30 | state: Default::default(), 31 | } 32 | } 33 | 34 | #[inline(always)] 35 | pub fn transfer_id(&self) -> &TransferId { 36 | &self.transfer_id 37 | } 38 | 39 | /// Encodes next part of the message. Returns packet count which is required to be sent. 
40 | pub fn start_next_part(&mut self) -> Result> { 41 | if self.is_finished() { 42 | return Ok(None); 43 | } 44 | 45 | let total = self.data.len(); 46 | let part = self.state.part() as usize; 47 | let processed = part * SLICE; 48 | if processed >= total { 49 | return Ok(None); 50 | } 51 | 52 | self.current_message_part = part as u32; 53 | 54 | let chunk_size = std::cmp::min(total - processed, SLICE); 55 | let encoder = self.encoder.insert(RaptorQEncoder::with_data( 56 | &self.data[processed..processed + chunk_size], 57 | )); 58 | 59 | let packet_count = encoder.params().packet_count; 60 | Ok(if packet_count > 0 { 61 | Some(packet_count) 62 | } else { 63 | None 64 | }) 65 | } 66 | 67 | pub fn prepare_chunk(&mut self) -> Result<&[u8]> { 68 | let encoder = match &mut self.encoder { 69 | Some(encoder) => encoder, 70 | None => return Err(OutgoingTransferError::EncoderIsNotReady.into()), 71 | }; 72 | 73 | let mut seqno_out = self.state.seqno_out(); 74 | let previous_seqno_out = seqno_out; 75 | 76 | let data = ok!(encoder.encode(&mut seqno_out)); 77 | 78 | let seqno_in = self.state.seqno_in(); 79 | 80 | let mut next_seqno_out = seqno_out; 81 | if seqno_out - seqno_in <= WINDOW { 82 | if previous_seqno_out == seqno_out { 83 | next_seqno_out += 1; 84 | } 85 | self.state.set_seqno_out(next_seqno_out); 86 | } 87 | 88 | tl_proto::serialize_into( 89 | proto::rldp::MessagePart::MessagePart { 90 | transfer_id: &self.transfer_id, 91 | fec_type: *encoder.params(), 92 | part: self.current_message_part, 93 | total_size: self.data.len() as u64, 94 | seqno: seqno_out, 95 | data: &data, 96 | }, 97 | &mut self.buffer, 98 | ); 99 | Ok(&self.buffer) 100 | } 101 | 102 | pub fn is_finished(&self) -> bool { 103 | self.state.has_reply() && ((self.state.part() as usize + 1) * SLICE >= self.data.len()) 104 | } 105 | 106 | pub fn is_finished_or_next_part(&self, part: u32) -> Result { 107 | if self.is_finished() { 108 | Ok(true) 109 | } else { 110 | match self.state.part() { 111 | x if x == part 
=> Ok(false), 112 | x if x == part + 1 => Ok(true), 113 | _ => Err(OutgoingTransferError::PartMismatch.into()), 114 | } 115 | } 116 | } 117 | 118 | pub fn state(&self) -> &Arc { 119 | &self.state 120 | } 121 | } 122 | 123 | #[derive(Default)] 124 | pub struct OutgoingTransferState { 125 | part: AtomicU32, 126 | has_reply: AtomicBool, 127 | seqno_out: AtomicU32, 128 | seqno_in: AtomicU32, 129 | } 130 | 131 | impl OutgoingTransferState { 132 | pub fn part(&self) -> u32 { 133 | self.part.load(Ordering::Acquire) 134 | } 135 | 136 | pub fn set_part(&self, part: u32) { 137 | let _ = self 138 | .part 139 | .compare_exchange(part - 1, part, Ordering::Release, Ordering::Relaxed); 140 | } 141 | 142 | pub fn has_reply(&self) -> bool { 143 | self.has_reply.load(Ordering::Acquire) 144 | } 145 | 146 | pub fn set_reply(&self) { 147 | self.has_reply.store(true, Ordering::Release); 148 | } 149 | 150 | pub fn seqno_out(&self) -> u32 { 151 | self.seqno_out.load(Ordering::Acquire) 152 | } 153 | 154 | pub fn set_seqno_out(&self, seqno: u32) { 155 | self.seqno_out.fetch_max(seqno, Ordering::Release); 156 | } 157 | 158 | pub fn seqno_in(&self) -> u32 { 159 | self.seqno_in.load(Ordering::Acquire) 160 | } 161 | 162 | pub fn set_seqno_in(&self, seqno: u32) { 163 | if seqno > self.seqno_out() { 164 | return; 165 | } 166 | self.seqno_in.fetch_max(seqno, Ordering::Release); 167 | } 168 | } 169 | 170 | const WINDOW: u32 = 1000; 171 | const SLICE: usize = 2000000; 172 | 173 | #[derive(thiserror::Error, Debug)] 174 | enum OutgoingTransferError { 175 | #[error("Encoder is not ready")] 176 | EncoderIsNotReady, 177 | #[error("Part mismatch")] 178 | PartMismatch, 179 | } 180 | -------------------------------------------------------------------------------- /src/dht/entry.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::{Borrow, Cow}; 2 | use std::sync::Arc; 3 | 4 | use anyhow::Result; 5 | use tl_proto::BoxedConstructor; 6 | 7 | use 
super::futures::StoreValue; 8 | use super::node::Node; 9 | use super::streams::DhtValuesStream; 10 | use crate::adnl; 11 | use crate::proto; 12 | use crate::util::now; 13 | 14 | /// DHT entry builder 15 | #[must_use] 16 | #[derive(Copy, Clone)] 17 | pub struct Entry<'a> { 18 | dht: &'a Arc, 19 | id: &'a [u8; 32], 20 | name: &'a str, 21 | key_index: u32, 22 | } 23 | 24 | impl<'a> Entry<'a> { 25 | pub(super) fn new(dht: &'a Arc, id: &'a T, name: &'a str) -> Self 26 | where 27 | T: Borrow<[u8; 32]>, 28 | { 29 | Self { 30 | dht, 31 | id: id.borrow(), 32 | name, 33 | key_index: 0, 34 | } 35 | } 36 | 37 | /// Sets the key index. Default: `0` 38 | pub fn with_key_index(mut self, idx: u32) -> Self { 39 | self.key_index = idx; 40 | self 41 | } 42 | 43 | /// Creates a new builder which can store the value in the DHT. 44 | /// 45 | /// See [`with_data_raw`] for raw API 46 | /// 47 | /// [`with_data_raw`]: fn@crate::dht::Entry::with_data_raw 48 | pub fn with_data(self, data: T) -> EntryWithData<'a> 49 | where 50 | T: tl_proto::TlWrite, 51 | { 52 | EntryWithData { 53 | inner: self, 54 | data: Cow::Owned(tl_proto::serialize(data)), 55 | expire_at: None, 56 | } 57 | } 58 | 59 | /// Creates a new builder which can store the value in the DHT. 60 | /// 61 | /// See [`with_data`] for more convenient API 62 | /// 63 | /// [`with_data`]: fn@crate::dht::Entry::with_data 64 | pub fn with_data_raw(self, data: &'a [u8]) -> EntryWithData<'a> { 65 | EntryWithData { 66 | inner: self, 67 | data: Cow::Borrowed(data), 68 | expire_at: None, 69 | } 70 | } 71 | 72 | /// Returns a stream of values for this entry. 73 | pub fn values(self) -> DhtValuesStream 74 | where 75 | for<'tl> T: tl_proto::TlRead<'tl, Repr = tl_proto::Boxed> + Send + 'static, 76 | { 77 | DhtValuesStream::new(self.dht.clone(), self.key()) 78 | } 79 | 80 | /// Queries a value from the given peer. 
81 | pub async fn value_from( 82 | self, 83 | peer_id: &adnl::NodeIdShort, 84 | ) -> Result> 85 | where 86 | for<'tl> T: tl_proto::TlRead<'tl, Repr = tl_proto::Boxed> + Send + 'static, 87 | { 88 | let key_id = tl_proto::hash_as_boxed(self.key()); 89 | let query = tl_proto::serialize(proto::rpc::DhtFindValue { key: &key_id, k: 6 }).into(); 90 | 91 | match self.dht.query_raw(peer_id, query).await? { 92 | Some(result) => self.dht.parse_value_result(&result), 93 | None => Ok(None), 94 | } 95 | } 96 | 97 | /// Returns TL representation of the entry key. 98 | pub fn key(&self) -> proto::dht::Key<'a> { 99 | proto::dht::Key { 100 | id: self.id, 101 | name: self.name.as_bytes(), 102 | idx: self.key_index, 103 | } 104 | } 105 | } 106 | 107 | pub struct EntryWithData<'a> { 108 | inner: Entry<'a>, 109 | data: Cow<'a, [u8]>, 110 | expire_at: Option, 111 | } 112 | 113 | impl<'a> EntryWithData<'a> { 114 | /// Sets the expiration time for the value. 115 | pub fn expire_at(mut self, timestamp: u32) -> Self { 116 | self.expire_at = Some(timestamp); 117 | self 118 | } 119 | 120 | /// Sets expiration time for the value as `now + ttl` 121 | pub fn with_ttl(mut self, ttl: u32) -> Self { 122 | self.expire_at = Some(now() + ttl); 123 | self 124 | } 125 | 126 | /// Creates signed TL representation of the entry. 127 | pub fn sign(self, key: &adnl::Key) -> proto::dht::ValueOwned { 128 | let mut value = self.make_value(key); 129 | 130 | let key_signature = key.sign(value.key.as_boxed()); 131 | value.key.signature = &key_signature; 132 | 133 | let value_signature = key.sign(value.as_boxed()); 134 | value.signature = &value_signature; 135 | 136 | value.as_equivalent_owned() 137 | } 138 | 139 | /// Creates signed TL representation of the entry and stores it in the DHT. 
140 | /// 141 | /// See [`StoreValue`] 142 | pub fn sign_and_store(self, key: &adnl::Key) -> Result { 143 | let mut value = self.make_value(key); 144 | 145 | let key_signature = key.sign(value.key.as_boxed()); 146 | value.key.signature = &key_signature; 147 | 148 | let value_signature = key.sign(value.as_boxed()); 149 | value.signature = &value_signature; 150 | 151 | StoreValue::new(self.inner.dht.clone(), value) 152 | } 153 | 154 | fn make_value<'b>(&'b self, key: &'b adnl::Key) -> proto::dht::Value<'b> 155 | where 156 | 'a: 'b, 157 | { 158 | proto::dht::Value { 159 | key: proto::dht::KeyDescription { 160 | key: self.inner.key(), 161 | id: key.full_id().as_tl(), 162 | update_rule: proto::dht::UpdateRule::Signature, 163 | signature: Default::default(), 164 | }, 165 | value: &self.data, 166 | ttl: self 167 | .expire_at 168 | .unwrap_or_else(|| now() + self.inner.dht.options().value_ttl_sec), 169 | signature: Default::default(), 170 | } 171 | } 172 | } 173 | -------------------------------------------------------------------------------- /src/proto/overlay.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use smallvec::SmallVec; 3 | use tl_proto::{BoxedConstructor, TlRead, TlWrite}; 4 | 5 | use super::{rldp, HashRef}; 6 | 7 | #[derive(TlWrite, TlRead)] 8 | pub struct Nodes<'tl> { 9 | pub nodes: SmallVec<[Node<'tl>; 5]>, 10 | } 11 | 12 | impl BoxedConstructor for Nodes<'_> { 13 | const TL_ID: u32 = tl_proto::id!("overlay.nodes", scheme = "scheme.tl"); 14 | } 15 | 16 | #[derive(Clone, TlWrite, TlRead)] 17 | pub struct NodesOwned { 18 | pub nodes: SmallVec<[NodeOwned; 5]>, 19 | } 20 | 21 | impl BoxedConstructor for NodesOwned { 22 | const TL_ID: u32 = Nodes::TL_ID; 23 | } 24 | 25 | #[derive(Debug, Copy, Clone, Eq, PartialEq, TlWrite, TlRead)] 26 | pub struct Node<'tl> { 27 | pub id: everscale_crypto::tl::PublicKey<'tl>, 28 | #[tl(size_hint = 32)] 29 | pub overlay: HashRef<'tl>, 30 | #[tl(size_hint = 4)] 31 | 
pub version: u32, 32 | pub signature: &'tl [u8], 33 | } 34 | 35 | impl Node<'_> { 36 | pub fn as_equivalent_owned(&self) -> NodeOwned { 37 | NodeOwned { 38 | id: self.id.as_equivalent_owned(), 39 | overlay: *self.overlay, 40 | version: self.version, 41 | signature: self.signature.to_vec().into(), 42 | } 43 | } 44 | } 45 | 46 | #[derive(Debug, Clone, TlWrite, TlRead)] 47 | pub struct NodeOwned { 48 | pub id: everscale_crypto::tl::PublicKeyOwned, 49 | pub overlay: [u8; 32], 50 | pub version: u32, 51 | pub signature: Bytes, 52 | } 53 | 54 | impl NodeOwned { 55 | pub fn as_equivalent_ref(&self) -> Node { 56 | Node { 57 | id: self.id.as_equivalent_ref(), 58 | overlay: &self.overlay, 59 | version: self.version, 60 | signature: &self.signature, 61 | } 62 | } 63 | } 64 | 65 | #[derive(TlWrite)] 66 | #[tl(boxed, id = "overlay.node.toSign", scheme = "scheme.tl")] 67 | pub struct NodeToSign<'tl> { 68 | pub id: HashRef<'tl>, 69 | pub overlay: HashRef<'tl>, 70 | pub version: u32, 71 | } 72 | 73 | #[derive(TlWrite)] 74 | #[tl(boxed, id = "tonNode.shardPublicOverlayId", scheme = "scheme.tl")] 75 | pub struct ShardPublicOverlayId<'tl> { 76 | pub workchain: i32, 77 | pub shard: u64, 78 | pub zero_state_file_hash: HashRef<'tl>, 79 | } 80 | 81 | pub struct CatchainFirstBlock<'a, 'tl: 'a, I> { 82 | pub unique_hash: HashRef<'tl>, 83 | pub nodes: tl_proto::IterRef<'a, I>, 84 | } 85 | 86 | impl<'a, 'tl: 'a, I> TlWrite for CatchainFirstBlock<'a, 'tl, I> 87 | where 88 | I: Iterator> + ExactSizeIterator + Clone, 89 | { 90 | type Repr = tl_proto::Boxed; 91 | 92 | fn max_size_hint(&self) -> usize { 93 | 4 + self.unique_hash.max_size_hint() + self.nodes.max_size_hint() 94 | } 95 | 96 | fn write_to

(&self, packet: &mut P) 97 | where 98 | P: tl_proto::TlPacket, 99 | { 100 | const ID: u32 = tl_proto::id!(scheme = "scheme.tl", "catchain.firstblock"); 101 | ID.write_to(packet); 102 | self.unique_hash.write_to(packet); 103 | self.nodes.write_to(packet); 104 | } 105 | } 106 | 107 | #[derive(Debug, Copy, Clone, TlWrite, TlRead)] 108 | #[tl(boxed, id = "overlay.message", scheme = "scheme.tl", size_hint = 32)] 109 | pub struct Message<'tl> { 110 | pub overlay: HashRef<'tl>, 111 | } 112 | 113 | #[derive(Debug, Copy, Clone, TlWrite, TlRead)] 114 | #[tl(boxed, scheme = "scheme.tl")] 115 | pub enum Broadcast<'tl> { 116 | #[tl(id = "overlay.broadcast")] 117 | Broadcast(OverlayBroadcast<'tl>), 118 | #[tl(id = "overlay.broadcastFec")] 119 | BroadcastFec(OverlayBroadcastFec<'tl>), 120 | #[tl(id = "overlay.broadcastFecShort")] 121 | BroadcastFecShort { 122 | src: everscale_crypto::tl::PublicKey<'tl>, 123 | certificate: Certificate<'tl>, 124 | #[tl(size_hint = 32)] 125 | broadcast_hash: HashRef<'tl>, 126 | #[tl(size_hint = 32)] 127 | part_data_hash: HashRef<'tl>, 128 | seqno: u32, 129 | signature: &'tl [u8], 130 | }, 131 | #[tl(id = "overlay.broadcastNotFound", size_hint = 0)] 132 | BroadcastNotFound, 133 | #[tl(id = "overlay.fec.completed", size_hint = 32)] 134 | FecCompleted { hash: HashRef<'tl> }, 135 | #[tl(id = "overlay.fec.received", size_hint = 32)] 136 | FecReceived { hash: HashRef<'tl> }, 137 | #[tl(id = "overlay.unicast")] 138 | Unicast { data: &'tl [u8] }, 139 | } 140 | 141 | #[derive(Debug, Copy, Clone, TlWrite, TlRead)] 142 | pub struct OverlayBroadcast<'tl> { 143 | pub src: everscale_crypto::tl::PublicKey<'tl>, 144 | pub certificate: Certificate<'tl>, 145 | pub flags: u32, 146 | pub data: &'tl [u8], 147 | pub date: u32, 148 | pub signature: &'tl [u8], 149 | } 150 | 151 | #[derive(Debug, Copy, Clone, TlWrite, TlRead)] 152 | pub struct OverlayBroadcastFec<'tl> { 153 | pub src: everscale_crypto::tl::PublicKey<'tl>, 154 | pub certificate: Certificate<'tl>, 155 | 
#[tl(size_hint = 32)] 156 | pub data_hash: HashRef<'tl>, 157 | pub data_size: u32, 158 | pub flags: u32, 159 | pub data: &'tl [u8], 160 | pub seqno: u32, 161 | pub fec: rldp::RaptorQFecType, 162 | pub date: u32, 163 | pub signature: &'tl [u8], 164 | } 165 | 166 | #[derive(Debug, Copy, Clone, TlWrite, TlRead)] 167 | #[tl(boxed, scheme = "scheme.tl")] 168 | pub enum Certificate<'tl> { 169 | #[tl(id = "overlay.certificate")] 170 | Certificate { 171 | issued_by: everscale_crypto::tl::PublicKey<'tl>, 172 | expire_at: u32, 173 | max_size: u32, 174 | signature: &'tl [u8], 175 | }, 176 | #[tl(id = "overlay.emptyCertificate", size_hint = 0)] 177 | EmptyCertificate, 178 | } 179 | -------------------------------------------------------------------------------- /src/adnl/node_id.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Borrow; 2 | use std::convert::TryFrom; 3 | 4 | use everscale_crypto::{ed25519, tl}; 5 | use rand::Rng; 6 | 7 | /// Full ADNL node id. 
8 | /// 9 | /// See [`PublicKey::Ed25519`] 10 | /// 11 | /// [`PublicKey::Ed25519`]: everscale_crypto::tl::PublicKey::Ed25519 12 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 13 | pub struct NodeIdFull(ed25519::PublicKey); 14 | 15 | impl NodeIdFull { 16 | /// Constructs full node id from a valid ED25519 public key 17 | pub const fn new(public_key: ed25519::PublicKey) -> Self { 18 | Self(public_key) 19 | } 20 | 21 | /// Returns inner public key 22 | #[inline(always)] 23 | pub const fn public_key(&self) -> &ed25519::PublicKey { 24 | &self.0 25 | } 26 | 27 | /// Represents public key as a TL structure 28 | #[inline(always)] 29 | pub fn as_tl(&self) -> tl::PublicKey { 30 | self.0.as_tl() 31 | } 32 | 33 | /// Verifies the signature of an arbitrary serializable data 34 | pub fn verify>( 35 | &self, 36 | data: T, 37 | other_signature: &[u8], 38 | ) -> Result<(), NodeIdFullError> { 39 | match <[u8; 64]>::try_from(other_signature) { 40 | Ok(other_signature) if self.0.verify(data, &other_signature) => Ok(()), 41 | _ => Err(NodeIdFullError::InvalidSignature), 42 | } 43 | } 44 | 45 | /// Hashes inner public key 46 | pub fn compute_short_id(&self) -> NodeIdShort { 47 | NodeIdShort::new(tl_proto::hash(self.0.as_tl())) 48 | } 49 | } 50 | 51 | impl From for NodeIdFull { 52 | fn from(key: ed25519::PublicKey) -> Self { 53 | Self::new(key) 54 | } 55 | } 56 | 57 | impl<'a> TryFrom> for NodeIdFull { 58 | type Error = NodeIdFullError; 59 | 60 | fn try_from(value: tl::PublicKey<'a>) -> Result { 61 | match value { 62 | tl::PublicKey::Ed25519 { key } => match ed25519::PublicKey::from_bytes(*key) { 63 | Some(public_key) => Ok(Self::new(public_key)), 64 | None => Err(NodeIdFullError::InvalidPublicKey), 65 | }, 66 | _ => Err(NodeIdFullError::UnsupportedPublicKey), 67 | } 68 | } 69 | } 70 | 71 | #[derive(Debug, thiserror::Error)] 72 | pub enum NodeIdFullError { 73 | #[error("Unsupported public key")] 74 | UnsupportedPublicKey, 75 | #[error("Invalid public key")] 76 | InvalidPublicKey, 77 | 
#[error("Invalid signature")] 78 | InvalidSignature, 79 | } 80 | 81 | /// Short ADNL node id. 82 | #[derive(Default, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] 83 | #[repr(transparent)] 84 | pub struct NodeIdShort([u8; 32]); 85 | 86 | impl NodeIdShort { 87 | /// Constructs short node id from public key hash 88 | #[inline(always)] 89 | pub const fn new(hash: [u8; 32]) -> Self { 90 | Self(hash) 91 | } 92 | 93 | /// Generates random short node id 94 | pub fn random() -> Self { 95 | Self(rand::thread_rng().gen()) 96 | } 97 | 98 | /// Returns inner bytes 99 | #[inline(always)] 100 | pub const fn as_slice(&self) -> &[u8; 32] { 101 | &self.0 102 | } 103 | 104 | #[inline(always)] 105 | pub fn is_zero(&self) -> bool { 106 | self == &[0; 32] 107 | } 108 | } 109 | 110 | impl std::fmt::Display for NodeIdShort { 111 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 112 | let mut output = [0u8; 64]; 113 | hex::encode_to_slice(self.0, &mut output).ok(); 114 | 115 | // SAFETY: output is guaranteed to contain only [0-9a-f] 116 | let output = unsafe { std::str::from_utf8_unchecked(&output) }; 117 | f.write_str(output) 118 | } 119 | } 120 | 121 | impl std::fmt::Debug for NodeIdShort { 122 | #[inline(always)] 123 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 124 | std::fmt::Display::fmt(self, f) 125 | } 126 | } 127 | 128 | impl PartialEq<[u8]> for NodeIdShort { 129 | #[inline(always)] 130 | fn eq(&self, other: &[u8]) -> bool { 131 | self.0.eq(other) 132 | } 133 | } 134 | 135 | impl PartialEq<[u8; 32]> for NodeIdShort { 136 | #[inline(always)] 137 | fn eq(&self, other: &[u8; 32]) -> bool { 138 | self.0.eq(other) 139 | } 140 | } 141 | 142 | impl From for [u8; 32] { 143 | #[inline(always)] 144 | fn from(id: NodeIdShort) -> Self { 145 | id.0 146 | } 147 | } 148 | 149 | impl From<&NodeIdShort> for [u8; 32] { 150 | #[inline(always)] 151 | fn from(id: &NodeIdShort) -> Self { 152 | id.0 153 | } 154 | } 155 | 156 | impl From<[u8; 32]> for 
NodeIdShort { 157 | #[inline(always)] 158 | fn from(id: [u8; 32]) -> Self { 159 | Self(id) 160 | } 161 | } 162 | 163 | impl Borrow<[u8; 32]> for NodeIdShort { 164 | #[inline(always)] 165 | fn borrow(&self) -> &[u8; 32] { 166 | &self.0 167 | } 168 | } 169 | 170 | impl<'a> Borrow<[u8; 32]> for &'a NodeIdShort { 171 | #[inline(always)] 172 | fn borrow(&self) -> &[u8; 32] { 173 | &self.0 174 | } 175 | } 176 | 177 | /// Abstract trait to compute all node ids 178 | pub trait ComputeNodeIds { 179 | fn compute_node_ids(&self) -> (NodeIdFull, NodeIdShort); 180 | } 181 | 182 | impl ComputeNodeIds for ed25519::SecretKey { 183 | fn compute_node_ids(&self) -> (NodeIdFull, NodeIdShort) { 184 | let public_key = ed25519::PublicKey::from(self); 185 | let full_id = NodeIdFull::new(public_key); 186 | let short_id = full_id.compute_short_id(); 187 | (full_id, short_id) 188 | } 189 | } 190 | 191 | impl ComputeNodeIds for ed25519::PublicKey { 192 | fn compute_node_ids(&self) -> (NodeIdFull, NodeIdShort) { 193 | let full_id = NodeIdFull::new(*self); 194 | let short_id = full_id.compute_short_id(); 195 | (full_id, short_id) 196 | } 197 | } 198 | -------------------------------------------------------------------------------- /src/rldp/incoming_transfer.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicU32, Ordering}; 2 | use std::sync::Arc; 3 | 4 | use anyhow::Result; 5 | 6 | use super::decoder::*; 7 | use super::transfers_cache::TransferId; 8 | use crate::proto; 9 | 10 | pub struct IncomingTransfer { 11 | buffer: Vec, 12 | transfer_id: TransferId, 13 | max_answer_size: u32, 14 | confirm_count: usize, 15 | data: Vec, 16 | decoder: Option, 17 | part: u32, 18 | state: Arc, 19 | total_size: Option, 20 | } 21 | 22 | impl IncomingTransfer { 23 | pub fn new(transfer_id: TransferId, max_answer_size: u32) -> Self { 24 | Self { 25 | buffer: Vec::new(), 26 | transfer_id, 27 | max_answer_size, 28 | confirm_count: 0, 29 | data: 
Vec::new(), 30 | decoder: None, 31 | part: 0, 32 | state: Default::default(), 33 | total_size: None, 34 | } 35 | } 36 | 37 | pub fn total_size(&self) -> Option { 38 | self.total_size 39 | } 40 | 41 | pub fn data(&self) -> &[u8] { 42 | self.data.as_slice() 43 | } 44 | 45 | pub fn into_data(self) -> Vec { 46 | self.data 47 | } 48 | 49 | pub fn take_data(&mut self) -> Vec { 50 | std::mem::take(&mut self.data) 51 | } 52 | 53 | pub fn process_chunk(&mut self, message: MessagePart) -> Result> { 54 | // Check FEC type 55 | let fec_type = message.fec_type; 56 | 57 | // Initialize `total_size` on first message 58 | let total_size = match self.total_size { 59 | Some(total_size) if total_size != message.total_size as usize => { 60 | return Err(IncomingTransferError::TotalSizeMismatch.into()) 61 | } 62 | Some(total_size) => total_size, 63 | None => { 64 | let total_size = message.total_size as usize; 65 | if total_size > self.max_answer_size as usize { 66 | return Err(IncomingTransferError::TooBigTransferSize.into()); 67 | } 68 | self.total_size = Some(total_size); 69 | self.data.reserve_exact(total_size); 70 | total_size 71 | } 72 | }; 73 | 74 | // Check message part 75 | let decoder = match message.part.cmp(&self.part) { 76 | std::cmp::Ordering::Equal => match &mut self.decoder { 77 | Some(decoder) if decoder.params() != &fec_type => { 78 | return Err(IncomingTransferError::PacketParametersMismatch.into()) 79 | } 80 | Some(decoder) => decoder, 81 | None => self 82 | .decoder 83 | .get_or_insert_with(|| RaptorQDecoder::with_params(fec_type)), 84 | }, 85 | std::cmp::Ordering::Less => { 86 | tl_proto::serialize_into( 87 | proto::rldp::MessagePart::Complete { 88 | transfer_id: &self.transfer_id, 89 | part: message.part, 90 | }, 91 | &mut self.buffer, 92 | ); 93 | return Ok(Some(&self.buffer)); 94 | } 95 | std::cmp::Ordering::Greater => return Ok(None), 96 | }; 97 | 98 | // Decode message data 99 | match decoder.decode(message.seqno, message.data) { 100 | Some(data) if data.len() 
+ self.data.len() > total_size => { 101 | Err(IncomingTransferError::TooBigTransferSize.into()) 102 | } 103 | Some(mut data) => { 104 | self.data.append(&mut data); 105 | 106 | // Reset decoder 107 | if self.data.len() < total_size { 108 | self.decoder = None; 109 | self.part += 1; 110 | self.confirm_count = 0; 111 | } 112 | 113 | tl_proto::serialize_into( 114 | proto::rldp::MessagePart::Complete { 115 | transfer_id: &self.transfer_id, 116 | part: message.part, 117 | }, 118 | &mut self.buffer, 119 | ); 120 | Ok(Some(&self.buffer)) 121 | } 122 | None if self.confirm_count == 9 => { 123 | self.confirm_count = 0; 124 | tl_proto::serialize_into( 125 | proto::rldp::MessagePart::Confirm { 126 | transfer_id: &self.transfer_id, 127 | part: message.part, 128 | seqno: decoder.seqno(), 129 | }, 130 | &mut self.buffer, 131 | ); 132 | Ok(Some(&self.buffer)) 133 | } 134 | None => { 135 | self.confirm_count += 1; 136 | Ok(None) 137 | } 138 | } 139 | } 140 | 141 | pub fn state(&self) -> &Arc { 142 | &self.state 143 | } 144 | } 145 | 146 | #[derive(Default)] 147 | pub struct IncomingTransferState { 148 | updates: AtomicU32, 149 | } 150 | 151 | impl IncomingTransferState { 152 | pub fn updates(&self) -> u32 { 153 | self.updates.load(Ordering::Acquire) 154 | } 155 | 156 | pub fn increase_updates(&self) { 157 | self.updates.fetch_add(1, Ordering::Release); 158 | } 159 | } 160 | 161 | pub struct MessagePart { 162 | pub fec_type: proto::rldp::RaptorQFecType, 163 | pub part: u32, 164 | pub total_size: u64, 165 | pub seqno: u32, 166 | pub data: Vec, 167 | } 168 | 169 | #[derive(thiserror::Error, Debug)] 170 | enum IncomingTransferError { 171 | #[error("Total packet size mismatch")] 172 | TotalSizeMismatch, 173 | #[error("Packet parameters mismatch")] 174 | PacketParametersMismatch, 175 | #[error("Too big size for RLDP transfer")] 176 | TooBigTransferSize, 177 | } 178 | -------------------------------------------------------------------------------- /src/scheme.tl: 
-------------------------------------------------------------------------------- 1 | // Generic stuff 2 | //////////////////////////////////////////////////////////////////////////////// 3 | 4 | ---types--- 5 | 6 | int ? = Int; 7 | long ? = Long; 8 | double ? = Double; 9 | string ? = String; 10 | object ? = Object; 11 | function ? = Function; 12 | bytes data:string = Bytes; 13 | true = True; 14 | boolTrue = Bool; 15 | boolFalse = Bool; 16 | 17 | vector {t:Type} # [ t ] = Vector t; 18 | 19 | int128 4*[ int ] = Int128; 20 | int256 8*[ int ] = Int256; 21 | 22 | fec.raptorQ data_size:int symbol_size:int symbols_count:int = fec.Type; 23 | 24 | pub.ed25519 key:int256 = PublicKey; 25 | pub.aes key:int256 = PublicKey; 26 | pub.overlay name:bytes = PublicKey; 27 | 28 | 29 | // ADNL 30 | //////////////////////////////////////////////////////////////////////////////// 31 | 32 | ---types--- 33 | 34 | adnl.id.short id:int256 = adnl.id.Short; 35 | 36 | adnl.address.udp ip:int port:int = adnl.Address; 37 | adnl.address.udp6 ip:int128 port:int = adnl.Address; 38 | 39 | adnl.addressList addrs:(vector adnl.Address) version:int reinit_date:int priority:int expire_at:int = adnl.AddressList; 40 | 41 | adnl.node id:PublicKey addr_list:adnl.addressList = adnl.Node; 42 | adnl.nodes nodes:(vector adnl.node) = adnl.Nodes; 43 | 44 | adnl.packetContents 45 | rand1:bytes 46 | flags:# 47 | from:flags.0?PublicKey 48 | from_short:flags.1?adnl.id.short 49 | message:flags.2?adnl.Message 50 | messages:flags.3?(vector adnl.Message) 51 | address:flags.4?adnl.addressList 52 | priority_address:flags.5?adnl.addressList 53 | seqno:flags.6?long 54 | confirm_seqno:flags.7?long 55 | recv_addr_list_version:flags.8?int 56 | recv_priority_addr_list_version:flags.9?int 57 | reinit_date:flags.10?int 58 | dst_reinit_date:flags.10?int 59 | signature:flags.11?bytes 60 | rand2:bytes 61 | = adnl.PacketContents; 62 | 63 | adnl.message.createChannel key:int256 date:int = adnl.Message; 64 | adnl.message.confirmChannel 
key:int256 peer_key:int256 date:int = adnl.Message; 65 | 66 | adnl.message.custom data:bytes = adnl.Message; 67 | 68 | adnl.message.nop = adnl.Message; 69 | adnl.message.reinit date:int = adnl.Message; 70 | 71 | adnl.message.query query_id:int256 query:bytes = adnl.Message; 72 | adnl.message.answer query_id:int256 answer:bytes = adnl.Message; 73 | 74 | adnl.message.part hash:int256 total_size:int offset:int data:bytes = adnl.Message; 75 | 76 | adnl.pong value:long = adnl.Pong; 77 | 78 | ---functions--- 79 | 80 | adnl.ping value:long = adnl.Pong; 81 | 82 | 83 | // RLDP 84 | //////////////////////////////////////////////////////////////////////////////// 85 | 86 | ---types--- 87 | 88 | rldp.messagePart transfer_id:int256 fec_type:fec.Type part:int total_size:long seqno:int data:bytes = rldp.MessagePart; 89 | rldp.confirm transfer_id:int256 part:int seqno:int = rldp.MessagePart; 90 | rldp.complete transfer_id:int256 part:int = rldp.MessagePart; 91 | 92 | rldp.message id:int256 data:bytes = rldp.Message; 93 | rldp.query query_id:int256 max_answer_size:long timeout:int data:bytes = rldp.Message; 94 | rldp.answer query_id:int256 data:bytes = rldp.Message; 95 | 96 | 97 | // DHT 98 | //////////////////////////////////////////////////////////////////////////////// 99 | 100 | ---types--- 101 | 102 | dht.node id:PublicKey addr_list:adnl.addressList version:int signature:bytes = dht.Node; 103 | dht.nodes nodes:(vector dht.node) = dht.Nodes; 104 | 105 | dht.key id:int256 name:bytes idx:int = dht.Key; 106 | 107 | dht.updateRule.signature = dht.UpdateRule; 108 | dht.updateRule.anybody = dht.UpdateRule; 109 | dht.updateRule.overlayNodes = dht.UpdateRule; 110 | 111 | dht.keyDescription key:dht.key id:PublicKey update_rule:dht.UpdateRule signature:bytes = dht.KeyDescription; 112 | 113 | dht.value key:dht.keyDescription value:bytes ttl:int signature:bytes = dht.Value; 114 | 115 | dht.pong random_id:long = dht.Pong; 116 | 117 | dht.valueNotFound nodes:dht.nodes = dht.ValueResult; 118 
| dht.valueFound value:dht.Value = dht.ValueResult; 119 | 120 | dht.stored = dht.Stored; 121 | dht.message node:dht.node = dht.Message; 122 | 123 | ---functions--- 124 | 125 | dht.ping random_id:long = dht.Pong; 126 | dht.store value:dht.value = dht.Stored; 127 | dht.findNode key:int256 k:int = dht.Nodes; 128 | dht.findValue key:int256 k:int = dht.ValueResult; 129 | dht.getSignedAddressList = dht.Node; 130 | 131 | dht.query node:dht.node = True; 132 | 133 | 134 | // Overlay 135 | //////////////////////////////////////////////////////////////////////////////// 136 | 137 | ---types--- 138 | 139 | overlay.node.toSign id:adnl.id.short overlay:int256 version:int = overlay.node.ToSign; 140 | overlay.node id:PublicKey overlay:int256 version:int signature:bytes = overlay.Node; 141 | overlay.nodes nodes:(vector overlay.node) = overlay.Nodes; 142 | 143 | overlay.message overlay:int256 = overlay.Message; 144 | 145 | overlay.fec.received hash:int256 = overlay.Broadcast; 146 | overlay.fec.completed hash:int256 = overlay.Broadcast; 147 | 148 | overlay.broadcast.id src:int256 data_hash:int256 flags:int = overlay.broadcast.Id; 149 | overlay.broadcastFec.id src:int256 type:int256 data_hash:int256 size:int flags:int = overlay.broadcastFec.Id; 150 | overlay.broadcastFec.partId broadcast_hash:int256 data_hash:int256 seqno:int = overlay.broadcastFec.PartId; 151 | 152 | overlay.broadcast.toSign hash:int256 date:int = overlay.broadcast.ToSign; 153 | 154 | overlay.certificate issued_by:PublicKey expire_at:int max_size:int signature:bytes = overlay.Certificate; 155 | overlay.emptyCertificate = overlay.Certificate; 156 | 157 | overlay.unicast data:bytes = overlay.Broadcast; 158 | overlay.broadcast src:PublicKey certificate:overlay.Certificate flags:int data:bytes date:int signature:bytes = overlay.Broadcast; 159 | overlay.broadcastFec src:PublicKey certificate:overlay.Certificate data_hash:int256 data_size:int flags:int 160 | data:bytes seqno:int fec:fec.Type date:int signature:bytes = 
overlay.Broadcast; 161 | overlay.broadcastFecShort src:PublicKey certificate:overlay.Certificate broadcast_hash:int256 part_data_hash:int256 seqno:int signature:bytes = overlay.Broadcast; 162 | overlay.broadcastNotFound = overlay.Broadcast; 163 | 164 | ---functions--- 165 | 166 | overlay.getRandomPeers peers:overlay.nodes = overlay.Nodes; 167 | overlay.query overlay:int256 = True; 168 | 169 | 170 | // Other 171 | //////////////////////////////////////////////////////////////////////////////// 172 | 173 | ---types--- 174 | 175 | tonNode.shardPublicOverlayId workchain:int shard:long zero_state_file_hash:int256 = tonNode.ShardPublicOverlayId; 176 | catchain.firstblock unique_hash:int256 nodes:(vector int256) = catchain.FirstBlock; 177 | -------------------------------------------------------------------------------- /src/adnl/peer.rs: -------------------------------------------------------------------------------- 1 | use std::net::{Ipv4Addr, SocketAddrV4}; 2 | use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; 3 | 4 | use everscale_crypto::ed25519; 5 | 6 | use super::node_id::{NodeIdFull, NodeIdShort}; 7 | use crate::util::*; 8 | 9 | pub type Peers = FastDashMap; 10 | 11 | /// Remote peer info 12 | pub struct Peer { 13 | /// Remove peer public key 14 | id: NodeIdFull, 15 | /// IPv4 address 16 | addr: AtomicU64, 17 | /// Adnl channel key pair to encrypt messages from our side 18 | channel_key: ed25519::KeyPair, 19 | /// Packets receiver state 20 | receiver_state: PeerState, 21 | /// Packets sender state 22 | sender_state: PeerState, 23 | } 24 | 25 | impl Peer { 26 | /// Creates new peer with receiver state initialized with the local reinit date 27 | pub fn new(local_reinit_date: u32, addr: SocketAddrV4, id: NodeIdFull) -> Self { 28 | Self { 29 | id, 30 | addr: AtomicU64::new(pack_socket_addr(&addr)), 31 | channel_key: ed25519::KeyPair::generate(&mut rand::thread_rng()), 32 | receiver_state: PeerState::for_receive_with_reinit_date(local_reinit_date), 33 | 
sender_state: PeerState::for_send(), 34 | } 35 | } 36 | 37 | /// Tries to update peer reinit date 38 | /// 39 | /// It is only allowed to update peer reinit date if it is greater or equal to the known one 40 | #[inline(always)] 41 | pub fn try_reinit_sender(&self, reinit_date: u32) -> bool { 42 | let sender_reinit_date = self.sender_state.reinit_date(); 43 | match reinit_date.cmp(&sender_reinit_date) { 44 | std::cmp::Ordering::Equal => true, 45 | std::cmp::Ordering::Greater => { 46 | self.sender_state.set_reinit_date(reinit_date); 47 | if sender_reinit_date != 0 { 48 | self.sender_state.history(false).reset(); 49 | self.sender_state.history(true).reset(); 50 | self.receiver_state.history(false).reset(); 51 | self.receiver_state.history(true).reset(); 52 | } 53 | true 54 | } 55 | std::cmp::Ordering::Less => false, 56 | } 57 | } 58 | 59 | /// Returns peer full id (public key) 60 | #[inline(always)] 61 | pub fn id(&self) -> &NodeIdFull { 62 | &self.id 63 | } 64 | 65 | #[inline(always)] 66 | pub fn addr(&self) -> SocketAddrV4 { 67 | unpack_socket_addr(self.addr.load(Ordering::Acquire)) 68 | } 69 | 70 | #[inline(always)] 71 | pub fn set_addr(&self, addr: SocketAddrV4) { 72 | self.addr.store(pack_socket_addr(&addr), Ordering::Release); 73 | } 74 | 75 | /// Adnl channel key pair to encrypt messages from our side 76 | #[inline(always)] 77 | pub fn channel_key(&self) -> &ed25519::KeyPair { 78 | &self.channel_key 79 | } 80 | 81 | /// Packets receiver state 82 | #[inline(always)] 83 | pub fn receiver_state(&self) -> &PeerState { 84 | &self.receiver_state 85 | } 86 | 87 | /// Packets sender state 88 | #[inline(always)] 89 | pub fn sender_state(&self) -> &PeerState { 90 | &self.sender_state 91 | } 92 | 93 | /// Generates new channel key pair and resets receiver/sender states 94 | /// 95 | /// NOTE: Receiver state increments its reinit date so the peer will reset states 96 | /// on the next message (see [`try_reinit_sender`]) 97 | /// 98 | /// [`try_reinit_sender`]: 
fn@crate::adnl::Peer::try_reinit_sender 99 | pub fn reset(&mut self) { 100 | let reinit_date = self.receiver_state.reinit_date(); 101 | 102 | self.channel_key = ed25519::KeyPair::generate(&mut rand::thread_rng()); 103 | self.receiver_state = PeerState::for_receive_with_reinit_date(reinit_date + 1); 104 | self.sender_state = PeerState::for_send(); 105 | } 106 | } 107 | 108 | pub fn pack_socket_addr(addr: &SocketAddrV4) -> u64 { 109 | let mut result = [0; 8]; 110 | result[0..4].copy_from_slice(&addr.ip().octets()); 111 | result[4..6].copy_from_slice(&addr.port().to_le_bytes()); 112 | u64::from_le_bytes(result) 113 | } 114 | 115 | #[inline(always)] 116 | pub fn unpack_socket_addr(addr: u64) -> SocketAddrV4 { 117 | let result = addr.to_le_bytes(); 118 | let addr: [u8; 4] = result[0..4].try_into().unwrap(); 119 | SocketAddrV4::new( 120 | Ipv4Addr::from(addr), 121 | u16::from_le_bytes([result[4], result[5]]), 122 | ) 123 | } 124 | 125 | /// Connection side packets histories and reinit date 126 | pub struct PeerState { 127 | ordinary_history: PacketsHistory, 128 | priority_history: PacketsHistory, 129 | reinit_date: AtomicU32, 130 | } 131 | 132 | impl PeerState { 133 | fn for_receive_with_reinit_date(reinit_date: u32) -> Self { 134 | Self { 135 | ordinary_history: PacketsHistory::for_recv(), 136 | priority_history: PacketsHistory::for_recv(), 137 | reinit_date: AtomicU32::new(reinit_date), 138 | } 139 | } 140 | 141 | fn for_send() -> Self { 142 | Self { 143 | ordinary_history: PacketsHistory::for_send(), 144 | priority_history: PacketsHistory::for_send(), 145 | reinit_date: Default::default(), 146 | } 147 | } 148 | 149 | #[inline(always)] 150 | pub fn history(&self, priority: bool) -> &PacketsHistory { 151 | if priority { 152 | &self.priority_history 153 | } else { 154 | &self.ordinary_history 155 | } 156 | } 157 | 158 | pub fn reinit_date(&self) -> u32 { 159 | self.reinit_date.load(Ordering::Acquire) 160 | } 161 | 162 | pub fn set_reinit_date(&self, reinit_date: u32) { 
163 | self.reinit_date.store(reinit_date, Ordering::Release) 164 | } 165 | } 166 | 167 | /// The context in which the new peer is added 168 | #[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] 169 | pub enum NewPeerContext { 170 | AdnlPacket, 171 | Dht, 172 | PublicOverlay, 173 | } 174 | 175 | /// New peers filter 176 | pub trait PeerFilter: Send + Sync { 177 | fn check(&self, ctx: NewPeerContext, addr: SocketAddrV4, peer_id: &NodeIdShort) -> bool; 178 | } 179 | 180 | #[cfg(test)] 181 | mod tests { 182 | use super::*; 183 | 184 | #[test] 185 | fn correct_addr_pack() { 186 | let test = SocketAddrV4::new(Ipv4Addr::LOCALHOST, 23123); 187 | 188 | let packed = pack_socket_addr(&test); 189 | 190 | let unpacked = unpack_socket_addr(packed); 191 | assert_eq!(unpacked, test); 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /src/adnl/handshake.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryInto; 2 | use std::sync::Arc; 3 | 4 | use aes::cipher::{StreamCipher, StreamCipherSeek}; 5 | use everscale_crypto::ed25519; 6 | 7 | use super::encryption::*; 8 | use super::keystore::Key; 9 | use super::node_id::{NodeIdFull, NodeIdShort}; 10 | use super::packet_view::*; 11 | use crate::util::FastHashMap; 12 | 13 | #[inline(always)] 14 | pub fn compute_handshake_prefix_len(version: Option) -> usize { 15 | 96 + if version.is_some() { 4 } else { 0 } 16 | } 17 | 18 | /// Modifies `buffer` in-place to contain the handshake packet 19 | pub fn build_handshake_packet( 20 | peer_id: &NodeIdShort, 21 | peer_id_full: &NodeIdFull, 22 | buffer: &mut Vec, 23 | version: Option, 24 | ) { 25 | // Create temp local key 26 | let temp_private_key = ed25519::SecretKey::generate(&mut rand::thread_rng()); 27 | let temp_private_key = ed25519::ExpandedSecretKey::from(&temp_private_key); 28 | let temp_public_key = ed25519::PublicKey::from(&temp_private_key); 29 | 30 | let shared_secret = 
temp_private_key.compute_shared_secret(peer_id_full.public_key()); 31 | 32 | // Prepare packet 33 | let checksum: [u8; 32] = compute_packet_data_hash(version, buffer.as_slice()); 34 | 35 | let header_len = compute_handshake_prefix_len(version); 36 | let buffer_len = buffer.len(); 37 | buffer.resize(header_len + buffer_len, 0); 38 | buffer.copy_within(..buffer_len, header_len); 39 | 40 | buffer[..32].copy_from_slice(peer_id.as_slice()); 41 | buffer[32..64].copy_from_slice(temp_public_key.as_bytes()); 42 | 43 | match version { 44 | Some(version) => { 45 | let mut xor = [ 46 | (version >> 8) as u8, 47 | version as u8, 48 | (version >> 8) as u8, 49 | version as u8, 50 | ]; 51 | for (i, byte) in buffer[..64].iter().enumerate() { 52 | xor[i % 4] ^= *byte; 53 | } 54 | for (i, byte) in checksum.iter().enumerate() { 55 | xor[i % 4] ^= *byte; 56 | } 57 | buffer[64..68].copy_from_slice(&xor); 58 | buffer[68..100].copy_from_slice(&checksum); 59 | build_packet_cipher(&shared_secret, &checksum).apply_keystream(&mut buffer[100..]); 60 | } 61 | None => { 62 | buffer[64..96].copy_from_slice(&checksum); 63 | build_packet_cipher(&shared_secret, &checksum).apply_keystream(&mut buffer[96..]); 64 | } 65 | } 66 | } 67 | 68 | /// Attempts to decode the buffer as an ADNL handshake packet. On a successful nonempty result, 69 | /// this buffer remains as decrypted packet data. 70 | /// 71 | /// Expected packet structure (without version): 72 | /// - 0..=31 - short local node id 73 | /// - 32..=63 - sender pubkey 74 | /// - 64..=95 - checksum 75 | /// - 96..... - encrypted data 76 | /// 77 | /// Expected packet structure (with version): 78 | /// - 0..=31 - short local node id 79 | /// - 32..=63 - sender pubkey 80 | /// - 64..=68 - XOR'ed ADNL version 81 | /// - 68..=100 - checksum 82 | /// - 100..... 
- encrypted data 83 | /// 84 | /// **NOTE: even on failure buffer can be modified** 85 | pub fn parse_handshake_packet( 86 | keys: &FastHashMap>, 87 | buffer: &mut PacketView<'_>, 88 | ) -> Result)>, HandshakeError> { 89 | const PUBLIC_KEY_RANGE: std::ops::Range = 32..64; 90 | 91 | // Ordinary data ranges 92 | const DATA_START: usize = 96; 93 | const CHECKSUM_RANGE: std::ops::Range = 64..DATA_START; 94 | const DATA_RANGE: std::ops::RangeFrom = DATA_START..; 95 | 96 | // Data ranges for packets with ADNL version 97 | const EXT_DATA_START: usize = 100; 98 | const EXT_CHECKSUM_RANGE: std::ops::Range = 68..EXT_DATA_START; 99 | const EXT_DATA_RANGE: std::ops::RangeFrom = EXT_DATA_START..; 100 | 101 | if buffer.len() < DATA_START { 102 | return Err(HandshakeError::BadHandshakePacketLength); 103 | } 104 | 105 | // SAFETY: NodeIdShort is 32 (<= 96) bytes and has the same layout as `[u8; 32]` 106 | // due to `#[repr(transparent)]` 107 | let local_id = unsafe { &*(buffer.as_ptr() as *const NodeIdShort) }; 108 | 109 | // Get local id 110 | let local_key = match keys.get(local_id) { 111 | Some(key) => key, 112 | // No local keys found 113 | None => return Ok(None), 114 | }; 115 | 116 | // Compute shared secret 117 | let shared_secret = 118 | match ed25519::PublicKey::from_bytes(buffer[PUBLIC_KEY_RANGE].try_into().unwrap()) { 119 | Some(other_public_key) => local_key 120 | .secret_key() 121 | .compute_shared_secret(&other_public_key), 122 | None => return Err(HandshakeError::InvalidPublicKey), 123 | }; 124 | 125 | if buffer.len() > EXT_DATA_START { 126 | if let Some(version) = 127 | decode_version::((&buffer[..EXT_DATA_START]).try_into().unwrap()) 128 | { 129 | // Build cipher 130 | let mut cipher = build_packet_cipher( 131 | &shared_secret, 132 | &buffer[EXT_CHECKSUM_RANGE].try_into().unwrap(), 133 | ); 134 | 135 | // Decode data 136 | cipher.apply_keystream(&mut buffer[EXT_DATA_RANGE]); 137 | 138 | // If hash is ok 139 | if compute_packet_data_hash(Some(version), 
&buffer[EXT_DATA_RANGE]).as_slice() 140 | == &buffer[EXT_CHECKSUM_RANGE] 141 | { 142 | // Leave only data in the buffer and return version 143 | buffer.remove_prefix(EXT_DATA_START); 144 | return Ok(Some((*local_id, Some(version)))); 145 | } 146 | 147 | // Otherwise restore data 148 | cipher.seek(0); 149 | cipher.apply_keystream(&mut buffer[EXT_DATA_RANGE]); 150 | } 151 | } 152 | 153 | // Decode data 154 | build_packet_cipher(&shared_secret, &buffer[CHECKSUM_RANGE].try_into().unwrap()) 155 | .apply_keystream(&mut buffer[DATA_RANGE]); 156 | 157 | // Check checksum 158 | if compute_packet_data_hash(None, &buffer[DATA_RANGE]).as_slice() != &buffer[CHECKSUM_RANGE] { 159 | return Err(HandshakeError::BadHandshakePacketChecksum); 160 | } 161 | 162 | // Leave only data in the buffer 163 | buffer.remove_prefix(DATA_START); 164 | 165 | Ok(Some((*local_id, None))) 166 | } 167 | 168 | #[derive(thiserror::Error, Debug)] 169 | pub enum HandshakeError { 170 | #[error("Bad handshake packet length")] 171 | BadHandshakePacketLength, 172 | #[error("Bad handshake packet checksum")] 173 | BadHandshakePacketChecksum, 174 | #[error("Invalid public key")] 175 | InvalidPublicKey, 176 | } 177 | -------------------------------------------------------------------------------- /src/util/packets_history.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicU64, Ordering}; 2 | 3 | pub struct PacketsHistory { 4 | mask: Option, 5 | seqno: AtomicU64, 6 | } 7 | 8 | impl PacketsHistory { 9 | pub fn for_send() -> Self { 10 | Self { 11 | mask: None, 12 | seqno: Default::default(), 13 | } 14 | } 15 | 16 | pub fn for_recv() -> Self { 17 | Self { 18 | mask: Some(Default::default()), 19 | seqno: Default::default(), 20 | } 21 | } 22 | 23 | pub fn reset(&self) { 24 | if let Some(mask) = &self.mask { 25 | loop { 26 | let index = mask.index.load(Ordering::Acquire); 27 | if index == IN_TRANSIT { 28 | // TODO: yield 29 | continue; 30 | } 31 | 32 | 
if mask 33 | .index 34 | .compare_exchange(index, IN_TRANSIT, Ordering::Release, Ordering::Relaxed) 35 | .is_err() 36 | { 37 | continue; 38 | } 39 | break; 40 | } 41 | 42 | for i in 0..HISTORY_SIZE { 43 | let value = u64::from(i == HISTORY_SIZE / 2); 44 | mask.bits[i].store(value, Ordering::Release); 45 | } 46 | } 47 | 48 | self.seqno.store(0, Ordering::Release); 49 | if let Some(mask) = &self.mask { 50 | let _ = 51 | mask.index 52 | .compare_exchange(IN_TRANSIT, 0, Ordering::Release, Ordering::Relaxed); 53 | } 54 | } 55 | 56 | pub fn seqno(&self) -> u64 { 57 | self.seqno.load(Ordering::Acquire) 58 | } 59 | 60 | pub fn bump_seqno(&self) -> u64 { 61 | self.seqno.fetch_add(1, Ordering::AcqRel) + 1 62 | } 63 | 64 | pub fn deliver_packet(&self, seqno: u64) -> bool { 65 | let mask = match &self.mask { 66 | Some(mask) => mask, 67 | None => loop { 68 | let last_seqno = self.seqno.load(Ordering::Acquire); 69 | if last_seqno < seqno 70 | && self 71 | .seqno 72 | .compare_exchange(last_seqno, seqno, Ordering::Release, Ordering::Relaxed) 73 | .is_err() 74 | { 75 | continue; 76 | } 77 | return true; 78 | }, 79 | }; 80 | 81 | let seqno_masked = seqno & INDEX_MASK; 82 | let seqno_normalized = seqno & !INDEX_MASK; 83 | 84 | loop { 85 | let index = mask.index.load(Ordering::Acquire); 86 | if index == IN_TRANSIT { 87 | // TODO: yield 88 | continue; 89 | } 90 | 91 | let index_masked = index & INDEX_MASK; 92 | let index_normalized = index & !INDEX_MASK; 93 | 94 | if index_normalized > seqno_normalized + INDEX_MASK + 1 { 95 | tracing::debug!(seqno, index_normalized, "peer packet is too old"); 96 | return false; 97 | } 98 | 99 | let mask_bit = 1 << (seqno_masked % 64); 100 | let mask_offset = match index_normalized.cmp(&seqno_normalized) { 101 | std::cmp::Ordering::Greater => Some(0), 102 | std::cmp::Ordering::Equal => Some(HISTORY_SIZE / 2), 103 | std::cmp::Ordering::Less => None, 104 | }; 105 | 106 | let next_index = match mask_offset { 107 | Some(mask_offset) => { 108 | let 
mask_offset = mask_offset + seqno_masked as usize / 64; 109 | let already_delivered = 110 | mask.bits[mask_offset].load(Ordering::Acquire) & mask_bit; 111 | if mask.index.load(Ordering::Acquire) != index { 112 | continue; 113 | } 114 | 115 | if already_delivered != 0 { 116 | tracing::trace!(seqno, "peer packet was already received"); 117 | return false; 118 | } 119 | 120 | if mask 121 | .index 122 | .compare_exchange(index, IN_TRANSIT, Ordering::Release, Ordering::Relaxed) 123 | .is_err() 124 | { 125 | continue; 126 | } 127 | 128 | mask.bits[mask_offset].fetch_or(mask_bit, Ordering::Release); 129 | 130 | index 131 | } 132 | None => { 133 | if mask 134 | .index 135 | .compare_exchange(index, IN_TRANSIT, Ordering::Release, Ordering::Relaxed) 136 | .is_err() 137 | { 138 | continue; 139 | } 140 | 141 | if index_normalized + INDEX_MASK + 1 == seqno_normalized { 142 | for i in 0..HISTORY_SIZE / 2 { 143 | mask.bits[i].store( 144 | mask.bits[i + HISTORY_SIZE / 2].load(Ordering::Acquire), 145 | Ordering::Release, 146 | ) 147 | } 148 | 149 | for bits in &mask.bits[HISTORY_SIZE / 2..HISTORY_SIZE] { 150 | bits.store(0, Ordering::Relaxed) 151 | } 152 | } else { 153 | for bits in &mask.bits { 154 | bits.store(0, Ordering::Release) 155 | } 156 | } 157 | 158 | index_normalized 159 | } 160 | }; 161 | 162 | let last_seqno = self.seqno.load(Ordering::Acquire); 163 | if last_seqno < seqno { 164 | self.seqno.store(seqno, Ordering::Release); 165 | } 166 | 167 | let index_masked = (index_masked + 1) & !INDEX_MASK; 168 | let _ = mask.index.compare_exchange( 169 | IN_TRANSIT, 170 | next_index | index_masked, 171 | Ordering::Release, 172 | Ordering::Relaxed, 173 | ); 174 | 175 | break; 176 | } 177 | 178 | true 179 | } 180 | } 181 | 182 | #[derive(Default)] 183 | struct HistoryBits { 184 | index: AtomicU64, 185 | bits: [AtomicU64; HISTORY_SIZE], 186 | } 187 | 188 | const INDEX_MASK: u64 = HISTORY_BITS as u64 / 2 - 1; 189 | const IN_TRANSIT: u64 = 0xFFFFFFFFFFFFFFFF; 190 | 191 | const 
HISTORY_BITS: usize = 512; 192 | const HISTORY_SIZE: usize = HISTORY_BITS / 64; 193 | -------------------------------------------------------------------------------- /src/proto/dht.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use smallvec::SmallVec; 3 | use tl_proto::{BoxedConstructor, BoxedWrapper, TlRead, TlWrite}; 4 | 5 | use super::{adnl, HashRef}; 6 | 7 | #[derive(TlRead)] 8 | #[tl(boxed, scheme = "scheme.tl")] 9 | pub enum ValueResult<'tl> { 10 | #[tl(id = "dht.valueFound")] 11 | ValueFound(BoxedWrapper>), 12 | #[tl(id = "dht.valueNotFound")] 13 | ValueNotFound(NodesOwned), 14 | } 15 | 16 | #[derive(TlWrite)] 17 | #[tl(boxed, scheme = "scheme.tl")] 18 | pub enum ValueResultOwned { 19 | #[tl(id = "dht.valueFound")] 20 | ValueFound(BoxedWrapper), 21 | #[tl(id = "dht.valueNotFound")] 22 | ValueNotFound(NodesOwned), 23 | } 24 | 25 | #[derive(TlWrite, TlRead)] 26 | pub struct Nodes<'tl> { 27 | pub nodes: SmallVec<[Node<'tl>; 5]>, 28 | } 29 | 30 | impl BoxedConstructor for Nodes<'_> { 31 | const TL_ID: u32 = tl_proto::id!("dht.nodes", scheme = "scheme.tl"); 32 | } 33 | 34 | #[derive(TlWrite, TlRead)] 35 | pub struct NodesOwned { 36 | pub nodes: Vec, 37 | } 38 | 39 | impl BoxedConstructor for NodesOwned { 40 | const TL_ID: u32 = Nodes::TL_ID; 41 | } 42 | 43 | #[derive(Debug, Copy, Clone, TlWrite, TlRead)] 44 | pub struct Node<'tl> { 45 | pub id: everscale_crypto::tl::PublicKey<'tl>, 46 | pub addr_list: adnl::AddressList, 47 | pub version: u32, 48 | pub signature: &'tl [u8], 49 | } 50 | 51 | impl BoxedConstructor for Node<'_> { 52 | const TL_ID: u32 = tl_proto::id!("dht.node", scheme = "scheme.tl"); 53 | } 54 | 55 | impl Node<'_> { 56 | pub fn as_equivalent_owned(&self) -> NodeOwned { 57 | NodeOwned { 58 | id: self.id.as_equivalent_owned(), 59 | addr_list: self.addr_list, 60 | version: self.version, 61 | signature: self.signature.to_vec().into(), 62 | } 63 | } 64 | } 65 | 66 | #[derive(Debug, Clone, 
TlWrite, TlRead)] 67 | pub struct NodeOwned { 68 | pub id: everscale_crypto::tl::PublicKeyOwned, 69 | pub addr_list: adnl::AddressList, 70 | pub version: u32, 71 | pub signature: Bytes, 72 | } 73 | 74 | impl BoxedConstructor for NodeOwned { 75 | const TL_ID: u32 = Node::TL_ID; 76 | } 77 | 78 | impl NodeOwned { 79 | pub fn as_equivalent_ref(&self) -> Node { 80 | Node { 81 | id: self.id.as_equivalent_ref(), 82 | addr_list: self.addr_list, 83 | version: self.version, 84 | signature: &self.signature, 85 | } 86 | } 87 | } 88 | 89 | #[derive(Debug, Copy, Clone, TlWrite, TlRead)] 90 | pub struct Value<'tl> { 91 | pub key: KeyDescription<'tl>, 92 | pub value: &'tl [u8], 93 | pub ttl: u32, 94 | pub signature: &'tl [u8], 95 | } 96 | 97 | impl BoxedConstructor for Value<'_> { 98 | const TL_ID: u32 = tl_proto::id!("dht.value", scheme = "scheme.tl"); 99 | } 100 | 101 | impl Value<'_> { 102 | pub fn as_equivalent_owned(&self) -> ValueOwned { 103 | ValueOwned { 104 | key: self.key.as_equivalent_owned(), 105 | value: self.value.to_vec().into(), 106 | ttl: self.ttl, 107 | signature: self.signature.to_vec().into(), 108 | } 109 | } 110 | } 111 | 112 | #[derive(Debug, Clone, TlWrite, TlRead)] 113 | pub struct ValueOwned { 114 | pub key: KeyDescriptionOwned, 115 | pub value: Bytes, 116 | pub ttl: u32, 117 | pub signature: Bytes, 118 | } 119 | 120 | impl BoxedConstructor for ValueOwned { 121 | const TL_ID: u32 = Value::TL_ID; 122 | } 123 | 124 | impl ValueOwned { 125 | pub fn as_equivalent_ref(&self) -> Value { 126 | Value { 127 | key: self.key.as_equivalent_ref(), 128 | value: &self.value, 129 | ttl: self.ttl, 130 | signature: &self.signature, 131 | } 132 | } 133 | } 134 | 135 | #[derive(Debug, Copy, Clone, TlWrite, TlRead)] 136 | pub struct KeyDescription<'tl> { 137 | pub key: Key<'tl>, 138 | pub id: everscale_crypto::tl::PublicKey<'tl>, 139 | pub update_rule: UpdateRule, 140 | pub signature: &'tl [u8], 141 | } 142 | 143 | impl BoxedConstructor for KeyDescription<'_> { 144 | const 
TL_ID: u32 = tl_proto::id!("dht.keyDescription", scheme = "scheme.tl"); 145 | } 146 | 147 | impl KeyDescription<'_> { 148 | pub fn as_equivalent_owned(&self) -> KeyDescriptionOwned { 149 | KeyDescriptionOwned { 150 | key: self.key.as_equivalent_owned(), 151 | id: self.id.as_equivalent_owned(), 152 | update_rule: self.update_rule, 153 | signature: self.signature.to_vec().into(), 154 | } 155 | } 156 | } 157 | 158 | #[derive(Debug, Clone, TlWrite, TlRead)] 159 | pub struct KeyDescriptionOwned { 160 | pub key: KeyOwned, 161 | pub id: everscale_crypto::tl::PublicKeyOwned, 162 | pub update_rule: UpdateRule, 163 | pub signature: Bytes, 164 | } 165 | 166 | impl BoxedConstructor for KeyDescriptionOwned { 167 | const TL_ID: u32 = KeyDescription::TL_ID; 168 | } 169 | 170 | impl KeyDescriptionOwned { 171 | pub fn as_equivalent_ref(&self) -> KeyDescription<'_> { 172 | KeyDescription { 173 | key: self.key.as_equivalent_ref(), 174 | id: self.id.as_equivalent_ref(), 175 | update_rule: self.update_rule, 176 | signature: &self.signature, 177 | } 178 | } 179 | } 180 | 181 | #[derive(Debug, Copy, Clone, Eq, PartialEq, TlWrite, TlRead)] 182 | pub struct Key<'tl> { 183 | #[tl(size_hint = 32)] 184 | pub id: HashRef<'tl>, 185 | pub name: &'tl [u8], 186 | pub idx: u32, 187 | } 188 | 189 | impl BoxedConstructor for Key<'_> { 190 | const TL_ID: u32 = tl_proto::id!("dht.key", scheme = "scheme.tl"); 191 | } 192 | 193 | impl Key<'_> { 194 | pub fn as_equivalent_owned(&self) -> KeyOwned { 195 | KeyOwned { 196 | id: *self.id, 197 | name: self.name.to_vec().into(), 198 | idx: self.idx, 199 | } 200 | } 201 | } 202 | 203 | #[derive(Debug, Clone, TlWrite, TlRead)] 204 | pub struct KeyOwned { 205 | #[tl(size_hint = 32)] 206 | pub id: [u8; 32], 207 | pub name: Bytes, 208 | pub idx: u32, 209 | } 210 | 211 | impl BoxedConstructor for KeyOwned { 212 | const TL_ID: u32 = Key::TL_ID; 213 | } 214 | 215 | impl KeyOwned { 216 | pub fn as_equivalent_ref(&self) -> Key { 217 | Key { 218 | id: &self.id, 219 | 
name: &self.name, 220 | idx: self.idx, 221 | } 222 | } 223 | } 224 | 225 | #[derive(Debug, Copy, Clone, Eq, PartialEq, TlWrite, TlRead)] 226 | #[tl(boxed, scheme = "scheme.tl")] 227 | pub enum UpdateRule { 228 | #[tl(id = "dht.updateRule.anybody", size_hint = 0)] 229 | Anybody, 230 | #[tl(id = "dht.updateRule.overlayNodes", size_hint = 0)] 231 | OverlayNodes, 232 | #[tl(id = "dht.updateRule.signature", size_hint = 0)] 233 | Signature, 234 | } 235 | 236 | #[derive(Copy, Clone, TlWrite, TlRead)] 237 | #[tl(boxed, id = "dht.pong", size_hint = 8, scheme = "scheme.tl")] 238 | pub struct Pong { 239 | pub random_id: u64, 240 | } 241 | 242 | #[derive(Copy, Clone, TlWrite, TlRead)] 243 | #[tl(boxed, id = "dht.stored", size_hint = 0, scheme = "scheme.tl")] 244 | pub struct Stored; 245 | -------------------------------------------------------------------------------- /src/rldp/node.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use anyhow::Result; 4 | use serde::{Deserialize, Serialize}; 5 | use tokio::sync::Semaphore; 6 | 7 | use super::compression; 8 | use super::transfers_cache::*; 9 | use crate::adnl; 10 | use crate::proto; 11 | use crate::subscriber::*; 12 | use crate::util::*; 13 | 14 | /// RLDP node configuration 15 | #[derive(Debug, Copy, Clone, Serialize, Deserialize)] 16 | #[serde(default)] 17 | pub struct NodeOptions { 18 | /// Max allowed RLDP answer size in bytes. Query will be rejected 19 | /// if answer is bigger. 20 | /// 21 | /// Default: `10485760` (10 MB) 22 | pub max_answer_size: u32, 23 | 24 | /// Max parallel RLDP queries per peer. 25 | /// 26 | /// Default: `16` 27 | pub max_peer_queries: usize, 28 | 29 | /// Min RLDP query timeout. 30 | /// 31 | /// Default: `500` ms 32 | pub query_min_timeout_ms: u64, 33 | 34 | /// Max RLDP query timeout 35 | /// 36 | /// Default: `10000` ms 37 | pub query_max_timeout_ms: u64, 38 | 39 | /// Number of FEC messages to send in group. 
There will be a short delay between them. 40 | /// 41 | /// Default: `10` 42 | pub query_wave_len: u32, 43 | 44 | /// Interval between FEC broadcast waves. 45 | /// 46 | /// Default: `10` ms 47 | pub query_wave_interval_ms: u64, 48 | 49 | /// Whether requests will be compressed. 50 | /// 51 | /// Default: `false` 52 | pub force_compression: bool, 53 | } 54 | 55 | impl Default for NodeOptions { 56 | fn default() -> Self { 57 | Self { 58 | max_answer_size: 10 * 1024 * 1024, 59 | max_peer_queries: 16, 60 | query_min_timeout_ms: 500, 61 | query_max_timeout_ms: 10000, 62 | query_wave_len: 10, 63 | query_wave_interval_ms: 10, 64 | force_compression: false, 65 | } 66 | } 67 | } 68 | 69 | /// Reliable UDP transport layer 70 | pub struct Node { 71 | /// Underlying ADNL node 72 | adnl: Arc, 73 | /// Parallel requests limiter 74 | semaphores: FastDashMap>, 75 | /// Transfers handler 76 | transfers: Arc, 77 | /// Configuration 78 | options: NodeOptions, 79 | } 80 | 81 | impl Node { 82 | /// Create new RLDP node on top of the given ADNL node 83 | pub fn new( 84 | adnl: Arc, 85 | subscribers: Vec>, 86 | options: NodeOptions, 87 | ) -> Result> { 88 | let transfers = Arc::new(TransfersCache::new(subscribers, options)); 89 | 90 | adnl.add_message_subscriber(transfers.clone())?; 91 | 92 | Ok(Arc::new(Self { 93 | adnl, 94 | semaphores: Default::default(), 95 | transfers, 96 | options, 97 | })) 98 | } 99 | 100 | /// Underlying ADNL node 101 | #[inline(always)] 102 | pub fn adnl(&self) -> &Arc { 103 | &self.adnl 104 | } 105 | 106 | #[inline(always)] 107 | pub fn options(&self) -> &NodeOptions { 108 | &self.options 109 | } 110 | 111 | pub fn metrics(&self) -> NodeMetrics { 112 | NodeMetrics { 113 | peer_count: self.semaphores.len(), 114 | transfers_cache_len: self.transfers.len(), 115 | } 116 | } 117 | 118 | /// Clears semaphores table 119 | pub fn gc(&self) { 120 | let max_permits = self.options.max_peer_queries; 121 | self.semaphores 122 | .retain(|_, semaphore| 
semaphore.available_permits() < max_permits); 123 | } 124 | 125 | #[tracing::instrument(level = "debug", name = "rldp_query", skip_all, fields(%local_id, %peer_id, ?roundtrip))] 126 | pub async fn query( 127 | &self, 128 | local_id: &adnl::NodeIdShort, 129 | peer_id: &adnl::NodeIdShort, 130 | data: Vec, 131 | roundtrip: Option, 132 | ) -> Result<(Option>, u64)> { 133 | let (query_id, query) = self.make_query(data); 134 | 135 | let peer = self 136 | .semaphores 137 | .entry(*peer_id) 138 | .or_insert_with(|| Arc::new(Semaphore::new(self.options.max_peer_queries))) 139 | .value() 140 | .clone(); 141 | 142 | let result = { 143 | let _permit = peer.acquire().await.ok(); 144 | self.transfers 145 | .query(&self.adnl, local_id, peer_id, query, roundtrip) 146 | .await 147 | }; 148 | 149 | match result? { 150 | (Some(answer), roundtrip) => match tl_proto::deserialize(&answer) { 151 | Ok(proto::rldp::Message::Answer { 152 | query_id: answer_id, 153 | data, 154 | }) if answer_id == &query_id => Ok(( 155 | Some(compression::decompress(data).unwrap_or_else(|| data.to_vec())), 156 | roundtrip, 157 | )), 158 | Ok(proto::rldp::Message::Answer { .. }) => Err(NodeError::QueryIdMismatch.into()), 159 | Ok(proto::rldp::Message::Message { .. }) => { 160 | Err(NodeError::UnexpectedAnswer("RldpMessageView::Message").into()) 161 | } 162 | Ok(proto::rldp::Message::Query { .. 
}) => { 163 | Err(NodeError::UnexpectedAnswer("RldpMessageView::Query").into()) 164 | } 165 | Err(e) => Err(NodeError::InvalidPacketContent(e).into()), 166 | }, 167 | (None, roundtrip) => Ok((None, roundtrip)), 168 | } 169 | } 170 | 171 | fn make_query(&self, mut data: Vec) -> ([u8; 32], Vec) { 172 | if self.options.force_compression { 173 | if let Err(e) = compression::compress(&mut data) { 174 | tracing::warn!("failed to compress RLDP query: {e:?}"); 175 | } 176 | } 177 | 178 | let query_id = gen_fast_bytes(); 179 | let data = proto::rldp::Message::Query { 180 | query_id: &query_id, 181 | max_answer_size: self.options.max_answer_size as u64, 182 | timeout: now() + self.options.query_max_timeout_ms as u32 / 1000, 183 | data: &data, 184 | }; 185 | (query_id, tl_proto::serialize(data)) 186 | } 187 | } 188 | 189 | #[async_trait::async_trait] 190 | impl MessageSubscriber for TransfersCache { 191 | async fn try_consume_custom<'a>( 192 | &self, 193 | ctx: SubscriberContext<'a>, 194 | constructor: u32, 195 | data: &'a [u8], 196 | ) -> Result { 197 | if constructor != proto::rldp::MessagePart::TL_ID_MESSAGE_PART 198 | && constructor != proto::rldp::MessagePart::TL_ID_CONFIRM 199 | && constructor != proto::rldp::MessagePart::TL_ID_COMPLETE 200 | { 201 | return Ok(false); 202 | } 203 | 204 | let message_part = tl_proto::deserialize(data)?; 205 | self.handle_message(ctx.adnl, ctx.local_id, ctx.peer_id, message_part) 206 | .await?; 207 | 208 | Ok(true) 209 | } 210 | } 211 | 212 | /// Instant RLDP node metrics 213 | #[derive(Debug, Copy, Clone)] 214 | pub struct NodeMetrics { 215 | pub peer_count: usize, 216 | pub transfers_cache_len: usize, 217 | } 218 | 219 | #[derive(thiserror::Error, Debug)] 220 | enum NodeError { 221 | #[error("Unexpected answer: {0}")] 222 | UnexpectedAnswer(&'static str), 223 | #[error("Invalid packet content: {0:?}")] 224 | InvalidPacketContent(tl_proto::TlError), 225 | #[error("Unknown query id")] 226 | QueryIdMismatch, 227 | } 228 | 
-------------------------------------------------------------------------------- /src/overlay/node.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | use std::sync::Arc; 3 | 4 | use anyhow::Result; 5 | use tl_proto::{BoxedConstructor, TlRead}; 6 | 7 | use super::overlay::{Overlay, OverlayMetrics, OverlayOptions}; 8 | use super::overlay_id::IdShort; 9 | use crate::adnl; 10 | use crate::proto; 11 | use crate::subscriber::*; 12 | use crate::util::*; 13 | 14 | /// P2P messages distribution layer group 15 | pub struct Node { 16 | /// Underlying ADNL node 17 | adnl: Arc, 18 | /// Local ADNL key 19 | node_key: Arc, 20 | /// Shared state 21 | state: Arc, 22 | } 23 | 24 | impl Node { 25 | pub fn new(adnl: Arc, key_tag: usize) -> Result> { 26 | let node_key = adnl.key_by_tag(key_tag)?.clone(); 27 | let state = Arc::new(NodeState::default()); 28 | 29 | adnl.add_query_subscriber(state.clone())?; 30 | adnl.add_message_subscriber(state.clone())?; 31 | 32 | Ok(Arc::new(Self { 33 | adnl, 34 | node_key, 35 | state, 36 | })) 37 | } 38 | 39 | /// Returns inner query subscriber 40 | pub fn query_subscriber(&self) -> Arc { 41 | self.state.clone() 42 | } 43 | 44 | /// Returns metrics for all overlays 45 | pub fn metrics(&self) -> impl Iterator + '_ { 46 | self.state 47 | .overlays 48 | .iter() 49 | .map(|item| (*item.id(), item.metrics())) 50 | } 51 | 52 | /// Underlying ADNL node 53 | pub fn adnl(&self) -> &Arc { 54 | &self.adnl 55 | } 56 | 57 | /// Adds overlay queries subscriber 58 | pub fn add_overlay_subscriber( 59 | &self, 60 | overlay_id: IdShort, 61 | subscriber: Arc, 62 | ) -> bool { 63 | use dashmap::mapref::entry::Entry; 64 | 65 | match self.state.subscribers.entry(overlay_id) { 66 | Entry::Vacant(entry) => { 67 | entry.insert(subscriber); 68 | true 69 | } 70 | Entry::Occupied(_) => false, 71 | } 72 | } 73 | 74 | /// Creates new public overlay 75 | pub fn add_public_overlay( 76 | &self, 77 | overlay_id: &IdShort, 
78 | options: OverlayOptions, 79 | ) -> (Arc, bool) { 80 | use dashmap::mapref::entry::Entry; 81 | 82 | match self.state.overlays.entry(*overlay_id) { 83 | Entry::Vacant(entry) => { 84 | let overlay = Overlay::new(self.node_key.clone(), *overlay_id, &[], options); 85 | entry.insert(overlay.clone()); 86 | (overlay, true) 87 | } 88 | Entry::Occupied(entry) => (entry.get().clone(), false), 89 | } 90 | } 91 | 92 | /// Creates new private overlay 93 | pub fn add_private_overlay( 94 | &self, 95 | overlay_id: &IdShort, 96 | overlay_key: Arc, 97 | peers: &[adnl::NodeIdShort], 98 | options: OverlayOptions, 99 | ) -> (Arc, bool) { 100 | use dashmap::mapref::entry::Entry; 101 | 102 | match self.state.overlays.entry(*overlay_id) { 103 | Entry::Vacant(entry) => { 104 | let overlay = Overlay::new(overlay_key, *overlay_id, peers, options); 105 | entry.insert(overlay.clone()); 106 | (overlay, true) 107 | } 108 | Entry::Occupied(entry) => (entry.get().clone(), false), 109 | } 110 | } 111 | 112 | /// Returns overlay by specified id 113 | #[inline(always)] 114 | pub fn get_overlay(&self, overlay_id: &IdShort) -> Result> { 115 | self.state.get_overlay(overlay_id) 116 | } 117 | } 118 | 119 | #[derive(Default)] 120 | struct NodeState { 121 | /// Overlays by ids 122 | overlays: FastDashMap>, 123 | /// Overlay query subscribers 124 | subscribers: FastDashMap>, 125 | } 126 | 127 | impl NodeState { 128 | fn get_overlay(&self, overlay_id: &IdShort) -> Result> { 129 | match self.overlays.get(overlay_id) { 130 | Some(overlay) => Ok(overlay.clone()), 131 | None => Err(NodeError::UnknownOverlay.into()), 132 | } 133 | } 134 | } 135 | 136 | #[async_trait::async_trait] 137 | impl MessageSubscriber for NodeState { 138 | async fn try_consume_custom<'a>( 139 | &self, 140 | ctx: SubscriberContext<'a>, 141 | constructor: u32, 142 | data: &'a [u8], 143 | ) -> Result { 144 | if constructor != proto::overlay::Message::TL_ID { 145 | return Ok(false); 146 | } 147 | 148 | let mut offset = 4; // skip 
`overlay::Message` constructor 149 | let overlay_id = IdShort::from(<[u8; 32]>::read_from(data, &mut offset)?); 150 | let broadcast = proto::overlay::Broadcast::read_from(data, &mut offset)?; 151 | 152 | // TODO: check that offset == data.len() 153 | 154 | let overlay = self.get_overlay(&overlay_id)?; 155 | match broadcast { 156 | proto::overlay::Broadcast::Broadcast(broadcast) => { 157 | overlay 158 | .receive_broadcast(ctx.adnl, ctx.local_id, ctx.peer_id, broadcast, data) 159 | .await?; 160 | Ok(true) 161 | } 162 | proto::overlay::Broadcast::BroadcastFec(broadcast) => { 163 | overlay 164 | .receive_fec_broadcast(ctx.adnl, ctx.local_id, ctx.peer_id, broadcast, data) 165 | .await?; 166 | Ok(true) 167 | } 168 | _ => Err(NodeError::UnsupportedOverlayBroadcastMessage.into()), 169 | } 170 | } 171 | } 172 | 173 | #[async_trait::async_trait] 174 | impl QuerySubscriber for NodeState { 175 | async fn try_consume_query<'a>( 176 | &self, 177 | ctx: SubscriberContext<'a>, 178 | constructor: u32, 179 | query: Cow<'a, [u8]>, 180 | ) -> Result> { 181 | if constructor != proto::rpc::OverlayQuery::TL_ID { 182 | return Ok(QueryConsumingResult::Rejected(query)); 183 | } 184 | 185 | let mut offset = 4; // skip `rpc::OverlayQuery` constructor 186 | let overlay_id = IdShort::from(<[u8; 32]>::read_from(&query, &mut offset)?); 187 | 188 | let constructor = u32::read_from(&query, &mut std::convert::identity(offset))?; 189 | if constructor == proto::rpc::OverlayGetRandomPeers::TL_ID { 190 | let query = proto::rpc::OverlayGetRandomPeers::read_from(&query, &mut offset)?; 191 | let overlay = self.get_overlay(&overlay_id)?; 192 | return QueryConsumingResult::consume( 193 | overlay.process_get_random_peers(query).into_boxed(), 194 | ); 195 | } 196 | 197 | let consumer = match self.subscribers.get(&overlay_id) { 198 | Some(consumer) => consumer.clone(), 199 | None => return Err(NodeError::NoConsumerFound.into()), 200 | }; 201 | 202 | match consumer 203 | .try_consume_query(ctx, constructor, 
Cow::Borrowed(&query[offset..])) 204 | .await? 205 | { 206 | QueryConsumingResult::Consumed(result) => Ok(QueryConsumingResult::Consumed(result)), 207 | QueryConsumingResult::Rejected(_) => Err(NodeError::UnsupportedQuery.into()), 208 | } 209 | } 210 | } 211 | 212 | #[derive(thiserror::Error, Debug)] 213 | enum NodeError { 214 | #[error("Unsupported overlay broadcast message")] 215 | UnsupportedOverlayBroadcastMessage, 216 | #[error("Unknown overlay")] 217 | UnknownOverlay, 218 | #[error("No consumer for message in overlay")] 219 | NoConsumerFound, 220 | #[error("Unsupported query")] 221 | UnsupportedQuery, 222 | } 223 | -------------------------------------------------------------------------------- /src/dht/storage.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use std::ops::Deref; 3 | 4 | use anyhow::Result; 5 | use smallvec::SmallVec; 6 | use tl_proto::{BoxedConstructor, HashWrapper, TlWrite}; 7 | 8 | use super::KEY_NODES; 9 | use crate::adnl; 10 | use crate::overlay; 11 | use crate::proto; 12 | use crate::util::*; 13 | 14 | pub struct StorageOptions { 15 | pub max_key_name_len: usize, 16 | pub max_key_index: u32, 17 | } 18 | 19 | /// Local DHT data storage 20 | pub struct Storage { 21 | storage: FastDashMap, 22 | options: StorageOptions, 23 | } 24 | 25 | impl Storage { 26 | pub fn new(options: StorageOptions) -> Self { 27 | Self { 28 | storage: Default::default(), 29 | options, 30 | } 31 | } 32 | 33 | /// Returns number of stored values 34 | pub fn len(&self) -> usize { 35 | self.storage.len() 36 | } 37 | 38 | /// Returns total size of stored values in bytes 39 | pub fn total_size(&self) -> usize { 40 | self.storage.iter().map(|item| item.value.len()).sum() 41 | } 42 | 43 | /// Returns value reference by key 44 | pub fn get_ref( 45 | &self, 46 | key: &StorageKeyId, 47 | ) -> Option + '_> { 48 | match self.storage.get(key) { 49 | Some(item) if item.ttl > now() => Some(item), 50 | _ => 
None, 51 | } 52 | } 53 | 54 | /// Inserts value into the local storage 55 | /// 56 | /// NOTE: Values with `UpdateRule::Anybody` can't be inserted 57 | pub fn insert(&self, value: proto::dht::Value<'_>) -> Result { 58 | if value.ttl <= now() { 59 | return Err(StorageError::ValueExpired.into()); 60 | } 61 | 62 | if !(0..=self.options.max_key_name_len).contains(&value.key.key.name.len()) 63 | || value.key.key.idx > self.options.max_key_index 64 | { 65 | return Err(StorageError::InvalidKey.into()); 66 | } 67 | 68 | if value.key.key.id != &tl_proto::hash(value.key.id) { 69 | return Err(StorageError::InvalidKey.into()); 70 | } 71 | 72 | match value.key.update_rule { 73 | proto::dht::UpdateRule::Signature => self.insert_signed_value(value), 74 | proto::dht::UpdateRule::OverlayNodes => self.insert_overlay_nodes(value), 75 | _ => Err(StorageError::UnsupportedUpdateRule.into()), 76 | } 77 | } 78 | 79 | /// Removes all outdated value 80 | pub fn gc(&self) { 81 | let now = now(); 82 | self.storage.retain(|_, value| value.ttl > now); 83 | } 84 | 85 | /// Inserts signed value into the storage 86 | fn insert_signed_value(&self, mut value: proto::dht::Value<'_>) -> Result { 87 | use dashmap::mapref::entry::Entry; 88 | 89 | let full_id = adnl::NodeIdFull::try_from(value.key.id)?; 90 | 91 | let key_signature = std::mem::take(&mut value.key.signature); 92 | full_id.verify(value.key.as_boxed(), key_signature)?; 93 | value.key.signature = key_signature; 94 | 95 | let value_signature = std::mem::take(&mut value.signature); 96 | full_id.verify(value.as_boxed(), value_signature)?; 97 | value.signature = value_signature; 98 | 99 | let key = tl_proto::hash_as_boxed(value.key.key); 100 | Ok(match self.storage.entry(key) { 101 | Entry::Occupied(mut entry) if entry.get().ttl < value.ttl => { 102 | entry.insert(value.as_equivalent_owned()); 103 | true 104 | } 105 | Entry::Occupied(_) => false, 106 | Entry::Vacant(entry) => { 107 | entry.insert(value.as_equivalent_owned()); 108 | true 109 | } 
110 | }) 111 | } 112 | 113 | /// Special case of inserting overlay nodes value. 114 | /// 115 | /// It requires empty signatures and special update rule 116 | fn insert_overlay_nodes(&self, value: proto::dht::Value) -> Result { 117 | use dashmap::mapref::entry::Entry; 118 | 119 | if !value.signature.is_empty() || !value.key.signature.is_empty() { 120 | return Err(StorageError::InvalidSignatureValue.into()); 121 | } 122 | 123 | let overlay_id = match value.key.id { 124 | everscale_crypto::tl::PublicKey::Overlay { .. } => { 125 | overlay::IdShort::from(tl_proto::hash(value.key.id)) 126 | } 127 | _ => return Err(StorageError::InvalidKeyDescription.into()), 128 | }; 129 | 130 | let required_key = proto::dht::Key { 131 | id: overlay_id.as_slice(), 132 | name: KEY_NODES.as_ref(), 133 | idx: 0, 134 | }; 135 | if value.key.key != required_key { 136 | return Err(StorageError::InvalidDhtKey.into()); 137 | } 138 | 139 | let mut new_nodes = deserialize_overlay_nodes(value.value)?; 140 | new_nodes.retain(|node| { 141 | if overlay_id.verify_overlay_node(node).is_err() { 142 | tracing::warn!(?node, "bad overlay node"); 143 | false 144 | } else { 145 | true 146 | } 147 | }); 148 | if new_nodes.is_empty() { 149 | return Err(StorageError::EmptyOverlayNodes.into()); 150 | } 151 | 152 | let key = tl_proto::hash_as_boxed(value.key.key); 153 | match self.storage.entry(key) { 154 | Entry::Occupied(mut entry) => { 155 | let value = { 156 | let old_nodes = match entry.get().ttl { 157 | old_ttl if old_ttl < now() => None, 158 | old_ttl if old_ttl > value.ttl => return Ok(false), 159 | _ => Some(deserialize_overlay_nodes(&entry.get().value)?), 160 | }; 161 | make_overlay_nodes_value(value, new_nodes, old_nodes) 162 | }; 163 | entry.insert(value); 164 | } 165 | Entry::Vacant(entry) => { 166 | entry.insert(make_overlay_nodes_value(value, new_nodes, None)); 167 | } 168 | } 169 | 170 | Ok(true) 171 | } 172 | } 173 | 174 | // Merges old and new overlay nodes and returns updated value 175 | fn 
make_overlay_nodes_value( 176 | value: proto::dht::Value<'_>, 177 | new_nodes: SmallVec<[proto::overlay::Node<'_>; N]>, 178 | old_nodes: Option; N]>>, 179 | ) -> proto::dht::ValueOwned { 180 | use std::collections::hash_map::Entry; 181 | 182 | let mut result = match old_nodes { 183 | Some(nodes) => nodes 184 | .into_iter() 185 | .map(|item| (HashWrapper(item.id), item)) 186 | .collect::>(), 187 | None => Default::default(), 188 | }; 189 | 190 | for node in new_nodes { 191 | match result.entry(HashWrapper(node.id)) { 192 | Entry::Occupied(mut entry) => { 193 | if entry.get().version < node.version { 194 | entry.insert(node); 195 | } 196 | } 197 | Entry::Vacant(entry) => { 198 | entry.insert(node); 199 | } 200 | } 201 | } 202 | 203 | let capacity = result 204 | .values() 205 | .map(|item| item.max_size_hint()) 206 | .sum::(); 207 | 208 | let mut stored_value = Vec::with_capacity(4 + 4 + capacity); 209 | stored_value.extend_from_slice(&proto::overlay::Nodes::TL_ID.to_le_bytes()); 210 | stored_value.extend_from_slice(&(result.len() as u32).to_le_bytes()); 211 | for node in result.into_values() { 212 | node.write_to(&mut stored_value); 213 | } 214 | 215 | proto::dht::ValueOwned { 216 | key: value.key.as_equivalent_owned(), 217 | value: stored_value.into(), 218 | ttl: value.ttl, 219 | signature: value.signature.to_vec().into(), 220 | } 221 | } 222 | 223 | fn deserialize_overlay_nodes( 224 | data: &[u8], 225 | ) -> tl_proto::TlResult> { 226 | match tl_proto::deserialize_as_boxed(data) { 227 | Ok(proto::overlay::Nodes { nodes }) => Ok(nodes), 228 | Err(e) => Err(e), 229 | } 230 | } 231 | 232 | pub type StorageKeyId = [u8; 32]; 233 | 234 | #[derive(thiserror::Error, Debug)] 235 | enum StorageError { 236 | #[error("Unsupported update rule")] 237 | UnsupportedUpdateRule, 238 | #[error("Invalid signature value")] 239 | InvalidSignatureValue, 240 | #[error("Invalid key description for OverlayNodes")] 241 | InvalidKeyDescription, 242 | #[error("Invalid DHT key")] 243 | 
InvalidDhtKey, 244 | #[error("Empty overlay nodes list")] 245 | EmptyOverlayNodes, 246 | #[error("Value expired")] 247 | ValueExpired, 248 | #[error("Invalid key")] 249 | InvalidKey, 250 | } 251 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /examples/mainnet.json: -------------------------------------------------------------------------------- 1 | { 2 | "zero_state": { 3 | "file_hash": "0nC4eylStbp9qnCq8KjDYb789NjS25L5ZA1UQwcIOOQ=" 4 | }, 5 | "dht_nodes": [ 6 | { 7 | "addr_list": { 8 | "address": "51.210.99.131:30303", 9 | "expire_at": 0, 10 | "reinit_date": 1643142875, 11 | "version": 1643142875 12 | }, 13 | "pubkey": "ca86421e09fe1f09d4c36608ebb5012af181aea30e5dac4b353d5b364af03ac5", 14 | "signature": "gpLPupTxag2TmYOGlu/LugTsQPa8+67jZNF9vH5EcXlCvgRVODC98fsVDO72TthtuPAxTXF+UuCpKrLr2zBJAg==", 15 | "version": 1643142875 16 | }, 17 | { 18 | "addr_list": { 19 | "address": "146.59.54.116:30303", 20 | "expire_at": 0, 21 | "reinit_date": 1643142880, 22 | "version": 1643142880 23 | }, 24 | "pubkey": "74f5222ec9db27d3b956077fa288df774f7e5f92b6eea014a6d896ed3b29f7fb", 25 | "signature": "nlV4gBis2qhw47OR4rgx2VlFu0YNHT2GLf4PVM/Nnl5Ss7Nzy2ermRhGLkLkTNIa+xbmZzJitusZQ6UBxfGgAQ==", 26 | "version": 1643142880 27 | }, 28 | { 29 | "addr_list": { 30 | "address": "141.95.47.232:30303", 31 | 
"expire_at": 0, 32 | "reinit_date": 1643142884, 33 | "version": 1643142884 34 | }, 35 | "pubkey": "1e7c0c5bd8e39e54c70c29e7cff40f4d170b9be113cfb12ca6cebf06c42e9863", 36 | "signature": "qYsnLXB8qQfFFafOynpc91DgdjIDljRU0yuXkSPHkywzYXWkX0I6OZCHfMGgg8LdnFaER6r1nmLVkb9PeSXtAw==", 37 | "version": 1643142884 38 | }, 39 | { 40 | "addr_list": { 41 | "address": "141.94.170.131:30303", 42 | "expire_at": 0, 43 | "reinit_date": 1643142888, 44 | "version": 1643142888 45 | }, 46 | "pubkey": "f6905c14a6b5bfcaf9df9a69598522914387c24a811a844569f22ad56918da78", 47 | "signature": "up+SFzbCJa8VGY9Meknms0dfTdQeNO63no9Rb1TYScNvromu2BJM87YBKfIqhvQTVv5aDOu0a7EucK9lR7cZAQ==", 48 | "version": 1643142888 49 | }, 50 | { 51 | "addr_list": { 52 | "address": "198.244.202.87:30303", 53 | "expire_at": 0, 54 | "reinit_date": 1643142893, 55 | "version": 1643142893 56 | }, 57 | "pubkey": "4721a6fc4b67cfa9640ec3fab38426049aed9b992d1afd32eba2c6eb3a527c70", 58 | "signature": "HZOqgQL9uEwwwR0AgHmuBLyWkkHXqjSVljG7lLoCIzG9MK276KBtfs73JrGKYg1NjepAqeEOeeexqmoqfuqMDQ==", 59 | "version": 1643142893 60 | }, 61 | { 62 | "addr_list": { 63 | "address": "141.94.170.130:30303", 64 | "expire_at": 0, 65 | "reinit_date": 1643142897, 66 | "version": 1643142897 67 | }, 68 | "pubkey": "61c7df97bacdf2ff933324dfab5853c2951f9966009df6f9e7f3bba22fec03a7", 69 | "signature": "mKKfEsfKrsVBtw28pSa8Jm8I1My4Zy4tJSYbS2aMyNJ8xfSUszwSlojlu9xmTfePWVagK1cgTlD8R6gj5FLRBw==", 70 | "version": 1643142897 71 | }, 72 | { 73 | "addr_list": { 74 | "address": "198.244.202.173:30303", 75 | "expire_at": 0, 76 | "reinit_date": 1643142901, 77 | "version": 1643142901 78 | }, 79 | "pubkey": "5dff550062c1c383e55322cddd71626d87d243b901a9c7e4647cc944ada4ea1b", 80 | "signature": "vr4kx21lMEzSww7vds2v7FNyq0sbY+K2bUQsGzQzsCHCnIBZhVEGKAFk6pYIlx0yoDjqZGEgvZ5lTSby/Xw9AQ==", 81 | "version": 1643142901 82 | }, 83 | { 84 | "addr_list": { 85 | "address": "141.94.219.93:30303", 86 | "expire_at": 0, 87 | "reinit_date": 1643142906, 88 | "version": 1643142906 89 | }, 
90 | "pubkey": "d398c274a97415b657881a4cfc2b95ee919d3a3eecbc6f135af01eb8c32030a2", 91 | "signature": "1auAFFWpcaiiyOzZdiMmlMnvM72nw6Cl33+CKrmoxRumQ947GSYn1lTJSFvnHIM93/tRq+MqkRDZJvrb7Bx+Cg==", 92 | "version": 1643142906 93 | }, 94 | { 95 | "addr_list": { 96 | "address": "198.244.202.86:30303", 97 | "expire_at": 0, 98 | "reinit_date": 1643142911, 99 | "version": 1643142911 100 | }, 101 | "pubkey": "55092c7350aee4cedee0e7a4765e518a4a3a7f6239654cbb2bd9eaf1950f4a26", 102 | "signature": "oxWBQWuLQqMBkj76y6eOrY5v3M2QHvrRPVHjKaLZX5191wo8D6fTkjGHNG1QV/ZTLEVxeObCtthamWOB6XPTCQ==", 103 | "version": 1643142911 104 | }, 105 | { 106 | "addr_list": { 107 | "address": "141.95.47.230:30303", 108 | "expire_at": 0, 109 | "reinit_date": 1643142915, 110 | "version": 1643142915 111 | }, 112 | "pubkey": "879458de31c608b7052f10bced8a4d38985f165dddf7da8f0d06388b8f7244d5", 113 | "signature": "gbVVvCyy+bVLAWWfiIUpJwVHEv89Q5xQSqFELEmS8mRt8QozvUaOXWNyBfVJ+yRc1zLNNcmvks7gQY5lMQyJDQ==", 114 | "version": 1643142915 115 | }, 116 | { 117 | "addr_list": { 118 | "address": "141.94.219.92:30303", 119 | "expire_at": 0, 120 | "reinit_date": 1643142919, 121 | "version": 1643142919 122 | }, 123 | "pubkey": "a9c93acfa9f1e7105c6bf218690541b0e9081b172c782a11810e213bd88bf127", 124 | "signature": "oACrpCmQ1tCCNbH0FcsxoWVwsAU5N2h474+sNsTUFDqUGBLSQAoQP9LwmfbZW4phYI5VWCutUR4dYejgefqsBQ==", 125 | "version": 1643142919 126 | }, 127 | { 128 | "addr_list": { 129 | "address": "141.95.47.234:30303", 130 | "expire_at": 0, 131 | "reinit_date": 1643142924, 132 | "version": 1643142924 133 | }, 134 | "pubkey": "237c24c72076cc4d6bd5bce733ed5d58752f8d21dc18d91ba1485a2f96bff8c8", 135 | "signature": "enfSwHe1mGLSX/Xc9Nx2kNG6HqrE9kZUbMiBOD8fzyh1nqo9UVxlI8ZOdfCcUpdravPtCQERADchk46eGHusCw==", 136 | "version": 1643142924 137 | }, 138 | { 139 | "addr_list": { 140 | "address": "146.59.54.125:30303", 141 | "expire_at": 0, 142 | "reinit_date": 1643142928, 143 | "version": 1643142928 144 | }, 145 | "pubkey": 
"3b879059ecfa638d7c96a08dae32d0f85fcc1043ba8397cf341484c62504291a", 146 | "signature": "VNFgnRLJs6FAOZ8Qx8dfjWgllbqxJzwQ9B4joY8d2D9XMvUy5L8WoElq9qkqfs2zxWEMpUe6nS71NcD/svpXBg==", 147 | "version": 1643142928 148 | }, 149 | { 150 | "addr_list": { 151 | "address": "141.94.170.134:30303", 152 | "expire_at": 0, 153 | "reinit_date": 1643142932, 154 | "version": 1643142932 155 | }, 156 | "pubkey": "5013ce3bafa4a29d6cfebe32038437666d9470bbcf0cda1252d80dd3ab82d6fb", 157 | "signature": "S/NRZAeyUdXTAZJTm0E3hv7hPRmAV+dYBTMxmvrrkly0rlOz2ZoWO0yAYCtLG8hBEaiEUEUflmN12pz5hfpMCA==", 158 | "version": 1643142932 159 | }, 160 | { 161 | "addr_list": { 162 | "address": "141.94.170.133:30303", 163 | "expire_at": 0, 164 | "reinit_date": 1643142936, 165 | "version": 1643142936 166 | }, 167 | "pubkey": "f1566ca227b48a05971f7ecc018295bcbb8ca769ad61c29fea9155df827b1bac", 168 | "signature": "gs/BE0tKDFChjh6DyYt2c7gM07hFlx2e/lCuf1GFHfUwTcO5JkS2t5KoQVbTQ/bqhsxtVWQnnVAHuMM60pnDAA==", 169 | "version": 1643142936 170 | }, 171 | { 172 | "addr_list": { 173 | "address": "198.244.202.174:30303", 174 | "expire_at": 0, 175 | "reinit_date": 1643142941, 176 | "version": 1643142941 177 | }, 178 | "pubkey": "5b9bf4cb020157c0e85f18dd3f4e0bb6a778f215b5f7a9f6f1329e16b98980c5", 179 | "signature": "3AKBy9ufaJXSraNF8dGXRqake1p1RqRkUHCQM/LUnA1/ZogiC+2KFuaROCs4nOdYcqFbgGzyy6W4kHXsfaWTBw==", 180 | "version": 1643142941 181 | }, 182 | { 183 | "addr_list": { 184 | "address": "141.95.47.231:30303", 185 | "expire_at": 0, 186 | "reinit_date": 1643142945, 187 | "version": 1643142945 188 | }, 189 | "pubkey": "227e5ed21dade169aa2a98b6d960386c8394cadd4eb1fd24f73e9c23387b14ea", 190 | "signature": "rE6Q6OO+8Oas+ySpVlfk9Ve1C0rm8WQb8DIZQ+Nw+iher7ysc3xlC2iLrbDZze55ArKnCoL7kxhs6cx93C4dBQ==", 191 | "version": 1643142945 192 | }, 193 | { 194 | "addr_list": { 195 | "address": "146.59.54.124:30303", 196 | "expire_at": 0, 197 | "reinit_date": 1643142950, 198 | "version": 1643142950 199 | }, 200 | "pubkey": 
"afd5503b4beb119b1d00a448d2efd57bd4ed200ed8a71b4d73a7f5be0b3af091", 201 | "signature": "COfg4XoFDBPER/uf56xtz+3GTMjZbI3aKCpaf41z8WunVdyL6Mvmy7/jXB72/gvdiKUwqse6MLR0aXKefA/gAA==", 202 | "version": 1643142950 203 | }, 204 | { 205 | "addr_list": { 206 | "address": "146.59.54.126:30303", 207 | "expire_at": 0, 208 | "reinit_date": 1643142955, 209 | "version": 1643142955 210 | }, 211 | "pubkey": "4e286c04d10eba97f48bcc2afa2276047b457284ae18b22c9258e6a611ca510b", 212 | "signature": "Y2JVXcCKXEMqp+KWHF5yWLZNJSwhKZMY9UfznGVgJ8M7gRLQosatIk12YWwH82NvLv6vYNxMKdACeQlmx2RPCA==", 213 | "version": 1643142955 214 | }, 215 | { 216 | "addr_list": { 217 | "address": "141.94.170.132:30303", 218 | "expire_at": 0, 219 | "reinit_date": 1643142959, 220 | "version": 1643142959 221 | }, 222 | "pubkey": "c602fc4910cd610007aac91c3e04a542e38cfd0e5e56e7be98f37af8bffb3bfa", 223 | "signature": "VpkxeG89CjnhIVsw3JS3/wDb9ZhjuYyCWRS0aHn0KTYgvVgNQvK2jiICHEyhe1VqAtgDBUiDvsokf3WImmmgDA==", 224 | "version": 1643142959 225 | }, 226 | { 227 | "addr_list": { 228 | "address": "141.94.219.91:30303", 229 | "expire_at": 0, 230 | "reinit_date": 1643142963, 231 | "version": 1643142963 232 | }, 233 | "pubkey": "6da5df9cd60a6001ef0db4b56f2c09b9f4f3cccf5c369312287d009d6d8d5dfa", 234 | "signature": "3hFbuRlbQ3JB3deSh+uqrRUmhvWvTZJheXddrT+GcGXnk6KOZcRQOWG3suScrNXq8XaQPd9pqPRxVOQAR98gDw==", 235 | "version": 1643142963 236 | }, 237 | { 238 | "addr_list": { 239 | "address": "51.89.219.191:30303", 240 | "expire_at": 0, 241 | "reinit_date": 1643142968, 242 | "version": 1643142968 243 | }, 244 | "pubkey": "5821653e5fe21aa4f9b0e0a2134d5ec183f05d0fe6e190c39a51552f2158f40c", 245 | "signature": "QJVPStDhScf66ZJg6/VfC6xWhvqvalLAHVa6hrKUQyTC8GDV/cLXTCfk9Phy6xBlhwbtDoZhJokdDJrkXcZADg==", 246 | "version": 1643142968 247 | }, 248 | { 249 | "addr_list": { 250 | "address": "135.125.8.129:30303", 251 | "expire_at": 0, 252 | "reinit_date": 1643142974, 253 | "version": 1643142974 254 | }, 255 | "pubkey": 
"6040a1180f3825c6bfb3f8cb5982108c9786566626baa1326ed6c8f59934bb67", 256 | "signature": "6MVl8vhaa2vG4pVE6FVd8Nu56Q+s0ZHdGHz/fvsy8hzSTM3FMIXPGDhvvjhT7DvPIOutSCpBojALPJMoQuvfDw==", 257 | "version": 1643142974 258 | }, 259 | { 260 | "addr_list": { 261 | "address": "51.210.117.4:30303", 262 | "expire_at": 0, 263 | "reinit_date": 1643142979, 264 | "version": 1643142979 265 | }, 266 | "pubkey": "48669f88a816697bb221b726edf9d15615c5c9f19adb812bdcb117317719a750", 267 | "signature": "QGdMo8Qxuo3TMuzI+9fewGhUEBTkGB3299rSO+P7F1SDqd5E4c9lsG6zFTEwSGyIcj+p9F3jRpd83fX/Wg8TCg==", 268 | "version": 1643142979 269 | } 270 | ] 271 | } 272 | --------------------------------------------------------------------------------

2 | 3 | Logo 4 | 5 |