├── .gitignore ├── rustfmt.toml ├── README.md ├── Cargo.toml ├── src ├── core │ ├── util.rs │ ├── sphinx │ │ ├── mod.rs │ │ ├── target.rs │ │ ├── delay.rs │ │ ├── packet.rs │ │ ├── peel.rs │ │ ├── tests.rs │ │ ├── crypto.rs │ │ └── build.rs │ ├── kx_pair.rs │ ├── cover.rs │ ├── surb_keystore.rs │ ├── request_builder.rs │ ├── replay_filter.rs │ ├── packet_queues.rs │ ├── config.rs │ ├── scattered.rs │ ├── sessions.rs │ ├── topology.rs │ ├── fragment.rs │ └── mod.rs ├── lib.rs ├── request_manager │ ├── config.rs │ ├── post_queues.rs │ └── mod.rs └── reply_manager.rs └── tests └── core.rs /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # Basic 2 | hard_tabs = true 3 | max_width = 100 4 | use_small_heuristics = "Max" 5 | # Imports 6 | imports_granularity = "Crate" 7 | reorder_imports = true 8 | # Consistency 9 | newline_style = "Unix" 10 | # Format comments 11 | comment_width = 100 12 | wrap_comments = true 13 | # Misc 14 | chain_width = 80 15 | spaces_around_ranges = false 16 | binop_separator = "Back" 17 | reorder_impl_items = false 18 | match_arm_leading_pipes = "Preserve" 19 | match_arm_blocks = false 20 | match_block_trailing_comma = true 21 | trailing_comma = "Vertical" 22 | trailing_semicolon = false 23 | use_field_init_shorthand = true 24 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # Parity Mix Network 2 | 3 | ## Overview 4 | 5 | This crate implements the core logic for a [Substrate Mix 6 | Network](https://paritytech.github.io/mixnet-spec/) node. It does _not_ provide a full node 7 | implementation; the following parts must be provided by the crate user: 8 | 9 | - Networking. This crate is mostly network-agnostic. 10 | - Blockchain integration. This crate expects to be provided with the current session index, phase, 11 | and mixnodes. 12 | - Request/reply handling. This crate treats request and reply payloads as opaque blobs. 13 | 14 | ## Modules 15 | 16 | The core mixnet logic lives in the `core` module and may be used on its own. The `request_manager` 17 | and `reply_manager` modules provide a very simple reliable delivery layer. 18 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mixnet" 3 | edition = "2021" 4 | description = "A mix network based on Loopix" 5 | version = "0.7.0" 6 | authors = ["Parity Technologies "] 7 | license = "MIT" 8 | repository = "https://github.com/paritytech/mixnet/" 9 | categories = ["network-programming", "asynchronous"] 10 | readme = "README.md" 11 | 12 | [dependencies] 13 | arrayref = "0.3.6" 14 | arrayvec = "0.7.2" 15 | bitflags = "1.3.2" 16 | blake2 = "0.10.4" 17 | c2-chacha = "0.3.3" 18 | curve25519-dalek = "4.0.0" 19 | either = "1.5.3" 20 | hashlink = "0.8.2" 21 | lioness = "0.1.2" 22 | log = "0.4.17" 23 | parking_lot = "0.12.1" 24 | rand = "0.8.5" 25 | rand_chacha = "0.3.1" 26 | rand_distr = "0.4.3" 27 | subtle = "2.4.1" 28 | thiserror = "1.0.30" 29 | zeroize = "1.6.0" 30 | 31 | [dev-dependencies] 32 | env_logger = "0.10.0" 33 | itertools = "0.10.5" 34 | rand_xoshiro = "0.6.0" 35 | -------------------------------------------------------------------------------- 
/src/core/util.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Misc utilities. 22 | 23 | pub fn default_boxed_array() -> Box<[T; N]> { 24 | vec![Default::default(); N].try_into().ok().expect("Vec is the right size") 25 | } 26 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! A mixnet loosely based on 22 | //! [Loopix](https://www.usenix.org/conference/usenixsecurity17/technical-sessions/presentation/piotrowska). 23 | //! 24 | //! This crate is mostly network agnostic. While it determines which nodes should be connected and 25 | //! which packets should be sent where, it does not care _how_ this is done. It's not entirely 26 | //! agnostic; it assumes that peers have 32-byte globally-unique identifiers. 27 | 28 | #![warn(missing_docs)] 29 | #![forbid(unsafe_code)] 30 | 31 | pub mod core; 32 | pub mod reply_manager; 33 | pub mod request_manager; 34 | -------------------------------------------------------------------------------- /src/core/sphinx/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Sphinx packet building and "peeling". 
22 | 23 | mod build; 24 | mod crypto; 25 | mod delay; 26 | mod packet; 27 | mod peel; 28 | mod target; 29 | mod tests; 30 | 31 | pub use self::{ 32 | build::*, 33 | crypto::{derive_kx_public, derive_kx_shared_secret, gen_kx_secret, KxSecret, SharedSecret}, 34 | delay::Delay, 35 | packet::{ 36 | CoverId, KxPublic, Packet, Payload, PayloadData, PeerId, RawMixnodeIndex, SurbId, 37 | COVER_ID_SIZE, KX_PUBLIC_SIZE, MAX_HOPS, MAX_MIXNODE_INDEX, PACKET_SIZE, PAYLOAD_DATA_SIZE, 38 | PAYLOAD_SIZE, PEER_ID_SIZE, SURB_ID_SIZE, 39 | }, 40 | peel::*, 41 | target::{MixnodeIndex, Target}, 42 | }; 43 | -------------------------------------------------------------------------------- /src/request_manager/config.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 
20 | 21 | /// Request manager configuration. 22 | #[derive(Clone, Debug)] 23 | pub struct Config { 24 | /// Maximum number of requests that can be managed at once. 25 | pub capacity: usize, 26 | 27 | /// Number of destinations to try sending a request to before giving up. Note that the 28 | /// destinations are chosen randomly with replacement; the same destination might be chosen 29 | /// multiple times. 30 | pub num_destinations: u32, 31 | /// Number of times to attempt a destination before moving on to the next. After each attempt, 32 | /// we conservatively estimate the round-trip time and wait at least this long before the next 33 | /// attempt. Must not be 0. 34 | pub num_attempts_per_destination: u32, 35 | /// Number of copies of the message to post each time we send a request. Must not be 0. 36 | pub num_posts_per_attempt: u32, 37 | } 38 | 39 | impl Default for Config { 40 | fn default() -> Self { 41 | Self { 42 | capacity: 20, 43 | 44 | num_destinations: 3, 45 | num_attempts_per_destination: 2, 46 | num_posts_per_attempt: 2, 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/core/kx_pair.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 
12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet key-exchange key pair. 22 | 23 | use super::sphinx::{ 24 | derive_kx_public, derive_kx_shared_secret, gen_kx_secret, KxPublic, KxSecret, SharedSecret, 25 | }; 26 | use rand::{CryptoRng, Rng}; 27 | use zeroize::Zeroizing; 28 | 29 | pub struct KxPair { 30 | /// Unclamped secret key. Boxed to avoid leaving copies around in memory if `KxPair` is moved. 31 | secret: Box>, 32 | public: KxPublic, 33 | } 34 | 35 | impl KxPair { 36 | pub fn gen(rng: &mut (impl Rng + CryptoRng)) -> Self { 37 | gen_kx_secret(rng).into() 38 | } 39 | 40 | pub fn public(&self) -> &KxPublic { 41 | &self.public 42 | } 43 | 44 | pub fn exchange(&self, their_public: &KxPublic) -> SharedSecret { 45 | derive_kx_shared_secret(their_public, self.secret.as_ref()) 46 | } 47 | } 48 | 49 | impl From for KxPair { 50 | fn from(secret: KxSecret) -> Self { 51 | // We box the secret to avoid leaving copies of it in memory when the KxPair is moved. Note 52 | // that we will likely leave some copies on the stack here; I'm not aware of any good way 53 | // of avoiding this. 54 | let secret = Box::new(Zeroizing::new(secret)); 55 | let public = derive_kx_public(secret.as_ref()); 56 | Self { secret, public } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/core/sphinx/target.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Hop target type. 22 | 23 | use super::packet::{PeerId, RawMixnodeIndex, MAX_MIXNODE_INDEX}; 24 | use std::fmt; 25 | 26 | /// The index of a mixnode in a session's mixnode list. The index is always <= 27 | /// [`MAX_MIXNODE_INDEX`]. 28 | #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] 29 | pub struct MixnodeIndex(RawMixnodeIndex); 30 | 31 | impl MixnodeIndex { 32 | /// Get the raw index out. This will always be <= [`MAX_MIXNODE_INDEX`]. 
33 | pub fn get(self) -> RawMixnodeIndex { 34 | self.0 35 | } 36 | } 37 | 38 | impl TryFrom for MixnodeIndex { 39 | type Error = (); 40 | 41 | fn try_from(index: usize) -> Result { 42 | if index <= MAX_MIXNODE_INDEX as usize { 43 | Ok(Self(index as RawMixnodeIndex)) 44 | } else { 45 | Err(()) 46 | } 47 | } 48 | } 49 | 50 | impl TryFrom for MixnodeIndex { 51 | type Error = (); 52 | 53 | fn try_from(index: RawMixnodeIndex) -> Result { 54 | (index as usize).try_into() 55 | } 56 | } 57 | 58 | impl fmt::Display for MixnodeIndex { 59 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 60 | self.0.fmt(fmt) 61 | } 62 | } 63 | 64 | #[derive(Debug, PartialEq, Eq)] 65 | pub enum Target { 66 | MixnodeIndex(MixnodeIndex), 67 | PeerId(PeerId), 68 | } 69 | -------------------------------------------------------------------------------- /src/core/cover.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet cover packet generation. 22 | 23 | use super::{ 24 | packet_queues::AddressedPacket, 25 | sphinx::build_cover_packet, 26 | topology::{NetworkStatus, RouteGenerator, RouteKind, Topology, TopologyErr}, 27 | util::default_boxed_array, 28 | }; 29 | use arrayvec::ArrayVec; 30 | use rand::{CryptoRng, Rng}; 31 | 32 | #[derive(PartialEq, Eq)] 33 | pub enum CoverKind { 34 | Drop, 35 | Loop, 36 | } 37 | 38 | pub fn gen_cover_packet( 39 | rng: &mut (impl Rng + CryptoRng), 40 | topology: &Topology, 41 | ns: &dyn NetworkStatus, 42 | kind: CoverKind, 43 | num_hops: usize, 44 | ) -> Result { 45 | // Generate route 46 | let route_generator = RouteGenerator::new(topology, ns); 47 | let route_kind = match kind { 48 | CoverKind::Drop => RouteKind::ToMixnode(route_generator.choose_destination_index(rng)?), 49 | CoverKind::Loop => RouteKind::Loop, 50 | }; 51 | let mut targets = ArrayVec::new(); 52 | let mut their_kx_publics = ArrayVec::new(); 53 | let first_mixnode_index = route_generator.gen_route( 54 | &mut targets, 55 | &mut their_kx_publics, 56 | rng, 57 | route_kind, 58 | num_hops, 59 | )?; 60 | let peer_id = topology.mixnode_index_to_peer_id(first_mixnode_index)?; 61 | 62 | // Build packet 63 | let mut packet = default_boxed_array(); 64 | build_cover_packet(&mut packet, rng, &targets, &their_kx_publics, None); 65 | 66 | Ok(AddressedPacket { peer_id, packet }) 67 | } 68 | -------------------------------------------------------------------------------- /src/request_manager/post_queues.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | use super::super::core::RelSessionIndex; 22 | use std::collections::VecDeque; 23 | 24 | pub struct PostQueues { 25 | /// Post queue for the current session. 26 | pub current: VecDeque, 27 | /// Post queue for the previous session. 28 | pub prev: VecDeque, 29 | /// Additional post queue for the default session (either the previous or the current session, 30 | /// depending on the current session phase). 
31 | pub default: VecDeque, 32 | } 33 | 34 | impl PostQueues { 35 | pub fn new(capacity: usize) -> Self { 36 | Self { 37 | current: VecDeque::with_capacity(capacity), 38 | prev: VecDeque::with_capacity(capacity), 39 | default: VecDeque::with_capacity(capacity), 40 | } 41 | } 42 | 43 | pub fn iter(&self) -> impl Iterator> { 44 | [&self.current, &self.prev, &self.default].into_iter() 45 | } 46 | 47 | pub fn iter_mut(&mut self) -> impl Iterator> { 48 | [&mut self.current, &mut self.prev, &mut self.default].into_iter() 49 | } 50 | } 51 | 52 | impl std::ops::Index> for PostQueues { 53 | type Output = VecDeque; 54 | 55 | fn index(&self, index: Option) -> &Self::Output { 56 | match index { 57 | Some(RelSessionIndex::Current) => &self.current, 58 | Some(RelSessionIndex::Prev) => &self.prev, 59 | None => &self.default, 60 | } 61 | } 62 | } 63 | 64 | impl std::ops::IndexMut> for PostQueues { 65 | fn index_mut(&mut self, index: Option) -> &mut Self::Output { 66 | match index { 67 | Some(RelSessionIndex::Current) => &mut self.current, 68 | Some(RelSessionIndex::Prev) => &mut self.prev, 69 | None => &mut self.default, 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/core/surb_keystore.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Keystore for SURB payload encryption keys. 22 | 23 | use super::{ 24 | fragment::MessageId, 25 | sphinx::{SurbId, SurbPayloadEncryptionKeys, SURB_ID_SIZE}, 26 | }; 27 | use hashlink::{linked_hash_map, LinkedHashMap}; 28 | use log::debug; 29 | use rand::{CryptoRng, Rng}; 30 | 31 | struct Value { 32 | keys: SurbPayloadEncryptionKeys, 33 | message_id: MessageId, 34 | } 35 | 36 | pub struct Entry<'a>(linked_hash_map::OccupiedEntry<'a, SurbId, Value>); 37 | 38 | impl<'a> Entry<'a> { 39 | pub fn keys(&self) -> &SurbPayloadEncryptionKeys { 40 | &self.0.get().keys 41 | } 42 | 43 | pub fn message_id(&self) -> &MessageId { 44 | &self.0.get().message_id 45 | } 46 | 47 | pub fn remove(self) { 48 | self.0.remove(); 49 | } 50 | } 51 | 52 | pub struct SurbKeystore { 53 | /// Maximum number of SURBs to keep keys for. 
54 | capacity: usize, 55 | /// In creation order: oldest SURBs at the front, newest SURBs at the back. 56 | surbs: LinkedHashMap, 57 | } 58 | 59 | impl SurbKeystore { 60 | pub fn new(capacity: usize) -> Self { 61 | debug_assert_ne!(capacity, 0); 62 | Self { capacity, surbs: LinkedHashMap::with_capacity(capacity) } 63 | } 64 | 65 | /// Create an entry for a new SURB. Returns the randomly generated ID and a mutable reference 66 | /// to the keys, which should be filled in by the caller. 67 | pub fn insert( 68 | &mut self, 69 | rng: &mut (impl Rng + CryptoRng), 70 | message_id: &MessageId, 71 | log_target: &str, 72 | ) -> (SurbId, &mut SurbPayloadEncryptionKeys) { 73 | // Discard the oldest SURB if we're already at capacity 74 | debug_assert!(self.surbs.len() <= self.capacity); 75 | if self.surbs.len() == self.capacity { 76 | debug!(target: log_target, "Too many entries in SURB keystore; evicting oldest"); 77 | self.surbs.pop_front(); 78 | } 79 | 80 | let mut id = [0; SURB_ID_SIZE]; 81 | rng.fill_bytes(&mut id); 82 | match self.surbs.entry(id) { 83 | linked_hash_map::Entry::Occupied(_) => panic!( 84 | "Randomly generated SURB ID matches an existing SURB ID; something wrong with RNG?" 85 | ), 86 | linked_hash_map::Entry::Vacant(entry) => { 87 | let value = entry.insert(Value { 88 | keys: SurbPayloadEncryptionKeys::new(), 89 | message_id: *message_id, 90 | }); 91 | (id, &mut value.keys) 92 | }, 93 | } 94 | } 95 | 96 | /// Returns the entry for a SURB, or [`None`] if the ID is not recognised. 97 | pub fn entry(&mut self, id: &SurbId) -> Option { 98 | match self.surbs.entry(*id) { 99 | linked_hash_map::Entry::Occupied(entry) => Some(Entry(entry)), 100 | linked_hash_map::Entry::Vacant(_) => None, 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/core/request_builder.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet request builder. This module simply plugs together the topology and Sphinx modules. 
22 | 23 | use super::{ 24 | packet_queues::AddressedPacket, 25 | sphinx::{ 26 | build_surb, complete_request_packet, mut_payload_data, Delay, MixnodeIndex, PayloadData, 27 | Surb, SurbId, SurbPayloadEncryptionKeys, 28 | }, 29 | topology::{NetworkStatus, RouteGenerator, RouteKind, Topology, TopologyErr}, 30 | util::default_boxed_array, 31 | }; 32 | use arrayvec::ArrayVec; 33 | use rand::{CryptoRng, Rng}; 34 | 35 | pub struct RouteMetrics { 36 | pub num_hops: usize, 37 | pub forwarding_delay: Delay, 38 | } 39 | 40 | pub struct RequestBuilder<'topology, X> { 41 | route_generator: RouteGenerator<'topology, X>, 42 | destination_index: MixnodeIndex, 43 | } 44 | 45 | impl<'topology, X> RequestBuilder<'topology, X> { 46 | pub fn new( 47 | rng: &mut (impl Rng + CryptoRng), 48 | topology: &'topology Topology, 49 | ns: &dyn NetworkStatus, 50 | destination_index: Option, 51 | ) -> Result { 52 | let route_generator = RouteGenerator::new(topology, ns); 53 | let destination_index = match destination_index { 54 | Some(index) => index, 55 | None => route_generator.choose_destination_index(rng)?, 56 | }; 57 | Ok(Self { route_generator, destination_index }) 58 | } 59 | 60 | pub fn destination_index(&self) -> MixnodeIndex { 61 | self.destination_index 62 | } 63 | 64 | pub fn build_packet( 65 | &self, 66 | rng: &mut R, 67 | write_payload_data: impl FnOnce(&mut PayloadData, &mut R) -> Result<(), TopologyErr>, 68 | num_hops: usize, 69 | ) -> Result<(AddressedPacket, RouteMetrics), TopologyErr> { 70 | // Generate route 71 | let mut targets = ArrayVec::new(); 72 | let mut their_kx_publics = ArrayVec::new(); 73 | let first_mixnode_index = self.route_generator.gen_route( 74 | &mut targets, 75 | &mut their_kx_publics, 76 | rng, 77 | RouteKind::ToMixnode(self.destination_index), 78 | num_hops, 79 | )?; 80 | let peer_id = 81 | self.route_generator.topology().mixnode_index_to_peer_id(first_mixnode_index)?; 82 | 83 | // Build packet 84 | let mut packet = default_boxed_array(); 85 | 
write_payload_data(mut_payload_data(&mut packet), rng)?; 86 | let forwarding_delay = 87 | complete_request_packet(&mut packet, rng, &targets, &their_kx_publics); 88 | 89 | let packet = AddressedPacket { peer_id, packet }; 90 | let metrics = RouteMetrics { num_hops: their_kx_publics.len(), forwarding_delay }; 91 | Ok((packet, metrics)) 92 | } 93 | 94 | pub fn build_surb( 95 | &self, 96 | surb: &mut Surb, 97 | payload_encryption_keys: &mut SurbPayloadEncryptionKeys, 98 | rng: &mut (impl Rng + CryptoRng), 99 | id: &SurbId, 100 | num_hops: usize, 101 | ) -> Result { 102 | // Generate route 103 | let mut targets = ArrayVec::new(); 104 | let mut their_kx_publics = ArrayVec::new(); 105 | let first_mixnode_index = self.route_generator.gen_route( 106 | &mut targets, 107 | &mut their_kx_publics, 108 | rng, 109 | RouteKind::FromMixnode(self.destination_index), 110 | num_hops, 111 | )?; 112 | 113 | // Build SURB 114 | let forwarding_delay = build_surb( 115 | surb, 116 | payload_encryption_keys, 117 | rng, 118 | first_mixnode_index, 119 | &targets, 120 | &their_kx_publics, 121 | id, 122 | ); 123 | 124 | Ok(RouteMetrics { num_hops: their_kx_publics.len(), forwarding_delay }) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /src/core/sphinx/delay.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Unitless delay type. 22 | 23 | use arrayref::array_mut_ref; 24 | use rand::Rng; 25 | use rand_chacha::{rand_core::SeedableRng, ChaChaRng}; 26 | use std::{ 27 | cmp::Ordering, 28 | ops::{Add, AddAssign}, 29 | time::Duration, 30 | }; 31 | 32 | pub const DELAY_SEED_SIZE: usize = 16; 33 | pub type DelaySeed = [u8; DELAY_SEED_SIZE]; 34 | 35 | /// Unitless delay. Can be converted to a [`Duration`] with [`to_duration`](Self::to_duration). 36 | #[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] 37 | pub struct Delay(f64); 38 | 39 | impl Delay { 40 | /// Returns a delay of zero time. 41 | pub fn zero() -> Self { 42 | Self(0.0) 43 | } 44 | 45 | /// Returns a random delay sampled from an exponential distribution with mean 1. `seed` 46 | /// provides the entropy. 
47 | pub fn exp(seed: &DelaySeed) -> Self { 48 | // The algorithm for sampling from an exponential distribution consumes a variable amount 49 | // of random data; possibly more random data than is in seed. So it is not sufficient to 50 | // just use the random data in seed directly; we really do need to seed an RNG with it. 51 | let mut double_seed = [0; 32]; 52 | *array_mut_ref![double_seed, 0, 16] = *seed; 53 | *array_mut_ref![double_seed, 16, 16] = *seed; 54 | let mut rng = ChaChaRng::from_seed(double_seed); 55 | let delay: f64 = rng.sample(rand_distr::Exp1); 56 | // Cap at 10x the mean; this is about the 99.995th percentile. This avoids potential panics 57 | // in to_duration() due to overflow. 58 | Self(delay.min(10.0)) 59 | } 60 | 61 | /// Convert the unitless delay into a [`Duration`] by multiplying by `unit`. For delays 62 | /// calculated by different parties to match, they must all agree on `unit`! 63 | pub fn to_duration(self, unit: Duration) -> Duration { 64 | unit.mul_f64(self.0) 65 | } 66 | } 67 | 68 | // Delays are never NaN 69 | impl Eq for Delay {} 70 | 71 | #[allow(clippy::derive_ord_xor_partial_ord)] 72 | impl Ord for Delay { 73 | fn cmp(&self, other: &Self) -> Ordering { 74 | self.partial_cmp(other).expect("Delays are never NaN") 75 | } 76 | } 77 | 78 | impl Add for Delay { 79 | type Output = Self; 80 | 81 | fn add(self, other: Self) -> Self { 82 | Self(self.0 + other.0) 83 | } 84 | } 85 | 86 | impl AddAssign for Delay { 87 | fn add_assign(&mut self, other: Self) { 88 | self.0 += other.0; 89 | } 90 | } 91 | 92 | #[cfg(test)] 93 | mod tests { 94 | use super::*; 95 | 96 | #[test] 97 | fn portable_deterministic_exp() { 98 | assert_eq!( 99 | Delay::exp(&[ 100 | 0xdc, 0x18, 0x0e, 0xe6, 0x71, 0x1e, 0xcf, 0x2d, 0xad, 0x0c, 0xde, 0xd1, 0xd4, 0x94, 101 | 0xbd, 0x3b 102 | ]), 103 | Delay(2.953842296445717) 104 | ); 105 | assert_eq!( 106 | Delay::exp(&[ 107 | 0x0a, 0xcc, 0x48, 0xbd, 0xa2, 0x30, 0x9a, 0x48, 0xc8, 0x78, 0x61, 0x0d, 0xf8, 0xc2, 108 | 0x8d, 
0x99 109 | ]), 110 | Delay(1.278588765412407) 111 | ); 112 | assert_eq!( 113 | Delay::exp(&[ 114 | 0x17, 0x4c, 0x40, 0x2f, 0x8f, 0xda, 0xa6, 0x46, 0x45, 0xe7, 0x1c, 0xb0, 0x1e, 0xff, 115 | 0xf8, 0xfc 116 | ]), 117 | Delay(0.7747915675800142) 118 | ); 119 | assert_eq!( 120 | Delay::exp(&[ 121 | 0xca, 0xe8, 0x07, 0x72, 0x17, 0x28, 0xf7, 0x09, 0xd8, 0x7d, 0x3e, 0xa2, 0x03, 0x7d, 122 | 0x4f, 0x03 123 | ]), 124 | Delay(0.8799379598933348) 125 | ); 126 | assert_eq!( 127 | Delay::exp(&[ 128 | 0x61, 0x56, 0x54, 0x41, 0xd0, 0x25, 0xdf, 0xe7, 0xb9, 0xc8, 0x6a, 0x56, 0xdd, 0x27, 129 | 0x09, 0xa6 130 | ]), 131 | Delay(10.0) 132 | ); 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /src/core/sphinx/packet.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
// IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! Sphinx packet format.
//!
//! Packets consist of the following, in order:
//!
//! - [`Header`]:
//!   - Key-exchange public key ([`KxPublic`], alpha in the Sphinx paper).
//!   - [`Mac`] (gamma in the Sphinx paper).
//!   - Routing actions ([`Actions`], beta in the Sphinx paper).
//! - [`Payload`] (delta in the Sphinx paper):
//!   - [`PayloadData`].
//!   - [`PayloadTag`] (for detecting tampering).
//!
//! For each hop, the routing actions field contains, in order:
//!
//! - A [`RawAction`]. Always a deliver action for the last hop and a forward action for earlier
//!   hops.
//! - If the [`RawAction`] is [`RAW_ACTION_FORWARD_TO_PEER_ID`], a [`PeerId`].
//! - If the [`RawAction`] is a forward action, a [`Mac`] for the next hop.
//! - If the [`RawAction`] is [`RAW_ACTION_DELIVER_REPLY`], a [`SurbId`].
//! - If the [`RawAction`] is [`RAW_ACTION_DELIVER_COVER_WITH_ID`], a [`CoverId`].

// Key exchange and MACs ///////////////////////////////////////////////////////////////////////

/// Size in bytes of a [`KxPublic`].
pub const KX_PUBLIC_SIZE: usize = 32;
/// Key-exchange public key (alpha in the Sphinx paper).
pub type KxPublic = [u8; KX_PUBLIC_SIZE];

/// Size in bytes of a [`Mac`].
pub const MAC_SIZE: usize = 16;
/// Per-hop message authentication code (gamma in the Sphinx paper).
pub type Mac = [u8; MAC_SIZE];

// Routing actions /////////////////////////////////////////////////////////////////////////////

/// Maximum number of hops a packet can traverse. Sending a packet directly to the final
/// destination node would count as one hop. Strictly speaking it is possible to construct packets
/// that will traverse slightly more hops than this, but not using this crate.
pub const MAX_HOPS: usize = 6;
/// Size in bytes of a [`RawMixnodeIndex`].
pub const RAW_MIXNODE_INDEX_SIZE: usize = 2;
/// Raw mixnode index type, not guaranteed to be <= [`MAX_MIXNODE_INDEX`].
pub type RawMixnodeIndex = u16;
/// Maximum valid mixnode index.
pub const MAX_MIXNODE_INDEX: RawMixnodeIndex = 0xfeff;

/// Size in bytes of a [`RawAction`].
pub const RAW_ACTION_SIZE: usize = RAW_MIXNODE_INDEX_SIZE; // A mixnode index means forward to that mixnode
/// Raw routing action. Values up to [`MAX_MIXNODE_INDEX`] mean "forward to that mixnode"; the
/// values above are the special actions below.
pub type RawAction = RawMixnodeIndex;
pub const RAW_ACTION_FORWARD_TO_PEER_ID: RawAction = 0xff00;
pub const RAW_ACTION_DELIVER_REQUEST: RawAction = 0xff01;
pub const RAW_ACTION_DELIVER_REPLY: RawAction = 0xff02;
pub const RAW_ACTION_DELIVER_COVER: RawAction = 0xff03;
pub const RAW_ACTION_DELIVER_COVER_WITH_ID: RawAction = 0xff04;

/// Size in bytes of a [`PeerId`].
pub const PEER_ID_SIZE: usize = 32;
/// Globally unique identifier for a network peer. This is treated as an opaque type.
pub type PeerId = [u8; PEER_ID_SIZE];

/// Maximum amount of padding that might need to be appended to the routing actions for length
/// invariance at each hop.
pub const MAX_ACTIONS_PAD_SIZE: usize = RAW_ACTION_SIZE + PEER_ID_SIZE + MAC_SIZE;

/// Size in bytes shared by [`SurbId`] and [`CoverId`].
pub const SURB_COVER_ID_SIZE: usize = 16;
pub const SURB_ID_SIZE: usize = SURB_COVER_ID_SIZE;
pub type SurbId = [u8; SURB_ID_SIZE];
pub const COVER_ID_SIZE: usize = SURB_COVER_ID_SIZE;
pub type CoverId = [u8; COVER_ID_SIZE];

/// Size in bytes of the routing actions field (beta in the Sphinx paper).
pub const ACTIONS_SIZE: usize = (MAX_HOPS * (RAW_ACTION_SIZE + MAC_SIZE)) +
	PEER_ID_SIZE + // Allow one hop to use a peer ID
	SURB_COVER_ID_SIZE - // Last hop may have a SURB ID or a cover ID...
	MAC_SIZE; // ...but no next-hop MAC
pub type Actions = [u8; ACTIONS_SIZE];

// Payload /////////////////////////////////////////////////////////////////////////////////////

pub const PAYLOAD_DATA_SIZE: usize = 2048;
pub type PayloadData = [u8; PAYLOAD_DATA_SIZE];
/// Size in bytes of a [`PayloadTag`].
pub const PAYLOAD_TAG_SIZE: usize = 16;
/// Tag appended to the payload data for detecting tampering.
pub type PayloadTag = [u8; PAYLOAD_TAG_SIZE];
/// Expected tag value; the payload decrypts to this at the final hop iff it was not tampered with.
pub const PAYLOAD_TAG: PayloadTag = [0; PAYLOAD_TAG_SIZE];

// Whole packet ////////////////////////////////////////////////////////////////////////////////

pub const HEADER_SIZE: usize = KX_PUBLIC_SIZE + MAC_SIZE + ACTIONS_SIZE;
pub type Header = [u8; HEADER_SIZE];
pub const PAYLOAD_SIZE: usize = PAYLOAD_DATA_SIZE + PAYLOAD_TAG_SIZE;
pub type Payload = [u8; PAYLOAD_SIZE];
/// Size in bytes of a [`Packet`].
pub const PACKET_SIZE: usize = HEADER_SIZE + PAYLOAD_SIZE;
/// Type for packets sent between nodes. Note that all packets are the same size.
pub type Packet = [u8; PACKET_SIZE];
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet replay filter. 22 | 23 | use super::sphinx::SharedSecret; 24 | use blake2::{ 25 | digest::{ 26 | consts::U8, 27 | generic_array::{sequence::Concat, GenericArray}, 28 | Mac, 29 | }, 30 | Blake2bMac, 31 | }; 32 | use rand::{ 33 | distributions::{Distribution, Standard}, 34 | CryptoRng, Rng, 35 | }; 36 | 37 | // https://hur.st/bloomfilter/?n=7000000&p=&m=67108864&k=8 38 | // The false positive rate is ~1% with 7m packets in the filter. 1% packet loss per hop over 5 hops 39 | // gives ~5% packet loss overall. The key-exchange keys are rotated every session. Polkadot 40 | // sessions are 4 hours. To accumulate 7m packets over a session, we would need to process ~490 41 | // packets per second. 42 | const NUM_BITS: usize = 64 * 1024 * 1024; 43 | const NUM_WORDS: usize = NUM_BITS / 64; 44 | const NUM_TAG_BITS: usize = 8; 45 | 46 | #[derive(Clone, Copy)] 47 | pub struct ReplayTag { 48 | base: u32, 49 | inc: u32, 50 | } 51 | 52 | impl Distribution for Standard { 53 | fn sample(&self, rng: &mut R) -> ReplayTag { 54 | ReplayTag { base: rng.gen(), inc: rng.gen() } 55 | } 56 | } 57 | 58 | pub struct ReplayFilter { 59 | key: [u8; 32], 60 | /// Allocated on demand. 
61 | words: Option>, 62 | } 63 | 64 | impl ReplayFilter { 65 | fn new_with_key(key: [u8; 32]) -> Self { 66 | Self { key, words: None } 67 | } 68 | 69 | pub fn new(rng: &mut (impl Rng + CryptoRng)) -> Self { 70 | let mut key = [0; 32]; 71 | rng.fill_bytes(&mut key); 72 | Self::new_with_key(key) 73 | } 74 | 75 | pub fn tag(&self, shared_secret: &SharedSecret) -> ReplayTag { 76 | let key: &GenericArray<_, _> = (&self.key).into(); 77 | let key = key.concat((*shared_secret).into()); 78 | let h = Blake2bMac::::new_with_salt_and_personal(&key, b"", b"sphinx-replay-tg") 79 | .expect("Key, salt, and personalisation sizes are fixed and small enough"); 80 | let tag = u64::from_le_bytes(h.finalize().into_bytes().into()); 81 | ReplayTag { base: tag as u32, inc: (tag >> 32) as u32 } 82 | } 83 | 84 | pub fn insert(&mut self, tag: ReplayTag) { 85 | let mut i = tag.base; 86 | let words = self 87 | .words 88 | .get_or_insert_with(|| vec![0; NUM_WORDS].try_into().expect("Vec has the right size")); 89 | for _ in 0..NUM_TAG_BITS { 90 | words[((i as usize) >> 6) % NUM_WORDS] |= 1 << (i & 63); 91 | i = i.wrapping_add(tag.inc); 92 | } 93 | } 94 | 95 | pub fn contains(&self, tag: ReplayTag) -> bool { 96 | match &self.words { 97 | None => false, 98 | Some(words) => { 99 | let mut i = tag.base; 100 | for _ in 0..NUM_TAG_BITS { 101 | if (words[((i as usize) >> 6) % NUM_WORDS] & (1 << (i & 63))) == 0 { 102 | return false 103 | } 104 | i = i.wrapping_add(tag.inc); 105 | } 106 | true 107 | }, 108 | } 109 | } 110 | } 111 | 112 | #[cfg(test)] 113 | mod tests { 114 | use super::*; 115 | use rand::{Rng, SeedableRng}; 116 | 117 | #[test] 118 | fn basic_operation() { 119 | let mut rf = ReplayFilter::new_with_key(Default::default()); 120 | let zero: SharedSecret = Default::default(); 121 | let mut one: SharedSecret = Default::default(); 122 | one[0] = 1; 123 | assert!(!rf.contains(rf.tag(&zero))); 124 | assert!(!rf.contains(rf.tag(&one))); 125 | rf.insert(rf.tag(&zero)); 126 | 
assert!(rf.contains(rf.tag(&zero))); 127 | assert!(!rf.contains(rf.tag(&one))); 128 | } 129 | 130 | #[test] 131 | fn false_positive_rate() { 132 | let mut rf = ReplayFilter::new_with_key(Default::default()); 133 | 134 | let mut rng = rand_xoshiro::Xoshiro256StarStar::seed_from_u64(0); 135 | for _ in 0..3_000_000 { 136 | rf.insert(rng.gen()); 137 | } 138 | 139 | { 140 | let mut rng = rand_xoshiro::Xoshiro256StarStar::seed_from_u64(0); 141 | for _ in 0..3_000_000 { 142 | assert!(rf.contains(rng.gen())); 143 | } 144 | } 145 | 146 | // One of these randomly generated tags might actually match one we inserted earlier, but 147 | // this is much less likely than a false positive... 148 | let mut false_positives = 0; 149 | for _ in 0..1_000_000 { 150 | if rf.contains(rng.gen()) { 151 | false_positives += 1; 152 | } 153 | } 154 | 155 | // The false positive rate should be about 1 in 15,000 with 3m packets in the filter. With 156 | // the seed above we get 62 false positives among 1,000,000 random tags that (most likely) 157 | // aren't actually in the set... 158 | assert_eq!(false_positives, 62); 159 | } 160 | } 161 | -------------------------------------------------------------------------------- /src/core/packet_queues.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet packet queues. 22 | 23 | use super::sphinx::{Packet, PeerId}; 24 | use std::{ 25 | cmp::Ordering, 26 | collections::{BinaryHeap, VecDeque}, 27 | time::Instant, 28 | }; 29 | 30 | /// A packet plus the ID of the peer it should be sent to. 31 | pub struct AddressedPacket { 32 | /// Where the packet should be sent. 33 | pub peer_id: PeerId, 34 | /// The packet contents. 35 | pub packet: Box, 36 | } 37 | 38 | /// `Eq` and `Ord` are implemented for this to support use in `BinaryHeap`s. Only `deadline` is 39 | /// compared. 40 | struct ForwardPacket { 41 | /// When the packet should be sent. 42 | deadline: Instant, 43 | /// The packet and destination. 
44 | packet: AddressedPacket, 45 | } 46 | 47 | impl PartialEq for ForwardPacket { 48 | fn eq(&self, other: &Self) -> bool { 49 | self.deadline == other.deadline 50 | } 51 | } 52 | 53 | impl Eq for ForwardPacket {} 54 | 55 | impl PartialOrd for ForwardPacket { 56 | fn partial_cmp(&self, other: &Self) -> Option { 57 | Some(self.cmp(other)) 58 | } 59 | } 60 | 61 | impl Ord for ForwardPacket { 62 | fn cmp(&self, other: &Self) -> Ordering { 63 | // Packets with the earliest deadline considered greatest 64 | self.deadline.cmp(&other.deadline).reverse() 65 | } 66 | } 67 | 68 | pub struct ForwardPacketQueue { 69 | /// Maximum number of packets in the queue. This should match the capacity of `queue`, but we 70 | /// don't rely on that. 71 | capacity: usize, 72 | queue: BinaryHeap, 73 | } 74 | 75 | impl ForwardPacketQueue { 76 | pub fn new(capacity: usize) -> Self { 77 | Self { capacity, queue: BinaryHeap::with_capacity(capacity) } 78 | } 79 | 80 | pub fn next_deadline(&self) -> Option { 81 | self.queue.peek().map(|packet| packet.deadline) 82 | } 83 | 84 | pub fn has_space(&self) -> bool { 85 | self.queue.len() < self.capacity 86 | } 87 | 88 | /// Insert a packet into the queue. Returns `true` iff the deadline of the item at the head of 89 | /// the queue changed. Should only be called if there is space in the queue (see 90 | /// [`has_space`](Self::has_space)). 91 | pub fn insert(&mut self, deadline: Instant, packet: AddressedPacket) -> bool { 92 | debug_assert!(self.has_space()); 93 | let prev_deadline = self.next_deadline(); 94 | self.queue.push(ForwardPacket { deadline, packet }); 95 | self.next_deadline() != prev_deadline 96 | } 97 | 98 | pub fn pop(&mut self) -> Option { 99 | self.queue.pop().map(|packet| packet.packet) 100 | } 101 | } 102 | 103 | #[derive(Clone, Copy, Debug)] 104 | pub struct AuthoredPacketQueueConfig { 105 | /// Maximum number of packets in the queue. Note that cover packets do not go in the queue; 106 | /// they are generated on demand. 
107 | pub capacity: usize, 108 | /// Allow packets for multiple messages in the queue? 109 | pub multiple_messages: bool, 110 | } 111 | 112 | pub enum CheckSpaceErr { 113 | /// There will never be enough space. 114 | Capacity, 115 | /// There are too many other packets in the queue at the moment. 116 | Len, 117 | } 118 | 119 | pub struct AuthoredPacketQueue { 120 | config: AuthoredPacketQueueConfig, 121 | queue: VecDeque, 122 | } 123 | 124 | impl AuthoredPacketQueue { 125 | pub fn new(config: AuthoredPacketQueueConfig) -> Self { 126 | Self { config, queue: VecDeque::with_capacity(config.capacity) } 127 | } 128 | 129 | pub fn len(&self) -> usize { 130 | self.queue.len() 131 | } 132 | 133 | pub fn check_space(&self, num_packets: usize) -> Result<(), CheckSpaceErr> { 134 | let Some(mut max_len) = self.config.capacity.checked_sub(num_packets) else { 135 | return Err(CheckSpaceErr::Capacity) 136 | }; 137 | if !self.config.multiple_messages { 138 | max_len = 0; 139 | } 140 | if self.queue.len() > max_len { 141 | Err(CheckSpaceErr::Len) 142 | } else { 143 | Ok(()) 144 | } 145 | } 146 | 147 | /// Push a packet onto the queue. Should only be called if there is space in the queue (see 148 | /// [`check_space`](Self::check_space)). 149 | pub fn push(&mut self, packet: AddressedPacket) { 150 | debug_assert!(self.queue.len() < self.config.capacity); 151 | self.queue.push_back(packet); 152 | } 153 | 154 | /// Pop the packet at the head of the queue and return it, or, if the queue is empty, return 155 | /// `None`. Also returns `true` if [`check_space`](Self::check_space) might now succeed where 156 | /// it wouldn't before. 
157 | pub fn pop(&mut self) -> (Option, bool) { 158 | let packet = self.queue.pop_front(); 159 | let space = packet.is_some() && (self.config.multiple_messages || self.queue.is_empty()); 160 | (packet, space) 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /src/core/config.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet configuration. 22 | 23 | use super::{ 24 | packet_queues::AuthoredPacketQueueConfig, 25 | sphinx::{KxSecret, MAX_HOPS}, 26 | }; 27 | use std::time::Duration; 28 | 29 | /// Configuration that can vary between sessions depending on whether the local node is a mixnode 30 | /// or not. 
#[derive(Clone, Debug)]
pub struct SessionConfig {
	/// Authored packet queue configuration.
	pub authored_packet_queue: AuthoredPacketQueueConfig,
	/// Mean period between authored packet dispatches for the session. Cover packets are sent when
	/// there are no real packets to send, or when we randomly choose to send loop cover packets
	/// (see `Config::loop_cover_proportion`). This parameter, in combination with
	/// `Config::loop_cover_proportion`, bounds the rate at which messages can be sent. Note that
	/// this period is automatically increased during session transitions to keep the overall rate
	/// stable.
	pub mean_authored_packet_period: Duration,
}

/// Mixnet configuration.
#[derive(Clone, Debug)]
pub struct Config {
	/// The target for log messages.
	pub log_target: &'static str,

	/// The number of mixnodes to connect to when we are not a mixnode ourselves. When we are a
	/// mixnode, we connect to all other mixnodes.
	pub num_gateway_mixnodes: u32,

	/// The key-exchange secret key to use in session 0. This option is intended for testing
	/// purposes only.
	pub session_0_kx_secret: Option<KxSecret>,
	/// Used by sessions in which the local node is a mixnode. If this is not the same for all
	/// nodes, delay estimates may be off.
	pub mixnode_session: SessionConfig,
	/// Used by sessions in which the local node is not a mixnode. If [`None`], we will only
	/// participate in the mixnet during sessions in which we are a mixnode.
	pub non_mixnode_session: Option<SessionConfig>,

	/// Maximum number of packets waiting for their forwarding delay to elapse. When at the limit,
	/// any packets arriving that need forwarding will simply be dropped.
	pub forward_packet_queue_capacity: usize,
	/// Mean forwarding delay at each mixnode. This should really be the same for all nodes!
	pub mean_forwarding_delay: Duration,
	/// Conservative estimate of the network (and processing) delay per hop.
	pub per_hop_net_delay: Duration,

	/// Proportion of authored packets which should be loop cover packets (as opposed to drop cover
	/// packets or real packets). If this is not the same for all nodes, delay estimates may be
	/// off.
	pub loop_cover_proportion: f64,
	/// Generate cover packets? This option is intended for testing purposes only. It essentially
	/// just drops all cover packets instead of sending them.
	pub gen_cover_packets: bool,
	/// Number of hops for packets to traverse. Some packets may traverse more hops if necessary.
	/// Note this only affects packets whose headers are generated by this node. Must be <=
	/// [`MAX_HOPS`].
	pub num_hops: usize,

	/// Maximum number of outstanding SURBs to keep keys for. Must be greater than 0.
	pub surb_keystore_capacity: usize,
	/// Maximum number of incomplete messages to keep.
	pub max_incomplete_messages: usize,
	/// Maximum number of fragments to keep across all incomplete messages.
	pub max_incomplete_fragments: usize,
	/// Maximum number of fragments per message. This should really be the same for all nodes!
	pub max_fragments_per_message: usize,
}

// NOTE(review): the `Option<KxSecret>`/`Option<SessionConfig>` generic arguments above were
// stripped by extraction; reconstructed from the field docs ("If [`None`], ...") — confirm
// against the repository.

impl Default for Config {
	fn default() -> Self {
		Self {
			log_target: "mixnet",

			num_gateway_mixnodes: 3,

			session_0_kx_secret: None,
			// Mixnodes relay traffic for the whole network, so get a larger queue and a faster
			// dispatch rate than non-mixnodes
			mixnode_session: SessionConfig {
				authored_packet_queue: AuthoredPacketQueueConfig {
					capacity: 50,
					multiple_messages: true,
				},
				mean_authored_packet_period: Duration::from_millis(100),
			},
			non_mixnode_session: Some(SessionConfig {
				authored_packet_queue: AuthoredPacketQueueConfig {
					capacity: 25,
					// By default only allow a single message to be queued in non-mixnode sessions.
					// Replies won't be sent in non-mixnode sessions, and requests really need to
					// be buffered externally anyway to handle eg retransmission. Limiting the
					// queue to a single message means we don't need to choose a session for
					// messages until the last moment (improving behaviour around session changes),
					// and minimises SPACE_IN_AUTHORED_PACKET_QUEUE events.
					multiple_messages: false,
				},
				mean_authored_packet_period: Duration::from_millis(1000),
			}),

			forward_packet_queue_capacity: 300,
			mean_forwarding_delay: Duration::from_secs(1),
			per_hop_net_delay: Duration::from_millis(300),

			loop_cover_proportion: 0.25,
			gen_cover_packets: true,
			num_hops: MAX_HOPS,

			surb_keystore_capacity: 200,
			max_incomplete_messages: 2000,
			max_incomplete_fragments: 2000,
			max_fragments_per_message: 25,
		}
	}
}
-------------------------------------------------------------------------------- /src/core/scattered.rs: --------------------------------------------------------------------------------
// Copyright 2022 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

/// A concatenation of multiple slices. The slices are not copied until
/// [`copy_to_slice`](Self::copy_to_slice) or [`to_vec`](Self::to_vec) is called.
///
/// Logically the elements are `first_slice`, then each slice of `mid_slices` in order, then
/// `last_slice`; `len` is always the sum of their lengths.
#[derive(Clone, Copy)]
pub struct Scattered<'a, T> {
	len: usize,
	first_slice: &'a [T],
	mid_slices: &'a [&'a [T]],
	last_slice: &'a [T],
}

impl<'a, T> Scattered<'a, T> {
	/// Returns the total number of elements.
	pub fn len(&self) -> usize {
		self.len
	}

	/// Returns `true` if there are no elements.
	pub fn is_empty(&self) -> bool {
		self.len == 0
	}

	/// Just like [`slice::split_at`]. Panics if `mid > self.len()`.
	pub fn split_at(&self, mid: usize) -> (Self, Self) {
		let right_len = self.len.checked_sub(mid).expect("mid must be <= len");

		// Split first_slice case: mid falls within first_slice
		let Some(mut mid_in_remaining) = mid.checked_sub(self.first_slice.len()) else {
			let (first_slice_left, first_slice_right) = self.first_slice.split_at(mid);
			return (
				Self { len: mid, first_slice: first_slice_left, mid_slices: &[], last_slice: &[] },
				Self {
					len: right_len,
					first_slice: first_slice_right,
					mid_slices: self.mid_slices,
					last_slice: self.last_slice,
				},
			)
		};

		// Split mid_slices case: mid falls within one of mid_slices. `mid_in_remaining` tracks
		// the split point relative to the start of the current slice.
		for (i, mid_slice) in self.mid_slices.iter().enumerate() {
			mid_in_remaining = match mid_in_remaining.checked_sub(mid_slice.len()) {
				Some(mid_in_remaining) => mid_in_remaining,
				None => {
					let (mid_slices_left, mid_slices_right) = self.mid_slices.split_at(i);
					let mid_slices_right =
						mid_slices_right.split_first().expect("i < self.mid_slices.len()").1;
					let (mid_slice_left, mid_slice_right) = mid_slice.split_at(mid_in_remaining);
					return (
						Self {
							len: mid,
							first_slice: self.first_slice,
							mid_slices: mid_slices_left,
							last_slice: mid_slice_left,
						},
						Self {
							len: right_len,
							first_slice: mid_slice_right,
							mid_slices: mid_slices_right,
							last_slice: self.last_slice,
						},
					)
				},
			};
		}

		// Split last_slice case
		let (last_slice_left, last_slice_right) = self.last_slice.split_at(mid_in_remaining);
		(
			Self {
				len: mid,
				first_slice: self.first_slice,
				mid_slices: self.mid_slices,
				last_slice: last_slice_left,
			},
			Self {
				len: right_len,
				first_slice: last_slice_right,
				mid_slices: &[],
				last_slice: &[],
			},
		)
	}
}

impl<'a, T: Copy> Scattered<'a, T> {
	/// Copy all elements into `dst`. `dst.len()` must equal `self.len()`.
	pub fn copy_to_slice(&self, dst: &mut [T]) {
		let (dst_first_slice, mut dst) = dst.split_at_mut(self.first_slice.len());
		dst_first_slice.copy_from_slice(self.first_slice);
		for mid_slice in self.mid_slices {
			let (dst_mid_slice, remaining_dst) = dst.split_at_mut(mid_slice.len());
			dst_mid_slice.copy_from_slice(mid_slice);
			dst = remaining_dst;
		}
		dst.copy_from_slice(self.last_slice);
	}
}

impl<'a, T: Clone> Scattered<'a, T> {
	/// Copy all elements to a new [`Vec`].
	pub fn to_vec(&self) -> Vec<T> {
		let mut vec = Vec::with_capacity(self.len);
		vec.extend_from_slice(self.first_slice);
		for mid_slice in self.mid_slices {
			vec.extend_from_slice(mid_slice);
		}
		vec.extend_from_slice(self.last_slice);
		vec
	}
}

impl<'a, T> From<&'a [T]> for Scattered<'a, T> {
	fn from(slice: &'a [T]) -> Self {
		Self { len: slice.len(), first_slice: slice, mid_slices: &[], last_slice: &[] }
	}
}

impl<'a, T> From<&'a [&'a [T]]> for Scattered<'a, T> {
	fn from(slices: &'a [&'a [T]]) -> Self {
		Self {
			len: slices.iter().map(|slice| slice.len()).sum(),
			first_slice: &[],
			mid_slices: slices,
			last_slice: &[],
		}
	}
}

#[cfg(test)]
mod tests {
	use super::*;

	fn to_vec_using_copy_to_slice(scattered: &Scattered<u8>) -> Vec<u8> {
		let mut vec = vec![0; scattered.len()];
		scattered.copy_to_slice(&mut vec);
		vec
	}

	/// Builds a `Scattered` over slices of the given lengths and checks that repeated `split_at`
	/// calls agree with splitting an equivalent contiguous slice.
	fn test_splits(slice_lens: &[usize], mids: &[usize]) {
		// Deterministic, non-repeating fill. The splitting logic is data-independent, so any
		// pattern of distinct bytes exercises it as well as random data, and deterministic data
		// makes failures reproducible (previously this used rand::thread_rng).
		let contig: Vec<u8> =
			(0..slice_lens.iter().sum::<usize>()).map(|i| (i.wrapping_mul(31) + 7) as u8).collect();
		let mut contig = contig.as_slice();

		let slices: Vec<_> = {
			let mut remaining = contig;
			slice_lens
				.iter()
				.map(|slice_len| {
					let (left, right) = remaining.split_at(*slice_len);
					remaining = right;
					left
				})
				.collect()
		};
		let mut scattered: Scattered<u8> = slices.as_slice().into();

		for mid in mids {
			let (contig_left, contig_right) = contig.split_at(*mid);
			let (scattered_left, scattered_right) = scattered.split_at(*mid);
			assert_eq!(contig_left, scattered_left.to_vec());
			assert_eq!(contig_right, scattered_right.to_vec());
			assert_eq!(contig_left, to_vec_using_copy_to_slice(&scattered_left));
			assert_eq!(contig_right, to_vec_using_copy_to_slice(&scattered_right));
			contig = contig_right;
			scattered = scattered_right;
		}
	}

	#[test]
	fn single_slice() {
		test_splits(&[20], &[0, 9, 5, 6]);
	}

	#[test]
	fn multiple_slices() {
		test_splits(&[5, 7, 10, 7, 5], &[3, 2, 3, 4, 6, 4, 3, 4, 4, 1]);
		test_splits(&[5, 7, 10, 7, 5], &[6, 9, 16, 3]);
		test_splits(&[5, 7, 10, 7, 5], &[33, 1]);
		test_splits(&[5, 7, 10, 7, 5], &[34]);
	}
}
12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Sphinx packet peeling. 22 | 23 | use super::{build::SurbPayloadEncryptionKeys, crypto::*, delay::Delay, packet::*, target::Target}; 24 | use arrayref::{array_mut_ref, array_ref, array_refs}; 25 | use subtle::ConstantTimeEq; 26 | 27 | /// Returns a reference to the key-exchange public key in `packet`. 28 | pub fn kx_public(packet: &Packet) -> &KxPublic { 29 | array_ref![packet, 0, KX_PUBLIC_SIZE] 30 | } 31 | 32 | /// Action to take with a peeled packet. 33 | #[derive(Debug, PartialEq, Eq)] 34 | pub enum Action { 35 | /// The packet in `out` should be forwarded to `target` after `delay`. 36 | ForwardTo { target: Target, delay: Delay }, 37 | /// The payload data in `out[..PAYLOAD_DATA_SIZE]` should be delivered locally. 38 | DeliverRequest, 39 | /// The reply payload in `out[..PAYLOAD_SIZE]` should be decrypted according to `surb_id` and 40 | /// then delivered locally. 41 | DeliverReply { surb_id: SurbId }, 42 | /// The packet was a cover packet with the specified ID. There is no payload. 
43 | DeliverCover { cover_id: Option }, 44 | } 45 | 46 | #[derive(Debug, thiserror::Error, PartialEq, Eq)] 47 | pub enum PeelErr { 48 | #[error("Bad MAC in header")] 49 | Mac, 50 | #[error("Bad action in header")] 51 | Action, 52 | #[error("Bad payload tag")] 53 | PayloadTag, 54 | } 55 | 56 | fn check_payload_tag(tag: &PayloadTag) -> Result<(), PeelErr> { 57 | let tag_ok: bool = tag.ct_eq(&PAYLOAD_TAG).into(); 58 | if tag_ok { 59 | Ok(()) 60 | } else { 61 | Err(PeelErr::PayloadTag) 62 | } 63 | } 64 | 65 | /// Attempt to peel a layer off `packet` using `kx_shared_secret`. `kx_shared_secret` should be 66 | /// derived from [`kx_public(packet)`](kx_public) and this node's secret key. 67 | pub fn peel( 68 | out: &mut Packet, 69 | packet: &Packet, 70 | kx_shared_secret: &SharedSecret, 71 | ) -> Result { 72 | // (kx_public, mac, actions, payload) correspond to (alpha, gamma, beta, delta) in the Sphinx 73 | // paper 74 | let (kx_public, mac, actions, payload) = 75 | array_refs![packet, KX_PUBLIC_SIZE, MAC_SIZE, ACTIONS_SIZE, PAYLOAD_SIZE]; 76 | 77 | let sds = SmallDerivedSecrets::new(kx_shared_secret); 78 | 79 | // Verify the MAC 80 | if !mac_ok(mac, actions, sds.mac_key()) { 81 | return Err(PeelErr::Mac) 82 | } 83 | 84 | // Decrypt the routing actions and generate padding for length invariance. Try to get the 85 | // decrypted actions in the right place in the output. The most likely case is that we will be 86 | // forwarding the packet to a mixnode, so assume this. We could save some work in the deliver 87 | // case by decrypting just the first few bytes to start with to see if we need to decrypt the 88 | // rest. This would complicate things and as the forward case is much more common it might 89 | // ultimately not make things any faster, so don't bother for now. 
90 | let decrypted_actions = 91 | array_mut_ref![out, KX_PUBLIC_SIZE - RAW_ACTION_SIZE, ACTIONS_SIZE + MAX_ACTIONS_PAD_SIZE]; 92 | *array_mut_ref![decrypted_actions, 0, ACTIONS_SIZE] = *actions; 93 | *array_mut_ref![decrypted_actions, ACTIONS_SIZE, MAX_ACTIONS_PAD_SIZE] = 94 | [0; MAX_ACTIONS_PAD_SIZE]; // Padding is generated by encrypting zeroes 95 | apply_actions_encryption_keystream(decrypted_actions, sds.actions_encryption_key()); 96 | 97 | let raw_action = RawAction::from_le_bytes(*array_ref![decrypted_actions, 0, RAW_ACTION_SIZE]); 98 | Ok(match raw_action { 99 | RAW_ACTION_DELIVER_REQUEST => { 100 | // Peel off the final layer of payload encryption 101 | let out = array_mut_ref![out, 0, PAYLOAD_SIZE]; 102 | *out = *payload; 103 | decrypt_payload(out, &derive_payload_encryption_key(kx_shared_secret)); 104 | 105 | check_payload_tag(array_ref![out, PAYLOAD_DATA_SIZE, PAYLOAD_TAG_SIZE])?; 106 | 107 | Action::DeliverRequest 108 | }, 109 | RAW_ACTION_DELIVER_REPLY => { 110 | // Pull the SURB ID out 111 | let surb_id = *array_ref![decrypted_actions, RAW_ACTION_SIZE, SURB_ID_SIZE]; 112 | 113 | // Copy the payload across but don't do anything with it yet; the caller will need to 114 | // fetch the keys corresponding to the SURB ID and then call decrypt_reply_payload() 115 | *array_mut_ref![out, 0, PAYLOAD_SIZE] = *payload; 116 | 117 | Action::DeliverReply { surb_id } 118 | }, 119 | RAW_ACTION_DELIVER_COVER => Action::DeliverCover { cover_id: None }, 120 | RAW_ACTION_DELIVER_COVER_WITH_ID => { 121 | // Pull the cover ID out 122 | let cover_id = *array_ref![decrypted_actions, RAW_ACTION_SIZE, COVER_ID_SIZE]; 123 | 124 | Action::DeliverCover { cover_id: Some(cover_id) } 125 | }, 126 | _ => { 127 | // Forward. Determine target... 
128 | let target = if raw_action == RAW_ACTION_FORWARD_TO_PEER_ID { 129 | // Copy out peer ID and move rest down 130 | let peer_id = *array_ref![decrypted_actions, RAW_ACTION_SIZE, PEER_ID_SIZE]; 131 | decrypted_actions.copy_within( 132 | RAW_ACTION_SIZE + PEER_ID_SIZE.. 133 | RAW_ACTION_SIZE + PEER_ID_SIZE + MAC_SIZE + ACTIONS_SIZE, 134 | RAW_ACTION_SIZE, 135 | ); 136 | Target::PeerId(peer_id) 137 | } else { 138 | Target::MixnodeIndex(raw_action.try_into().map_err(|_| PeelErr::Action)?) 139 | }; 140 | 141 | // Determine the forwarding delay 142 | let delay = Delay::exp(sds.delay_seed()); 143 | 144 | // Blind the key-exchange public key 145 | *array_mut_ref![out, 0, KX_PUBLIC_SIZE] = blind_kx_public(kx_public, kx_shared_secret); 146 | 147 | // The next MAC and routing actions are already in the right place in out 148 | 149 | // Peel off one layer of payload encryption 150 | let out_payload = array_mut_ref![out, HEADER_SIZE, PAYLOAD_SIZE]; 151 | *out_payload = *payload; 152 | decrypt_payload(out_payload, &derive_payload_encryption_key(kx_shared_secret)); 153 | 154 | Action::ForwardTo { target, delay } 155 | }, 156 | }) 157 | } 158 | 159 | /// Decrypts a reply payload given the encryption keys of the corresponding SURB. On success, the 160 | /// payload data is left in `payload[..PAYLOAD_DATA_SIZE]`. 161 | pub fn decrypt_reply_payload( 162 | payload: &mut Payload, 163 | keys: &SurbPayloadEncryptionKeys, 164 | ) -> Result<(), PeelErr> { 165 | for key in keys.iter().rev() { 166 | encrypt_payload(payload, key); 167 | } 168 | 169 | check_payload_tag(array_ref![payload, PAYLOAD_DATA_SIZE, PAYLOAD_TAG_SIZE]) 170 | } 171 | -------------------------------------------------------------------------------- /src/core/sessions.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet sessions. 22 | 23 | use super::{ 24 | kx_pair::KxPair, packet_queues::AuthoredPacketQueue, replay_filter::ReplayFilter, 25 | topology::Topology, 26 | }; 27 | use std::{ 28 | fmt, 29 | ops::{Add, Index, IndexMut}, 30 | time::Duration, 31 | }; 32 | 33 | pub struct Session { 34 | /// Key-exchange key pair. 35 | pub kx_pair: KxPair, 36 | /// Mixnode topology. 37 | pub topology: Topology, 38 | /// Queue of packets authored by us, to be dispatched in place of drop cover traffic. 39 | pub authored_packet_queue: AuthoredPacketQueue, 40 | /// See [`SessionConfig`](super::config::SessionConfig::mean_authored_packet_period). 41 | pub mean_authored_packet_period: Duration, 42 | /// Filter applied to incoming packets to prevent replay. This is per-session because the 43 | /// key-exchange keys are rotated every session. 
Note that while this always exists, for 44 | /// sessions where we are not a mixnode, it should never contain anything, and so should not 45 | /// cost anything ([`ReplayFilter`] lazily allocates internally). 46 | pub replay_filter: ReplayFilter, 47 | } 48 | 49 | /// Absolute session index. 50 | pub type SessionIndex = u32; 51 | 52 | /// Relative session index. 53 | #[derive(Clone, Copy, PartialEq, Eq)] 54 | pub enum RelSessionIndex { 55 | /// The current session. 56 | Current, 57 | /// The previous session. 58 | Prev, 59 | } 60 | 61 | impl RelSessionIndex { 62 | /// Returns the `RelSessionIndex` corresponding to `session_index`, or `None` if there is no 63 | /// such `RelSessionIndex`. 64 | pub fn from_session_index( 65 | session_index: SessionIndex, 66 | current_session_index: SessionIndex, 67 | ) -> Option { 68 | match current_session_index.checked_sub(session_index) { 69 | Some(0) => Some(Self::Current), 70 | Some(1) => Some(Self::Prev), 71 | _ => None, 72 | } 73 | } 74 | } 75 | 76 | impl Add for RelSessionIndex { 77 | type Output = SessionIndex; 78 | 79 | fn add(self, other: SessionIndex) -> Self::Output { 80 | match self { 81 | Self::Current => other, 82 | Self::Prev => other.checked_sub(1).expect("Session index underflow"), 83 | } 84 | } 85 | } 86 | 87 | pub enum SessionSlot { 88 | Empty, 89 | KxPair(KxPair), 90 | /// Like [`Empty`](Self::Empty), but we should not try to create a [`Session`] struct. 
91 | Disabled, 92 | Full(Session), 93 | } 94 | 95 | impl SessionSlot { 96 | pub fn is_empty(&self) -> bool { 97 | matches!(self, Self::Empty) 98 | } 99 | 100 | pub fn as_option(&self) -> Option<&Session> { 101 | match self { 102 | Self::Full(session) => Some(session), 103 | _ => None, 104 | } 105 | } 106 | 107 | pub fn as_mut_option(&mut self) -> Option<&mut Session> { 108 | match self { 109 | Self::Full(session) => Some(session), 110 | _ => None, 111 | } 112 | } 113 | } 114 | 115 | pub struct Sessions { 116 | pub current: SessionSlot, 117 | pub prev: SessionSlot, 118 | } 119 | 120 | impl Sessions { 121 | pub fn is_empty(&self) -> bool { 122 | self.current.is_empty() && self.prev.is_empty() 123 | } 124 | 125 | pub fn iter(&self) -> impl Iterator> { 126 | [&self.current, &self.prev] 127 | .into_iter() 128 | .filter_map(|session| session.as_option()) 129 | } 130 | 131 | /// This is guaranteed to return the current session first, if it exists. 132 | pub fn enumerate_mut(&mut self) -> impl Iterator)> { 133 | [(RelSessionIndex::Current, &mut self.current), (RelSessionIndex::Prev, &mut self.prev)] 134 | .into_iter() 135 | .filter_map(|(index, session)| session.as_mut_option().map(|session| (index, session))) 136 | } 137 | } 138 | 139 | impl Index for Sessions { 140 | type Output = SessionSlot; 141 | 142 | fn index(&self, index: RelSessionIndex) -> &Self::Output { 143 | match index { 144 | RelSessionIndex::Current => &self.current, 145 | RelSessionIndex::Prev => &self.prev, 146 | } 147 | } 148 | } 149 | 150 | impl IndexMut for Sessions { 151 | fn index_mut(&mut self, index: RelSessionIndex) -> &mut Self::Output { 152 | match index { 153 | RelSessionIndex::Current => &mut self.current, 154 | RelSessionIndex::Prev => &mut self.prev, 155 | } 156 | } 157 | } 158 | 159 | /// Each session should progress through these phases in order. 
160 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 161 | pub enum SessionPhase { 162 | /// Generate cover traffic to the current session's mixnode set. 163 | CoverToCurrent, 164 | /// Build requests using the current session's mixnode set. The previous session's mixnode set 165 | /// may be used if this is explicitly requested. 166 | RequestsToCurrent, 167 | /// Only send cover (and forwarded) traffic to the previous session's mixnode set. Any packets 168 | /// in the authored packet queue for the previous session at this point are effectively 169 | /// dropped. 170 | CoverToPrev, 171 | /// Disconnect the previous session's mixnode set. 172 | DisconnectFromPrev, 173 | } 174 | 175 | impl SessionPhase { 176 | /// Is the previous session still needed? 177 | pub fn need_prev(self) -> bool { 178 | self < Self::DisconnectFromPrev 179 | } 180 | 181 | /// Should we allow pushing to and popping from the authored packet queue for the specified 182 | /// session? 183 | pub fn allow_requests_and_replies(self, rel_session_index: RelSessionIndex) -> bool { 184 | match rel_session_index { 185 | RelSessionIndex::Prev => self < Self::CoverToPrev, 186 | RelSessionIndex::Current => self >= Self::RequestsToCurrent, 187 | } 188 | } 189 | 190 | /// Which session should requests be built for by default? 
191 | pub fn default_request_session(self) -> RelSessionIndex { 192 | if self >= Self::RequestsToCurrent { 193 | RelSessionIndex::Current 194 | } else { 195 | RelSessionIndex::Prev 196 | } 197 | } 198 | } 199 | 200 | impl fmt::Display for SessionPhase { 201 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 202 | match self { 203 | Self::CoverToCurrent => write!(fmt, "Generating cover traffic to current mixnode set"), 204 | Self::RequestsToCurrent => write!(fmt, "Building requests using current mixnode set"), 205 | Self::CoverToPrev => write!(fmt, "Only sending cover traffic to previous mixnode set"), 206 | Self::DisconnectFromPrev => write!(fmt, "Only using current mixnode set"), 207 | } 208 | } 209 | } 210 | 211 | /// The index and phase of the current session. 212 | #[derive(Clone, Copy, PartialEq, Eq)] 213 | pub struct SessionStatus { 214 | /// Index of the current session. 215 | pub current_index: SessionIndex, 216 | /// Current session phase. 217 | pub phase: SessionPhase, 218 | } 219 | 220 | impl fmt::Display for SessionStatus { 221 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 222 | write!(fmt, "Current index {}, phase: {}", self.current_index, self.phase) 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /src/core/sphinx/tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Sphinx packet building/peeling tests. 

#![cfg(test)]

use super::{
	crypto::{derive_kx_public, derive_kx_shared_secret, gen_kx_secret},
	packet::HEADER_SIZE,
	*,
};
use arrayref::array_mut_ref;
use rand::{CryptoRng, Rng};

/// Generates a random, in-range mixnode index.
fn gen_mixnode_index(rng: &mut impl Rng) -> MixnodeIndex {
	rng.gen_range(0..=MAX_MIXNODE_INDEX).try_into().unwrap()
}

/// Generates `num_hops - 1` random hop targets: exactly one (at a random position) is a
/// `Target::PeerId`, the rest are mixnode indices.
fn gen_targets(rng: &mut impl Rng, num_hops: usize) -> Vec<Target> {
	let peer_id_i = rng.gen_range(0..num_hops - 1);
	(0..num_hops - 1)
		.map(|i| {
			if i == peer_id_i {
				Target::PeerId(rng.gen())
			} else {
				Target::MixnodeIndex(gen_mixnode_index(rng))
			}
		})
		.collect()
}

/// Generates a key-exchange secret and matching public key for each of `num_hops` hops.
fn gen_their_kx_secrets_and_publics(
	rng: &mut (impl Rng + CryptoRng),
	num_hops: usize,
) -> (Vec<KxSecret>, Vec<KxPublic>) {
	(0..num_hops)
		.map(|_i| {
			let secret = gen_kx_secret(rng);
			let public = derive_kx_public(&secret);
			(secret, public)
		})
		.unzip()
}

/// Generates random payload data.
fn gen_payload_data(rng: &mut impl Rng) -> PayloadData {
	let mut data = [0; PAYLOAD_DATA_SIZE];
	rng.fill_bytes(&mut data);
	data
}

/// Builds a request packet and peels it hop by hop, checking that each hop sees the expected
/// target, that the final hop recovers the payload, and that the per-hop delays sum to the
/// expected total.
#[test]
fn basic_operation() {
	let mut rng = rand::thread_rng();

	let num_hops = rng.gen_range(MAX_HOPS - 1..=MAX_HOPS);
	let targets = gen_targets(&mut rng, num_hops);
	let (their_kx_secrets, their_kx_publics) = gen_their_kx_secrets_and_publics(&mut rng, num_hops);
	let payload_data = gen_payload_data(&mut rng);

	let mut packet = [0; PACKET_SIZE];
	*mut_payload_data(&mut packet) = payload_data;
	let expected_total_delay =
		complete_request_packet(&mut packet, &mut rng, &targets, &their_kx_publics);

	let mut total_delay = Delay::zero();
	// The final hop has no target; pair it with None
	for (expected_target, their_kx_secret) in
		targets.iter().map(Some).chain(std::iter::once(None)).zip(&their_kx_secrets)
	{
		let kx_shared_secret = derive_kx_shared_secret(kx_public(&packet), their_kx_secret);

		let mut out = [0; PACKET_SIZE];
		let action = peel(&mut out, &packet, &kx_shared_secret).unwrap();

		let target = match &action {
			Action::ForwardTo { target, delay } => {
				total_delay += *delay;
				packet = out;
				Some(target)
			},
			Action::DeliverRequest => {
				assert_eq!(out[..PAYLOAD_DATA_SIZE], payload_data);
				None
			},
			Action::DeliverReply { .. } => panic!("Did not expect deliver reply action"),
			Action::DeliverCover { .. } => panic!("Did not expect deliver cover action"),
		};
		assert_eq!(target, expected_target);
	}

	assert_eq!(total_delay, expected_total_delay);
}

/// Corrupting any header byte must make the MAC check fail; restoring it must make peeling
/// succeed again.
#[test]
fn bad_mac() {
	let mut rng = rand::thread_rng();

	let targets = [];
	let (their_kx_secrets, their_kx_publics) = gen_their_kx_secrets_and_publics(&mut rng, 1);

	let mut packet = [0; PACKET_SIZE];
	complete_request_packet(&mut packet, &mut rng, &targets, &their_kx_publics);

	let kx_shared_secret =
		derive_kx_shared_secret(kx_public(&packet), their_kx_secrets.first().unwrap());

	let mut out = [0; PACKET_SIZE];

	// Corrupt the header, MAC check should fail
	packet[HEADER_SIZE - 1] ^= 1;
	assert_eq!(peel(&mut out, &packet, &kx_shared_secret), Err(PeelErr::Mac));

	// Fix the header, peel should succeed
	packet[HEADER_SIZE - 1] ^= 1;
	assert_eq!(peel(&mut out, &packet, &kx_shared_secret), Ok(Action::DeliverRequest));
}

/// Corrupting the payload must make the payload tag check fail; restoring it must make peeling
/// succeed and recover the payload data.
#[test]
fn bad_payload_tag() {
	let mut rng = rand::thread_rng();

	let targets = [];
	let (their_kx_secrets, their_kx_publics) = gen_their_kx_secrets_and_publics(&mut rng, 1);
	let payload_data = gen_payload_data(&mut rng);

	let mut packet = [0; PACKET_SIZE];
	*mut_payload_data(&mut packet) = payload_data;
	complete_request_packet(&mut packet, &mut rng, &targets, &their_kx_publics);

	let kx_shared_secret =
		derive_kx_shared_secret(kx_public(&packet), their_kx_secrets.first().unwrap());

	let mut out = [0; PACKET_SIZE];

	// Corrupt the payload, tag check should fail
	packet[HEADER_SIZE] ^= 1;
	assert_eq!(peel(&mut out, &packet, &kx_shared_secret), Err(PeelErr::PayloadTag));

	// Fix the payload, peel should succeed
	packet[HEADER_SIZE] ^= 1;
	assert_eq!(peel(&mut out, &packet, &kx_shared_secret), Ok(Action::DeliverRequest));
	assert_eq!(out[..PAYLOAD_DATA_SIZE], payload_data);
}

/// Builds a SURB, completes a reply packet from it, and peels hop by hop: each hop sees the
/// expected target, the final hop sees the expected SURB ID and can decrypt the reply payload,
/// and the delays sum as expected.
#[test]
fn surb() {
	let mut rng = rand::thread_rng();

	let num_hops = rng.gen_range(MAX_HOPS - 1..=MAX_HOPS);
	let first_mixnode_index = gen_mixnode_index(&mut rng);
	let targets = gen_targets(&mut rng, num_hops);
	let (their_kx_secrets, their_kx_publics) = gen_their_kx_secrets_and_publics(&mut rng, num_hops);
	let expected_surb_id = rng.gen();
	let payload_data = gen_payload_data(&mut rng);

	let mut surb = [0; SURB_SIZE];
	let mut payload_encryption_keys = SurbPayloadEncryptionKeys::new();
	let expected_total_delay = build_surb(
		&mut surb,
		&mut payload_encryption_keys,
		&mut rng,
		first_mixnode_index,
		&targets,
		&their_kx_publics,
		&expected_surb_id,
	);

	let mut packet = [0; PACKET_SIZE];
	*mut_payload_data(&mut packet) = payload_data;
	assert_eq!(complete_reply_packet(&mut packet, &surb), Some(first_mixnode_index));

	let mut total_delay = Delay::zero();
	for (expected_target, their_kx_secret) in
		targets.iter().map(Some).chain(std::iter::once(None)).zip(&their_kx_secrets)
	{
		let kx_shared_secret = derive_kx_shared_secret(kx_public(&packet), their_kx_secret);

		let mut out = [0; PACKET_SIZE];
		let action = peel(&mut out, &packet, &kx_shared_secret).unwrap();

		let target = match &action {
			Action::ForwardTo { target, delay } => {
				total_delay += *delay;
				packet = out;
				Some(target)
			},
			Action::DeliverReply { surb_id } => {
				assert_eq!(surb_id, &expected_surb_id);
				decrypt_reply_payload(
					array_mut_ref![out, 0, PAYLOAD_SIZE],
					&payload_encryption_keys,
				)
				.unwrap();
				assert_eq!(out[..PAYLOAD_DATA_SIZE], payload_data);
				None
			},
			Action::DeliverRequest => panic!("Did not expect deliver request action"),
			Action::DeliverCover { .. } => panic!("Did not expect deliver cover action"),
		};
		assert_eq!(target, expected_target);
	}

	assert_eq!(total_delay, expected_total_delay);
}
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet core tests. 22 | 23 | use mixnet::core::{ 24 | Config, Events, Message, MessageId, Mixnet, Mixnode, NetworkStatus, PeerId, RelSessionIndex, 25 | SessionIndex, SessionPhase, SessionStatus, MESSAGE_ID_SIZE, 26 | }; 27 | use parking_lot::Mutex; 28 | use rand::{Rng, RngCore}; 29 | use std::{ 30 | collections::{HashMap, HashSet}, 31 | sync::OnceLock, 32 | }; 33 | 34 | fn log_target(peer_index: usize) -> &'static str { 35 | static LOG_TARGETS: OnceLock>> = OnceLock::new(); 36 | LOG_TARGETS 37 | .get_or_init(|| Mutex::new(HashMap::new())) 38 | .lock() 39 | .entry(peer_index) 40 | .or_insert_with(|| Box::leak(format!("mixnet({peer_index})").into_boxed_str())) 41 | } 42 | 43 | struct Peer { 44 | id: PeerId, 45 | mixnet: Mixnet<()>, 46 | } 47 | 48 | struct PeerNetworkStatus<'id, 'connections> { 49 | id: &'id PeerId, 50 | connections: &'connections HashMap>, 51 | } 52 | 53 | impl<'id, 'connections> NetworkStatus for PeerNetworkStatus<'id, 'connections> { 54 | fn local_peer_id(&self) -> PeerId { 55 | *self.id 56 | } 57 | 58 | fn is_connected(&self, peer_id: &PeerId) -> bool { 59 | self.connections[self.id].contains(peer_id) || self.connections[peer_id].contains(self.id) 60 | } 61 | } 62 | 63 | struct Network { 64 | current_session_index: SessionIndex, 65 | peers: Vec, 66 | connections: HashMap>, 67 | } 68 | 69 | impl Network { 70 | fn new(rng: &mut impl Rng, mut config: impl FnMut(usize) -> Config, num_peers: usize) -> Self { 71 | let peers = (0..num_peers) 72 | .map(|peer_index| Peer { id: rng.gen(), mixnet: Mixnet::new(config(peer_index)) }) 73 | .collect(); 74 | Self { current_session_index: 0, peers, connections: HashMap::new() } 75 | } 76 | 77 | fn 
set_session_status(&mut self, session_status: SessionStatus) { 78 | self.current_session_index = session_status.current_index; 79 | for peer in &mut self.peers { 80 | peer.mixnet.set_session_status(session_status); 81 | } 82 | } 83 | 84 | fn maybe_set_mixnodes(&mut self, rel_session_index: RelSessionIndex, mixnodes: &[Mixnode<()>]) { 85 | for peer in &mut self.peers { 86 | peer.mixnet 87 | .maybe_set_mixnodes(rel_session_index, &mut || Ok(mixnodes.to_owned())); 88 | } 89 | } 90 | 91 | fn next_mixnodes(&mut self, peer_indices: impl Iterator) -> Vec> { 92 | peer_indices 93 | .map(|index| { 94 | let peer = &mut self.peers[index]; 95 | Mixnode { kx_public: *peer.mixnet.next_kx_public(), peer_id: peer.id, extra: () } 96 | }) 97 | .collect() 98 | } 99 | 100 | fn tick(&mut self, mut handle_message: impl FnMut(usize, &mut Peer, Message)) { 101 | let mut packets = Vec::new(); 102 | for peer in &mut self.peers { 103 | let events = peer.mixnet.take_events(); 104 | if events.contains(Events::RESERVED_PEERS_CHANGED) { 105 | self.connections.insert( 106 | peer.id, 107 | peer.mixnet.reserved_peers().map(|mixnode| mixnode.peer_id).collect(), 108 | ); 109 | } 110 | let ns = PeerNetworkStatus { id: &peer.id, connections: &self.connections }; 111 | if events.contains(Events::NEXT_FORWARD_PACKET_DEADLINE_CHANGED) && 112 | peer.mixnet.next_forward_packet_deadline().is_some() 113 | { 114 | if let Some(packet) = peer.mixnet.pop_next_forward_packet() { 115 | assert!(ns.is_connected(&packet.peer_id)); 116 | packets.push(packet); 117 | } 118 | } 119 | if events.contains(Events::NEXT_AUTHORED_PACKET_DEADLINE_CHANGED) && 120 | peer.mixnet.next_authored_packet_delay().is_some() 121 | { 122 | if let Some(packet) = peer.mixnet.pop_next_authored_packet(&ns) { 123 | assert!(ns.is_connected(&packet.peer_id)); 124 | packets.push(packet); 125 | } 126 | } 127 | } 128 | 129 | for packet in packets { 130 | let (peer_index, peer) = self 131 | .peers 132 | .iter_mut() 133 | .enumerate() 134 | .find(|(_, 
peer)| peer.id == packet.peer_id) 135 | .unwrap(); 136 | if let Some(message) = peer.mixnet.handle_packet(&packet.packet) { 137 | handle_message(peer_index, peer, message); 138 | } 139 | } 140 | } 141 | 142 | fn post_request( 143 | &mut self, 144 | from_peer_index: usize, 145 | session_index: SessionIndex, 146 | message_id: &MessageId, 147 | data: &[u8], 148 | num_surbs: usize, 149 | ) { 150 | let from_peer = &mut self.peers[from_peer_index]; 151 | let from_peer_ns = PeerNetworkStatus { id: &from_peer.id, connections: &self.connections }; 152 | from_peer 153 | .mixnet 154 | .post_request( 155 | session_index, 156 | &mut None, 157 | message_id, 158 | data.into(), 159 | num_surbs, 160 | &from_peer_ns, 161 | ) 162 | .unwrap(); 163 | } 164 | } 165 | 166 | #[test] 167 | fn basic_operation() { 168 | let _ = env_logger::try_init(); 169 | 170 | let mut rng = rand::thread_rng(); 171 | 172 | let mut network = Network::new( 173 | &mut rng, 174 | |peer_index| Config { 175 | log_target: log_target(peer_index), 176 | gen_cover_packets: false, 177 | ..Default::default() 178 | }, 179 | 30, 180 | ); 181 | network.set_session_status(SessionStatus { 182 | current_index: 0, 183 | phase: SessionPhase::DisconnectFromPrev, 184 | }); 185 | let mixnodes = network.next_mixnodes(0..20); 186 | network.set_session_status(SessionStatus { 187 | current_index: 1, 188 | phase: SessionPhase::DisconnectFromPrev, 189 | }); 190 | network.maybe_set_mixnodes(RelSessionIndex::Current, &mixnodes); 191 | 192 | let request_from_peer_index = 20; 193 | let mut request_message_id = [0; MESSAGE_ID_SIZE]; 194 | rng.fill_bytes(&mut request_message_id); 195 | let mut request_data = vec![0; 9999]; 196 | rng.fill_bytes(&mut request_data); 197 | let num_surbs = 3; 198 | let mut reply_data = vec![0; 4567]; 199 | rng.fill_bytes(&mut reply_data); 200 | 201 | let mut step = 0; 202 | for i in 0..100 { 203 | network.tick(|peer_index, peer, message| { 204 | match step { 205 | 0 => { 206 | let Message::Request(mut message) = 
message else { 207 | panic!("Expected request message") 208 | }; 209 | assert_eq!(message.session_index, 1); 210 | assert_eq!(message.id, request_message_id); 211 | assert_eq!(message.data, request_data); 212 | assert_eq!(message.surbs.len(), num_surbs); 213 | let mut reply_id = [0; MESSAGE_ID_SIZE]; 214 | rng.fill_bytes(&mut reply_id); 215 | peer.mixnet 216 | .post_reply( 217 | &mut message.surbs, 218 | message.session_index, 219 | &reply_id, 220 | reply_data.as_slice().into(), 221 | ) 222 | .unwrap(); 223 | }, 224 | 1 => { 225 | assert_eq!(peer_index, request_from_peer_index); 226 | let Message::Reply(message) = message else { panic!("Expected reply message") }; 227 | assert_eq!(message.request_id, request_message_id); 228 | assert_eq!(message.data, reply_data); 229 | }, 230 | _ => panic!("Unexpected message"), 231 | } 232 | step += 1; 233 | }); 234 | if i == 0 { 235 | network.post_request( 236 | request_from_peer_index, 237 | 1, 238 | &request_message_id, 239 | &request_data, 240 | num_surbs, 241 | ); 242 | } 243 | } 244 | assert_eq!(step, 2); 245 | } 246 | -------------------------------------------------------------------------------- /src/reply_manager.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 
12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! A mixnode may receive the same request multiple times due to retransmission (see eg 22 | //! [`request_manager`](super::request_manager)). A [`ReplyManager`] can be used to cache replies, 23 | //! to avoid needing to execute requests more than once. 24 | 25 | use super::core::{MessageId, Mixnet, RequestMessage, SessionIndex, Surb, MESSAGE_ID_SIZE}; 26 | use hashlink::{linked_hash_map::Entry, LinkedHashMap}; 27 | use log::{debug, trace}; 28 | use rand::RngCore; 29 | use std::time::{Duration, Instant}; 30 | 31 | /// Reply manager configuration. 32 | #[derive(Clone, Debug)] 33 | pub struct Config { 34 | /// The target for log messages. 35 | pub log_target: &'static str, 36 | /// Maximum number of requests to remember. When this limit is reached, old requests will be 37 | /// automatically discarded to make space for new ones. 38 | pub capacity: usize, 39 | /// Maximum number of copies of a reply message to post in response to a single request 40 | /// message. Note that the number of copies is also limited by the number of SURBs provided in 41 | /// the request message. 42 | pub max_posts: usize, 43 | /// After replying to a request, ignore repeats of the request for this length of time. This 44 | /// should ideally be set such that extra copies of a request message posted at the same time 45 | /// as the first received one get ignored, but retries posted after a timeout do not. 
46 | pub cooldown: Duration, 47 | } 48 | 49 | impl Default for Config { 50 | fn default() -> Self { 51 | Self { 52 | log_target: "mixnet", 53 | capacity: 400, 54 | max_posts: 2, 55 | cooldown: Duration::from_secs(10), 56 | } 57 | } 58 | } 59 | 60 | struct Reply { 61 | /// The _reply_ message ID. 62 | message_id: MessageId, 63 | data: Vec, 64 | } 65 | 66 | impl Reply { 67 | fn new(data: Vec) -> Self { 68 | let mut message_id = [0; MESSAGE_ID_SIZE]; 69 | rand::thread_rng().fill_bytes(&mut message_id); 70 | Self { message_id, data } 71 | } 72 | } 73 | 74 | /// Context needed to reply to a request. 75 | pub struct ReplyContext { 76 | session_index: SessionIndex, 77 | /// The _request_ message ID. 78 | message_id: MessageId, 79 | surbs: Vec, 80 | } 81 | 82 | impl ReplyContext { 83 | /// Returns a reference to the request message ID. 84 | pub fn message_id(&self) -> &MessageId { 85 | &self.message_id 86 | } 87 | 88 | fn post_reply(&mut self, reply: &Reply, mixnet: &mut Mixnet, config: &Config) { 89 | for _ in 0..config.max_posts { 90 | if let Err(err) = mixnet.post_reply( 91 | &mut self.surbs, 92 | self.session_index, 93 | &reply.message_id, 94 | reply.data.as_slice().into(), 95 | ) { 96 | debug!(target: config.log_target, 97 | "Failed to post reply to request with message ID {:x?}: {err}", 98 | self.message_id); 99 | break 100 | } 101 | } 102 | } 103 | } 104 | 105 | enum ReplyState { 106 | /// The request is currently being handled. 107 | Pending, 108 | /// The request has been handled already. 109 | Complete { reply: Reply, last_post: Instant }, 110 | } 111 | 112 | /// Reply manager state. 113 | pub struct ReplyManager { 114 | config: Config, 115 | states: LinkedHashMap, 116 | } 117 | 118 | impl ReplyManager { 119 | /// Create a new `ReplyManager` with the given configuration. 
120 | pub fn new(config: Config) -> Self { 121 | let states = LinkedHashMap::with_capacity( 122 | // Plus one because we only evict _after_ going over the limit 123 | config.capacity.saturating_add(1), 124 | ); 125 | Self { config, states } 126 | } 127 | 128 | fn maybe_evict(&mut self) { 129 | if self.states.len() > self.config.capacity { 130 | self.states.pop_front(); 131 | debug_assert_eq!(self.states.len(), self.config.capacity); 132 | } 133 | } 134 | 135 | /// Attempt to insert a request. 136 | /// 137 | /// If the request is already present, posts the reply if necessary, and returns `None`. The 138 | /// caller does not need to do anything more. 139 | /// 140 | /// If `Some` is returned, the caller should handle the request and then call either 141 | /// [`abandon`](Self::abandon) or [`complete`](Self::complete) with the [`ReplyContext`]. The 142 | /// `Vec` contains the request message data. 143 | pub fn insert( 144 | &mut self, 145 | message: RequestMessage, 146 | mixnet: &mut Mixnet, 147 | ) -> Option<(ReplyContext, Vec)> { 148 | let mut reply_context = ReplyContext { 149 | session_index: message.session_index, 150 | message_id: message.id, 151 | surbs: message.surbs, 152 | }; 153 | 154 | match self.states.entry(message.id) { 155 | Entry::Occupied(mut entry) => { 156 | match entry.get_mut() { 157 | ReplyState::Pending => trace!(target: self.config.log_target, 158 | "Ignoring repeat request with message ID {:x?}; currently handling", message.id), 159 | ReplyState::Complete { reply, last_post } => { 160 | let now = Instant::now(); 161 | let since_last = now.saturating_duration_since(*last_post); 162 | if since_last < self.config.cooldown { 163 | trace!(target: self.config.log_target, 164 | "Ignoring repeat request with message ID {:x?}; posted a reply {:.1}s ago", 165 | message.id, since_last.as_secs_f32()); 166 | } else { 167 | *last_post = now; 168 | reply_context.post_reply(reply, mixnet, &self.config); 169 | } 170 | }, 171 | } 172 | None 173 | }, 174 | 
Entry::Vacant(entry) => { 175 | entry.insert(ReplyState::Pending); 176 | self.maybe_evict(); 177 | Some((reply_context, message.data)) 178 | }, 179 | } 180 | } 181 | 182 | /// Abandon a request. This should be called if you do not wish to reply at this time. If 183 | /// [`insert`](Self::insert) is called again with a matching message (same ID), it will return 184 | /// `Some`, and you will have another chance to handle the request. 185 | pub fn abandon(&mut self, reply_context: ReplyContext) { 186 | if let Entry::Occupied(entry) = self.states.entry(reply_context.message_id) { 187 | match entry.get() { 188 | ReplyState::Pending => { 189 | entry.remove(); 190 | }, 191 | ReplyState::Complete { .. } => debug!( 192 | target: self.config.log_target, 193 | "Ignoring abandon of request with message ID {:x?}; already completed", 194 | reply_context.message_id 195 | ), 196 | } 197 | } 198 | } 199 | 200 | /// Complete a request. This will post the reply and cache it for repeat requests. 201 | pub fn complete( 202 | &mut self, 203 | mut reply_context: ReplyContext, 204 | data: Vec, 205 | mixnet: &mut Mixnet, 206 | ) { 207 | let state = match self.states.entry(reply_context.message_id) { 208 | Entry::Occupied(entry) => match entry.into_mut() { 209 | state @ ReplyState::Pending => state, 210 | ReplyState::Complete { .. 
} => { 211 | debug!(target: self.config.log_target, 212 | "Request with message ID {:x?} completed twice", 213 | reply_context.message_id); 214 | return 215 | }, 216 | }, 217 | Entry::Vacant(entry) => entry.insert(ReplyState::Pending), 218 | }; 219 | 220 | let reply = Reply::new(data); 221 | reply_context.post_reply(&reply, mixnet, &self.config); 222 | *state = ReplyState::Complete { reply, last_post: Instant::now() }; 223 | 224 | self.maybe_evict(); 225 | } 226 | } 227 | -------------------------------------------------------------------------------- /src/core/sphinx/crypto.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Key exchange, secret derivation, MAC computation, and encryption. 
22 | 23 | use super::{ 24 | delay::{DelaySeed, DELAY_SEED_SIZE}, 25 | packet::{Actions, KxPublic, Mac, Payload, MAX_HOPS}, 26 | }; 27 | use arrayref::array_refs; 28 | use arrayvec::ArrayVec; 29 | use blake2::{ 30 | digest::{ 31 | consts::{U16, U32, U64}, 32 | generic_array::{sequence::Concat, GenericArray}, 33 | FixedOutput, Mac as DigestMac, 34 | }, 35 | Blake2bMac, 36 | }; 37 | use c2_chacha::{ 38 | stream_cipher::{NewStreamCipher, SyncStreamCipher}, 39 | ChaCha20, 40 | }; 41 | use curve25519_dalek::{scalar::clamp_integer, MontgomeryPoint, Scalar}; 42 | use lioness::LionessDefault; 43 | use rand::{CryptoRng, Rng}; 44 | 45 | const KX_BLINDING_FACTOR_PERSONAL: &[u8; 16] = b"sphinx-blind-fac"; 46 | const SMALL_DERIVED_SECRETS_PERSONAL: &[u8; 16] = b"sphinx-small-d-s"; 47 | const PAYLOAD_ENCRYPTION_KEY_PERSONAL: &[u8; 16] = b"sphinx-pl-en-key"; 48 | 49 | /// Size in bytes of a [`SharedSecret`]. 50 | pub const SHARED_SECRET_SIZE: usize = 32; 51 | /// Either produced by key exchange or shared in a SURB. 52 | pub type SharedSecret = [u8; SHARED_SECRET_SIZE]; 53 | 54 | //////////////////////////////////////////////////////////////////////////////// 55 | // Key exchange 56 | //////////////////////////////////////////////////////////////////////////////// 57 | 58 | /// An _unclamped_ key-exchange secret key. 59 | pub type KxSecret = [u8; 32]; 60 | 61 | /// Generate an _unclamped_ key-exchange secret key. 62 | pub fn gen_kx_secret(rng: &mut (impl Rng + CryptoRng)) -> KxSecret { 63 | let mut secret = [0; 32]; 64 | rng.fill_bytes(&mut secret); 65 | secret 66 | } 67 | 68 | /// Derive the public key corresponding to a secret key. 69 | pub fn derive_kx_public(kx_secret: &KxSecret) -> KxPublic { 70 | MontgomeryPoint::mul_base_clamped(*kx_secret).to_bytes() 71 | } 72 | 73 | /// Returns the _unclamped_ blinding factor. 
74 | fn derive_kx_blinding_factor(kx_public: &KxPublic, kx_shared_secret: &SharedSecret) -> [u8; 32] { 75 | let kx_public: &GenericArray<_, _> = kx_public.into(); 76 | let key = kx_public.concat((*kx_shared_secret).into()); 77 | let h = Blake2bMac::::new_with_salt_and_personal(&key, b"", KX_BLINDING_FACTOR_PERSONAL) 78 | .expect("Key, salt, and personalisation sizes are fixed and small enough"); 79 | h.finalize().into_bytes().into() 80 | } 81 | 82 | /// Apply the blinding factor to `kx_public`. 83 | pub fn blind_kx_public(kx_public: &KxPublic, kx_shared_secret: &SharedSecret) -> KxPublic { 84 | MontgomeryPoint(*kx_public) 85 | .mul_clamped(derive_kx_blinding_factor(kx_public, kx_shared_secret)) 86 | .to_bytes() 87 | } 88 | 89 | pub fn derive_kx_shared_secret(kx_public: &KxPublic, kx_secret: &KxSecret) -> SharedSecret { 90 | MontgomeryPoint(*kx_public).mul_clamped(*kx_secret).to_bytes() 91 | } 92 | 93 | /// Generate a public key to go in a packet and the corresponding shared secrets for each hop. 
94 | pub fn gen_kx_public_and_shared_secrets( 95 | kx_public: &mut KxPublic, 96 | kx_shared_secrets: &mut ArrayVec, 97 | rng: &mut (impl Rng + CryptoRng), 98 | their_kx_publics: &[KxPublic], 99 | ) { 100 | let kx_secret = gen_kx_secret(rng); 101 | *kx_public = derive_kx_public(&kx_secret); 102 | 103 | let mut kx_secret = Scalar::from_bytes_mod_order(clamp_integer(kx_secret)); 104 | let mut kx_public = *kx_public; 105 | for (i, their_kx_public) in their_kx_publics.iter().enumerate() { 106 | if i != 0 { 107 | if i != 1 { 108 | // An alternative would be to use blind_kx_public, but this is much cheaper 109 | kx_public = MontgomeryPoint::mul_base(&kx_secret).to_bytes(); 110 | } 111 | let kx_shared_secret = kx_shared_secrets.last().expect( 112 | "On at least second iteration of loop, shared secret pushed every iteration", 113 | ); 114 | kx_secret *= Scalar::from_bytes_mod_order(clamp_integer(derive_kx_blinding_factor( 115 | &kx_public, 116 | kx_shared_secret, 117 | ))); 118 | } 119 | kx_shared_secrets.push((MontgomeryPoint(*their_kx_public) * kx_secret).to_bytes()); 120 | } 121 | } 122 | 123 | //////////////////////////////////////////////////////////////////////////////// 124 | // Additional secret derivation 125 | //////////////////////////////////////////////////////////////////////////////// 126 | 127 | fn derive_secret(derived: &mut [u8], shared_secret: &SharedSecret, personal: &[u8; 16]) { 128 | for (i, chunk) in derived.chunks_mut(64).enumerate() { 129 | // This is the construction libsodium uses for crypto_kdf_derive_from_key; see 130 | // https://doc.libsodium.org/key_derivation/ 131 | let h = Blake2bMac::::new_with_salt_and_personal( 132 | shared_secret, 133 | &i.to_le_bytes(), 134 | personal, 135 | ) 136 | .expect("Key, salt, and personalisation sizes are fixed and small enough"); 137 | h.finalize_into(GenericArray::from_mut_slice(chunk)); 138 | } 139 | } 140 | 141 | const MAC_KEY_SIZE: usize = 16; 142 | pub type MacKey = [u8; MAC_KEY_SIZE]; 143 | const 
ACTIONS_ENCRYPTION_KEY_SIZE: usize = 32; 144 | pub type ActionsEncryptionKey = [u8; ACTIONS_ENCRYPTION_KEY_SIZE]; 145 | const SMALL_DERIVED_SECRETS_SIZE: usize = 146 | MAC_KEY_SIZE + ACTIONS_ENCRYPTION_KEY_SIZE + DELAY_SEED_SIZE; 147 | 148 | pub struct SmallDerivedSecrets([u8; SMALL_DERIVED_SECRETS_SIZE]); 149 | 150 | impl SmallDerivedSecrets { 151 | pub fn new(shared_secret: &SharedSecret) -> Self { 152 | let mut derived = [0; SMALL_DERIVED_SECRETS_SIZE]; 153 | derive_secret(&mut derived, shared_secret, SMALL_DERIVED_SECRETS_PERSONAL); 154 | Self(derived) 155 | } 156 | 157 | fn split(&self) -> (&MacKey, &ActionsEncryptionKey, &DelaySeed) { 158 | array_refs![&self.0, MAC_KEY_SIZE, ACTIONS_ENCRYPTION_KEY_SIZE, DELAY_SEED_SIZE] 159 | } 160 | 161 | pub fn mac_key(&self) -> &MacKey { 162 | self.split().0 163 | } 164 | 165 | pub fn actions_encryption_key(&self) -> &ActionsEncryptionKey { 166 | self.split().1 167 | } 168 | 169 | pub fn delay_seed(&self) -> &DelaySeed { 170 | self.split().2 171 | } 172 | } 173 | 174 | pub const PAYLOAD_ENCRYPTION_KEY_SIZE: usize = 192; 175 | pub type PayloadEncryptionKey = [u8; PAYLOAD_ENCRYPTION_KEY_SIZE]; 176 | 177 | pub fn derive_payload_encryption_key(shared_secret: &SharedSecret) -> PayloadEncryptionKey { 178 | let mut derived = [0; PAYLOAD_ENCRYPTION_KEY_SIZE]; 179 | derive_secret(&mut derived, shared_secret, PAYLOAD_ENCRYPTION_KEY_PERSONAL); 180 | derived 181 | } 182 | 183 | //////////////////////////////////////////////////////////////////////////////// 184 | // MAC computation 185 | //////////////////////////////////////////////////////////////////////////////// 186 | 187 | pub fn compute_mac(actions: &[u8], pad: &[u8], key: &MacKey) -> Mac { 188 | let mut h = Blake2bMac::::new_from_slice(key).expect("Key size is fixed and small enough"); 189 | h.update(actions); 190 | h.update(pad); 191 | h.finalize().into_bytes().into() 192 | } 193 | 194 | pub fn mac_ok(mac: &Mac, actions: &Actions, key: &MacKey) -> bool { 195 | let mut h = 
Blake2bMac::::new_from_slice(key).expect("Key size is fixed and small enough"); 196 | h.update(actions); 197 | h.verify(mac.into()).is_ok() 198 | } 199 | 200 | //////////////////////////////////////////////////////////////////////////////// 201 | // Actions encryption 202 | //////////////////////////////////////////////////////////////////////////////// 203 | 204 | pub fn apply_actions_encryption_keystream(data: &mut [u8], key: &ActionsEncryptionKey) { 205 | // Key is only used once, so fine for nonce to be 0 206 | let mut c = ChaCha20::new(key.into(), &[0; 8].into()); 207 | c.apply_keystream(data); 208 | } 209 | 210 | pub fn apply_keystream(data: &mut [u8], keystream: &[u8]) { 211 | for (d, k) in data.iter_mut().zip(keystream) { 212 | *d ^= *k; 213 | } 214 | } 215 | 216 | //////////////////////////////////////////////////////////////////////////////// 217 | // Payload encryption 218 | //////////////////////////////////////////////////////////////////////////////// 219 | 220 | pub fn encrypt_payload(payload: &mut Payload, key: &PayloadEncryptionKey) { 221 | let l = LionessDefault::new_raw(key); 222 | l.encrypt(payload).expect("Payload size is fixed and large enough"); 223 | } 224 | 225 | pub fn decrypt_payload(payload: &mut Payload, key: &PayloadEncryptionKey) { 226 | let l = LionessDefault::new_raw(key); 227 | l.decrypt(payload).expect("Payload size is fixed and large enough"); 228 | } 229 | -------------------------------------------------------------------------------- /src/core/sphinx/build.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 
2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Sphinx packet building. 22 | 23 | use super::{ 24 | crypto::*, 25 | delay::Delay, 26 | packet::*, 27 | target::{MixnodeIndex, Target}, 28 | }; 29 | use arrayref::{array_mut_ref, array_refs, mut_array_refs}; 30 | use arrayvec::ArrayVec; 31 | use rand::{CryptoRng, Rng}; 32 | 33 | fn mut_arr_at(slice: &mut [T], offset: usize) -> &mut [T; N] { 34 | (&mut slice[offset..offset + N]) 35 | .try_into() 36 | .expect("Slice length is fixed and matches array length") 37 | } 38 | 39 | enum PacketKind<'a> { 40 | Request, 41 | Reply(&'a SurbId), 42 | Cover(Option<&'a CoverId>), 43 | } 44 | 45 | /// Build a Sphinx header. `targets` should not include the first hop. At most one target may be a 46 | /// peer ID; all others should be mixnode indices. Returns the total forwarding delay across all 47 | /// hops. 
48 | fn build_header( 49 | header: &mut Header, 50 | kx_shared_secrets: &mut ArrayVec, 51 | rng: &mut (impl Rng + CryptoRng), 52 | targets: &[Target], 53 | their_kx_publics: &[KxPublic], 54 | kind: PacketKind, 55 | ) -> Delay { 56 | debug_assert_eq!(targets.len() + 1, their_kx_publics.len()); 57 | debug_assert!(their_kx_publics.len() <= MAX_HOPS); 58 | 59 | let (kx_public, mac_plus_actions) = 60 | mut_array_refs![header, KX_PUBLIC_SIZE, MAC_SIZE + ACTIONS_SIZE]; 61 | 62 | gen_kx_public_and_shared_secrets(kx_public, kx_shared_secrets, rng, their_kx_publics); 63 | 64 | // Routing actions, and current write offset into 65 | let actions = array_mut_ref![mac_plus_actions, MAC_SIZE, ACTIONS_SIZE]; 66 | let mut offset = 0; 67 | 68 | // Total forwarding delay across all hops 69 | let mut total_delay = Delay::zero(); 70 | 71 | // We loop over the hops forward and then backward. Data that is generated by the first pass 72 | // for the second pass is stashed here. The last hop is handled specially so is excluded here. 73 | struct Hop { 74 | mac_key: MacKey, 75 | keystream: [u8; ACTIONS_SIZE + MAX_ACTIONS_PAD_SIZE], 76 | start_offset: u16, // Starting offset of hop in actions 77 | } 78 | let mut hops: ArrayVec = ArrayVec::new(); 79 | 80 | // Padding for length invariance, generated from the keystreams. This is only needed for 81 | // computing the MACs. 
82 | let mut pad = [0; ACTIONS_SIZE - RAW_ACTION_SIZE]; 83 | 84 | // Loop over hops forward (excluding the last hop) 85 | for (target, kx_shared_secret) in targets.iter().zip(kx_shared_secrets.iter()) { 86 | // Write forward action 87 | let start_offset = offset; 88 | offset += RAW_ACTION_SIZE; 89 | let raw_action = match target { 90 | Target::MixnodeIndex(mixnode_index) => mixnode_index.get(), 91 | Target::PeerId(peer_id) => { 92 | *mut_arr_at(actions, offset) = *peer_id; 93 | offset += PEER_ID_SIZE; 94 | RAW_ACTION_FORWARD_TO_PEER_ID 95 | }, 96 | }; 97 | *mut_arr_at(actions, start_offset) = raw_action.to_le_bytes(); 98 | 99 | // The MAC for the next hop can't be computed yet. Leave a gap for it. Note that this is 100 | // always the last part of the action; this is assumed by the backward loop. 101 | offset += MAC_SIZE; 102 | 103 | let sds = SmallDerivedSecrets::new(kx_shared_secret); 104 | 105 | total_delay += Delay::exp(sds.delay_seed()); 106 | 107 | hops.push(Hop { 108 | mac_key: *sds.mac_key(), 109 | keystream: [0; ACTIONS_SIZE + MAX_ACTIONS_PAD_SIZE], 110 | start_offset: start_offset as u16, 111 | }); 112 | let keystream = &mut hops.last_mut().expect("Just pushed, so not empty").keystream; 113 | apply_actions_encryption_keystream(keystream, sds.actions_encryption_key()); 114 | 115 | // At the end of the loop, pad will contain the padding as seen by the last hop (before 116 | // decryption) 117 | apply_keystream(&mut pad[..offset], &keystream[ACTIONS_SIZE - start_offset..]); 118 | } 119 | 120 | // Handle the last hop 121 | { 122 | // Write deliver action 123 | let start_offset = offset; 124 | offset += RAW_ACTION_SIZE; 125 | let raw_action = match kind { 126 | PacketKind::Request => RAW_ACTION_DELIVER_REQUEST, 127 | PacketKind::Reply(surb_id) => { 128 | *mut_arr_at(actions, offset) = *surb_id; 129 | offset += SURB_ID_SIZE; 130 | RAW_ACTION_DELIVER_REPLY 131 | }, 132 | PacketKind::Cover(None) => RAW_ACTION_DELIVER_COVER, 133 | PacketKind::Cover(Some(cover_id)) 
=> { 134 | *mut_arr_at(actions, offset) = *cover_id; 135 | offset += COVER_ID_SIZE; 136 | RAW_ACTION_DELIVER_COVER_WITH_ID 137 | }, 138 | }; 139 | *mut_arr_at(actions, start_offset) = raw_action.to_le_bytes(); 140 | 141 | // Fill the remainder of the routing actions field with random bytes, so the last hop 142 | // cannot determine the path length 143 | rng.fill_bytes(&mut actions[offset..]); 144 | 145 | let sds = 146 | SmallDerivedSecrets::new(kx_shared_secrets.last().expect("There is at least one hop")); 147 | 148 | // Encrypt the deliver action (and the random bytes, although this isn't really necessary). 149 | // Note that the padding is not touched here; it is generated entirely from the keystreams 150 | // for earlier hops, and effectively gets scrambled even further when the last hop 151 | // "decrypts" it. 152 | apply_actions_encryption_keystream( 153 | &mut actions[start_offset..], 154 | sds.actions_encryption_key(), 155 | ); 156 | 157 | // Compute the MAC for the last hop and place it in the appropriate place (right before the 158 | // deliver action) 159 | *mut_arr_at(mac_plus_actions, start_offset) = 160 | compute_mac(&actions[start_offset..], &pad[..start_offset], sds.mac_key()); 161 | } 162 | 163 | // Loop over hops backward (excluding the last hop, which has already been handled) 164 | for hop in hops.iter().rev() { 165 | let start_offset = hop.start_offset as usize; 166 | 167 | // Encrypt the actions and padding for the hop 168 | apply_keystream(&mut mac_plus_actions[MAC_SIZE + start_offset..], &hop.keystream); 169 | apply_keystream(&mut pad[..start_offset], &hop.keystream[ACTIONS_SIZE - start_offset..]); 170 | 171 | // Compute the MAC for the hop and place it in the appropriate place (right before the hop 172 | // action) 173 | *mut_arr_at(mac_plus_actions, start_offset) = compute_mac( 174 | &mac_plus_actions[MAC_SIZE + start_offset..], 175 | &pad[..start_offset], 176 | &hop.mac_key, 177 | ); 178 | } 179 | 180 | total_delay 181 | } 182 | 183 | /// 
Returns a mutable reference to the payload data in `packet`. This is only really useful for 184 | /// filling in the payload data prior to calling [`complete_request_packet`] or 185 | /// [`complete_reply_packet`]. 186 | pub fn mut_payload_data(packet: &mut Packet) -> &mut PayloadData { 187 | array_mut_ref![packet, HEADER_SIZE, PAYLOAD_DATA_SIZE] 188 | } 189 | 190 | /// Complete a Sphinx request packet. The unencrypted payload data should be written to 191 | /// [`mut_payload_data(packet)`](mut_payload_data) before calling this function. `targets` should 192 | /// not include the first hop. At most one target may be a peer ID; all others should be mixnode 193 | /// indices. Returns the total forwarding delay across all hops. 194 | pub fn complete_request_packet( 195 | packet: &mut Packet, 196 | rng: &mut (impl Rng + CryptoRng), 197 | targets: &[Target], 198 | their_kx_publics: &[KxPublic], 199 | ) -> Delay { 200 | debug_assert_eq!(targets.len() + 1, their_kx_publics.len()); 201 | debug_assert!(their_kx_publics.len() <= MAX_HOPS); 202 | 203 | let (header, payload) = mut_array_refs![packet, HEADER_SIZE, PAYLOAD_SIZE]; 204 | 205 | // Build the header 206 | let mut kx_shared_secrets = ArrayVec::new(); 207 | let total_delay = build_header( 208 | header, 209 | &mut kx_shared_secrets, 210 | rng, 211 | targets, 212 | their_kx_publics, 213 | PacketKind::Request, 214 | ); 215 | 216 | // Force the payload tag 217 | *array_mut_ref![payload, PAYLOAD_DATA_SIZE, PAYLOAD_TAG_SIZE] = PAYLOAD_TAG; 218 | 219 | // Encrypt the payload 220 | for kx_shared_secret in kx_shared_secrets.iter().rev() { 221 | encrypt_payload(payload, &derive_payload_encryption_key(kx_shared_secret)); 222 | } 223 | 224 | total_delay 225 | } 226 | 227 | /// Size in bytes of a [`Surb`]. 228 | pub const SURB_SIZE: usize = RAW_MIXNODE_INDEX_SIZE + HEADER_SIZE + SHARED_SECRET_SIZE; 229 | /// A "single-use reply block". This should be treated as an opaque type. 
230 | pub type Surb = [u8; SURB_SIZE]; 231 | 232 | pub type SurbPayloadEncryptionKeys = ArrayVec; 233 | 234 | /// Build a SURB. Note that unlike in the Sphinx paper, the last hop (which should be this node) 235 | /// decrypts the payload, rather than adding another layer of encryption and forwarding to the 236 | /// "destination". So the number of payload encryption keys matches the number of hops. The first 237 | /// hop must have a mixnode index, specified by `first_mixnode_index`. `targets` specifies the 238 | /// remaining hops. At most one target may be a peer ID; all others should be mixnode indices. 239 | /// Returns the total forwarding delay across all hops. 240 | pub fn build_surb( 241 | surb: &mut Surb, 242 | payload_encryption_keys: &mut SurbPayloadEncryptionKeys, 243 | rng: &mut (impl Rng + CryptoRng), 244 | first_mixnode_index: MixnodeIndex, 245 | targets: &[Target], 246 | their_kx_publics: &[KxPublic], 247 | id: &SurbId, 248 | ) -> Delay { 249 | debug_assert_eq!(targets.len() + 1, their_kx_publics.len()); 250 | debug_assert!(their_kx_publics.len() <= MAX_HOPS); 251 | 252 | let (raw_first_mixnode_index, header, shared_secret) = 253 | mut_array_refs![surb, RAW_MIXNODE_INDEX_SIZE, HEADER_SIZE, SHARED_SECRET_SIZE]; 254 | 255 | *raw_first_mixnode_index = first_mixnode_index.get().to_le_bytes(); 256 | 257 | // Build the header 258 | let mut kx_shared_secrets = ArrayVec::new(); 259 | let total_delay = build_header( 260 | header, 261 | &mut kx_shared_secrets, 262 | rng, 263 | targets, 264 | their_kx_publics, 265 | PacketKind::Reply(id), 266 | ); 267 | 268 | // Generate the payload encryption keys. The first key is derived from a totally random shared 269 | // secret, the rest are derived from the key-exchange shared secrets. Note that we _could_ just 270 | // return the shared secrets here and derive the encryption keys in decrypt_reply_payload. 
This 271 | // wouldn't save much time/space though, and it seems better from a security perspective to not 272 | // keep the shared secrets around. 273 | rng.fill_bytes(shared_secret); 274 | payload_encryption_keys.push(derive_payload_encryption_key(shared_secret)); 275 | kx_shared_secrets.pop(); // Last hop does not encrypt 276 | for kx_shared_secret in &kx_shared_secrets { 277 | payload_encryption_keys.push(derive_payload_encryption_key(kx_shared_secret)); 278 | } 279 | 280 | total_delay 281 | } 282 | 283 | /// Complete a Sphinx reply packet. The unencrypted payload data should be written to 284 | /// [`mut_payload_data(packet)`](mut_payload_data) before calling this function. `surb` should be a 285 | /// SURB built by the receiving node using [`build_surb`]. The mixnode index of the first hop is 286 | /// returned. Will only return [`None`] if the SURB is malformed. 287 | pub fn complete_reply_packet(packet: &mut Packet, surb: &Surb) -> Option { 288 | let (header, payload) = mut_array_refs![packet, HEADER_SIZE, PAYLOAD_SIZE]; 289 | let (raw_first_mixnode_index, surb_header, shared_secret) = 290 | array_refs![surb, RAW_MIXNODE_INDEX_SIZE, HEADER_SIZE, SHARED_SECRET_SIZE]; 291 | 292 | // Copy the header from the SURB across as-is. We can't really check it; we just have to trust 293 | // it. 294 | *header = *surb_header; 295 | 296 | // Force the payload tag 297 | *array_mut_ref![payload, PAYLOAD_DATA_SIZE, PAYLOAD_TAG_SIZE] = PAYLOAD_TAG; 298 | 299 | // Encrypt the payload. Actually "decrypt" to make decrypt_reply_payload slightly simpler. 300 | decrypt_payload(payload, &derive_payload_encryption_key(shared_secret)); 301 | 302 | // Return the mixnode index of the first hop from the SURB 303 | let raw_first_mixnode_index = RawMixnodeIndex::from_le_bytes(*raw_first_mixnode_index); 304 | raw_first_mixnode_index.try_into().ok() 305 | } 306 | 307 | /// Build a Sphinx cover packet. `targets` should not include the first hop. 
At most one target may 308 | /// be a peer ID; all others should be mixnode indices. Returns the total forwarding delay across 309 | /// all hops. 310 | pub fn build_cover_packet( 311 | packet: &mut Packet, 312 | rng: &mut (impl Rng + CryptoRng), 313 | targets: &[Target], 314 | their_kx_publics: &[KxPublic], 315 | id: Option<&CoverId>, 316 | ) -> Delay { 317 | debug_assert_eq!(targets.len() + 1, their_kx_publics.len()); 318 | debug_assert!(their_kx_publics.len() <= MAX_HOPS); 319 | 320 | let (header, payload) = mut_array_refs![packet, HEADER_SIZE, PAYLOAD_SIZE]; 321 | 322 | // Build the header 323 | let mut kx_shared_secrets = ArrayVec::new(); 324 | let total_delay = build_header( 325 | header, 326 | &mut kx_shared_secrets, 327 | rng, 328 | targets, 329 | their_kx_publics, 330 | PacketKind::Cover(id), 331 | ); 332 | 333 | // Randomise the payload. It will be ignored by the destination, but needs to be 334 | // indistinguishable from a normal encrypted payload. 335 | rng.fill_bytes(payload); 336 | 337 | total_delay 338 | } 339 | -------------------------------------------------------------------------------- /src/core/topology.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 
12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet topology. A new [`Topology`] is created for every session. 22 | 23 | use super::sphinx::{ 24 | KxPublic, MixnodeIndex, PeerId, RawMixnodeIndex, Target, MAX_HOPS, MAX_MIXNODE_INDEX, 25 | }; 26 | use arrayvec::ArrayVec; 27 | use either::Either; 28 | use rand::{seq::SliceRandom, CryptoRng, Rng}; 29 | use std::{ 30 | cmp::{max, min}, 31 | fmt, 32 | }; 33 | 34 | /// Per-mixnode data. 35 | #[derive(Clone)] 36 | pub struct Mixnode { 37 | /// Key-exchange public key for the mixnode. 38 | pub kx_public: KxPublic, 39 | /// Peer ID for the mixnode. 40 | pub peer_id: PeerId, 41 | /// Extra data; for use by the crate user. 42 | pub extra: X, 43 | } 44 | 45 | enum LocalNode { 46 | /// The local node is a mixnode, with the specified index. 47 | Mixnode(MixnodeIndex), 48 | /// The local node is not a mixnode. It should attempt to connect to the specified gateway 49 | /// mixnodes. 50 | NonMixnode(Vec), 51 | } 52 | 53 | /// Topology error. 54 | #[derive(Debug, thiserror::Error)] 55 | pub enum TopologyErr { 56 | /// An out-of-range mixnode index was encountered. 57 | #[error("Bad mixnode index ({0})")] 58 | BadMixnodeIndex(MixnodeIndex), 59 | /// There aren't enough mixnodes. 60 | #[error("Too few mixnodes; this should have been caught earlier")] 61 | TooFewMixnodes, 62 | /// The local node has not managed to connect to any gateway mixnodes. 
63 | #[error("The local node has not managed to connect to any gateway mixnodes")] 64 | NoConnectedGatewayMixnodes, 65 | } 66 | 67 | pub struct Topology { 68 | mixnodes: Vec>, 69 | local_kx_public: KxPublic, 70 | local_node: LocalNode, 71 | } 72 | 73 | impl Topology { 74 | /// `mixnodes` must be no longer than [`MAX_MIXNODE_INDEX + 1`](MAX_MIXNODE_INDEX). 75 | pub fn new( 76 | rng: &mut impl Rng, 77 | mixnodes: Vec>, 78 | local_kx_public: &KxPublic, 79 | num_gateway_mixnodes: u32, 80 | ) -> Self { 81 | debug_assert!(mixnodes.len() <= (MAX_MIXNODE_INDEX + 1) as usize); 82 | 83 | // Determine if the local node is a mixnode. It is possible for another node to publish our 84 | // key-exchange public key as theirs, possibly resulting in a bogus index here. This isn't 85 | // particularly harmful so we don't bother doing anything about it: 86 | // 87 | // - It might result in us thinking we're in the mixnode set when we're really not. Note 88 | // that this situation can only occur if we were trying to register anyway; if we weren't, 89 | // we wouldn't have even generated our key-exchange keys before session registration 90 | // ended. 91 | // - We might attempt to connect to ourselves or include ourselves as a hop in packets we 92 | // send. While this is usually avoided, it isn't a big deal. 93 | let local_node = mixnodes 94 | .iter() 95 | .position(|mixnode| &mixnode.kx_public == local_kx_public) 96 | .map_or_else( 97 | || { 98 | // Local node is not a mixnode. Pick some gateway mixnodes to connect to. 
99 | LocalNode::NonMixnode( 100 | rand::seq::index::sample( 101 | rng, 102 | mixnodes.len(), 103 | min(num_gateway_mixnodes as usize, mixnodes.len()), 104 | ) 105 | .iter() 106 | .map(|index| { 107 | index 108 | .try_into() 109 | .expect("Topology::new() contract limits size of mixnode set") 110 | }) 111 | .collect(), 112 | ) 113 | }, 114 | |index| { 115 | // Local node is a mixnode 116 | LocalNode::Mixnode( 117 | index 118 | .try_into() 119 | .expect("Topology::new() contract limits size of mixnode set"), 120 | ) 121 | }, 122 | ); 123 | 124 | Self { mixnodes, local_kx_public: *local_kx_public, local_node } 125 | } 126 | 127 | pub fn is_mixnode(&self) -> bool { 128 | matches!(self.local_node, LocalNode::Mixnode(_)) 129 | } 130 | 131 | pub fn reserved_peers(&self) -> impl Iterator> { 132 | let indices = match &self.local_node { 133 | LocalNode::Mixnode(local_index) => Either::Left({ 134 | // Connect to all other mixnodes (ie exclude the local node) 135 | let num = self.mixnodes.len() as RawMixnodeIndex; 136 | (0..local_index.get()).chain((local_index.get() + 1)..num) 137 | }), 138 | LocalNode::NonMixnode(gateway_indices) => 139 | Either::Right(gateway_indices.iter().map(|index| index.get())), 140 | }; 141 | indices.map(|index| &self.mixnodes[index as usize]) 142 | } 143 | 144 | pub fn mixnode_index_to_peer_id(&self, index: MixnodeIndex) -> Result { 145 | self.mixnodes 146 | .get(index.get() as usize) 147 | .map(|mixnode| mixnode.peer_id) 148 | .ok_or(TopologyErr::BadMixnodeIndex(index)) 149 | } 150 | 151 | pub fn target_to_peer_id(&self, target: &Target) -> Result { 152 | match target { 153 | Target::MixnodeIndex(index) => self.mixnode_index_to_peer_id(*index), 154 | Target::PeerId(peer_id) => Ok(*peer_id), 155 | } 156 | } 157 | } 158 | 159 | impl fmt::Display for Topology { 160 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 161 | match &self.local_node { 162 | LocalNode::Mixnode(local_index) => write!(fmt, "Local node is mixnode {local_index}"), 163 | 
LocalNode::NonMixnode(gateway_indices) => { 164 | write!(fmt, "Local node is not a mixnode; gateway mixnodes are ")?; 165 | for (i, gateway_index) in gateway_indices.iter().enumerate() { 166 | if i == 0 { 167 | gateway_index.fmt(fmt)?; 168 | } else { 169 | write!(fmt, ", {gateway_index}")?; 170 | } 171 | } 172 | Ok(()) 173 | }, 174 | } 175 | } 176 | } 177 | 178 | /// A trait for querying the peer ID and connectivity of the local node. 179 | pub trait NetworkStatus { 180 | /// Returns the peer ID of the local node. 181 | fn local_peer_id(&self) -> PeerId; 182 | /// Returns `true` iff the local node is currently connected to the specified peer. 183 | fn is_connected(&self, peer_id: &PeerId) -> bool; 184 | } 185 | 186 | const MAX_CONNECTED_GATEWAY_INDICES: usize = 5; 187 | 188 | pub enum RouteKind { 189 | /// Route begins at the local node and ends at the specified mixnode. 190 | ToMixnode(MixnodeIndex), 191 | /// Route begins at the specified mixnode and ends at the local node. 192 | FromMixnode(MixnodeIndex), 193 | /// Route begins and ends at the local node. 194 | Loop, 195 | } 196 | 197 | struct UsedIndices(ArrayVec); 198 | 199 | impl UsedIndices { 200 | fn new() -> Self { 201 | Self(ArrayVec::new()) 202 | } 203 | 204 | fn insert(&mut self, index: MixnodeIndex) { 205 | match self.0.iter().position(|used_index| *used_index >= index) { 206 | Some(i) => 207 | if self.0[i] != index { 208 | self.0.insert(i, index); 209 | }, 210 | None => self.0.push(index), 211 | } 212 | } 213 | 214 | fn iter(&self) -> impl ExactSizeIterator + '_ { 215 | self.0.iter().copied() 216 | } 217 | 218 | fn as_option(&self) -> Option { 219 | debug_assert!(self.0.len() <= 1); 220 | self.0.first().copied() 221 | } 222 | } 223 | 224 | pub struct RouteGenerator<'topology, X> { 225 | topology: &'topology Topology, 226 | local_peer_id: PeerId, 227 | /// Always empty if the local node is a mixnode. Otherwise, the subset of the gateway mixnodes 228 | /// from the topology that are currently connected. 
229 | connected_gateway_indices: ArrayVec, 230 | } 231 | 232 | impl<'topology, X> RouteGenerator<'topology, X> { 233 | pub fn new(topology: &'topology Topology, ns: &dyn NetworkStatus) -> Self { 234 | let connected_gateway_indices = match &topology.local_node { 235 | LocalNode::Mixnode(_) => ArrayVec::new(), 236 | // If we're not a mixnode, we should have attempted to connect to a number of "gateway" 237 | // mixnodes. As we compete with other nodes for slots we might not have managed to 238 | // connect to all of them. Check which ones we managed to connect to. 239 | LocalNode::NonMixnode(gateway_indices) => gateway_indices 240 | .iter() 241 | .copied() 242 | .filter(|gateway_index| { 243 | let mixnode = &topology.mixnodes[gateway_index.get() as usize]; 244 | ns.is_connected(&mixnode.peer_id) 245 | }) 246 | .take(MAX_CONNECTED_GATEWAY_INDICES) 247 | .collect(), 248 | }; 249 | 250 | Self { topology, local_peer_id: ns.local_peer_id(), connected_gateway_indices } 251 | } 252 | 253 | pub fn topology(&self) -> &'topology Topology { 254 | self.topology 255 | } 256 | 257 | /// Choose a random mixnode and return its index. Exclude mixnodes with indices in 258 | /// `exclude_indices` from consideration. `exclude_indices` must be sorted and must not contain 259 | /// duplicate or invalid indices. 
260 | fn choose_mixnode_index( 261 | &self, 262 | rng: &mut (impl Rng + CryptoRng), 263 | exclude_indices: impl ExactSizeIterator, 264 | ) -> Result { 265 | let num_allowed = 266 | self.topology 267 | .mixnodes 268 | .len() 269 | .checked_sub(exclude_indices.len()) 270 | .expect("No duplicate or invalid indices in exclude_indices") as RawMixnodeIndex; 271 | if num_allowed == 0 { 272 | return Err(TopologyErr::TooFewMixnodes) 273 | } 274 | 275 | let mut chosen = rng.gen_range(0..num_allowed); 276 | for exclude_index in exclude_indices { 277 | if chosen >= exclude_index.get() { 278 | chosen += 1; 279 | } 280 | } 281 | // At most exclude_indices.len() added in loop, and chosen was less than 282 | // self.topology.mixnodes.len() - exclude_indices.len() before the loop 283 | debug_assert!((chosen as usize) < self.topology.mixnodes.len()); 284 | 285 | Ok(chosen.try_into().expect("Topology::new() contract limits size of mixnode set")) 286 | } 287 | 288 | /// Choose a random mixnode to send a message to and return its index. 
289 | pub fn choose_destination_index( 290 | &self, 291 | rng: &mut (impl Rng + CryptoRng), 292 | ) -> Result { 293 | let exclude_index = match self.topology.local_node { 294 | // If we're a mixnode, don't send to ourselves 295 | LocalNode::Mixnode(local_index) => Some(local_index), 296 | // If we're not a mixnode, and we are only connected to one gateway mixnode, don't send 297 | // to it; it must be the first hop, and we don't want to visit any node more than once 298 | LocalNode::NonMixnode(_) => match self.connected_gateway_indices.as_slice() { 299 | [gateway_index] => Some(*gateway_index), 300 | _ => None, 301 | }, 302 | }; 303 | self.choose_mixnode_index(rng, exclude_index.iter().copied()) 304 | } 305 | 306 | fn choose_connected_gateway_index( 307 | &self, 308 | rng: &mut (impl Rng + CryptoRng), 309 | try_exclude_index: Option, 310 | ) -> Result { 311 | try_exclude_index 312 | .and_then(|try_exclude_index| { 313 | if !self.connected_gateway_indices.iter().any(|index| *index == try_exclude_index) { 314 | // Mixnode to exclude is not a connected gateway 315 | return None 316 | } 317 | let (&first, rest) = self.connected_gateway_indices.split_first()?; 318 | let Some(&chosen) = rest.choose(rng) else { 319 | // Only one connected gateway; must use regardless of try_exclude_index 320 | return Some(first) 321 | }; 322 | // try_exclude_index is either first or in rest. If we chose it from rest, replace 323 | // it with first. 324 | Some(if chosen == try_exclude_index { first } else { chosen }) 325 | }) 326 | .or_else(|| self.connected_gateway_indices.choose(rng).copied()) 327 | .ok_or(TopologyErr::NoConnectedGatewayMixnodes) 328 | } 329 | 330 | /// Generate a route through the mixnet. Returns the mixnode index of the first hop. The route 331 | /// may contain more hops than `num_hops` if this is necessary. 
332 | pub fn gen_route( 333 | &self, 334 | targets: &mut ArrayVec, 335 | their_kx_publics: &mut ArrayVec, 336 | rng: &mut (impl Rng + CryptoRng), 337 | kind: RouteKind, 338 | num_hops: usize, 339 | ) -> Result { 340 | // Mixnode indices we've used already. We avoid using any mixnode more than once. 341 | let mut used_indices = UsedIndices::new(); 342 | 343 | let (from_local, to_local) = match kind { 344 | RouteKind::ToMixnode(index) => { 345 | used_indices.insert(index); 346 | (true, false) 347 | }, 348 | RouteKind::FromMixnode(index) => { 349 | used_indices.insert(index); 350 | (false, true) 351 | }, 352 | RouteKind::Loop => (true, true), 353 | }; 354 | 355 | // If we're a mixnode, make sure we don't include ourselves in the route 356 | debug_assert!(from_local || to_local); 357 | if let LocalNode::Mixnode(index) = self.topology.local_node { 358 | used_indices.insert(index); 359 | } 360 | 361 | // If we're not a mixnode, and the packet is to be sent by us, the first hop needs to be to 362 | // a connected gateway mixnode 363 | let special_first_index = match self.topology.local_node { 364 | LocalNode::NonMixnode(_) if from_local => { 365 | let index = self.choose_connected_gateway_index(rng, used_indices.as_option())?; 366 | used_indices.insert(index); 367 | Some(index) 368 | }, 369 | _ => None, 370 | }; 371 | 372 | // If we're not a mixnode, and the packet is to be received by us, the last hop needs to be 373 | // from a connected gateway mixnode 374 | let special_penultimate_index = match self.topology.local_node { 375 | LocalNode::NonMixnode(_) if to_local => { 376 | let index = self.choose_connected_gateway_index(rng, used_indices.as_option())?; 377 | used_indices.insert(index); 378 | Some(index) 379 | }, 380 | _ => None, 381 | }; 382 | 383 | let min_hops = [ 384 | // Special first hop 385 | special_first_index.is_some(), 386 | // Intermediate hop required if special first and penultimate hops to same mixnode 387 | // (this can only happen with 
RouteKind::Loop) 388 | special_first_index.is_some() && (special_first_index == special_penultimate_index), 389 | // Special penultimate hop 390 | special_penultimate_index.is_some(), 391 | // Last hop 392 | true, 393 | ] 394 | .iter() 395 | .map(|need_hop| *need_hop as usize) 396 | .sum(); 397 | let num_hops = max(num_hops, min_hops); 398 | 399 | let mut first_index = None; 400 | for i in 0..num_hops { 401 | // Figure out the hop target. This is either a mixnode index (Some) or the local node 402 | // (None). 403 | let mut index = match (i, num_hops - i, special_first_index, special_penultimate_index) 404 | { 405 | // Special first hop 406 | (0, _, Some(index), _) => Some(index), 407 | // Special penultimate hop 408 | (_, 2, _, Some(index)) => Some(index), 409 | // Last hop 410 | (_, 1, _, _) => match kind { 411 | RouteKind::ToMixnode(index) => Some(index), 412 | RouteKind::FromMixnode(_) => None, 413 | RouteKind::Loop => None, 414 | }, 415 | // Intermediate hop 416 | _ => { 417 | let index = self.choose_mixnode_index(rng, used_indices.iter())?; 418 | used_indices.insert(index); 419 | Some(index) 420 | }, 421 | }; 422 | 423 | // Push the key-exchange public key for the target 424 | their_kx_publics.push(match index { 425 | Some(index) => self.topology.mixnodes[index.get() as usize].kx_public, 426 | None => self.topology.local_kx_public, 427 | }); 428 | 429 | // Push the target 430 | if index.is_none() { 431 | // Target is the local node. If the local node is a mixnode, use its index. 
432 | if let LocalNode::Mixnode(local_index) = self.topology.local_node { 433 | index = Some(local_index); 434 | } 435 | } 436 | if i == 0 { 437 | // First hop should always be to a mixnode 438 | debug_assert!(index.is_some()); 439 | first_index = index; 440 | } else { 441 | targets.push(match index { 442 | Some(index) => Target::MixnodeIndex(index), 443 | None => Target::PeerId(self.local_peer_id), 444 | }); 445 | } 446 | } 447 | 448 | Ok(first_index.expect("At least one hop")) 449 | } 450 | } 451 | -------------------------------------------------------------------------------- /src/request_manager/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2022 Parity Technologies (UK) Ltd. 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a 4 | // copy of this software and associated documentation files (the "Software"), 5 | // to deal in the Software without restriction, including without limitation 6 | // the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 | // and/or sell copies of the Software, and to permit persons to whom the 8 | // Software is furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in 11 | // all copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 14 | // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! 
For more reliable delivery, a [`RequestManager`] can be used instead of calling 22 | //! [`Mixnet::post_request`] directly. A [`RequestManager`] serves as an additional buffer for 23 | //! requests, and will retry posting if requests are not removed within the expected time. 24 | 25 | mod config; 26 | mod post_queues; 27 | 28 | pub use self::config::Config; 29 | use self::post_queues::PostQueues; 30 | use super::core::{ 31 | MessageId, Mixnet, MixnodeIndex, NetworkStatus, PostErr, RelSessionIndex, Scattered, 32 | SessionIndex, SessionPhase, SessionStatus, 33 | }; 34 | use rand::RngCore; 35 | use std::{ 36 | cmp::max, 37 | collections::VecDeque, 38 | time::{Duration, Instant}, 39 | }; 40 | 41 | /// Requests managed by a [`RequestManager`] must implement this trait. 42 | pub trait Request { 43 | /// Opaque context type; a `&Context` is passed through [`RequestManager`] methods to `Request` 44 | /// methods. 45 | type Context; 46 | 47 | /// Call `f` with the message data. The same data must be provided every time this is called. 48 | fn with_data(&self, f: impl FnOnce(Scattered) -> T, context: &Self::Context) -> T; 49 | /// Returns the number of SURBs that should be sent along with the request. The same number 50 | /// must be returned every time this is called. 51 | fn num_surbs(&self, context: &Self::Context) -> usize; 52 | /// Returns a conservative estimate of the handling delay. That is, the maximum time it should 53 | /// take for the destination mixnode to process the request and post a reply. 54 | fn handling_delay(&self, message_id: &MessageId, context: &Self::Context) -> Duration; 55 | 56 | /// Called if an unrecoverable error is encountered while posting to the mixnet. 57 | fn handle_post_err(self, err: PostErr, context: &Self::Context); 58 | /// Called if we cannot retry posting because the configured limit has been reached. 
59 | fn handle_retry_limit_reached(self, context: &Self::Context); 60 | } 61 | 62 | struct RequestState { 63 | request: R, 64 | 65 | destinations_remaining: u32, 66 | attempts_remaining: u32, 67 | /// This is decremented on insertion into the post queue. 68 | posts_remaining: u32, 69 | 70 | message_id: MessageId, 71 | /// Should be `None` iff `destination_index` is `None`. 72 | session_index: Option, 73 | destination_index: Option, 74 | retry_deadline: Instant, 75 | } 76 | 77 | impl RequestState { 78 | /// `past` should be some instant in the past. 79 | fn new_destination(&mut self, past: Instant) { 80 | // Change message ID when changing destination; a message ID should only be known by the 81 | // sender and receiver. Additionally, if we're changing session, and happen to pick the 82 | // same node in the new session, we really need a different message ID to avoid old SURBs 83 | // getting used in the new session. 84 | // 85 | // Assuming that message IDs are used to identify replies, this will mean that we no longer 86 | // recognise replies from the previous destination. We only switch if there is an issue 87 | // with the previous destination (eg the session is ending, or it has not replied), so this 88 | // shouldn't matter much. TODO We could keep the old message ID around as well as the new 89 | // one and match against it in remove(). 90 | rand::thread_rng().fill_bytes(&mut self.message_id); 91 | self.session_index = None; 92 | self.destination_index = None; 93 | self.retry_deadline = past; 94 | } 95 | } 96 | 97 | /// Request manager state. The user is responsible for calling 98 | /// [`update_session_status`](Self::update_session_status), 99 | /// [`process_post_queues`](Self::process_post_queues), and 100 | /// [`pop_next_retry`](Self::pop_next_retry) at the appropriate times to make progress. 
101 | pub struct RequestManager { 102 | config: Config, 103 | created_at: Instant, 104 | session_status: SessionStatus, 105 | /// `post_queues.prev` should be empty if `session_status.current_index` is 0, or if 106 | /// previous-session requests are not allowed in the current phase. Similarly, 107 | /// `post_queues.current` should be empty if current-session requests are not allowed in the 108 | /// current phase. 109 | post_queues: PostQueues>, 110 | retry_queue: VecDeque>, 111 | next_retry_deadline_changed: bool, 112 | } 113 | 114 | impl> RequestManager { 115 | /// Create a new `RequestManager` with the given configuration. 116 | pub fn new(config: Config) -> Self { 117 | let capacity = config.capacity; 118 | Self { 119 | config, 120 | created_at: Instant::now(), 121 | session_status: SessionStatus { current_index: 0, phase: SessionPhase::CoverToCurrent }, 122 | post_queues: PostQueues::new(capacity), 123 | retry_queue: VecDeque::with_capacity(capacity), 124 | next_retry_deadline_changed: false, 125 | } 126 | } 127 | 128 | /// Update the current session index and phase. This should be called after 129 | /// [`Mixnet::set_session_status`]. This may post messages to `mixnet`. 130 | pub fn update_session_status( 131 | &mut self, 132 | mixnet: &mut Mixnet, 133 | ns: &dyn NetworkStatus, 134 | context: &C, 135 | ) { 136 | let session_status = mixnet.session_status(); 137 | if self.session_status == session_status { 138 | return 139 | } 140 | 141 | let prev_default_len = self.post_queues.default.len(); 142 | 143 | if self.session_status.current_index != session_status.current_index { 144 | self.post_queues.default.append(&mut self.post_queues.prev); // Clears prev 145 | if session_status.current_index.saturating_sub(self.session_status.current_index) == 1 { 146 | std::mem::swap(&mut self.post_queues.current, &mut self.post_queues.prev); 147 | } else { 148 | // Unexpected session index change. Mixnet core will warn about this, don't bother 149 | // warning again here. 
150 | self.post_queues.default.append(&mut self.post_queues.current); // Clears current 151 | } 152 | } 153 | 154 | if !session_status.phase.allow_requests_and_replies(RelSessionIndex::Current) { 155 | self.post_queues.default.append(&mut self.post_queues.current); // Clears current 156 | } 157 | if !session_status.phase.allow_requests_and_replies(RelSessionIndex::Prev) { 158 | self.post_queues.default.append(&mut self.post_queues.prev); // Clears prev 159 | } 160 | 161 | for state in self.post_queues.default.iter_mut().skip(prev_default_len) { 162 | state.new_destination(self.created_at); 163 | } 164 | 165 | self.session_status = session_status; 166 | 167 | // The session status shouldn't change very often. For simplicity just retry posting in all 168 | // sessions, rather than trying to figure out if we can skip some. 169 | self.process_post_queues(mixnet, ns, context); 170 | } 171 | 172 | /// Returns `true` iff there is space for another request. 173 | pub fn has_space(&self) -> bool { 174 | let len = 175 | self.post_queues.iter().map(VecDeque::len).sum::() + self.retry_queue.len(); 176 | len < self.config.capacity 177 | } 178 | 179 | /// Insert a request. This should only be called if there is space (see 180 | /// [`has_space`](Self::has_space)). This may post messages to `mixnet`. 181 | /// 182 | /// A request is only removed when: 183 | /// 184 | /// - [`remove`](Self::remove) is called with the corresponding message ID. This would typically 185 | /// happen when a reply is received. 186 | /// - An unrecoverable error is encountered while posting to the mixnet. In this case, 187 | /// [`Request::handle_post_err`] is called. 188 | /// - The retry limit is reached. In this case, [`Request::handle_retry_limit_reached`] is 189 | /// called. 
190 | pub fn insert( 191 | &mut self, 192 | request: R, 193 | mixnet: &mut Mixnet, 194 | ns: &dyn NetworkStatus, 195 | context: &C, 196 | ) { 197 | debug_assert!(self.has_space()); 198 | let state = RequestState { 199 | request, 200 | 201 | destinations_remaining: self.config.num_destinations, 202 | attempts_remaining: 0, 203 | posts_remaining: 0, 204 | 205 | // The message ID will get generated when retry (below) calls state.new_destination() 206 | message_id: Default::default(), 207 | session_index: None, 208 | destination_index: None, 209 | retry_deadline: self.created_at, 210 | }; 211 | self.retry(state, mixnet, ns, context); 212 | } 213 | 214 | /// Remove a request. Typically this would be called when a reply is received. Returns `None` 215 | /// if there is no request with the given message ID. 216 | pub fn remove(&mut self, message_id: &MessageId) -> Option { 217 | for post_queue in self.post_queues.iter_mut() { 218 | if let Some(i) = post_queue.iter().position(|state| &state.message_id == message_id) { 219 | return Some(post_queue.remove(i).expect("i returned by position()").request) 220 | } 221 | } 222 | 223 | if let Some(i) = self.retry_queue.iter().position(|state| &state.message_id == message_id) { 224 | if i == 0 { 225 | self.next_retry_deadline_changed = true; 226 | } 227 | return Some(self.retry_queue.remove(i).expect("i returned by position()").request) 228 | } 229 | 230 | None 231 | } 232 | 233 | fn process_post_queue( 234 | &mut self, 235 | rel_session_index: Option, 236 | mixnet: &mut Mixnet, 237 | ns: &dyn NetworkStatus, 238 | context: &C, 239 | ) { 240 | let rel_session_index_or_default = 241 | rel_session_index.unwrap_or(self.session_status.phase.default_request_session()); 242 | if (rel_session_index_or_default == RelSessionIndex::Prev) && 243 | (self.session_status.current_index == 0) 244 | { 245 | // The session does not exist. If this is the default session queue, just wait for the 246 | // default session to change. 
247 | debug_assert!(self.post_queues.prev.is_empty()); 248 | return 249 | } 250 | 251 | let session_index = rel_session_index 252 | .map(|rel_session_index| rel_session_index + self.session_status.current_index); 253 | let session_index_or_default = 254 | rel_session_index_or_default + self.session_status.current_index; 255 | 256 | while let Some(mut state) = self.post_queues[rel_session_index].pop_front() { 257 | debug_assert_eq!(state.session_index, session_index); 258 | 259 | // Attempt to post a request message 260 | let res = state.request.with_data( 261 | |data| { 262 | mixnet.post_request( 263 | session_index_or_default, 264 | &mut state.destination_index, 265 | &state.message_id, 266 | data, 267 | state.request.num_surbs(context), 268 | ns, 269 | ) 270 | }, 271 | context, 272 | ); 273 | 274 | match res { 275 | Ok(metrics) => { 276 | state.session_index = Some(session_index_or_default); 277 | 278 | // Extend the retry deadline 279 | let handling_delay = state.request.handling_delay(&state.message_id, context); 280 | let rtt = metrics.estimate_rtt(handling_delay); 281 | state.retry_deadline = max(state.retry_deadline, Instant::now() + rtt); 282 | 283 | match state.posts_remaining.checked_sub(1) { 284 | Some(posts_remaining) => { 285 | state.posts_remaining = posts_remaining; 286 | self.post_queues[Some(rel_session_index_or_default)].push_back(state); 287 | }, 288 | None => { 289 | let i = self 290 | .retry_queue 291 | .partition_point(|s| s.retry_deadline < state.retry_deadline); 292 | self.retry_queue.insert(i, state); 293 | if i == 0 { 294 | self.next_retry_deadline_changed = true; 295 | } 296 | }, 297 | } 298 | }, 299 | Err(PostErr::NotEnoughSpaceInQueue) => { 300 | // In this case, nothing should have changed. Just push the request back on the 301 | // front of the queue and try again later. 
302 | self.post_queues[rel_session_index].push_front(state); 303 | break 304 | }, 305 | Err(err) => state.request.handle_post_err(err, context), 306 | } 307 | } 308 | } 309 | 310 | /// Attempt to post messages from the internal post queues to `mixnet`. This should be called 311 | /// when the 312 | /// [`SPACE_IN_AUTHORED_PACKET_QUEUE`](super::core::Events::SPACE_IN_AUTHORED_PACKET_QUEUE) 313 | /// event fires. 314 | pub fn process_post_queues( 315 | &mut self, 316 | mixnet: &mut Mixnet, 317 | ns: &dyn NetworkStatus, 318 | context: &C, 319 | ) { 320 | // Process the default session queue first, as doing so might result in requests getting 321 | // pushed onto the other queues 322 | self.process_post_queue(None, mixnet, ns, context); 323 | self.process_post_queue(Some(RelSessionIndex::Current), mixnet, ns, context); 324 | self.process_post_queue(Some(RelSessionIndex::Prev), mixnet, ns, context); 325 | } 326 | 327 | fn session_post_queues_empty(&self, rel_session_index: Option) -> bool { 328 | if !self.post_queues[rel_session_index].is_empty() { 329 | return false 330 | } 331 | let default = self.session_status.phase.default_request_session(); 332 | match rel_session_index { 333 | Some(rel_session_index) if rel_session_index == default => 334 | self.post_queues.default.is_empty(), 335 | Some(_) => true, 336 | None => self.post_queues[Some(default)].is_empty(), 337 | } 338 | } 339 | 340 | fn retry( 341 | &mut self, 342 | mut state: RequestState, 343 | mixnet: &mut Mixnet, 344 | ns: &dyn NetworkStatus, 345 | context: &C, 346 | ) { 347 | debug_assert_eq!(state.posts_remaining, 0); 348 | match state.attempts_remaining.checked_sub(1) { 349 | Some(attempts_remaining) => state.attempts_remaining = attempts_remaining, 350 | None => { 351 | let Some(destinations_remaining) = state.destinations_remaining.checked_sub(1) 352 | else { 353 | state.request.handle_retry_limit_reached(context); 354 | return 355 | }; 356 | state.destinations_remaining = destinations_remaining; 357 | 
state.attempts_remaining = self.config.num_attempts_per_destination - 1; 358 | state.new_destination(self.created_at); 359 | }, 360 | } 361 | state.posts_remaining = self.config.num_posts_per_attempt - 1; 362 | 363 | let rel_session_index = state.session_index.and_then(|session_index| { 364 | let rel_session_index = RelSessionIndex::from_session_index( 365 | session_index, 366 | self.session_status.current_index, 367 | ); 368 | if !rel_session_index.map_or(false, |rel_session_index| { 369 | self.session_status.phase.allow_requests_and_replies(rel_session_index) 370 | }) { 371 | state.new_destination(self.created_at); 372 | return None 373 | } 374 | rel_session_index 375 | }); 376 | 377 | let empty = self.session_post_queues_empty(rel_session_index); 378 | self.post_queues[rel_session_index].push_back(state); 379 | if empty { 380 | // There were no requests waiting. It might be possible to post immediately. 381 | self.process_post_queue(rel_session_index, mixnet, ns, context); 382 | if rel_session_index.is_none() { 383 | // Might have pushed requests onto this queue while processing the default session 384 | // queue 385 | self.process_post_queue( 386 | Some(self.session_status.phase.default_request_session()), 387 | mixnet, 388 | ns, 389 | context, 390 | ); 391 | } 392 | } 393 | } 394 | 395 | /// Returns the next instant at which [`pop_next_retry`](Self::pop_next_retry) should be 396 | /// called. 397 | pub fn next_retry_deadline(&self) -> Option { 398 | self.retry_queue.front().map(|state| state.retry_deadline) 399 | } 400 | 401 | /// Pop the next request from the internal retry queue. This should be called whenever the 402 | /// deadline returned by [`next_retry_deadline`](Self::next_retry_deadline) is reached. This 403 | /// may post messages to `mixnet`. Returns `false` if the internal retry queue is empty. 
	// NOTE(review): `<X>`/`Mixnet<X>`/`Option<Instant>` generics reconstructed; the dump
	// stripped angle-bracketed text. Verify against the upstream source.
	pub fn pop_next_retry<X>(
		&mut self,
		mixnet: &mut Mixnet<X>,
		ns: &dyn NetworkStatus,
		context: &C,
	) -> bool {
		if let Some(state) = self.retry_queue.pop_front() {
			// Popping changes the front of the retry queue, and thus the next deadline
			self.next_retry_deadline_changed = true;
			self.retry(state, mixnet, ns, context);
			true
		} else {
			false
		}
	}

	/// Returns `true` if the next retry deadline (see
	/// [`next_retry_deadline`](Self::next_retry_deadline)) has changed since the last call.
	pub fn next_retry_deadline_changed(&mut self) -> bool {
		// Read-and-clear: the flag latches any change between calls
		let changed = self.next_retry_deadline_changed;
		self.next_retry_deadline_changed = false;
		changed
	}
}
--------------------------------------------------------------------------------
/src/core/fragment.rs:
--------------------------------------------------------------------------------
// Copyright 2022 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

//! Mixnet message fragment handling.

use super::{
	scattered::Scattered,
	sphinx::{Surb, PAYLOAD_DATA_SIZE, SURB_SIZE},
};
use arrayref::{array_mut_ref, array_refs, mut_array_refs};
use hashlink::{linked_hash_map::Entry, LinkedHashMap};
use log::{debug, log, Level};
use std::cmp::{max, min};

/// Size in bytes of a [`MessageId`].
pub const MESSAGE_ID_SIZE: usize = 16;
/// Message identifier. Should be randomly generated. Attached to fragments to enable reassembly.
pub type MessageId = [u8; MESSAGE_ID_SIZE];
// Sizes and integer types of the remaining per-fragment header fields. All header integers are
// encoded little-endian on the wire (see the from_le_bytes/to_le_bytes calls below).
const FRAGMENT_INDEX_SIZE: usize = 2;
type FragmentIndex = u16;
const FRAGMENT_DATA_SIZE_SIZE: usize = 2;
type FragmentDataSize = u16;
const FRAGMENT_NUM_SURBS_SIZE: usize = 1;
type FragmentNumSurbs = u8;
const FRAGMENT_HEADER_SIZE: usize = MESSAGE_ID_SIZE +
	FRAGMENT_INDEX_SIZE + // Last fragment index (number of fragments - 1)
	FRAGMENT_INDEX_SIZE + // Index of this fragment
	FRAGMENT_DATA_SIZE_SIZE + // Number of data bytes in this fragment
	FRAGMENT_NUM_SURBS_SIZE; // Number of SURBs in this fragment

/// A fragment fills a whole Sphinx packet payload ([`PAYLOAD_DATA_SIZE`]).
pub const FRAGMENT_SIZE: usize = PAYLOAD_DATA_SIZE;
pub type Fragment = [u8; FRAGMENT_SIZE];
const FRAGMENT_PAYLOAD_SIZE: usize = FRAGMENT_SIZE - FRAGMENT_HEADER_SIZE;
type FragmentPayload = [u8; FRAGMENT_PAYLOAD_SIZE];
const MAX_SURBS_PER_FRAGMENT: usize = FRAGMENT_PAYLOAD_SIZE / SURB_SIZE;

/// Borrow the header fields and payload of `fragment` as fixed-size sub-arrays, in wire order.
#[allow(clippy::type_complexity)]
fn split_fragment(
	fragment: &Fragment,
) -> (
	&MessageId,
	&[u8; FRAGMENT_INDEX_SIZE],
	&[u8; FRAGMENT_INDEX_SIZE],
	&[u8; FRAGMENT_DATA_SIZE_SIZE],
	&[u8; FRAGMENT_NUM_SURBS_SIZE],
	&FragmentPayload,
) {
	array_refs![
		fragment,
		MESSAGE_ID_SIZE,
		FRAGMENT_INDEX_SIZE,
		FRAGMENT_INDEX_SIZE,
		FRAGMENT_DATA_SIZE_SIZE,
		FRAGMENT_NUM_SURBS_SIZE,
		FRAGMENT_PAYLOAD_SIZE
	]
}

fn message_id(fragment: &Fragment) -> &MessageId {
	split_fragment(fragment).0
}

/// Total number of fragments in the message; stored on the wire as the last index (count - 1),
/// so a value of 0 on the wire means a single-fragment message.
fn num_fragments(fragment: &Fragment) -> usize {
	(FragmentIndex::from_le_bytes(*split_fragment(fragment).1) as usize) + 1
}

fn fragment_index(fragment: &Fragment) -> usize {
	FragmentIndex::from_le_bytes(*split_fragment(fragment).2) as usize
}

fn fragment_data_size(fragment: &Fragment) -> usize {
	FragmentDataSize::from_le_bytes(*split_fragment(fragment).3) as usize
}

fn fragment_num_surbs(fragment: &Fragment) -> usize {
	FragmentNumSurbs::from_le_bytes(*split_fragment(fragment).4) as usize
}

fn fragment_payload(fragment: &Fragment) -> &FragmentPayload {
	split_fragment(fragment).5
}

#[derive(Debug, thiserror::Error)]
enum CheckFragmentErr {
	#[error("Out-of-range index ({index}, max {max})")]
	Index { index: usize, max: usize },
	#[error("Bad payload size ({size}, max {max})")]
	PayloadSize { size: usize, max: usize },
}

/// Validate the header of `fragment`: the fragment index must be in range, and the declared data
/// bytes plus SURBs must fit in the payload area.
fn check_fragment(fragment: &Fragment) -> Result<(), CheckFragmentErr> {
	if fragment_index(fragment) >= num_fragments(fragment) {
		return Err(CheckFragmentErr::Index {
			index: fragment_index(fragment),
			max: num_fragments(fragment) - 1,
		})
	}

	let data_size = fragment_data_size(fragment);
	let num_surbs = fragment_num_surbs(fragment);
	let payload_size = data_size + (num_surbs * SURB_SIZE);
	if payload_size > FRAGMENT_PAYLOAD_SIZE {
		return Err(CheckFragmentErr::PayloadSize { size: payload_size, max: FRAGMENT_PAYLOAD_SIZE })
	}

	Ok(())
}

#[derive(Debug, PartialEq, Eq)]
pub struct GenericMessage {
	// Message identifier, taken from the fragment headers
	pub id: MessageId,
	// Concatenated data bytes of all fragments, in fragment order
	pub data: Vec<u8>,
	// All SURBs attached to the fragments, in the order they were written
	pub surbs: Vec<Surb>,
}

impl GenericMessage {
	/// Construct a message from a list of fragments. The fragments must all be valid (checked by
	/// [`check_fragment`]) and in the correct order.
	fn from_fragments<'a>(fragments: impl Iterator<Item = &'a Fragment> + Clone) -> Self {
		let id = *message_id(fragments.clone().next().expect("At least one fragment"));

		// Pre-size both vectors by summing the per-fragment counts (the iterator is Clone, so
		// these extra passes are cheap re-reads of the headers)
		let mut data = Vec::with_capacity(fragments.clone().map(fragment_data_size).sum());
		let mut surbs = Vec::with_capacity(fragments.clone().map(fragment_num_surbs).sum());
		for fragment in fragments {
			debug_assert!(check_fragment(fragment).is_ok());
			let payload = fragment_payload(fragment);
			// Data occupies the front of the payload...
			data.extend_from_slice(&payload[..fragment_data_size(fragment)]);
			// ...while SURBs are packed at the back (written via rchunks, see
			// FragmentBlueprint::surbs), so read them back-to-front too
			surbs.extend(
				payload
					// TODO Use array_rchunks if/when this is stabilised
					.rchunks_exact(SURB_SIZE)
					.map(|surb| {
						TryInto::<&Surb>::try_into(surb)
							.expect("All slices returned by rchunks_exact have length SURB_SIZE")
					})
					.take(fragment_num_surbs(fragment)),
			);
		}

		Self { id, data, surbs }
	}
}

#[derive(Debug, thiserror::Error)]
enum IncompleteMessageInsertErr {
	#[error("Inconsistent number of fragments for message ({0} vs {1})")]
	InconsistentNumFragments(usize, usize),
	#[error("Already have this fragment")]
	AlreadyHave,
}

/// A partially received message: one slot per expected fragment.
struct IncompleteMessage {
	// Received fragments by index; None for fragments not yet received
	fragments: Vec<Option<Box<Fragment>>>,
	/// Count of [`Some`] in `fragments`.
	num_received_fragments: usize,
}

impl IncompleteMessage {
	fn new(num_fragments: usize) -> Self {
		Self { fragments: vec![None; num_fragments], num_received_fragments: 0 }
	}

	/// Attempt to insert `fragment`, which must be a valid fragment (checked by
	/// [`check_fragment`]). Success implies
	/// [`num_received_fragments`](Self::num_received_fragments) was incremented.
	fn insert(&mut self, fragment: &Fragment) -> Result<(), IncompleteMessageInsertErr> {
		debug_assert!(check_fragment(fragment).is_ok());

		// All fragments of a message must agree on the fragment count
		if num_fragments(fragment) != self.fragments.len() {
			return Err(IncompleteMessageInsertErr::InconsistentNumFragments(
				num_fragments(fragment),
				self.fragments.len(),
			))
		}

		// Duplicate fragments are rejected so num_received_fragments stays accurate
		let slot = &mut self.fragments[fragment_index(fragment)];
		if slot.is_some() {
			return Err(IncompleteMessageInsertErr::AlreadyHave)
		}

		*slot = Some((*fragment).into());
		self.num_received_fragments += 1;
		debug_assert!(self.num_received_fragments <= self.fragments.len());
		Ok(())
	}

	/// Returns [`None`] if we don't have all the fragments yet. Otherwise, returns an iterator
	/// over the completed list of fragments.
	fn complete_fragments(&self) -> Option<impl Iterator<Item = &Fragment> + Clone> {
		(self.num_received_fragments == self.fragments.len()).then(|| {
			self.fragments
				.iter()
				.map(|fragment| fragment.as_ref().expect("All fragments received").as_ref())
		})
	}
}

pub struct FragmentAssembler {
	/// Incomplete messages, in LRU order: least recently used at the front, most recently at the
	/// back. All messages have at least one received fragment.
	incomplete_messages: LinkedHashMap<MessageId, IncompleteMessage>,
	/// Total number of received fragments across all messages in `incomplete_messages`.
	num_incomplete_fragments: usize,

	/// Maximum number of incomplete messages to keep in `incomplete_messages`.
	max_incomplete_messages: usize,
	/// Maximum number of received fragments to keep across all messages in `incomplete_messages`.
	max_incomplete_fragments: usize,
	/// Maximum number of fragments per message. Fragments of messages with more than this many
	/// fragments are dropped on receipt.
	max_fragments_per_message: usize,
}

impl FragmentAssembler {
	pub fn new(
		max_incomplete_messages: usize,
		max_incomplete_fragments: usize,
		max_fragments_per_message: usize,
	) -> Self {
		Self {
			incomplete_messages: LinkedHashMap::with_capacity(
				// Plus one because we only evict _after_ going over the limit
				max_incomplete_messages.saturating_add(1),
			),
			num_incomplete_fragments: 0,
			max_incomplete_messages,
			max_incomplete_fragments,
			max_fragments_per_message,
		}
	}

	/// Returns `true` if either the message or the fragment limit is currently exceeded.
	fn need_eviction(&self) -> bool {
		(self.incomplete_messages.len() > self.max_incomplete_messages) ||
			(self.num_incomplete_fragments > self.max_incomplete_fragments)
	}

	/// Evict a message if we're over the messages or fragments limit. This should be called after
	/// each fragment insertion.
	fn maybe_evict(&mut self, log_target: &str) {
		if self.need_eviction() {
			debug!(target: log_target, "Too many incomplete messages; evicting LRU");
			let incomplete_message = self
				.incomplete_messages
				.pop_front()
				.expect("Over messages or fragments limit, there must be at least one message")
				.1;
			debug_assert!(
				self.num_incomplete_fragments >= incomplete_message.num_received_fragments
			);
			self.num_incomplete_fragments -= incomplete_message.num_received_fragments;
			// Called after each fragment insertion, so could only have been one message or
			// fragment over the limit. Each message has at least one received fragment, so having
			// popped a message we should now be within both limits.
			debug_assert!(!self.need_eviction());
		}
	}

	/// Attempt to insert `fragment`. If this completes a message, the completed message is
	/// returned.
	pub fn insert(&mut self, fragment: &Fragment, log_target: &str) -> Option<GenericMessage> {
		if let Err(err) = check_fragment(fragment) {
			debug!(target: log_target, "Received bad fragment: {err}");
			return None
		}
		let num_fragments = num_fragments(fragment);
		// Silently drop fragments of over-large messages; they could never complete here
		if num_fragments > self.max_fragments_per_message {
			return None
		}
		// Single-fragment messages complete immediately and never touch the LRU map
		if num_fragments == 1 {
			return Some(GenericMessage::from_fragments(std::iter::once(fragment)))
		}
		match self.incomplete_messages.entry(*message_id(fragment)) {
			Entry::Occupied(mut entry) => {
				let incomplete_message = entry.get_mut();
				if let Err(err) = incomplete_message.insert(fragment) {
					// Duplicates are expected in normal operation, so log them at trace only
					let level = match err {
						IncompleteMessageInsertErr::AlreadyHave => Level::Trace,
						_ => Level::Debug,
					};
					log!(target: log_target, level, "Fragment insert failed: {err}");
					return None
				}
				self.num_incomplete_fragments += 1;
				let message =
					incomplete_message.complete_fragments().map(GenericMessage::from_fragments);
				if message.is_some() {
					// Message complete: drop it from the map and its fragments from the count
					self.num_incomplete_fragments -= entry.remove().num_received_fragments;
				} else {
					// Still incomplete: mark as most recently used, then enforce limits
					entry.to_back();
					self.maybe_evict(log_target);
				}
				message
			},
			Entry::Vacant(entry) => {
				let mut incomplete_message = IncompleteMessage::new(num_fragments);
				// Insert of first fragment cannot fail
				assert!(incomplete_message.insert(fragment).is_ok());
				entry.insert(incomplete_message);
				self.num_incomplete_fragments += 1;
				self.maybe_evict(log_target);
				None
			},
		}
	}
}

/// Recipe for a single fragment: everything except the SURB bytes, which are written separately
/// via [`FragmentBlueprint::surbs`].
pub struct FragmentBlueprint<'a> {
	message_id: MessageId,
	last_index: FragmentIndex,
	index: FragmentIndex,
	data: Scattered<'a, u8>,
	num_surbs: FragmentNumSurbs,
}

impl<'a> FragmentBlueprint<'a> {
	/// Write the header and data payload into `fragment`, leaving the SURB area untouched.
	pub fn write_except_surbs(&self, fragment: &mut Fragment) {
		let (message_id, last_index, index, data_size, num_surbs, payload) = mut_array_refs![
			fragment,
			MESSAGE_ID_SIZE,
			FRAGMENT_INDEX_SIZE,
			FRAGMENT_INDEX_SIZE,
			FRAGMENT_DATA_SIZE_SIZE,
			FRAGMENT_NUM_SURBS_SIZE,
			FRAGMENT_PAYLOAD_SIZE
		];

		// Write header
		*message_id = self.message_id;
		*last_index = self.last_index.to_le_bytes();
		*index = self.index.to_le_bytes();
		*data_size = (self.data.len() as FragmentDataSize).to_le_bytes();
		*num_surbs = self.num_surbs.to_le_bytes();

		// Write payload (data fills the front; SURBs go at the back, written by the caller)
		self.data.copy_to_slice(&mut payload[..self.data.len()]);
	}

	/// Returns mutable references to the SURB slots in `fragment`, packed from the back of the
	/// payload, for the caller to fill in.
	pub fn surbs<'fragment>(
		&self,
		fragment: &'fragment mut Fragment,
	) -> impl Iterator<Item = &'fragment mut Surb> {
		array_mut_ref![fragment, FRAGMENT_HEADER_SIZE, FRAGMENT_PAYLOAD_SIZE]
			// TODO Use array_rchunks_mut if/when this is stabilised
			.rchunks_exact_mut(SURB_SIZE)
			.map(|surb| {
				TryInto::<&mut Surb>::try_into(surb)
					.expect("All slices returned by rchunks_exact_mut have length SURB_SIZE")
			})
			.take(self.num_surbs as usize)
	}
}

// TODO Use usize::div_ceil when this is stabilised
fn div_ceil(x: usize, y: usize) -> usize {
	if x == 0 {
		0
	} else {
		((x - 1) / y) + 1
	}
}

/// Generate fragment blueprints containing the provided message ID and data and the specified
/// number of SURBs. Returns [`None`] if more fragments would be required than are possible to
/// encode. Note that the actual number of fragments supported by the receiver is likely to be
/// significantly less than this.
pub fn fragment_blueprints<'a>(
	message_id: &MessageId,
	mut data: Scattered<'a, u8>,
	mut num_surbs: usize,
) -> Option<impl ExactSizeIterator<Item = FragmentBlueprint<'a>>> {
	let message_id = *message_id;

	// Figure out how many fragments we need. SURBs are packed as densely as possible; data
	// first fills the space left over in the SURB-carrying fragments, then any remainder gets
	// fragments of its own. Always at least one fragment, even for an empty message.
	let num_fragments_for_surbs = div_ceil(num_surbs, MAX_SURBS_PER_FRAGMENT);
	let surb_fragments_unused_size = num_fragments_for_surbs.saturating_mul(FRAGMENT_PAYLOAD_SIZE) -
		num_surbs.saturating_mul(SURB_SIZE);
	let remaining_data_size = data.len().saturating_sub(surb_fragments_unused_size);
	let num_fragments_for_remaining_data = div_ceil(remaining_data_size, FRAGMENT_PAYLOAD_SIZE);
	let num_fragments =
		max(num_fragments_for_surbs.saturating_add(num_fragments_for_remaining_data), 1);

	// The last index must be encodable in the fragment header
	let last_index = num_fragments - 1;
	(last_index <= (FragmentIndex::MAX as usize)).then(|| {
		(0..num_fragments).map(move |index| {
			// Greedily assign as many of the remaining SURBs as fit, then fill the rest of
			// the fragment with as much of the remaining data as fits
			let fragment_num_surbs = min(num_surbs, MAX_SURBS_PER_FRAGMENT);
			num_surbs -= fragment_num_surbs;
			let fragment_unused_size = FRAGMENT_PAYLOAD_SIZE - (fragment_num_surbs * SURB_SIZE);
			let fragment_data_size = min(data.len(), fragment_unused_size);
			let (fragment_data, remaining_data) = data.split_at(fragment_data_size);
			data = remaining_data;
			FragmentBlueprint {
				message_id,
				last_index: last_index as FragmentIndex,
				index: index as FragmentIndex,
				data: fragment_data,
				num_surbs: fragment_num_surbs as FragmentNumSurbs,
			}
		})
	})
}

#[cfg(test)]
mod tests {
	use super::*;
	use itertools::Itertools;
	use rand::{prelude::SliceRandom, Rng, RngCore};

	const LOG_TARGET: &str = "mixnet";

	#[test]
	fn create_and_insert_small() {
		let mut rng = rand::thread_rng();

		let id = rng.gen();
		// One data byte and one SURB: should fit in a single fragment
		let mut blueprints = fragment_blueprints(&id, [42].as_slice().into(), 1).unwrap();
		assert_eq!(blueprints.len(),
			1);
		let blueprint = blueprints.next().unwrap();

		let mut fragment = [0; FRAGMENT_SIZE];
		blueprint.write_except_surbs(&mut fragment);
		let mut dummy_surb = [0; SURB_SIZE];
		rng.fill_bytes(&mut dummy_surb);
		{
			// Exactly one SURB slot should be exposed; fill it
			let mut surbs = blueprint.surbs(&mut fragment);
			*surbs.next().unwrap() = dummy_surb;
			assert!(surbs.next().is_none());
		}

		let mut fa = FragmentAssembler::new(1, usize::MAX, usize::MAX);
		assert_eq!(
			fa.insert(&fragment, LOG_TARGET),
			Some(GenericMessage { id, data: vec![42], surbs: vec![dummy_surb] })
		);
	}

	// Build the full fragment list for a message with the given data and no SURBs
	fn no_surb_fragments(message_id: &MessageId, data: &[u8]) -> Vec<Fragment> {
		fragment_blueprints(message_id, data.into(), 0)
			.unwrap()
			.map(|blueprint| {
				let mut fragment = [0; FRAGMENT_SIZE];
				blueprint.write_except_surbs(&mut fragment);
				fragment
			})
			.collect()
	}

	// Insert fragments until one completes a message; asserts the message completed exactly on
	// the last fragment (or not at all)
	fn insert_fragments<'a>(
		fa: &mut FragmentAssembler,
		mut fragments: impl Iterator<Item = &'a Fragment>,
	) -> Option<GenericMessage> {
		let message = fragments.find_map(|fragment| fa.insert(fragment, LOG_TARGET));
		assert!(fragments.next().is_none());
		message
	}

	#[test]
	fn create_and_insert_large() {
		let mut rng = rand::thread_rng();

		let id = rng.gen();
		let mut data = vec![0; 60000];
		rng.fill_bytes(&mut data);
		let mut fragments = no_surb_fragments(&id, &data);
		assert_eq!(fragments.len(), 30);
		// Reassembly must work regardless of arrival order
		fragments.shuffle(&mut rng);

		let mut fa = FragmentAssembler::new(1, usize::MAX, usize::MAX);
		assert_eq!(
			insert_fragments(&mut fa, fragments.iter()),
			Some(GenericMessage { id, data, surbs: Vec::new() })
		);
	}

	#[test]
	fn create_too_large() {
		// One byte more than the largest encodable message
		let too_large = vec![0; (((FragmentIndex::MAX as usize) + 1) * FRAGMENT_PAYLOAD_SIZE) + 1];
		assert!(
			fragment_blueprints(&[0; MESSAGE_ID_SIZE], too_large.as_slice().into(), 0).is_none()
		);
	}

	#[test]
	fn message_limit_eviction() {
		let mut rng = rand::thread_rng();

		let first_id = rng.gen();
		let mut first_data = vec![0; 3000];
		rng.fill_bytes(&mut first_data);
		let first_fragments = no_surb_fragments(&first_id, &first_data);

		let second_id = rng.gen();
		let mut second_data = vec![0; 3000];
		rng.fill_bytes(&mut second_data);
		let second_fragments = no_surb_fragments(&second_id, &second_data);

		let mut fa = FragmentAssembler::new(1, usize::MAX, usize::MAX);

		// One message at a time should work
		assert_eq!(
			insert_fragments(&mut fa, first_fragments.iter()),
			Some(GenericMessage { id: first_id, data: first_data, surbs: Vec::new() })
		);
		assert_eq!(
			insert_fragments(&mut fa, second_fragments.iter()),
			Some(GenericMessage { id: second_id, data: second_data, surbs: Vec::new() })
		);

		// Alternating fragments should not work due to eviction
		assert_eq!(
			insert_fragments(&mut fa, first_fragments.iter().interleave(&second_fragments)),
			None
		);
	}

	#[test]
	fn fragment_limit_eviction() {
		let mut rng = rand::thread_rng();

		let first_id = rng.gen();
		let mut first_data = vec![0; 5000];
		rng.fill_bytes(&mut first_data);
		let first_fragments = no_surb_fragments(&first_id, &first_data);

		let second_id = rng.gen();
		let mut second_data = vec![0; 5000];
		rng.fill_bytes(&mut second_data);
		let second_fragments = no_surb_fragments(&second_id, &second_data);

		// With a one-fragment limit it should not be possible to reconstruct either message
		let mut fa = FragmentAssembler::new(2, 1, usize::MAX);
		assert_eq!(insert_fragments(&mut fa, first_fragments.iter()), None);
		assert_eq!(insert_fragments(&mut fa, second_fragments.iter()), None);

		let mut fa = FragmentAssembler::new(2, 2, usize::MAX);

		// With a two-fragment limit it should be possible to reconstruct them individually
		assert_eq!(
			insert_fragments(&mut fa, first_fragments.iter()),
			Some(GenericMessage { id: first_id, data: first_data, surbs: Vec::new() })
		);
		assert_eq!(
			insert_fragments(&mut fa, second_fragments.iter()),
			Some(GenericMessage { id: second_id, data: second_data, surbs: Vec::new() })
		);

		// But not when interleaved
		assert_eq!(
			insert_fragments(&mut fa, first_fragments.iter().interleave(&second_fragments)),
			None
		);
	}
}
--------------------------------------------------------------------------------
/src/core/mod.rs:
--------------------------------------------------------------------------------
// Copyright 2022 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | // DEALINGS IN THE SOFTWARE. 20 | 21 | //! Mixnet core logic. 22 | 23 | // Get a bunch of these from [mut_]array_refs 24 | #![allow(clippy::ptr_offset_with_cast)] 25 | 26 | mod config; 27 | mod cover; 28 | mod fragment; 29 | mod kx_pair; 30 | mod packet_queues; 31 | mod replay_filter; 32 | mod request_builder; 33 | mod scattered; 34 | mod sessions; 35 | mod sphinx; 36 | mod surb_keystore; 37 | mod topology; 38 | mod util; 39 | 40 | pub use self::{ 41 | config::{Config, SessionConfig}, 42 | fragment::{MessageId, MESSAGE_ID_SIZE}, 43 | packet_queues::AddressedPacket, 44 | scattered::Scattered, 45 | sessions::{RelSessionIndex, SessionIndex, SessionPhase, SessionStatus}, 46 | sphinx::{ 47 | Delay, KxPublic, KxSecret, MixnodeIndex, Packet, PeerId, RawMixnodeIndex, Surb, 48 | KX_PUBLIC_SIZE, MAX_HOPS, MAX_MIXNODE_INDEX, PACKET_SIZE, PEER_ID_SIZE, SURB_SIZE, 49 | }, 50 | topology::{Mixnode, NetworkStatus, TopologyErr}, 51 | }; 52 | use self::{ 53 | cover::{gen_cover_packet, CoverKind}, 54 | fragment::{fragment_blueprints, FragmentAssembler}, 55 | kx_pair::KxPair, 56 | packet_queues::{AuthoredPacketQueue, CheckSpaceErr, ForwardPacketQueue}, 57 | replay_filter::ReplayFilter, 58 | request_builder::RequestBuilder, 59 | sessions::{Session, SessionSlot, Sessions}, 60 | sphinx::{ 61 | complete_reply_packet, decrypt_reply_payload, kx_public, mut_payload_data, peel, Action, 62 | PeelErr, PAYLOAD_DATA_SIZE, PAYLOAD_SIZE, 63 | }, 64 | surb_keystore::SurbKeystore, 65 | topology::Topology, 66 | util::default_boxed_array, 67 | }; 68 | use arrayref::{array_mut_ref, array_ref}; 69 | use arrayvec::ArrayVec; 70 | use bitflags::bitflags; 71 | use either::Either; 72 | use log::{debug, info, trace}; 73 | use rand::Rng; 74 | use std::{ 
	cmp::{max, min},
	time::{Duration, Instant},
};

/// Error querying the mixnodes for a session.
pub enum MixnodesErr {
	/// Transient error. The query might succeed later. Do not disable the mixnet for the session.
	Transient,
	/// Permanent error. The query will never succeed. Disable the mixnet for the session.
	Permanent,
}

/// A request from another node.
#[derive(Debug, PartialEq, Eq)]
pub struct RequestMessage {
	/// Index of the session this message was received in. This session index should be used when
	/// sending replies.
	pub session_index: SessionIndex,
	/// Message identifier, explicitly provided by the request sender.
	pub id: MessageId,
	/// The message contents.
	pub data: Vec<u8>,
	/// SURBs that were attached to the message. These can be used to send replies.
	pub surbs: Vec<Surb>,
}

/// A reply to a previously sent request.
#[derive(Debug, PartialEq, Eq)]
pub struct ReplyMessage {
	/// ID of the request message this reply was sent in response to.
	pub request_id: MessageId,
	/// The message contents.
	pub data: Vec<u8>,
}

/// A message received over the mixnet.
#[derive(Debug, PartialEq, Eq)]
pub enum Message {
	/// A request from another node.
	Request(RequestMessage),
	/// A reply to a previously sent request.
	Reply(ReplyMessage),
}

/// Request/reply posting error.
#[derive(Debug, thiserror::Error)]
pub enum PostErr {
	/// Message contents too large or too many SURBs.
	#[error("Message would need to be split into too many fragments")]
	TooManyFragments,
	/// The session is no longer active.
	#[error("Session {0} is no longer active")]
	SessionNoLongerActive(SessionIndex),
	/// The session is not active yet.
	#[error("Session {0} is not active yet")]
	SessionNotActiveYet(SessionIndex),
	/// Mixnodes not yet known for the session.
	#[error("Mixnodes not yet known for session {0}")]
	SessionMixnodesNotKnown(SessionIndex),
	/// Mixnet disabled for the session.
	#[error("Mixnet disabled for session {0}")]
	SessionDisabled(SessionIndex),
	/// Not enough space in the authored packet queue.
	#[error("There is not enough space in the authored packet queue")]
	NotEnoughSpaceInQueue,
	/// Topology error.
	#[error("Topology error: {0}")]
	Topology(#[from] TopologyErr),
	/// Bad SURB.
	#[error("Bad SURB")]
	BadSurb,
}

/// Resolve `index` to the corresponding active session, or explain (as a [`PostErr`]) why
/// requests/replies cannot currently be posted to it.
// NOTE(review): `<X>` generics reconstructed; the dump stripped angle-bracketed text.
fn post_session<X>(
	sessions: &mut Sessions<X>,
	status: SessionStatus,
	index: SessionIndex,
) -> Result<&mut Session<X>, PostErr> {
	// Only the current and previous sessions are representable; anything else is either in the
	// past or the future
	let Some(rel_index) = RelSessionIndex::from_session_index(index, status.current_index) else {
		return Err(if index < status.current_index {
			PostErr::SessionNoLongerActive(index)
		} else {
			PostErr::SessionNotActiveYet(index)
		})
	};
	if !status.phase.allow_requests_and_replies(rel_index) {
		return Err(match rel_index {
			RelSessionIndex::Prev => PostErr::SessionNoLongerActive(index),
			RelSessionIndex::Current => PostErr::SessionNotActiveYet(index),
		})
	}
	match &mut sessions[rel_index] {
		SessionSlot::Empty | SessionSlot::KxPair(_) => Err(PostErr::SessionMixnodesNotKnown(index)),
		// Note that in the case where the session has been disabled because it is no longer
		// needed, we will enter the !allow_requests_and_replies if above and not get here
		SessionSlot::Disabled => Err(PostErr::SessionDisabled(index)),
		SessionSlot::Full(session) => Ok(session),
	}
}

impl From<CheckSpaceErr> for PostErr {
	fn from(value: CheckSpaceErr) -> Self {
		match value {
			CheckSpaceErr::Capacity => PostErr::TooManyFragments,
			CheckSpaceErr::Len => PostErr::NotEnoughSpaceInQueue,
		}
	}
}

/// Returns a conservative estimate of the time taken for the last packet in the authored packet
/// queue to get dispatched plus the time taken for all reply packets to get through the authored
/// packet queue at the far end.
fn estimate_authored_packet_queue_delay<X>(config: &Config, session: &Session<X>) -> Duration {
	// Effective fraction of dispatch slots available to authored request/reply packets
	let rate_mul =
		// When transitioning between sessions, the rate is halved
		0.5 *
		// Loop cover packets are never replaced with packets from the authored packet queue
		(1.0 - config.loop_cover_proportion);
	let request_period = session.mean_authored_packet_period.div_f64(rate_mul);
	let request_len = session.authored_packet_queue.len();
	// Assume that the destination mixnode is using the same configuration as us
	let reply_period = config.mixnode_session.mean_authored_packet_period.div_f64(rate_mul);
	let reply_len = config.mixnode_session.authored_packet_queue.capacity; // Worst case

	// The delays between authored packet queue pops follow an exponential distribution. The sum of
	// n independent exponential random variables with scale s follows a gamma distribution with
	// shape n and scale s. A reasonable approximation to the 99.995th percentile of the gamma
	// distribution with shape n and scale s is:
	//
	// s * (4.92582 + (3.87809 * sqrt(n)) + n)
	//
	// The constants were obtained by fitting to the actual values for n=1..200.
	//
	// This isn't quite what we want here; we are interested in the sum of two gamma-distributed
	// random variables with different scales (request_period and reply_period). An approximation
	// to the 99.995th percentile of such a sum is:
	//
	// s * (4.92582 + (3.87809 * sqrt(n + (r^3 * m))) + n + (r * m))
	//
	// Where:
	//
	// - s is the larger scale.
	// - n is the corresponding shape.
	// - m is the other shape.
	// - r is the other scale divided by s (between 0 and 1).
	//
	// Note that when r is 0 this matches the first approximation, and when r is 1 this matches the
	// first approximation with n replaced by (n + m).
	let (s, n, m, rs) = if request_period > reply_period {
		(request_period, request_len, reply_len, reply_period)
	} else {
		(reply_period, reply_len, request_len, request_period)
	};
	let n = n as f64;
	let m = m as f64;
	let r = rs.as_secs_f64() / s.as_secs_f64();
	s.mul_f64(4.92582 + (3.87809 * (n + (r * r * r * m)).sqrt()) + n + (r * m))
}

/// Metrics that can be used to estimate a request's round-trip time.
pub struct RequestMetrics {
	/// The maximum number of hops for any of the fragments to reach the destination, plus the
	/// maximum number of hops for any of the SURBs to come back.
	pub num_hops: usize,
	/// Conservative estimate of the network (and processing) delay per hop.
	pub per_hop_net_delay: Duration,
	/// The maximum total forwarding delay for any request fragment, plus the maximum total
	/// forwarding delay for any SURB.
	pub forwarding_delay: Duration,
	/// A conservative estimate of the total delay through the authored packet queues at the source
	/// and destination.
	pub authored_packet_queue_delay: Duration,
}

impl RequestMetrics {
	/// Returns a conservative estimate of the round-trip time, suitable for use as a timeout.
	/// `handling_delay` should be a conservative estimate of the time taken to handle the request
	/// at the destination and post the reply.
253 | pub fn estimate_rtt(&self, handling_delay: Duration) -> Duration { 254 | let net_delay = self.per_hop_net_delay * (self.num_hops as u32); 255 | self.forwarding_delay + self.authored_packet_queue_delay + net_delay + handling_delay 256 | } 257 | } 258 | 259 | bitflags! { 260 | /// Flags to indicate events that have occurred. Note that these may be set spuriously. 261 | pub struct Events: u32 { 262 | /// The reserved peers returned by [`Mixnet::reserved_peers`] have changed. 263 | const RESERVED_PEERS_CHANGED = 0b1; 264 | /// The deadline returned by [`Mixnet::next_forward_packet_deadline`] has changed. 265 | const NEXT_FORWARD_PACKET_DEADLINE_CHANGED = 0b10; 266 | /// The effective deadline returned by [`Mixnet::next_authored_packet_delay`] has changed. 267 | /// The delay (and thus the effective deadline) is randomly generated according to an 268 | /// exponential distribution each time the function is called, but the last returned 269 | /// deadline remains valid until this bit indicates otherwise. Due to the memoryless nature 270 | /// of exponential distributions, it is harmless for this bit to be set spuriously. 271 | const NEXT_AUTHORED_PACKET_DEADLINE_CHANGED = 0b100; 272 | /// Space has become available in an authored packet queue. 273 | const SPACE_IN_AUTHORED_PACKET_QUEUE = 0b1000; 274 | } 275 | } 276 | 277 | /// Mixnet core state. `X` is the type of the extra data stored for each mixnode 278 | /// ([`Mixnode::extra`]). 279 | pub struct Mixnet { 280 | config: Config, 281 | 282 | /// Index and phase of current session. 283 | session_status: SessionStatus, 284 | /// Current and previous sessions. 285 | sessions: Sessions, 286 | /// Key-exchange key pair for the next session. 287 | next_kx_pair: Option, 288 | 289 | /// Queue of packets to be forwarded, after some delay. 290 | forward_packet_queue: ForwardPacketQueue, 291 | 292 | /// Keystore for SURB payload encryption keys. 
293 | surb_keystore: SurbKeystore, 294 | /// Reassembles fragments into messages. Note that for simplicity there is just one assembler 295 | /// for everything (requests and replies across all sessions). 296 | fragment_assembler: FragmentAssembler, 297 | 298 | /// Flags to indicate events that have occurred. 299 | events: Events, 300 | } 301 | 302 | impl Mixnet { 303 | /// Create a new `Mixnet`. 304 | pub fn new(config: Config) -> Self { 305 | let sessions = Sessions { 306 | current: config 307 | .session_0_kx_secret 308 | .map_or(SessionSlot::Empty, |secret| SessionSlot::KxPair(secret.into())), 309 | prev: SessionSlot::Disabled, 310 | }; 311 | 312 | let forward_packet_queue = ForwardPacketQueue::new(config.forward_packet_queue_capacity); 313 | 314 | let surb_keystore = SurbKeystore::new(config.surb_keystore_capacity); 315 | let fragment_assembler = FragmentAssembler::new( 316 | config.max_incomplete_messages, 317 | config.max_incomplete_fragments, 318 | config.max_fragments_per_message, 319 | ); 320 | 321 | Self { 322 | config, 323 | 324 | session_status: SessionStatus { current_index: 0, phase: SessionPhase::CoverToCurrent }, 325 | sessions, 326 | next_kx_pair: None, 327 | 328 | forward_packet_queue, 329 | 330 | surb_keystore, 331 | fragment_assembler, 332 | 333 | events: Events::empty(), 334 | } 335 | } 336 | 337 | /// Returns the current session index and phase. 338 | pub fn session_status(&self) -> SessionStatus { 339 | self.session_status 340 | } 341 | 342 | /// Sets the current session index and phase. The current and previous mixnodes may need to be 343 | /// provided after calling this; see [`maybe_set_mixnodes`](Self::maybe_set_mixnodes). 
344 | pub fn set_session_status(&mut self, session_status: SessionStatus) { 345 | if self.session_status == session_status { 346 | return 347 | } 348 | 349 | // Shift sessions when current session index changes 350 | if self.session_status.current_index != session_status.current_index { 351 | let next_session = std::mem::take(&mut self.next_kx_pair) 352 | .map_or(SessionSlot::Empty, SessionSlot::KxPair); 353 | match session_status.current_index.saturating_sub(self.session_status.current_index) { 354 | 1 => 355 | self.sessions.prev = std::mem::replace(&mut self.sessions.current, next_session), 356 | 2 => { 357 | self.sessions.prev = next_session; 358 | self.sessions.current = SessionSlot::Empty; 359 | }, 360 | _ => 361 | if !self.sessions.is_empty() || !next_session.is_empty() { 362 | debug!( 363 | target: self.config.log_target, 364 | "Unexpected session index {}; previous session index was {}", 365 | session_status.current_index, 366 | self.session_status.current_index 367 | ); 368 | self.sessions = 369 | Sessions { current: SessionSlot::Empty, prev: SessionSlot::Empty }; 370 | }, 371 | } 372 | } 373 | 374 | // Discard previous session if it is not needed. Also, avoid ever having a previous session 375 | // when the current session index is 0... there is no sensible index for it. 376 | if !session_status.phase.need_prev() || (session_status.current_index == 0) { 377 | self.sessions.prev = SessionSlot::Disabled; 378 | } 379 | 380 | // For simplicity just assume these have changed. This should happen at most once a minute 381 | // or so. 382 | self.events |= 383 | Events::RESERVED_PEERS_CHANGED | Events::NEXT_AUTHORED_PACKET_DEADLINE_CHANGED; 384 | 385 | self.session_status = session_status; 386 | 387 | info!(target: self.config.log_target, "Session status changed: {session_status}"); 388 | } 389 | 390 | /// Sets the mixnodes for the specified session, if they are needed. 
If `mixnodes()` returns 391 | /// `Err(MixnodesErr::Permanent)`, the session slot will be disabled, and later calls to 392 | /// `maybe_set_mixnodes` for the session will return immediately. If `mixnodes()` returns 393 | /// `Err(MixnodesErr::Transient)`, the session slot will merely remain empty, and later calls to 394 | /// `maybe_set_mixnodes` may succeed. 395 | /// 396 | /// The mixnode peer IDs are used for two things: 397 | /// 398 | /// - Checking for connectivity (they are passed to [`NetworkStatus::is_connected`]). 399 | /// - Sending packets (they are put in [`AddressedPacket::peer_id`]). 400 | pub fn maybe_set_mixnodes( 401 | &mut self, 402 | rel_session_index: RelSessionIndex, 403 | mixnodes: &mut dyn FnMut() -> Result>, MixnodesErr>, 404 | ) { 405 | let session = &mut self.sessions[rel_session_index]; 406 | if !matches!(session, SessionSlot::Empty | SessionSlot::KxPair(_)) { 407 | return 408 | } 409 | 410 | let session_index = rel_session_index + self.session_status.current_index; 411 | let mut rng = rand::thread_rng(); 412 | 413 | // Determine mixnodes 414 | let mut mixnodes = match mixnodes() { 415 | Ok(mixnodes) => mixnodes, 416 | Err(err) => { 417 | if matches!(err, MixnodesErr::Permanent) { 418 | *session = SessionSlot::Disabled; 419 | } 420 | return 421 | }, 422 | }; 423 | let max_mixnodes = (MAX_MIXNODE_INDEX + 1) as usize; 424 | if mixnodes.len() > max_mixnodes { 425 | debug!( 426 | target: self.config.log_target, 427 | "Session {session_index}: Too many mixnodes ({}, max {max_mixnodes}); ignoring excess", 428 | mixnodes.len() 429 | ); 430 | mixnodes.truncate(max_mixnodes); 431 | } 432 | 433 | // Determine key-exchange key pair for the local node. Note that from this point on, we are 434 | // guaranteed to either panic or overwrite *session. 
435 | let kx_pair = match std::mem::replace(session, SessionSlot::Empty) { 436 | SessionSlot::KxPair(kx_pair) => kx_pair, 437 | _ => KxPair::gen(&mut rng), 438 | }; 439 | 440 | // Build Topology struct 441 | let topology = 442 | Topology::new(&mut rng, mixnodes, kx_pair.public(), self.config.num_gateway_mixnodes); 443 | 444 | // Determine session config 445 | let config = if topology.is_mixnode() { 446 | &self.config.mixnode_session 447 | } else { 448 | match &self.config.non_mixnode_session { 449 | Some(config) => config, 450 | None => { 451 | info!(target: self.config.log_target, 452 | "Session {session_index}: Local node is not a mixnode; \ 453 | disabling mixnet as per configuration"); 454 | *session = SessionSlot::Disabled; 455 | return 456 | }, 457 | } 458 | }; 459 | 460 | info!(target: self.config.log_target, "Session {session_index}: {topology}"); 461 | 462 | // Build Session struct 463 | *session = SessionSlot::Full(Session { 464 | kx_pair, 465 | topology, 466 | authored_packet_queue: AuthoredPacketQueue::new(config.authored_packet_queue), 467 | mean_authored_packet_period: config.mean_authored_packet_period, 468 | replay_filter: ReplayFilter::new(&mut rng), 469 | }); 470 | 471 | self.events |= 472 | Events::RESERVED_PEERS_CHANGED | Events::NEXT_AUTHORED_PACKET_DEADLINE_CHANGED; 473 | } 474 | 475 | /// Returns the key-exchange public key for the next session. 476 | pub fn next_kx_public(&mut self) -> &KxPublic { 477 | self.next_kx_pair 478 | .get_or_insert_with(|| KxPair::gen(&mut rand::thread_rng())) 479 | .public() 480 | } 481 | 482 | /// Returns the mixnodes we should try to maintain connections to. 483 | pub fn reserved_peers(&self) -> impl Iterator> { 484 | self.sessions.iter().flat_map(|session| session.topology.reserved_peers()) 485 | } 486 | 487 | /// Handle an incoming packet. If the packet completes a message, the message is returned. 488 | /// Otherwise, [`None`] is returned. 
489 | pub fn handle_packet(&mut self, packet: &Packet) -> Option { 490 | let mut out = [0; PACKET_SIZE]; 491 | let res = self.sessions.enumerate_mut().find_map(|(rel_session_index, session)| { 492 | let kx_shared_secret = session.kx_pair.exchange(kx_public(packet)); 493 | 494 | let replay_tag = session.replay_filter.tag(&kx_shared_secret); 495 | if session.replay_filter.contains(replay_tag) { 496 | return Some(Err(Either::Left("Packet found in replay filter"))) 497 | } 498 | 499 | match peel(&mut out, packet, &kx_shared_secret) { 500 | // Bad MAC possibly means we used the wrong secret; try other session 501 | Err(PeelErr::Mac) => None, 502 | // Any other error means the packet is bad; just discard it 503 | Err(err) => Some(Err(Either::Right(err))), 504 | Ok(action) => Some(Ok((action, rel_session_index, session, replay_tag))), 505 | } 506 | }); 507 | 508 | let (action, rel_session_index, session, replay_tag) = match res { 509 | None => { 510 | // This will usually get hit quite a bit on session changeover after we discard the 511 | // keys for the previous session. It may get hit just before a new session if other 512 | // nodes switch sooner. 
513 | trace!( 514 | target: self.config.log_target, 515 | "Failed to peel packet; either bad MAC or unknown secret" 516 | ); 517 | return None 518 | }, 519 | Some(Err(err)) => { 520 | debug!(target: self.config.log_target, "Failed to peel packet: {err}"); 521 | return None 522 | }, 523 | Some(Ok(x)) => x, 524 | }; 525 | 526 | match action { 527 | Action::ForwardTo { target, delay } => { 528 | if !session.topology.is_mixnode() { 529 | debug!(target: self.config.log_target, 530 | "Received packet to forward despite not being a mixnode in the session; discarding"); 531 | return None 532 | } 533 | 534 | if !self.forward_packet_queue.has_space() { 535 | debug!(target: self.config.log_target, "Dropped forward packet; forward queue full"); 536 | return None 537 | } 538 | 539 | // After the is_mixnode check to avoid inserting anything into the replay filters 540 | // for sessions where we are not a mixnode 541 | session.replay_filter.insert(replay_tag); 542 | 543 | match session.topology.target_to_peer_id(&target) { 544 | Ok(peer_id) => { 545 | let deadline = 546 | Instant::now() + delay.to_duration(self.config.mean_forwarding_delay); 547 | let packet = AddressedPacket { peer_id, packet: out.into() }; 548 | if self.forward_packet_queue.insert(deadline, packet) { 549 | self.events |= Events::NEXT_FORWARD_PACKET_DEADLINE_CHANGED; 550 | } 551 | }, 552 | Err(err) => debug!( 553 | target: self.config.log_target, 554 | "Failed to map target {target:?} to peer ID: {err}" 555 | ), 556 | } 557 | 558 | None 559 | }, 560 | Action::DeliverRequest => { 561 | let payload_data = array_ref![out, 0, PAYLOAD_DATA_SIZE]; 562 | 563 | if !session.topology.is_mixnode() { 564 | debug!(target: self.config.log_target, 565 | "Received request packet despite not being a mixnode in the session; discarding"); 566 | return None 567 | } 568 | 569 | // After the is_mixnode check to avoid inserting anything into the replay filters 570 | // for sessions where we are not a mixnode 571 | 
session.replay_filter.insert(replay_tag); 572 | 573 | // Add to fragment assembler and return any completed message 574 | self.fragment_assembler.insert(payload_data, self.config.log_target).map( 575 | |message| { 576 | Message::Request(RequestMessage { 577 | session_index: rel_session_index + self.session_status.current_index, 578 | id: message.id, 579 | data: message.data, 580 | surbs: message.surbs, 581 | }) 582 | }, 583 | ) 584 | }, 585 | Action::DeliverReply { surb_id } => { 586 | let payload = array_mut_ref![out, 0, PAYLOAD_SIZE]; 587 | 588 | // Note that we do not insert anything into the replay filter here. The SURB ID 589 | // lookup will fail for replayed SURBs, so explicit replay prevention is not 590 | // necessary. The main reason for avoiding the replay filter here is so that it 591 | // does not need to be allocated at all for sessions where we are not a mixnode. 592 | 593 | // Lookup payload encryption keys and decrypt payload. The original request message 594 | // ID is stored alongside the keys; it is simply returned with any completed 595 | // message to provide context. 
596 | let Some(entry) = self.surb_keystore.entry(&surb_id) else { 597 | debug!(target: self.config.log_target, 598 | "Received reply with unrecognised SURB ID {surb_id:x?}; discarding"); 599 | return None 600 | }; 601 | let request_id = *entry.message_id(); 602 | let res = decrypt_reply_payload(payload, entry.keys()); 603 | entry.remove(); 604 | if let Err(err) = res { 605 | debug!(target: self.config.log_target, "Failed to decrypt reply payload: {err}"); 606 | return None 607 | } 608 | let payload_data = array_ref![payload, 0, PAYLOAD_DATA_SIZE]; 609 | 610 | // Add to fragment assembler and return any completed message 611 | self.fragment_assembler.insert(payload_data, self.config.log_target).map( 612 | |message| { 613 | if !message.surbs.is_empty() { 614 | debug!(target: self.config.log_target, 615 | "Reply message included SURBs; discarding them"); 616 | } 617 | Message::Reply(ReplyMessage { request_id, data: message.data }) 618 | }, 619 | ) 620 | }, 621 | Action::DeliverCover { cover_id: _ } => None, 622 | } 623 | } 624 | 625 | /// Returns the next instant at which 626 | /// [`pop_next_forward_packet`](Self::pop_next_forward_packet) should be called. [`None`] means 627 | /// never. 628 | pub fn next_forward_packet_deadline(&self) -> Option { 629 | self.forward_packet_queue.next_deadline() 630 | } 631 | 632 | /// Pop and return the packet at the head of the forward packet queue. Returns [`None`] if the 633 | /// queue is empty. 634 | pub fn pop_next_forward_packet(&mut self) -> Option { 635 | self.events |= Events::NEXT_FORWARD_PACKET_DEADLINE_CHANGED; 636 | self.forward_packet_queue.pop() 637 | } 638 | 639 | /// Returns the delay after which [`pop_next_authored_packet`](Self::pop_next_authored_packet) 640 | /// should be called. [`None`] means an infinite delay. 
641 | pub fn next_authored_packet_delay(&self) -> Option { 642 | // Determine the mean period 643 | let means: ArrayVec<_, 2> = self 644 | .sessions 645 | .iter() 646 | .map(|session| session.mean_authored_packet_period.as_secs_f64()) 647 | .collect(); 648 | let mean = match means.into_inner() { 649 | // Both sessions active. Send at half rate in each. Note that pop_next_authored_packet 650 | // will choose between the sessions randomly based on their rates. 651 | Ok(means) => (2.0 * means[0] * means[1]) / (means[0] + means[1]), 652 | Err(mut means) => { 653 | let mean = means.pop()?; 654 | // Just one session active 655 | if self.session_status.phase.need_prev() { 656 | // Both sessions _should_ be active. Send at half rate. 657 | 2.0 * mean 658 | } else { 659 | mean 660 | } 661 | }, 662 | }; 663 | 664 | let delay: f64 = rand::thread_rng().sample(rand_distr::Exp1); 665 | // Cap at 10x the mean; this is about the 99.995th percentile. This avoids potential panics 666 | // in from_secs_f64() due to overflow. 667 | Some(Duration::from_secs_f64(delay.min(10.0) * mean)) 668 | } 669 | 670 | /// Either generate and return a cover packet or pop and return the packet at the head of one 671 | /// of the authored packet queues. May return [`None`] if cover packets are disabled, we fail 672 | /// to generate a cover packet, or there are no active sessions (though in the no active 673 | /// sessions case [`next_authored_packet_delay`](Self::next_authored_packet_delay) should 674 | /// return [`None`] and so this function should not really be called). 675 | pub fn pop_next_authored_packet(&mut self, ns: &dyn NetworkStatus) -> Option { 676 | // This function should be called according to a Poisson process. 
Randomly choosing between 677 | // sessions and cover kinds here is equivalent to there being multiple independent Poisson 678 | // processes; see https://www.randomservices.org/random/poisson/Splitting.html 679 | let mut rng = rand::thread_rng(); 680 | 681 | // First pick the session 682 | let sessions: ArrayVec<_, 2> = self.sessions.enumerate_mut().collect(); 683 | let (rel_session_index, session) = match sessions.into_inner() { 684 | Ok(sessions) => { 685 | // Both sessions active. We choose randomly based on their rates. 686 | let periods = sessions 687 | // TODO This could be replaced with .each_ref() once it is stabilised, allowing 688 | // the collect/into_inner/expect at the end to be dropped 689 | .iter() 690 | .map(|(_, session)| session.mean_authored_packet_period.as_secs_f64()) 691 | .collect::>() 692 | .into_inner() 693 | .expect("Input is array of length 2"); 694 | let [session_0, session_1] = sessions; 695 | // Rate is 1/period, and (1/a)/((1/a)+(1/b)) = b/(a+b) 696 | if rng.gen_bool(periods[1] / (periods[0] + periods[1])) { 697 | session_0 698 | } else { 699 | session_1 700 | } 701 | }, 702 | // Either just one active session or no active sessions. This function shouldn't really 703 | // be called in the latter case, as next_authored_packet_delay() should return None. 
704 | Err(mut sessions) => sessions.pop()?, 705 | }; 706 | 707 | self.events |= Events::NEXT_AUTHORED_PACKET_DEADLINE_CHANGED; 708 | 709 | // Choose randomly between drop and loop cover packet 710 | let cover_kind = if rng.gen_bool(self.config.loop_cover_proportion) { 711 | CoverKind::Loop 712 | } else { 713 | CoverKind::Drop 714 | }; 715 | 716 | // Maybe replace drop cover packet with request or reply packet from queue 717 | if (cover_kind == CoverKind::Drop) && 718 | self.session_status.phase.allow_requests_and_replies(rel_session_index) 719 | { 720 | let (packet, space) = session.authored_packet_queue.pop(); 721 | if space { 722 | self.events |= Events::SPACE_IN_AUTHORED_PACKET_QUEUE; 723 | } 724 | if packet.is_some() { 725 | return packet 726 | } 727 | } 728 | 729 | if !self.config.gen_cover_packets { 730 | return None 731 | } 732 | 733 | // Generate cover packet 734 | match gen_cover_packet(&mut rng, &session.topology, ns, cover_kind, self.config.num_hops) { 735 | Ok(packet) => Some(packet), 736 | Err(err) => { 737 | if (self.session_status.phase == SessionPhase::CoverToCurrent) && 738 | (rel_session_index == RelSessionIndex::Current) && 739 | matches!(err, TopologyErr::NoConnectedGatewayMixnodes) 740 | { 741 | // Possibly still connecting to mixnodes 742 | trace!(target: self.config.log_target, "Failed to generate cover packet: {err}"); 743 | } else { 744 | debug!(target: self.config.log_target, "Failed to generate cover packet: {err}"); 745 | } 746 | None 747 | }, 748 | } 749 | } 750 | 751 | /// Post a request message. If `destination_index` is [`None`], a destination mixnode is chosen 752 | /// at random and (on success) its index is written back to `destination_index`. The message is 753 | /// split into fragments and each fragment is sent over a different path to the destination. 
754 | pub fn post_request( 755 | &mut self, 756 | session_index: SessionIndex, 757 | destination_index: &mut Option, 758 | message_id: &MessageId, 759 | data: Scattered, 760 | num_surbs: usize, 761 | ns: &dyn NetworkStatus, 762 | ) -> Result { 763 | // Split the message into fragments 764 | let fragment_blueprints = match fragment_blueprints(message_id, data, num_surbs) { 765 | Some(fragment_blueprints) 766 | if fragment_blueprints.len() <= self.config.max_fragments_per_message => 767 | fragment_blueprints, 768 | _ => return Err(PostErr::TooManyFragments), 769 | }; 770 | 771 | // Grab the session and check there's room in the queue 772 | let session = post_session(&mut self.sessions, self.session_status, session_index)?; 773 | session.authored_packet_queue.check_space(fragment_blueprints.len())?; 774 | 775 | // Generate the packets and push them into the queue 776 | let mut rng = rand::thread_rng(); 777 | let request_builder = 778 | RequestBuilder::new(&mut rng, &session.topology, ns, *destination_index)?; 779 | let mut request_hops = 0; 780 | let mut request_forwarding_delay = Delay::zero(); 781 | let mut reply_hops = 0; 782 | let mut reply_forwarding_delay = Delay::zero(); 783 | for fragment_blueprint in fragment_blueprints { 784 | let (packet, metrics) = request_builder.build_packet( 785 | &mut rng, 786 | |fragment, rng| { 787 | fragment_blueprint.write_except_surbs(fragment); 788 | for surb in fragment_blueprint.surbs(fragment) { 789 | // TODO Currently we don't clean up keystore entries on failure 790 | let (id, keys) = 791 | self.surb_keystore.insert(rng, message_id, self.config.log_target); 792 | let num_hops = self.config.num_hops; 793 | let metrics = request_builder.build_surb(surb, keys, rng, &id, num_hops)?; 794 | reply_hops = max(reply_hops, metrics.num_hops); 795 | reply_forwarding_delay = 796 | max(reply_forwarding_delay, metrics.forwarding_delay); 797 | } 798 | Ok(()) 799 | }, 800 | self.config.num_hops, 801 | )?; 802 | 
session.authored_packet_queue.push(packet); 803 | request_hops = max(request_hops, metrics.num_hops); 804 | request_forwarding_delay = max(request_forwarding_delay, metrics.forwarding_delay); 805 | } 806 | 807 | // Calculate metrics 808 | let metrics = RequestMetrics { 809 | num_hops: request_hops + reply_hops, 810 | per_hop_net_delay: self.config.per_hop_net_delay, 811 | forwarding_delay: (request_forwarding_delay + reply_forwarding_delay) 812 | .to_duration(self.config.mean_forwarding_delay), 813 | authored_packet_queue_delay: estimate_authored_packet_queue_delay( 814 | &self.config, 815 | session, 816 | ), 817 | }; 818 | 819 | *destination_index = Some(request_builder.destination_index()); 820 | Ok(metrics) 821 | } 822 | 823 | /// Post a reply message using SURBs. The session index must match the session the SURBs were 824 | /// generated for. SURBs are removed from `surbs` on use. 825 | pub fn post_reply( 826 | &mut self, 827 | surbs: &mut Vec, 828 | session_index: SessionIndex, 829 | message_id: &MessageId, 830 | data: Scattered, 831 | ) -> Result<(), PostErr> { 832 | // Split the message into fragments 833 | let fragment_blueprints = match fragment_blueprints(message_id, data, 0) { 834 | Some(fragment_blueprints) 835 | if fragment_blueprints.len() <= 836 | min(self.config.max_fragments_per_message, surbs.len()) => 837 | fragment_blueprints, 838 | _ => return Err(PostErr::TooManyFragments), 839 | }; 840 | 841 | // Grab the session and check there's room in the queue 842 | let session = post_session(&mut self.sessions, self.session_status, session_index)?; 843 | session.authored_packet_queue.check_space(fragment_blueprints.len())?; 844 | 845 | // Generate the packets and push them into the queue 846 | for fragment_blueprint in fragment_blueprints { 847 | let mut packet = default_boxed_array(); 848 | fragment_blueprint.write_except_surbs(mut_payload_data(&mut packet)); 849 | let mixnode_index = complete_reply_packet( 850 | &mut packet, 851 | 
&surbs.pop().expect("Checked number of SURBs above"), 852 | ) 853 | .ok_or(PostErr::BadSurb)?; 854 | let peer_id = session.topology.mixnode_index_to_peer_id(mixnode_index)?; 855 | session.authored_packet_queue.push(AddressedPacket { peer_id, packet }); 856 | } 857 | 858 | Ok(()) 859 | } 860 | 861 | /// Clear the event flags. Returns the flags that were cleared. 862 | pub fn take_events(&mut self) -> Events { 863 | let events = self.events; 864 | self.events = Events::empty(); 865 | events 866 | } 867 | } 868 | --------------------------------------------------------------------------------