├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── examples ├── echo_client.rs └── echo_server.rs ├── src ├── client │ └── mod.rs ├── frame │ ├── mod.rs │ └── serial │ │ ├── build.rs │ │ ├── crc.rs │ │ └── mod.rs ├── half_connection │ ├── emit.rs │ ├── frame_ack_queue.rs │ ├── frame_queue.rs │ ├── loss_rate.rs │ ├── mod.rs │ ├── packet_receiver │ │ ├── assembly_window │ │ │ ├── fragment_buffer.rs │ │ │ └── mod.rs │ │ └── mod.rs │ ├── packet_sender.rs │ ├── packet_tests.rs │ ├── pending_packet.rs │ ├── pending_queue.rs │ ├── recv_rate_set.rs │ ├── reorder_buffer.rs │ ├── resend_queue.rs │ └── send_rate.rs ├── lib.rs ├── packet_id.rs ├── server │ ├── event_queue.rs │ ├── mod.rs │ └── remote_client.rs └── udp_frame_sink.rs ├── tests ├── disconnect.rs ├── ideal_transfer.rs ├── reliable_transfer.rs └── timeouts.rs └── whitepaper.pdf /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | 2 | # Changelog 3 | 4 | ## 0.7.1 5 | 6 | * Added the ability to configure timeouts for active connections, as well as 7 | how often keepalive frames are sent. 8 | 9 | ## 0.7.0 10 | 11 | The public API has changed significantly since the previous version: 12 | 13 | * A 3-way handshake similar to TCP has been implemented. This removes the 14 | possibility of connecting two `Client` objects simultaneously, but allows 15 | servers to be far more robust to spam and DDoS reflection attacks. 16 | 17 | * Client objects now manage a single outbound connection, which resolves a 18 | major bug involving simultaneous connections to the same server. In general, 19 | a `Client` object is a drop-in replacement for a `Peer` object. 
20 | 21 | * Packets and connection events are received from the `Client`/`Server` object 22 | directly after the call to `step()`, rather than through an awkward 23 | combination of `step()` followed by `Peer::poll_events()`. This means that 24 | events from all connected clients must be handled in one place, but allows 25 | for easier management of per-connection userdata. 26 | 27 | * For servers, what now corresponds to a `Peer` object (`RemoteClient`) is not 28 | stored within an `Arc<Mutex<...>>`, and thus cannot be passed between 29 | threads. 30 | 31 | ## 0.6.1 32 | 33 | * Subtly optimized memory usage of sender fragment acknowledgement flags 34 | 35 | * Switched to `Arc<Mutex<...>>` internally so as to allow `Peer` objects to be 36 | processed by a separate thread from their containing `Client`/`Server` 37 | 38 | ## 0.6.0 39 | 40 | * Subtly altered the behavior of `(Server|Client)::step()` in order to allow 41 | acknowledgements to indicate which packets have been delivered without 42 | waiting for the next call to `step()` 43 | 44 | * Reduced the space of packet sequence IDs to a comfortable minimum of 20 bits, 45 | and optimized the data frame encoding so that certain packets containing less 46 | than 64 bytes of data require only 6 bytes of overhead 47 | 48 | * Added 32-bit checksums to all frames 49 | 50 | * Added a keepalive flag to `EndpointConfig`, allowing the application to 51 | specify whether or not to send keepalive frames 52 | 53 | * Added a method to `Peer` which returns the total amount of data awaiting 54 | delivery, allowing the application to detect and terminate prohibitively slow 55 | connections 56 | 57 | * Implemented a channel tracking mechanism for received packets which 58 | eliminates any need to specify the number of channels during connection 59 | initialization (and removed the associated `EndpointConfig` parameter) 60 | 61 | * Optimized packet reordering/delivery for sparsely populated receive windows, 62 | unnecessary iteration, and otherwise
cache-efficiency 63 | 64 | * Removed the mostly redundant setters of `EndpointConfig` 65 | 66 | * Improved the windup behavior of traffic shaping through a more opportunistic 67 | leaky bucket algorithm—the result is compliant with RFC 5348, section 4.6 68 | 69 | * Removed time-tracking from calls to `(Server|Client)::flush()`, thereby 70 | ensuring that redundant calls do not affect the flush allocation, and that 71 | `step()` may first call `flush()` without producing erroneous resends 72 | 73 | ## 0.5.1 74 | 75 | Fixed bad protocol version ID 76 | 77 | ## 0.5.0 78 | 79 | Major internal refactoring related to frame queues and TFRC feedback 80 | computation: 81 | 82 | * Eliminated a copy when generating initial fragments 83 | 84 | * Corrected the initial send rate computation, as well as sender behavior when 85 | no data acknowledgements have been received 86 | 87 | * Fixed the loss rate computation to process the next pending frame ID as soon 88 | as it arrives 89 | 90 | * Implemented a frame receive window to ensure the packets of duplicated frames 91 | are ignored, paving the way for a reduced packet sequence ID space 92 | 93 | * Augmented the existing resynchronization scheme to synchronize both the packet 94 | window and the frame window as appropriate 95 | 96 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | 2 | [package] 3 | name = "uflow" 4 | version = "0.7.1" 5 | edition = "2018" 6 | description = "Provides ordered, mixed-reliability, and congestion-controlled data transfer over UDP" 7 | license = "MIT" 8 | repository = "https://github.com/lowquark/uflow" 9 | 10 | [lib] 11 | name = "uflow" 12 | path = "src/lib.rs" 13 | 14 | [dependencies] 15 | rand = "0.8.4" 16 | 17 | [dev-dependencies] 18 | md5 = "0.7.0" 19 | 20 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | 2 | Copyright (c) 2022 David Petrizze III 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to deal 6 | in the Software without restriction, including without limitation the rights 7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be included in all 12 | copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 20 | SOFTWARE. 21 | 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # UFlow 3 | 4 | UFlow is a Rust library and UDP networking protocol for realtime internet data 5 | transfer, with a focus on simplicity and robustness. Though it has been 6 | designed from the ground up, UFlow's interface and functionality are inspired 7 | by the venerable [ENet](http://enet.bespin.org) library. 
8 | 9 | ## Features 10 | 11 | * Packet-oriented data transfer between two hosts 12 | * Automatic packet fragmentation and reassembly according to the internet MTU 13 | (1500 bytes) 14 | * 3-way connection handshake for proper connection management 15 | * Up to 64 independently sequenced packet streams 16 | * 4 intuitive packet transfer modes: *Time-Sensitive*, *Unreliable*, 17 | *Persistent*, and *Reliable* 18 | * TCP-friendly, streaming congestion control implemented according to [RFC 5348](https://datatracker.ietf.org/doc/html/rfc5348) 19 | * Efficient frame encoding and transfer protocol with minimal packet overhead 20 | * CRC validation for all transmitted frames ([Polynomial: 0x132c00699](http://users.ece.cmu.edu/~koopman/crc/hd6.html)) 21 | * 100% packet throughput and an unaffected delivery order under ideal network 22 | conditions 23 | * Water-tight sequence ID management for maximum dup-mitigation 24 | * Application-configurable receiver memory limits (to prevent memory 25 | allocation attacks) 26 | * Nonce-validated data acknowledgements (to prevent loss rate / bandwidth 27 | spoofing) 28 | * Resilient to DDoS amplification (request-reply ratio ≈ 28:1) 29 | * Meticulously designed and unit tested to ensure stall-free behavior 30 | * Threadless, non-blocking implementation 31 | 32 | ## Documentation 33 | 34 | Documentation can be found at [docs.rs](https://docs.rs/uflow/latest/uflow/). 35 | 36 | ## Architecture 37 | 38 | Although a previous version is described in the [whitepaper](whitepaper.pdf), 39 | much has changed about the library in the meantime (including the name!). 
The 40 | current version has the following improvements: 41 | 42 | * TCP-friendly congestion control implemented according to RFC 5348 43 | * Receiver memory limits (for packet reassembly) 44 | * No sentinel packets or frames 45 | * An additional packet send mode which causes packets to be dropped if they 46 | cannot be sent immediately (Time-Sensitive) 47 | * No iteration over the number of channels 48 | 49 | The new design will soon™ be summarized in an updated whitepaper. 50 | 51 | -------------------------------------------------------------------------------- /examples/echo_client.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | let server_address = "127.0.0.1:8888"; 3 | let config = Default::default(); 4 | 5 | // Create a client object 6 | let mut client = uflow::client::Client::connect(server_address, config).unwrap(); 7 | 8 | let mut send_counter = 0; 9 | let mut message_counter = 0; 10 | 11 | loop { 12 | // Process inbound UDP frames and handle events 13 | for event in client.step() { 14 | match event { 15 | uflow::client::Event::Connect => { 16 | println!("connected to server"); 17 | } 18 | uflow::client::Event::Disconnect => { 19 | println!("disconnected from server"); 20 | } 21 | uflow::client::Event::Error(err) => { 22 | println!("server connection error: {:?}", err); 23 | } 24 | uflow::client::Event::Receive(packet_data) => { 25 | let packet_data_utf8 = std::str::from_utf8(&packet_data).unwrap(); 26 | 27 | println!("received \"{}\"", packet_data_utf8); 28 | } 29 | } 30 | } 31 | 32 | // Periodically send incrementing hello worlds on channel 0 33 | send_counter += 1; 34 | 35 | if send_counter == 10 { 36 | let packet_data: Box<[u8]> = format!("Hello world {}!", message_counter).as_bytes().into(); 37 | let channel_id = 0; 38 | let send_mode = uflow::SendMode::Unreliable; 39 | 40 | client.send(packet_data, channel_id, send_mode); 41 | 42 | send_counter = 0; 43 | message_counter += 1; 44 | } 45 | 46 | 
// Flush outbound UDP frames 47 | client.flush(); 48 | 49 | std::thread::sleep(std::time::Duration::from_millis(30)); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /examples/echo_server.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | let server_address = "127.0.0.1:8888"; 3 | let config = Default::default(); 4 | 5 | // Create a server object 6 | let mut server = uflow::server::Server::bind(server_address, config).unwrap(); 7 | 8 | loop { 9 | // Process inbound UDP frames and handle events 10 | for event in server.step() { 11 | match event { 12 | uflow::server::Event::Connect(client_address) => { 13 | println!("[{:?}] connected", client_address); 14 | } 15 | uflow::server::Event::Disconnect(client_address) => { 16 | println!("[{:?}] disconnected", client_address); 17 | } 18 | uflow::server::Event::Error(client_address, err) => { 19 | println!("[{:?}] error: {:?}", client_address, err); 20 | } 21 | uflow::server::Event::Receive(client_address, packet_data) => { 22 | let packet_data_utf8 = std::str::from_utf8(&packet_data).unwrap(); 23 | let reversed_string: std::string::String = packet_data_utf8.chars().rev().collect(); 24 | 25 | println!("[{:?}] received \"{}\"", client_address, packet_data_utf8); 26 | 27 | let mut client = server.client(&client_address).unwrap().borrow_mut(); 28 | 29 | // Echo the packet reliably on channel 0 30 | client.send(packet_data, 0, uflow::SendMode::Reliable); 31 | 32 | // Echo the reverse of the packet unreliably on channel 1 33 | client.send(reversed_string.as_bytes().into(), 1, uflow::SendMode::Unreliable); 34 | } 35 | } 36 | } 37 | 38 | // Flush outbound UDP frames 39 | server.flush(); 40 | 41 | std::thread::sleep(std::time::Duration::from_millis(30)); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/frame/mod.rs: 
--------------------------------------------------------------------------------

//! Frame-level message definitions for the wire protocol.
//!
//! Each struct here mirrors one frame type that can appear on the wire; the
//! `serial` submodule handles encoding/decoding. Generic type parameters in
//! this file were stripped by text extraction (`Vec,`, `Option,`, `From> for`)
//! and have been reconstructed from the surrounding definitions — NOTE(review):
//! confirm `Option<u32>` in `SyncFrame` against the upstream repository.

pub mod serial;

/// Initial connection request (first step of the 3-way handshake).
#[derive(Clone,Debug,PartialEq)]
pub struct HandshakeSynFrame {
    /// Protocol version proposed by the initiator.
    pub version: u8,
    /// Random value echoed back by the receiver to validate the handshake.
    pub nonce: u32,
    pub max_receive_rate: u32,
    pub max_packet_size: u32,
    pub max_receive_alloc: u32,
}

/// Server's reply to a SYN: acknowledges the client nonce and supplies its own.
#[derive(Clone,Debug,PartialEq)]
pub struct HandshakeSynAckFrame {
    /// Echo of the client's `nonce` from the SYN frame.
    pub nonce_ack: u32,
    pub nonce: u32,
    pub max_receive_rate: u32,
    pub max_packet_size: u32,
    pub max_receive_alloc: u32,
}

/// Final handshake step: client acknowledges the server's nonce.
#[derive(Clone,Debug,PartialEq)]
pub struct HandshakeAckFrame {
    pub nonce_ack: u32,
}

/// Reason a handshake was rejected.
#[derive(Clone,Debug,PartialEq)]
pub enum HandshakeErrorType {
    Version,
    Config,
    ServerFull,
}

/// Handshake rejection, carrying the nonce being refused and the reason.
#[derive(Clone,Debug,PartialEq)]
pub struct HandshakeErrorFrame {
    pub nonce_ack: u32,
    pub error: HandshakeErrorType,
}

/// Out-of-band server information request.
#[derive(Clone,Debug,PartialEq)]
pub struct InfoRequestFrame {
    pub version: u8,
}

/// Reply to an [`InfoRequestFrame`].
#[derive(Clone,Debug,PartialEq)]
pub struct InfoReplyFrame {
    pub peer_count: u32,
    pub max_peer_count: u32,
    pub name: String,
}

/// Connection teardown request (no payload).
#[derive(Clone,Debug,PartialEq)]
pub struct DisconnectFrame {
}

/// Acknowledgement of a [`DisconnectFrame`] (no payload).
#[derive(Clone,Debug,PartialEq)]
pub struct DisconnectAckFrame {
}

/// One packet (or packet fragment) as carried inside a data frame.
///
/// `fragment_id_last == 0` means the datagram is a whole, unfragmented packet
/// (in which case `fragment_id` must also be 0 — see `serial::build`).
#[derive(Clone,Debug,PartialEq)]
pub struct Datagram {
    pub sequence_id: u32,
    pub channel_id: u8,
    pub window_parent_lead: u16,
    pub channel_parent_lead: u16,
    pub fragment_id: u16,
    pub fragment_id_last: u16,
    pub data: Box<[u8]>,
}

/// Borrowed view of a [`Datagram`]; used by the frame builders to avoid
/// copying payload data during serialization.
#[derive(Debug,PartialEq)]
pub struct DatagramRef<'a> {
    pub sequence_id: u32,
    pub channel_id: u8,
    pub window_parent_lead: u16,
    pub channel_parent_lead: u16,
    pub fragment_id: u16,
    pub fragment_id_last: u16,
    pub data: &'a [u8],
}

impl<'a> From<&'a Datagram> for DatagramRef<'a> {
    fn from(obj: &'a Datagram) -> Self {
        Self {
            sequence_id: obj.sequence_id,
            channel_id: obj.channel_id,
            window_parent_lead: obj.window_parent_lead,
            channel_parent_lead: obj.channel_parent_lead,
            fragment_id: obj.fragment_id,
            fragment_id_last: obj.fragment_id_last,
            data: &obj.data,
        }
    }
}

impl<'a> From<DatagramRef<'a>> for Datagram {
    fn from(obj: DatagramRef<'a>) -> Self {
        Self {
            sequence_id: obj.sequence_id,
            channel_id: obj.channel_id,
            window_parent_lead: obj.window_parent_lead,
            channel_parent_lead: obj.channel_parent_lead,
            fragment_id: obj.fragment_id,
            fragment_id_last: obj.fragment_id_last,
            // Copies the borrowed payload into an owned boxed slice.
            data: obj.data.into(),
        }
    }
}

/// A sequenced frame carrying one or more datagrams, plus the nonce used for
/// acknowledgement validation.
#[derive(Clone,Debug,PartialEq)]
pub struct DataFrame {
    pub sequence_id: u32,
    pub nonce: bool,
    pub datagrams: Vec<Datagram>,
}

/// Resynchronization frame: advances the receiver's frame and/or packet
/// windows. `None` leaves the corresponding window untouched.
#[derive(Clone,Debug,PartialEq)]
pub struct SyncFrame {
    pub next_frame_id: Option<u32>,
    pub next_packet_id: Option<u32>,
}

/// Acknowledgement of up to 32 frames relative to `base_id`, with the combined
/// nonce of the acknowledged frames (to prevent ack spoofing).
#[derive(Clone,Debug,PartialEq)]
pub struct AckGroup {
    pub base_id: u32,
    pub bitfield: u32,
    pub nonce: bool,
}

/// Acknowledgement frame: reports both receive-window bases and a list of
/// frame ack groups.
#[derive(Clone,Debug,PartialEq)]
pub struct AckFrame {
    pub frame_window_base_id: u32,
    pub packet_window_base_id: u32,
    pub frame_acks: Vec<AckGroup>,
}

/// Any decoded frame. (Info request/reply frames are not listed here; they
/// are presumably handled out-of-band — TODO confirm upstream.)
#[derive(Clone,Debug,PartialEq)]
pub enum Frame {
    HandshakeSynFrame(HandshakeSynFrame),
    HandshakeSynAckFrame(HandshakeSynAckFrame),
    HandshakeAckFrame(HandshakeAckFrame),
    HandshakeErrorFrame(HandshakeErrorFrame),
    DisconnectFrame(DisconnectFrame),
    DisconnectAckFrame(DisconnectAckFrame),
    DataFrame(DataFrame),
    SyncFrame(SyncFrame),
    AckFrame(AckFrame),
}

--------------------------------------------------------------------------------
/src/frame/serial/build.rs:
-------------------------------------------------------------------------------- 1 | 2 | use super::DatagramRef; 3 | use super::AckGroup; 4 | 5 | use super::DATA_FRAME_ID; 6 | use super::DATA_FRAME_MAX_DATAGRAM_COUNT; 7 | use super::DATAGRAM_HEADER_SIZE_MICRO; 8 | use super::DATAGRAM_HEADER_SIZE_SMALL; 9 | use super::DATAGRAM_HEADER_SIZE_LARGE; 10 | 11 | use super::ACK_FRAME_ID; 12 | use super::ACK_GROUP_SIZE; 13 | 14 | use super::FRAME_CRC_SIZE; 15 | use super::MAX_CHANNELS; 16 | 17 | use super::crc; 18 | 19 | use crate::packet_id; 20 | 21 | // If we allow a maximum of 64 packets per frame, and 8192 frames per transfer window, then there 22 | // are 16384 frames in the receive window, and a 20-bit sequence ID is sufficient to ensure no 23 | // packets are ambiguous within it 24 | 25 | // C: Channel ID [0, 2^6) 26 | // S: Sequence ID [0, 2^20) 27 | // D: Payload length [0, 2^16) 28 | // W: Window parent lead [0, 2^16) 29 | // H: Channel parent lead [0, 2^16) 30 | // F: Fragment ID [0, 2^16) 31 | // L: Last fragment ID [0, 2^16) 32 | 33 | // L == 0 => F == 0 34 | 35 | // If L == 0 && D < 64 && W < 128 && H < 256: 36 | // Micro header (6 bytes) 37 | // 0CDDDDDD SSSSCCCC SSSSSSSS SSSSSSSS CWWWWWWW HHHHHHHH 38 | 39 | // Else if L == 0 && D < 256 && L == 0: 40 | // Small header (9 bytes) 41 | // 10CCCCCC DDDDDDDD 0000SSSS SSSSSSSS SSSSSSSS WWWWWWWW WWWWWWWW HHHHHHHH HHHHHHHH 42 | 43 | // Else: 44 | // Large header (14 bytes) 45 | // 11CCCCCC DDDDDDDD DDDDDDDD 0000SSSS SSSSSSSS SSSSSSSS WWWWWWWW WWWWWWWW HHHHHHHH HHHHHHHH FFFFFFFF FFFFFFFF LLLLLLLL LLLLLLLL 46 | 47 | pub struct DataFrameBuilder { 48 | buffer: Vec, 49 | count: usize, 50 | } 51 | 52 | impl DataFrameBuilder { 53 | pub const MAX_COUNT: usize = DATA_FRAME_MAX_DATAGRAM_COUNT; 54 | 55 | pub fn new(sequence_id: u32, nonce: bool) -> Self { 56 | // TODO: Could also place nonce + 6-bit length in header byte 57 | 58 | let header = vec![ 59 | DATA_FRAME_ID, 60 | (sequence_id >> 24) as u8, 61 | (sequence_id >> 16) as 
u8, 62 | (sequence_id >> 8) as u8, 63 | (sequence_id ) as u8, 64 | (nonce as u8) << 7, 65 | ]; 66 | 67 | Self { 68 | buffer: header, 69 | count: 0, 70 | } 71 | } 72 | 73 | pub fn add(&mut self, datagram: &DatagramRef) { 74 | debug_assert!((datagram.channel_id as usize) < MAX_CHANNELS); 75 | debug_assert!(packet_id::is_valid(datagram.sequence_id)); 76 | debug_assert!(datagram.data.len() <= u16::MAX as usize); 77 | debug_assert!(self.count < DATA_FRAME_MAX_DATAGRAM_COUNT); 78 | 79 | let data_len_u16 = datagram.data.len() as u16; 80 | 81 | if datagram.fragment_id_last == 0 { 82 | debug_assert!(datagram.fragment_id == 0); 83 | 84 | if data_len_u16 < 64 && datagram.window_parent_lead < 128 && datagram.channel_parent_lead < 256 { 85 | // Micro 86 | let header = [ 87 | data_len_u16 as u8 | (datagram.channel_id & 0x10) << 2, 88 | (datagram.sequence_id >> 12) as u8 & 0xF0 | datagram.channel_id & 0x0F, 89 | (datagram.sequence_id >> 8) as u8, 90 | (datagram.sequence_id ) as u8, 91 | datagram.window_parent_lead as u8 | (datagram.channel_id & 0x20) << 2, 92 | datagram.channel_parent_lead as u8, 93 | ]; 94 | 95 | self.buffer.extend_from_slice(&header); 96 | self.buffer.extend_from_slice(&datagram.data); 97 | self.count += 1; 98 | 99 | return; 100 | } else if data_len_u16 < 256 { 101 | // Small 102 | let header = [ 103 | datagram.channel_id | 0x80, 104 | data_len_u16 as u8, 105 | (datagram.sequence_id >> 16) as u8, 106 | (datagram.sequence_id >> 8) as u8, 107 | (datagram.sequence_id ) as u8, 108 | (datagram.window_parent_lead >> 8) as u8, 109 | (datagram.window_parent_lead ) as u8, 110 | (datagram.channel_parent_lead >> 8) as u8, 111 | (datagram.channel_parent_lead ) as u8, 112 | ]; 113 | 114 | self.buffer.extend_from_slice(&header); 115 | self.buffer.extend_from_slice(&datagram.data); 116 | self.count += 1; 117 | 118 | return; 119 | } 120 | } 121 | 122 | // Large 123 | let header = [ 124 | datagram.channel_id | 0xC0, 125 | (data_len_u16 >> 8) as u8, 126 | (data_len_u16 ) as u8, 
127 | (datagram.sequence_id >> 16) as u8, 128 | (datagram.sequence_id >> 8) as u8, 129 | (datagram.sequence_id ) as u8, 130 | (datagram.window_parent_lead >> 8) as u8, 131 | (datagram.window_parent_lead ) as u8, 132 | (datagram.channel_parent_lead >> 8) as u8, 133 | (datagram.channel_parent_lead ) as u8, 134 | (datagram.fragment_id >> 8) as u8, 135 | (datagram.fragment_id ) as u8, 136 | (datagram.fragment_id_last >> 8) as u8, 137 | (datagram.fragment_id_last ) as u8, 138 | ]; 139 | 140 | self.buffer.extend_from_slice(&header); 141 | self.buffer.extend_from_slice(&datagram.data); 142 | self.count += 1; 143 | } 144 | 145 | pub fn build(mut self) -> Box<[u8]> { 146 | debug_assert!(self.count <= DATA_FRAME_MAX_DATAGRAM_COUNT); 147 | 148 | let nack_count_offset = 5; 149 | self.buffer[nack_count_offset] |= self.count as u8; 150 | 151 | let data_bytes = self.buffer.as_slice(); 152 | let crc = crc::compute(&data_bytes); 153 | 154 | self.buffer.extend_from_slice(&[ 155 | (crc >> 24) as u8, 156 | (crc >> 16) as u8, 157 | (crc >> 8) as u8, 158 | (crc ) as u8, 159 | ]); 160 | 161 | self.buffer.into_boxed_slice() 162 | } 163 | 164 | pub fn count(&self) -> usize { 165 | self.count 166 | } 167 | 168 | pub fn size(&self) -> usize { 169 | self.buffer.len() + FRAME_CRC_SIZE 170 | } 171 | 172 | pub fn encoded_size(datagram: &DatagramRef) -> usize { 173 | let data_len = datagram.data.len(); 174 | 175 | if datagram.fragment_id_last == 0 { 176 | if data_len < 64 && datagram.window_parent_lead < 128 && datagram.channel_parent_lead < 256 { 177 | return DATAGRAM_HEADER_SIZE_MICRO + data_len; 178 | } else if data_len < 256 { 179 | return DATAGRAM_HEADER_SIZE_SMALL + data_len; 180 | } 181 | } 182 | return DATAGRAM_HEADER_SIZE_LARGE + data_len; 183 | } 184 | } 185 | 186 | pub struct AckFrameBuilder { 187 | buffer: Vec, 188 | count: u16, 189 | } 190 | 191 | impl AckFrameBuilder { 192 | pub fn new(frame_window_base_id: u32, packet_window_base_id: u32) -> Self { 193 | let header = vec![ 194 | 
ACK_FRAME_ID, 195 | (frame_window_base_id >> 24) as u8, 196 | (frame_window_base_id >> 16) as u8, 197 | (frame_window_base_id >> 8) as u8, 198 | (frame_window_base_id ) as u8, 199 | (packet_window_base_id >> 24) as u8, 200 | (packet_window_base_id >> 16) as u8, 201 | (packet_window_base_id >> 8) as u8, 202 | (packet_window_base_id ) as u8, 203 | 0, 204 | 0 205 | ]; 206 | 207 | Self { 208 | buffer: header, 209 | count: 0, 210 | } 211 | } 212 | 213 | pub fn add(&mut self, frame_ack: &AckGroup) { 214 | let header = [ 215 | (frame_ack.base_id >> 24) as u8, 216 | (frame_ack.base_id >> 16) as u8, 217 | (frame_ack.base_id >> 8) as u8, 218 | (frame_ack.base_id ) as u8, 219 | (frame_ack.bitfield >> 24) as u8, 220 | (frame_ack.bitfield >> 16) as u8, 221 | (frame_ack.bitfield >> 8) as u8, 222 | (frame_ack.bitfield ) as u8, 223 | frame_ack.nonce as u8, 224 | ]; 225 | 226 | self.buffer.extend_from_slice(&header); 227 | self.count += 1; 228 | } 229 | 230 | pub fn build(mut self) -> Box<[u8]> { 231 | let count_offset_0 = 9; 232 | let count_offset_1 = 10; 233 | self.buffer[count_offset_0] = (self.count >> 8) as u8; 234 | self.buffer[count_offset_1] = (self.count ) as u8; 235 | 236 | let data_bytes = self.buffer.as_slice(); 237 | let crc = crc::compute(&data_bytes); 238 | 239 | self.buffer.extend_from_slice(&[ 240 | (crc >> 24) as u8, 241 | (crc >> 16) as u8, 242 | (crc >> 8) as u8, 243 | (crc ) as u8, 244 | ]); 245 | 246 | self.buffer.into_boxed_slice() 247 | } 248 | 249 | pub fn size(&self) -> usize { 250 | self.buffer.len() + FRAME_CRC_SIZE 251 | } 252 | 253 | pub fn encoded_size(_frame_ack: &AckGroup) -> usize { 254 | ACK_GROUP_SIZE 255 | } 256 | } 257 | 258 | -------------------------------------------------------------------------------- /src/frame/serial/crc.rs: -------------------------------------------------------------------------------- 1 | 2 | // polynomial: x^32 + x^29 + x^28 + x^25 + x^23 + x^22 + x^10 + x^9 + x^7 + x^4 + x^3 + 1 3 | // bits: 
100110010110000000000011010011001 (0x132c00699) 4 | // Selected from: https://users.ece.cmu.edu/~koopman/crc/hd6.html 5 | 6 | // lsb msb 7 | // v v 8 | // 11111111 111111111111111111111111 00000000 9 | // -------- ------------------------ -------- 10 | // F F F F F F F F -> Initial CRC of !0x00000000, XORed by input byte 0x00 11 | // 12 | // 10011001 011000000000001101001100 1 13 | // ------- ------------------------ - 14 | // C 4 3 0 0 6 9 9 -> XOR factor is 0x9960034C 15 | // 16 | // 1100110 100111111111110010110011 1 17 | // 1001100 101100000000000110100110 01 18 | // 19 | // 101010 001011111111110100010101 11 20 | // 100110 010110000000000011010011 001 21 | // 22 | // 01100 011101111111110111000110 111 23 | // 00000 000000000000000000000000 0000 24 | // 25 | // 1100 011101111111110111000110 1110 26 | // 1001 100101100000000000110100 11001 27 | // 28 | // 101 111000011111110111110010 00101 29 | // 100 110010110000000000011010 011001 30 | // 31 | // 01 001010101111110111101000 010011 32 | // 00 000000000000000000000000 0000000 33 | // 34 | // 1 001010101111110111101000 0100110 35 | // 1 001100101100000000000110 10011001 36 | // 37 | // 000110000011110111101110 11010101 38 | // ------------------------ -------- 39 | // 8 1 C B 7 7 B A -> Partial result for 0x00 is !0xAB77BC18 = 0x548843E7 40 | 41 | static INITIAL_CRC: u32 = 0; 42 | 43 | #[cfg(test)] 44 | fn extend_slow(initial_crc: u32, data: &[u8]) -> u32 { 45 | let mut reg = !initial_crc; 46 | for &byte in data.iter() { 47 | reg ^= byte as u32; 48 | for _ in 0 .. 
8 { 49 | reg = if reg & 0x00000001 != 0 { 50 | (reg >> 1) ^ 0x9960034C 51 | } else { 52 | reg >> 1 53 | }; 54 | } 55 | } 56 | return !reg; 57 | } 58 | 59 | static PARTIAL_RESULTS: [u32; 256] = [ 60 | 0x548843E7, 0x9AD08156, 0xFAF9C01C, 0x34A102AD, 0x3AAB4288, 0xF4F38039, 0x94DAC173, 0x5A8203C2, 61 | 0x88CE4139, 0x46968388, 0x26BFC2C2, 0xE8E70073, 0xE6ED4056, 0x28B582E7, 0x489CC3AD, 0x86C4011C, 62 | 0xDEC440C2, 0x109C8273, 0x70B5C339, 0xBEED0188, 0xB0E741AD, 0x7EBF831C, 0x1E96C256, 0xD0CE00E7, 63 | 0x0282421C, 0xCCDA80AD, 0xACF3C1E7, 0x62AB0356, 0x6CA14373, 0xA2F981C2, 0xC2D0C088, 0x0C880239, 64 | 0x72D04334, 0xBC888185, 0xDCA1C0CF, 0x12F9027E, 0x1CF3425B, 0xD2AB80EA, 0xB282C1A0, 0x7CDA0311, 65 | 0xAE9641EA, 0x60CE835B, 0x00E7C211, 0xCEBF00A0, 0xC0B54085, 0x0EED8234, 0x6EC4C37E, 0xA09C01CF, 66 | 0xF89C4011, 0x36C482A0, 0x56EDC3EA, 0x98B5015B, 0x96BF417E, 0x58E783CF, 0x38CEC285, 0xF6960034, 67 | 0x24DA42CF, 0xEA82807E, 0x8AABC134, 0x44F30385, 0x4AF943A0, 0x84A18111, 0xE488C05B, 0x2AD002EA, 68 | 0x18384241, 0xD66080F0, 0xB649C1BA, 0x7811030B, 0x761B432E, 0xB843819F, 0xD86AC0D5, 0x16320264, 69 | 0xC47E409F, 0x0A26822E, 0x6A0FC364, 0xA45701D5, 0xAA5D41F0, 0x64058341, 0x042CC20B, 0xCA7400BA, 70 | 0x92744164, 0x5C2C83D5, 0x3C05C29F, 0xF25D002E, 0xFC57400B, 0x320F82BA, 0x5226C3F0, 0x9C7E0141, 71 | 0x4E3243BA, 0x806A810B, 0xE043C041, 0x2E1B02F0, 0x201142D5, 0xEE498064, 0x8E60C12E, 0x4038039F, 72 | 0x3E604292, 0xF0388023, 0x9011C169, 0x5E4903D8, 0x504343FD, 0x9E1B814C, 0xFE32C006, 0x306A02B7, 73 | 0xE226404C, 0x2C7E82FD, 0x4C57C3B7, 0x820F0106, 0x8C054123, 0x425D8392, 0x2274C2D8, 0xEC2C0069, 74 | 0xB42C41B7, 0x7A748306, 0x1A5DC24C, 0xD40500FD, 0xDA0F40D8, 0x14578269, 0x747EC323, 0xBA260192, 75 | 0x686A4369, 0xA63281D8, 0xC61BC092, 0x08430223, 0x06494206, 0xC81180B7, 0xA838C1FD, 0x6660034C, 76 | 0xCDE840AB, 0x03B0821A, 0x6399C350, 0xADC101E1, 0xA3CB41C4, 0x6D938375, 0x0DBAC23F, 0xC3E2008E, 77 | 0x11AE4275, 0xDFF680C4, 0xBFDFC18E, 0x7187033F, 0x7F8D431A, 0xB1D581AB, 
0xD1FCC0E1, 0x1FA40250, 78 | 0x47A4438E, 0x89FC813F, 0xE9D5C075, 0x278D02C4, 0x298742E1, 0xE7DF8050, 0x87F6C11A, 0x49AE03AB, 79 | 0x9BE24150, 0x55BA83E1, 0x3593C2AB, 0xFBCB001A, 0xF5C1403F, 0x3B99828E, 0x5BB0C3C4, 0x95E80175, 80 | 0xEBB04078, 0x25E882C9, 0x45C1C383, 0x8B990132, 0x85934117, 0x4BCB83A6, 0x2BE2C2EC, 0xE5BA005D, 81 | 0x37F642A6, 0xF9AE8017, 0x9987C15D, 0x57DF03EC, 0x59D543C9, 0x978D8178, 0xF7A4C032, 0x39FC0283, 82 | 0x61FC435D, 0xAFA481EC, 0xCF8DC0A6, 0x01D50217, 0x0FDF4232, 0xC1878083, 0xA1AEC1C9, 0x6FF60378, 83 | 0xBDBA4183, 0x73E28332, 0x13CBC278, 0xDD9300C9, 0xD39940EC, 0x1DC1825D, 0x7DE8C317, 0xB3B001A6, 84 | 0x8158410D, 0x4F0083BC, 0x2F29C2F6, 0xE1710047, 0xEF7B4062, 0x212382D3, 0x410AC399, 0x8F520128, 85 | 0x5D1E43D3, 0x93468162, 0xF36FC028, 0x3D370299, 0x333D42BC, 0xFD65800D, 0x9D4CC147, 0x531403F6, 86 | 0x0B144228, 0xC54C8099, 0xA565C1D3, 0x6B3D0362, 0x65374347, 0xAB6F81F6, 0xCB46C0BC, 0x051E020D, 87 | 0xD75240F6, 0x190A8247, 0x7923C30D, 0xB77B01BC, 0xB9714199, 0x77298328, 0x1700C262, 0xD95800D3, 88 | 0xA70041DE, 0x6958836F, 0x0971C225, 0xC7290094, 0xC92340B1, 0x077B8200, 0x6752C34A, 0xA90A01FB, 89 | 0x7B464300, 0xB51E81B1, 0xD537C0FB, 0x1B6F024A, 0x1565426F, 0xDB3D80DE, 0xBB14C194, 0x754C0325, 90 | 0x2D4C42FB, 0xE314804A, 0x833DC100, 0x4D6503B1, 0x436F4394, 0x8D378125, 0xED1EC06F, 0x234602DE, 91 | 0xF10A4025, 0x3F528294, 0x5F7BC3DE, 0x9123016F, 0x9F29414A, 0x517183FB, 0x3158C2B1, 0xFF000000, 92 | ]; 93 | 94 | pub fn extend(initial_crc: u32, data: &[u8]) -> u32 { 95 | let mut crc = initial_crc; 96 | for &byte in data.iter() { 97 | crc = (crc >> 8) ^ PARTIAL_RESULTS[(crc as u8 ^ byte) as usize]; 98 | } 99 | return crc; 100 | } 101 | 102 | pub fn compute(data: &[u8]) -> u32 { 103 | extend(INITIAL_CRC, data) 104 | } 105 | 106 | #[cfg(test)] 107 | mod tests { 108 | use super::*; 109 | 110 | /* 111 | #[test] 112 | fn main() { 113 | println!("polynomial: {:b}", 0x132c00699u64); 114 | 115 | let check_value = extend_slow(0, 
"123456789".as_bytes().into()); 116 | println!("check value: {:X}", check_value); 117 | 118 | let mut code: usize = 0; 119 | for _ in 0 .. 32 { 120 | for _ in 0 .. 8 { 121 | print!("0x{:08X}, ", extend_slow(0, &[code as u8])); 122 | code += 1; 123 | } 124 | println!(""); 125 | } 126 | } 127 | */ 128 | 129 | #[test] 130 | fn zero_nonzero_crc() { 131 | assert!(extend_slow(0, &[0]) != 0); 132 | } 133 | 134 | #[test] 135 | fn basic() { 136 | assert_eq!(extend_slow(0, "123456789".as_bytes().into()), 0x11A6F2A3); 137 | assert_eq!(extend(0, "123456789".as_bytes().into()), 0x11A6F2A3); 138 | } 139 | 140 | #[test] 141 | fn random() { 142 | for _ in 0 .. 100 { 143 | let data = (0 .. 1024).map(|_| rand::random::()).collect::>().into_boxed_slice(); 144 | let initial_crc = rand::random::(); 145 | assert_eq!(extend_slow(initial_crc, &data), extend(initial_crc, &data)); 146 | } 147 | } 148 | } 149 | 150 | -------------------------------------------------------------------------------- /src/half_connection/emit.rs: -------------------------------------------------------------------------------- 1 | 2 | use crate::frame; 3 | use crate::frame::serial::AckFrameBuilder; 4 | use crate::frame::serial::DataFrameBuilder; 5 | use crate::MAX_FRAME_SIZE; 6 | use crate::MAX_FRAME_WINDOW_SIZE; 7 | use crate::packet_id; 8 | 9 | use super::pending_packet; 10 | use super::frame_queue; 11 | 12 | #[derive(Debug,PartialEq)] 13 | pub enum DataPushError { 14 | SizeLimited, 15 | WindowLimited, 16 | } 17 | 18 | struct InProgressDataFrame { 19 | fbuilder: frame::serial::DataFrameBuilder, 20 | resend_refs: Vec, 21 | nonce: bool, 22 | } 23 | 24 | pub struct DataFrameEmitter<'a, F> { 25 | now_ms: u64, 26 | frame_queue: &'a mut frame_queue::FrameQueue, 27 | 28 | in_progress_frame: Option, 29 | flush_alloc: isize, 30 | emit_cb: F, 31 | } 32 | 33 | impl<'a, F> DataFrameEmitter<'a, F> where F: FnMut(Box<[u8]>) { 34 | pub fn new(now_ms: u64, frame_queue: &'a mut frame_queue::FrameQueue, flush_alloc: isize, 
emit_cb: F) -> Self { 35 | Self { 36 | now_ms, 37 | frame_queue, 38 | 39 | in_progress_frame: None, 40 | flush_alloc, 41 | emit_cb, 42 | } 43 | } 44 | 45 | // Returns Ok(()) if the datagram was added successfully 46 | // Returns Err(DataPushError) if the datagram could not be added 47 | pub fn push(&mut self, packet_rc: &pending_packet::PendingPacketRc, fragment_id: u16, resend: bool) -> Result<(), DataPushError> { 48 | let packet_ref = packet_rc.borrow(); 49 | let datagram = packet_ref.datagram(fragment_id); 50 | 51 | if let Some(ref mut next_frame) = self.in_progress_frame { 52 | // Try to add to existing frame 53 | let frame_size = next_frame.fbuilder.size(); 54 | let potential_frame_size = frame_size + DataFrameBuilder::encoded_size(&datagram); 55 | 56 | // Restrict the number of datagrams per frame to ensure that packet IDs are unique over 57 | // the receiver's frame window, which has size MAX_FRAME_WINDOW_SIZE * 2. I.e.: 58 | // 59 | // max_packet_count * MAX_FRAME_WINDOW_SIZE * 2 <= packet_id::SPAN 60 | 61 | let max_packet_count = 62 | ((packet_id::SPAN / (MAX_FRAME_WINDOW_SIZE * 2)) as usize).min(frame::serial::DataFrameBuilder::MAX_COUNT); 63 | 64 | if (self.flush_alloc - frame_size as isize) < 0 { 65 | // Out of bandwidth 66 | self.finalize(); 67 | self.frame_queue.mark_rate_limited(); 68 | return Err(DataPushError::SizeLimited); 69 | } else if potential_frame_size > MAX_FRAME_SIZE || next_frame.fbuilder.count() >= max_packet_count { 70 | // Would exceed maximum 71 | self.finalize(); 72 | } else { 73 | next_frame.fbuilder.add(&datagram); 74 | debug_assert!(next_frame.fbuilder.size() == potential_frame_size); 75 | if resend { 76 | next_frame.resend_refs.push(pending_packet::FragmentRef::new(packet_rc, fragment_id)); 77 | } 78 | return Ok(()); 79 | } 80 | } 81 | 82 | // Try to add to new frame 83 | if self.flush_alloc < 0 { 84 | // Out of bandwidth 85 | self.frame_queue.mark_rate_limited(); 86 | return Err(DataPushError::SizeLimited); 87 | } 88 | 89 | if 
!self.frame_queue.can_push() { 90 | // Would exceed window 91 | return Err(DataPushError::WindowLimited); 92 | } 93 | 94 | let frame_id = self.frame_queue.next_id(); 95 | let nonce = rand::random(); 96 | 97 | let mut next_frame = InProgressDataFrame { 98 | fbuilder: DataFrameBuilder::new(frame_id, nonce), 99 | resend_refs: Vec::new(), 100 | nonce, 101 | }; 102 | 103 | next_frame.fbuilder.add(&datagram); 104 | if resend { 105 | next_frame.resend_refs.push(pending_packet::FragmentRef::new(packet_rc, fragment_id)); 106 | } 107 | 108 | debug_assert!(self.in_progress_frame.is_none()); 109 | self.in_progress_frame = Some(next_frame); 110 | 111 | return Ok(()); 112 | } 113 | 114 | pub fn finalize(&mut self) { 115 | if let Some(next_frame) = self.in_progress_frame.take() { 116 | let frame_bytes = next_frame.fbuilder.build(); 117 | let resend_refs = next_frame.resend_refs.into_boxed_slice(); 118 | 119 | debug_assert!(self.frame_queue.can_push()); 120 | self.frame_queue.push(frame_bytes.len(), self.now_ms, resend_refs, next_frame.nonce); 121 | 122 | self.flush_alloc -= frame_bytes.len() as isize; 123 | (self.emit_cb)(frame_bytes); 124 | } 125 | } 126 | } 127 | 128 | pub struct AckFrameEmitter { 129 | frame_window_base_id: u32, 130 | packet_window_base_id: u32, 131 | 132 | in_progress_frame: Option, 133 | flush_alloc: isize, 134 | emit_cb: F, 135 | } 136 | 137 | impl AckFrameEmitter where F: FnMut(Box<[u8]>) { 138 | pub fn new(frame_window_base_id: u32, packet_window_base_id: u32, flush_alloc: isize, emit_cb: F) -> Self { 139 | Self { 140 | frame_window_base_id, 141 | packet_window_base_id, 142 | 143 | in_progress_frame: None, 144 | flush_alloc, 145 | emit_cb, 146 | } 147 | } 148 | 149 | pub fn push_dud(&mut self) -> Result<(), ()> { 150 | if self.in_progress_frame.is_some() { 151 | return Ok(()); 152 | } 153 | 154 | // Try to start a new frame 155 | if self.flush_alloc < 0 { 156 | // Out of bandwidth 157 | return Err(()); 158 | } 159 | 160 | let fbuilder = 
AckFrameBuilder::new(self.frame_window_base_id, self.packet_window_base_id); 161 | 162 | debug_assert!(self.in_progress_frame.is_none()); 163 | self.in_progress_frame = Some(fbuilder); 164 | 165 | return Ok(()); 166 | } 167 | 168 | // Returns Ok(()) if the ack group was added successfully 169 | // Returns Err(()) if the ack group could not be added 170 | pub fn push(&mut self, ack_group: &frame::AckGroup) -> Result<(), ()> { 171 | if let Some(ref mut next_frame) = self.in_progress_frame { 172 | // Try to add to existing frame 173 | let frame_size = next_frame.size(); 174 | let potential_frame_size = frame_size + AckFrameBuilder::encoded_size(ack_group); 175 | 176 | if (self.flush_alloc - frame_size as isize) < 0 { 177 | // Out of bandwidth 178 | self.finalize(); 179 | return Err(()); 180 | } else if potential_frame_size > MAX_FRAME_SIZE { 181 | // Would exceed maximum 182 | self.finalize(); 183 | } else { 184 | next_frame.add(ack_group); 185 | debug_assert!(next_frame.size() == potential_frame_size); 186 | return Ok(()); 187 | } 188 | } 189 | 190 | // Try to add to new frame 191 | if self.flush_alloc < 0 { 192 | // Out of bandwidth 193 | return Err(()); 194 | } 195 | 196 | let mut fbuilder = AckFrameBuilder::new(self.frame_window_base_id, self.packet_window_base_id); 197 | fbuilder.add(ack_group); 198 | 199 | debug_assert!(self.in_progress_frame.is_none()); 200 | self.in_progress_frame = Some(fbuilder); 201 | 202 | return Ok(()); 203 | } 204 | 205 | pub fn finalize(&mut self) { 206 | if let Some(next_frame) = self.in_progress_frame.take() { 207 | let frame_bytes = next_frame.build(); 208 | self.flush_alloc -= frame_bytes.len() as isize; 209 | (self.emit_cb)(frame_bytes); 210 | } 211 | } 212 | } 213 | 214 | #[cfg(test)] 215 | mod tests { 216 | use super::*; 217 | use super::super::pending_packet::PendingPacket; 218 | 219 | use crate::MAX_FRAME_WINDOW_SIZE; 220 | use crate::MAX_FRAGMENT_SIZE; 221 | 222 | use std::rc::Rc; 223 | use std::cell::RefCell; 224 | 225 | fn 
max_datagram_test(flush_alloc: isize, window_size: u32, push_count: usize, final_result: Result<(),DataPushError>) -> Vec> { 226 | let now_ms = 0; 227 | 228 | let mut fq = frame_queue::FrameQueue::new(window_size, window_size, 0); 229 | 230 | let mut frames = Vec::new(); 231 | let emit_cb = |frame_bytes: Box<[u8]>| { 232 | frames.push(frame_bytes); 233 | }; 234 | 235 | let mut dfe = DataFrameEmitter::new(now_ms, &mut fq, flush_alloc, emit_cb); 236 | 237 | let packet_bytes = (0 .. 2*MAX_FRAGMENT_SIZE).map(|i| i as u8).collect::>().into_boxed_slice(); 238 | let packet_rc = Rc::new(RefCell::new(PendingPacket::new(packet_bytes, 0, 0, 0, 0))); 239 | 240 | for _ in 0 .. push_count - 1 { 241 | assert_eq!(dfe.push(&packet_rc, 0, false), Ok(())); 242 | } 243 | assert_eq!(dfe.push(&packet_rc, 0, false), final_result); 244 | dfe.finalize(); 245 | 246 | return frames; 247 | } 248 | 249 | fn datagram_test(flush_alloc: isize, payload_size: usize, push_count: usize, final_result: Result<(),DataPushError>) -> Vec { 250 | let now_ms = 0; 251 | 252 | let mut fq = frame_queue::FrameQueue::new(MAX_FRAME_WINDOW_SIZE, MAX_FRAME_WINDOW_SIZE, 0); 253 | 254 | let mut frames = Vec::new(); 255 | let emit_cb = |frame_bytes: Box<[u8]>| { 256 | frames.push(frame_bytes); 257 | }; 258 | 259 | let mut dfe = DataFrameEmitter::new(now_ms, &mut fq, flush_alloc, emit_cb); 260 | 261 | let packet_bytes = (0 .. payload_size).map(|i| i as u8).collect::>().into_boxed_slice(); 262 | let packet_rc = Rc::new(RefCell::new(PendingPacket::new(packet_bytes, 0, 0, 0, 0))); 263 | 264 | for _ in 0 .. 
push_count - 1 { 265 | assert_eq!(dfe.push(&packet_rc, 0, false), Ok(())); 266 | } 267 | assert_eq!(dfe.push(&packet_rc, 0, false), final_result); 268 | dfe.finalize(); 269 | 270 | use frame::serial::Serialize; 271 | 272 | return frames.iter().map(|frame_bytes| { 273 | match frame::Frame::read(&frame_bytes) { 274 | Some(frame::Frame::DataFrame(data_frame)) => data_frame, 275 | _ => panic!(), 276 | } 277 | }).collect::>(); 278 | } 279 | 280 | fn ack_test(flush_alloc: isize, push_count: usize, final_result: Result<(),()>) -> Vec { 281 | let mut frames = Vec::new(); 282 | let emit_cb = |frame_bytes: Box<[u8]>| { 283 | frames.push(frame_bytes); 284 | }; 285 | 286 | let mut afe = AckFrameEmitter::new(0, 0, flush_alloc, emit_cb); 287 | 288 | let ack_group = frame::AckGroup { base_id: 0, bitfield: 0, nonce: false }; 289 | 290 | for _ in 0 .. push_count - 1 { 291 | assert_eq!(afe.push(&ack_group), Ok(())); 292 | } 293 | assert_eq!(afe.push(&ack_group), final_result); 294 | afe.finalize(); 295 | 296 | use frame::serial::Serialize; 297 | 298 | return frames.iter().map(|frame_bytes| { 299 | match frame::Frame::read(&frame_bytes) { 300 | Some(frame::Frame::AckFrame(ack_frame)) => ack_frame, 301 | _ => panic!(), 302 | } 303 | }).collect::>(); 304 | } 305 | 306 | #[test] 307 | fn data_max_frame_size() { 308 | let frames = max_datagram_test(2 * MAX_FRAME_SIZE as isize, MAX_FRAME_WINDOW_SIZE, 2, Ok(())); 309 | assert_eq!(frames.len(), 2); 310 | assert_eq!(frames[0].len(), MAX_FRAME_SIZE); 311 | assert_eq!(frames[1].len(), MAX_FRAME_SIZE); 312 | } 313 | 314 | #[test] 315 | fn data_size_limited() { 316 | let payload_len = 5; 317 | let datagram_overhead = frame::serial::MIN_DATAGRAM_OVERHEAD; 318 | let frame_overhead = frame::serial::DATA_FRAME_OVERHEAD; 319 | 320 | let len_a = (frame_overhead + 1 * (datagram_overhead + payload_len)) as isize; 321 | let len_b = (frame_overhead + 2 * (datagram_overhead + payload_len)) as isize; 322 | 323 | let test_cases: Vec<(isize, usize, usize, 
Result<(),DataPushError>)> = vec![ 324 | ( 0 , 1, 1, Ok(()) ), 325 | ( len_a - 1, 1, 1, Ok(()) ), 326 | ( len_a , 1, 1, Ok(()) ), 327 | ( len_a + 1, 1, 1, Ok(()) ), 328 | 329 | ( len_a , 2, 2, Ok(()) ), 330 | ( len_b - 1, 2, 2, Ok(()) ), 331 | ( len_b , 2, 2, Ok(()) ), 332 | ( len_b + 1, 2, 2, Ok(()) ), 333 | 334 | ( len_b , 3, 3, Ok(()) ), 335 | 336 | ( 0 , 2, 1, Err(DataPushError::SizeLimited) ), 337 | ( len_a - 1, 2, 1, Err(DataPushError::SizeLimited) ), 338 | ( len_a , 2, 2, Ok(()) ), 339 | ( len_a + 1, 2, 2, Ok(()) ), 340 | 341 | ( len_a , 3, 2, Err(DataPushError::SizeLimited) ), 342 | ( len_b - 1, 3, 2, Err(DataPushError::SizeLimited) ), 343 | ( len_b , 3, 3, Ok(()) ), 344 | ( len_b + 1, 3, 3, Ok(()) ), 345 | 346 | ( len_b , 4, 3, Err(DataPushError::SizeLimited) ), 347 | ]; 348 | 349 | for (idx, test) in test_cases.into_iter().enumerate() { 350 | println!("{}", idx); 351 | let frames = datagram_test(test.0, payload_len, test.1, test.3); 352 | assert_eq!(frames.len(), 1); 353 | assert_eq!(frames[0].datagrams.len(), test.2); 354 | } 355 | } 356 | 357 | #[test] 358 | fn data_window_limited() { 359 | let frames = max_datagram_test(6 * MAX_FRAME_SIZE as isize, 5, 6, Err(DataPushError::WindowLimited)); 360 | assert_eq!(frames.len(), 5); 361 | } 362 | 363 | #[test] 364 | fn ack_max_frame_size() { 365 | let max_datagrams = (MAX_FRAME_SIZE - frame::serial::ACK_FRAME_OVERHEAD) / frame::serial::ACK_GROUP_SIZE; 366 | 367 | let frames = ack_test(2 * MAX_FRAME_SIZE as isize, max_datagrams, Ok(())); 368 | assert_eq!(frames.len(), 1); 369 | assert_eq!(frames[0].frame_acks.len(), max_datagrams); 370 | 371 | let frames = ack_test(2 * MAX_FRAME_SIZE as isize, max_datagrams + 1, Ok(())); 372 | assert_eq!(frames.len(), 2); 373 | assert_eq!(frames[0].frame_acks.len(), max_datagrams); 374 | assert_eq!(frames[1].frame_acks.len(), 1); 375 | } 376 | 377 | #[test] 378 | fn ack_size_limited() { 379 | let len_a = (frame::serial::ACK_FRAME_OVERHEAD + 1 * frame::serial::ACK_GROUP_SIZE) as 
isize; 380 | let len_b = (frame::serial::ACK_FRAME_OVERHEAD + 2 * frame::serial::ACK_GROUP_SIZE) as isize; 381 | 382 | let test_cases: Vec<(isize, usize, usize, Result<(),()>)> = vec![ 383 | ( 0 , 1, 1, Ok(()) ), 384 | ( len_a - 1, 1, 1, Ok(()) ), 385 | ( len_a , 1, 1, Ok(()) ), 386 | ( len_a + 1, 1, 1, Ok(()) ), 387 | 388 | ( len_a , 2, 2, Ok(()) ), 389 | ( len_b - 1, 2, 2, Ok(()) ), 390 | ( len_b , 2, 2, Ok(()) ), 391 | ( len_b + 1, 2, 2, Ok(()) ), 392 | 393 | ( len_b , 3, 3, Ok(()) ), 394 | 395 | ( 0 , 2, 1, Err(()) ), 396 | ( len_a - 1, 2, 1, Err(()) ), 397 | ( len_a , 2, 2, Ok(()) ), 398 | ( len_a + 1, 2, 2, Ok(()) ), 399 | 400 | ( len_a , 3, 2, Err(()) ), 401 | ( len_b - 1, 3, 2, Err(()) ), 402 | ( len_b , 3, 3, Ok(()) ), 403 | ( len_b + 1, 3, 3, Ok(()) ), 404 | 405 | ( len_b , 4, 3, Err(()) ), 406 | ]; 407 | 408 | for (idx, test) in test_cases.into_iter().enumerate() { 409 | println!("{}", idx); 410 | let frames = ack_test(test.0, test.1, test.3); 411 | assert_eq!(frames.len(), 1); 412 | assert_eq!(frames[0].frame_acks.len(), test.2); 413 | } 414 | } 415 | 416 | #[test] 417 | fn ack_min_one() { 418 | let mut frames = Vec::new(); 419 | let emit_cb = |frame_bytes: Box<[u8]>| { 420 | frames.push(frame_bytes); 421 | }; 422 | 423 | let mut afe = AckFrameEmitter::new(0, 0, MAX_FRAME_SIZE as isize, emit_cb); 424 | assert_eq!(afe.push_dud(), Ok(())); 425 | afe.finalize(); 426 | 427 | assert_eq!(frames.len(), 1); 428 | } 429 | } 430 | 431 | -------------------------------------------------------------------------------- /src/half_connection/frame_ack_queue.rs: -------------------------------------------------------------------------------- 1 | 2 | use crate::frame; 3 | 4 | struct ReceiveWindow { 5 | base_id: u32, 6 | size: u32, 7 | } 8 | 9 | impl ReceiveWindow { 10 | pub fn new(base_id: u32, size: u32) -> Self { 11 | Self { base_id, size } 12 | } 13 | 14 | pub fn contains(&self, frame_id: u32) -> bool { 15 | frame_id.wrapping_sub(self.base_id) < self.size 16 | } 17 | 
18 | pub fn advance(&mut self, new_base_id: u32) -> bool { 19 | let delta = new_base_id.wrapping_sub(self.base_id); 20 | if delta > 0 && delta <= self.size { 21 | self.base_id = new_base_id; 22 | true 23 | } else { 24 | false 25 | } 26 | } 27 | 28 | pub fn base_id(&self) -> u32 { 29 | self.base_id 30 | } 31 | } 32 | 33 | pub struct FrameAckQueue { 34 | entries: std::collections::VecDeque, 35 | receive_window: ReceiveWindow, 36 | } 37 | 38 | impl FrameAckQueue { 39 | pub fn new(size: u32, base_id: u32) -> Self { 40 | Self { 41 | entries: std::collections::VecDeque::new(), 42 | receive_window: ReceiveWindow::new(base_id, size), 43 | } 44 | } 45 | 46 | pub fn base_id(&self) -> u32 { 47 | self.receive_window.base_id() 48 | } 49 | 50 | pub fn resynchronize(&mut self, sender_next_id: u32) { 51 | self.receive_window.advance(sender_next_id); 52 | } 53 | 54 | pub fn window_contains(&self, frame_id: u32) -> bool { 55 | self.receive_window.contains(frame_id) 56 | } 57 | 58 | pub fn mark_seen(&mut self, frame_id: u32, nonce: bool) { 59 | if self.receive_window.contains(frame_id) { 60 | self.receive_window.advance(frame_id.wrapping_add(1)); 61 | 62 | if let Some(last_entry) = self.entries.back_mut() { 63 | let bit = frame_id.wrapping_sub(last_entry.base_id); 64 | if bit < 32 { 65 | if last_entry.bitfield & (0x00000001 << bit) == 0 { 66 | last_entry.bitfield |= 0x00000001 << bit; 67 | last_entry.nonce ^= nonce; 68 | } 69 | } else { 70 | self.entries.push_back(frame::AckGroup { 71 | base_id: frame_id, 72 | bitfield: 0x00000001, 73 | nonce: nonce, 74 | }); 75 | } 76 | } else { 77 | self.entries.push_back(frame::AckGroup { 78 | base_id: frame_id, 79 | bitfield: 0x00000001, 80 | nonce: nonce, 81 | }); 82 | } 83 | } 84 | } 85 | 86 | pub fn pop(&mut self) -> Option { 87 | if let Some(first_entry) = self.entries.pop_front() { 88 | debug_assert!(first_entry.bitfield & 0x00000001 != 0); 89 | 90 | return Some(first_entry); 91 | } 92 | 93 | return None; 94 | } 95 | 96 | pub fn peek(&self) 
-> Option<&frame::AckGroup> { 97 | self.entries.front() 98 | } 99 | } 100 | 101 | -------------------------------------------------------------------------------- /src/half_connection/loss_rate.rs: -------------------------------------------------------------------------------- 1 | 2 | // See RFC 5348: "TCP Friendly Rate Control (TFRC): Protocol Specification" 3 | 4 | // This is a constant-time overhead implementation of the loss rate computation, where only the 5 | // most recent loss interval is updated each time a new frame is acked or nacked. Currently, no 6 | // attempt is made to fill holes in the loss history as acks are received for previous nacks. In 7 | // the future, an approximation to hole-filling will be performed which does not require iteration 8 | // over the loss history. 9 | 10 | use std::collections::VecDeque; 11 | 12 | #[derive(Debug)] 13 | struct LossInterval { 14 | end_time_ms: u64, 15 | length: u32, 16 | } 17 | 18 | #[derive(Debug)] 19 | pub struct LossIntervalQueue { 20 | entries: VecDeque, 21 | } 22 | 23 | impl LossIntervalQueue { 24 | // See section 5.4 25 | const WEIGHTS: [f64; 8] = [ 1.0, 1.0, 1.0, 1.0, 0.8, 0.6, 0.4, 0.2 ]; 26 | 27 | pub fn new() -> Self { 28 | Self { 29 | entries: VecDeque::new() 30 | } 31 | } 32 | 33 | pub fn reset(&mut self, initial_p: f64) { 34 | // Because the loss interval queue is truncated here, the particular loss pattern of the 35 | // first feedback message is ignored. That is, if frames were acked/nacked as follows: 36 | // 37 | // first nack 38 | // v 39 | // 111111111100100111000110111 40 | // a) +------------------ 41 | // b) +------------------+--+----+----+--- 42 | // 43 | // Only the single loss interval (a) will be generated, with a length initialized according 44 | // to the throughput equation, and ignoring any subsequent initial loss events. Standard 45 | // TFRC would not ignore those loss events, as shown in (b). 
46 | // 47 | // By ignoring subsequent loss intervals, the throughput equation phase will begin at half 48 | // of the maximum send rate regardless of the initial loss pattern. This effects a slightly 49 | // improved response to initial packet drops, but any difference in behavior relative to 50 | // standard TFRC should be marginal. 51 | 52 | self.entries.truncate(1); 53 | self.entries[0].length = (Self::WEIGHTS[0] / initial_p).clamp(0.0, u32::MAX as f64).round() as u32; 54 | } 55 | 56 | pub fn push_ack(&mut self) { 57 | if let Some(last_interval) = self.entries.front_mut() { 58 | // Acks always contribute to previous loss interval 59 | last_interval.length = last_interval.length.saturating_add(1); 60 | } 61 | } 62 | 63 | pub fn push_nack(&mut self, send_time_ms: u64, rtt_ms: u64) { 64 | if let Some(last_interval) = self.entries.front_mut() { 65 | if send_time_ms >= last_interval.end_time_ms { 66 | // This nack marks a new loss interval 67 | self.entries.push_front(LossInterval { 68 | end_time_ms: send_time_ms + rtt_ms, 69 | length: 1, 70 | }); 71 | 72 | self.entries.truncate(9); 73 | } else { 74 | // This nack falls under previous loss interval 75 | last_interval.length = last_interval.length.saturating_add(1); 76 | } 77 | } else { 78 | // This nack marks a new loss interval 79 | self.entries.push_front(LossInterval { 80 | end_time_ms: send_time_ms + rtt_ms, 81 | length: 1, 82 | }); 83 | } 84 | } 85 | 86 | pub fn compute_loss_rate(&self) -> f64 { 87 | // See section 5.4 88 | if self.entries.len() > 0 { 89 | let mut i_total_0 = 0.0; 90 | let mut i_total_1 = 0.0; 91 | let mut w_total = 0.0; 92 | 93 | if self.entries.len() > 1 { 94 | for i in 0..self.entries.len()-1 { 95 | i_total_0 += self.entries[i].length as f64 * Self::WEIGHTS[i]; 96 | w_total += Self::WEIGHTS[i]; 97 | } 98 | for i in 1..self.entries.len() { 99 | i_total_1 += self.entries[i].length as f64 * Self::WEIGHTS[i - 1]; 100 | } 101 | 102 | return w_total / i_total_0.max(i_total_1); 103 | } else { 104 | 
return Self::WEIGHTS[0] / (self.entries[0].length as f64 * Self::WEIGHTS[0]); 105 | } 106 | } else { 107 | return 0.0; 108 | } 109 | } 110 | } 111 | 112 | #[cfg(test)] 113 | mod tests { 114 | use super::*; 115 | 116 | /* Tests from previous incarnation. Because frames are now consumed greedily from the reorder 117 | * buffer, some modification will be necessary. 118 | * 119 | #[test] 120 | fn basic() { 121 | let mut fbc = FeedbackComp::new(0); 122 | 123 | fbc.log_frame(0, false, 53, 0); 124 | fbc.log_frame(1, false, 71, 0); 125 | fbc.log_frame(2, false, 89, 0); 126 | fbc.log_frame(3, false, 107, 10); 127 | 128 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b1111, nonce: false }, 100); 129 | 130 | let feedback = fbc.pending_feedback().unwrap(); 131 | 132 | assert_eq!(feedback.last_send_time_ms, 10); 133 | assert_eq!(feedback.total_ack_size, 320); 134 | assert_eq!(feedback.loss_rate, 0.0); 135 | assert_eq!(feedback.rate_limited, false); 136 | 137 | assert_eq!(fbc.pending_feedback(), None); 138 | } 139 | 140 | #[test] 141 | fn bad_nonce() { 142 | let mut fbc = FeedbackComp::new(0); 143 | 144 | fbc.log_frame(0, false, 53, 0); 145 | fbc.log_frame(1, true, 71, 0); 146 | fbc.log_frame(2, false, 89, 0); 147 | fbc.log_frame(3, true, 107, 10); 148 | 149 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b1111, nonce: true }, 100); 150 | 151 | assert_eq!(fbc.pending_feedback(), None); 152 | } 153 | 154 | #[test] 155 | fn rate_limited() { 156 | let mut fbc = FeedbackComp::new(0); 157 | 158 | fbc.log_frame(0, false, 53, 0); 159 | fbc.log_rate_limited(); 160 | fbc.log_frame(1, false, 71, 0); 161 | fbc.log_frame(2, false, 89, 0); 162 | fbc.log_frame(3, false, 107, 10); 163 | 164 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b1001, nonce: false }, 100); 165 | 166 | let feedback = fbc.pending_feedback().unwrap(); 167 | 168 | assert_eq!(feedback.last_send_time_ms, 10); 169 | assert_eq!(feedback.total_ack_size, 160); 170 | 
assert_eq!(feedback.rate_limited, true); 171 | } 172 | 173 | #[test] 174 | fn loss_intervals_reorder() { 175 | let mut fbc = FeedbackComp::new(0); 176 | 177 | fbc.log_frame( 0, false, 64, 0); 178 | fbc.log_frame( 1, false, 64, 0); 179 | fbc.log_frame( 2, false, 64, 0); 180 | fbc.log_frame( 3, false, 64, 0); 181 | fbc.log_frame( 4, false, 64, 0); 182 | fbc.log_frame( 5, false, 64, 0); 183 | fbc.log_frame( 6, false, 64, 0); 184 | fbc.log_frame( 7, false, 64, 0); 185 | fbc.log_frame( 8, false, 64, 0); 186 | fbc.log_frame( 9, false, 64, 0); 187 | 188 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b0000001001, nonce: false }, 100); 189 | 190 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ ]); 191 | 192 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b0001001001, nonce: false }, 100); 193 | 194 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ ]); 195 | 196 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b1001001001, nonce: false }, 100); 197 | 198 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ 3 ]); 199 | } 200 | 201 | #[test] 202 | fn loss_intervals_rtt() { 203 | let mut fbc = FeedbackComp::new(0); 204 | 205 | fbc.log_frame( 0, false, 64, 0); // ack 206 | fbc.log_frame( 1, false, 64, 0); 207 | fbc.log_frame( 2, false, 64, 1); 208 | fbc.log_frame( 3, false, 64, 99); 209 | fbc.log_frame( 4, false, 64, 100); 210 | fbc.log_frame( 5, false, 64, 200); 211 | fbc.log_frame( 6, false, 64, 200); 212 | fbc.log_frame( 7, false, 64, 200); // ack 213 | fbc.log_frame( 8, false, 64, 200); // ack 214 | fbc.log_frame( 9, false, 64, 200); // ack 215 | 216 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b1110000001, nonce: false }, 100); 217 | 218 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ 3, 1, 3 ]); 219 | } 220 | 221 | #[test] 222 | 
fn loss_intervals_reorder_forget_partial() { 223 | let mut fbc = FeedbackComp::new(0); 224 | 225 | fbc.log_frame( 0, false, 64, 0); 226 | fbc.log_frame( 1, false, 64, 0); 227 | fbc.log_frame( 2, false, 64, 100); // ack 228 | fbc.log_frame( 3, false, 64, 100); 229 | fbc.log_frame( 4, false, 64, 150); 230 | fbc.log_frame( 5, false, 64, 150); // ack 231 | fbc.log_frame( 6, false, 64, 199); 232 | fbc.log_frame( 7, false, 64, 200); 233 | fbc.log_frame( 8, false, 64, 300); 234 | 235 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b000000100, nonce: false }, 100); 236 | 237 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ ]); 238 | 239 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b000100000, nonce: false }, 100); 240 | 241 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ ]); 242 | 243 | assert_eq!(fbc.next_ack_id, 0); 244 | assert_eq!(fbc.frame_log.base_id(), 0); 245 | assert_eq!(fbc.frame_log.next_id(), 9); 246 | 247 | fbc.forget_frames(150, 100); 248 | 249 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ 1, 3 ]); 250 | 251 | assert_eq!(fbc.next_ack_id, 4); 252 | assert_eq!(fbc.frame_log.base_id(), 4); 253 | assert_eq!(fbc.frame_log.next_id(), 9); 254 | } 255 | 256 | #[test] 257 | fn loss_intervals_reorder_forget_full() { 258 | let mut fbc = FeedbackComp::new(0); 259 | 260 | fbc.log_frame( 0, false, 64, 0); 261 | fbc.log_frame( 1, false, 64, 0); 262 | fbc.log_frame( 2, false, 64, 100); // ack 263 | fbc.log_frame( 3, false, 64, 100); 264 | fbc.log_frame( 4, false, 64, 100); 265 | fbc.log_frame( 5, false, 64, 100); // ack 266 | fbc.log_frame( 6, false, 64, 199); 267 | fbc.log_frame( 7, false, 64, 200); 268 | fbc.log_frame( 8, false, 64, 300); 269 | 270 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b000000100, nonce: false }, 100); 271 | 272 | 
assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ ]); 273 | 274 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b000100000, nonce: false }, 100); 275 | 276 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ ]); 277 | 278 | assert_eq!(fbc.next_ack_id, 0); 279 | assert_eq!(fbc.frame_log.base_id(), 0); 280 | assert_eq!(fbc.frame_log.next_id(), 9); 281 | 282 | fbc.forget_frames(300, 100); 283 | 284 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ 1, 4, 3 ]); 285 | 286 | assert_eq!(fbc.next_ack_id, 8); 287 | assert_eq!(fbc.frame_log.base_id(), 8); 288 | assert_eq!(fbc.frame_log.next_id(), 9); 289 | } 290 | 291 | #[test] 292 | fn max_loss_intervals() { 293 | let mut fbc = FeedbackComp::new(0); 294 | 295 | fbc.log_frame( 0, false, 64, 0); // ack 296 | fbc.log_frame( 1, false, 64, 0); 297 | fbc.log_frame( 2, false, 64, 100); 298 | fbc.log_frame( 3, false, 64, 200); 299 | fbc.log_frame( 4, false, 64, 200); 300 | fbc.log_frame( 5, false, 64, 300); 301 | fbc.log_frame( 6, false, 64, 400); 302 | fbc.log_frame( 7, false, 64, 500); 303 | fbc.log_frame( 8, false, 64, 500); 304 | fbc.log_frame( 9, false, 64, 500); 305 | fbc.log_frame(10, false, 64, 600); 306 | fbc.log_frame(11, false, 64, 700); 307 | fbc.log_frame(12, false, 64, 800); 308 | fbc.log_frame(13, false, 64, 800); 309 | fbc.log_frame(14, false, 64, 900); 310 | fbc.log_frame(15, false, 64, 900); 311 | fbc.log_frame(16, false, 64, 900); // ack 312 | fbc.log_frame(17, false, 64, 1000); // ack 313 | fbc.log_frame(18, false, 64, 1000); // ack 314 | 315 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0x70001, nonce: false }, 100); 316 | 317 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ 3, 2, 1, 1, 3, 1, 1, 2, 1 ]); 318 | } 319 | 320 | #[test] 321 | fn loss_intervals_changing_rtt() { 322 | let mut fbc = 
FeedbackComp::new(0); 323 | 324 | fbc.log_frame( 0, false, 64, 0); // ack @ 100ms RTT 325 | fbc.log_frame( 1, false, 64, 0); 326 | fbc.log_frame( 2, false, 64, 0); // ack @ 100ms RTT 327 | fbc.log_frame( 3, false, 64, 0); // ack @ 100ms RTT 328 | fbc.log_frame( 4, false, 64, 0); // ack @ 100ms RTT 329 | fbc.log_frame( 5, false, 64, 99); 330 | fbc.log_frame( 6, false, 64, 100); 331 | fbc.log_frame( 7, false, 64, 150); 332 | fbc.log_frame( 8, false, 64, 200); // ack @ 50ms RTT 333 | fbc.log_frame( 9, false, 64, 200); // ack @ 50ms RTT 334 | fbc.log_frame(10, false, 64, 200); // ack @ 50ms RTT 335 | 336 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b00000011101, nonce: false }, 100); 337 | 338 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ 2 ]); 339 | 340 | fbc.acknowledge_frames(frame::FrameAck { base_id: 0, bitfield: 0b11100000000, nonce: false }, 50); 341 | 342 | assert_eq!(fbc.loss_intervals.entries.iter().map(|entry| entry.length).collect::>(), vec![ 2, 1, 5 ]); 343 | } 344 | */ 345 | } 346 | 347 | -------------------------------------------------------------------------------- /src/half_connection/packet_receiver/assembly_window/fragment_buffer.rs: -------------------------------------------------------------------------------- 1 | 2 | use super::MAX_FRAGMENT_SIZE; 3 | 4 | pub struct FragmentBuffer { 5 | buffer: Box<[u8]>, 6 | fragment_bitfields: Box<[u64]>, 7 | num_fragments: usize, 8 | fragments_remaining: usize, 9 | total_size: usize, 10 | } 11 | 12 | impl FragmentBuffer { 13 | pub fn new(num_fragments: usize) -> Self { 14 | debug_assert!(num_fragments > 0); 15 | 16 | Self { 17 | buffer: vec![0; num_fragments * MAX_FRAGMENT_SIZE].into_boxed_slice(), 18 | fragment_bitfields: vec![0; (num_fragments + 63)/64].into_boxed_slice(), 19 | num_fragments: num_fragments, 20 | fragments_remaining: num_fragments, 21 | total_size: 0, 22 | } 23 | } 24 | 25 | pub fn write(&mut self, idx: usize, data: 
Box<[u8]>) { 26 | debug_assert!(idx < self.num_fragments); 27 | debug_assert!(data.len() == MAX_FRAGMENT_SIZE || (idx == self.num_fragments - 1 && data.len() <= MAX_FRAGMENT_SIZE)); 28 | 29 | let bitfield_idx = idx / 64; 30 | let bitfield_bit = 1 << (idx % 64); 31 | 32 | let ref mut bitfield = self.fragment_bitfields[bitfield_idx]; 33 | 34 | if *bitfield & bitfield_bit == 0 { 35 | *bitfield |= bitfield_bit; 36 | 37 | let begin_idx = idx * MAX_FRAGMENT_SIZE; 38 | let end_idx = begin_idx + data.len(); 39 | 40 | self.buffer[begin_idx .. end_idx].copy_from_slice(&data); 41 | 42 | self.fragments_remaining -= 1; 43 | self.total_size += data.len(); 44 | } 45 | } 46 | 47 | pub fn finalize(mut self) -> Box<[u8]> { 48 | debug_assert!(self.total_size <= self.buffer.len()); 49 | let ptr = self.buffer.as_mut_ptr(); 50 | std::mem::forget(self.buffer); 51 | unsafe { Box::from_raw(std::slice::from_raw_parts_mut(ptr, self.total_size)) } 52 | } 53 | 54 | pub fn is_finished(&self) -> bool { 55 | self.fragments_remaining == 0 56 | } 57 | } 58 | 59 | #[cfg(test)] 60 | mod tests { 61 | use super::*; 62 | 63 | #[test] 64 | fn single_fragment() { 65 | let mut buf = FragmentBuffer::new(1); 66 | 67 | let fragment_data = (0 .. MAX_FRAGMENT_SIZE).map(|i| i as u8).collect::>().into_boxed_slice(); 68 | 69 | buf.write(0, fragment_data.clone()); 70 | 71 | assert_eq!(buf.is_finished(), true); 72 | assert_eq!(buf.finalize(), fragment_data); 73 | } 74 | 75 | #[test] 76 | fn multiple_fragments() { 77 | let mut buf = FragmentBuffer::new(5); 78 | 79 | let packet_data = (0 .. MAX_FRAGMENT_SIZE*5).map(|i| i as u8).collect::>().into_boxed_slice(); 80 | 81 | for i in 0 .. 5 { 82 | assert_eq!(buf.is_finished(), false); 83 | buf.write(i, packet_data[i * MAX_FRAGMENT_SIZE .. 
(i + 1) * MAX_FRAGMENT_SIZE].into()); 84 | } 85 | 86 | assert_eq!(buf.is_finished(), true); 87 | assert_eq!(buf.finalize(), packet_data); 88 | } 89 | 90 | #[test] 91 | fn multiple_fragments_nonmultiple() { 92 | let mut buf = FragmentBuffer::new(5); 93 | 94 | let packet_data = (0 .. MAX_FRAGMENT_SIZE*5 - MAX_FRAGMENT_SIZE/2).map(|i| i as u8).collect::>().into_boxed_slice(); 95 | 96 | for i in 0 .. 4 { 97 | assert_eq!(buf.is_finished(), false); 98 | buf.write(i, packet_data[i * MAX_FRAGMENT_SIZE .. (i + 1) * MAX_FRAGMENT_SIZE].into()); 99 | } 100 | assert_eq!(buf.is_finished(), false); 101 | buf.write(4, packet_data[4 * MAX_FRAGMENT_SIZE .. ].into()); 102 | 103 | assert_eq!(buf.is_finished(), true); 104 | assert_eq!(buf.finalize(), packet_data); 105 | } 106 | 107 | #[test] 108 | fn max_fragments() { 109 | use crate::frame::serial::MAX_FRAGMENTS; 110 | 111 | let mut buf = FragmentBuffer::new(MAX_FRAGMENTS); 112 | 113 | let packet_data = (0 .. MAX_FRAGMENT_SIZE*MAX_FRAGMENTS).map(|i| i as u8).collect::>().into_boxed_slice(); 114 | 115 | let mut indices = (0 .. MAX_FRAGMENTS).map(|i| i as u16).collect::>(); 116 | 117 | for i in 0 ..= MAX_FRAGMENTS - 2 { 118 | let j = i + rand::random::() % (MAX_FRAGMENTS - i); 119 | indices.swap(i, j); 120 | } 121 | 122 | for i in 0 .. MAX_FRAGMENTS { 123 | let idx = indices[i] as usize; 124 | buf.write(idx, packet_data[idx * MAX_FRAGMENT_SIZE .. 
(idx + 1) * MAX_FRAGMENT_SIZE].into());
        }

        assert_eq!(buf.is_finished(), true);
        assert_eq!(buf.finalize(), packet_data);
    }
}

-------------------------------------------------------------------------------- /src/half_connection/packet_receiver/assembly_window/mod.rs: --------------------------------------------------------------------------------

use super::MAX_FRAGMENT_SIZE;
use super::MAX_PACKET_WINDOW_SIZE;

use crate::frame;

mod fragment_buffer;

/// Bookkeeping for a multi-fragment packet that is currently being assembled.
struct ActiveEntry {
    // How many allocation points this entry is worth
    alloc_size: usize,

    // Used to validate future datagrams belonging to the same packet
    channel_id: u8,
    window_parent_lead: u16,
    channel_parent_lead: u16,
    last_fragment_id: u16,

    // Assembles a multi-fragment packet
    asm_buffer: fragment_buffer::FragmentBuffer,
}

impl ActiveEntry {
    fn new(alloc_size: usize, channel_id: u8, window_parent_lead: u16, channel_parent_lead: u16, last_fragment_id: u16, num_fragments: usize) -> Self {
        Self {
            alloc_size,
            channel_id,
            window_parent_lead,
            channel_parent_lead,
            last_fragment_id,
            asm_buffer: fragment_buffer::FragmentBuffer::new(num_fragments),
        }
    }
}

/// State of one slot in the receive window: untouched, finished/rejected
/// (retaining its allocation charge), or mid-assembly.
enum WindowEntry {
    Open,
    Closed(usize),
    Active(ActiveEntry),
}

#[derive(Debug, PartialEq)]
pub struct Packet {
    pub channel_id: u8,
    pub sequence_id: u32,
    pub window_parent_lead: u16,
    pub channel_parent_lead: u16,
    pub data: Option<Box<[u8]>>,
}

// Allocation points charged for a packet: multi-fragment packets reserve a
// whole MAX_FRAGMENT_SIZE buffer per fragment up front, while single-fragment
// packets are charged their exact size.
fn packet_alloc_size(datagram: &frame::Datagram) -> usize {
    let num_fragments = datagram.fragment_id_last as usize + 1;

    if num_fragments > 1 {
        num_fragments * MAX_FRAGMENT_SIZE
    } else {
        datagram.data.len()
    }
}

pub struct AssemblyWindow {
    window: Box<[WindowEntry]>,

    alloc: usize,
    max_alloc: usize,
}

impl
AssemblyWindow { 70 | pub fn new(max_alloc: usize) -> Self { 71 | let window: Vec = (0 .. MAX_PACKET_WINDOW_SIZE).map(|_| WindowEntry::Open).collect(); 72 | 73 | let max_alloc_ceil = ((max_alloc + MAX_FRAGMENT_SIZE - 1) / MAX_FRAGMENT_SIZE) * MAX_FRAGMENT_SIZE; 74 | 75 | Self { 76 | window: window.into_boxed_slice(), 77 | 78 | alloc: 0, 79 | max_alloc: max_alloc_ceil, 80 | } 81 | } 82 | 83 | pub fn try_add(&mut self, idx: usize, datagram: frame::Datagram) -> Option { 84 | match self.window[idx] { 85 | WindowEntry::Open => { 86 | // New packet 87 | 88 | // If this packet would exceed the memory limit, enqueue a closed entry with no 89 | // allocation value and pass on a dud packet (as if the packet was received in 90 | // full). Otherwise, pass the datagram on directly or allocate an assembly 91 | // buffer as appropriate. 92 | 93 | let alloc_size = packet_alloc_size(&datagram); 94 | 95 | if self.alloc + alloc_size > self.max_alloc { 96 | // Never should have come here! 97 | self.window[idx] = WindowEntry::Closed(0); 98 | 99 | return Some(Packet { 100 | channel_id: datagram.channel_id, 101 | sequence_id: datagram.sequence_id, 102 | window_parent_lead: datagram.window_parent_lead, 103 | channel_parent_lead: datagram.channel_parent_lead, 104 | data: None 105 | }); 106 | } else { 107 | self.alloc += alloc_size; 108 | 109 | if datagram.fragment_id_last == 0 { 110 | let new_entry = WindowEntry::Closed(alloc_size); 111 | 112 | self.window[idx] = new_entry; 113 | 114 | return Some(Packet { 115 | channel_id: datagram.channel_id, 116 | sequence_id: datagram.sequence_id, 117 | window_parent_lead: datagram.window_parent_lead, 118 | channel_parent_lead: datagram.channel_parent_lead, 119 | data: Some(datagram.data), 120 | }); 121 | } else { 122 | let num_fragments = datagram.fragment_id_last as usize + 1; 123 | 124 | let mut new_entry = ActiveEntry::new(alloc_size, 125 | datagram.channel_id, 126 | datagram.window_parent_lead, 127 | datagram.channel_parent_lead, 128 | 
datagram.fragment_id_last, 129 | num_fragments); 130 | 131 | new_entry.asm_buffer.write(datagram.fragment_id as usize, datagram.data); 132 | 133 | self.window[idx] = WindowEntry::Active(new_entry); 134 | 135 | return None; 136 | } 137 | } 138 | } 139 | WindowEntry::Closed(_) => { 140 | // Packet has been rejected or has already been received 141 | return None; 142 | } 143 | WindowEntry::Active(ref mut entry) => { 144 | // In-progress packet 145 | 146 | // Validate datagram against existing datagrams 147 | if datagram.channel_id != entry.channel_id { 148 | return None; 149 | } 150 | if datagram.window_parent_lead != entry.window_parent_lead { 151 | return None; 152 | } 153 | if datagram.channel_parent_lead != entry.channel_parent_lead { 154 | return None; 155 | } 156 | if datagram.fragment_id_last != entry.last_fragment_id { 157 | return None; 158 | } 159 | 160 | entry.asm_buffer.write(datagram.fragment_id as usize, datagram.data); 161 | 162 | if entry.asm_buffer.is_finished() { 163 | let new_entry = WindowEntry::Closed(entry.alloc_size); 164 | let prev_entry = std::mem::replace(&mut self.window[idx], new_entry); 165 | 166 | match prev_entry { 167 | WindowEntry::Active(entry) => { 168 | return Some(Packet { 169 | channel_id: datagram.channel_id, 170 | sequence_id: datagram.sequence_id, 171 | window_parent_lead: datagram.window_parent_lead, 172 | channel_parent_lead: datagram.channel_parent_lead, 173 | data: Some(entry.asm_buffer.finalize()), 174 | }); 175 | } 176 | _ => panic!() 177 | } 178 | } else { 179 | return None; 180 | } 181 | } 182 | } 183 | } 184 | 185 | pub fn clear(&mut self, idx: usize) { 186 | match self.window[idx] { 187 | WindowEntry::Open => { 188 | } 189 | WindowEntry::Closed(ref alloc_size) => { 190 | self.alloc -= alloc_size; 191 | } 192 | WindowEntry::Active(ref entry) => { 193 | self.alloc -= entry.alloc_size; 194 | } 195 | } 196 | 197 | self.window[idx] = WindowEntry::Open; 198 | } 199 | } 200 | 201 | #[cfg(test)] 202 | mod tests { 203 | use 
super::*; 204 | 205 | use frame::Datagram; 206 | 207 | #[test] 208 | fn single_packet_single_fragment() { 209 | let packet_size = 100; 210 | 211 | let mut window = AssemblyWindow::new(10000); 212 | 213 | let packet_data = (0..packet_size).map(|i| i as u8).collect::>().into_boxed_slice(); 214 | 215 | let result = window.try_add(0, Datagram { 216 | sequence_id: 0, 217 | channel_id: 0, 218 | window_parent_lead: 0, 219 | channel_parent_lead: 0, 220 | fragment_id: 0, 221 | fragment_id_last: 0, 222 | data: packet_data.clone(), 223 | }); 224 | 225 | assert_eq!(result.unwrap(), Packet { 226 | sequence_id: 0, 227 | channel_id: 0, 228 | window_parent_lead: 0, 229 | channel_parent_lead: 0, 230 | data: Some(packet_data), 231 | }); 232 | } 233 | 234 | #[test] 235 | fn single_packet_multi_fragment() { 236 | let mut window = AssemblyWindow::new(10000); 237 | 238 | let packet_data = (0..MAX_FRAGMENT_SIZE*5).map(|i| i as u8).collect::>().into_boxed_slice(); 239 | 240 | for i in 0 .. 5 { 241 | let fragment_data = packet_data[i * MAX_FRAGMENT_SIZE .. 
(i + 1) * MAX_FRAGMENT_SIZE].into(); 242 | 243 | let result = window.try_add(0, Datagram { 244 | sequence_id: 0, 245 | channel_id: 0, 246 | window_parent_lead: 0, 247 | channel_parent_lead: 0, 248 | fragment_id: i as u16, 249 | fragment_id_last: 4, 250 | data: fragment_data, 251 | }); 252 | 253 | if i == 4 { 254 | assert_eq!(result.unwrap(), Packet { 255 | sequence_id: 0, 256 | channel_id: 0, 257 | window_parent_lead: 0, 258 | channel_parent_lead: 0, 259 | data: Some(packet_data), 260 | }); 261 | 262 | break; 263 | } else { 264 | assert_eq!(result, None); 265 | } 266 | } 267 | } 268 | 269 | #[test] 270 | fn alloc_exceeded() { 271 | let packet_0_size = 100; 272 | 273 | let mut window = AssemblyWindow::new(packet_0_size); 274 | 275 | let packet_0_data = (0..packet_0_size).map(|i| i as u8).collect::>().into_boxed_slice(); 276 | let packet_2_data = (0..MAX_FRAGMENT_SIZE*2).map(|i| i as u8).collect::>().into_boxed_slice(); 277 | 278 | // Receive a packet which maxes out the allocation counter 279 | let result = window.try_add(0, Datagram { 280 | sequence_id: 0, 281 | channel_id: 0, 282 | window_parent_lead: 0, 283 | channel_parent_lead: 0, 284 | fragment_id: 0, 285 | fragment_id_last: 0, 286 | data: packet_0_data.clone(), 287 | }); 288 | 289 | assert_eq!(result.unwrap(), Packet { 290 | sequence_id: 0, 291 | channel_id: 0, 292 | window_parent_lead: 0, 293 | channel_parent_lead: 0, 294 | data: Some(packet_0_data.clone()), 295 | }); 296 | 297 | // A zero length packet should not trip the memory allocation counter 298 | let result = window.try_add(1, Datagram { 299 | sequence_id: 1, 300 | channel_id: 0, 301 | window_parent_lead: 0, 302 | channel_parent_lead: 0, 303 | fragment_id: 0, 304 | fragment_id_last: 0, 305 | data: vec![].into_boxed_slice(), 306 | }); 307 | 308 | assert_eq!(result.unwrap(), Packet { 309 | sequence_id: 1, 310 | channel_id: 0, 311 | window_parent_lead: 0, 312 | channel_parent_lead: 0, 313 | data: Some(vec![].into_boxed_slice()), 314 | }); 315 | 316 | // 
A nonzero length packet should produce a dud on first fragment, and no response to 317 | // remaining fragments. 318 | let result = window.try_add(2, Datagram { 319 | sequence_id: 2, 320 | channel_id: 0, 321 | window_parent_lead: 0, 322 | channel_parent_lead: 0, 323 | fragment_id: 0, 324 | fragment_id_last: 1, 325 | data: packet_2_data[ .. MAX_FRAGMENT_SIZE].into(), 326 | }); 327 | 328 | assert_eq!(result.unwrap(), Packet { 329 | sequence_id: 2, 330 | channel_id: 0, 331 | window_parent_lead: 0, 332 | channel_parent_lead: 0, 333 | data: None, 334 | }); 335 | 336 | let result = window.try_add(2, Datagram { 337 | sequence_id: 2, 338 | channel_id: 0, 339 | window_parent_lead: 0, 340 | channel_parent_lead: 0, 341 | fragment_id: 1, 342 | fragment_id_last: 1, 343 | data: packet_2_data[MAX_FRAGMENT_SIZE .. ].into(), 344 | }); 345 | 346 | assert_eq!(result, None); 347 | 348 | // Clearing entries should allow future packets to be received correctly 349 | window.clear(0); 350 | window.clear(1); 351 | window.clear(2); 352 | 353 | let result = window.try_add(3, Datagram { 354 | sequence_id: 3, 355 | channel_id: 0, 356 | window_parent_lead: 0, 357 | channel_parent_lead: 0, 358 | fragment_id: 0, 359 | fragment_id_last: 0, 360 | data: packet_0_data.clone(), 361 | }); 362 | 363 | assert_eq!(result.unwrap(), Packet { 364 | sequence_id: 3, 365 | channel_id: 0, 366 | window_parent_lead: 0, 367 | channel_parent_lead: 0, 368 | data: Some(packet_0_data.clone()), 369 | }); 370 | } 371 | 372 | #[test] 373 | fn alloc_counter() { 374 | use super::packet_alloc_size; 375 | use std::collections::VecDeque; 376 | 377 | let packet_num = 100; 378 | 379 | let mut window = AssemblyWindow::new(packet_num*4*MAX_FRAGMENT_SIZE); 380 | 381 | let mut alloc_sum = 0; 382 | 383 | let mut alloc_sizes = VecDeque::new(); 384 | 385 | for i in 0 .. 
100 { 386 | let packet_size = if rand::random() { 387 | rand::random::() % MAX_FRAGMENT_SIZE 388 | } else { 389 | (rand::random::() % (3*MAX_FRAGMENT_SIZE)) + MAX_FRAGMENT_SIZE 390 | }; 391 | 392 | let fragment_0_data = (0..packet_size.min(MAX_FRAGMENT_SIZE)).map(|i| i as u8).collect::>().into_boxed_slice(); 393 | let num_fragments = (packet_size + MAX_FRAGMENT_SIZE - 1) / MAX_FRAGMENT_SIZE + (packet_size == 0) as usize; 394 | 395 | let datagram_0 = Datagram { 396 | sequence_id: i, 397 | channel_id: 0, 398 | window_parent_lead: 0, 399 | channel_parent_lead: 0, 400 | fragment_id: 0, 401 | fragment_id_last: (num_fragments - 1) as u16, 402 | data: fragment_0_data, 403 | }; 404 | 405 | let alloc_size = packet_alloc_size(&datagram_0); 406 | alloc_sum += alloc_size; 407 | alloc_sizes.push_back(alloc_size); 408 | 409 | window.try_add(i as usize, datagram_0); 410 | 411 | assert_eq!(window.alloc, alloc_sum); 412 | } 413 | 414 | for i in 0 .. 100 { 415 | alloc_sizes.pop_front(); 416 | 417 | window.clear(i); 418 | 419 | assert_eq!(window.alloc, alloc_sizes.iter().sum()); 420 | } 421 | } 422 | 423 | /* 424 | #[test] 425 | fn invalid_datagrams() { 426 | use super::MAX_PACKET_WINDOW_SIZE; 427 | 428 | let mut window = AssemblyWindow::new(10000); 429 | 430 | let packet_size = 100; 431 | let packet_data = (0..packet_size).map(|i| i as u8).collect::>().into_boxed_slice(); 432 | 433 | // Beyond receive window 434 | let result = window.try_add(0, Datagram { 435 | sequence_id: MAX_PACKET_WINDOW_SIZE, 436 | channel_id: 0, 437 | window_parent_lead: 0, 438 | channel_parent_lead: 0, 439 | fragment_id: 0, 440 | fragment_id_last: 0, 441 | data: packet_data.clone(), 442 | }); 443 | 444 | assert_eq!(result, None); 445 | 446 | // Window parent behind channel parent 447 | let result = window.try_add(Datagram { 448 | sequence_id: 0, 449 | channel_id: 0, 450 | window_parent_lead: 2, 451 | channel_parent_lead: 1, 452 | fragment_id: 0, 453 | fragment_id_last: 0, 454 | data: packet_data.clone(), 455 
| }); 456 | 457 | assert_eq!(result, None); 458 | 459 | // Window parent unset, channel parent set 460 | let result = window.try_add(Datagram { 461 | sequence_id: 0, 462 | channel_id: 0, 463 | window_parent_lead: 0, 464 | channel_parent_lead: 1, 465 | fragment_id: 0, 466 | fragment_id_last: 0, 467 | data: packet_data.clone(), 468 | }); 469 | 470 | assert_eq!(result, None); 471 | 472 | // Bad fragment id 473 | let result = window.try_add(Datagram { 474 | sequence_id: 0, 475 | channel_id: 0, 476 | window_parent_lead: 0, 477 | channel_parent_lead: 0, 478 | fragment_id: 1, 479 | fragment_id_last: 0, 480 | data: packet_data.clone(), 481 | }); 482 | 483 | assert_eq!(result, None); 484 | 485 | let packet_data = (0..MAX_FRAGMENT_SIZE + MAX_FRAGMENT_SIZE/2).map(|i| i as u8).collect::>().into_boxed_slice(); 486 | 487 | // Last fragment too large 488 | let result = window.try_add(Datagram { 489 | sequence_id: 0, 490 | channel_id: 0, 491 | window_parent_lead: 0, 492 | channel_parent_lead: 0, 493 | fragment_id: 1, 494 | fragment_id_last: 1, 495 | data: packet_data.clone(), 496 | }); 497 | 498 | assert_eq!(result, None); 499 | 500 | // OK last fragment 501 | let result = window.try_add(Datagram { 502 | sequence_id: 0, 503 | channel_id: 0, 504 | window_parent_lead: 0, 505 | channel_parent_lead: 0, 506 | fragment_id: 1, 507 | fragment_id_last: 1, 508 | data: packet_data[MAX_FRAGMENT_SIZE .. ].into(), 509 | }); 510 | 511 | assert_eq!(result, None); 512 | 513 | // Bad fragment id 514 | let result = window.try_add(Datagram { 515 | sequence_id: 0, 516 | channel_id: 0, 517 | window_parent_lead: 0, 518 | channel_parent_lead: 0, 519 | fragment_id: 2, 520 | fragment_id_last: 1, 521 | data: packet_data[ .. 
MAX_FRAGMENT_SIZE].into(), 522 | }); 523 | 524 | assert_eq!(result, None); 525 | 526 | // Bad fragment size 527 | let result = window.try_add(Datagram { 528 | sequence_id: 0, 529 | channel_id: 0, 530 | window_parent_lead: 0, 531 | channel_parent_lead: 0, 532 | fragment_id: 0, 533 | fragment_id_last: 1, 534 | data: packet_data[ .. MAX_FRAGMENT_SIZE-1].into(), 535 | }); 536 | 537 | assert_eq!(result, None); 538 | 539 | // Successful receipt 540 | let result = window.try_add(Datagram { 541 | sequence_id: 0, 542 | channel_id: 0, 543 | window_parent_lead: 0, 544 | channel_parent_lead: 0, 545 | fragment_id: 0, 546 | fragment_id_last: 1, 547 | data: packet_data[ .. MAX_FRAGMENT_SIZE].into(), 548 | }); 549 | 550 | assert_eq!(result.unwrap(), Packet { 551 | sequence_id: 0, 552 | channel_id: 0, 553 | window_parent_lead: 0, 554 | channel_parent_lead: 0, 555 | data: Some(packet_data), 556 | }); 557 | } 558 | */ 559 | 560 | #[test] 561 | fn inconsistent_datagrams() { 562 | let mut window = AssemblyWindow::new(10000); 563 | 564 | let packet_data = (0..2*MAX_FRAGMENT_SIZE).map(|i| i as u8).collect::>().into_boxed_slice(); 565 | 566 | // Initial fragment 567 | let result = window.try_add(0, Datagram { 568 | sequence_id: 0, 569 | channel_id: 0, 570 | window_parent_lead: 0, 571 | channel_parent_lead: 0, 572 | fragment_id: 0, 573 | fragment_id_last: 1, 574 | data: packet_data[ .. MAX_FRAGMENT_SIZE].into(), 575 | }); 576 | 577 | assert_eq!(result, None); 578 | 579 | // Duplicate fragment 580 | let result = window.try_add(0, Datagram { 581 | sequence_id: 0, 582 | channel_id: 0, 583 | window_parent_lead: 0, 584 | channel_parent_lead: 0, 585 | fragment_id: 0, 586 | fragment_id_last: 1, 587 | data: packet_data[ .. 
MAX_FRAGMENT_SIZE].into(), 588 | }); 589 | 590 | assert_eq!(result, None); 591 | 592 | // Different channel ID 593 | let result = window.try_add(0, Datagram { 594 | sequence_id: 0, 595 | channel_id: 1, 596 | window_parent_lead: 0, 597 | channel_parent_lead: 0, 598 | fragment_id: 1, 599 | fragment_id_last: 1, 600 | data: packet_data[MAX_FRAGMENT_SIZE .. ].into(), 601 | }); 602 | 603 | assert_eq!(result, None); 604 | 605 | // Different window parent 606 | let result = window.try_add(0, Datagram { 607 | sequence_id: 0, 608 | channel_id: 0, 609 | window_parent_lead: 1, 610 | channel_parent_lead: 0, 611 | fragment_id: 1, 612 | fragment_id_last: 1, 613 | data: packet_data[MAX_FRAGMENT_SIZE .. ].into(), 614 | }); 615 | 616 | assert_eq!(result, None); 617 | 618 | // Different channel parent 619 | let result = window.try_add(0, Datagram { 620 | sequence_id: 0, 621 | channel_id: 0, 622 | window_parent_lead: 0, 623 | channel_parent_lead: 1, 624 | fragment_id: 1, 625 | fragment_id_last: 1, 626 | data: packet_data[MAX_FRAGMENT_SIZE .. ].into(), 627 | }); 628 | 629 | assert_eq!(result, None); 630 | 631 | // Different fragment count 632 | let result = window.try_add(0, Datagram { 633 | sequence_id: 0, 634 | channel_id: 0, 635 | window_parent_lead: 0, 636 | channel_parent_lead: 0, 637 | fragment_id: 1, 638 | fragment_id_last: 2, 639 | data: packet_data[MAX_FRAGMENT_SIZE .. ].into(), 640 | }); 641 | 642 | assert_eq!(result, None); 643 | 644 | // Successful receipt 645 | let result = window.try_add(0, Datagram { 646 | sequence_id: 0, 647 | channel_id: 0, 648 | window_parent_lead: 0, 649 | channel_parent_lead: 0, 650 | fragment_id: 1, 651 | fragment_id_last: 1, 652 | data: packet_data[MAX_FRAGMENT_SIZE .. 
].into(),
        });

        assert_eq!(result.unwrap(), Packet {
            sequence_id: 0,
            channel_id: 0,
            window_parent_lead: 0,
            channel_parent_lead: 0,
            data: Some(packet_data),
        });
    }
}

-------------------------------------------------------------------------------- /src/half_connection/packet_sender.rs: --------------------------------------------------------------------------------

use super::pending_packet::{PendingPacket, PendingPacketRc};

use crate::CHANNEL_COUNT;
use crate::MAX_FRAGMENT_SIZE;
use crate::MAX_PACKET_SIZE;
use crate::MAX_PACKET_WINDOW_SIZE;
use crate::packet_id;
use crate::SendMode;

use std::collections::VecDeque;
use std::cell::RefCell;
use std::rc::Rc;

// Size of the buffer that the receiver will allocate for a packet in bytes
fn alloc_size(packet_size: usize) -> usize {
    if packet_size > MAX_FRAGMENT_SIZE {
        // Fragmented packets are charged a whole fragment buffer per fragment.
        ((packet_size + MAX_FRAGMENT_SIZE - 1) / MAX_FRAGMENT_SIZE) * MAX_FRAGMENT_SIZE
    } else {
        packet_size
    }
}

// One in-flight packet occupying a slot of the send window.
struct WindowEntry {
    // The packet to be sent in this slot
    packet: PendingPacketRc,
    // How many allocation points this packet is worth
    alloc_size: usize,
    // Channel this packet was sent on
    channel_id: u8,
}

struct Channel {
    // Sequence ID of the newest reliable packet sent on this channel, if any
    parent_id: Option<u32>,
}

impl Channel {
    fn new() -> Self {
        Self {
            parent_id: None,
        }
    }
}

// A packet queued by the user but not yet assigned a sequence ID.
#[derive(Debug)]
struct PacketSendEntry {
    data: Box<[u8]>,
    channel_id: u8,
    mode: SendMode,
    flush_id: u32,
}

impl PacketSendEntry {
    fn new(data: Box<[u8]>, channel_id: u8, mode: SendMode, flush_id: u32) -> Self {
        Self {
            data,
            channel_id,
            mode,
            flush_id,
        }
    }
}

macro_rules!
window_index { 65 | ($self:ident, $sequence_id:expr) => { 66 | ($sequence_id & $self.window_mask) as usize 67 | }; 68 | } 69 | 70 | pub struct PacketSender { 71 | packet_send_queue: VecDeque, 72 | 73 | base_id: u32, 74 | next_id: u32, 75 | window: Box<[Option]>, 76 | window_size: u32, 77 | window_mask: u32, 78 | 79 | window_parent_id: Option, 80 | channels: Box<[Channel]>, 81 | 82 | max_alloc: usize, 83 | alloc: usize, 84 | 85 | total_size: usize, 86 | } 87 | 88 | impl PacketSender { 89 | pub fn new(window_size: u32, base_id: u32, max_alloc: usize) -> Self { 90 | debug_assert!(window_size > 0); 91 | debug_assert!(window_size <= MAX_PACKET_WINDOW_SIZE); 92 | debug_assert!(window_size & (window_size - 1) == 0); 93 | 94 | debug_assert!(packet_id::is_valid(base_id)); 95 | 96 | let window: Vec> = (0 .. window_size).map(|_| None).collect(); 97 | 98 | let channels: Vec = (0 .. CHANNEL_COUNT).map(|_| Channel::new()).collect(); 99 | 100 | let max_alloc_ceil = ((max_alloc + MAX_FRAGMENT_SIZE - 1) / MAX_FRAGMENT_SIZE) * MAX_FRAGMENT_SIZE; 101 | 102 | Self { 103 | packet_send_queue: VecDeque::new(), 104 | 105 | base_id: base_id, 106 | next_id: base_id, 107 | window: window.into_boxed_slice(), 108 | window_size: window_size, 109 | window_mask: window_size - 1, 110 | 111 | window_parent_id: None, 112 | channels: channels.into_boxed_slice(), 113 | 114 | max_alloc: max_alloc_ceil, 115 | alloc: 0, 116 | 117 | total_size: 0, 118 | } 119 | } 120 | 121 | pub fn pending_count(&self) -> usize { 122 | self.packet_send_queue.len() 123 | } 124 | 125 | pub fn total_size(&self) -> usize { 126 | self.total_size 127 | } 128 | 129 | pub fn next_id(&self) -> u32 { 130 | self.next_id 131 | } 132 | 133 | pub fn base_id(&self) -> u32 { 134 | self.base_id 135 | } 136 | 137 | // Places a user packet on the send queue. 
138 | pub fn enqueue_packet(&mut self, data: Box<[u8]>, channel_id: u8, mode: SendMode, flush_id: u32) { 139 | debug_assert!(data.len() <= MAX_PACKET_SIZE); 140 | debug_assert!(data.len() <= self.max_alloc); 141 | debug_assert!((channel_id as usize) < CHANNEL_COUNT); 142 | 143 | self.total_size += data.len(); 144 | self.packet_send_queue.push_back(PacketSendEntry::new(data, channel_id, mode, flush_id)); 145 | } 146 | 147 | // Pulls a single packet from the send queue, respecting both the maximum allocation limit, and 148 | // the maximum transfer window. 149 | pub fn emit_packet(&mut self, flush_id: u32) -> Option<(PendingPacketRc, bool)> { 150 | while let Some(packet) = self.packet_send_queue.front() { 151 | match packet.mode { 152 | SendMode::TimeSensitive => { 153 | if packet.flush_id != flush_id { 154 | self.total_size -= packet.data.len(); 155 | self.packet_send_queue.pop_front(); 156 | } else { 157 | break; 158 | } 159 | } 160 | _ => break 161 | } 162 | } 163 | 164 | if let Some(packet) = self.packet_send_queue.front() { 165 | if packet_id::sub(self.next_id, self.base_id) >= self.window_size { 166 | return None; 167 | } 168 | 169 | let packet_alloc_size = alloc_size(packet.data.len()); 170 | 171 | if self.alloc + packet_alloc_size > self.max_alloc { 172 | return None; 173 | } 174 | 175 | let packet = self.packet_send_queue.pop_front().unwrap(); 176 | 177 | let sequence_id = self.next_id; 178 | let ref mut channel = self.channels[packet.channel_id as usize]; 179 | 180 | let window_parent_lead = 181 | if let Some(parent_id) = self.window_parent_id { 182 | let lead = packet_id::sub(sequence_id, parent_id); 183 | debug_assert!(lead <= u16::MAX as u32); 184 | lead as u16 185 | } else { 186 | 0 187 | }; 188 | 189 | let channel_parent_lead = 190 | if let Some(parent_id) = channel.parent_id { 191 | let lead = packet_id::sub(sequence_id, parent_id); 192 | debug_assert!(lead <= u16::MAX as u32); 193 | lead as u16 194 | } else { 195 | 0 196 | }; 197 | 198 | let 
pending_packet = Rc::new(RefCell::new(PendingPacket::new(packet.data, 199 | packet.channel_id, 200 | sequence_id, 201 | window_parent_lead, 202 | channel_parent_lead))); 203 | 204 | let pending_packet_clone = Rc::clone(&pending_packet); 205 | 206 | let window_idx = window_index!(self, sequence_id); 207 | 208 | debug_assert!(self.window[window_idx].is_none()); 209 | self.window[window_idx] = Some(WindowEntry { 210 | packet: pending_packet, 211 | alloc_size: packet_alloc_size, 212 | channel_id: packet.channel_id 213 | }); 214 | 215 | self.next_id = packet_id::add(self.next_id, 1); 216 | 217 | self.alloc += packet_alloc_size; 218 | 219 | match packet.mode { 220 | SendMode::Reliable => { 221 | self.window_parent_id = Some(sequence_id); 222 | channel.parent_id = Some(sequence_id); 223 | } 224 | _ => () 225 | } 226 | 227 | let resend = match packet.mode { 228 | SendMode::TimeSensitive => false, 229 | SendMode::Unreliable => false, 230 | SendMode::Persistent => true, 231 | SendMode::Reliable => true, 232 | }; 233 | 234 | return Some((pending_packet_clone, resend)); 235 | } 236 | 237 | return None; 238 | } 239 | 240 | // Responds to a receive window acknowledgement. All packet data beyond the new receive window 241 | // is forgotten, thereby freeing transfer window & allocation space for new packets. 
242 | pub fn acknowledge(&mut self, receiver_base_id: u32) { 243 | let receiver_delta = packet_id::sub(receiver_base_id, self.base_id); 244 | let span = packet_id::sub(self.next_id, self.base_id); 245 | 246 | if receiver_delta > span { 247 | return; 248 | } 249 | 250 | while self.base_id != receiver_base_id { 251 | let window_idx = window_index!(self, self.base_id); 252 | let ref mut entry = self.window[window_idx].as_ref().unwrap(); 253 | 254 | let ref mut channel = self.channels[entry.channel_id as usize]; 255 | 256 | if let Some(parent_id) = self.window_parent_id { 257 | if parent_id == self.base_id { 258 | self.window_parent_id = None; 259 | } 260 | } 261 | 262 | if let Some(parent_id) = channel.parent_id { 263 | if parent_id == self.base_id { 264 | channel.parent_id = None; 265 | } 266 | } 267 | 268 | self.alloc -= entry.alloc_size; 269 | self.total_size -= entry.packet.borrow().size(); 270 | 271 | self.window[window_idx] = None; 272 | 273 | self.base_id = packet_id::add(self.base_id, 1); 274 | } 275 | } 276 | } 277 | 278 | #[cfg(test)] 279 | mod tests { 280 | use super::*; 281 | 282 | fn new_packet_data(sequence_id: u32) -> Box<[u8]> { 283 | sequence_id.to_be_bytes().into() 284 | } 285 | 286 | fn packet_info(emit_result: (PendingPacketRc, bool)) -> (u32, u8, u16, u16, bool) { 287 | let packet_ref = emit_result.0.borrow(); 288 | (packet_ref.sequence_id(), 289 | packet_ref.channel_id(), 290 | packet_ref.window_parent_lead(), 291 | packet_ref.channel_parent_lead(), 292 | emit_result.1) 293 | } 294 | 295 | #[test] 296 | fn alloc_size_correct() { 297 | assert_eq!(alloc_size(0), 0); 298 | assert_eq!(alloc_size(1), 1); 299 | assert_eq!(alloc_size( MAX_FRAGMENT_SIZE-1), MAX_FRAGMENT_SIZE-1); 300 | assert_eq!(alloc_size( MAX_FRAGMENT_SIZE ), MAX_FRAGMENT_SIZE); 301 | assert_eq!(alloc_size( MAX_FRAGMENT_SIZE+1), 2*MAX_FRAGMENT_SIZE); 302 | assert_eq!(alloc_size(2*MAX_FRAGMENT_SIZE-1), 2*MAX_FRAGMENT_SIZE); 303 | assert_eq!(alloc_size(2*MAX_FRAGMENT_SIZE ), 
2*MAX_FRAGMENT_SIZE); 304 | assert_eq!(alloc_size(2*MAX_FRAGMENT_SIZE+1), 3*MAX_FRAGMENT_SIZE); 305 | } 306 | 307 | #[test] 308 | fn basic() { 309 | let mut tx = PacketSender::new(MAX_PACKET_WINDOW_SIZE, 0, 10000); 310 | 311 | tx.enqueue_packet(new_packet_data(0), 0, SendMode::TimeSensitive, 0); 312 | tx.enqueue_packet(new_packet_data(1), 0, SendMode::Unreliable, 0); 313 | tx.enqueue_packet(new_packet_data(2), 0, SendMode::Persistent, 0); 314 | tx.enqueue_packet(new_packet_data(3), 0, SendMode::Reliable, 0); 315 | 316 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (0, 0, 0, 0, false)); 317 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (1, 0, 0, 0, false)); 318 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (2, 0, 0, 0, true)); 319 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (3, 0, 0, 0, true)); 320 | 321 | assert!(tx.emit_packet(0).is_none()); 322 | } 323 | 324 | #[test] 325 | fn parent_leads() { 326 | /* 327 | 7 # # 328 | 6 # # 329 | 5 O O 330 | 4 O O 331 | 3 # # 332 | 2 O O 333 | 1 # # 334 | 0 O O 335 | w c0 c1 336 | */ 337 | 338 | let mut tx = PacketSender::new(MAX_PACKET_WINDOW_SIZE, 0, 10000); 339 | 340 | tx.enqueue_packet(new_packet_data(0), 1, SendMode::Unreliable, 0); 341 | tx.enqueue_packet(new_packet_data(1), 1, SendMode::Reliable, 0); 342 | tx.enqueue_packet(new_packet_data(2), 1, SendMode::Unreliable, 0); 343 | 344 | tx.enqueue_packet(new_packet_data(3), 0, SendMode::Reliable, 0); 345 | tx.enqueue_packet(new_packet_data(4), 0, SendMode::Unreliable, 0); 346 | tx.enqueue_packet(new_packet_data(5), 0, SendMode::Unreliable, 0); 347 | tx.enqueue_packet(new_packet_data(6), 0, SendMode::Reliable, 0); 348 | 349 | tx.enqueue_packet(new_packet_data(7), 1, SendMode::Reliable, 0); 350 | 351 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (0, 1, 0, 0, false)); 352 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (1, 1, 0, 0, true)); 353 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (2, 1, 1, 1, false)); 354 | 355 | 
assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (3, 0, 2, 0, true)); 356 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (4, 0, 1, 1, false)); 357 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (5, 0, 2, 2, false)); 358 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (6, 0, 3, 3, true)); 359 | 360 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (7, 1, 1, 6, true)); 361 | 362 | assert!(tx.emit_packet(0).is_none()); 363 | } 364 | 365 | #[test] 366 | fn parent_leads_acknowledgement() { 367 | /* 368 | 6 # # 369 | 5 # # 370 | 4 O O 371 | 3 # # 372 | 2 O O 373 | 1 # # 374 | 0 O O 375 | w c0 c1 376 | */ 377 | 378 | let mut tx = PacketSender::new(MAX_PACKET_WINDOW_SIZE, 0, 10000); 379 | 380 | let mut flush_id = 0; 381 | 382 | for i in 0 .. MAX_PACKET_WINDOW_SIZE { 383 | let ref_id = i*7; 384 | 385 | tx.acknowledge(ref_id); 386 | 387 | tx.enqueue_packet(new_packet_data(ref_id + 0), 1, SendMode::Unreliable, flush_id); 388 | tx.enqueue_packet(new_packet_data(ref_id + 1), 1, SendMode::Reliable, flush_id); 389 | tx.enqueue_packet(new_packet_data(ref_id + 2), 1, SendMode::Unreliable, flush_id); 390 | 391 | tx.enqueue_packet(new_packet_data(ref_id + 3), 0, SendMode::Reliable, flush_id); 392 | tx.enqueue_packet(new_packet_data(ref_id + 4), 0, SendMode::Unreliable, flush_id); 393 | tx.enqueue_packet(new_packet_data(ref_id + 5), 0, SendMode::Reliable, flush_id); 394 | 395 | tx.enqueue_packet(new_packet_data(ref_id + 6), 1, SendMode::Reliable, flush_id); 396 | 397 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (ref_id + 0, 1, 0, 0, false)); 398 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (ref_id + 1, 1, 0, 0, true)); 399 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (ref_id + 2, 1, 1, 1, false)); 400 | 401 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (ref_id + 3, 0, 2, 0, true)); 402 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (ref_id + 4, 0, 1, 1, false)); 403 | 
assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (ref_id + 5, 0, 2, 2, true)); 404 | 405 | assert_eq!(packet_info(tx.emit_packet(0).unwrap()), (ref_id + 6, 1, 1, 5, true)); 406 | 407 | assert!(tx.emit_packet(0).is_none()); 408 | 409 | flush_id = packet_id::add(flush_id, 1); 410 | } 411 | } 412 | 413 | /* 414 | #[test] 415 | fn fragment_emission() { 416 | use super::emit_fragments; 417 | use super::PacketSendEntry; 418 | 419 | let mut sink = TestDatagramSink::new(); 420 | 421 | emit_fragments(PacketSendEntry::new(Box::new([]), 0, SendMode::Unreliable, 0), 0, 0, 0, &mut sink); 422 | 423 | assert_eq!(sink.pop(), (frame::Datagram { 424 | sequence_id: 0, 425 | channel_id: 0, 426 | window_parent_lead: 0, 427 | channel_parent_lead: 0, 428 | fragment_id: frame::FragmentId { id: 0, last: 0 }, 429 | data: Box::new([]), 430 | }, false)); 431 | 432 | assert!(sink.is_empty()); 433 | 434 | let packet_data = (0..MAX_FRAGMENT_SIZE).map(|v| v as u8).collect::>().into_boxed_slice(); 435 | 436 | emit_fragments(PacketSendEntry::new(packet_data.clone(), 0, SendMode::Unreliable, 0), 0, 0, 0, &mut sink); 437 | 438 | assert_eq!(sink.pop(), (frame::Datagram { 439 | sequence_id: 0, 440 | channel_id: 0, 441 | window_parent_lead: 0, 442 | channel_parent_lead: 0, 443 | fragment_id: frame::FragmentId { id: 0, last: 0 }, 444 | data: packet_data, 445 | }, false)); 446 | 447 | assert!(sink.is_empty()); 448 | 449 | let packet_data = (0..MAX_FRAGMENT_SIZE+1).map(|v| v as u8).collect::>().into_boxed_slice(); 450 | 451 | emit_fragments(PacketSendEntry::new(packet_data.clone(), 0, SendMode::Unreliable, 0), 0, 0, 0, &mut sink); 452 | 453 | assert_eq!(sink.pop(), (frame::Datagram { 454 | sequence_id: 0, 455 | channel_id: 0, 456 | window_parent_lead: 0, 457 | channel_parent_lead: 0, 458 | fragment_id: frame::FragmentId { id: 0, last: 1 }, 459 | data: packet_data[0..MAX_FRAGMENT_SIZE].into(), 460 | }, false)); 461 | 462 | assert_eq!(sink.pop(), (frame::Datagram { 463 | sequence_id: 0, 464 | channel_id: 
            0,
            window_parent_lead: 0,
            channel_parent_lead: 0,
            fragment_id: frame::FragmentId { id: 1, last: 1 },
            data: packet_data[MAX_FRAGMENT_SIZE..MAX_FRAGMENT_SIZE+1].into(),
        }, false));

        assert!(sink.is_empty());
    }
    */

    // TODO: Test transfer window limit
    // TODO: Test allocation limit
    // TODO: Test allocation size tracking
}

-------------------------------------------------------------------------------- /src/half_connection/packet_tests.rs: --------------------------------------------------------------------------------

use super::packet_sender;
use super::packet_receiver;

use crate::MAX_PACKET_WINDOW_SIZE;
use crate::SendMode;
use crate::frame;
use crate::packet_id;

use std::collections::VecDeque;

// Test harness: drains a PacketSender and collects every emitted fragment
// datagram so it can later be fed to a PacketReceiver.
struct TestDatagramSink {
    pub datagrams: VecDeque<frame::Datagram>,
}

impl TestDatagramSink {
    pub fn new() -> Self {
        Self {
            datagrams: VecDeque::new(),
        }
    }

    /// Emits packets from `sender` until it has nothing left to emit, pushing
    /// one datagram per fragment (fragment IDs 0 ..= last) onto the queue.
    pub fn pull(&mut self, sender: &mut packet_sender::PacketSender, flush_id: u32) {
        while let Some((pending_packet_rc, _)) = sender.emit_packet(flush_id) {
            let pending_packet_ref = std::cell::RefCell::borrow(&pending_packet_rc);
            let last_fragment_id = pending_packet_ref.last_fragment_id();

            for i in 0 ..= last_fragment_id {
                self.datagrams.push_back(pending_packet_ref.datagram(i).into());
            }
        }
    }
}

// Test harness: collects packets delivered by a PacketReceiver, in order.
struct TestPacketSink {
    pub packets: VecDeque<Box<[u8]>>,
}

impl TestPacketSink {
    pub fn new() -> Self {
        Self {
            packets: VecDeque::new(),
        }
    }
}

impl super::PacketSink for TestPacketSink {
    fn send(&mut self, packet: Box<[u8]>) {
        self.packets.push_back(packet);
    }
}

/// Returns `size` random bytes.
fn random_packet_data(size: usize) -> Box<[u8]> {
    (0 .. size).map(|_| rand::random::<u8>()).collect::<Vec<_>>().into_boxed_slice()
}

/// Returns at least 4 random bytes, with the first 4 overwritten by `id`
/// (big-endian) so each packet is identifiable after reassembly.
fn random_packet_data_with_id(id: u32, size: usize) -> Box<[u8]> {
    let mut data = (0 .. size.max(4)).map(|_| rand::random::<u8>()).collect::<Vec<_>>().into_boxed_slice();
    data[0..4].clone_from_slice(&id.to_be_bytes());
    return data;
}

/// Ensures that packets of various sizes can be fragmented and reassembled correctly.
#[test]
fn random_transfer() {
    use crate::CHANNEL_COUNT;

    const NUM_PACKETS: usize = 1024;
    const MAX_PACKET_SIZE: usize = 5000;
    const MAX_ALLOC_SIZE: usize = MAX_PACKET_SIZE*NUM_PACKETS;
    const WINDOW_SIZE: u32 = 1024;

    // Start half the packet count below zero so the sequence ID space wraps
    // mid-test, exercising wrapping arithmetic in sender and receiver.
    let base_id = packet_id::sub(0u32, NUM_PACKETS as u32/2);

    let mut sender = packet_sender::PacketSender::new(WINDOW_SIZE, base_id, MAX_ALLOC_SIZE);
    let mut receiver = packet_receiver::PacketReceiver::new(WINDOW_SIZE, base_id, MAX_ALLOC_SIZE);

    let mut sent_packet_ids = [0u32; CHANNEL_COUNT];
    let mut sent_packets = VecDeque::new();

    for _ in 0 .. NUM_PACKETS {
        let channel_id = rand::random::<u8>() % CHANNEL_COUNT as u8;
        let ref mut packet_id = sent_packet_ids[channel_id as usize];

        let size = rand::random::<usize>() % MAX_PACKET_SIZE;

        sent_packets.push_back((random_packet_data_with_id(*packet_id, size), channel_id));
        *packet_id += 1;
    }

    for (packet, channel_id) in sent_packets.clone().into_iter() {
        // Send mode should not affect delivery in this lossless scenario, so
        // it is randomized.
        let send_mode = match rand::random::<u32>() % 4 {
            0 => SendMode::TimeSensitive,
            1 => SendMode::Unreliable,
            2 => SendMode::Persistent,
            3 => SendMode::Reliable,
            _ => panic!()
        };

        sender.enqueue_packet(packet, channel_id, send_mode, 0);
    }

    let mut datagram_sink = TestDatagramSink::new();
    loop {
        datagram_sink.pull(&mut sender, 0);

        let datagrams = std::mem::take(&mut datagram_sink.datagrams);

        if datagrams.is_empty() {
            break;
        }

        for datagram in datagrams.into_iter() {
            receiver.handle_datagram(datagram);
        }
    }

    let mut packet_sink = TestPacketSink::new();
    receiver.receive(&mut packet_sink);

    // Every packet must arrive exactly once, in the original send order.
    assert_eq!(packet_sink.packets, sent_packets.into_iter().map(|pair| pair.0).collect::<VecDeque<_>>());
}

/// Sends a single packet of `packet_size` bytes through a fresh
/// sender/receiver pair configured with allocation limit `max_alloc`, and
/// asserts it is delivered intact.
fn test_single_transfer(packet_size: usize, max_alloc: usize) {
    let mut sender = packet_sender::PacketSender::new(MAX_PACKET_WINDOW_SIZE, 0, max_alloc);
    let mut receiver = packet_receiver::PacketReceiver::new(MAX_PACKET_WINDOW_SIZE, 0, max_alloc);

    let packet_data = random_packet_data(packet_size);
    sender.enqueue_packet(packet_data.clone(), 0, SendMode::Unreliable, 0);

    let mut datagram_sink = TestDatagramSink::new();
    datagram_sink.pull(&mut sender, 0);

    for datagram in datagram_sink.datagrams.into_iter() {
        receiver.handle_datagram(datagram);
    }

    let mut packet_sink = TestPacketSink::new();
    receiver.receive(&mut packet_sink);

    assert_eq!(packet_sink.packets.len(), 1);
    assert_eq!(packet_sink.packets[0], packet_data);
}

/// Ensures a packet of size zero may be transferred.
#[test]
fn null_packet_transfer() {
    test_single_transfer(0, 10_000);
}

/// Ensures a packet of maximum size may be transferred.
/// Uses the minimum possible allocation limit.
#[test]
#[ignore]
fn max_packet_transfer() {
    use crate::MAX_PACKET_SIZE;

    test_single_transfer(MAX_PACKET_SIZE, MAX_PACKET_SIZE);
}

/// Ensures PacketSender/PacketReceiver can transfer a packet with a size equal to the maximum
/// allocation limit. (They must internally round the allocation limit up to the nearest multiple
/// of MAX_FRAGMENT_SIZE for this to work.)
#[test]
fn single_packet_max_alloc() {
    use crate::MAX_FRAGMENT_SIZE;

    // Straddle the one- and two-fragment boundaries around the rounding point.
    let sizes = MAX_FRAGMENT_SIZE/2 .. MAX_FRAGMENT_SIZE*2 + MAX_FRAGMENT_SIZE/2;

    for size in sizes {
        test_single_transfer(size, size);
    }
}

-------------------------------------------------------------------------------- /src/half_connection/pending_packet.rs: --------------------------------------------------------------------------------

use crate::frame;
use crate::MAX_FRAGMENT_SIZE;

use std::cell::RefCell;
use std::rc::Rc;
use std::rc::Weak;

/// An enqueued outbound packet together with per-fragment acknowledgement
/// state. Fragments are fixed-size slices of `data`, MAX_FRAGMENT_SIZE bytes
/// each except for the (possibly shorter) last one.
#[derive(Debug)]
pub struct PendingPacket {
    data: Box<[u8]>,
    channel_id: u8,

    sequence_id: u32,
    window_parent_lead: u16,
    channel_parent_lead: u16,
    // ID of the final fragment; the fragment count is last_fragment_id + 1.
    last_fragment_id: u16,

    // One bit per fragment, set once that fragment has been acknowledged.
    ack_flags: Box<[u64]>,
}

impl PendingPacket {
    pub fn new(data: Box<[u8]>, channel_id: u8,
               sequence_id: u32, window_parent_lead: u16, channel_parent_lead: u16) -> Self {
        // Ceiling division; an empty packet still occupies one (empty) fragment.
        let num_fragments = (data.len() + MAX_FRAGMENT_SIZE - 1) / MAX_FRAGMENT_SIZE + (data.len() == 0) as usize;
        debug_assert!(num_fragments != 0);

        debug_assert!(num_fragments - 1 <= u16::MAX as usize);
        let last_fragment_id = (num_fragments - 1) as u16;

        Self {
            data,
            channel_id,

            sequence_id,
            window_parent_lead,
            channel_parent_lead,
            last_fragment_id,

            // 64 acknowledgement bits per word, rounded up.
            ack_flags: vec![0u64; (num_fragments + 63)/64].into_boxed_slice(),
        }
    }

    #[cfg(test)]
    pub fn sequence_id(&self) -> u32 {
        self.sequence_id
    }

    #[cfg(test)]
    pub fn channel_id(&self) -> u8 {
        self.channel_id
    }

    #[cfg(test)]
    pub fn window_parent_lead(&self) -> u16 {
        self.window_parent_lead
    }

    #[cfg(test)]
    pub fn channel_parent_lead(&self) -> u16 {
        self.channel_parent_lead
    }

    pub fn last_fragment_id(&self) -> u16 {
        self.last_fragment_id
    }

    /// Returns whether the given fragment has been acknowledged.
    pub fn fragment_acknowledged(&self, fragment_id: u16) -> bool {
        let flag_bit = 1 << (fragment_id % 64) as u64;
        let flags_index = (fragment_id / 64) as usize;
        self.ack_flags[flags_index] & flag_bit != 0
    }

    /// Marks the given fragment as acknowledged.
    pub fn acknowledge_fragment(&mut self, fragment_id: u16) {
        let flag_bit = 1 << (fragment_id % 64) as u64;
        let flags_index = (fragment_id / 64) as usize;
        self.ack_flags[flags_index] |= flag_bit;
    }

    /// Total payload size of this packet, in bytes.
    pub fn size(&self) -> usize {
        self.data.len()
    }

    /// Builds a borrowed datagram for the given fragment. The last fragment
    /// carries whatever data remains past the final full fragment boundary.
    pub fn datagram<'a>(&'a self, fragment_id: u16) -> frame::DatagramRef<'a> {
        debug_assert!(fragment_id <= self.last_fragment_id);

        let i = fragment_id as usize;
        let data = if fragment_id == self.last_fragment_id {
            &self.data[i * MAX_FRAGMENT_SIZE .. ]
        } else {
            &self.data[i * MAX_FRAGMENT_SIZE ..
(i + 1)*MAX_FRAGMENT_SIZE] 92 | }; 93 | 94 | frame::DatagramRef { 95 | sequence_id: self.sequence_id, 96 | channel_id: self.channel_id, 97 | window_parent_lead: self.window_parent_lead, 98 | channel_parent_lead: self.channel_parent_lead, 99 | fragment_id, 100 | fragment_id_last: self.last_fragment_id, 101 | data, 102 | } 103 | } 104 | } 105 | 106 | pub type PendingPacketRc = Rc>; 107 | pub type PendingPacketWeak = Weak>; 108 | 109 | #[derive(Debug)] 110 | pub struct FragmentRef { 111 | pub packet: PendingPacketWeak, 112 | pub fragment_id: u16, 113 | } 114 | 115 | impl Clone for FragmentRef { 116 | fn clone(&self) -> Self { 117 | Self { 118 | packet: Weak::clone(&self.packet), 119 | fragment_id: self.fragment_id, 120 | } 121 | } 122 | } 123 | 124 | impl FragmentRef { 125 | pub fn new(packet_rc: &PendingPacketRc, fragment_id: u16) -> Self { 126 | Self { 127 | packet: Rc::downgrade(packet_rc), 128 | fragment_id, 129 | } 130 | } 131 | } 132 | 133 | -------------------------------------------------------------------------------- /src/half_connection/pending_queue.rs: -------------------------------------------------------------------------------- 1 | 2 | use super::pending_packet::FragmentRef; 3 | 4 | use std::collections::VecDeque; 5 | 6 | #[derive(Debug)] 7 | pub struct Entry { 8 | pub fragment_ref: FragmentRef, 9 | pub resend: bool, 10 | } 11 | 12 | impl Entry { 13 | pub fn new(fragment_ref: FragmentRef, resend: bool) -> Self { 14 | Self { 15 | fragment_ref, 16 | resend, 17 | } 18 | } 19 | } 20 | 21 | pub type PendingQueue = VecDeque; 22 | 23 | -------------------------------------------------------------------------------- /src/half_connection/recv_rate_set.rs: -------------------------------------------------------------------------------- 1 | 2 | #[derive(Clone,Debug)] 3 | struct RecvEntry { 4 | value: u32, 5 | timestamp_ms: u64, 6 | is_initial: bool, 7 | } 8 | 9 | pub struct RecvRateSet { 10 | // Queue of receive rates reported by receiver (X_recv_set) 11 | 
entries: Vec, 12 | } 13 | 14 | impl RecvRateSet { 15 | pub fn new() -> Self { 16 | Self { 17 | entries: Vec::new(), 18 | } 19 | } 20 | 21 | pub fn reset_initial(&mut self, now_ms: u64) { 22 | self.entries.clear(); 23 | 24 | self.entries.push(RecvEntry { 25 | value: u32::max_value(), 26 | timestamp_ms: now_ms, 27 | is_initial: true, 28 | }); 29 | } 30 | 31 | fn replace_max(&mut self, now_ms: u64, recv_rate: u32) -> u32 { 32 | self.entries.retain(|e| e.is_initial == false); 33 | 34 | let max_rate = if self.entries.is_empty() { 35 | recv_rate 36 | } else { 37 | self.max().max(recv_rate) // lul 38 | }; 39 | 40 | self.reset(now_ms, max_rate); 41 | 42 | return max_rate; 43 | } 44 | 45 | pub fn reset(&mut self, now_ms: u64, recv_rate: u32) { 46 | self.entries.clear(); 47 | 48 | self.entries.push(RecvEntry { 49 | value: recv_rate, 50 | timestamp_ms: now_ms, 51 | is_initial: false, 52 | }); 53 | } 54 | 55 | pub fn rate_limited_update(&mut self, now_ms: u64, recv_rate: u32, rtt_ms: u64) -> u32 { 56 | self.entries.push(RecvEntry { 57 | value: recv_rate, 58 | timestamp_ms: now_ms, 59 | is_initial: false 60 | }); 61 | 62 | self.entries.retain(|e| now_ms - e.timestamp_ms < 2 * rtt_ms); 63 | 64 | return self.max(); 65 | } 66 | 67 | pub fn loss_increase_update(&mut self, now_ms: u64, recv_rate: u32) -> u32 { 68 | for entry in self.entries.iter_mut() { 69 | entry.value /= 2; 70 | } 71 | 72 | return self.replace_max(now_ms, (recv_rate as f64 * 0.85) as u32); 73 | } 74 | 75 | pub fn data_limited_update(&mut self, now_ms: u64, recv_rate: u32) -> u32 { 76 | return self.replace_max(now_ms, recv_rate); 77 | } 78 | 79 | pub fn max(&self) -> u32 { 80 | let mut max_rate = self.entries.first().unwrap().value; 81 | for entry in self.entries.iter().skip(1) { 82 | if entry.value > max_rate { 83 | max_rate = entry.value; 84 | } 85 | } 86 | return max_rate; 87 | } 88 | } 89 | 90 | -------------------------------------------------------------------------------- 
/src/half_connection/reorder_buffer.rs: --------------------------------------------------------------------------------

/// Tracks up to two out-of-order frame IDs ahead of a contiguous base ID,
/// reporting every frame ID in sequence order through a callback as the
/// window advances: `callback(frame_id, true)` for a frame that was seen,
/// `callback(frame_id, false)` for a gap that is being skipped.
pub struct ReorderBuffer {
    // Out-of-order frame IDs currently held; only frames[..frame_count] are valid,
    // kept sorted by distance from base_id (frames[0] closest).
    frames: [u32; 2],
    frame_count: u32,
    // Next in-order frame ID expected.
    base_id: u32,
    // Maximum allowed (wrapping) distance between base_id and an incoming ID.
    max_span: u32,
}

impl ReorderBuffer {
    pub fn new(base_id: u32, max_span: u32) -> Self {
        Self {
            frames: [0, 0],
            frame_count: 0,
            base_id,
            max_span,
        }
    }

    #[cfg(test)]
    pub fn base_id(&self) -> u32 {
        self.base_id
    }

    /// A frame may be put if it lies within max_span of the base ID
    /// (wrapping distance; IDs behind base_id appear as huge deltas and fail).
    pub fn can_put(&self, new_frame_id: u32) -> bool {
        new_frame_id.wrapping_sub(self.base_id) < self.max_span
    }

    /// Inserts `new_frame_id`, invoking `callback` for every frame ID that
    /// becomes resolved (in order) as the base advances.
    pub fn put<F>(&mut self, new_frame_id: u32, mut callback: F) where F: FnMut(u32, bool) {
        debug_assert!(self.can_put(new_frame_id));

        // Invariant: a held frame is never the next expected ID.
        if self.frame_count > 0 {
            debug_assert!(self.frames[0] != self.base_id);
        }

        debug_assert!(self.frame_count <= 2);

        match self.frame_count {
            0 => {
                if new_frame_id == self.base_id {
                    // In-order arrival: deliver immediately.
                    callback(new_frame_id, true);
                    self.base_id = self.base_id.wrapping_add(1);
                } else {
                    // Out of order: hold it.
                    self.frames[0] = new_frame_id;
                    self.frame_count = 1;
                }
            }
            1 => {
                if new_frame_id == self.base_id {
                    callback(new_frame_id, true);
                    self.base_id = self.base_id.wrapping_add(1);

                    // The held frame may now be next in line.
                    if self.frames[0] == self.base_id {
                        callback(self.frames[0], true);
                        self.base_id = self.base_id.wrapping_add(1);
                        self.frame_count = 0;
                    }
                } else {
                    // Hold both, sorted by distance from base_id.
                    let delta_new = new_frame_id.wrapping_sub(self.base_id);
                    let delta_0 = self.frames[0].wrapping_sub(self.base_id);

                    debug_assert!(delta_new != delta_0);
                    if delta_new < delta_0 {
                        self.frames[1] = self.frames[0];
                        self.frames[0] = new_frame_id;
                    } else {
                        self.frames[1] = new_frame_id;
                    }

                    self.frame_count = 2;
                }
            }
            2 => {
                // Buffer is full: the smallest of the three IDs must be flushed,
                // skipping (nacking) any gap below it. Select the minimum by
                // swapping it out of the buffer.
                let mut min_frame_id = new_frame_id;
                let mut delta_min = new_frame_id.wrapping_sub(self.base_id);

                let delta_1 = self.frames[1].wrapping_sub(self.base_id);

                debug_assert!(delta_1 != delta_min);
                if delta_1 < delta_min {
                    std::mem::swap(&mut self.frames[1], &mut min_frame_id);
                    delta_min = delta_1;
                }

                let delta_0 = self.frames[0].wrapping_sub(self.base_id);

                debug_assert!(delta_0 != delta_min);
                if delta_0 < delta_min {
                    std::mem::swap(&mut self.frames[0], &mut min_frame_id);
                }

                // Nack every ID below the minimum, then ack the minimum.
                while self.base_id != min_frame_id {
                    callback(self.base_id, false);
                    self.base_id = self.base_id.wrapping_add(1);
                }

                callback(min_frame_id, true);
                self.base_id = self.base_id.wrapping_add(1);

                // Flush any held frames that are now contiguous with the base.
                if self.frames[0] == self.base_id {
                    callback(self.frames[0], true);
                    self.base_id = self.base_id.wrapping_add(1);
                    self.frame_count -= 1;

                    if self.frames[1] == self.base_id {
                        callback(self.frames[1], true);
                        self.base_id = self.base_id.wrapping_add(1);
                        self.frame_count -= 1;
                    } else {
                        self.frames[0] = self.frames[1];
                    }
                }
            }
            _ => ()
        }
    }

    /// The base may be advanced forward by 1 ..= max_span IDs.
    pub fn can_advance(&self, new_base_id: u32) -> bool {
        let delta = new_base_id.wrapping_sub(self.base_id);
        delta >= 1 && delta <= self.max_span
    }

    /// Force-advances the base to `new_base_id`, acking held frames that fall
    /// inside the advanced range and nacking every other skipped ID.
    pub fn advance<F>(&mut self, new_base_id: u32, mut callback: F) where F: FnMut(u32, bool) {
        debug_assert!(self.can_advance(new_base_id));

        if self.frame_count > 0 {
            debug_assert!(self.frames[0] != self.base_id);
        }

        debug_assert!(self.frame_count <= 2);

        // Consume held frames strictly below the new base: nack the gap up to
        // each, then ack the held frame itself.
        while self.frame_count > 0 && self.frames[0].wrapping_sub(self.base_id) < new_base_id.wrapping_sub(self.base_id) {
            while self.base_id != self.frames[0] {
                callback(self.base_id, false);
                self.base_id = self.base_id.wrapping_add(1);
            }

            callback(self.frames[0], true);
            self.base_id = self.base_id.wrapping_add(1);

            if self.frame_count == 2 {
                self.frames[0] = self.frames[1];
            }

            self.frame_count -= 1;
        }

        // Nack the remaining gap up to the new base.
        while self.base_id != new_base_id {
            callback(self.base_id, false);
            self.base_id = self.base_id.wrapping_add(1);
        }

        // Held frames may now be exactly at the new base: flush them.
        match self.frame_count {
            0 => (),
            1 => {
                if self.frames[0] == self.base_id {
                    callback(self.frames[0], true);
                    self.base_id = self.base_id.wrapping_add(1);
                    self.frame_count = 0;
                }
            }
            2 => {
                if self.frames[0] == self.base_id {
                    callback(self.frames[0], true);
                    self.base_id = self.base_id.wrapping_add(1);
                    self.frame_count -= 1;

                    if self.frames[1] == self.base_id {
                        callback(self.frames[1], true);
                        self.base_id = self.base_id.wrapping_add(1);
                        self.frame_count -= 1;
                    } else {
                        self.frames[0] = self.frames[1];
                    }
                }
            }
            _ => (),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // Calls put() and asserts the exact (frame_id, received) callback sequence.
    fn test_put_callbacks(rb: &mut ReorderBuffer, new_frame_id: u32, expected_callbacks: Vec<(u32, bool)>) {
        let mut callbacks = Vec::new();
        let cb = |frame_id: u32, acked: bool| {
            callbacks.push((frame_id, acked));
        };
        rb.put(new_frame_id, cb);
        assert_eq!(callbacks, expected_callbacks);
    }

    // Calls advance() and asserts the exact (frame_id, received) callback sequence.
    fn test_advance_callbacks(rb: &mut ReorderBuffer, new_frame_id: u32, expected_callbacks: Vec<(u32, bool)>) {
        let mut callbacks = Vec::new();
        let cb = |frame_id: u32, acked: bool| {
            callbacks.push((frame_id, acked));
        };
        rb.advance(new_frame_id, cb);
        assert_eq!(callbacks, expected_callbacks);
    }

    #[test]
    fn ack_1_ack() {
        // Ack 0
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 0, vec![(0, true)]);
        assert_eq!(rb.frame_count, 0);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut
rb, 2, vec![]);
        test_put_callbacks(&mut rb, 0, vec![(0, true)]);
        assert_eq!(rb.frame_count, 1);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 0, vec![(0, true)]);
        assert_eq!(rb.frame_count, 2);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 0, vec![(0, true)]);
        assert_eq!(rb.frame_count, 2);

        // Nack 0, 1, Ack 2 — all arrival orders of {2, 4, 5}
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_put_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, true)]);
        assert_eq!(rb.frame_count, 2);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 4, vec![(0, false), (1, false), (2, true)]);
        assert_eq!(rb.frame_count, 2);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, true)]);
        assert_eq!(rb.frame_count, 2);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 2, vec![(0, false), (1, false), (2, true)]);
        assert_eq!(rb.frame_count, 2);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 4, vec![(0, false), (1, false), (2, true)]);
        assert_eq!(rb.frame_count, 2);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_put_callbacks(&mut rb, 2, vec![(0, false), (1, false), (2, true)]);
        assert_eq!(rb.frame_count, 2);
    }

    #[test]
    fn ack_2_acks() {
        // Ack 0, 1
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 0, vec![(0, true), (1, true)]);
        assert_eq!(rb.frame_count, 0);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 0, vec![(0, true), (1, true)]);
        assert_eq!(rb.frame_count, 1);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 0, vec![(0, true), (1, true)]);
        assert_eq!(rb.frame_count, 1);

        // Nack 0, 1, Ack 2, 3 — all arrival orders of {2, 3, 5}
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, true), (3, true)]);
        assert_eq!(rb.frame_count, 1);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 3, vec![(0, false), (1, false), (2, true), (3, true)]);
        assert_eq!(rb.frame_count, 1);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, true), (3, true)]);
        assert_eq!(rb.frame_count, 1);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 2, vec![(0, false), (1, false), (2, true), (3, true)]);
        assert_eq!(rb.frame_count, 1);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 3, vec![(0, false), (1, false), (2, true), (3, true)]);
        assert_eq!(rb.frame_count, 1);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 2, vec![(0, false), (1, false), (2, true), (3, true)]);
        assert_eq!(rb.frame_count, 1);
    }

    #[test]
    fn ack_3_acks() {
        // Ack 0, 1, 2
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 0, vec![(0, true), (1, true), (2, true)]);
        assert_eq!(rb.frame_count, 0);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 0, vec![(0, true), (1, true), (2, true)]);
        assert_eq!(rb.frame_count, 0);

        // Nack 0, 1, ack 2, 3, 4 — all arrival orders of {2, 3, 4}
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 4, vec![(0, false), (1, false), (2, true), (3, true), (4, true)]);
        assert_eq!(rb.frame_count, 0);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_put_callbacks(&mut rb, 3, vec![(0, false), (1, false), (2, true), (3, true), (4, true)]);
        assert_eq!(rb.frame_count, 0);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 4, vec![(0, false), (1, false), (2, true), (3, true), (4, true)]);
        assert_eq!(rb.frame_count, 0);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_put_callbacks(&mut rb, 2, vec![(0, false), (1, false), (2, true), (3, true), (4, true)]);
        assert_eq!(rb.frame_count, 0);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_put_callbacks(&mut rb, 3, vec![(0, false), (1, false), (2, true), (3, true), (4, true)]);
        assert_eq!(rb.frame_count, 0);

        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_put_callbacks(&mut rb, 2, vec![(0, false), (1, false), (2, true), (3, true), (4, true)]);
        assert_eq!(rb.frame_count, 0);
    }

    #[test]
    fn advance_0_acks() {
        // 0x beyond
        let mut rb = ReorderBuffer::new(0, 100);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, false), (3, false), (4, false)]);
        assert_eq!(rb.frame_count, 0);

        // 1x beyond
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 6, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, false), (3, false), (4, false)]);
        assert_eq!(rb.frame_count, 1);

        // 2x beyond
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 6, vec![]);
        test_put_callbacks(&mut rb, 7, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, false), (3, false), (4, false)]);
        assert_eq!(rb.frame_count, 2);
    }

    #[test]
    fn advance_1_ack() {
        // ~space, ~beyond
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_advance_callbacks(&mut rb, 2, vec![(0, false), (1, true)]);
        assert_eq!(rb.frame_count, 0);

        // ~space, beyond
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 6, vec![]);
        test_advance_callbacks(&mut rb, 2, vec![(0, false), (1, true)]);
        assert_eq!(rb.frame_count, 1);

        // space, ~beyond
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, true), (2, false), (3, false), (4, false)]);
        assert_eq!(rb.frame_count, 0);

        // space, beyond
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 6, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, true), (2, false), (3, false), (4, false)]);
        assert_eq!(rb.frame_count, 1);

        // past-end, ~beyond
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, false), (3, false), (4, false), (5, true)]);
        assert_eq!(rb.frame_count, 0);

        // past-end, beyond
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 7, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, false), (3, false), (4, false), (5, true)]);
        assert_eq!(rb.frame_count, 1);
    }

    #[test]
    fn advance_2_acks() {
        // ~space, ~space
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_advance_callbacks(&mut rb, 3, vec![(0, false), (1, true), (2, true)]);
        assert_eq!(rb.frame_count, 0);

        // ~space, space
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 2, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, true), (2, true), (3, false), (4, false)]);
        assert_eq!(rb.frame_count, 0);

        // space, ~space
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, true), (2, false), (3, false), (4, true)]);
        assert_eq!(rb.frame_count, 0);

        // space, space
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 3, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, true), (2, false), (3, true), (4, false)]);
        assert_eq!(rb.frame_count, 0);

        // ~space, past-end
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 4, vec![]);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, false), (3, false), (4, true), (5, true)]);
        assert_eq!(rb.frame_count, 0);

        // space, past-end
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 1, vec![]);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, true), (2, false), (3, false), (4, false), (5, true)]);
        assert_eq!(rb.frame_count, 0);

        // 2x past-end
        let mut rb = ReorderBuffer::new(0, 100);
        test_put_callbacks(&mut rb, 5, vec![]);
        test_put_callbacks(&mut rb, 6, vec![]);
        test_advance_callbacks(&mut rb, 5, vec![(0, false), (1, false), (2, false), (3, false), (4, false), (5, true), (6, true)]);
        assert_eq!(rb.frame_count, 0);
    }

    #[test]
    fn max_span() {
        let rb = ReorderBuffer::new(1, 100);
        assert_eq!(rb.can_put(0), false);
        assert_eq!(rb.can_put(1), true);

        assert_eq!(rb.can_put(100), true);
assert_eq!(rb.can_put(101), false); 499 | 500 | assert_eq!(rb.can_advance(0), false); 501 | assert_eq!(rb.can_advance(1), false); 502 | assert_eq!(rb.can_advance(2), true); 503 | 504 | assert_eq!(rb.can_advance(100), true); 505 | assert_eq!(rb.can_advance(101), true); 506 | assert_eq!(rb.can_advance(102), false); 507 | } 508 | } 509 | 510 | -------------------------------------------------------------------------------- /src/half_connection/resend_queue.rs: -------------------------------------------------------------------------------- 1 | 2 | use super::pending_packet::FragmentRef; 3 | 4 | use std::cmp::Ordering; 5 | use std::collections::BinaryHeap; 6 | 7 | #[derive(Debug)] 8 | pub struct Entry { 9 | pub fragment_ref: FragmentRef, 10 | pub resend_time: u64, 11 | pub send_count: u8, 12 | } 13 | 14 | impl Entry { 15 | pub fn new(fragment_ref: FragmentRef, resend_time: u64, send_count: u8) -> Self { 16 | Self { fragment_ref, resend_time, send_count } 17 | } 18 | } 19 | 20 | impl PartialOrd for Entry { 21 | fn partial_cmp(&self, other: &Self) -> Option { 22 | Some(self.resend_time.cmp(&other.resend_time).reverse()) 23 | } 24 | } 25 | 26 | impl PartialEq for Entry { 27 | fn eq(&self, other: &Self) -> bool { 28 | self.resend_time == other.resend_time 29 | } 30 | } 31 | 32 | impl Eq for Entry {} 33 | 34 | impl Ord for Entry { 35 | fn cmp(&self, other: &Self) -> Ordering { 36 | self.resend_time.cmp(&other.resend_time).reverse() 37 | } 38 | } 39 | 40 | pub type ResendQueue = BinaryHeap; 41 | 42 | -------------------------------------------------------------------------------- /src/half_connection/send_rate.rs: -------------------------------------------------------------------------------- 1 | 2 | // See RFC 5348: "TCP Friendly Rate Control (TFRC): Protocol Specification" 3 | 4 | use super::recv_rate_set; 5 | 6 | use crate::MAX_FRAME_SIZE; 7 | 8 | // Segment size in bytes (s), see section 9 | const MSS: usize = MAX_FRAME_SIZE; 10 | 11 | // Initial TCP window size (based 
on MSS), see section 4.2 12 | const INITIAL_TCP_WINDOW: u32 = 4380; // std::cmp::min(std::cmp::max(2*MSS, 4380), 4*MSS) as u32; 13 | // Absolute minimum send rate (s/t_mbi), see section 4.3 14 | const MINIMUM_RATE: u32 = (MSS / 64) as u32; 15 | 16 | fn s_to_ms(v_s: f64) -> u64 { 17 | (v_s * 1000.0).max(0.0).round() as u64 18 | } 19 | 20 | fn ms_to_s(v_s: u64) -> f64 { 21 | v_s as f64 / 1000.0 22 | } 23 | 24 | fn eval_tcp_throughput(rtt: f64, p: f64) -> u32 { 25 | let s = MSS as f64; 26 | let f_p = (p*2.0/3.0).sqrt() + 12.0*(p*3.0/8.0).sqrt()*p*(1.0 + 32.0*p*p); 27 | (s / (rtt * f_p)) as u32 28 | } 29 | 30 | fn eval_tcp_throughput_inv(rtt: f64, target_rate_bps: u32) -> f64 { 31 | let delta = (target_rate_bps as f64 * 0.05) as u32; 32 | 33 | let mut a = 0.0; 34 | let mut b = 1.0; 35 | 36 | loop { 37 | let c = (b + a)/2.0; 38 | 39 | let rate = eval_tcp_throughput(rtt, c); 40 | 41 | if rate > target_rate_bps { 42 | if rate - target_rate_bps <= delta { 43 | return c; 44 | } else { 45 | a = c; 46 | continue; 47 | } 48 | } else if rate < target_rate_bps { 49 | if target_rate_bps - rate <= delta { 50 | return c; 51 | } else { 52 | b = c; 53 | continue; 54 | } 55 | } else { 56 | return c; 57 | } 58 | } 59 | } 60 | 61 | #[derive(Debug,PartialEq)] 62 | pub struct FeedbackData { 63 | pub rtt_ms: u64, 64 | pub receive_rate: u32, 65 | pub loss_rate: f64, 66 | pub rate_limited: bool, 67 | } 68 | 69 | struct SlowStartState { 70 | time_last_doubled_ms: Option, 71 | } 72 | 73 | struct ThroughputEqnState { 74 | send_rate_tcp: u32, 75 | } 76 | 77 | enum SendRateMode { 78 | AwaitSend, 79 | SlowStart(SlowStartState), 80 | ThroughputEqn(ThroughputEqnState), 81 | } 82 | 83 | pub struct SendRateComp { 84 | // Previous loss rate 85 | prev_loss_rate: f64, 86 | 87 | // Expiration of nofeedback timer 88 | nofeedback_exp_ms: Option, 89 | // Flag indicating whether sender has been idle since the nofeedback timer was sent 90 | nofeedback_idle: bool, 91 | 92 | // State used to compute send rate 93 
| mode: SendRateMode, 94 | // Allowed transmit rate (X) 95 | send_rate: u32, 96 | // Application specified maximum transmit rate 97 | max_send_rate: u32, 98 | 99 | // Queue of receive rates reported by receiver (X_recv_set) 100 | recv_rate_set: recv_rate_set::RecvRateSet, 101 | 102 | // Round trip time estimate 103 | rtt_s: Option, 104 | rtt_ms: Option, 105 | 106 | // Most recent RTO computation 107 | rto_ms: Option, 108 | } 109 | 110 | fn compute_initial_send_rate(rtt_s: f64) -> u32 { 111 | (INITIAL_TCP_WINDOW as f64 / rtt_s) as u32 112 | } 113 | 114 | fn compute_initial_loss_send_rate(rtt_s: f64) -> u32 { 115 | ((MSS/2) as f64 / rtt_s) as u32 116 | } 117 | 118 | impl SendRateComp { 119 | pub fn new(max_send_rate: u32) -> Self { 120 | Self { 121 | prev_loss_rate: 0.0, 122 | 123 | nofeedback_exp_ms: None, 124 | nofeedback_idle: false, 125 | 126 | mode: SendRateMode::AwaitSend, 127 | send_rate: MSS as u32, 128 | max_send_rate: max_send_rate, 129 | 130 | recv_rate_set: recv_rate_set::RecvRateSet::new(), 131 | 132 | rtt_s: None, 133 | rtt_ms: None, 134 | 135 | rto_ms: None, 136 | } 137 | } 138 | 139 | pub fn send_rate(&self) -> f64 { 140 | self.send_rate as f64 141 | } 142 | 143 | pub fn rtt_s(&self) -> Option { 144 | self.rtt_s 145 | } 146 | 147 | pub fn rtt_ms(&self) -> Option { 148 | self.rtt_ms 149 | } 150 | 151 | pub fn rto_ms(&self) -> Option { 152 | self.rto_ms 153 | } 154 | 155 | pub fn notify_frame_sent(&mut self, now_ms: u64) { 156 | match self.mode { 157 | SendRateMode::AwaitSend => { 158 | self.nofeedback_exp_ms = Some(now_ms + 2000); 159 | self.mode = SendRateMode::SlowStart(SlowStartState { 160 | time_last_doubled_ms: None, 161 | }); 162 | self.recv_rate_set.reset_initial(now_ms); 163 | } 164 | _ => () 165 | } 166 | 167 | self.nofeedback_idle = false; 168 | } 169 | 170 | pub fn step(&mut self, now_ms: u64, feedback: Option, reset_loss_rate: F) where F: FnOnce(f64) { 171 | match self.mode { 172 | SendRateMode::AwaitSend => { 173 | return; 174 | } 175 | _ 
=> () 176 | } 177 | 178 | if let Some(feedback) = feedback { 179 | self.handle_feedback(now_ms, feedback, reset_loss_rate); 180 | } else if let Some(nofeedback_exp_ms) = self.nofeedback_exp_ms { 181 | if now_ms >= nofeedback_exp_ms { 182 | self.nofeedback_expired(now_ms); 183 | } 184 | } 185 | } 186 | 187 | fn handle_feedback(&mut self, now_ms: u64, feedback: FeedbackData, reset_loss_rate: F) where F: FnOnce(f64) { 188 | let rtt_sample_s = ms_to_s(feedback.rtt_ms); 189 | let recv_rate = feedback.receive_rate; 190 | let loss_rate = feedback.loss_rate; 191 | let rate_limited = feedback.rate_limited; 192 | 193 | let (rtt_s, rtt_ms) = self.update_rtt(rtt_sample_s); 194 | let rto_s = self.update_rto(rtt_s, self.send_rate); 195 | 196 | // TODO: When ThroughputEqn is entered, this may produce a false positive depending on 197 | // rounding rounding error as the loss intervals are reset. An extra flag may be of use. 198 | // TODO: Does a new loss event always cause an increase in the loss rate? 
The spec calls 199 | // for a "new loss event or an increase in the loss event rate p" 200 | let loss_increase = loss_rate > self.prev_loss_rate; 201 | 202 | let recv_limit = 203 | if rate_limited { 204 | // If rate limited during the interval, the interval was not entirely data-limited 205 | let max_val = self.recv_rate_set.rate_limited_update(now_ms, recv_rate, rtt_ms); 206 | max_val.saturating_mul(2) 207 | } else if loss_increase { 208 | let max_val = self.recv_rate_set.loss_increase_update(now_ms, recv_rate); 209 | max_val 210 | } else { 211 | let max_val = self.recv_rate_set.data_limited_update(now_ms, recv_rate); 212 | max_val.saturating_mul(2) 213 | }; 214 | 215 | self.prev_loss_rate = loss_rate; 216 | 217 | match self.mode { 218 | SendRateMode::SlowStart(ref mut state) => { 219 | if loss_increase { 220 | // Nonzero loss, initialize loss history according to loss rate and enter 221 | // throughput equation phase, see section 6.3.1 222 | 223 | let send_rate_target = if state.time_last_doubled_ms.is_none() { 224 | // First feedback indicates loss 225 | compute_initial_loss_send_rate(rtt_s) 226 | } else { 227 | // Because this is sender-side TFRC, no X_target approximation is necessary 228 | self.send_rate/2 229 | }; 230 | 231 | let initial_p = eval_tcp_throughput_inv(rtt_s, send_rate_target); 232 | 233 | reset_loss_rate(initial_p); 234 | 235 | // Apply target send rate as if computed loss rate had been received 236 | self.send_rate = send_rate_target.min(recv_limit).max(MINIMUM_RATE); 237 | //println!("SS: first loss: new send rate: {} (limit {}, rl: {}, li: {})", 238 | // self.send_rate, recv_limit, rate_limited, loss_increase); 239 | 240 | self.mode = SendRateMode::ThroughputEqn( 241 | ThroughputEqnState { 242 | send_rate_tcp: send_rate_target, 243 | } 244 | ); 245 | } else { 246 | // No loss, continue slow start phase 247 | 248 | // Recomputing this term on the fly allows for some adaptation as RTT fluctuates 249 | let initial_rate = 
compute_initial_send_rate(rtt_s); 250 | 251 | if let Some(time_last_doubled_ms) = state.time_last_doubled_ms { 252 | // Continue slow start doubling, see section 4.3, step 5 253 | if now_ms - time_last_doubled_ms >= rtt_ms { 254 | state.time_last_doubled_ms = Some(now_ms); 255 | self.send_rate = (2*self.send_rate).min(recv_limit).max(initial_rate); 256 | //println!("SS: doubling: new send rate: {} (limit {}, rl: {}, li: {})", 257 | // self.send_rate, recv_limit, rate_limited, loss_increase); 258 | } 259 | } else { 260 | // Reinitialize slow start phase after first feedback, see section 4.2 261 | state.time_last_doubled_ms = Some(now_ms); 262 | self.send_rate = initial_rate; 263 | //println!("SS: first feedback: new send rate: {} (limit {}, rl: {}, li: {})", 264 | // self.send_rate, recv_limit, rate_limited, loss_increase); 265 | } 266 | } 267 | } 268 | SendRateMode::ThroughputEqn(ref mut state) => { 269 | // Continue throughput equation phase, see section 4.3, step 5 270 | state.send_rate_tcp = eval_tcp_throughput(rtt_s, loss_rate); 271 | 272 | self.send_rate = state.send_rate_tcp.min(recv_limit).max(MINIMUM_RATE); 273 | //println!("TE: new send rate: {} (limit {}, rl: {}, li: {})", 274 | // self.send_rate, recv_limit, rate_limited, loss_increase); 275 | } 276 | _ => panic!() 277 | } 278 | 279 | self.send_rate = self.send_rate.min(self.max_send_rate); 280 | 281 | // Restart nofeedback timer 282 | self.nofeedback_exp_ms = Some(now_ms + s_to_ms(rto_s)); 283 | self.nofeedback_idle = true; 284 | } 285 | 286 | fn nofeedback_expired(&mut self, now_ms: u64) { 287 | /* 288 | Section 4.4, step 1, can be de-mangled into the following: 289 | 290 | If (slow start) and (no feedback received and sender has not been idle) { 291 | // We do not have X_Bps or recover_rate yet. 292 | // Halve the allowed sending rate. 293 | X = max(X/2, s/t_mbi); 294 | } Else if (slow start) and (X < 2 * recover_rate, and sender has been idle) { 295 | // Don't halve the allowed sending rate. 
296 | Do nothing; 297 | } Else if (slow start) { 298 | // We do not have X_Bps yet. 299 | // Halve the allowed sending rate. 300 | X = max(X/2, s/t_mbi); 301 | } Else if (throughput eqn) and (X_recv < recover_rate and sender has been idle) { 302 | // Don't halve the allowed sending rate. 303 | Do nothing; 304 | } Else if (throughput eqn) { 305 | If (X_Bps > 2*X_recv)) { 306 | // 2*X_recv was already limiting the sending rate. 307 | // Halve the allowed sending rate. 308 | Update_Limits(X_recv;) 309 | } Else { 310 | // The sending rate was limited by X_Bps, not by X_recv. 311 | // Halve the allowed sending rate. 312 | Update_Limits(X_Bps/2); 313 | } 314 | } 315 | 316 | where `slow start <=> p == 0`, and `throughput eqn <=> p > 0`. 317 | */ 318 | 319 | match self.mode { 320 | SendRateMode::SlowStart(_) => { 321 | if let Some(rtt_s) = self.rtt_s { 322 | // Recomputing this term on the fly allows for some adaptation as RTT fluctuates 323 | let recover_rate = compute_initial_send_rate(rtt_s); 324 | 325 | if self.nofeedback_idle && self.send_rate < 2*recover_rate { 326 | // Do nothing, this is acceptable 327 | } else { 328 | // Halve send rate every RTO, subject to minimum 329 | self.send_rate = (self.send_rate/2).max(MINIMUM_RATE); 330 | } 331 | } else { 332 | // In slow start, but no feedback has been received. 
333 | debug_assert!(self.nofeedback_idle == false); 334 | 335 | // Halve send rate every RTO, subject to minimum 336 | self.send_rate = (self.send_rate/2).max(MINIMUM_RATE); 337 | } 338 | } 339 | SendRateMode::ThroughputEqn(ref mut state) => { 340 | let rtt_s = self.rtt_s.unwrap(); 341 | let recover_rate = compute_initial_send_rate(rtt_s); 342 | let recv_rate = self.recv_rate_set.max(); 343 | 344 | if self.nofeedback_idle && recv_rate < recover_rate { 345 | // Do nothing, this is acceptable 346 | } else { 347 | // Alter recv_rate_set so as to halve current send rate moving forward 348 | let current_limit = state.send_rate_tcp.min(recv_rate.saturating_mul(2)); 349 | let new_limit = (current_limit/2).max(MINIMUM_RATE); 350 | self.recv_rate_set.reset(now_ms, new_limit/2); 351 | self.send_rate = state.send_rate_tcp.min(new_limit); 352 | } 353 | } 354 | _ => panic!() 355 | } 356 | 357 | // Compute RTO for the new send rate, see section 4.4 step 2 358 | // No default RTT is specified by TFRC, but using RTT = 0 when no feedback has been 359 | // received will cause RTO to begin at 2s, and double each time send_rate is halved above. 360 | // This may or may not be the intended behavior. 
361 | let rto_s = self.update_rto(self.rtt_s.unwrap_or(0.0), self.send_rate); 362 | 363 | self.nofeedback_exp_ms = Some(now_ms + s_to_ms(rto_s)); 364 | self.nofeedback_idle = true; 365 | } 366 | 367 | fn update_rtt(&mut self, rtt_sample_s: f64) -> (f64, u64) { 368 | // See section 4.3 step 2 369 | const RTT_ALPHA: f64 = 0.1; 370 | let new_rtt_s = if let Some(rtt_s) = self.rtt_s { 371 | (1.0 - RTT_ALPHA)*rtt_s + RTT_ALPHA*rtt_sample_s 372 | } else { 373 | rtt_sample_s 374 | }; 375 | let new_rtt_ms = s_to_ms(new_rtt_s); 376 | self.rtt_s = Some(new_rtt_s); 377 | self.rtt_ms = Some(new_rtt_ms); 378 | return (new_rtt_s, new_rtt_ms); 379 | } 380 | 381 | fn update_rto(&mut self, rtt_s: f64, send_rate: u32) -> f64 { 382 | // See section 4.3 step 3 and section 4.4 step 2 383 | let rto_s = (4.0*rtt_s).max((2*MSS) as f64 / send_rate as f64); 384 | self.rto_ms = Some(s_to_ms(rto_s)); 385 | return rto_s; 386 | } 387 | } 388 | 389 | #[cfg(test)] 390 | mod tests { 391 | use super::*; 392 | 393 | #[test] 394 | fn tcp_throughput_inverse() { 395 | let rtts = vec![ 0.01, 0.05, 0.1, 0.2, 0.4, 0.8, 2.0, 4.0 ]; 396 | 397 | for &rtt in rtts.iter() { 398 | let mut target_loss_rates = vec![ 1.0, 0.0, 0.01, 0.001, 0.0001, 0.00001, 0.000001 ]; 399 | for _ in 0 .. 
50 { 400 | let p = -6.0 * (rand::random::() as f64 / u32::MAX as f64); 401 | target_loss_rates.push(10.0f64.powf(p)); 402 | } 403 | 404 | for &target_loss_rate in target_loss_rates.iter() { 405 | let target_send_rate = eval_tcp_throughput(rtt, target_loss_rate); 406 | let max_error = (target_send_rate as f64 * 0.05) as i32; 407 | 408 | let send_rate = eval_tcp_throughput(rtt, eval_tcp_throughput_inv(rtt, target_send_rate)); 409 | 410 | assert!((target_send_rate as i32 - send_rate as i32).abs() <= max_error); 411 | } 412 | } 413 | } 414 | } 415 | 416 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![warn(missing_docs)] 2 | 3 | //! `uflow` is a non-blocking, connection-based layer over UDP that provides an ordered and 4 | //! drop-tolerant packet streaming interface for real-time applications (e.g. games). It manages 5 | //! connection state, packet sequencing, packet fragmentation, reliable delivery, and congestion 6 | //! control to create a simple and robust solution for low-latency internet communication. 7 | //! 8 | //! # Hosting a Server 9 | //! 10 | //! A `uflow` server is created by calling [`Server::bind[...]()`](server::Server::bind), which 11 | //! opens a UDP socket bound to the specified address, and returns a corresponding `Server` object. 12 | //! The number of active connections will be restricted to the configured limit, and each incoming 13 | //! connection will be initialized using the given endpoint configuration (see: 14 | //! [`server::Config`]). 15 | //! 16 | //! ``` 17 | //! let server_address = "127.0.0.1:8888"; 18 | //! let config = uflow::server::Config { 19 | //! max_active_connections: 8, 20 | //! .. Default::default() 21 | //! }; 22 | //! 23 | //! // Create a server object 24 | //! let mut server = uflow::server::Server::bind(server_address, config) 25 | //! 
.expect("Failed to bind/configure socket"); 26 | //! ``` 27 | //! 28 | //! As a non-blocking interface, a server object depends on periodic calls to 29 | //! [`Server::step()`](server::Server::step) to process inbound traffic and update connection 30 | //! states. To signal pending events to the application, `step()` returns an iterator to a list of 31 | //! [`server::Event`] objects which contain information specific to each event type. 32 | //! 33 | //! Once a client handshake has been completed, a [`RemoteClient`](server::RemoteClient) object 34 | //! will be created to represent the new connection. These objects may be obtained by calling 35 | //! [`Server::client()`](server::Server::client) with the appropriate address. Because 36 | //! `RemoteClient` does not store user data, it is expected that the application will store 37 | //! any necessary per-client data in a separate data structure. 38 | //! 39 | //! A `RemoteClient` functions as a handle for a given connection, and allows the server 40 | //! application to send packets and query various connection details. However, no packets will be 41 | //! placed on the network, and no received packets will be processed until the next call to 42 | //! `step()`. The application may call [`Server::flush()`](server::Server::flush) to send outbound 43 | //! data immediately. 44 | //! 45 | //! A basic server loop that extends the above example is shown below: 46 | //! 47 | //! ``` 48 | //! # let server_address = "127.0.0.1:8888"; 49 | //! # let config = Default::default(); 50 | //! # let mut server = uflow::server::Server::bind(server_address, config).unwrap(); 51 | //! loop { 52 | //! // Process inbound UDP frames and handle events 53 | //! for event in server.step() { 54 | //! match event { 55 | //! uflow::server::Event::Connect(client_address) => { 56 | //! // TODO: Handle client connection 57 | //! } 58 | //! uflow::server::Event::Disconnect(client_address) => { 59 | //! // TODO: Handle client disconnection 60 | //! 
} 61 | //! uflow::server::Event::Error(client_address, error) => { 62 | //! // TODO: Handle connection error 63 | //! } 64 | //! uflow::server::Event::Receive(client_address, packet_data) => { 65 | //! // Echo the packet on channel 0 66 | //! let mut client = server.client(&client_address).unwrap().borrow_mut(); 67 | //! client.send(packet_data, 0, uflow::SendMode::Unreliable); 68 | //! } 69 | //! } 70 | //! } 71 | //! 72 | //! // Send data, update server application state 73 | //! // ... 74 | //! 75 | //! // Flush outbound data 76 | //! server.flush(); 77 | //! 78 | //! // Sleep for 30ms (≈33 updates/second) 79 | //! std::thread::sleep(std::time::Duration::from_millis(30)); 80 | //! # break; 81 | //! } 82 | //! ``` 83 | //! 84 | //! See the `echo_server` example for a complete server implementation. 85 | //! 86 | //! # Connecting to a Server 87 | //! 88 | //! A `uflow` client is created by calling [`Client::connect()`](client::Client::connect), which 89 | //! opens a non-blocking UDP socket and returns a corresponding [`Client`](client::Client) object. 90 | //! A connection will be initiated immediately using the provided destination address and 91 | //! configuration (see [`client::Config`]). 92 | //! 93 | //! ``` 94 | //! let server_address = "127.0.0.1:8888"; 95 | //! let config = Default::default(); 96 | //! 97 | //! // Create a client object 98 | //! let mut client = uflow::client::Client::connect(server_address, config) 99 | //! .expect("Invalid address"); 100 | //! ``` 101 | //! 102 | //! Like a server, a client depends on periodic calls to [`Client::step()`](client::Client::step) 103 | //! in order to process inbound traffic and update its connection state. A basic client loop which 104 | //! extends the above example is shown below: 105 | //! 106 | //! ``` 107 | //! # let server_address = "127.0.0.1:8888"; 108 | //! # let config = Default::default(); 109 | //! # let mut client = uflow::client::Client::connect(server_address, config).unwrap(); 110 | //! 
loop { 111 | //! // Process inbound UDP frames 112 | //! for event in client.step() { 113 | //! match event { 114 | //! uflow::client::Event::Connect => { 115 | //! // TODO: Handle connection 116 | //! } 117 | //! uflow::client::Event::Disconnect => { 118 | //! // TODO: Handle disconnection 119 | //! } 120 | //! uflow::client::Event::Error(error) => { 121 | //! // TODO: Handle connection error 122 | //! } 123 | //! uflow::client::Event::Receive(packet_data) => { 124 | //! // TODO: Handle received packets 125 | //! } 126 | //! } 127 | //! } 128 | //! 129 | //! // Send data, update client application state 130 | //! // ... 131 | //! 132 | //! // Flush outbound data 133 | //! client.flush(); 134 | //! 135 | //! // Sleep for 30ms (≈33 updates/second) 136 | //! std::thread::sleep(std::time::Duration::from_millis(30)); 137 | //! # break; 138 | //! } 139 | //! ``` 140 | //! 141 | //! See the `echo_client` example for a complete client implementation. 142 | //! 143 | //! # Sending Packets 144 | //! 145 | //! Packets are sent to a remote host by calling [`client::Client::send()`] or 146 | //! [`server::RemoteClient::send()`], which additionally requires a channel ID and a packet send 147 | //! mode. Any packets that are sent prior to establishing a connection will be sent once the 148 | //! connection succeeds. 149 | //! 150 | //! ``` 151 | //! let server_address = "127.0.0.1:8888"; 152 | //! let config = Default::default(); 153 | //! let mut client = uflow::client::Client::connect(server_address, config).unwrap(); 154 | //! 155 | //! let packet_data = "Hello world!".as_bytes(); 156 | //! let channel_id = 0; 157 | //! let send_mode = uflow::SendMode::Reliable; 158 | //! 159 | //! client.send(packet_data.into(), channel_id, send_mode); 160 | //! ``` 161 | //! 162 | //! Additional details relating to how packets are sent and received by `uflow` are described in 163 | //! the following subsections. 164 | //! 165 | //! ##### Packet Fragmentation and Aggregation 166 | //! 
167 | //! Small packets are aggregated into larger UDP frames, and large packets are divided into 168 | //! fragments such that no frame exceeds the internet MTU (1500 bytes). Each fragment is 169 | //! transferred with the same send mode as its containing packet—that is, fragments will be resent 170 | //! if and only if the containing packet is marked with [`SendMode::Persistent`] or 171 | //! [`SendMode::Reliable`]. A packet is considered received once all of its constituent fragments 172 | //! have been received. 173 | //! 174 | //! ##### Channels 175 | //! 176 | //! Each connection contains 64 virtual channels that are used to ensure relative packet ordering: 177 | //! packets that are received on a given channel will be delivered to the receiving application in 178 | //! the order they were sent. Packets which have not yet been received may be skipped, depending on 179 | //! the send mode of the particular packet, and whether or not any subsequent packets have been 180 | //! received. 181 | //! 182 | //! Because packets that are sent using [`SendMode::Reliable`] may not be skipped, and because all 183 | //! packets on a given channel must be delivered in-order, the receiving application will not see a 184 | //! given received packet until all previous reliable packets on the same channel have also been 185 | //! received. This means that if a reliable packet is dropped, that channel will effectively stall 186 | //! for its arrival, but packets received on other channels may still be delivered in the meantime. 187 | //! 188 | //! Thus, by carefully choosing the send mode and channel of outgoing packets, the latency effects 189 | //! of intermittent network losses can be mitigated. Because `uflow` does not store packets by 190 | //! channel, and otherwise never iterates over the space of channel IDs, there is no penalty to 191 | //! using a large number of channels. 192 | //! 193 | //! ##### Packet Buffering 194 | //! 195 | //! 
All packets are sent subject to adaptive rate control, a maximum transfer window, and a memory 196 | //! limit set by the receiving host. If any of these mechanisms prevent a packet from being sent, 197 | //! the packet will remain in a queue at the sender. Thus, a sender can expect that packets will 198 | //! begin to accumulate in its queue if the connection bandwidth is low, or if the receiver is not 199 | //! processing packets quickly enough. 200 | //! 201 | //! The total size of all packets awaiting delivery can be obtained by calling 202 | //! [`Client::send_buffer_size()`](client::Client::send_buffer_size) or 203 | //! [`RemoteClient::send_buffer_size()`](server::RemoteClient::send_buffer_size), and if desired, 204 | //! an application can use this value to terminate excessively delayed connections. In addition, 205 | //! the application may send packets using [`SendMode::TimeSensitive`] to drop packets at the 206 | //! sender if they could not be sent immediately (i.e. during the next call to `step()`). In the 207 | //! event that the total available bandwidth is limited, this prevents outdated packets from using 208 | //! any unnecessary bandwidth, and prioritizes sending newer packets in the send queue. 209 | //! 210 | //! # Receiving Packets (and Other Events) 211 | //! 212 | //! Each time `step()` is called on a `Client` or `Server` object, connection events are returned 213 | //! via iterator. Because servers may have multiple connections, server events each contain an 214 | //! associated client address, whereas client events do not. See [`client::Event`] and 215 | //! [`server::Event`] for more details. 216 | //! 217 | //! For client and server, the overall connection-event behavior is as follows. A `Connect` event 218 | //! will be generated when a connection is first established. If either end of the connection 219 | //! explicitly disconnects, a `Disconnect` event will be generated. Once a packet has been received 220 | //! 
(and that packet is not waiting for any previous packets), a `Receive` event will be generated. 221 | //! If an error is encountered, or the connection times out at any point, an `Error` event will be 222 | //! generated. No further events are generated after a `Disconnect` or an `Error` event. 223 | //! 224 | //! ##### Maximum Receive Allocation 225 | //! 226 | //! If a sender is sending a continuous stream of packets, but `step()` is not called on the 227 | //! receiver for whatever reason, the number of packets in the receiver's receive buffer will 228 | //! increase until its [maximum receive allocation](EndpointConfig#structfield.max_receive_alloc) 229 | //! has been reached. At that point, any new packets will be silently ignored. 230 | //! 231 | //! *Note*: This feature is intended to prevent memory allocation attacks. A well-behaved sender 232 | //! will ensure that it does not send new packets which would exceed the receiver's memory limit, 233 | //! and the stall will back-propagate accordingly. 234 | //! 235 | //! ##### Optimal Acknowledgements 236 | //! 237 | //! If desired, an application may call `flush()` on the associated client or server object 238 | //! immediately after all events from `step()` have been handled. By doing so, information relating 239 | //! to which packets have been delivered (and how much buffer space is available) will be relayed 240 | //! back to the sender as soon as possible. 241 | //! 242 | //! # Disconnecting 243 | //! 244 | //! A connection is explicitly closed by calling `disconnect()` or `disconnect_now()` on the 245 | //! corresponding [`Client`](client::Client) or [`RemoteClient`](server::RemoteClient) object; 246 | //! `disconnect()` will make sure to send all pending outbound packets prior to disconnecting, 247 | //! whereas `disconnect_now()` will initiate the disconnection process on the next call to 248 | //! `step()`. In both cases the application must continue to call `step()` to ensure that the 249 | //! 
disconnection takes place. 250 | //! 251 | //! ``` 252 | //! # let server_address = "127.0.0.1:8888"; 253 | //! # let config = Default::default(); 254 | //! # let mut client = uflow::client::Client::connect(server_address, config).unwrap(); 255 | //! client.disconnect(); 256 | //! 257 | //! // ... calls to client.step() continue 258 | //! ``` 259 | //! 260 | //! Servers may also call [`Server::drop()`](server::Server::drop), which sends no further packets 261 | //! and forgets the connection immediately. This will generate a timeout error on the client. 262 | 263 | mod half_connection; 264 | mod frame; 265 | mod packet_id; 266 | mod udp_frame_sink; 267 | 268 | /// Server-related connection objects and parameters. 269 | pub mod server; 270 | 271 | /// Client-related connection objects and parameters. 272 | pub mod client; 273 | 274 | /// The current protocol version ID. 275 | pub const PROTOCOL_VERSION: u8 = 3; 276 | 277 | /// The maximum number of channels which may be used on a given connection. 278 | pub const CHANNEL_COUNT: usize = frame::serial::MAX_CHANNELS; 279 | 280 | /// The maximum size of the frame transfer window, in sequence IDs. 281 | pub const MAX_FRAME_WINDOW_SIZE: u32 = 4096; 282 | 283 | /// The maximum size of the packet transfer window, in sequence IDs. 284 | pub const MAX_PACKET_WINDOW_SIZE: u32 = 4096; 285 | 286 | /// The common maximum transfer unit (MTU) of the internet. 287 | pub const INTERNET_MTU: usize = 1500; 288 | 289 | /// The number of bytes in a UDP header (including the IP header). 290 | pub const UDP_HEADER_SIZE: usize = 28; 291 | 292 | /// The maximum size of a `uflow` frame in bytes, according to the internet MTU and UDP header 293 | /// size. 294 | pub const MAX_FRAME_SIZE: usize = INTERNET_MTU - UDP_HEADER_SIZE; 295 | 296 | /// The maximum size of a packet fragment in bytes, according to frame serialization overhead. 
297 | pub const MAX_FRAGMENT_SIZE: usize = MAX_FRAME_SIZE - frame::serial::DATA_FRAME_OVERHEAD - frame::serial::MAX_DATAGRAM_OVERHEAD; 298 | 299 | /// The absolute maximum size of a packet, in bytes. 300 | pub const MAX_PACKET_SIZE: usize = MAX_FRAGMENT_SIZE * frame::serial::MAX_FRAGMENTS; 301 | 302 | /// A mode by which a packet is sent. 303 | #[derive(Clone,Copy,Debug,PartialEq)] 304 | pub enum SendMode { 305 | /// This packet will be sent at most once. If this packet cannot be sent immediately (i.e. 306 | /// prior to the next call to [`Client::step`](client::Client::step) or 307 | /// [`Server::step`](server::Server::step)), it will be discarded by the sender. If this packet 308 | /// has not been received, but a subsequent packet has been received on the same channel, the 309 | /// receiver may skip this packet. 310 | TimeSensitive, 311 | /// This packet will be sent exactly once. If this packet has not been received, but a 312 | /// subsequent packet has been received on the same channel, the receiver may skip this packet. 313 | Unreliable, 314 | /// This packet will be sent and resent until acknowledged by the receiver. If this packet has 315 | /// not been received, but a subsequent packet has been received on the same channel, the 316 | /// receiver may skip this packet. 317 | /// 318 | /// *Note:* The packet will cease to be resent once the sender has detected a skip. 319 | Persistent, 320 | /// This packet will be sent until acknowledged by the receiver. The receiver will not deliver 321 | /// subsequent packets on the same channel until this packet has been delivered. 322 | Reliable, 323 | } 324 | 325 | /// Parameters used to configure either endpoint of a `uflow` connection. 326 | #[derive(Clone,Debug)] 327 | pub struct EndpointConfig { 328 | /// The maximum send rate, in bytes per second. The endpoint will ensure that its outgoing 329 | /// bandwidth does not exceed this value. 330 | /// 331 | /// Must be greater than 0. 
Values larger than 2^32 will be truncated. 332 | pub max_send_rate: usize, 333 | 334 | /// The maximum acceptable receive rate, in bytes per second. The opposing endpoint will ensure 335 | /// that its outgoing bandwidth does not exceed this value. 336 | /// 337 | /// Must be greater than 0. Values larger than 2^32 will be truncated. 338 | pub max_receive_rate: usize, 339 | 340 | /// The maximum size of a sent packet, in bytes. The endpoint will ensure that it does not send 341 | /// packets with a size exceeding this value. 342 | /// 343 | /// Must be greater than 0, and less than or equal to [`MAX_PACKET_SIZE`]. 344 | pub max_packet_size: usize, 345 | 346 | /// The maximum allocation size of the endpoint's receive buffer, in bytes. The endpoint will 347 | /// ensure that the total amount of memory allocated to receive packet data doesn't exceed this 348 | /// value, rounded up to the nearest multiple of 349 | /// [`MAX_FRAGMENT_SIZE`](crate::MAX_FRAGMENT_SIZE). 350 | /// 351 | /// Must be greater than 0. 352 | /// 353 | /// *Note*: The maximum allocation size necessarily constrains the maximum receivable packet 354 | /// size. A connection attempt will fail if the `max_packet_size` of the opposing endpoint 355 | /// exceeds this value. 356 | pub max_receive_alloc: usize, 357 | 358 | // TODO (0.8.0): Combine keepalive and keepalive_interval_ms into Option 359 | 360 | /// Whether the endpoint should automatically send keepalive frames if no data has been sent 361 | /// for one keepalive interval (see `keepalive_interval_ms`). If set to false, one endpoint 362 | /// must continually send data to avoid causing a timeout on the opposite host. 363 | pub keepalive: bool, 364 | 365 | /// The interval in milliseconds at which keepalive frames are sent. 366 | /// 367 | /// *Note*: Keepalive frames will not be sent faster than the connection RTO (computed 368 | /// according to TFRC) or 2s, whichever is longer. 
369 | pub keepalive_interval_ms: u64, 370 | 371 | /// Time in milliseconds after which an active connection will terminate if no frames have been 372 | /// received from the remote endpoint. 373 | pub active_timeout_ms: u64, 374 | } 375 | 376 | impl Default for EndpointConfig { 377 | /// Creates an endpoint configuration with the following parameters: 378 | /// * Maximum outgoing bandwidth: 2MB/s 379 | /// * Maximum incoming bandwidth: 2MB/s 380 | /// * Maximum packet size: 1MB 381 | /// * Maximum packet receive allocation: 1MB 382 | /// * Keepalive: true 383 | /// * Keepalive interval: 5s 384 | /// * Active timeout: 20s 385 | fn default() -> Self { 386 | Self { 387 | max_send_rate: 2_000_000, 388 | max_receive_rate: 2_000_000, 389 | 390 | max_packet_size: 1_000_000, 391 | max_receive_alloc: 1_000_000, 392 | 393 | keepalive: true, 394 | keepalive_interval_ms: 5000, 395 | 396 | active_timeout_ms: 20000 397 | } 398 | } 399 | } 400 | 401 | impl EndpointConfig { 402 | /// Returns `true` if each parameter has a valid value. 
403 | pub fn is_valid(&self) -> bool { 404 | self.max_send_rate > 0 && 405 | self.max_receive_rate > 0 && 406 | self.max_packet_size > 0 && 407 | self.max_packet_size <= MAX_PACKET_SIZE && 408 | self.max_receive_alloc > 0 409 | } 410 | } 411 | -------------------------------------------------------------------------------- /src/packet_id.rs: -------------------------------------------------------------------------------- 1 | 2 | pub type Type = u32; 3 | 4 | pub const MASK: Type = 0xFFFFF; 5 | pub const SPAN: Type = 0x100000; 6 | 7 | pub fn add(a: Type, b: Type) -> Type { 8 | a.wrapping_add(b) & MASK 9 | } 10 | 11 | pub fn sub(a: Type, b: Type) -> Type { 12 | a.wrapping_sub(b) & MASK 13 | } 14 | 15 | pub fn is_valid(a: Type) -> bool { 16 | a & MASK == a 17 | } 18 | 19 | -------------------------------------------------------------------------------- /src/server/event_queue.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | use std::cmp::Ordering; 3 | use std::collections::BinaryHeap; 4 | use std::rc::Rc; 5 | 6 | use super::remote_client::RemoteClient; 7 | 8 | #[derive(Copy, Clone, PartialEq)] 9 | pub enum EventType { 10 | ResendHandshakeSynAck, 11 | ResendDisconnect, 12 | ClosedTimeout, 13 | } 14 | 15 | pub struct Event { 16 | pub client: Rc>, 17 | pub kind: EventType, 18 | pub time: u64, 19 | pub count: u8, 20 | } 21 | 22 | impl Event { 23 | pub fn new(client: Rc>, kind: EventType, time: u64, count: u8) -> Self { 24 | Self { 25 | client, 26 | kind, 27 | time, 28 | count, 29 | } 30 | } 31 | } 32 | 33 | impl PartialOrd for Event { 34 | fn partial_cmp(&self, other: &Self) -> Option { 35 | Some(self.time.cmp(&other.time).reverse()) 36 | } 37 | } 38 | 39 | impl PartialEq for Event { 40 | fn eq(&self, other: &Self) -> bool { 41 | self.time == other.time 42 | } 43 | } 44 | 45 | impl Eq for Event {} 46 | 47 | impl Ord for Event { 48 | fn cmp(&self, other: &Self) -> Ordering { 49 | 
self.time.cmp(&other.time).reverse() 50 | } 51 | } 52 | 53 | pub type EventQueue = BinaryHeap; 54 | -------------------------------------------------------------------------------- /src/server/remote_client.rs: -------------------------------------------------------------------------------- 1 | use std::net; 2 | 3 | use crate::half_connection::HalfConnection; 4 | use crate::SendMode; 5 | use crate::CHANNEL_COUNT; 6 | 7 | pub (super) enum DisconnectMode { 8 | Now, 9 | Flush, 10 | } 11 | 12 | pub (super) struct PendingState { 13 | pub local_nonce: u32, 14 | pub remote_nonce: u32, 15 | pub remote_max_receive_rate: u32, 16 | pub remote_max_receive_alloc: u32, 17 | pub reply_bytes: Box<[u8]>, 18 | } 19 | 20 | pub (super) struct ActiveState { 21 | pub half_connection: HalfConnection, 22 | pub timeout_time_ms: u64, 23 | pub disconnect_signal: Option, 24 | } 25 | 26 | pub (super) enum State { 27 | Pending(PendingState), 28 | Active(ActiveState), 29 | Closing, 30 | Closed, 31 | Fin, 32 | } 33 | 34 | /// Used by a [`Server`](super::Server) object to represent a connected client. 35 | pub struct RemoteClient { 36 | pub (super) address: net::SocketAddr, 37 | pub (super) state: State, 38 | pub (super) max_packet_size: usize, 39 | } 40 | 41 | impl RemoteClient { 42 | /// Returns `true` if the connection is active, that is, a connection handshake has been 43 | /// completed and the remote host has not yet timed out or disconnected. Returns `false` 44 | /// otherwise. 45 | pub fn is_active(&self) -> bool { 46 | match self.state { 47 | State::Active(_) => true, 48 | _ => false, 49 | } 50 | } 51 | 52 | /// Enqueues a packet for delivery to this client. The packet will be sent on the given channel 53 | /// according to the specified mode. 54 | /// 55 | /// If the connection is not active, the packet will be silently discarded. 56 | /// 57 | /// # Error Handling 58 | /// 59 | /// This function will panic if `channel_id` does not refer to a valid channel (i.e. 
if 60 | /// `channel_id >= CHANNEL_COUNT`), or if `data.len()` exceeds the [maximum packet 61 | /// size](crate::EndpointConfig#structfield.max_packet_size). 62 | pub fn send(&mut self, data: Box<[u8]>, channel_id: usize, mode: SendMode) { 63 | assert!(data.len() <= self.max_packet_size, 64 | "send failed: packet of size {} exceeds configured maximum of {}", 65 | data.len(), 66 | self.max_packet_size); 67 | 68 | assert!(channel_id < CHANNEL_COUNT, 69 | "send failed: channel ID {} is invalid", 70 | channel_id); 71 | 72 | match self.state { 73 | State::Active(ref mut state) => { 74 | state.half_connection.send(data, channel_id as u8, mode); 75 | } 76 | _ => (), 77 | } 78 | } 79 | 80 | /// Gracefully terminates this connection once all packets have been sent. 81 | /// 82 | /// If any outbound packets are pending, they will be sent prior to disconnecting. Reliable 83 | /// packets can be assumed to have been delievered, so long as the client does not also 84 | /// disconnect in the meantime. The connection will remain active until the next call to 85 | /// [`Server::step()`](super::Server::step) with no pending outbound packets. 86 | pub fn disconnect(&mut self) { 87 | match self.state { 88 | State::Active(ref mut state) => { 89 | state.disconnect_signal = Some(DisconnectMode::Flush); 90 | } 91 | _ => (), 92 | } 93 | } 94 | 95 | /// Gracefully terminates this connection as soon as possible. 96 | /// 97 | /// If any outbound packets are pending, they may be flushed prior to disconnecting, but no 98 | /// packets are guaranteed to be received by the client. The connection will remain active 99 | /// until the next call to [`Server::step()`](super::Server::step). 100 | pub fn disconnect_now(&mut self) { 101 | match self.state { 102 | State::Active(ref mut state) => { 103 | state.disconnect_signal = Some(DisconnectMode::Now); 104 | } 105 | _ => (), 106 | } 107 | } 108 | 109 | /// Returns the current estimate of the round-trip time (RTT), in seconds. 
110 | /// 111 | /// If the RTT has not yet been computed, or if the connection is not active, `None` is 112 | /// returned instead. 113 | pub fn rtt_s(&self) -> Option { 114 | match self.state { 115 | State::Active(ref state) => state.half_connection.rtt_s(), 116 | _ => None, 117 | } 118 | } 119 | 120 | /// Returns the total size of the send buffer (i.e. those packets which have not yet been 121 | /// acknowledged), in bytes. 122 | /// 123 | /// This figure represents the amount of memory allocated for outgoing packets. Packets which 124 | /// are marked [`TimeSensitive`](SendMode::TimeSensitive) are included in this total, even if 125 | /// they would not be sent. 126 | pub fn send_buffer_size(&self) -> usize { 127 | match self.state { 128 | State::Active(ref state) => state.half_connection.send_buffer_size(), 129 | _ => 0, 130 | } 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /src/udp_frame_sink.rs: -------------------------------------------------------------------------------- 1 | 2 | use crate::half_connection; 3 | 4 | use std::net; 5 | 6 | // TODO: A Result stored here could be used to forward errors to 7 | // client/server step/flush after the FrameSink has been used. 
8 | pub struct UdpFrameSink<'a> { 9 | socket: &'a net::UdpSocket, 10 | address: net::SocketAddr, 11 | } 12 | 13 | impl<'a> UdpFrameSink<'a> { 14 | pub fn new(socket: &'a net::UdpSocket, address: net::SocketAddr) -> Self { 15 | Self { 16 | socket: socket, 17 | address: address, 18 | } 19 | } 20 | } 21 | 22 | impl<'a> half_connection::FrameSink for UdpFrameSink<'a> { 23 | fn send(&mut self, frame_data: &[u8]) { 24 | //use crate::frame; 25 | //use frame::serial::Serialize; 26 | //let time_millis = std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_millis(); 27 | //println!("{} {:?}", time_millis, frame::Frame::read(&frame_data)); 28 | let _ = self.socket.send_to(frame_data, self.address); 29 | } 30 | } 31 | 32 | -------------------------------------------------------------------------------- /tests/disconnect.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | use std::time; 3 | 4 | static DURATION: time::Duration = time::Duration::from_secs(1); 5 | static STEP_INTERVAL: time::Duration = time::Duration::from_millis(100); 6 | 7 | #[test] 8 | fn client_disconnect_now() { 9 | let server_thread = thread::spawn(|| { 10 | let cfg = Default::default(); 11 | let mut server = uflow::server::Server::bind("127.0.0.1:9999", cfg).unwrap(); 12 | 13 | // We expect to see exactly one Connect and one Disconnect within DURATION 14 | let mut connect_seen = false; 15 | let mut disconnect_seen = false; 16 | let end_time = time::Instant::now() + DURATION; 17 | 18 | while time::Instant::now() < end_time { 19 | for event in server.step() { 20 | match event { 21 | uflow::server::Event::Connect(_) => { 22 | assert_eq!(connect_seen, false); 23 | assert_eq!(disconnect_seen, false); 24 | connect_seen = true; 25 | } 26 | uflow::server::Event::Disconnect(_) => { 27 | assert_eq!(connect_seen, true); 28 | assert_eq!(disconnect_seen, false); 29 | disconnect_seen = true; 30 | } 31 | other => panic!("unexpected event: 
{:?}", other), 32 | } 33 | } 34 | 35 | thread::sleep(STEP_INTERVAL); 36 | } 37 | 38 | if !connect_seen { 39 | panic!("[server] no connect event received"); 40 | } 41 | 42 | if !disconnect_seen { 43 | panic!("[server] no disconnect event received"); 44 | } 45 | }); 46 | 47 | thread::sleep(STEP_INTERVAL/2); 48 | 49 | let client_thread = thread::spawn(|| { 50 | let cfg = Default::default(); 51 | let mut client = uflow::client::Client::connect("127.0.0.1:9999", cfg).unwrap(); 52 | 53 | // We expect to see exactly one Connect and one Disconnect within DURATION 54 | let mut connect_seen = false; 55 | let mut disconnect_seen = false; 56 | let end_time = time::Instant::now() + DURATION; 57 | 58 | while time::Instant::now() < end_time { 59 | for event in client.step() { 60 | match event { 61 | uflow::client::Event::Connect => { 62 | assert_eq!(connect_seen, false); 63 | assert_eq!(disconnect_seen, false); 64 | connect_seen = true; 65 | 66 | client.disconnect_now(); 67 | } 68 | uflow::client::Event::Disconnect => { 69 | assert_eq!(connect_seen, true); 70 | assert_eq!(disconnect_seen, false); 71 | disconnect_seen = true; 72 | } 73 | other => panic!("unexpected event: {:?}", other), 74 | } 75 | } 76 | 77 | thread::sleep(STEP_INTERVAL); 78 | } 79 | 80 | if !connect_seen { 81 | panic!("[client] no connect event received"); 82 | } 83 | 84 | if !disconnect_seen { 85 | panic!("[client] no disconnect event received"); 86 | } 87 | }); 88 | 89 | client_thread.join().unwrap(); 90 | server_thread.join().unwrap(); 91 | } 92 | 93 | #[test] 94 | fn server_disconnect_now() { 95 | let server_thread = thread::spawn(|| { 96 | let cfg = Default::default(); 97 | let mut server = uflow::server::Server::bind("127.0.0.1:8888", cfg).unwrap(); 98 | 99 | // We expect to see exactly one Connect and one Disconnect within DURATION 100 | let mut connect_seen = false; 101 | let mut disconnect_seen = false; 102 | let end_time = time::Instant::now() + DURATION; 103 | 104 | while time::Instant::now() < 
end_time { 105 | for event in server.step() { 106 | match event { 107 | uflow::server::Event::Connect(peer_address) => { 108 | assert_eq!(connect_seen, false); 109 | assert_eq!(disconnect_seen, false); 110 | connect_seen = true; 111 | 112 | server.client(&peer_address).unwrap().borrow_mut().disconnect_now(); 113 | } 114 | uflow::server::Event::Disconnect(_) => { 115 | assert_eq!(connect_seen, true); 116 | assert_eq!(disconnect_seen, false); 117 | disconnect_seen = true; 118 | } 119 | other => panic!("unexpected event: {:?}", other), 120 | } 121 | } 122 | 123 | thread::sleep(STEP_INTERVAL); 124 | } 125 | 126 | if !connect_seen { 127 | panic!("[server] no connect event received"); 128 | } 129 | 130 | if !disconnect_seen { 131 | panic!("[server] no disconnect event received"); 132 | } 133 | }); 134 | 135 | thread::sleep(STEP_INTERVAL/2); 136 | 137 | let client_thread = thread::spawn(|| { 138 | let cfg = Default::default(); 139 | let mut client = uflow::client::Client::connect("127.0.0.1:8888", cfg).unwrap(); 140 | 141 | // We expect to see exactly one Connect and one Disconnect within DURATION 142 | let mut connect_seen = false; 143 | let mut disconnect_seen = false; 144 | let end_time = time::Instant::now() + DURATION; 145 | 146 | while time::Instant::now() < end_time { 147 | for event in client.step() { 148 | match event { 149 | uflow::client::Event::Connect => { 150 | assert_eq!(connect_seen, false); 151 | assert_eq!(disconnect_seen, false); 152 | connect_seen = true; 153 | } 154 | uflow::client::Event::Disconnect => { 155 | assert_eq!(connect_seen, true); 156 | assert_eq!(disconnect_seen, false); 157 | disconnect_seen = true; 158 | } 159 | other => panic!("unexpected event: {:?}", other), 160 | } 161 | } 162 | 163 | thread::sleep(STEP_INTERVAL); 164 | } 165 | 166 | if !connect_seen { 167 | panic!("[client] no connect event received"); 168 | } 169 | 170 | if !disconnect_seen { 171 | panic!("[client] no disconnect event received"); 172 | } 173 | }); 174 | 175 | 
client_thread.join().unwrap(); 176 | server_thread.join().unwrap(); 177 | } 178 | 179 | #[test] 180 | fn client_disconnect_flush() { 181 | let server_thread = thread::spawn(|| { 182 | let cfg = Default::default(); 183 | let mut server = uflow::server::Server::bind("127.0.0.1:7777", cfg).unwrap(); 184 | 185 | // We expect to see exactly one Connect, one Receive, and one Disconnect within DURATION 186 | let mut connect_seen = false; 187 | let mut receive_seen = false; 188 | let mut disconnect_seen = false; 189 | let end_time = time::Instant::now() + DURATION; 190 | 191 | while time::Instant::now() < end_time { 192 | for event in server.step() { 193 | match event { 194 | uflow::server::Event::Connect(_) => { 195 | assert_eq!(connect_seen, false); 196 | assert_eq!(receive_seen, false); 197 | assert_eq!(disconnect_seen, false); 198 | connect_seen = true; 199 | } 200 | uflow::server::Event::Receive(_, data) => { 201 | assert_eq!(connect_seen, true); 202 | assert_eq!(receive_seen, false); 203 | assert_eq!(disconnect_seen, false); 204 | receive_seen = true; 205 | 206 | assert_eq!(data, [0, 1, 2, 3].into()); 207 | } 208 | uflow::server::Event::Disconnect(_) => { 209 | assert_eq!(connect_seen, true); 210 | assert_eq!(receive_seen, true); 211 | assert_eq!(disconnect_seen, false); 212 | disconnect_seen = true; 213 | } 214 | other => panic!("unexpected event: {:?}", other), 215 | } 216 | } 217 | 218 | thread::sleep(STEP_INTERVAL); 219 | } 220 | 221 | if !connect_seen { 222 | panic!("[server] no connect event received"); 223 | } 224 | 225 | if !receive_seen { 226 | panic!("[server] no receive event received"); 227 | } 228 | 229 | if !disconnect_seen { 230 | panic!("[server] no disconnect event received"); 231 | } 232 | }); 233 | 234 | thread::sleep(STEP_INTERVAL/2); 235 | 236 | let client_thread = thread::spawn(|| { 237 | let cfg = Default::default(); 238 | let mut client = uflow::client::Client::connect("127.0.0.1:7777", cfg).unwrap(); 239 | 240 | // We expect to see exactly 
one Connect, and one Disconnect within DURATION 241 | let mut connect_seen = false; 242 | let mut disconnect_seen = false; 243 | let end_time = time::Instant::now() + DURATION; 244 | 245 | while time::Instant::now() < end_time { 246 | for event in client.step() { 247 | match event { 248 | uflow::client::Event::Connect => { 249 | assert_eq!(connect_seen, false); 250 | assert_eq!(disconnect_seen, false); 251 | connect_seen = true; 252 | 253 | client.disconnect(); 254 | client.send([0, 1, 2, 3].into(), 0, uflow::SendMode::Reliable); 255 | } 256 | uflow::client::Event::Disconnect => { 257 | assert_eq!(connect_seen, true); 258 | assert_eq!(disconnect_seen, false); 259 | disconnect_seen = true; 260 | } 261 | other => panic!("unexpected event: {:?}", other), 262 | } 263 | } 264 | 265 | thread::sleep(STEP_INTERVAL); 266 | } 267 | 268 | if !connect_seen { 269 | panic!("[client] no connect event received"); 270 | } 271 | 272 | if !disconnect_seen { 273 | panic!("[client] no disconnect event received"); 274 | } 275 | }); 276 | 277 | client_thread.join().unwrap(); 278 | server_thread.join().unwrap(); 279 | } 280 | 281 | #[test] 282 | fn server_disconnect_flush() { 283 | let server_thread = thread::spawn(|| { 284 | let cfg = Default::default(); 285 | let mut server = uflow::server::Server::bind("127.0.0.1:6666", cfg).unwrap(); 286 | 287 | // We expect to see exactly one Connect, one Receive, and one Disconnect within DURATION 288 | let mut connect_seen = false; 289 | let mut disconnect_seen = false; 290 | let end_time = time::Instant::now() + DURATION; 291 | 292 | while time::Instant::now() < end_time { 293 | for event in server.step() { 294 | match event { 295 | uflow::server::Event::Connect(peer_address) => { 296 | assert_eq!(connect_seen, false); 297 | assert_eq!(disconnect_seen, false); 298 | connect_seen = true; 299 | 300 | let mut client = server.client(&peer_address).unwrap().borrow_mut(); 301 | client.disconnect(); 302 | client.send([0, 1, 2, 3].into(), 0, 
uflow::SendMode::Reliable); 303 | } 304 | uflow::server::Event::Disconnect(_) => { 305 | assert_eq!(connect_seen, true); 306 | assert_eq!(disconnect_seen, false); 307 | disconnect_seen = true; 308 | } 309 | other => panic!("unexpected event: {:?}", other), 310 | } 311 | } 312 | 313 | thread::sleep(STEP_INTERVAL); 314 | } 315 | 316 | if !connect_seen { 317 | panic!("[server] no connect event received"); 318 | } 319 | 320 | if !disconnect_seen { 321 | panic!("[server] no disconnect event received"); 322 | } 323 | }); 324 | 325 | thread::sleep(STEP_INTERVAL/2); 326 | 327 | let client_thread = thread::spawn(|| { 328 | let cfg = Default::default(); 329 | let mut client = uflow::client::Client::connect("127.0.0.1:6666", cfg).unwrap(); 330 | 331 | // We expect to see exactly one Connect, one Receive, and one Disconnect within DURATION 332 | let mut connect_seen = false; 333 | let mut receive_seen = false; 334 | let mut disconnect_seen = false; 335 | let end_time = time::Instant::now() + DURATION; 336 | 337 | while time::Instant::now() < end_time { 338 | for event in client.step() { 339 | match event { 340 | uflow::client::Event::Connect => { 341 | assert_eq!(connect_seen, false); 342 | assert_eq!(receive_seen, false); 343 | assert_eq!(disconnect_seen, false); 344 | connect_seen = true; 345 | } 346 | uflow::client::Event::Receive(data) => { 347 | assert_eq!(connect_seen, true); 348 | assert_eq!(receive_seen, false); 349 | assert_eq!(disconnect_seen, false); 350 | receive_seen = true; 351 | 352 | assert_eq!(data, [0, 1, 2, 3].into()); 353 | } 354 | uflow::client::Event::Disconnect => { 355 | assert_eq!(connect_seen, true); 356 | assert_eq!(receive_seen, true); 357 | assert_eq!(disconnect_seen, false); 358 | disconnect_seen = true; 359 | } 360 | other => panic!("unexpected event: {:?}", other), 361 | } 362 | } 363 | 364 | thread::sleep(STEP_INTERVAL); 365 | } 366 | 367 | if !connect_seen { 368 | panic!("[client] no connect event received"); 369 | } 370 | 371 | if 
!disconnect_seen { 372 | panic!("[client] no disconnect event received"); 373 | } 374 | }); 375 | 376 | client_thread.join().unwrap(); 377 | server_thread.join().unwrap(); 378 | } 379 | -------------------------------------------------------------------------------- /tests/ideal_transfer.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | use std::time; 3 | 4 | use std::convert::TryInto; 5 | 6 | extern crate md5; 7 | 8 | const STEP_DURATION: time::Duration = time::Duration::from_millis(15); 9 | 10 | const NUM_CHANNELS: usize = 4; 11 | 12 | fn server_thread() -> Vec { 13 | let cfg = Default::default(); 14 | 15 | let mut server = uflow::server::Server::bind("127.0.0.1:8888", cfg).unwrap(); 16 | 17 | let mut all_data: Vec> = vec![Vec::new(); NUM_CHANNELS as usize]; 18 | let mut packet_ids = [0u32; NUM_CHANNELS]; 19 | let mut connect_seen = false; 20 | 21 | 'outer: loop { 22 | for event in server.step() { 23 | match event { 24 | uflow::server::Event::Connect(client_addr) => { 25 | assert_eq!(connect_seen, false); 26 | connect_seen = true; 27 | 28 | println!("[server] client connected from {:?}", client_addr); 29 | } 30 | uflow::server::Event::Receive(_client_addr, data) => { 31 | let channel_id = data[0] as usize; 32 | let packet_id = u32::from_be_bytes(data[1..5].try_into().unwrap()); 33 | 34 | //println!("[server] received data on channel id {}\ndata begins with: {:?}", channel_id, &data[0..4]); 35 | 36 | let ref mut packet_id_expected = packet_ids[channel_id]; 37 | 38 | if packet_id != *packet_id_expected { 39 | panic!("[server] data skipped! 
received ID: {} expected ID: {}", packet_id, packet_id_expected); 40 | } 41 | 42 | all_data[channel_id].extend_from_slice(&data); 43 | *packet_id_expected += 1; 44 | } 45 | uflow::server::Event::Disconnect(_client_addr) => { 46 | println!("[server] client disconnected"); 47 | break 'outer; 48 | } 49 | other => panic!("[server] unexpected event: {:?}", other), 50 | } 51 | } 52 | 53 | server.flush(); 54 | 55 | thread::sleep(STEP_DURATION); 56 | } 57 | 58 | println!("[server] exiting"); 59 | 60 | return all_data.into_iter().map(|data| md5::compute(data)).collect(); 61 | } 62 | 63 | fn client_thread() -> Vec { 64 | let cfg = Default::default(); 65 | 66 | let mut client = uflow::client::Client::connect("127.0.0.1:8888", cfg).unwrap(); 67 | 68 | let num_steps = 100; 69 | let packets_per_step = 6; 70 | let packet_size = uflow::MAX_FRAGMENT_SIZE; 71 | 72 | let mut all_data: Vec> = vec![Vec::new(); NUM_CHANNELS as usize]; 73 | let mut packet_ids = [0u32; NUM_CHANNELS]; 74 | let mut connect_seen = false; 75 | 76 | for _ in 0..num_steps { 77 | for event in client.step() { 78 | match event { 79 | uflow::client::Event::Connect => { 80 | assert_eq!(connect_seen, false); 81 | connect_seen = true; 82 | 83 | println!("[client] connected to server"); 84 | } 85 | other => panic!("[client] unexpected event: {:?}", other), 86 | } 87 | } 88 | 89 | for _ in 0..packets_per_step { 90 | let channel_id = rand::random::() % NUM_CHANNELS; 91 | let ref mut packet_id = packet_ids[channel_id]; 92 | 93 | let mut data = (0..packet_size).map(|_| rand::random::()).collect::>().into_boxed_slice(); 94 | data[0] = channel_id as u8; 95 | data[1..5].clone_from_slice(&packet_id.to_be_bytes()); 96 | 97 | // Our local loopback connection is assumed to be both ordered and lossless! 
98 | let mode = match rand::random::() % 3 { 99 | 0 => uflow::SendMode::Unreliable, 100 | 1 => uflow::SendMode::Persistent, 101 | 2 => uflow::SendMode::Reliable, 102 | _ => panic!("NANI!?"), 103 | }; 104 | 105 | all_data[channel_id].extend_from_slice(&data); 106 | 107 | client.send(data, channel_id, mode); 108 | 109 | println!("[client] sent packet {} on channel {}", packet_id, channel_id); 110 | 111 | *packet_id += 1; 112 | } 113 | 114 | client.flush(); 115 | 116 | thread::sleep(STEP_DURATION); 117 | } 118 | 119 | println!("[client] disconnecting"); 120 | client.disconnect(); 121 | 122 | 'outer: loop { 123 | for event in client.step() { 124 | match event { 125 | uflow::client::Event::Disconnect => { 126 | println!("[client] server disconnected"); 127 | break 'outer; 128 | } 129 | other => panic!("[client] unexpected event: {:?}", other), 130 | } 131 | } 132 | 133 | client.flush(); 134 | 135 | thread::sleep(STEP_DURATION); 136 | } 137 | 138 | println!("[client] Exiting"); 139 | 140 | return all_data.into_iter().map(|data| md5::compute(data)).collect(); 141 | } 142 | 143 | #[test] 144 | fn ideal_transfer() { 145 | let server = thread::spawn(server_thread); 146 | 147 | thread::sleep(time::Duration::from_millis(200)); 148 | 149 | let client = thread::spawn(client_thread); 150 | 151 | let server_md5s = server.join().unwrap(); 152 | let client_md5s = client.join().unwrap(); 153 | 154 | assert_eq!(server_md5s, client_md5s); 155 | } 156 | -------------------------------------------------------------------------------- /tests/reliable_transfer.rs: -------------------------------------------------------------------------------- 1 | use std::net; 2 | use std::thread; 3 | use std::time; 4 | 5 | use std::convert::TryInto; 6 | 7 | extern crate md5; 8 | 9 | const STEP_DURATION: time::Duration = time::Duration::from_millis(15); 10 | 11 | const NUM_CHANNELS: usize = 64; 12 | 13 | struct BandwidthLimiter { 14 | bandwidth: f64, 15 | limit: usize, 16 | last_send_time: time::Instant, 
17 | count: usize, 18 | } 19 | 20 | impl BandwidthLimiter { 21 | fn new(bandwidth: f64, limit: usize) -> Self { 22 | Self { 23 | bandwidth: bandwidth, 24 | limit: limit, 25 | last_send_time: time::Instant::now(), 26 | count: 0, 27 | } 28 | } 29 | 30 | fn try_send(&mut self, size: usize) -> bool { 31 | let now = time::Instant::now(); 32 | 33 | let tokens = ((now - self.last_send_time).as_secs_f64() * self.bandwidth) as usize; 34 | 35 | self.last_send_time = now; 36 | 37 | if tokens <= self.count { 38 | self.count -= tokens; 39 | } else { 40 | self.count = 0; 41 | } 42 | 43 | if self.count + size <= self.limit { 44 | self.count += size; 45 | return true; 46 | } else { 47 | return false; 48 | } 49 | } 50 | } 51 | 52 | fn router_thread() { 53 | let socket = net::UdpSocket::bind("127.0.0.1:9001").unwrap(); 54 | 55 | // Throttle data to 300kB/s, with a maximum queue size of 20kB 56 | let server_addr: net::SocketAddr = "127.0.0.1:8888".parse().unwrap(); 57 | let mut server_limiter = BandwidthLimiter::new(300_000.0, 20_000); 58 | 59 | let mut server_bytes_sent = 0; 60 | let mut server_bytes_dropped = 0; 61 | 62 | let mut client_addr: Option = None; 63 | let mut client_limiter = BandwidthLimiter::new(300_000.0, 20_000); 64 | 65 | loop { 66 | let mut recv_buf = [0; 1500]; 67 | 68 | while let Ok((recv_size, src_addr)) = socket.recv_from(&mut recv_buf) { 69 | let udp_frame_size = recv_size + uflow::UDP_HEADER_SIZE; 70 | 71 | if src_addr.port() == 8888 { 72 | if let Some(client_addr) = client_addr { 73 | if client_limiter.try_send(udp_frame_size) { 74 | //println!("client-bound packet sent! 
size: {}", recv_size); 75 | socket.send_to(&recv_buf[..recv_size], client_addr).unwrap(); 76 | } else { 77 | println!("[router] client-bound packet dropped!"); 78 | } 79 | } 80 | } else { 81 | if client_addr.is_none() { 82 | client_addr = Some(src_addr); 83 | } 84 | if server_limiter.try_send(udp_frame_size) { 85 | socket.send_to(&recv_buf[..recv_size], server_addr).unwrap(); 86 | server_bytes_sent += udp_frame_size; 87 | 88 | /* 89 | println!("sent! dropped/total: {}/{} ({:.3})", 90 | server_bytes_dropped, 91 | server_bytes_sent + server_bytes_dropped, 92 | server_bytes_dropped as f64 / (server_bytes_sent + server_bytes_dropped) as f64); 93 | */ 94 | } else { 95 | //println!("server-bound packet dropped!"); 96 | server_bytes_dropped += udp_frame_size; 97 | 98 | println!("[router] drop! dropped/total: {}/{} ({:.3})", 99 | server_bytes_dropped, 100 | server_bytes_sent + server_bytes_dropped, 101 | server_bytes_dropped as f64 / (server_bytes_sent + server_bytes_dropped) as f64); 102 | } 103 | } 104 | } 105 | } 106 | } 107 | 108 | fn server_thread() -> Vec { 109 | let cfg = Default::default(); 110 | 111 | let mut server = uflow::server::Server::bind("127.0.0.1:8888", cfg).unwrap(); 112 | 113 | let mut all_data: Vec> = vec![Vec::new(); NUM_CHANNELS as usize]; 114 | let mut packet_ids = [0u32; NUM_CHANNELS]; 115 | let mut connect_seen = false; 116 | 117 | 'outer: loop { 118 | for event in server.step() { 119 | match event { 120 | uflow::server::Event::Connect(client_addr) => { 121 | assert_eq!(connect_seen, false); 122 | connect_seen = true; 123 | 124 | println!("[server] client connected from {:?}", client_addr); 125 | } 126 | uflow::server::Event::Receive(_client_addr, data) => { 127 | let channel_id = data[0] as usize; 128 | let packet_id = u32::from_be_bytes(data[1..5].try_into().unwrap()); 129 | 130 | //println!("[server] received data on channel id {}\ndata begins with: {:?}", channel_id, &data[0..4]); 131 | 132 | let ref mut packet_id_expected = 
packet_ids[channel_id]; 133 | 134 | if packet_id != *packet_id_expected { 135 | panic!("[server] data skipped! received ID: {} expected ID: {}", packet_id, packet_id_expected); 136 | } 137 | 138 | all_data[channel_id].extend_from_slice(&data); 139 | *packet_id_expected += 1; 140 | } 141 | uflow::server::Event::Disconnect(_client_addr) => { 142 | println!("[server] client disconnected"); 143 | break 'outer; 144 | } 145 | other => panic!("[server] unexpected event: {:?}", other), 146 | } 147 | } 148 | 149 | server.flush(); 150 | 151 | thread::sleep(STEP_DURATION); 152 | } 153 | 154 | println!("[server] exiting"); 155 | 156 | return all_data.into_iter().map(|data| md5::compute(data)).collect(); 157 | } 158 | 159 | fn client_thread() -> Vec { 160 | let cfg = Default::default(); 161 | 162 | let mut client = uflow::client::Client::connect("127.0.0.1:8888", cfg).unwrap(); 163 | 164 | // Send data at ~= 6 * 1500 B / 0.015 s = 600kB/s 165 | let num_steps = 200; 166 | let packets_per_step = 6; 167 | let packet_size = uflow::MAX_FRAGMENT_SIZE; 168 | 169 | let mut all_data: Vec> = vec![Vec::new(); NUM_CHANNELS as usize]; 170 | let mut packet_ids = [0u32; NUM_CHANNELS]; 171 | let mut connect_seen = false; 172 | 173 | for _ in 0..num_steps { 174 | for event in client.step() { 175 | match event { 176 | uflow::client::Event::Connect => { 177 | assert_eq!(connect_seen, false); 178 | connect_seen = true; 179 | 180 | println!("[client] connected to server"); 181 | } 182 | other => panic!("[client] unexpected event: {:?}", other), 183 | } 184 | } 185 | 186 | for _ in 0..packets_per_step { 187 | let channel_id = rand::random::() % NUM_CHANNELS; 188 | let ref mut packet_id = packet_ids[channel_id]; 189 | 190 | let mut data = (0..packet_size).map(|_| rand::random::()).collect::>().into_boxed_slice(); 191 | data[0] = channel_id as u8; 192 | data[1..5].clone_from_slice(&packet_id.to_be_bytes()); 193 | 194 | all_data[channel_id].extend_from_slice(&data); 195 | client.send(data, channel_id, 
uflow::SendMode::Reliable); 196 | 197 | //println!("[client] sent packet {} on channel {}", packet_id, channel_id); 198 | 199 | *packet_id += 1; 200 | } 201 | 202 | client.flush(); 203 | 204 | thread::sleep(STEP_DURATION); 205 | } 206 | 207 | println!("[client] disconnecting"); 208 | client.disconnect(); 209 | 210 | 'outer: loop { 211 | for event in client.step() { 212 | match event { 213 | uflow::client::Event::Disconnect => { 214 | println!("[client] server disconnected"); 215 | break 'outer; 216 | } 217 | other => panic!("[client] unexpected event: {:?}", other), 218 | } 219 | } 220 | 221 | client.flush(); 222 | 223 | thread::sleep(STEP_DURATION); 224 | } 225 | 226 | println!("[client] exiting"); 227 | 228 | return all_data.into_iter().map(|data| md5::compute(data)).collect(); 229 | } 230 | 231 | #[test] 232 | fn reliable_transfer() { 233 | thread::spawn(router_thread); 234 | 235 | thread::sleep(time::Duration::from_millis(200)); 236 | 237 | let server = thread::spawn(server_thread); 238 | 239 | thread::sleep(time::Duration::from_millis(200)); 240 | 241 | let client = thread::spawn(client_thread); 242 | 243 | let server_md5s = server.join().unwrap(); 244 | let client_md5s = client.join().unwrap(); 245 | 246 | assert_eq!(server_md5s, client_md5s); 247 | } 248 | -------------------------------------------------------------------------------- /tests/timeouts.rs: -------------------------------------------------------------------------------- 1 | use std::thread; 2 | use std::time; 3 | 4 | static TIMEOUT_MS: u64 = 3000; 5 | static TEST_TIMEOUT_MS: u64 = 4000; 6 | 7 | #[test] 8 | fn client_handshake_timeout() { 9 | let cfg = Default::default(); 10 | let mut client = uflow::client::Client::connect("127.0.0.1:8888", cfg).unwrap(); 11 | 12 | // We expect to see exactly one Error(ErrorType::Timeout) within 25 seconds 13 | let end_time = time::Instant::now() + time::Duration::from_secs(25); 14 | let mut timeout_seen = false; 15 | 16 | while time::Instant::now() < end_time 
{
        for ev in client.step() {
            match ev {
                uflow::client::Event::Error(err) => {
                    // The timeout must be reported exactly once.
                    assert_eq!(timeout_seen, false);
                    assert_eq!(err, uflow::client::ErrorType::Timeout);
                    timeout_seen = true;
                }
                other => panic!("unexpected event: {:?}", other),
            }
        }

        thread::sleep(time::Duration::from_millis(100));
    }

    if !timeout_seen {
        panic!("no timeout event received");
    }
}

// A connected client whose server silently drops it must report exactly one
// Connect followed by exactly one Error(ErrorType::Timeout).
#[test]
fn client_active_timeout() {
    // Server side: accept each connection, then immediately drop it so the
    // client stops receiving traffic and hits its active timeout.
    thread::spawn(|| {
        let cfg = Default::default();
        let mut server = uflow::server::Server::bind("127.0.0.1:9999", cfg).unwrap();

        loop {
            for ev in server.step() {
                if let uflow::server::Event::Connect(addr) = ev {
                    server.drop(&addr);
                }
            }

            thread::sleep(time::Duration::from_millis(100));
        }
    });

    let client = thread::spawn(|| {
        let cfg = uflow::client::Config {
            endpoint_config: uflow::EndpointConfig {
                active_timeout_ms: TIMEOUT_MS,
                ..Default::default()
            }
        };

        let mut client = uflow::client::Client::connect("127.0.0.1:9999", cfg).unwrap();

        // Exactly one Connect and one Error(ErrorType::Timeout) are expected
        // before the test deadline.
        let end_time = time::Instant::now() + time::Duration::from_millis(TEST_TIMEOUT_MS);
        let mut connect_seen = false;
        let mut timeout_seen = false;

        while time::Instant::now() < end_time {
            for ev in client.step() {
                match ev {
                    uflow::client::Event::Connect => {
                        assert_eq!(connect_seen, false);
                        connect_seen = true;
                    }
                    uflow::client::Event::Error(err) => {
                        assert_eq!(timeout_seen, false);
                        assert_eq!(err, uflow::client::ErrorType::Timeout);
                        timeout_seen = true;
                    }
                    other => panic!("unexpected event: {:?}", other),
                }
            }

thread::sleep(time::Duration::from_millis(100));
        }

        if !connect_seen {
            panic!("no connect event received");
        }

        if !timeout_seen {
            panic!("no timeout event received");
        }
    });

    client.join().unwrap();
}

// A server that receives only the first SYN of a handshake (the client gives
// up immediately) must report exactly one Error(_, ErrorType::Timeout).
#[test]
fn server_handshake_timeout() {
    let server = thread::spawn(|| {
        let cfg = uflow::server::Config {
            enable_handshake_errors: true,
            ..Default::default()
        };

        let mut server = uflow::server::Server::bind("127.0.0.1:7777", cfg).unwrap();

        // Exactly one Error(_, ErrorType::Timeout) is expected within 25 seconds.
        let end_time = time::Instant::now() + time::Duration::from_secs(25);
        let mut timeout_seen = false;

        while time::Instant::now() < end_time {
            for ev in server.step() {
                match ev {
                    uflow::server::Event::Error(_, err) => {
                        assert_eq!(timeout_seen, false);
                        assert_eq!(err, uflow::server::ErrorType::Timeout);
                        timeout_seen = true;
                    }
                    other => panic!("unexpected event: {:?}", other),
                }
            }

            thread::sleep(time::Duration::from_millis(100));
        }

        if !timeout_seen {
            panic!("no timeout event received");
        }
    });

    thread::sleep(time::Duration::from_secs(1));

    // Client::connect sends the first SYN, and this is all the server will receive
    uflow::client::Client::connect("127.0.0.1:7777", Default::default()).unwrap();

    server.join().unwrap();
}

// A server whose client vanishes right after connecting must report exactly
// one Connect followed by exactly one Error(_, ErrorType::Timeout).
#[test]
fn server_active_timeout() {
    // Client side: connect, then exit as soon as the connection is
    // established, leaving the server to hit its active timeout.
    thread::spawn(|| {
        let cfg = Default::default();
        let mut client = uflow::client::Client::connect("127.0.0.1:6666", cfg).unwrap();

        loop {
            for ev in client.step() {
                if let uflow::client::Event::Connect = ev {
                    return;
                }
            }

            thread::sleep(time::Duration::from_millis(100));
}
    });

    let server = thread::spawn(|| {
        let cfg = uflow::server::Config {
            endpoint_config: uflow::EndpointConfig {
                active_timeout_ms: TIMEOUT_MS,
                ..Default::default()
            },
            ..Default::default()
        };

        let mut server = uflow::server::Server::bind("127.0.0.1:6666", cfg).unwrap();

        // Exactly one Connect and one Error(_, ErrorType::Timeout) are
        // expected before the test deadline.
        let end_time = time::Instant::now() + time::Duration::from_millis(TEST_TIMEOUT_MS);
        let mut connect_seen = false;
        let mut timeout_seen = false;

        while time::Instant::now() < end_time {
            for ev in server.step() {
                match ev {
                    uflow::server::Event::Connect(_) => {
                        assert_eq!(connect_seen, false);
                        connect_seen = true;
                    }
                    uflow::server::Event::Error(_, err) => {
                        assert_eq!(timeout_seen, false);
                        assert_eq!(err, uflow::server::ErrorType::Timeout);
                        timeout_seen = true;
                    }
                    other => panic!("unexpected event: {:?}", other),
                }
            }

            thread::sleep(time::Duration::from_millis(100));
        }

        if !connect_seen {
            panic!("no connect event received");
        }

        if !timeout_seen {
            panic!("no timeout event received");
        }
    });

    server.join().unwrap();
}
--------------------------------------------------------------------------------
/whitepaper.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lowquark/uflow/1e15c4552f591d8c9bcea2036337fd41f74e6f72/whitepaper.pdf
--------------------------------------------------------------------------------