├── rustfmt.toml ├── .gitignore ├── src ├── either.rs ├── test_utils │ ├── mod.rs │ ├── fake_socket.rs │ └── network_emulator.rs ├── packet │ ├── header │ │ ├── header_writer.rs │ │ ├── header_reader.rs │ │ ├── arranging_header.rs │ │ ├── fragment_header.rs │ │ ├── acked_packet_header.rs │ │ └── standard_header.rs │ ├── header.rs │ ├── process_result.rs │ ├── enums.rs │ └── outgoing.rs ├── infrastructure.rs ├── net │ ├── events.rs │ ├── constants.rs │ ├── link_conditioner.rs │ ├── connection.rs │ ├── quality.rs │ ├── connection_impl.rs │ └── socket.rs ├── packet.rs ├── sequence_buffer │ ├── congestion_data.rs │ └── reassembly_data.rs ├── net.rs ├── protocol_version.rs ├── lib.rs ├── infrastructure │ ├── congestion.rs │ ├── arranging.rs │ └── fragmenter.rs ├── bin │ ├── cli.yml │ └── laminar-tester.rs ├── throughput.rs ├── config.rs ├── error.rs └── sequence_buffer.rs ├── docs ├── md_book │ ├── book.toml │ └── src │ │ ├── SUMMARY.md │ │ ├── congestion_avoidence │ │ ├── congestion_avoidance.md │ │ └── rtt.md │ │ ├── packet_header.md │ │ ├── reliability │ │ ├── basics.md │ │ ├── ordering.md │ │ └── reliability.md │ │ ├── important.md │ │ ├── intro.md │ │ ├── fragmentation.md │ │ ├── heartbeat.md │ │ └── protocols.md ├── technical │ ├── hearder_design.png │ ├── ordering_sequencing_bpmn.png │ ├── incoming_packet_processing_bpmn.png │ ├── outgoing_packet_processing_bpmn.png │ └── README.md ├── FEATURES.md ├── LICENSE-MIT ├── CHANGELOG.md └── CONTRIBUTING.md ├── codecov.yml ├── tests ├── common │ ├── mod.rs │ ├── client.rs │ └── server.rs ├── Makefile ├── docker-compose.yml ├── Dockerfile ├── fragmentation_packets_test.rs ├── unreliable_packets_test.rs └── basic_socket_test.rs ├── bors.toml ├── .github └── workflows │ └── build_and_test.yml ├── ci └── coverage.sh ├── Cargo.toml ├── examples ├── udp.rs ├── simple_udp.rs └── server_client.rs ├── benches └── packet_processing.rs ├── Jenkinsfile └── README.md /rustfmt.toml: -------------------------------------------------------------------------------- 1 | reorder_imports = true 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | book/ 3 | .idea/ 4 | **/*.rs.bk 5 | Cargo.lock 6 | .DS_Store 7 | .idea 8 | -------------------------------------------------------------------------------- /src/either.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug)] 2 | pub(crate) enum Either { 3 | Left(L), 4 | Right(R), 5 | } 6 | -------------------------------------------------------------------------------- /docs/md_book/book.toml: -------------------------------------------------------------------------------- 1 | [book] 2 | authors = ["TimonPost"] 3 | multilingual = false 4 | src = "src" 5 | title = "Laminar" 6 | -------------------------------------------------------------------------------- /docs/technical/hearder_design.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Pycckue-Bnepeg/laminar/master/docs/technical/hearder_design.png -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | precision: 2 3 | round: down 4 | range: "90...100" 5 | ignore: 6 | - "src/error" 7 | - "Jeninsfile" -------------------------------------------------------------------------------- 
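Editor's note: the `HeaderWriter` and `HeaderReader` traits listed above (`src/packet/header/header_writer.rs`, `src/packet/header/header_reader.rs`) are implemented by each concrete header type; `arranging_header.rs` at the end of this listing shows the real pattern. Below is a minimal sketch of a hypothetical implementor. `PingHeader`, its single field, and the use of `std::io::Result` for the associated types are illustrative assumptions and not part of the crate (laminar's own headers use the crate's internal `Result`).

```
use std::io::Cursor;

use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};

use super::{HeaderReader, HeaderWriter};

/// Hypothetical header carrying a single ping identifier; not an actual laminar header.
pub struct PingHeader {
    ping_id: u16,
}

impl HeaderWriter for PingHeader {
    // Writing appends to a byte buffer and can fail, so the associated Output is a Result.
    type Output = std::io::Result<()>;

    fn parse(&self, buffer: &mut Vec<u8>) -> Self::Output {
        // Serialize the fields in a fixed byte order so the reader can recover them.
        buffer.write_u16::<BigEndian>(self.ping_id)
    }
}

impl HeaderReader for PingHeader {
    type Header = std::io::Result<PingHeader>;

    fn read(rdr: &mut Cursor<&[u8]>) -> Self::Header {
        // Read the fields back in the same order they were written.
        Ok(PingHeader {
            ping_id: rdr.read_u16::<BigEndian>()?,
        })
    }

    fn size() -> u8 {
        2 // a single u16 field
    }
}
```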
/docs/technical/ordering_sequencing_bpmn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Pycckue-Bnepeg/laminar/master/docs/technical/ordering_sequencing_bpmn.png -------------------------------------------------------------------------------- /docs/technical/incoming_packet_processing_bpmn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Pycckue-Bnepeg/laminar/master/docs/technical/incoming_packet_processing_bpmn.png -------------------------------------------------------------------------------- /docs/technical/outgoing_packet_processing_bpmn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Pycckue-Bnepeg/laminar/master/docs/technical/outgoing_packet_processing_bpmn.png -------------------------------------------------------------------------------- /src/test_utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub use fake_socket::FakeSocket; 2 | pub use network_emulator::{EmulatedSocket, NetworkEmulator}; 3 | 4 | mod fake_socket; 5 | mod network_emulator; 6 | -------------------------------------------------------------------------------- /tests/common/mod.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | 3 | pub use self::client::Client; 4 | pub use self::server::{Server, ServerEvent}; 5 | 6 | mod client; 7 | mod server; 8 | 9 | pub fn client_addr() -> SocketAddr { 10 | "127.0.0.1:0".parse().unwrap() 11 | } 12 | -------------------------------------------------------------------------------- /src/packet/header/header_writer.rs: -------------------------------------------------------------------------------- 1 | /// Trait for writing a header 2 | pub trait HeaderWriter { 3 | /// Associated type since we parse the header into an Output 4 | type Output; 5 | 6 | /// Writes the header to the given buffer. 7 | fn parse(&self, buffer: &mut Vec) -> Self::Output; 8 | } 9 | -------------------------------------------------------------------------------- /tests/Makefile: -------------------------------------------------------------------------------- 1 | tester: 2 | cd ..; cargo build --release 3 | 4 | 5 | docker: 6 | cd ..; CROSS_COMPILE=x86_64-linux-musl- cargo build --target=x86_64-unknown-linux-musl --release --features tester 7 | cd ..; mv target/x86_64-unknown-linux-musl/release/laminar-tester scripts/laminar-tester 8 | docker build -t zerosubnet/laminar . 9 | docker push zerosubnet/laminar -------------------------------------------------------------------------------- /bors.toml: -------------------------------------------------------------------------------- 1 | # Test suite must pass on both Travis and AppVeyor before merging into `master`. 2 | status = [ 3 | "continuous-integration/travis-ci/push" 4 | ] 5 | 6 | # Do not allow pull requests with these labels to be merged. 
7 | block_labels = [ 8 | "status: postponed", 9 | "status: stalled", 10 | "status: wontfix", 11 | "status: working" 12 | ] 13 | -------------------------------------------------------------------------------- /src/packet/header/header_reader.rs: -------------------------------------------------------------------------------- 1 | use std::io::Cursor; 2 | 3 | /// Trait that supports reading a Header from a packet 4 | pub trait HeaderReader { 5 | /// Associated type for the HeaderReader, since it reads it from a Header 6 | type Header; 7 | 8 | /// Reads the specified header from the given Cursor. 9 | fn read(rdr: &mut Cursor<&[u8]>) -> Self::Header; 10 | 11 | /// Returns the size of the header. 12 | fn size() -> u8; 13 | } 14 | -------------------------------------------------------------------------------- /.github/workflows/build_and_test.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | test: 10 | name: Running on ${{ matrix.os }} 11 | runs-on: ${{ matrix.os }} 12 | strategy: 13 | matrix: 14 | os: [ubuntu-latest, windows-latest, macOS-latest] 15 | steps: 16 | - uses: actions/checkout@v1 17 | - name: Build 18 | run: cargo build --verbose 19 | - name: Run tests 20 | run: cargo test --verbose 21 | -------------------------------------------------------------------------------- /src/infrastructure.rs: -------------------------------------------------------------------------------- 1 | //! This module provides the logic around the processing of the packet. 2 | //! Like ordering, sequencing, controlling congestion, fragmentation, and packet acknowledgment. 3 | 4 | pub use self::acknowledgment::AcknowledgmentHandler; 5 | pub use self::acknowledgment::SentPacket; 6 | pub use self::congestion::CongestionHandler; 7 | pub use self::fragmenter::Fragmentation; 8 | 9 | mod acknowledgment; 10 | mod congestion; 11 | mod fragmenter; 12 | 13 | pub mod arranging; 14 | -------------------------------------------------------------------------------- /docs/md_book/src/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | - [Intro](intro.md) 4 | - [Important Notices](important.md) 5 | - [Protocols](protocols.md) 6 | - [Heartbeat](heartbeat.md) 7 | - [Fragmentation](fragmentation.md) 8 | - [Reliability](reliability/basics.md) 9 | - [Basics](reliability/basics.md) 10 | - [Reliability](reliability/reliability.md) 11 | - [Ordering](reliability/ordering.md) 12 | - [Congestion Avoidance](congestion_avoidence/congestion_avoidance.md) 13 | - [Whit RTT](congestion_avoidence/rtt.md) -------------------------------------------------------------------------------- /tests/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | server: 4 | image: "laminar-tester:latest" 5 | command: server --bind-host 0.0.0.0 6 | ports: 7 | - "2264:2264/udp" 8 | environment: 9 | - RUST_LOG=debug 10 | - RUST_BACKTRACE=1 11 | 12 | client: 13 | image: "laminar-tester:latest" 14 | command: client --connect-host server --bind-host 0.0.0.0 15 | ports: 16 | - "2265:2265/udp" 17 | environment: 18 | - RUST_LOG=debug 19 | - RUST_BACKTRACE=1 20 | depends_on: 21 | - server 22 | -------------------------------------------------------------------------------- /src/net/events.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | 3 | use 
crate::packet::Packet; 4 | 5 | /// Events that can occur in `laminar` and that will be pushed through the `event_receiver` returned by `Socket::bind`. 6 | #[derive(Debug, PartialEq)] 7 | pub enum SocketEvent { 8 | /// A packet was received from a client. 9 | Packet(Packet), 10 | /// A new client connected. 11 | /// Clients are uniquely identified by the ip:port combination at this layer. 12 | Connect(SocketAddr), 13 | /// The client has been idling for a configurable amount of time. 14 | /// You can control the timeout in the config. 15 | Timeout(SocketAddr), 16 | } 17 | -------------------------------------------------------------------------------- /src/packet/header.rs: -------------------------------------------------------------------------------- 1 | //! This module provides parses and readers for the headers that could be appended to any packet. 2 | //! We use headers to control reliability, fragmentation, and ordering. 3 | 4 | pub use self::acked_packet_header::AckedPacketHeader; 5 | pub use self::arranging_header::ArrangingHeader; 6 | pub use self::fragment_header::FragmentHeader; 7 | pub use self::header_reader::HeaderReader; 8 | pub use self::header_writer::HeaderWriter; 9 | pub use self::standard_header::StandardHeader; 10 | 11 | mod acked_packet_header; 12 | mod arranging_header; 13 | mod fragment_header; 14 | mod header_reader; 15 | mod header_writer; 16 | mod standard_header; 17 | -------------------------------------------------------------------------------- /ci/coverage.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Strict mode: http://redsymbol.net/articles/unofficial-bash-strict-mode/ 3 | set -euo pipefail 4 | IFS=$'\n\t' 5 | 6 | wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz 7 | tar xzf master.tar.gz 8 | cd kcov-master 9 | mkdir build 10 | cd build 11 | cmake .. 12 | make 13 | make install DESTDIR=../../kcov-build 14 | cd ../.. 15 | rm -rf kcov-master 16 | for file in target/debug/laminar-*[^\.d]; do mkdir -p "target/cov/$(basename $file)"; ./kcov-build/usr/local/bin/kcov --exclude-pattern=/.cargo,/usr/lib --verify "target/cov/$(basename $file)" "$file"; done 17 | bash <(curl -s https://codecov.io/bash) 18 | echo "Uploaded code coverage" 19 | -------------------------------------------------------------------------------- /src/packet.rs: -------------------------------------------------------------------------------- 1 | //! This module provides all the logic around the packet, such as reading, parsing, and constructing headers. 2 | 3 | pub use self::enums::{DeliveryGuarantee, OrderingGuarantee, PacketType}; 4 | pub use self::outgoing::{OutgoingPacket, OutgoingPacketBuilder}; 5 | pub use self::packet_reader::PacketReader; 6 | pub use self::packet_structure::{Packet, PacketInfo}; 7 | pub use self::process_result::{IncomingPackets, OutgoingPackets}; 8 | 9 | pub mod header; 10 | 11 | mod enums; 12 | mod outgoing; 13 | mod packet_reader; 14 | mod packet_structure; 15 | mod process_result; 16 | 17 | pub type SequenceNumber = u16; 18 | 19 | pub trait EnumConverter { 20 | type Enum; 21 | 22 | fn to_u8(&self) -> u8; 23 | } 24 | -------------------------------------------------------------------------------- /src/sequence_buffer/congestion_data.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use crate::packet::SequenceNumber; 4 | 5 | #[derive(Clone)] 6 | /// This contains the information required to reassemble fragments. 
7 | pub struct CongestionData { 8 | pub sequence: SequenceNumber, 9 | pub sending_time: Instant, 10 | } 11 | 12 | impl CongestionData { 13 | pub fn new(sequence: SequenceNumber, sending_time: Instant) -> Self { 14 | CongestionData { 15 | sequence, 16 | sending_time, 17 | } 18 | } 19 | } 20 | 21 | impl Default for CongestionData { 22 | fn default() -> Self { 23 | CongestionData { 24 | sequence: 0, 25 | sending_time: Instant::now(), 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /docs/md_book/src/congestion_avoidence/congestion_avoidance.md: -------------------------------------------------------------------------------- 1 | # Congestion Avoidance 2 | So let's start at what this congestion avoidance is if we send just packets without caring about the internet speed of the client we can flood the network. 3 | Since the router tries to deliver all packages it buffers up all packets in the cache. 4 | We do not want the router to buffer up packets instead it should drop them. 5 | We need to try to avoid sending too much bandwidth in the first place, and then if we detect congestion, we attempt to back off and send even less. 6 | 7 | There are a few methods we can implement to defeat congestion. 8 | 1. With [RTT](./rtt.md) 9 | 2. With packet loss [TODO] 10 | 11 | Unfortunately, congestion avoidance has not yet been implemented for laminar. -------------------------------------------------------------------------------- /docs/FEATURES.md: -------------------------------------------------------------------------------- 1 | # Features 2 | These are all the features we have and don't have. 3 | ## Added 4 | 5 | * [x] Fragmentation 6 | * [x] Unreliable packets 7 | * [x] Unreliable sequenced packets 8 | * [x] Reliable unordered packets 9 | * [x] Reliable ordered packets 10 | * [x] Reliable sequenced packets 11 | * [x] Fragmentation 12 | * [x] Rtt estimations 13 | * [x] Protocol version monitoring 14 | * [x] Basic connection management 15 | * [x] Heartbeat 16 | * [x] Basic DoS mitigation 17 | * [x] High Timing control 18 | * [x] Protocol Versioning 19 | * [x] Well-tested by integration and unit tests 20 | * [x] Can be used by multiple threads (Sender, Receiver) 21 | 22 | ## Planned 23 | 24 | * [ ] Handshake Protocol 25 | * [ ] Advanced Connection Management 26 | * [ ] Cryptography 27 | * [ ] Congestion Control 28 | -------------------------------------------------------------------------------- /tests/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.32 as build 2 | 3 | # create a new empty shell project 4 | RUN USER=root cargo new --bin laminar 5 | WORKDIR /laminar 6 | 7 | # copy over manifests 8 | COPY ./Cargo.lock ./Cargo.lock 9 | COPY ./Cargo.toml ./Cargo.toml 10 | COPY ./src/bin ./src/bin 11 | COPY ./benches ./benches 12 | 13 | # caching build deps 14 | RUN cargo build --release 15 | RUN rm src/*.rs 16 | 17 | # copy source 18 | COPY ./src ./src 19 | COPY ./examples ./examples 20 | 21 | # build for release 22 | RUN cargo clean 23 | RUN cargo build --features="tester" --release 24 | 25 | # final base 26 | FROM debian:stretch-slim 27 | 28 | # copy the build artifact from the build stage and run 29 | COPY --from=build /laminar/target/release/laminar-tester /usr/bin/laminar-tester 30 | ENTRYPOINT ["laminar-tester"] 31 | -------------------------------------------------------------------------------- /src/net.rs: 
-------------------------------------------------------------------------------- 1 | //! This module provides the logic between the low-level abstract types and the types that the user will be interacting with. 2 | //! You can think of the socket, connection management, congestion control. 3 | 4 | pub use self::connection::{Connection, ConnectionEventAddress, ConnectionMessenger}; 5 | pub use self::connection_manager::{ConnectionManager, DatagramSocket}; 6 | pub use self::events::SocketEvent; 7 | pub use self::link_conditioner::LinkConditioner; 8 | pub use self::quality::{NetworkQuality, RttMeasurer}; 9 | pub use self::socket::Socket; 10 | pub use self::virtual_connection::VirtualConnection; 11 | 12 | mod connection; 13 | mod connection_impl; 14 | mod connection_manager; 15 | mod events; 16 | mod link_conditioner; 17 | mod quality; 18 | mod socket; 19 | mod virtual_connection; 20 | 21 | pub mod constants; 22 | -------------------------------------------------------------------------------- /docs/technical/README.md: -------------------------------------------------------------------------------- 1 | This folder contains some diagrams and images relteaded to laminar. 2 | 3 | - `header_design.png` 4 | 5 | This visualizes our packet headers. 6 | - `incoming_packet_procssing_bpmn.png` 7 | 8 | This is a BPMN-diagram that describes the incoming packet processing process. 9 | 10 | - `ougoing_packet_procssing_bpmn.png` 11 | 12 | This is a BPMN-diagram that describes the outgoing packet processing process. 13 | 14 | - `ordering_sequencing_bpmn.png` 15 | 16 | This is a BPMN-diagram showing the sequencing and ordering processes. 17 | 18 | **Remarks** 19 | 20 | Business Process Model and Notation (BPMN) is a graphical representation for specifying business processes in a business process model. 21 | You could read more about that over [here](https://en.wikipedia.org/wiki/Business_Process_Model_and_Notation). It can also be applied to software processes. 22 | I have to note here that there are some syntactic errors in those diagrams which are left to be resolved any time soon. -------------------------------------------------------------------------------- /docs/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Lucio Franco 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /docs/md_book/src/packet_header.md: -------------------------------------------------------------------------------- 1 | # Packet Headers 2 | In this topic we'll discuss the different headers we are pre-pending to the data sent via laminar. 3 | We use different headers in different scenario's, we do this to reduce the packet size. 4 | 5 | Take a look over here: [image](/docs/technical/hearder_design.png) for the complete design. 6 | 7 | - `Standard header` 8 | 9 | The first header is the `StandardHeader`, this is included for each packet. 10 | It contains information like: protocol version, packet type, delivery and ordering guarantees. 11 | 12 | - `AckedHeader` 13 | 14 | This header will be included to the header if the packet is reliable. 15 | It contains information for our acknowledgment system. 16 | 17 | - `FragmentHeader` 18 | 19 | This header will be included if the packet payload is bigger than the MTU and thus needs to be [fragmented](./fragmentation.md). 20 | 21 | - `ArrangingHeader` 22 | 23 | This header will be included if the packet needs to be arranged e.g ordered, sequenced. 24 | It contains information like the stream it will be arranged on and an identifier for this packet. 25 | -------------------------------------------------------------------------------- /docs/md_book/src/reliability/basics.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | The internet is a dangerous place, and before you know it your data is gone or your data arrives duplicated because your data is split up along the way to its final destination. 3 | In order to have more control over the way in which the data is transported, we have invented protocols. 4 | 5 | In this chapter we will consider how laminar gives you more control over the transport of data. 6 | 7 | ## Important 8 | TCP is made for reliability and does this very well. 9 | We have been asked many times by people why reliability does not work well or is slow in laminar. 10 | Important to know is that laminar has reliability as an option but is not focused on trying to be faster and better than TCP. 11 | For fast-paced multiplayer online games, it is not desirable to use TCP because a delay in a packet can have a major impact on all subsequent packets. 12 | Reliability, after all, is less important for fast-paced FPS games; UDP. 13 | TCP should be used when the need for reliability trumps the need for low latency 14 | That said, laminar will support acknowledgement of fragments in the future. Checkout [fragmentation](../fragmentation.md) for more info. 15 | 16 | - [Ordering](ordering.md) 17 | How can we control the way the data is ordered. 18 | - [Reliability](reliability.md) 19 | How can we control the arrival of our data. 
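In laminar these two knobs map directly onto the `Packet` constructors. The sketch below builds one packet per guarantee level for the same payload; `demo_packets`, the payload contents and the stream ids are illustrative, and while the two-argument constructors appear verbatim elsewhere in this repository, the assumption that the ordered/sequenced variants take an optional stream id should be checked against the crate docs.

```
use std::net::SocketAddr;

use laminar::Packet;

/// Builds one packet per guarantee level for the same payload.
fn demo_packets(server: SocketAddr) -> Vec<Packet> {
    let payload = b"player position".to_vec();

    vec![
        // Fire-and-forget: the packet may be lost, duplicated, or arrive out of order.
        Packet::unreliable(server, payload.clone()),
        // Resent until acknowledged, but no ordering is enforced.
        Packet::reliable_unordered(server, payload.clone()),
        // Resent until acknowledged and handed to the receiver in send order on stream 1.
        Packet::reliable_ordered(server, payload.clone(), Some(1)),
        // Not resent; the receiver drops anything older than the newest packet on stream 2.
        Packet::unreliable_sequenced(server, payload, Some(2)),
    ]
}
```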
-------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "laminar" 3 | version = "0.3.2" 4 | authors = [ 5 | "Lucio Franco ", 6 | "Fletcher Haynes ", 7 | "TimonPost ", 8 | "Justin LeFebvre " 9 | ] 10 | description = "A simple semi-reliable UDP protocol for multiplayer games" 11 | keywords = ["gamedev", "networking", "udp", "amethyst"] 12 | categories = ["game-engines", "network-programming"] 13 | exclude = ["examples/*", "docs/*", "benches/*"] 14 | 15 | readme = "README.md" 16 | license = "MIT/Apache-2.0" 17 | 18 | repository = "https://github.com/amethyst/laminar" 19 | autobenches = false 20 | edition = "2018" 21 | 22 | [dependencies] 23 | byteorder = "1.3" 24 | crc = "1.8" 25 | crossbeam-channel = "0.3" 26 | lazy_static = "1.4" 27 | log = "0.4" 28 | rand = "0.7" 29 | rand_pcg = "0.2" 30 | 31 | clap = { version = "2.33", features = ["yaml"], optional = true } 32 | env_logger = { version = "0.6", optional = true } 33 | 34 | [dev-dependencies] 35 | bincode = "1.1.4" 36 | criterion = "0.3" 37 | serde = "1.0" 38 | serde_derive = "1.0" 39 | quickcheck = "0.9" 40 | quickcheck_macros = "0.8" 41 | 42 | [features] 43 | tester = [ 44 | "env_logger", 45 | "clap" 46 | ] 47 | 48 | [[bench]] 49 | name = "packet_processing" 50 | harness = false 51 | 52 | [[bin]] 53 | name = "laminar-tester" 54 | required-features = ["tester"] -------------------------------------------------------------------------------- /src/sequence_buffer/reassembly_data.rs: -------------------------------------------------------------------------------- 1 | use crate::net::constants::MAX_FRAGMENTS_DEFAULT; 2 | use crate::packet::header::AckedPacketHeader; 3 | use crate::packet::SequenceNumber; 4 | 5 | #[derive(Clone)] 6 | /// This contains the information required to reassemble fragments. 7 | pub struct ReassemblyData { 8 | pub sequence: SequenceNumber, 9 | pub num_fragments_received: u8, 10 | pub num_fragments_total: u8, 11 | pub buffer: Vec, 12 | pub fragments_received: [bool; MAX_FRAGMENTS_DEFAULT as usize], 13 | pub acked_header: Option, 14 | } 15 | 16 | impl ReassemblyData { 17 | pub fn new(sequence: SequenceNumber, num_fragments_total: u8, prealloc: usize) -> Self { 18 | Self { 19 | sequence, 20 | num_fragments_received: 0, 21 | num_fragments_total, 22 | buffer: Vec::with_capacity(prealloc), 23 | fragments_received: [false; MAX_FRAGMENTS_DEFAULT as usize], 24 | acked_header: None, 25 | } 26 | } 27 | } 28 | 29 | impl Default for ReassemblyData { 30 | fn default() -> Self { 31 | Self { 32 | sequence: 0, 33 | num_fragments_received: 0, 34 | num_fragments_total: 0, 35 | buffer: Vec::with_capacity(1024), 36 | fragments_received: [false; MAX_FRAGMENTS_DEFAULT as usize], 37 | acked_header: None, 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /src/net/constants.rs: -------------------------------------------------------------------------------- 1 | /// The size of the fragment header. 2 | pub const FRAGMENT_HEADER_SIZE: u8 = 4; 3 | /// The size of the acknowledgment header. 4 | pub const ACKED_PACKET_HEADER: u8 = 8; 5 | /// The size of the arranging header. 6 | pub const ARRANGING_PACKET_HEADER: u8 = 3; 7 | /// The size of the standard header. 8 | pub const STANDARD_HEADER_SIZE: u8 = 5; 9 | /// The ordering stream that will be used to order on if none was specified. 
10 | pub const DEFAULT_ORDERING_STREAM: u8 = 255; 11 | /// The sequencing stream that will be used to sequence packets on if none was specified. 12 | pub const DEFAULT_SEQUENCING_STREAM: u8 = 255; 13 | /// Default maximal number of fragments to size. 14 | pub const MAX_FRAGMENTS_DEFAULT: u16 = 16; 15 | /// Default maximal size of each fragment. 16 | pub const FRAGMENT_SIZE_DEFAULT: u16 = 1024; 17 | /// Maximum transmission unit of the payload. 18 | /// 19 | /// Derived from ethernet_mtu - ipv6_header_size - udp_header_size - packet header size 20 | /// 1452 = 1500 - 40 - 8 - 8 21 | /// 22 | /// This is not strictly guaranteed -- there may be less room in an ethernet frame than this due to 23 | /// variability in ipv6 header size. 24 | pub const DEFAULT_MTU: u16 = 1452; 25 | /// This is the current protocol version. 26 | /// 27 | /// It is used for: 28 | /// - Generating crc16 for the packet header. 29 | /// - Validating if arriving packets have the same protocol version. 30 | pub const PROTOCOL_VERSION: &str = "laminar-0.1.0"; 31 | -------------------------------------------------------------------------------- /docs/md_book/src/important.md: -------------------------------------------------------------------------------- 1 | ## Some Important Notices 2 | 3 | There are a few important things you need to know in order to use laminar appropriately. 4 | If you do not follow these rules, then it is possible that either laminar is not suitable for your use case, and/or it will not work as expected. 5 | 6 | 1. Packet Consistency: 7 | 8 | Make sure that the client and the server send messages to each other at a consistent rate, i.e. 30Hz. If you don't do this, 9 | the connection may close, or cause the reliability and order aspect of laminar to be laggy. For more information checkout [heartbeat implementation](heartbeat.md). 10 | 11 | 2. Reliability and transferring big data: 12 | 13 | Laminar is not designed for transferring large amounts of data. 14 | The [fragments](fragmentation.md) of the fragmented packet will not be acknowledged. 15 | So if a fragment is lost - the whole packet is lost. This will likely be improved in the future, for more information check out [fragmentation](fragmentation.md) and [reliability](reliability/basics.md). 16 | 17 | 3. DoS Protection 18 | 19 | DoS protection ensures that new clients are unable to use memory resources on the machine. 20 | If this were the case, some malicious actor could easily spoof packets and DoS our server with new connections. 21 | 22 | **Make sure to respond to a message from another endpoint. Only if we respond, will the connection to the endpoint be stored.** 23 | 24 | In the future we want to have a [handshaking process](https://github.com/amethyst/laminar/issues/156) to simplify this process. 25 | 26 | 27 | [config]: https://github.com/amethyst/laminar/blob/master/src/config.rs#L8 28 | [DoS]: https://github.com/amethyst/laminar/issues/187 29 | -------------------------------------------------------------------------------- /src/protocol_version.rs: -------------------------------------------------------------------------------- 1 | use crc::crc16; 2 | 3 | use lazy_static::lazy_static; 4 | 5 | pub use crate::net::constants::PROTOCOL_VERSION; 6 | 7 | lazy_static! { 8 | // The CRC16 of the current protocol version. 9 | static ref VERSION_CRC16: u16 = crc16::checksum_x25(PROTOCOL_VERSION.as_bytes()); 10 | } 11 | 12 | /// Wrapper to provide some functions to perform with the current protocol version. 
13 | pub struct ProtocolVersion; 14 | 15 | impl ProtocolVersion { 16 | /// Returns the current protocol version. 17 | #[inline] 18 | #[cfg(test)] 19 | pub fn get_version() -> &'static str { 20 | PROTOCOL_VERSION 21 | } 22 | 23 | /// This will return the crc16 from the current protocol version. 24 | #[inline] 25 | pub fn get_crc16() -> u16 { 26 | *VERSION_CRC16 27 | } 28 | 29 | /// Validate a crc16 with the current protocol version and return the results. 30 | #[inline] 31 | pub fn valid_version(protocol_version_crc16: u16) -> bool { 32 | protocol_version_crc16 == ProtocolVersion::get_crc16() 33 | } 34 | } 35 | 36 | #[cfg(test)] 37 | mod test { 38 | use crate::net::constants::PROTOCOL_VERSION; 39 | 40 | use super::*; 41 | 42 | #[test] 43 | fn valid_version() { 44 | let protocol_id = crc16::checksum_x25(PROTOCOL_VERSION.as_bytes()); 45 | assert!(ProtocolVersion::valid_version(protocol_id)); 46 | } 47 | 48 | #[test] 49 | fn not_valid_version() { 50 | let protocol_id = crc16::checksum_x25(b"not-laminar"); 51 | assert!(!ProtocolVersion::valid_version(protocol_id)); 52 | } 53 | 54 | #[test] 55 | fn get_crc16() { 56 | assert_eq!(ProtocolVersion::get_crc16(), *VERSION_CRC16); 57 | } 58 | 59 | #[test] 60 | fn get_version() { 61 | assert_eq!(ProtocolVersion::get_version(), PROTOCOL_VERSION); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /docs/md_book/src/intro.md: -------------------------------------------------------------------------------- 1 | # This book is still in active development. 2 | 3 | ## Introduction 4 | 5 | Welcome! This book will teach you everything you need to know about networking with Laminar. 6 | We will discuss important parts of network programming, why we made certain decisions and some explanations about networking concepts in general. 7 | 8 | Laminar is free and open source software, distributed under a dual license of [MIT][ml] 9 | and [Apache][al]. This means that the engine is provided to you at no cost 10 | and its source code is completely yours to tinker with. The code is available on 11 | [GitHub][am]. Contributions and feature requests will always be welcomed! 12 | 13 | [ml]: https://github.com/amethyst/laminar/blob/master/docs/LICENSE-MIT 14 | [al]: https://github.com/amethyst/laminar/blob/master/docs/LICENSE-APACHE 15 | [am]: https://github.com/amethyst/laminar/tree/master 16 | 17 | ## Motivation 18 | Laminar is fully written in Rust and therefore has no garbage collector, no data-races, and is completely memory safe. 19 | That's why Laminar is a good candidate to be a safe and better replacement for other reliable-UDP implementations. 20 | This library is originally written for use in the Amethyst game engine, however, Laminar can operate fully without Amethyst. 21 | 22 | ## Similar Projects 23 | We used some inspiration from other similar projects. 24 | 25 | - [NetCode IO, C++ with Go, Rust, C# bindings](https://github.com/networkprotocol/netcode.io) 26 | - [RakNet, C++](https://github.com/SLikeSoft/SLikeNet) 27 | - [Steam Network Socket, , C++](https://github.com/ValveSoftware/GameNetworkingSockets) 28 | - [LiteNetLib, C#](https://github.com/RevenantX/LiteNetLib) 29 | - [ENet, C](http://enet.bespin.org/) 30 | 31 | ## Contributing 32 | We are always happy to welcome new contributors! 33 | 34 | If you want to contribute, or have questions, let us know either on [GitHub][db], or on [Discord][di] (#net). 
35 | 36 | [di]: https://discord.gg/amethyst 37 | [db]: https://github.com/amethyst/laminar/ 38 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Laminar is an application-level transport protocol which provides configurable reliability and ordering guarantees built on top of UDP. 2 | //! It focuses on fast-paced fps-games and provides a lightweight, message-based interface. 3 | //! 4 | //! Laminar was designed to be used within the [Amethyst][amethyst] game engine but is usable without it. 5 | //! 6 | //! [amethyst]: https://github.com/amethyst/amethyst 7 | //! 8 | //! # Concepts 9 | //! 10 | //! This library is loosely based off of [Gaffer on Games][gog] and has features similar to RakNet, Steam Socket, and netcode.io. 11 | //! The idea is to provide a native Rust low-level UDP-protocol which supports the use of cases of video games that require multiplayer features. 12 | //! The library itself provides a few low-level types of packets that provide different types of guarantees. The most 13 | //! basic are unreliable and reliable packets. Ordering, sequencing can be done on multiple streams. 14 | //! For more information, read the projects [README.md][readme], [book][book], [docs][docs] or [examples][examples]. 15 | //! 16 | //! [gog]: https://gafferongames.com/ 17 | //! [readme]: https://github.com/amethyst/laminar/blob/master/README.md 18 | //! [book]: https://github.com/amethyst/laminar/tree/master/docs/md_book 19 | //! [docs]: https://docs.rs/laminar/ 20 | //! [examples]: https://github.com/amethyst/laminar/tree/master/examples 21 | 22 | #![warn(missing_docs)] 23 | #![allow(clippy::trivially_copy_pass_by_ref)] 24 | 25 | pub use self::config::Config; 26 | pub use self::error::{ErrorKind, Result}; 27 | pub use self::net::{LinkConditioner, Socket, SocketEvent}; 28 | pub use self::packet::{DeliveryGuarantee, OrderingGuarantee, Packet}; 29 | #[cfg(feature = "tester")] 30 | pub use self::throughput::ThroughputMonitoring; 31 | 32 | mod config; 33 | mod either; 34 | mod error; 35 | mod infrastructure; 36 | mod net; 37 | mod packet; 38 | mod protocol_version; 39 | mod sequence_buffer; 40 | 41 | #[cfg(feature = "tester")] 42 | mod throughput; 43 | 44 | #[cfg(test)] 45 | pub mod test_utils; 46 | -------------------------------------------------------------------------------- /src/net/link_conditioner.rs: -------------------------------------------------------------------------------- 1 | //! This module provides means to simulate various network conditions for development. The primary focus is 2 | //! for testing applications under adverse conditions such as high packet loss networks, or high latency 3 | //! networks. This is not in heavy use yet, hence the allowing dead code. These will be removed as our testing 4 | //! becomes more sophisticated. 5 | 6 | use std::time::Duration; 7 | 8 | use rand::Rng; 9 | use rand_pcg::Pcg64Mcg as Random; 10 | 11 | /// Network simulator. Used to simulate network conditions as dropped packets and packet delays. 12 | /// For use in [FakeSocket::set_link_conditioner](crate::test_utils::FakeSocket::set_link_conditioner). 
13 | #[derive(Clone, Debug)] 14 | pub struct LinkConditioner { 15 | // Value between 0 and 1, representing the % change a packet will be dropped on sending 16 | packet_loss: f64, 17 | // Duration of the delay imposed between packets 18 | latency: Duration, 19 | // Random number generator 20 | random: Random, 21 | } 22 | 23 | impl LinkConditioner { 24 | /// Creates and returns a LinkConditioner 25 | #[allow(dead_code)] 26 | pub fn new() -> LinkConditioner { 27 | LinkConditioner { 28 | packet_loss: 0.0, 29 | latency: Duration::default(), 30 | random: Random::new(0), 31 | } 32 | } 33 | 34 | /// Sets the packet loss rate of Link Conditioner 35 | #[allow(dead_code)] 36 | pub fn set_packet_loss(&mut self, rate: f64) { 37 | self.packet_loss = rate; 38 | } 39 | 40 | /// Sets the latency the link conditioner should apply to each packet 41 | #[allow(dead_code)] 42 | pub fn set_latency(&mut self, latency: Duration) { 43 | self.latency = latency 44 | } 45 | 46 | /// Function that checks to see if a packet should be dropped or not 47 | pub fn should_send(&mut self) -> bool { 48 | self.random.gen_range(0.0, 1.0) >= self.packet_loss 49 | } 50 | } 51 | 52 | impl Default for LinkConditioner { 53 | fn default() -> Self { 54 | Self::new() 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /docs/md_book/src/congestion_avoidence/rtt.md: -------------------------------------------------------------------------------- 1 | # Round Trip Time (RTT) 2 | The time between you sending the packet and you receiving an acknowledgment from the other side is called RTT. 3 | To avoid congestion we first need to find a way to calculate the `RTT` value of our connection so we can decide on top of that value if we have bad or good internet speeds. 4 | 5 | _Smoothing factor_ 6 | 7 | So you could say: "very simple, measure the time between sending and receiving you got the `RTT` and you're done right?" No! This is because a packet can travel any path over the internet the `RTT` can always defer every time you calculate it. And imagine a short internet lag we will directly get a huge RTT back. So we need to smooth out that RTT factor by some amount. Gaffer says that 10% of the RTT will be just fine. With this smoothed RTT we will be able to add it to our current RTT. 8 | 9 | _Allowed RTT value_ 10 | 11 | So now we have the smoothed RTT and our current RTT, GREAT! But RTT on its own is not bad. So there may be some max allowed RTT. We need to subtract that amount from our measured RTT multiplied by the smoothing factor. 12 | 13 | The formula would look like the following: 14 | 15 | ``` 16 | // rtt_max_value is in ms 17 | // rtt_smoothing_factor is in % 18 | let new_rtt_value = (rtt - rtt_max_value) * rtt_smoothing_factor. 19 | ``` 20 | Lets look at an example with numbers. The RTT values are in milliseconds. 21 | 22 | _bad internet_ 23 | ``` 24 | // this will result into: 5 25 | let new_rtt_value = (300 - 250) * 0.10. 26 | ``` 27 | 28 | _good internet_ 29 | ``` 30 | // this will result into: -15 31 | let new_rtt_value = (100 - 250) * 0.10. 32 | ``` 33 | 34 | As you see when our calculation is under 250ms we get a negative result, which is in this case positive. 35 | When our calculation is above 250ms it will be positive, which is in this case negative. 36 | 37 | So each time we receive an acknowledgment we can add our result, of the above formula, to the RTT time saved in the connection. 
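Put together, the per-acknowledgment update could look like the sketch below. `RttTracker`, the constant and field names, and the `f32` millisecond representation are illustrative assumptions made for this example; laminar's own bookkeeping lives in `RttMeasurer` (`src/net/quality.rs`).

```
/// Constants mirroring the numbers used in the example above.
const RTT_MAX_VALUE: f32 = 250.0; // ms
const RTT_SMOOTHING_FACTOR: f32 = 0.10;

/// Running RTT estimate for one connection (illustrative, not laminar's actual type).
struct RttTracker {
    rtt: f32, // ms
}

impl RttTracker {
    /// Called whenever an acknowledgment arrives, with the measured round trip in ms.
    fn on_ack(&mut self, measured_rtt: f32) {
        // (300 - 250) * 0.10 = +5 on a slow link, (100 - 250) * 0.10 = -15 on a fast one.
        let correction = (measured_rtt - RTT_MAX_VALUE) * RTT_SMOOTHING_FACTOR;
        self.rtt += correction;
    }
}

fn main() {
    let mut tracker = RttTracker { rtt: 0.0 };
    tracker.on_ack(300.0); // bad internet: estimate moves up by 5 ms
    tracker.on_ack(100.0); // good internet: estimate moves down by 15 ms
    println!("smoothed rtt offset: {} ms", tracker.rtt); // prints -10
}
```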
38 | 39 | ## Interesting Reads 40 | - [Wikipedia](https://en.wikipedia.org/wiki/Round-trip_delay_time) 41 | -------------------------------------------------------------------------------- /tests/fragmentation_packets_test.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "tester")] 2 | use std::net::SocketAddr; 3 | #[cfg(feature = "tester")] 4 | use std::{thread, time::Duration}; 5 | 6 | #[cfg(feature = "tester")] 7 | use log::debug; 8 | 9 | #[cfg(feature = "tester")] 10 | use common::{client_addr, Client, Server, ServerEvent}; 11 | #[cfg(feature = "tester")] 12 | use laminar::{DeliveryGuarantee, OrderingGuarantee, Packet}; 13 | 14 | #[cfg(feature = "tester")] 15 | mod common; 16 | 17 | #[test] 18 | #[cfg(feature = "tester")] 19 | fn send_receive_fragment_packets() { 20 | let listen_addr: SocketAddr = "127.0.0.1:12346".parse().unwrap(); 21 | let client_addr = client_addr(); 22 | 23 | let server = Server::new(listen_addr); 24 | 25 | let client = Client::new(Duration::from_millis(1), 5000); 26 | 27 | let assert_function = move |packet: Packet| { 28 | assert_eq!(packet.order_guarantee(), OrderingGuarantee::None); 29 | assert_eq!(packet.delivery_guarantee(), DeliveryGuarantee::Reliable); 30 | assert_eq!(packet.payload(), payload().as_slice()); 31 | }; 32 | 33 | let packet_factory = move || -> Packet { Packet::reliable_unordered(listen_addr, payload()) }; 34 | 35 | let server_handle = server.start_receiving(assert_function); 36 | 37 | client 38 | .run_instance(packet_factory, client_addr) 39 | .wait_until_finished(); 40 | 41 | // give the server time to process all packets. 42 | thread::sleep(Duration::from_millis(500)); 43 | 44 | server_handle.shutdown(); 45 | 46 | for event in server_handle.iter_events().collect::>() { 47 | match event { 48 | ServerEvent::Throughput(throughput) => { 49 | debug!("Throughput: {}", throughput); 50 | } 51 | ServerEvent::AverageThroughput(avg_throughput) => { 52 | debug!("Avg. Throughput: {}", avg_throughput); 53 | } 54 | ServerEvent::TotalSent(total) => { 55 | debug!("Total Packets Received {}", total); 56 | } 57 | _ => debug!("Not handled!"), 58 | } 59 | } 60 | 61 | server_handle.wait_until_finished(); 62 | } 63 | 64 | pub fn payload() -> Vec { 65 | vec![0; 4000] 66 | } 67 | -------------------------------------------------------------------------------- /docs/md_book/src/fragmentation.md: -------------------------------------------------------------------------------- 1 | # Fragmentation 2 | Fragmentation is dividing large packets into smaller fragments so that it can be sent over the network. 3 | 4 | TCP will automatically divide packets into smaller parts if you send large amounts of data. But UDP doesn't support fragmentation out-of-the-box. 5 | Fortunately, laminar does. 6 | 7 | Fragmentation will be applied to packets larger than the [MTU](https://en.wikipedia.org/wiki/Maximum_transmission_unit) with the following reliability types `Reliable Unordered`, `Reliable Ordered`, `Reliable Sequenced`. 8 | 9 | What is this [MTU](https://en.wikipedia.org/wiki/Maximum_transmission_unit)? This stands for 'maximum transmission unit'. 10 | On the Internet today (2016, IPv4) the real-world MTU is 1500 bytes. 11 | When a packet is larger than 1500 bytes we need to split it up into different fragments. 12 | Why 1500? That’s the default MTU for MacOS X and Windows. 13 | 14 | You should take note that each fragment will not be acknowledged with our implementation. 
15 | So if you would send 200.000 bytes (+- 133 fragments) the risk of one fragment being dropped will be huge. 16 | If you really want to send large amounts of data over the line go for TCP instead, since that protocol is built for reliability and large data. 17 | 18 | When sending small packets with the size of about 4000 bytes (4 fragments) this method will work fine. And won't probably cause any problems. 19 | We are planning to support also [sending larger packets](https://gafferongames.com/post/sending_large_blocks_of_data/) with acknowledgments. 20 | 21 | ## Laminar's implementation 22 | Laminar fragments your packet if it exceeds the [fragment size](https://github.com/amethyst/laminar/blob/master/src/config.rs#L29). 23 | 24 | Fragments of a large packet are not yet acknowledged This is a problem if you want to send too large files. 25 | If you want to send really large files, I advise you to split up your package and send it in pieces with the option 'reliable ordered'. 26 | In the future laminar will be able to send large packets with acknowledgement. 27 | 28 | ## Interesting Reads 29 | - [Gaffer about Fragmentation](https://gafferongames.com/post/packet_fragmentation_and_reassembly/) 30 | - [Wikipedia](https://en.wikipedia.org/wiki/IP_fragmentation) 31 | - [MTU](https://en.wikipedia.org/wiki/Maximum_transmission_unit) -------------------------------------------------------------------------------- /docs/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | This document contains information about the releases of this crate. 3 | 4 | ## [0.3.2] - 2019-09-24 5 | - Acknowledgement is sent after all fragments arrived 6 | - Don't read out-of-bounds on malformed headers 7 | 8 | ## [0.3.1] - 2019-09-16 9 | - Documentation improvements (docs, book, readme) 10 | - Allow a Socket to be in blocking mode 11 | - Default heartbeat functionality 12 | - Series of patches and bug-fixes for ordering, sequencing. 13 | - Disconnect the connection after sending N un-acked packets 14 | - Dependency maintenance (removed and increased versions) 15 | - A lot of new unit tests 16 | 17 | ## [0.3.0] - 2019-06-29 18 | - Moved the packet sender and event receiver into socket struct 19 | - Exposed internal SocketAddr 20 | - Introduced a new method to manually step through the polling loop 21 | - Added a number of #[derive(Debug)] lines to Socket and member types 22 | - Implemented basic DoS mitigation 23 | - Added a customizable sleep to the polling loop. Defaults to 1ms 24 | 25 | ## [0.2.3] - 2019-06-13 26 | - Remove error 'WouldBlock' 27 | 28 | ## [0.2.2] - 2019-05-06 29 | - Improved Acknowledgement System 30 | - Fixed bug of not resending dropped packets 31 | 32 | ## [0.2.1] - 2019-05-06 33 | - Yanked version, incorrect code. 34 | 35 | ## [0.2.0] - 2019-04-13 36 | - Introduced Ordering, Sequencing of packets 37 | - Packets can be arranged on different streams. 38 | - A channel-based API, ready to switch over to MIO 39 | - Removed all locking and reference counters 40 | - Increased Unit Test coverage 41 | - Removed some dependencies 42 | - Introduced socket events: connect, timeout, packet 43 | - Bug fixes 44 | - Restructured code for better organization 45 | 46 | ## [0.1.0] - 2018-11-12 47 | The Networking team is happy to announce the release of `0.1.0`` of the [laminar crate](https://github.com/amethyst/laminar). 48 | It provides UDP networking modified for the needs of game networking. 
49 | Most of the techniques used were published and detailed by [Glenn Fiedler](https://gafferongames.com/). 50 | We’d like to extend a special thanks to him and his articles. 51 | 52 | ### Added 53 | 54 | - UDP-based protocol 55 | - Automatic Fragmentation 56 | - RTT estimation 57 | - Connection tracking 58 | - Unreliable and Reliable sending of packets 59 | - Protocol version monitoring 60 | - A link conditioner to simulate packet loss and latency 61 | - Good error handling with **zero** panics 62 | - Well tested by integration and unit tests 63 | - Benchmarks 64 | -------------------------------------------------------------------------------- /examples/udp.rs: -------------------------------------------------------------------------------- 1 | //! This module provides examples for the UDP api. 2 | //! 1. sending data 3 | //! 2. receiving data 4 | //! 3. constructing the packet for sending. 5 | use std::net::SocketAddr; 6 | 7 | use laminar::{Packet, Result, Socket, SocketEvent}; 8 | 9 | /// The socket address of where the server is located. 10 | const SERVER_ADDR: &'static str = "127.0.0.1:12345"; 11 | // The client address from where the data is sent. 12 | const CLIENT_ADDR: &'static str = "127.0.0.1:12346"; 13 | 14 | fn client_address() -> SocketAddr { 15 | CLIENT_ADDR.parse().unwrap() 16 | } 17 | 18 | fn server_address() -> SocketAddr { 19 | SERVER_ADDR.parse().unwrap() 20 | } 21 | 22 | /// This is an example of how to send data to an specific address. 23 | pub fn send_data() -> Result<()> { 24 | // Setup a udp socket and bind it to the client address. 25 | let mut socket = Socket::bind(client_address()).unwrap(); 26 | 27 | let packet = construct_packet(); 28 | 29 | // next send or packet to the endpoint we earlier putted into the packet. 30 | socket.send(packet) 31 | } 32 | 33 | /// This is an example of how to receive data over udp. 34 | pub fn receive_data() { 35 | // setup an udp socket and bind it to the client address. 36 | let mut socket = Socket::bind(server_address()).unwrap(); 37 | 38 | // Next start receiving. 39 | loop { 40 | if let Some(result) = socket.recv() { 41 | match result { 42 | SocketEvent::Packet(packet) => { 43 | let endpoint: SocketAddr = packet.addr(); 44 | let received_data: &[u8] = packet.payload(); 45 | 46 | // you can here deserialize your bytes into the data you have passed it when sending. 47 | 48 | println!( 49 | "Received packet from: {:?} with length {}", 50 | endpoint, 51 | received_data.len() 52 | ); 53 | } 54 | _ => {} 55 | } 56 | break; 57 | } 58 | } 59 | } 60 | 61 | /// This is an example of how to construct a packet. 62 | pub fn construct_packet() -> Packet { 63 | // this is the destination address of the packet. 64 | let destination: SocketAddr = server_address(); 65 | 66 | // lets construct some payload (raw data) for or packet. 67 | let raw_data = "example data".as_bytes(); 68 | 69 | // lets construct or packet by passing in the destination for this packet and the bytes needed to be send.. 
70 | let packet: Packet = Packet::reliable_unordered(destination, raw_data.to_owned()); 71 | 72 | packet 73 | } 74 | 75 | // TODO: Use functions in example 76 | fn main() {} 77 | -------------------------------------------------------------------------------- /src/test_utils/fake_socket.rs: -------------------------------------------------------------------------------- 1 | use std::{net::SocketAddr, time::Instant}; 2 | 3 | use crossbeam_channel::{Receiver, Sender}; 4 | 5 | use crate::net::{ConnectionManager, LinkConditioner, VirtualConnection}; 6 | use crate::test_utils::*; 7 | use crate::{error::Result, Config, Packet, SocketEvent}; 8 | 9 | /// Provides a similar to the real a `Socket`, but with emulated socket implementation. 10 | pub struct FakeSocket { 11 | handler: ConnectionManager, 12 | } 13 | 14 | impl FakeSocket { 15 | /// Binds to the socket. 16 | pub fn bind(network: &NetworkEmulator, addr: SocketAddr, config: Config) -> Result { 17 | Ok(Self { 18 | handler: ConnectionManager::new(network.new_socket(addr)?, config), 19 | }) 20 | } 21 | 22 | /// Returns a handle to the packet sender which provides a thread-safe way to enqueue packets 23 | /// to be processed. This should be used when the socket is busy running its polling loop in a 24 | /// separate thread. 25 | pub fn get_packet_sender(&self) -> Sender { 26 | self.handler.event_sender().clone() 27 | } 28 | 29 | /// Returns a handle to the event receiver which provides a thread-safe way to retrieve events 30 | /// from the socket. This should be used when the socket is busy running its polling loop in 31 | /// a separate thread. 32 | pub fn get_event_receiver(&self) -> Receiver { 33 | self.handler.event_receiver().clone() 34 | } 35 | 36 | /// Sends a packet. 37 | pub fn send(&mut self, packet: Packet) -> Result<()> { 38 | // we can savely unwrap, because receiver will always exist 39 | self.handler.event_sender().send(packet).unwrap(); 40 | Ok(()) 41 | } 42 | 43 | /// Receives a packet. 44 | pub fn recv(&mut self) -> Option { 45 | if let Ok(event) = self.handler.event_receiver().try_recv() { 46 | Some(event) 47 | } else { 48 | None 49 | } 50 | } 51 | 52 | /// Processes any inbound/outbound packets and handle idle clients. 53 | pub fn manual_poll(&mut self, time: Instant) { 54 | self.handler.manual_poll(time); 55 | } 56 | 57 | /// Returns a number of active connections. 58 | pub fn connection_count(&self) -> usize { 59 | self.handler.connections_count() 60 | } 61 | 62 | /// Sets the link conditioner for this socket. See [LinkConditioner] for further details. 63 | pub fn set_link_conditioner(&mut self, conditioner: Option) { 64 | self.handler.socket_mut().set_link_conditioner(conditioner); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /docs/md_book/src/heartbeat.md: -------------------------------------------------------------------------------- 1 | ## Heartbeat 2 | Laminar offers the possibility to keep the connection with a client open. 3 | This is done with heartbeat packets. 4 | This option is enabled by default. 5 | The behavior of the heart beat can be changed in the [configuration](https://github.com/amethyst/laminar/pull/224). 6 | It can also be disabled. 7 | 8 | A client is considered a connection when it sends a packet. 9 | If the client does not send a packet for x seconds, laminar sees this as an idling connection, and it is removed as an active connection. 
10 | When this happens, the following data is removed: 11 | 12 | 1) the reliability data such as acknowledged packets 13 | 2) the buffers that keep track of the ordering/sequencing. 14 | 3) the RTT counter 15 | 4) fragmentation data 16 | 17 | Losing this data from the memory is often undesirable. 18 | Therefore, it is important to have a consistent flow of packets between the two endpoints which will prevent disconnection of the client. 19 | The time before the client is disconnected can be changed in the [configuration](https://github.com/amethyst/laminar/blob/master/src/config.rs#L10). 20 | 21 | ## Why a heartbeat? 22 | With game networking for fast-paced FPS games, you have to deal with a lot of data that has to go from point A to B. 23 | We are talking about numbers of 20/30/60 hz. 24 | Laminar is based and optimized for the situation where a consistent flow of packets from the server to the client and from the client to the server that are being sent. 25 | In a game, where everything runs at milliseconds and speed is important, you need fast communication and multiple updates per seconds. 26 | 27 | What are those scenarios and how can I know if laminar is useful for my use case? 28 | You can think of input synchronization, location updates, state updates, events, etc. 29 | Let's zoom in on input synchronization of an FPS game. 30 | The client sends the packages, the server receives it, validates it, and sends an update to all other clients. 31 | In an FPS game, a lot of input is shared, and it's not a strange idea for a client to share its input and receive updates 60 times a second. 32 | Laminar is based on this idea, and is optimized for it. 33 | When you are sending packets once a second, laminar might not be the best solution here. And your probably going to do fine with TCP. 34 | 35 | To add to this, note that clients will be seen as 'disconnected' if they don't send packets for some duration, this duration can be found in the [configuration][config]. 36 | When there is a scenario's that you are sending packets less frequent, laminar has the option to keep the connection alive by sending an heath beat message at a configurable interval. 37 | 38 | 39 | - [Original PR](https://github.com/amethyst/laminar/pull/224) -------------------------------------------------------------------------------- /tests/common/client.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use std::thread::{self, JoinHandle}; 3 | use std::time::Duration; 4 | use std::time::Instant; 5 | 6 | use log::info; 7 | 8 | use laminar::{Packet, Socket}; 9 | 10 | /// Represents a client to some endpoint. 11 | pub struct Client { 12 | /// The sending timeout 13 | pub sending_timeout: Duration, 14 | /// The number of packets to send 15 | pub packets_to_send: u32, 16 | } 17 | 18 | impl Client { 19 | /// Constructs a new `Client`. 20 | pub fn new(timeout_sending: Duration, packets_to_send: u32) -> Client { 21 | Client { 22 | sending_timeout: timeout_sending, 23 | packets_to_send, 24 | } 25 | } 26 | 27 | /// Runs a specific instance of the client running at the given socket address. 28 | /// This function takes in a closure who constructs a packet which will be sent out to the client. 
29 | pub fn run_instance(&self, create_packet: F, endpoint: SocketAddr) -> ClientHandle 30 | where 31 | F: Fn() -> Packet + Send + 'static, 32 | { 33 | let timeout = self.sending_timeout; 34 | let packets_to_send = self.packets_to_send; 35 | 36 | let handle = thread::spawn(move || { 37 | let mut socket = Socket::bind(endpoint).unwrap(); 38 | 39 | info!("Client {:?} starts to send packets.", endpoint); 40 | 41 | for _ in 0..packets_to_send { 42 | let packet = create_packet(); 43 | socket.send(packet).unwrap(); 44 | socket.manual_poll(Instant::now()); 45 | 46 | let beginning_park = Instant::now(); 47 | let mut timeout_remaining = timeout; 48 | loop { 49 | thread::park_timeout(timeout_remaining); 50 | let elapsed = beginning_park.elapsed(); 51 | if elapsed >= timeout { 52 | break; 53 | } 54 | timeout_remaining = timeout - elapsed; 55 | } 56 | } 57 | info!("Client {:?} sent all messages.", endpoint); 58 | }); 59 | 60 | ClientHandle::new(handle) 61 | } 62 | } 63 | 64 | /// This is a handle to a running client which is sending data to some endpoint. 65 | pub struct ClientHandle { 66 | thread_handle: JoinHandle<()>, 67 | } 68 | 69 | impl ClientHandle { 70 | /// Constructs a new `ClientHandle` by the given thread handle. 71 | pub fn new(handle: JoinHandle<()>) -> ClientHandle { 72 | ClientHandle { 73 | thread_handle: handle, 74 | } 75 | } 76 | 77 | /// Waits until the client has sent all of its packets. 78 | pub fn wait_until_finished(self) { 79 | self.thread_handle.join().unwrap(); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /examples/simple_udp.rs: -------------------------------------------------------------------------------- 1 | //! This module provides an simple client, server examples with communication over udp. 2 | //! 1. setting up server to receive data. 3 | //! 2. setting up client to send data. 4 | //! 3. serialize data to send and deserialize when received. 5 | use std::net::SocketAddr; 6 | use std::time::Instant; 7 | 8 | use bincode::{deserialize, serialize}; 9 | use serde_derive::{Deserialize, Serialize}; 10 | 11 | use laminar::{Packet, Socket, SocketEvent}; 12 | 13 | /// The socket address of where the server is located. 14 | const SERVER_ADDR: &'static str = "127.0.0.1:12345"; 15 | // The client address from where the data is sent. 16 | const CLIENT_ADDR: &'static str = "127.0.0.1:12346"; 17 | 18 | fn client_address() -> SocketAddr { 19 | CLIENT_ADDR.parse().unwrap() 20 | } 21 | 22 | fn server_address() -> SocketAddr { 23 | SERVER_ADDR.parse().unwrap() 24 | } 25 | 26 | /// This will run an simple example with client and server communicating. 27 | #[allow(unused_must_use)] 28 | pub fn main() { 29 | let mut server = Socket::bind(server_address()).unwrap(); 30 | 31 | /* setup our `Client` and send some test data. 
*/ 32 | let mut client = Socket::bind(client_address()).unwrap(); 33 | 34 | client.send(Packet::unreliable( 35 | server_address(), 36 | serialize(&DataType::Coords { 37 | latitude: 10.55454, 38 | longitude: 10.555, 39 | altitude: 1.3, 40 | }) 41 | .unwrap(), 42 | )); 43 | 44 | client.send(Packet::unreliable( 45 | server_address(), 46 | serialize(&DataType::Coords { 47 | latitude: 3.344, 48 | longitude: 5.4545, 49 | altitude: 1.33, 50 | }) 51 | .unwrap(), 52 | )); 53 | 54 | client.send(Packet::unreliable( 55 | server_address(), 56 | serialize(&DataType::Text { 57 | string: String::from("Some information"), 58 | }) 59 | .unwrap(), 60 | )); 61 | 62 | // Send the queued send operations 63 | client.manual_poll(Instant::now()); 64 | 65 | // Check for any new packets 66 | server.manual_poll(Instant::now()); 67 | 68 | // ==== results ==== 69 | // Coords { longitude: 10.555, latitude: 10.55454, altitude: 1.3 } 70 | // Coords { longitude: 5.4545, latitude: 3.344, altitude: 1.33 } 71 | // Text { string: "Some information" } 72 | while let Some(pkt) = server.recv() { 73 | match pkt { 74 | SocketEvent::Packet(pkt) => { 75 | println!["{:?}", deserialize::(pkt.payload()).unwrap()] 76 | } 77 | _ => {} 78 | } 79 | } 80 | } 81 | 82 | #[derive(Debug, Serialize, Deserialize)] 83 | enum DataType { 84 | Coords { 85 | longitude: f32, 86 | latitude: f32, 87 | altitude: f32, 88 | }, 89 | Text { 90 | string: String, 91 | }, 92 | } 93 | -------------------------------------------------------------------------------- /src/packet/header/arranging_header.rs: -------------------------------------------------------------------------------- 1 | use std::io::Cursor; 2 | 3 | use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; 4 | 5 | use crate::error::Result; 6 | use crate::net::constants::ARRANGING_PACKET_HEADER; 7 | use crate::packet::SequenceNumber; 8 | 9 | use super::{HeaderReader, HeaderWriter}; 10 | 11 | #[derive(Copy, Clone, Debug)] 12 | /// This header represents a fragmented packet header. 13 | pub struct ArrangingHeader { 14 | arranging_id: SequenceNumber, 15 | stream_id: u8, 16 | } 17 | 18 | impl ArrangingHeader { 19 | /// Creates new fragment with the given packet header 20 | pub fn new(arranging_id: SequenceNumber, stream_id: u8) -> Self { 21 | ArrangingHeader { 22 | arranging_id, 23 | stream_id, 24 | } 25 | } 26 | 27 | /// Returns the sequence number from this packet. 28 | pub fn arranging_id(&self) -> SequenceNumber { 29 | self.arranging_id 30 | } 31 | 32 | /// Returns the sequence number from this packet. 33 | pub fn stream_id(&self) -> u8 { 34 | self.stream_id 35 | } 36 | } 37 | 38 | impl HeaderWriter for ArrangingHeader { 39 | type Output = Result<()>; 40 | 41 | fn parse(&self, buffer: &mut Vec) -> Self::Output { 42 | buffer.write_u16::(self.arranging_id)?; 43 | buffer.write_u8(self.stream_id)?; 44 | 45 | Ok(()) 46 | } 47 | } 48 | 49 | impl HeaderReader for ArrangingHeader { 50 | type Header = Result; 51 | 52 | fn read(rdr: &mut Cursor<&[u8]>) -> Self::Header { 53 | let arranging_id = rdr.read_u16::()?; 54 | let stream_id = rdr.read_u8()?; 55 | 56 | let header = ArrangingHeader { 57 | arranging_id, 58 | stream_id, 59 | }; 60 | 61 | Ok(header) 62 | } 63 | 64 | /// Returns the size of this header. 
65 | fn size() -> u8 { 66 | ARRANGING_PACKET_HEADER 67 | } 68 | } 69 | 70 | #[cfg(test)] 71 | mod tests { 72 | use std::io::Cursor; 73 | 74 | use crate::net::constants::ARRANGING_PACKET_HEADER; 75 | use crate::packet::header::{ArrangingHeader, HeaderReader, HeaderWriter}; 76 | 77 | #[test] 78 | fn serialize() { 79 | let mut buffer = Vec::new(); 80 | let header = ArrangingHeader::new(1, 2); 81 | assert![header.parse(&mut buffer).is_ok()]; 82 | 83 | assert_eq!(buffer[1], 1); 84 | assert_eq!(buffer[2], 2); 85 | } 86 | 87 | #[test] 88 | fn deserialize() { 89 | let buffer = vec![0, 1, 2]; 90 | let mut cursor = Cursor::new(buffer.as_slice()); 91 | 92 | let header = ArrangingHeader::read(&mut cursor).unwrap(); 93 | 94 | assert_eq!(header.arranging_id(), 1); 95 | assert_eq!(header.stream_id(), 2); 96 | } 97 | 98 | #[test] 99 | fn size() { 100 | assert_eq!(ArrangingHeader::size(), ARRANGING_PACKET_HEADER); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /benches/packet_processing.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; 4 | use criterion::{criterion_group, criterion_main, Criterion}; 5 | 6 | use laminar::{Config, DeliveryMethod, ProtocolVersion, VirtualConnection}; 7 | 8 | const SERVER_ADDR: &str = "127.0.0.1:12345"; 9 | const CLIENT_ADDR: &str = "127.0.0.1:12346"; 10 | 11 | fn process_packet_before_send( 12 | connection: &mut VirtualConnection, 13 | _config: &Config, 14 | delivery_method: DeliveryMethod, 15 | ) { 16 | let payload = vec![1, 2, 3, 4, 5]; 17 | 18 | let _packet_data = connection 19 | .process_outgoing(&payload, delivery_method) 20 | .unwrap(); 21 | } 22 | 23 | fn process_packet_when_received(connection: &mut VirtualConnection, data: &[u8]) { 24 | connection.process_incoming(&data).unwrap().unwrap(); 25 | } 26 | 27 | /// This is mimicking the `HeaderParser for StandardHeader` implementation which is no longer 28 | /// visible externally 29 | fn standard_header_bytes(delivery_method: DeliveryMethod) -> Vec { 30 | let mut buffer = Vec::new(); 31 | buffer.write_u16::(ProtocolVersion::get_crc16()); 32 | // Represents a standard `Packet` 33 | buffer.write_u8(0); 34 | buffer.write_u8(delivery_method as u8); 35 | buffer 36 | } 37 | 38 | /// This is mimicking the `HeaderParser for AckedPacketHeader` implementation which is no longer 39 | /// visible externally 40 | fn acked_header_bytes( 41 | delivery_method: DeliveryMethod, 42 | seq: u16, 43 | ack_seq: u16, 44 | ack_field: u32, 45 | ) -> Vec { 46 | let mut buffer = standard_header_bytes(delivery_method); 47 | buffer.write_u16::(seq); 48 | buffer.write_u16::(ack_seq); 49 | buffer.write_u32::(ack_field); 50 | buffer 51 | } 52 | 53 | fn receive_unreliable_benchmark(c: &mut Criterion) { 54 | let mut connection = 55 | VirtualConnection::new(SERVER_ADDR.parse().unwrap(), &Arc::new(Config::default())); 56 | 57 | // setup fake received bytes. 58 | let mut buffer = standard_header_bytes(DeliveryMethod::UnreliableUnordered); 59 | buffer.append(&mut vec![1; 500]); 60 | 61 | c.bench_function("process unreliable packet on receive", move |b| { 62 | b.iter(|| process_packet_when_received(&mut connection, &buffer)) 63 | }); 64 | } 65 | 66 | fn receive_reliable_benchmark(c: &mut Criterion) { 67 | let mut connection = 68 | VirtualConnection::new(SERVER_ADDR.parse().unwrap(), &Arc::new(Config::default())); 69 | 70 | // setup fake received bytes. 
71 | let mut buffer = acked_header_bytes(DeliveryMethod::ReliableUnordered, 0, 1, 2); 72 | buffer.append(&mut vec![1; 500]); 73 | 74 | c.bench_function("process reliable packet on receive", move |b| { 75 | b.iter(|| process_packet_when_received(&mut connection, &buffer)) 76 | }); 77 | } 78 | 79 | criterion_group!( 80 | benches, 81 | receive_unreliable_benchmark, 82 | receive_reliable_benchmark 83 | ); 84 | criterion_main!(benches); 85 | -------------------------------------------------------------------------------- /src/net/connection.rs: -------------------------------------------------------------------------------- 1 | use std::{self, fmt::Debug, net::SocketAddr, time::Instant}; 2 | 3 | use crate::config::Config; 4 | 5 | /// Allows connection to send packet, send event and get global configuration. 6 | pub trait ConnectionMessenger { 7 | /// Returns global configuration. 8 | fn config(&self) -> &Config; 9 | 10 | /// Sends a connection event. 11 | fn send_event(&mut self, address: &SocketAddr, event: ReceiveEvent); 12 | /// Sends a packet. 13 | fn send_packet(&mut self, address: &SocketAddr, payload: &[u8]); 14 | } 15 | 16 | /// Returns an address of an event. 17 | /// This is used by a `ConnectionManager`, because it doesn't know anything about connection events. 18 | pub trait ConnectionEventAddress { 19 | /// Returns event address 20 | fn address(&self) -> SocketAddr; 21 | } 22 | 23 | /// Allows to implement actual connection. 24 | /// Defines a type of `Send` and `Receive` events, that will be used by a connection. 25 | pub trait Connection: Debug { 26 | /// Defines a user event type. 27 | type SendEvent: Debug + ConnectionEventAddress; 28 | /// Defines a connection event type. 29 | type ReceiveEvent: Debug + ConnectionEventAddress; 30 | 31 | /// Creates new connection and initialize it by sending an connection event to the user. 32 | /// * messenger - allows to send packets and events, also provides a config. 33 | /// * address - defines a address that connection is associated with. 34 | /// * time - creation time, used by connection, so that it doesn't get dropped immediately or send heartbeat packet. 35 | /// * initial_data - if initiated by remote host, this will hold that a packet data. 36 | fn create_connection( 37 | messenger: &mut impl ConnectionMessenger, 38 | address: SocketAddr, 39 | time: Instant, 40 | initial_data: Option<&[u8]>, 41 | ) -> Self; 42 | 43 | /// Determines if the connection should be dropped due to its state. 44 | fn should_drop( 45 | &mut self, 46 | messenger: &mut impl ConnectionMessenger, 47 | time: Instant, 48 | ) -> bool; 49 | 50 | /// Processes a received packet: parse it and emit an event. 51 | fn process_packet( 52 | &mut self, 53 | messenger: &mut impl ConnectionMessenger, 54 | payload: &[u8], 55 | time: Instant, 56 | ); 57 | 58 | /// Processes a received event and send a packet. 59 | fn process_event( 60 | &mut self, 61 | messenger: &mut impl ConnectionMessenger, 62 | event: Self::SendEvent, 63 | time: Instant, 64 | ); 65 | 66 | /// Processes various connection-related tasks: resend dropped packets, send heartbeat packet, etc... 67 | /// This function gets called frequently. 
68 | fn update( 69 | &mut self, 70 | messenger: &mut impl ConnectionMessenger, 71 | time: Instant, 72 | ); 73 | } 74 | -------------------------------------------------------------------------------- /src/infrastructure/congestion.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | use crate::{ 4 | net::{NetworkQuality, RttMeasurer}, 5 | sequence_buffer::{CongestionData, SequenceBuffer}, 6 | Config, 7 | }; 8 | 9 | // /// Keeps track of congestion information. 10 | // pub struct CongestionHandler { 11 | // rtt_measurer: RttMeasurer, 12 | // congestion_data: SequenceBuffer, 13 | // _quality: NetworkQuality, 14 | // } 15 | 16 | // impl CongestionHandler { 17 | // /// Constructs a new `CongestionHandler` which you can use for keeping track of congestion information. 18 | // pub fn new(config: &Config) -> CongestionHandler { 19 | // CongestionHandler { 20 | // rtt_measurer: RttMeasurer::new(config), 21 | // congestion_data: SequenceBuffer::with_capacity(::max_value()), 22 | // _quality: NetworkQuality::Good, 23 | // } 24 | // } 25 | 26 | // /// Processes incoming sequence number. 27 | // /// 28 | // /// This will calculate the RTT-time and smooth down the RTT-value to prevent uge RTT-spikes. 29 | // pub fn process_incoming(&mut self, incoming_seq: u16) { 30 | // let congestion_data = self.congestion_data.get_mut(incoming_seq); 31 | // self.rtt_measurer.calculate_rrt(congestion_data); 32 | // } 33 | 34 | // /// Processes outgoing sequence number. 35 | // /// 36 | // /// This will insert an entry which is used for keeping track of the sending time. 37 | // /// Once we process incoming sequence numbers we can calculate the `RTT` time. 38 | // pub fn process_outgoing(&mut self, seq: u16, time: Instant) { 39 | // self.congestion_data 40 | // .insert(seq, CongestionData::new(seq, time)); 41 | // } 42 | // } 43 | 44 | pub struct CongestionHandler {} 45 | 46 | impl CongestionHandler { 47 | pub fn new(_config: &Config) -> CongestionHandler { 48 | CongestionHandler {} 49 | } 50 | 51 | pub fn process_incoming(&mut self, _incoming_seq: u16) { 52 | } 53 | 54 | pub fn process_outgoing(&mut self, _seq: u16, _time: Instant) { 55 | } 56 | } 57 | 58 | #[cfg(test)] 59 | mod test { 60 | use std::time::Instant; 61 | 62 | use crate::infrastructure::CongestionHandler; 63 | use crate::Config; 64 | 65 | #[test] 66 | fn congestion_entry_created() { 67 | let mut congestion_handler = CongestionHandler::new(&Config::default()); 68 | 69 | congestion_handler.process_outgoing(1, Instant::now()); 70 | 71 | // assert_eq!(congestion_handler.congestion_data.exists(1), true); 72 | } 73 | 74 | #[test] 75 | fn rtt_value_is_updated() { 76 | let mut congestion_handler = CongestionHandler::new(&Config::default()); 77 | 78 | // assert_eq!( 79 | // congestion_handler.rtt_measurer.get_rtt().abs() < std::f32::EPSILON, 80 | // true 81 | // ); 82 | congestion_handler.process_outgoing(1, Instant::now()); 83 | congestion_handler.process_incoming(1); 84 | // assert_eq!(congestion_handler.rtt_measurer.get_rtt() != 0., true); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/bin/cli.yml: -------------------------------------------------------------------------------- 1 | name: tester 2 | version: "0.1.0" 3 | author: Fletcher Haynes 4 | about: Testing binary for laminar 5 | 6 | subcommands: 7 | - server: 8 | about: Starts the tester in server mode 9 | args: 10 | - LISTEN_HOST: 11 | help: "Which host the server bind to. 
0.0.0.0/0 will bind to all" 12 | required: true 13 | takes_value: true 14 | long: bind-host 15 | default_value: "127.0.0.1:2264" 16 | short: h 17 | - SHUTDOWN_TIMER: 18 | help: "The number of seconds the server will remain running, then shut itself down" 19 | required: false 20 | default_value: "600" 21 | takes_value: true 22 | long: shutdown 23 | short: s 24 | - client: 25 | about: Starts the tester in client mode 26 | args: 27 | - CONNECT_ADDR: 28 | help: "Which host the client will connect to, as a hostname or IP address" 29 | required: true 30 | takes_value: true 31 | long: connect-host 32 | default_value: "127.0.0.1:2264" 33 | short: H 34 | - LISTEN_HOST: 35 | help: "Which host the client should bind to. 0.0.0.0/0 will bind to all" 36 | required: true 37 | takes_value: true 38 | long: bind-host 39 | default_value: "127.0.0.1:2265" 40 | short: h 41 | - TEST_TO_RUN: 42 | help: "Which client test to run. Run 'laminar-tester show-tests' to see all available" 43 | required: true 44 | takes_value: true 45 | long: test 46 | short: t 47 | default_value: "steady-stream" 48 | - CLIENT_TIMEOUT: 49 | help: "How long the client will wait on the server before it times out in seconds" 50 | required: true 51 | takes_value: true 52 | long: timeout 53 | default_value: "10" 54 | - TEST_DURATION: 55 | help: "For tests based on duration, length of time it should run in seconds" 56 | required: false 57 | takes_value: true 58 | long: test-duration 59 | default_value: "60" 60 | - PACKETS_PER_SECOND: 61 | help: "How many packets per second to send for applicable tests." 62 | required: false 63 | takes_value: true 64 | long: pps 65 | default_value: "60" 66 | - SHUTDOWN_TIMER: 67 | help: "The number of seconds the client will remain running, then shut itself down" 68 | required: false 69 | default_value: "600" 70 | takes_value: true 71 | long: shutdown 72 | short: s 73 | - show-tests: 74 | about: Shows all tests available 75 | subcommands: 76 | - server: 77 | about: Shows server tests 78 | - client: 79 | about: Shows client tests 80 | -------------------------------------------------------------------------------- /src/packet/header/fragment_header.rs: -------------------------------------------------------------------------------- 1 | use std::io::Cursor; 2 | 3 | use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; 4 | 5 | use crate::error::Result; 6 | use crate::net::constants::FRAGMENT_HEADER_SIZE; 7 | 8 | use super::{HeaderReader, HeaderWriter}; 9 | 10 | #[derive(Copy, Clone, Debug)] 11 | /// This header represents a fragmented packet header. 12 | pub struct FragmentHeader { 13 | sequence: u16, 14 | id: u8, 15 | num_fragments: u8, 16 | } 17 | 18 | impl FragmentHeader { 19 | /// Create new fragment with the given packet header. 20 | pub fn new(seq: u16, id: u8, num_fragments: u8) -> Self { 21 | FragmentHeader { 22 | id, 23 | num_fragments, 24 | sequence: seq, 25 | } 26 | } 27 | 28 | /// Returns the id of this fragment. 29 | pub fn id(&self) -> u8 { 30 | self.id 31 | } 32 | 33 | /// Returns the sequence number of this fragment. 34 | pub fn sequence(&self) -> u16 { 35 | self.sequence 36 | } 37 | 38 | /// Returns the total number of fragments in the packet this fragment is part of. 
39 | pub fn fragment_count(&self) -> u8 { 40 | self.num_fragments 41 | } 42 | } 43 | 44 | impl HeaderWriter for FragmentHeader { 45 | type Output = Result<()>; 46 | 47 | fn parse(&self, buffer: &mut Vec) -> Self::Output { 48 | buffer.write_u16::(self.sequence)?; 49 | buffer.write_u8(self.id)?; 50 | buffer.write_u8(self.num_fragments)?; 51 | 52 | Ok(()) 53 | } 54 | } 55 | 56 | impl HeaderReader for FragmentHeader { 57 | type Header = Result; 58 | 59 | fn read(rdr: &mut Cursor<&[u8]>) -> Self::Header { 60 | let sequence = rdr.read_u16::()?; 61 | let id = rdr.read_u8()?; 62 | let num_fragments = rdr.read_u8()?; 63 | 64 | let header = FragmentHeader { 65 | sequence, 66 | id, 67 | num_fragments, 68 | }; 69 | 70 | Ok(header) 71 | } 72 | 73 | /// Returns the size of this header. 74 | fn size() -> u8 { 75 | FRAGMENT_HEADER_SIZE 76 | } 77 | } 78 | 79 | #[cfg(test)] 80 | mod tests { 81 | use std::io::Cursor; 82 | 83 | use crate::net::constants::FRAGMENT_HEADER_SIZE; 84 | use crate::packet::header::{FragmentHeader, HeaderReader, HeaderWriter}; 85 | 86 | #[test] 87 | fn serialize() { 88 | let mut buffer = Vec::new(); 89 | let header = FragmentHeader::new(1, 2, 3); 90 | assert![header.parse(&mut buffer).is_ok()]; 91 | 92 | assert_eq!(buffer[1], 1); 93 | assert_eq!(buffer[2], 2); 94 | assert_eq!(buffer[3], 3); 95 | } 96 | 97 | #[test] 98 | fn deserialize() { 99 | let buffer = vec![0, 1, 2, 3]; 100 | 101 | let mut cursor = Cursor::new(buffer.as_slice()); 102 | 103 | let header = FragmentHeader::read(&mut cursor).unwrap(); 104 | 105 | assert_eq!(header.sequence(), 1); 106 | assert_eq!(header.id(), 2); 107 | assert_eq!(header.fragment_count(), 3); 108 | } 109 | 110 | #[test] 111 | fn size() { 112 | assert_eq!(FragmentHeader::size(), FRAGMENT_HEADER_SIZE); 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/packet/process_result.rs: -------------------------------------------------------------------------------- 1 | use std::collections::VecDeque; 2 | 3 | use crate::either::Either; 4 | use crate::packet::{OutgoingPacket, Packet, PacketType}; 5 | 6 | /// Used to return incoming (from bytes to packets) or outgoing (from packet to bytes) packets. 7 | /// It is used as optimization in cases, where most of the time there is only one element to iterate, and we don't want to create a vector for it. 8 | #[derive(Debug)] 9 | pub struct ZeroOrMore { 10 | data: Either, VecDeque>, 11 | } 12 | 13 | impl ZeroOrMore { 14 | fn zero() -> Self { 15 | Self { 16 | data: Either::Left(None), 17 | } 18 | } 19 | 20 | fn one(data: T) -> Self { 21 | Self { 22 | data: Either::Left(Some(data)), 23 | } 24 | } 25 | 26 | fn many(vec: VecDeque) -> Self { 27 | Self { 28 | data: Either::Right(vec), 29 | } 30 | } 31 | } 32 | 33 | impl Iterator for ZeroOrMore { 34 | type Item = T; 35 | 36 | fn next(&mut self) -> Option { 37 | match &mut self.data { 38 | Either::Left(option) => option.take(), 39 | Either::Right(vec) => vec.pop_front(), 40 | } 41 | } 42 | } 43 | 44 | /// Stores packets with headers that will be sent to the network, implements `IntoIterator` for convenience. 45 | #[derive(Debug)] 46 | pub struct OutgoingPackets<'a> { 47 | data: ZeroOrMore>, 48 | } 49 | 50 | impl<'a> OutgoingPackets<'a> { 51 | /// Stores only one packet, without allocating on the heap. 52 | pub fn one(packet: OutgoingPacket<'a>) -> Self { 53 | Self { 54 | data: ZeroOrMore::one(packet), 55 | } 56 | } 57 | 58 | /// Stores multiple packets, allocated on the heap. 
59 | pub fn many(packets: VecDeque>) -> Self { 60 | Self { 61 | data: ZeroOrMore::many(packets), 62 | } 63 | } 64 | } 65 | 66 | impl<'a> IntoIterator for OutgoingPackets<'a> { 67 | type Item = OutgoingPacket<'a>; 68 | type IntoIter = ZeroOrMore; 69 | 70 | fn into_iter(self) -> Self::IntoIter { 71 | self.data 72 | } 73 | } 74 | 75 | /// Stores parsed packets with their types, that was received from network, implements `IntoIterator` for convenience. 76 | #[derive(Debug)] 77 | pub struct IncomingPackets { 78 | data: ZeroOrMore<(Packet, PacketType)>, 79 | } 80 | 81 | impl IncomingPackets { 82 | /// No packets are stored 83 | pub fn zero() -> Self { 84 | Self { 85 | data: ZeroOrMore::zero(), 86 | } 87 | } 88 | 89 | /// Stores only one packet, without allocating on the heap. 90 | pub fn one(packet: Packet, packet_type: PacketType) -> Self { 91 | Self { 92 | data: ZeroOrMore::one((packet, packet_type)), 93 | } 94 | } 95 | 96 | /// Stores multiple packets, allocated on the heap. 97 | pub fn many(vec: VecDeque<(Packet, PacketType)>) -> Self { 98 | Self { 99 | data: ZeroOrMore::many(vec), 100 | } 101 | } 102 | } 103 | 104 | impl IntoIterator for IncomingPackets { 105 | type Item = (Packet, PacketType); 106 | type IntoIter = ZeroOrMore; 107 | 108 | fn into_iter(self) -> Self::IntoIter { 109 | self.data 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /examples/server_client.rs: -------------------------------------------------------------------------------- 1 | //! Note that the terms "client" and "server" here are purely what we logically associate with them. 2 | //! Technically, they both work the same. 3 | //! Note that in practice you don't want to implement a chat client using UDP. 4 | use std::io::stdin; 5 | use std::thread; 6 | use std::time::Instant; 7 | 8 | use laminar::{ErrorKind, Packet, Socket, SocketEvent}; 9 | 10 | const SERVER: &str = "127.0.0.1:12351"; 11 | 12 | fn server() -> Result<(), ErrorKind> { 13 | let mut socket = Socket::bind(SERVER)?; 14 | let (sender, receiver) = (socket.get_packet_sender(), socket.get_event_receiver()); 15 | let _thread = thread::spawn(move || socket.start_polling()); 16 | 17 | loop { 18 | if let Ok(event) = receiver.recv() { 19 | match event { 20 | SocketEvent::Packet(packet) => { 21 | let msg = packet.payload(); 22 | 23 | if msg == b"Bye!" { 24 | break; 25 | } 26 | 27 | let msg = String::from_utf8_lossy(msg); 28 | let ip = packet.addr().ip(); 29 | 30 | println!("Received {:?} from {:?}", msg, ip); 31 | 32 | sender 33 | .send(Packet::reliable_unordered( 34 | packet.addr(), 35 | "Copy that!".as_bytes().to_vec(), 36 | )) 37 | .expect("This should send"); 38 | } 39 | SocketEvent::Timeout(address) => { 40 | println!("Client timed out: {}", address); 41 | } 42 | _ => {} 43 | } 44 | } 45 | } 46 | 47 | Ok(()) 48 | } 49 | 50 | fn client() -> Result<(), ErrorKind> { 51 | let addr = "127.0.0.1:12352"; 52 | let mut socket = Socket::bind(addr)?; 53 | println!("Connected on {}", addr); 54 | 55 | let server = SERVER.parse().unwrap(); 56 | 57 | println!("Type a message and press Enter to send. Send `Bye!` to quit."); 58 | 59 | let stdin = stdin(); 60 | let mut s_buffer = String::new(); 61 | 62 | loop { 63 | s_buffer.clear(); 64 | stdin.read_line(&mut s_buffer)?; 65 | let line = s_buffer.replace(|x| x == '\n' || x == '\r', ""); 66 | 67 | socket.send(Packet::reliable_unordered( 68 | server, 69 | line.clone().into_bytes(), 70 | ))?; 71 | 72 | socket.manual_poll(Instant::now()); 73 | 74 | if line == "Bye!" 
{ 75 | break; 76 | } 77 | 78 | match socket.recv() { 79 | Some(SocketEvent::Packet(packet)) => { 80 | if packet.addr() == server { 81 | println!("Server sent: {}", String::from_utf8_lossy(packet.payload())); 82 | } else { 83 | println!("Unknown sender."); 84 | } 85 | } 86 | Some(SocketEvent::Timeout(_)) => {} 87 | _ => println!("Silence.."), 88 | } 89 | } 90 | 91 | Ok(()) 92 | } 93 | 94 | fn main() -> Result<(), ErrorKind> { 95 | let stdin = stdin(); 96 | 97 | println!("Please type in `server` or `client`."); 98 | 99 | let mut s = String::new(); 100 | stdin.read_line(&mut s)?; 101 | 102 | if s.starts_with("s") { 103 | println!("Starting server.."); 104 | server() 105 | } else { 106 | println!("Starting client.."); 107 | client() 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/packet/header/acked_packet_header.rs: -------------------------------------------------------------------------------- 1 | use std::io::Cursor; 2 | 3 | use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; 4 | 5 | use crate::error::Result; 6 | use crate::net::constants::ACKED_PACKET_HEADER; 7 | 8 | use super::{HeaderReader, HeaderWriter}; 9 | 10 | #[derive(Copy, Clone, Debug)] 11 | /// This header provides reliability information. 12 | pub struct AckedPacketHeader { 13 | /// This is the sequence number so that we can know where in the sequence of packages this packet belongs. 14 | pub seq: u16, 15 | // This is the last acknowledged sequence number. 16 | ack_seq: u16, 17 | // This is an bitfield of all last 32 acknowledged packages 18 | ack_field: u32, 19 | } 20 | 21 | impl AckedPacketHeader { 22 | /// When we compose packet headers, the local sequence becomes the sequence number of the packet, and the remote sequence becomes the ack. 23 | /// The ack bitfield is calculated by looking into a queue of up to 33 packets, containing sequence numbers in the range [remote sequence - 32, remote sequence]. 24 | /// We set bit n (in [1,32]) in ack bits to 1 if the sequence number remote sequence - n is in the received queue. 25 | pub fn new(seq_num: u16, last_seq: u16, bit_field: u32) -> AckedPacketHeader { 26 | AckedPacketHeader { 27 | seq: seq_num, 28 | ack_seq: last_seq, 29 | ack_field: bit_field, 30 | } 31 | } 32 | 33 | /// Returns the sequence number from this packet. 34 | #[allow(dead_code)] 35 | pub fn sequence(&self) -> u16 { 36 | self.seq 37 | } 38 | 39 | /// Returns bit field of all last 32 acknowledged packages. 40 | pub fn ack_field(&self) -> u32 { 41 | self.ack_field 42 | } 43 | 44 | /// Returns last acknowledged sequence number. 
45 | pub fn ack_seq(&self) -> u16 { 46 | self.ack_seq 47 | } 48 | } 49 | 50 | impl HeaderWriter for AckedPacketHeader { 51 | type Output = Result<()>; 52 | 53 | fn parse(&self, buffer: &mut Vec) -> Self::Output { 54 | buffer.write_u16::(self.seq)?; 55 | buffer.write_u16::(self.ack_seq)?; 56 | buffer.write_u32::(self.ack_field)?; 57 | Ok(()) 58 | } 59 | } 60 | 61 | impl HeaderReader for AckedPacketHeader { 62 | type Header = Result; 63 | 64 | fn read(rdr: &mut Cursor<&[u8]>) -> Self::Header { 65 | let seq = rdr.read_u16::()?; 66 | let ack_seq = rdr.read_u16::()?; 67 | let ack_field = rdr.read_u32::()?; 68 | 69 | Ok(AckedPacketHeader { 70 | seq, 71 | ack_seq, 72 | ack_field, 73 | }) 74 | } 75 | 76 | fn size() -> u8 { 77 | ACKED_PACKET_HEADER 78 | } 79 | } 80 | 81 | #[cfg(test)] 82 | mod tests { 83 | use std::io::Cursor; 84 | 85 | use crate::net::constants::ACKED_PACKET_HEADER; 86 | use crate::packet::header::{AckedPacketHeader, HeaderReader, HeaderWriter}; 87 | 88 | #[test] 89 | fn serialize() { 90 | let mut buffer = Vec::new(); 91 | let header = AckedPacketHeader::new(1, 2, 3); 92 | assert![header.parse(&mut buffer).is_ok()]; 93 | 94 | assert_eq!(buffer[1], 1); 95 | assert_eq!(buffer[3], 2); 96 | assert_eq!(buffer[7], 3); 97 | assert_eq!(buffer.len() as u8, AckedPacketHeader::size()); 98 | } 99 | 100 | #[test] 101 | fn deserialize() { 102 | let buffer = vec![0, 1, 0, 2, 0, 0, 0, 3]; 103 | 104 | let mut cursor = Cursor::new(buffer.as_slice()); 105 | 106 | let header = AckedPacketHeader::read(&mut cursor).unwrap(); 107 | 108 | assert_eq!(header.sequence(), 1); 109 | assert_eq!(header.ack_seq(), 2); 110 | assert_eq!(header.ack_field(), 3); 111 | } 112 | 113 | #[test] 114 | fn size() { 115 | assert_eq!(AckedPacketHeader::size(), ACKED_PACKET_HEADER); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/test_utils/network_emulator.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::RefCell, 3 | collections::hash_map::Entry, 4 | collections::{HashMap, VecDeque}, 5 | io::Result, 6 | net::SocketAddr, 7 | rc::Rc, 8 | }; 9 | 10 | use crate::net::{DatagramSocket, LinkConditioner}; 11 | 12 | /// This type allows to share global state between all sockets, created from the same instance of `NetworkEmulator`. 13 | type GlobalBindings = Rc)>>>>; 14 | 15 | /// Enables to create the emulated socket, that share global state stored by this network emulator. 16 | #[derive(Debug, Default)] 17 | pub struct NetworkEmulator { 18 | network: GlobalBindings, 19 | } 20 | 21 | impl NetworkEmulator { 22 | /// Creates an emulated socket by binding to an address. 23 | /// If other socket already was bound to this address, error will be returned instead. 24 | pub fn new_socket(&self, address: SocketAddr) -> Result { 25 | match self.network.borrow_mut().entry(address) { 26 | Entry::Occupied(_) => Err(std::io::Error::new( 27 | std::io::ErrorKind::AddrInUse, 28 | "Cannot bind to address", 29 | )), 30 | Entry::Vacant(entry) => { 31 | entry.insert(Default::default()); 32 | Ok(EmulatedSocket { 33 | network: self.network.clone(), 34 | address, 35 | conditioner: Default::default(), 36 | }) 37 | } 38 | } 39 | } 40 | 41 | /// Clear all packets from a socket that is bound to provided address. 
42 | pub fn clear_packets(&self, addr: SocketAddr) { 43 | if let Some(packets) = self.network.borrow_mut().get_mut(&addr) { 44 | packets.clear(); 45 | } 46 | } 47 | } 48 | 49 | /// Implementation of a socket, that is created by `NetworkEmulator`. 50 | #[derive(Debug, Clone)] 51 | pub struct EmulatedSocket { 52 | network: GlobalBindings, 53 | address: SocketAddr, 54 | conditioner: Option, 55 | } 56 | 57 | impl EmulatedSocket { 58 | pub fn set_link_conditioner(&mut self, conditioner: Option) { 59 | self.conditioner = conditioner; 60 | } 61 | } 62 | 63 | impl DatagramSocket for EmulatedSocket { 64 | /// Sends a packet to and address if there is a socket bound to it. Otherwise it will simply be ignored. 65 | fn send_packet(&mut self, addr: &SocketAddr, payload: &[u8]) -> Result { 66 | let send = if let Some(ref mut conditioner) = self.conditioner { 67 | conditioner.should_send() 68 | } else { 69 | true 70 | }; 71 | if send { 72 | if let Some(binded) = self.network.borrow_mut().get_mut(addr) { 73 | binded.push_back((self.address, payload.to_vec())); 74 | } 75 | Ok(payload.len()) 76 | } else { 77 | Ok(0) 78 | } 79 | } 80 | 81 | /// Receives a packet from this socket. 82 | fn receive_packet<'a>(&mut self, buffer: &'a mut [u8]) -> Result<(&'a [u8], SocketAddr)> { 83 | if let Some((addr, payload)) = self 84 | .network 85 | .borrow_mut() 86 | .get_mut(&self.address) 87 | .unwrap() 88 | .pop_front() 89 | { 90 | let slice = &mut buffer[..payload.len()]; 91 | slice.copy_from_slice(payload.as_ref()); 92 | Ok((slice, addr)) 93 | } else { 94 | Err(std::io::ErrorKind::WouldBlock.into()) 95 | } 96 | } 97 | 98 | /// Returns the socket address that this socket was created from. 99 | fn local_addr(&self) -> Result { 100 | Ok(self.address) 101 | } 102 | 103 | fn is_blocking_mode(&self) -> bool { 104 | false 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/infrastructure/arranging.rs: -------------------------------------------------------------------------------- 1 | //! This module is about arranging items, over different streams, based on an certain algorithm. 2 | //! 3 | //! The above sentence contains a lot of important information, lets zoom in at the above sentence. 4 | //! 5 | //! ## Items 6 | //! 7 | //! By items, you can understand 'packets' and 'arranging' can be done based either with sequencing or ordering. 8 | //! 9 | //! ## Ordering VS Sequencing 10 | //! Let's define two concepts here: 11 | //! _"Sequencing: this is the process of only caring about the newest items."_ [1](https://dictionary.cambridge.org/dictionary/english/sequencing) 12 | //! _"Ordering: this is the process of putting something in a particular order."_ [2](https://dictionary.cambridge.org/dictionary/english/ordering) 13 | //! 14 | //! - Sequencing: Only the newest items will be passed trough e.g. `1,3,2,5,4` which results in `1,3,5`. 15 | //! - Ordering: All items are returned in order `1,3,2,5,4` which results in `1,2,3,4,5`. 16 | //! 17 | //! ## Arranging Streams 18 | //! What are these 'arranging streams'? 19 | //! You can see 'arranging streams' as something to arrange items that have no relationship at all with one another. 20 | //! 21 | //! ## Simple Example 22 | //! Think of a highway where you have several lanes where cars are driving. 23 | //! Because there are these lanes, cars can move on faster. 24 | //! For example, the cargo drivers drive on the right and the high-speed cars on the left. 25 | //! 
The cargo drivers have no influence on fast cars and vice versa. 26 | //! 27 | //! ## Real Example 28 | //! If a game developer wants to send data to a client, it may be that he wants to send data ordered, unordered or sequenced. 29 | //! Data might be the following: 30 | //! 1. Player movement, we want to order player movements because we don't care about old positions. 31 | //! 2. Bullet movement, we want to order bullet movement because we don't care about old positions of bullets. 32 | //! 3. Chat messages, we want to order chat messages because it is nice to see the text in the right order. 33 | //! 34 | //! Player movement and chat messages are totally unrelated to each other and you absolutely do not want that movement packets are interrupted when a chat message is not sent. 35 | //! With ordering, we can only return items when all packets up to the current package are received. 36 | //! 37 | //! So if a chat package is missing, the other packages will suffer from it. 38 | //! It would be nice if we could order player movements and chat messages separately. This is exactly what `ordering streams` are meant for. 39 | //! The game developer can indicate on which stream he can order his packets and how he wants to arrange them. 40 | //! For example, the game developer can say: "Let me set all chat messages to 'stream 1' and all motion packets to 'stream 2'. 41 | 42 | pub use self::ordering::{IterMut, OrderingStream, OrderingSystem}; 43 | pub use self::sequencing::{SequencingStream, SequencingSystem}; 44 | 45 | mod ordering; 46 | mod sequencing; 47 | 48 | /// A trait which can be implemented for arranging operations. 49 | pub trait Arranging { 50 | type ArrangingItem; 51 | 52 | /// Arrange the given item based on the given index. 53 | /// If the `incoming_offset` somehow does not satisfies the arranging algorithm it returns `None`. 54 | /// If the `incoming_offset` satisfies the arranging algorithm it returns `Some` with the passed item. 55 | fn arrange( 56 | &mut self, 57 | incoming_index: u16, 58 | item: Self::ArrangingItem, 59 | ) -> Option; 60 | } 61 | 62 | /// An arranging system that has multiple streams on which you can arrange items. 63 | pub trait ArrangingSystem { 64 | /// The type of stream that is used for arranging items. 65 | type Stream; 66 | 67 | /// Returns the number of streams currently created. 68 | fn stream_count(&self) -> usize; 69 | /// Try to get a `Stream` by `stream_id`. When the stream does not exist, it will be inserted by the given `stream_id` and returned. 70 | fn get_or_create_stream(&mut self, stream_id: u8) -> &mut Self::Stream; 71 | } 72 | -------------------------------------------------------------------------------- /src/throughput.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{self, Debug, Display}; 2 | use std::time::{Duration, Instant}; 3 | 4 | /// Entry for throughput monitor with measured information. 5 | #[derive(Debug)] 6 | struct ThroughputEntry { 7 | measured_throughput: u32, 8 | _start: Instant, 9 | } 10 | 11 | impl ThroughputEntry { 12 | /// Constructs a new throughput entry. 13 | pub fn new(measured_throughput: u32, time: Instant) -> ThroughputEntry { 14 | ThroughputEntry { 15 | measured_throughput, 16 | _start: time, 17 | } 18 | } 19 | } 20 | 21 | /// Helper to monitor throughput. 22 | /// 23 | /// Throughput is calculated at some duration. 24 | /// For each duration an entry is created to keep track of the history of throughput. 
25 | /// 26 | /// With this type you can calculate the average or get the total and last measured throughput. 27 | pub struct ThroughputMonitoring { 28 | throughput_duration: Duration, 29 | timer: Instant, 30 | current_throughput: u32, 31 | measured_throughput: Vec, 32 | } 33 | 34 | impl ThroughputMonitoring { 35 | /// Constructs a new instance of `ThroughputMonitoring`. 36 | pub fn new(throughput_duration: Duration) -> ThroughputMonitoring { 37 | ThroughputMonitoring { 38 | throughput_duration, 39 | timer: Instant::now(), 40 | current_throughput: 0, 41 | measured_throughput: Vec::new(), 42 | } 43 | } 44 | 45 | /// Increases the throughput by one, when the `throughput_duration` has elapsed since the last call, then an throughput entry will be created. 46 | pub fn tick(&mut self) -> bool { 47 | if self.timer.elapsed() >= self.throughput_duration { 48 | self.measured_throughput 49 | .push(ThroughputEntry::new(self.current_throughput, self.timer)); 50 | self.current_throughput = 0; 51 | self.timer = Instant::now(); 52 | true 53 | } else { 54 | self.current_throughput += 1; 55 | false 56 | } 57 | } 58 | 59 | /// Returns the average throughput over all throughput up-till now. 60 | pub fn average(&self) -> u32 { 61 | if !self.measured_throughput.is_empty() { 62 | return self 63 | .measured_throughput 64 | .iter() 65 | .map(|x| x.measured_throughput) 66 | .sum::() 67 | / self.measured_throughput.len() as u32; 68 | } 69 | 0 70 | } 71 | 72 | /// Reset the throughput history. 73 | pub fn reset(&mut self) { 74 | self.current_throughput = 0; 75 | self.measured_throughput.clear(); 76 | } 77 | 78 | /// Returns the last measured throughput. 79 | pub fn last_throughput(&self) -> u32 { 80 | self.measured_throughput 81 | .last() 82 | .map(|x| x.measured_throughput) 83 | .unwrap_or(0) 84 | } 85 | 86 | /// Returns the totals measured throughput ticks. 87 | pub fn total_measured_ticks(&self) -> u32 { 88 | self.measured_throughput 89 | .iter() 90 | .map(|x| x.measured_throughput) 91 | .sum::() 92 | + self.current_throughput 93 | } 94 | } 95 | 96 | impl Debug for ThroughputMonitoring { 97 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 98 | write!( 99 | f, 100 | "Current Throughput: {}, Elapsed Time: {:#?}, Average Throughput: {}", 101 | self.last_throughput(), 102 | self.timer.elapsed(), 103 | self.average() 104 | ) 105 | } 106 | } 107 | 108 | impl Display for ThroughputMonitoring { 109 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 110 | write!( 111 | f, 112 | "Current Throughput: {}, Elapsed Time: {:#?}, Average Throughput: {}", 113 | self.last_throughput(), 114 | self.timer.elapsed(), 115 | self.average() 116 | ) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /docs/md_book/src/reliability/ordering.md: -------------------------------------------------------------------------------- 1 | ## Arranging packets 2 | 3 | Laminar provides a way to arrange packets, over different streams. 4 | 5 | The above sentence contains a lot of important information, let us zoom in a little more at the above sentence. 
6 | 7 | ## Ordering VS Sequencing 8 | Let's define two concepts here: 9 | _"Sequencing: this is the process of only caring about the newest items."_ [1](https://dictionary.cambridge.org/dictionary/english/sequencing) 10 | _"Ordering: this is the process of putting something in a particular order."_ [2](https://dictionary.cambridge.org/dictionary/english/ordering) 11 | 12 | - Sequencing: Only the newest items will be passed through e.g. `1,3,2,5,4` which results in `1,3,5`. 13 | - Ordering: All items are returned in order `1,3,2,5,4` which results in `1,2,3,4,5`. 14 | - Arranging: We call the combined process of ordering and sequencing the 'arranging' of packets. 15 | 16 | Due to the design of the internet, it is not always guaranteed that packets will arrive or that they will be received in the order they were sent. 17 | Fortunately, Laminar's implementation grants the ability to optionally specify how reliable and ordered (or not) the stream of packets is delivered to the client. 18 | 19 | ### How ordering works 20 | If we were to send the following packets: `1,2,3,4,5`, 21 | but something happens on the internet which causes the packets to arrive at their final destination as: `1,5,4,2,3`, 22 | then Laminar ensures that your packets arrive at the client as `1,2,3,4,5`. 23 | 24 | ## Arranging Streams 25 | What are these 'arranging streams'? 26 | You can see 'arranging streams' as something to arrange packets that have no relationship at all with one another. 27 | You can either arrange packets in order or in sequence. 28 | 29 | ### Simple Example 30 | Think of a highway where you have several lanes where cars are driving. 31 | Because there are these lanes, cars can move along faster. 32 | For example, the cargo drivers drive on the right and the high-speed cars on the left. 33 | The cargo drivers do not influence fast cars and vice versa. 34 | 35 | ### Real Example 36 | If a game developer wants to send data to a client, he might want to send data either ordered, unordered or sequenced. 37 | 38 | 'Data' could be the following: 39 | 1. Player movement, we want to order player movements because we don't want the player to glitch. 40 | 2. Bullet movement, we want to sequence bullet movement because we don't care about old positions of bullets. 41 | 3. Chat messages, we want to order chat messages because it is nice to see the text in the right order. 42 | 43 | Player movement and chat messages are totally unrelated to each other, and you absolutely do not want the movement packets to be interrupted when a chat message is not sent. 44 | 45 | It would be nice if we could order player movements and chat messages separately. Guess what! This is exactly what 'arranging streams' do. 46 | A game developer can indicate on which stream they would like to arrange their packets. 47 | For example, the game developer can say: "Let me order all chat messages on 'stream 1' and sequence all motion packets on 'stream 2'." 48 | 49 | ### Example 50 | ```rust 51 | // We can specify on which stream and how to arrange our packets, check out our book and documentation for more information 52 | let unreliable_sequenced = Packet::unreliable_sequenced(destination, bytes, Some(1)); 53 | let reliable_sequenced = Packet::reliable_sequenced(destination, bytes, Some(2)); 54 | let reliable_ordered = Packet::reliable_ordered(destination, bytes, Some(3)); 55 | ``` 56 | 57 | Take notice of the last `Option` parameter: with it you can specify which stream to arrange your packets on.
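For instance, the two packets below both use stream `1`, yet they never wait on each other, because one is on a sequencing stream and the other on an ordering stream (a small sketch reusing the constructors from the example above; `destination` and `bytes` are placeholders):

```rust
// same stream id, but sequenced and ordered streams are kept separate,
// so these two packets are arranged independently of each other
let sequenced_on_1 = Packet::unreliable_sequenced(destination, bytes.clone(), Some(1));
let ordered_on_1 = Packet::reliable_ordered(destination, bytes, Some(1));
```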
58 | One thing that is important to understand is that 'sequenced streams' are different from 'ordered streams': 59 | specifying `Some(1)` for a sequenced stream and `Some(1)` for an ordered stream means those packets are arranged separately from one another. 60 | You can use 254 different ordering or sequencing streams; in reality you'll probably only need a few. When specifying `None`, stream '255' will be used. 61 | 62 | ## Interesting Reads 63 | - [RakNet Ordering Streams](http://www.raknet.net/raknet/manual/sendingpackets.html) 64 | - [LiteNetLib Implementation](https://github.com/RevenantX/LiteNetLib/issues/67) -------------------------------------------------------------------------------- /docs/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Workflow 2 | 3 | As a team, we adopted the following workflow agreements. When we begin work on the amethyst_network crate, we’ll use the same agreements. They are focused on maintaining a high level of quality in the code and on working with a highly distributed team. We’re including them here as some of the other teams may find them of use. 4 | 5 | - All warnings produced by `cargo test` are treated as errors by the CI/CD system 6 | - All `clippy` warnings are treated as errors by the CI/CD system 7 | - We use `kcov` to track our code coverage; we do not have a required minimum, rather we use this as a potential indicator of issues 8 | - We include sample code about using the library 9 | - We maintain a benchmarking framework so we can track regressions 10 | - Unit and integration tests, as well as release testing with docker-compose 11 | 12 | ## Style Guidelines 13 | 14 | As a team, we (eventually) agreed on a coherent style for all our work. See this [document](https://github.com/amethyst/laminar/blob/master/docs/CONTRIBUTING.md#code-style) for more information. 15 | Some of the most helpful ones have been: 16 | 17 | - Keep PRs small, preferably under 200 lines of code when possible 18 | - Comments should explain why, not what 19 | - You must provide comments for the public API 20 | - No hard-coded values 21 | - No panics nor unwraps in non-test code 22 | - The `rustfmt` stable release must be used when a PR is merged. If `rustfmt` complains, the PR can not be merged. 23 | - `clippy` will be run when merging a PR; if `clippy` complains, the PR can not be merged. 24 | - All tests must pass; with a failing test the PR can not be merged. 25 | - We make use of the [forking workflow](https://nl.atlassian.com/git/tutorials/comparing-workflows/forking-workflow) 26 | 27 | ## Code style 28 | 29 | Some code guidelines to keep in mind when contributing to laminar or amethyst-networking: 30 | 1. Comments 31 | - Comment all code you’ve added. Things you should comment: types, methods/functions, public fields of structs. 32 | - Calculations should be documented, whether in a PR or in code, but it must be clear what is done. 33 | - Public things should get docstring comments using `///`. Non-public things may use `//` comments 34 | - Keep comments small 35 | - Don’t create unnecessary comments. They must add value 36 | - Comments should explain the “why” not the “what” 37 | - All `///` comments should start capitalized and end with a dot. 38 | - Function comments should use the third-person form, like: 'Returns', 'Creates', 'Instantiates', etc. 39 | - All `//` comments that explain code inside functions should start without a capital letter and have no trailing dot 40 | - Referenced types, functions and variables should be put inside '`code markup`'. 41 | 2.
Hard Coding 42 | - Don't hard code values anywhere 43 | - Use the ‘NetworkConfig’ type for common network settings; use consts or parameter input 44 | - Use of lazy_static is acceptable, but first make sure you can’t fix the issue in other ways 45 | 3. Code markup 46 | - Keep files small. It is better to have small files with small pieces of logic than one file with 1000 lines of logic and multiple types/structs, etc. Note that this refers to logic; tests are not included 47 | - No panics/unwraps in the main codebase, but they are accepted in tests 48 | 49 | ## Import Reordering 50 | All imports are semantically grouped and ordered. The order is: 51 | 52 | - standard library (`use std::...`) 53 | - external crates (`use rand::...`) 54 | - current crate (`use crate::...`) 55 | - parent module (`use super::..`) 56 | - current module (`use self::...`) 57 | - module declaration (`mod ...`) 58 | 59 | There must be an empty line between groups. An example: 60 | 61 | ```rust 62 | use crossterm_utils::{csi, write_cout, Result}; 63 | 64 | use crate::sys::{get_cursor_position, show_cursor}; 65 | 66 | use super::Cursor; 67 | ``` 68 | 69 | #### CLion Tips 70 | 71 | The CLion IDE does this for you (_Menu_ -> _Code_ -> _Optimize Imports_). Be aware that CLion sorts 72 | imports within a group in a different way than `rustfmt`. It is effectively a two-step operation 73 | to get proper grouping & sorting: 74 | 75 | * _Menu_ -> _Code_ -> _Optimize Imports_ - group & semantically order imports 76 | * `cargo fmt` - fix ordering within the group 77 | 78 | The second step can be automated via _CLion_ -> _Preferences_ -> 79 | _Languages & Frameworks_ -> _Rust_ -> _Rustfmt_ -> _Run rustfmt on save_. -------------------------------------------------------------------------------- /docs/md_book/src/protocols.md: -------------------------------------------------------------------------------- 1 | # Networking protocols 2 | 3 | So the first and possibly most important question is which protocol to use and when. Let’s first take a look at TCP and UDP. 4 | 5 | ## IP 6 | All communication over the internet happens over IP (Internet Protocol). 7 | This protocol only passes packets across the network without any guarantee that they will arrive at the destination. 8 | Sometimes IP passes along multiple copies of the same packet and these packets make their way to the destination via different paths, causing packets to arrive out of order and in duplicate. 9 | 10 | So to be able to communicate over the network we make use of existing protocols that provide some more certainty. 11 | We will first take a look at TCP, after which we will check out UDP. 12 | 13 | ## TCP/IP 14 | TCP stands for “transmission control protocol”. IP stands for “internet protocol”. 15 | Together they form the backbone for almost everything you do online; from web browsing to IRC to email, it’s all built on top of TCP/IP. 16 | 17 | TCP is a connection-oriented protocol, which means a connection is established and maintained until the application programs at each end have finished exchanging messages. 18 | TCP provides fully reliable, ordered communication between two machines. The data you send is guaranteed to arrive, and in order. 19 | The TCP protocol will also split up and reassemble packets if those are too large.
20 | 21 | **Characteristics** 22 | - Reliable 23 | - Ordered 24 | - Automatic [fragmentation](fragmentation.md) of packets 25 | - Stream based 26 | - Flow control ([Congestion Avoidance](congestion_avoidence/congestion_avoidance.md)) 27 | 28 | ## UDP 29 | UDP stands for “user datagram protocol” and it’s another protocol built on top of IP, but unlike TCP, instead of adding lots of features and complexity, UDP is a very thin layer over IP. 30 | 31 | Like IP, UDP is an unreliable protocol. In practice, however, most packets that are sent will get through, but you’ll usually have around 1-5% packet loss, and occasionally you’ll get periods where no packets get through at all (remember there are lots of computers between you and your destination where things can go wrong…) 32 | 33 | **Characteristics** 34 | - Not Reliable 35 | - Not Ordered 36 | - No [fragmentation](fragmentation.md) of packets 37 | - No flow control ([Congestion Avoidance](congestion_avoidence/congestion_avoidance.md)) 38 | - Packet loss can happen 39 | - Message based 40 | 41 | ## Why UDP and not TCP | More 42 | Those of you familiar with TCP know that it already has its own concepts of connection, reliability, ordering and congestion avoidance, so why are we rewriting our own mini version of TCP on top of UDP? 43 | 44 | The issue is that multiplayer action games rely on a steady stream of packets sent at rates of 10 to 30 packets per second, and for the most part, the data contained in these packets is so time sensitive that only the most recent data is useful. 45 | This includes data such as player inputs, the position, orientation and velocity of each player character, and the state of physics objects in the world. 46 | 47 | The problem with TCP is that it abstracts data delivery as a reliable ordered stream. Because of this, if a packet is lost, TCP has to stop and wait for that packet to be resent. 48 | This interrupts the steady stream of packets, because more recent packets must wait in a queue until the resent packet arrives, so that packets are received in the same order they were sent. 49 | 50 | What we need is a different type of reliability. 51 | 52 | Instead of having all data treated as a reliable ordered stream, we want to send packets at a steady rate and get notified when packets are received by the other computer. 53 | This allows time-sensitive data to get through without waiting for resent packets, while letting us make our own decision about how to handle packet loss at the application level. 54 | 55 | What TCP does is maintain a sliding window where the ACK sent is the sequence number of the next packet it expects to receive, in order. If TCP does not receive an ACK for a given packet, it stops and re-sends a packet with that sequence number again. This is exactly the behavior we want to avoid! 56 | 57 | It is not possible to implement a reliability system with these properties using TCP, so we have no choice but to roll our own reliability on top of UDP. Like UDP, TCP itself is built on top of IP. 58 | 59 | ## When to use TCP 60 | Of course there could be use-cases for TCP, like chat, asset streaming, etc. We can set up a TCP socket for this that is distinct from the UDP one.
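For everything that is time critical we stay on UDP, and laminar lets the application pick a delivery guarantee per packet instead of pushing all data through one reliable ordered stream; this is the 'different type of reliability' discussed above. A small, illustrative sketch using the `Packet` constructors that appear elsewhere in this repository (the destination address and payloads are placeholders):

```rust
use std::net::SocketAddr;

use laminar::Packet;

// illustrative only: pick a delivery guarantee per message type.
fn build_packets(destination: SocketAddr, pos: Vec<u8>, event: Vec<u8>, chat: Vec<u8>) -> Vec<Packet> {
    vec![
        // time-sensitive state: a lost packet is simply superseded by a newer one
        Packet::unreliable_sequenced(destination, pos, Some(1)),
        // must arrive, but its order relative to other messages does not matter
        Packet::reliable_unordered(destination, event),
        // must arrive and be delivered in order, e.g. chat messages
        Packet::reliable_ordered(destination, chat, Some(2)),
    ]
}
```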
61 | 62 | We could also make our UDP channel reliable, as described below, so that when we detect packet loss on the client we can construct a new packet. 63 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use std::{default::Default, time::Duration}; 2 | 3 | use crate::net::constants::{DEFAULT_MTU, FRAGMENT_SIZE_DEFAULT, MAX_FRAGMENTS_DEFAULT}; 4 | 5 | #[derive(Clone, Debug)] 6 | /// Contains the configuration options to configure laminar for special use-cases. 7 | pub struct Config { 8 | /// Make the underlying UDP socket block when true, otherwise non-blocking. 9 | pub blocking_mode: bool, 10 | /// Value which can specify the amount of time that can pass without hearing from a client before considering them disconnected. 11 | pub idle_connection_timeout: Duration, 12 | /// Value which specifies at which interval (if at all) a heartbeat should be sent, if no other packet was sent in the meantime. 13 | /// If None, no heartbeats will be sent (the default). 14 | pub heartbeat_interval: Option<Duration>, 15 | /// Value which can specify the maximum size a packet can be in bytes. This value is inclusive of fragmenting; if a packet is fragmented, the total size of the fragments cannot exceed this value. 16 | /// 17 | /// Recommended value: 16384. 18 | pub max_packet_size: usize, 19 | /// Value which can specify the maximal allowed fragments. 20 | /// 21 | /// Why can't I have more than 255 (u8)? 22 | /// This is because you don't want to send more than 256 fragments over UDP; with high numbers of fragments the chance of an invalid packet is very high. 23 | /// Use TCP instead (later we will probably support larger ranges, but every fragment packet then needs to be resent if it doesn't get an acknowledgment). 24 | /// 25 | /// Default: 16, but keep in mind that lower is better. 26 | pub max_fragments: u8, 27 | /// Value which can specify the size of a fragment. 28 | /// 29 | /// This is the maximum size of each fragment. It defaults to `1450` bytes, due to the default MTU on most network devices being `1500`. 30 | pub fragment_size: u16, 31 | /// Value which can specify the size of the buffer that queues up fragments ready to be reassembled once all fragments have arrived. 32 | pub fragment_reassembly_buffer_size: u16, 33 | /// Value that specifies the size of the buffer the UDP data will be read into. Defaults to `1450` bytes. 34 | pub receive_buffer_max_size: usize, 35 | /// Value which can specify the factor which will smooth out network jitter. 36 | /// 37 | /// Use-case: if one packet has not arrived, we don't immediately want to transition to a bad network state. 38 | /// Value that specifies the factor used to smooth out network jitter. It defaults to 10% of the round-trip time. It is expressed as a ratio, with 0 equal to 0% and 1 equal to 100%. This helps prevent flapping of `VirtualConnections`. 39 | pub rtt_smoothing_factor: f32, 40 | /// Value which can specify the maximal round trip time (RTT) for a packet. 41 | /// 42 | /// Value which specifies the maximum round trip time before we consider it a problem. This is expressed in milliseconds. 43 | pub rtt_max_value: u16, 44 | /// Value which can specify the event buffer we read socket events into. 45 | /// 46 | /// Value that specifies the size of the event buffer into which we receive socket events. Defaults to 1024.
47 | pub socket_event_buffer_size: usize, 48 | /// Value which can specify how long we should block polling for socket events. 49 | /// 50 | /// Value that specifies how long we should block polling for socket events, in milliseconds. Defaults to `1ms`. 51 | pub socket_polling_timeout: Option, 52 | /// The maximum amount of reliable packets in flight on this connection before we drop the 53 | /// connection. 54 | /// 55 | /// When we send a reliable packet, it is stored locally until an acknowledgement comes back to 56 | /// us, if that store grows to a size. 57 | pub max_packets_in_flight: u16, 58 | } 59 | 60 | impl Default for Config { 61 | fn default() -> Self { 62 | Self { 63 | blocking_mode: false, 64 | idle_connection_timeout: Duration::from_secs(5), 65 | heartbeat_interval: None, 66 | max_packet_size: (MAX_FRAGMENTS_DEFAULT * FRAGMENT_SIZE_DEFAULT) as usize, 67 | max_fragments: MAX_FRAGMENTS_DEFAULT as u8, 68 | fragment_size: FRAGMENT_SIZE_DEFAULT, 69 | fragment_reassembly_buffer_size: 64, 70 | receive_buffer_max_size: DEFAULT_MTU as usize, 71 | rtt_smoothing_factor: 0.10, 72 | rtt_max_value: 250, 73 | socket_event_buffer_size: 1024, 74 | socket_polling_timeout: Some(Duration::from_millis(1)), 75 | max_packets_in_flight: 512, 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent none 3 | stages { 4 | stage('Check Formatting') { 5 | environment { 6 | CARGO_HOME = '/home/jenkins/.cargo' 7 | RUSTUP_HOME = '/home/jenkins/.rustup' 8 | RUSTFLAGS = "-D warnings" 9 | } 10 | agent { 11 | label 'linux' 12 | } 13 | steps { 14 | echo 'Checking formatting...' 15 | sh '$CARGO_HOME/bin/cargo fmt -- --check' 16 | } 17 | } 18 | stage('Run Clippy') { 19 | environment { 20 | CARGO_HOME = '/home/jenkins/.cargo' 21 | RUSTUP_HOME = '/home/jenkins/.rustup' 22 | RUSTFLAGS = "-D warnings" 23 | } 24 | agent { 25 | label 'linux' 26 | } 27 | steps { 28 | echo 'Running Clippy...' 29 | sh '$CARGO_HOME/bin/cargo clippy --all --all-features -- -D warnings' 30 | } 31 | } 32 | stage('Run Tests') { 33 | parallel { 34 | stage("Test on Windows") { 35 | environment { 36 | CARGO_HOME = 'C:\\Users\\root\\.cargo' 37 | RUSTUP_HOME = 'C:\\Users\\root\\.rustup' 38 | } 39 | agent { 40 | label 'windows' 41 | } 42 | steps { 43 | echo 'Cleaning...' 44 | bat 'C:\\Users\\root\\.cargo\\bin\\cargo clean' 45 | echo 'Beginning tests...' 46 | bat 'C:\\Users\\root\\.cargo\\bin\\cargo test --features="tester"' 47 | echo 'Tests done!' 48 | } 49 | } 50 | stage("Test on Linux") { 51 | environment { 52 | CARGO_HOME = '/home/jenkins/.cargo' 53 | RUSTUP_HOME = '/home/jenkins/.rustup' 54 | } 55 | agent { 56 | label 'linux' 57 | } 58 | steps { 59 | echo 'Cleaning...' 60 | sh '/home/jenkins/.cargo/bin/cargo clean' 61 | echo 'Beginning tests...' 62 | sh '/home/jenkins/.cargo/bin/cargo test --features="tester"' 63 | echo 'Tests done!' 64 | } 65 | } 66 | // Skip macOS stage for now until we can get a stable machine to run on 67 | /* stage("Test on macOS") { 68 | environment { 69 | CARGO_HOME = '/Users/jenkins/.cargo' 70 | RUSTUP_HOME = '/Users/jenkins/.rustup' 71 | } 72 | agent { 73 | label 'mac' 74 | } 75 | steps { 76 | echo 'Cleaning...' 77 | sh '/Users/jenkins/.cargo/bin/cargo clean' 78 | echo 'Beginning tests...' 79 | sh '/Users/jenkins/.cargo/bin/cargo test --features="tester"' 80 | echo 'Tests done!' 
81 | } 82 | } */ 83 | } 84 | } 85 | stage('Calculate Coverage') { 86 | environment { 87 | CARGO_HOME = '/home/jenkins/.cargo' 88 | RUSTUP_HOME = '/home/jenkins/.rustup' 89 | RUSTFLAGS = "-D warnings" 90 | } 91 | agent { 92 | label 'linux' 93 | } 94 | steps { 95 | withCredentials([string(credentialsId: 'codecov_token', variable: 'CODECOV_TOKEN')]) { 96 | echo 'Calculating code coverage...' 97 | sh 'for file in target/debug/laminar-[a-f0-9]*[^\\.d]; do mkdir -p \"target/cov/$(basename $file)\"; kcov --exclude-pattern=/.cargo,/usr/lib --verify \"target/cov/$(basename $file)\" \"$file\"; done' 98 | echo "Uploading coverage..." 99 | sh "curl -s https://codecov.io/bash | bash -s - -t $CODECOV_TOKEN" 100 | echo "Uploaded code coverage!" 101 | } 102 | } 103 | } 104 | stage('Publish book') { 105 | when { 106 | branch 'master' 107 | } 108 | steps{ 109 | echo 'Uploading book here' 110 | } 111 | } 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /tests/unreliable_packets_test.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "tester")] 2 | use std::net::SocketAddr; 3 | #[cfg(feature = "tester")] 4 | use std::{thread, time::Duration}; 5 | 6 | #[cfg(feature = "tester")] 7 | use log::{debug, error, info}; 8 | 9 | #[cfg(feature = "tester")] 10 | use common::{client_addr, Client, Server, ServerEvent}; 11 | #[cfg(feature = "tester")] 12 | use laminar::{DeliveryGuarantee, OrderingGuarantee, Packet}; 13 | 14 | #[cfg(feature = "tester")] 15 | mod common; 16 | 17 | #[test] 18 | #[cfg(feature = "tester")] 19 | fn send_receive_unreliable_packets() { 20 | let client_addr = client_addr(); 21 | let listen_addr: SocketAddr = "127.0.0.1:12346".parse().unwrap(); 22 | let server = Server::new(listen_addr); 23 | 24 | let client = Client::new(Duration::from_millis(1), 5000); 25 | 26 | let assert_function = move |packet: Packet| { 27 | assert_eq!(packet.order_guarantee(), OrderingGuarantee::None); 28 | assert_eq!(packet.delivery_guarantee(), DeliveryGuarantee::Unreliable); 29 | assert_eq!(packet.payload(), payload().as_slice()); 30 | }; 31 | 32 | let packet_factory = move || -> Packet { Packet::unreliable(listen_addr, payload()) }; 33 | 34 | let server_handle = server.start_receiving(assert_function); 35 | 36 | client 37 | .run_instance(packet_factory, client_addr) 38 | .wait_until_finished(); 39 | 40 | // give the server time to process all packets. 41 | thread::sleep(Duration::from_millis(200)); 42 | 43 | server_handle.shutdown(); 44 | 45 | for event in server_handle.iter_events().collect::>() { 46 | match event { 47 | ServerEvent::Throughput(throughput) => { 48 | debug!("Throughput: {}", throughput); 49 | } 50 | ServerEvent::AverageThroughput(avg_throughput) => { 51 | debug!("Avg. 
Throughput: {}", avg_throughput); 52 | } 53 | ServerEvent::TotalSent(total) => { 54 | debug!("Total Packets Received {}", total); 55 | } 56 | ServerEvent::SocketEvent(event) => { 57 | info!("Socket Event: {:?}", event); 58 | } 59 | } 60 | } 61 | 62 | server_handle.wait_until_finished(); 63 | } 64 | 65 | #[test] 66 | #[cfg(feature = "tester")] 67 | fn send_receive_unreliable_packets_muliple_clients() { 68 | let listen_addr: SocketAddr = "127.0.0.1:12345".parse().unwrap(); 69 | let server = Server::new(listen_addr); 70 | let client = Client::new(Duration::from_millis(16), 500); 71 | 72 | let assert_function = move |packet: Packet| { 73 | assert_eq!(packet.order_guarantee(), OrderingGuarantee::None); 74 | assert_eq!(packet.delivery_guarantee(), DeliveryGuarantee::Unreliable); 75 | assert_eq!(packet.payload(), payload().as_slice()); 76 | }; 77 | 78 | let packet_factory = move || -> Packet { Packet::unreliable(listen_addr, payload()) }; 79 | 80 | let server_handle = server.start_receiving(assert_function); 81 | 82 | let received = server_handle.event_receiver(); 83 | 84 | let handle = thread::spawn(move || loop { 85 | match received.recv() { 86 | Ok(event) => { 87 | match event { 88 | ServerEvent::Throughput(throughput) => { 89 | info!("Throughput: {}", throughput); 90 | } 91 | ServerEvent::AverageThroughput(avg_throughput) => { 92 | info!("Avg. Throughput: {}", avg_throughput); 93 | } 94 | ServerEvent::TotalSent(total) => { 95 | info!("Total Received: {}", total); 96 | } 97 | ServerEvent::SocketEvent(event) => { 98 | info!("Socket Event: {:?}", event); 99 | } 100 | }; 101 | } 102 | Err(_) => { 103 | error!("Stopped receiving events; closing event handler."); 104 | return; 105 | } 106 | } 107 | }); 108 | 109 | let mut clients = Vec::new(); 110 | 111 | for _ in 0..10 { 112 | clients.push(client.run_instance(packet_factory, client_addr())); 113 | info!("Client started."); 114 | } 115 | 116 | for client in clients { 117 | client.wait_until_finished(); 118 | info!("Client finished."); 119 | } 120 | 121 | info!("Waiting 2 seconds"); 122 | // give the server time to process all packets. 123 | thread::sleep(Duration::from_millis(2000)); 124 | info!("Shutting down server!"); 125 | server_handle.shutdown(); 126 | server_handle.wait_until_finished(); 127 | info!("Server is stopped"); 128 | handle.join().unwrap(); 129 | } 130 | 131 | pub fn payload() -> Vec { 132 | vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9] 133 | } 134 | -------------------------------------------------------------------------------- /src/net/quality.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use crate::config::Config; 4 | use crate::sequence_buffer::CongestionData; 5 | 6 | /// Represents the quality of a network. 7 | #[allow(dead_code)] 8 | pub enum NetworkQuality { 9 | /// Connection is generally good, minimal packet loss or latency. 10 | Good, 11 | /// Connection is generally bad, having an impact on game performance. 12 | Bad, 13 | } 14 | 15 | /// This type helps with calculating the round trip time from any packet. 16 | /// It is able to smooth out the network jitter if there is any. 17 | pub struct RttMeasurer { 18 | config: Config, 19 | rtt: f32, 20 | } 21 | 22 | impl RttMeasurer { 23 | /// Creates and returns a new RttMeasurer. 24 | pub fn new(config: &Config) -> RttMeasurer { 25 | RttMeasurer { 26 | config: config.clone(), 27 | rtt: 0., 28 | } 29 | } 30 | 31 | /// Calculates the round trip time (rtt) from the given acknowledgment. 
32 | /// Afterwards it updates the rtt for the given connection. 33 | pub fn calculate_rrt(&mut self, congestion_data: Option<&mut CongestionData>) { 34 | self.rtt = self.get_smoothed_rtt(congestion_data); 35 | } 36 | 37 | #[cfg(test)] 38 | pub fn get_rtt(&self) -> f32 { 39 | self.rtt 40 | } 41 | 42 | /// This will get the smoothed round trip time (rtt) from the time we last heard from a packet. 43 | fn get_smoothed_rtt(&self, congestion_avoidance_entry: Option<&mut CongestionData>) -> f32 { 44 | match congestion_avoidance_entry { 45 | Some(avoidance_data) => { 46 | let elapsed_time = avoidance_data.sending_time.elapsed(); 47 | 48 | let rtt_time = self.as_milliseconds(elapsed_time); 49 | 50 | self.smooth_out_rtt(rtt_time) 51 | } 52 | None => 0.0, 53 | } 54 | } 55 | 56 | /// Converts a duration to milliseconds. 57 | /// 58 | /// `as_milliseconds` is not yet supported in stable Rust. 59 | /// See this stackoverflow post for more info: https://stackoverflow.com/questions/36816072/how-do-i-get-a-duration-as-a-number-of-milliseconds-in-rust 60 | fn as_milliseconds(&self, duration: Duration) -> u64 { 61 | let nanos = u64::from(duration.subsec_nanos()); 62 | (1000 * 1000 * 1000 * duration.as_secs() + nanos) / (1000 * 1000) 63 | } 64 | 65 | /// Smooths out the round trip time (rtt) value by the specified smoothing factor. 66 | /// 67 | /// First we subtract the max allowed rtt. 68 | /// This way we can see by how much we are off from the max allowed rtt. 69 | /// Then we multiply by our smoothing factor. 70 | /// 71 | /// We do this so that if one packet has a bad rtt it will not directly bring down our network quality estimation. 72 | /// The default is 10% smoothing, so if in total our packet is 50 milliseconds later than the max allowed rtt, we will increase our rtt estimation by 5. 73 | fn smooth_out_rtt(&self, rtt: u64) -> f32 { 74 | let exceeded_rrt_time = rtt as i64 - i64::from(self.config.rtt_max_value); 75 | exceeded_rrt_time as f32 * self.config.rtt_smoothing_factor 76 | } 77 | } 78 | 79 | #[cfg(test)] 80 | mod test { 81 | use std::net::ToSocketAddrs; 82 | use std::time::{Duration, Instant}; 83 | 84 | use crate::config::Config; 85 | use crate::net::VirtualConnection; 86 | 87 | use super::RttMeasurer; 88 | 89 | static TEST_HOST_IP: &str = "127.0.0.1"; 90 | static TEST_PORT: &str = "20000"; 91 | 92 | #[test] 93 | fn test_create_connection() { 94 | let mut addr = format!("{}:{}", TEST_HOST_IP, TEST_PORT) 95 | .to_socket_addrs() 96 | .unwrap(); 97 | let _new_conn = 98 | VirtualConnection::new(addr.next().unwrap(), &Config::default(), Instant::now()); 99 | } 100 | 101 | #[test] 102 | fn convert_duration_to_milliseconds_test() { 103 | let network_quality = RttMeasurer::new(&Config::default()); 104 | let milliseconds1 = network_quality.as_milliseconds(Duration::from_secs(1)); 105 | let milliseconds2 = network_quality.as_milliseconds(Duration::from_millis(1500)); 106 | let milliseconds3 = network_quality.as_milliseconds(Duration::from_millis(1671)); 107 | 108 | assert_eq!(milliseconds1, 1000); 109 | assert_eq!(milliseconds2, 1500); 110 | assert_eq!(milliseconds3, 1671); 111 | } 112 | 113 | #[test] 114 | fn smooth_out_rtt() { 115 | let mut config = Config::default(); 116 | // for test purposes make sure we set the smoothing factor to 10%.
117 | config.rtt_smoothing_factor = 0.10; 118 | config.rtt_max_value = 250; 119 | 120 | let network_quality = RttMeasurer::new(&config); 121 | let smoothed_rtt = network_quality.smooth_out_rtt(300); 122 | 123 | // 300ms has exceeded 50ms over the max allowed rtt. So we check if or smoothing factor is now 10% from 50. 124 | assert_eq!((smoothed_rtt - 5.0f32).abs() < std::f32::EPSILON, true); 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /tests/basic_socket_test.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashSet, net::SocketAddr, time::Instant}; 2 | 3 | #[cfg(feature = "tester")] 4 | use laminar::LinkConditioner; 5 | use laminar::{Config, Packet, Socket, SocketEvent}; 6 | 7 | #[test] 8 | fn binding_to_any() { 9 | // bind to 10 different addresses 10 | let sock_without_config = (0..5).map(|_| Socket::bind_any()); 11 | let sock_with_config = (0..5).map(|_| Socket::bind_any_with_config(Config::default())); 12 | 13 | let valid_socks: Vec<_> = sock_without_config 14 | .chain(sock_with_config) 15 | .filter_map(|sock| sock.ok()) 16 | .collect(); 17 | assert_eq!(valid_socks.len(), 10); 18 | 19 | let unique_addresses: HashSet<_> = valid_socks 20 | .into_iter() 21 | .map(|sock| sock.local_addr().unwrap()) 22 | .collect(); 23 | assert_eq!(unique_addresses.len(), 10); 24 | } 25 | 26 | #[test] 27 | fn blocking_sender_and_receiver() { 28 | let cfg = Config::default(); 29 | 30 | let mut client = Socket::bind_any_with_config(cfg.clone()).unwrap(); 31 | let mut server = Socket::bind_any_with_config(Config { 32 | blocking_mode: true, 33 | ..cfg 34 | }) 35 | .unwrap(); 36 | 37 | let server_addr = server.local_addr().unwrap(); 38 | let client_addr = client.local_addr().unwrap(); 39 | 40 | let time = Instant::now(); 41 | 42 | client 43 | .send(Packet::unreliable(server_addr, b"Hello world!".to_vec())) 44 | .unwrap(); 45 | 46 | client.manual_poll(time); 47 | server.manual_poll(time); 48 | 49 | assert_eq![SocketEvent::Connect(client_addr), server.recv().unwrap()]; 50 | if let SocketEvent::Packet(packet) = server.recv().unwrap() { 51 | assert_eq![b"Hello world!", packet.payload()]; 52 | } else { 53 | panic!["Did not receive a packet when it should"]; 54 | } 55 | } 56 | 57 | #[test] 58 | fn local_addr() { 59 | let port = 40000; 60 | let socket = 61 | Socket::bind(format!("127.0.0.1:{}", port).parse::().unwrap()).unwrap(); 62 | assert_eq!(port, socket.local_addr().unwrap().port()); 63 | } 64 | 65 | #[test] 66 | #[cfg(feature = "tester")] 67 | fn use_link_conditioner() { 68 | let mut client = Socket::bind_any().unwrap(); 69 | let mut server = Socket::bind_any().unwrap(); 70 | 71 | let server_addr = server.local_addr().unwrap(); 72 | 73 | let link_conditioner = { 74 | let mut lc = LinkConditioner::new(); 75 | lc.set_packet_loss(1.0); 76 | Some(lc) 77 | }; 78 | 79 | client.set_link_conditioner(link_conditioner); 80 | client 81 | .send(Packet::unreliable(server_addr, b"Hello world!".to_vec())) 82 | .unwrap(); 83 | 84 | let time = Instant::now(); 85 | client.manual_poll(time); 86 | server.manual_poll(time); 87 | 88 | assert_eq!(server.recv().is_none(), true); 89 | } 90 | 91 | #[test] 92 | #[cfg(feature = "tester")] 93 | fn poll_in_thread() { 94 | use std::thread; 95 | let mut server = Socket::bind_any().unwrap(); 96 | let mut client = Socket::bind_any().unwrap(); 97 | let server_addr = server.local_addr().unwrap(); 98 | 99 | // get sender and receiver from server, and start polling in separate 
thread 100 | let (sender, receiver) = (server.get_packet_sender(), server.get_event_receiver()); 101 | let _thread = thread::spawn(move || server.start_polling()); 102 | 103 | // server will responde to this 104 | client 105 | .send(Packet::reliable_unordered(server_addr, b"Hello!".to_vec())) 106 | .expect("This should send"); 107 | // this will break the loop 108 | client 109 | .send(Packet::reliable_unordered(server_addr, b"Bye!".to_vec())) 110 | .expect("This should send"); 111 | client.manual_poll(Instant::now()); 112 | 113 | // listen for received server messages, and break when "Bye!" is received. 114 | loop { 115 | if let Ok(event) = receiver.recv() { 116 | if let SocketEvent::Packet(packet) = event { 117 | let msg = packet.payload(); 118 | 119 | if msg == b"Bye!" { 120 | break; 121 | } 122 | 123 | sender 124 | .send(Packet::reliable_unordered( 125 | packet.addr(), 126 | b"Hi, there!".to_vec(), 127 | )) 128 | .expect("This should send"); 129 | } 130 | } 131 | } 132 | // loop until we get response from server. 133 | loop { 134 | client.manual_poll(Instant::now()); 135 | if let Some(packet) = client.recv() { 136 | assert_eq!( 137 | packet, 138 | SocketEvent::Packet(Packet::reliable_unordered( 139 | server_addr, 140 | b"Hi, there!".to_vec() 141 | )) 142 | ); 143 | break; 144 | } 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /docs/md_book/src/reliability/reliability.md: -------------------------------------------------------------------------------- 1 | # Reliability 2 | 3 | So let's talk about reliability. 4 | This is a very important concept which could be at first sight difficult but which will be very handy later on. 5 | 6 | As you know we have two opposites, TCP on one hand and UDP on the other. 7 | TCP has a lot of feature UDP does not have, like shown below. 8 | 9 | _TCP_ 10 | - Guarantee of delivery. 11 | - Guarantee for order. 12 | - Packets will not be dropped. 13 | - Duplication not possible. 14 | - Automatic [fragmentation](./../fragmentation.md). 15 | 16 | _UDP_ 17 | - Unreliable. 18 | - No guarantee for delivery. 19 | - No guarantee for order. 20 | - No way of getting the dropped packet. 21 | - Duplication possible. 22 | - No [fragmentation](./../fragmentation.md). 23 | 24 | It would be useful if we could somehow specify the features we want on top of UDP. 25 | Like that you say: I want the guarantee for my packets to arrive, however they don't need to be in order. 26 | Or, I don't care if my packet arrives but I do want to receive only new ones. 27 | 28 | Before continuing, it would be helpful to understand the difference between ordering and sequencing: [ordering documentation](ordering.md) 29 | 30 | ## The 5 Reliability Guarantees 31 | Laminar provides 5 different ways for you to send your data: 32 | 33 | | Reliability Type | Packet Drop | Packet Duplication | Packet Order | Packet Fragmentation |Packet Delivery| 34 | | :-------------: | :-------------: | :-------------: | :-------------: | :-------------: | :-------------: 35 | | **Unreliable Unordered** | Any | Yes | No | No | No 36 | | **Unreliable Sequenced** | Any + old | No | Sequenced | No | No 37 | | **Reliable Unordered** | No | No | No | Yes | Yes 38 | | **Reliable Ordered** | No | No | Ordered | Yes | Yes 39 | | **Reliable Sequenced** | Only old | No | Sequenced | Yes | Only newest 40 | 41 | 42 | ## Unreliable 43 | Unreliable: Packets can be dropped, duplicated or arrive in any order. 
44 | 45 | **Details** 46 | 47 | | Packet Drop | Packet Duplication | Packet Order | Packet Fragmentation | Packet Delivery | 48 | | :-------------: | :-------------: | :-------------: | :-------------: | :-------------: | 49 | | Any | Yes | No | No | No 50 | 51 | Basically just bare UDP. The packet may or may not be delivered. 52 | 53 | ## Unreliable Sequenced 54 | Unreliable Sequenced: Packets can be dropped, but they cannot be duplicated and will arrive in sequence. 55 | 56 | *Details* 57 | 58 | | Packet Drop | Packet Duplication | Packet Order | Packet Fragmentation | Packet Delivery | 59 | | :-------------: | :-------------: | :-------------: | :-------------: | :-------------: | 60 | | Any + old | No | Sequenced | No | No 61 | 62 | Basically just bare UDP, free to be dropped, but with some sequencing applied so that only the newest packets are kept. 63 | 64 | ## Reliable Unordered 65 | Reliable Unordered: All packets will be sent and received, but without order. 66 | 67 | *Details* 68 | 69 | | Packet Drop | Packet Duplication | Packet Order | Packet Fragmentation | Packet Delivery | 70 | | :-------------: | :-------------: | :-------------: | :-------------: | :-------------: | 71 | | No | No | No | Yes | Yes 72 | 73 | Basically, this is almost TCP without ordering of packets. 74 | 75 | ## Reliable Ordered 76 | Reliable Ordered: All packets will be sent and received, in the order in which they were sent. 77 | 78 | *Details* 79 | 80 | | Packet Drop | Packet Duplication | Packet Order | Packet Fragmentation | Packet Delivery | 81 | | :-------------: | :-------------: | :-------------: | :-------------: | :-------------: | 82 | | No | No | Ordered | Yes | Yes 83 | 84 | Basically this is almost like TCP. 85 | 86 | ## Reliable Sequenced 87 | Reliable Sequenced: All packets will be sent and received, but arranged in sequence. 88 | This means that only the newest packets will be let through; older packets will still be received, but they won't get to the user. 89 | 90 | *Details* 91 | 92 | | Packet Drop | Packet Duplication | Packet Order | Packet Fragmentation | Packet Delivery | 93 | | :-------------: | :-------------: | :-------------: | :-------------: | :-------------: | 94 | | Only old | No | Sequenced | Yes | Only newest 95 | 96 | Basically this is almost TCP-like, but with sequencing instead of ordering.
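The example below shows how to construct packets with each guarantee on the sending side. On the receiving side every variant arrives through the same event loop, and the chosen guarantees can be inspected on the packet itself; a minimal sketch, assuming the accessor names used in this repository's integration tests (`delivery_guarantee()` and `order_guarantee()`):

```rust
use std::time::Instant;

use laminar::{DeliveryGuarantee, OrderingGuarantee, Socket, SocketEvent};

let mut socket = Socket::bind("127.0.0.1:12347")?;
socket.manual_poll(Instant::now());

while let Some(event) = socket.recv() {
    if let SocketEvent::Packet(packet) = event {
        // The guarantees chosen by the sender travel with the packet.
        match (packet.delivery_guarantee(), packet.order_guarantee()) {
            (DeliveryGuarantee::Reliable, OrderingGuarantee::Ordered(_)) => {
                // e.g. chat: nothing may be lost and order must be preserved
            }
            (DeliveryGuarantee::Unreliable, OrderingGuarantee::Sequenced(_)) => {
                // e.g. position updates: only the newest value matters
            }
            _ => { /* remaining combinations */ }
        }
    }
}
```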
97 | 98 | 99 | ### Example 100 | ```rust 101 | use laminar::Packet; 102 | 103 | // Creates packets with different reliabilities 104 | let unreliable = Packet::unreliable(destination, bytes); 105 | let reliable = Packet::reliable_unordered(destination, bytes); 106 | 107 | // Specifies on which stream and how to order our packets, checkout our book and documentation for more information 108 | let unreliable = Packet::unreliable_sequenced(destination, bytes, Some(1)); 109 | let reliable_sequenced = Packet::reliable_sequenced(destination, bytes, Some(2)); 110 | let reliable_ordered = Packet::reliable_ordered(destination, bytes, Some(3)); 111 | ``` 112 | 113 | # Related 114 | - [RakNet Reliability Types](http://www.jenkinssoftware.com/raknet/manual/reliabilitytypes.html) -------------------------------------------------------------------------------- /src/packet/header/standard_header.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | use std::io::Cursor; 3 | 4 | use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; 5 | 6 | use crate::error::Result; 7 | use crate::net::constants::STANDARD_HEADER_SIZE; 8 | use crate::packet::{DeliveryGuarantee, EnumConverter, OrderingGuarantee, PacketType}; 9 | use crate::protocol_version::ProtocolVersion; 10 | 11 | use super::{HeaderReader, HeaderWriter}; 12 | 13 | #[derive(Copy, Clone, Debug)] 14 | /// This header will be included in each packet, and contains some basic information. 15 | pub struct StandardHeader { 16 | protocol_version: u16, 17 | packet_type: PacketType, 18 | delivery_guarantee: DeliveryGuarantee, 19 | ordering_guarantee: OrderingGuarantee, 20 | } 21 | 22 | impl StandardHeader { 23 | /// Creates new header. 24 | pub fn new( 25 | delivery_guarantee: DeliveryGuarantee, 26 | ordering_guarantee: OrderingGuarantee, 27 | packet_type: PacketType, 28 | ) -> Self { 29 | StandardHeader { 30 | protocol_version: ProtocolVersion::get_crc16(), 31 | delivery_guarantee, 32 | ordering_guarantee, 33 | packet_type, 34 | } 35 | } 36 | 37 | /// Returns the protocol version 38 | #[cfg(test)] 39 | pub fn protocol_version(&self) -> u16 { 40 | self.protocol_version 41 | } 42 | 43 | /// Returns the DeliveryGuarantee 44 | pub fn delivery_guarantee(&self) -> DeliveryGuarantee { 45 | self.delivery_guarantee 46 | } 47 | 48 | /// Returns the OrderingGuarantee 49 | pub fn ordering_guarantee(&self) -> OrderingGuarantee { 50 | self.ordering_guarantee 51 | } 52 | 53 | /// Returns the PacketType 54 | pub fn packet_type(&self) -> PacketType { 55 | self.packet_type 56 | } 57 | 58 | /// Returns true if the packet is a heartbeat packet, false otherwise 59 | pub fn is_heartbeat(&self) -> bool { 60 | self.packet_type == PacketType::Heartbeat 61 | } 62 | 63 | /// Returns true if the packet is a fragment, false if not 64 | pub fn is_fragment(&self) -> bool { 65 | self.packet_type == PacketType::Fragment 66 | } 67 | 68 | /// Checks if the protocol version in the packet is a valid version 69 | pub fn is_current_protocol(&self) -> bool { 70 | ProtocolVersion::valid_version(self.protocol_version) 71 | } 72 | } 73 | 74 | impl Default for StandardHeader { 75 | fn default() -> Self { 76 | StandardHeader::new( 77 | DeliveryGuarantee::Unreliable, 78 | OrderingGuarantee::None, 79 | PacketType::Packet, 80 | ) 81 | } 82 | } 83 | 84 | impl HeaderWriter for StandardHeader { 85 | type Output = Result<()>; 86 | 87 | fn parse(&self, buffer: &mut Vec) -> Self::Output { 88 | buffer.write_u16::(self.protocol_version)?; 89 | 
buffer.write_u8(self.packet_type.to_u8())?; 90 | buffer.write_u8(self.delivery_guarantee.to_u8())?; 91 | buffer.write_u8(self.ordering_guarantee.to_u8())?; 92 | Ok(()) 93 | } 94 | } 95 | 96 | impl HeaderReader for StandardHeader { 97 | type Header = Result; 98 | 99 | fn read(rdr: &mut Cursor<&[u8]>) -> Self::Header { 100 | let protocol_version = rdr.read_u16::()?; /* protocol id */ 101 | let packet_id = rdr.read_u8()?; 102 | let delivery_guarantee_id = rdr.read_u8()?; 103 | let order_guarantee_id = rdr.read_u8()?; 104 | 105 | let header = StandardHeader { 106 | protocol_version, 107 | packet_type: PacketType::try_from(packet_id)?, 108 | delivery_guarantee: DeliveryGuarantee::try_from(delivery_guarantee_id)?, 109 | ordering_guarantee: OrderingGuarantee::try_from(order_guarantee_id)?, 110 | }; 111 | 112 | Ok(header) 113 | } 114 | 115 | /// Returns the size of this header. 116 | fn size() -> u8 { 117 | STANDARD_HEADER_SIZE 118 | } 119 | } 120 | 121 | #[cfg(test)] 122 | mod tests { 123 | use std::io::Cursor; 124 | 125 | use crate::net::constants::STANDARD_HEADER_SIZE; 126 | use crate::packet::header::{HeaderReader, HeaderWriter, StandardHeader}; 127 | use crate::packet::{DeliveryGuarantee, EnumConverter, OrderingGuarantee, PacketType}; 128 | 129 | #[test] 130 | fn serialize() { 131 | let mut buffer = Vec::new(); 132 | let header = StandardHeader::new( 133 | DeliveryGuarantee::Unreliable, 134 | OrderingGuarantee::Sequenced(None), 135 | PacketType::Packet, 136 | ); 137 | assert![header.parse(&mut buffer).is_ok()]; 138 | 139 | // [0 .. 3] protocol version 140 | assert_eq!(buffer[2], PacketType::Packet.to_u8()); 141 | assert_eq!(buffer[3], DeliveryGuarantee::Unreliable.to_u8()); 142 | assert_eq!(buffer[4], OrderingGuarantee::Sequenced(None).to_u8()); 143 | } 144 | 145 | #[test] 146 | fn deserialize() { 147 | let buffer = vec![0, 1, 0, 1, 1]; 148 | 149 | let mut cursor = Cursor::new(buffer.as_slice()); 150 | 151 | let header = StandardHeader::read(&mut cursor).unwrap(); 152 | 153 | assert_eq!(header.protocol_version(), 1); 154 | assert_eq!(header.packet_type(), PacketType::Packet); 155 | assert_eq!(header.delivery_guarantee(), DeliveryGuarantee::Reliable); 156 | assert_eq!( 157 | header.ordering_guarantee(), 158 | OrderingGuarantee::Sequenced(None) 159 | ); 160 | } 161 | 162 | #[test] 163 | fn size() { 164 | assert_eq!(StandardHeader::size(), STANDARD_HEADER_SIZE); 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /src/packet/enums.rs: -------------------------------------------------------------------------------- 1 | use std::convert::TryFrom; 2 | 3 | use crate::{ 4 | error::{DecodingErrorKind, ErrorKind}, 5 | packet::EnumConverter, 6 | }; 7 | 8 | /// Enum to specify how a packet should be delivered. 9 | #[derive(Copy, Clone, Debug, PartialOrd, PartialEq, Eq)] 10 | pub enum DeliveryGuarantee { 11 | /// Packet may or may not be delivered 12 | Unreliable, 13 | /// Packet will be delivered 14 | Reliable, 15 | } 16 | 17 | impl EnumConverter for DeliveryGuarantee { 18 | type Enum = DeliveryGuarantee; 19 | 20 | /// Returns an integer value from `DeliveryGuarantee` enum. 21 | fn to_u8(&self) -> u8 { 22 | *self as u8 23 | } 24 | } 25 | 26 | impl TryFrom for DeliveryGuarantee { 27 | type Error = ErrorKind; 28 | /// Gets the `DeliveryGuarantee` enum instance from integer value. 
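    /// A small illustration added for documentation only (marked `ignore` because it is a sketch,
    /// not a doctest shipped with the crate): id `1` maps back to `DeliveryGuarantee::Reliable`,
    /// while any id other than `0` or `1` yields a decoding error, mirroring the match below.
    ///
    /// ```ignore
    /// use std::convert::TryFrom;
    /// assert_eq!(DeliveryGuarantee::try_from(1).unwrap(), DeliveryGuarantee::Reliable);
    /// assert!(DeliveryGuarantee::try_from(42).is_err());
    /// ```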
29 | fn try_from(value: u8) -> Result { 30 | match value { 31 | 0 => Ok(DeliveryGuarantee::Unreliable), 32 | 1 => Ok(DeliveryGuarantee::Reliable), 33 | _ => Err(ErrorKind::DecodingError( 34 | DecodingErrorKind::DeliveryGuarantee, 35 | )), 36 | } 37 | } 38 | } 39 | 40 | /// Enum to specify how a packet should be arranged. 41 | #[derive(Copy, Clone, Debug, PartialOrd, PartialEq, Eq)] 42 | pub enum OrderingGuarantee { 43 | /// No arranging will be done. 44 | None, 45 | /// Packets will be arranged in sequence. 46 | Sequenced(Option), 47 | /// Packets will be arranged in order. 48 | Ordered(Option), 49 | } 50 | 51 | impl Default for OrderingGuarantee { 52 | fn default() -> Self { 53 | OrderingGuarantee::None 54 | } 55 | } 56 | 57 | impl EnumConverter for OrderingGuarantee { 58 | type Enum = OrderingGuarantee; 59 | 60 | /// Returns the integer value from `OrderingGuarantee` enum. 61 | fn to_u8(&self) -> u8 { 62 | match self { 63 | OrderingGuarantee::None => 0, 64 | OrderingGuarantee::Sequenced(_) => 1, 65 | OrderingGuarantee::Ordered(_) => 2, 66 | } 67 | } 68 | } 69 | 70 | impl TryFrom for OrderingGuarantee { 71 | type Error = ErrorKind; 72 | /// Returns the `OrderingGuarantee` enum instance from integer value. 73 | fn try_from(value: u8) -> Result { 74 | match value { 75 | 0 => Ok(OrderingGuarantee::None), 76 | 1 => Ok(OrderingGuarantee::Sequenced(None)), 77 | 2 => Ok(OrderingGuarantee::Ordered(None)), 78 | _ => Err(ErrorKind::DecodingError( 79 | DecodingErrorKind::OrderingGuarantee, 80 | )), 81 | } 82 | } 83 | } 84 | 85 | #[derive(Copy, Clone, Debug, PartialOrd, PartialEq)] 86 | /// Id to identify a certain packet type. 87 | pub enum PacketType { 88 | /// Full packet that is not fragmented 89 | Packet = 0, 90 | /// Fragment of a full packet 91 | Fragment = 1, 92 | /// Heartbeat packet 93 | Heartbeat = 2, 94 | } 95 | 96 | impl EnumConverter for PacketType { 97 | type Enum = PacketType; 98 | 99 | fn to_u8(&self) -> u8 { 100 | *self as u8 101 | } 102 | } 103 | 104 | impl TryFrom for PacketType { 105 | type Error = ErrorKind; 106 | fn try_from(value: u8) -> Result { 107 | match value { 108 | 0 => Ok(PacketType::Packet), 109 | 1 => Ok(PacketType::Fragment), 110 | 2 => Ok(PacketType::Heartbeat), 111 | _ => Err(ErrorKind::DecodingError(DecodingErrorKind::PacketType)), 112 | } 113 | } 114 | } 115 | 116 | #[cfg(test)] 117 | mod tests { 118 | use std::convert::TryFrom; 119 | 120 | use crate::packet::{ 121 | enums::{DeliveryGuarantee, OrderingGuarantee, PacketType}, 122 | EnumConverter, 123 | }; 124 | 125 | #[test] 126 | fn assure_parsing_ordering_guarantee() { 127 | let none = OrderingGuarantee::None; 128 | let ordered = OrderingGuarantee::Ordered(None); 129 | let sequenced = OrderingGuarantee::Sequenced(None); 130 | 131 | assert_eq!( 132 | OrderingGuarantee::None, 133 | OrderingGuarantee::try_from(none.to_u8()).unwrap() 134 | ); 135 | assert_eq!( 136 | OrderingGuarantee::Ordered(None), 137 | OrderingGuarantee::try_from(ordered.to_u8()).unwrap() 138 | ); 139 | assert_eq!( 140 | OrderingGuarantee::Sequenced(None), 141 | OrderingGuarantee::try_from(sequenced.to_u8()).unwrap() 142 | ) 143 | } 144 | 145 | #[test] 146 | fn assure_parsing_delivery_guarantee() { 147 | let unreliable = DeliveryGuarantee::Unreliable; 148 | let reliable = DeliveryGuarantee::Reliable; 149 | assert_eq!( 150 | DeliveryGuarantee::Unreliable, 151 | DeliveryGuarantee::try_from(unreliable.to_u8()).unwrap() 152 | ); 153 | assert_eq!( 154 | DeliveryGuarantee::Reliable, 155 | DeliveryGuarantee::try_from(reliable.to_u8()).unwrap() 156 | ) 
157 | } 158 | 159 | #[test] 160 | fn assure_parsing_packet_type() { 161 | let packet = PacketType::Packet; 162 | let fragment = PacketType::Fragment; 163 | let heartbeat = PacketType::Heartbeat; 164 | assert_eq!( 165 | PacketType::Packet, 166 | PacketType::try_from(packet.to_u8()).unwrap() 167 | ); 168 | assert_eq!( 169 | PacketType::Fragment, 170 | PacketType::try_from(fragment.to_u8()).unwrap() 171 | ); 172 | assert_eq!( 173 | PacketType::Heartbeat, 174 | PacketType::try_from(heartbeat.to_u8()).unwrap() 175 | ); 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /tests/common/server.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use std::thread::{self, JoinHandle}; 3 | use std::time::{Duration, Instant}; 4 | 5 | use crossbeam_channel::{Receiver, Sender, TryIter}; 6 | use log::error; 7 | 8 | use laminar::{Packet, Socket, SocketEvent, ThroughputMonitoring}; 9 | 10 | /// Enum with commands you can send to the server. 11 | #[derive(Debug)] 12 | pub enum ServerCommand { 13 | Shutdown, 14 | } 15 | 16 | /// Enums which events you can receive from the server. 17 | #[derive(Debug)] 18 | pub enum ServerEvent { 19 | Throughput(u32), 20 | AverageThroughput(u32), 21 | TotalSent(u32), 22 | SocketEvent(SocketEvent), 23 | } 24 | 25 | /// Represents a server which receives packets from some endpoint. 26 | pub struct Server { 27 | throughput_monitor: ThroughputMonitoring, 28 | listening_host: SocketAddr, 29 | } 30 | 31 | impl Server { 32 | /// Constructs a new `Server` instance. 33 | pub fn new(listening_host: SocketAddr) -> Server { 34 | Server { 35 | throughput_monitor: ThroughputMonitoring::new(Duration::from_millis(1000)), 36 | listening_host, 37 | } 38 | } 39 | 40 | /// Start to receive packets from some endpoint. 41 | /// This function takes in a closure with which a packet contents will be asserted. 42 | pub fn start_receiving(self, packet_assert: F) -> ServerHandle 43 | where 44 | F: Fn(Packet) + Send + Sized + 'static, 45 | { 46 | let mut socket = Socket::bind(self.listening_host).unwrap(); 47 | 48 | let (notify_tx, notify_rx) = crossbeam_channel::unbounded(); 49 | let (events_tx, events_rx) = crossbeam_channel::unbounded(); 50 | let mut throughput_monitor = self.throughput_monitor; 51 | 52 | let serve_handle = thread::spawn(move || { 53 | loop { 54 | socket.manual_poll(Instant::now()); 55 | match socket.recv() { 56 | Some(result) => match result { 57 | SocketEvent::Packet(p) => { 58 | packet_assert(p); 59 | if throughput_monitor.tick() { 60 | if let Err(e) = events_tx.send(ServerEvent::Throughput( 61 | throughput_monitor.last_throughput(), 62 | )) { 63 | error!("Client can not send packet {:?}", e); 64 | } 65 | } 66 | } 67 | _ => { 68 | if let Err(e) = events_tx.send(ServerEvent::SocketEvent(result)) { 69 | error!("Client cannot send packet {:?}", e); 70 | } 71 | } 72 | }, 73 | None => { 74 | // check if we received a notify to close the server. 
75 | match notify_rx.try_recv() { 76 | Ok(notify) => match notify { 77 | ServerCommand::Shutdown => { 78 | let result = || -> Result<(), crossbeam_channel::SendError> { 79 | events_tx.send(ServerEvent::AverageThroughput( 80 | throughput_monitor.average(), 81 | ))?; 82 | events_tx.send(ServerEvent::TotalSent( 83 | throughput_monitor.total_measured_ticks(), 84 | ))?; 85 | Ok(()) 86 | }; 87 | 88 | if let Err(e) = result() { 89 | error!("Unable to sent an event {:?}", e); 90 | }; 91 | 92 | return; 93 | } 94 | }, 95 | Err(e) => { 96 | if !e.is_empty() { 97 | error!( 98 | "Error occurred when trying to receive on notify channel" 99 | ); 100 | } 101 | } 102 | } 103 | } 104 | } 105 | } 106 | }); 107 | 108 | ServerHandle::new(serve_handle, notify_tx, events_rx, self.listening_host) 109 | } 110 | } 111 | 112 | /// Handle to the running server. 113 | pub struct ServerHandle { 114 | server_handle: JoinHandle<()>, 115 | notify_tx: Sender, 116 | events_rx: Receiver, 117 | pub listening_host: SocketAddr, 118 | } 119 | 120 | impl ServerHandle { 121 | /// Construct a new `ServerHandle` 122 | pub fn new( 123 | server_handle: JoinHandle<()>, 124 | notify_tx: Sender, 125 | events_rx: Receiver, 126 | listening_host: SocketAddr, 127 | ) -> ServerHandle { 128 | ServerHandle { 129 | server_handle, 130 | notify_tx, 131 | events_rx, 132 | listening_host, 133 | } 134 | } 135 | 136 | /// Send the shutdown signal to the server. 137 | pub fn shutdown(&self) { 138 | self.notify_tx.send(ServerCommand::Shutdown).unwrap(); 139 | } 140 | 141 | /// Wait until this server is finished, if no shutdown signal is send or no error has been thrown then this will be a blocking call. 142 | pub fn wait_until_finished(self) { 143 | self.server_handle.join().unwrap(); 144 | } 145 | 146 | /// Iterate over the events that have happened on the server. 
147 | pub fn iter_events(&self) -> TryIter { 148 | self.events_rx.try_iter() 149 | } 150 | 151 | #[allow(unused)] 152 | pub fn event_receiver(&self) -> Receiver { 153 | self.events_rx.clone() 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /src/bin/laminar-tester.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | clone::Clone, 3 | net::SocketAddr, 4 | process::exit, 5 | thread, 6 | time::{Duration, Instant}, 7 | }; 8 | 9 | use clap::{load_yaml, App, AppSettings}; 10 | use log::{debug, error, info}; 11 | 12 | use laminar::{Packet, Result, Socket, SocketEvent, ThroughputMonitoring}; 13 | 14 | fn main() { 15 | env_logger::init(); 16 | let yaml = load_yaml!("cli.yml"); 17 | let matches = App::from_yaml(yaml) 18 | .setting(AppSettings::ArgRequiredElseHelp) 19 | .get_matches(); 20 | 21 | if let Some(m) = matches.subcommand_matches("server") { 22 | process_server_subcommand(m.to_owned()); 23 | } 24 | if let Some(m) = matches.subcommand_matches("client") { 25 | process_client_subcommand(m.to_owned()); 26 | } 27 | } 28 | 29 | struct ClientConfiguration { 30 | listen_host: SocketAddr, 31 | destination: SocketAddr, 32 | _run_duration: Duration, 33 | packet_ps: u64, 34 | maximal_duration: Duration, 35 | test_name: String, 36 | } 37 | 38 | impl From> for ClientConfiguration { 39 | fn from(args: clap::ArgMatches<'_>) -> Self { 40 | ClientConfiguration { 41 | listen_host: args 42 | .value_of("LISTEN_HOST") 43 | .expect("No `LISTEN_HOST` argument provided!") 44 | .parse() 45 | .expect("Could not parse `LISTEN_HOST` argument!"), 46 | destination: args 47 | .value_of("CONNECT_ADDR") 48 | .expect("No `CONNECT_ADDR` argument provided!") 49 | .parse() 50 | .expect("Could not parse `CONNECT_ADDR` argument!"), 51 | _run_duration: Duration::from_secs( 52 | args.value_of("SHUTDOWN_TIMER") 53 | .expect("No `SHUTDOWN_TIMER` argument provided!") 54 | .parse() 55 | .expect("Could not parse `SHUTDOWN_TIMER` argument!"), 56 | ), 57 | packet_ps: args 58 | .value_of("PACKETS_PER_SECOND") 59 | .expect("No `PACKETS_PER_SECOND` argument provided!") 60 | .parse() 61 | .expect("Could not parse `PACKETS_PER_SECOND` argument!"), 62 | maximal_duration: Duration::from_secs( 63 | args.value_of("TEST_DURATION") 64 | .expect("No `TEST_DURATION` argument provided!") 65 | .parse() 66 | .expect("Could not parse `TEST_DURATION` argument!"), 67 | ), 68 | test_name: String::from( 69 | args.value_of("TEST_TO_RUN") 70 | .expect("No `TEST_TO_RUN` argument provided!"), 71 | ), 72 | } 73 | } 74 | } 75 | 76 | #[derive(Clone)] 77 | struct ServerConfiguration { 78 | listen_host: SocketAddr, 79 | run_duration: Duration, 80 | } 81 | 82 | impl From> for ServerConfiguration { 83 | fn from(args: clap::ArgMatches<'_>) -> Self { 84 | ServerConfiguration { 85 | listen_host: args 86 | .value_of("LISTEN_HOST") 87 | .expect("No `LISTEN_HOST` argument provided!") 88 | .parse() 89 | .expect("Could not parse `LISTEN_HOST` argument!"), 90 | run_duration: Duration::from_secs( 91 | args.value_of("SHUTDOWN_TIMER") 92 | .expect("No `SHUTDOWN_TIMER` argument provided!") 93 | .parse() 94 | .expect("Could not parse `SHUTDOWN_TIMER` argument!"), 95 | ), 96 | } 97 | } 98 | } 99 | 100 | fn process_server_subcommand(m: clap::ArgMatches<'_>) { 101 | let config = ServerConfiguration::from(m); 102 | 103 | let run_duration = config.run_duration; 104 | 105 | thread::spawn(move || { 106 | info!("Server started"); 107 | info!("Server listening on: {:?}", 
config.listen_host); 108 | run_server(config).expect("Server should run."); 109 | }); 110 | 111 | info!("Main thread sleeping"); 112 | thread::sleep(run_duration); 113 | info!("Shutting down..."); 114 | exit(0); 115 | } 116 | 117 | fn process_client_subcommand(m: clap::ArgMatches<'_>) { 118 | let client_config = ClientConfiguration::from(m); 119 | debug!("Endpoint is: {:?}", client_config.listen_host); 120 | debug!("Client destination is: {:?}", client_config.destination); 121 | run_client(client_config).expect("Client should run."); 122 | exit(0); 123 | } 124 | 125 | fn run_server(server_config: ServerConfiguration) -> Result<()> { 126 | let mut socket = Socket::bind(server_config.listen_host)?; 127 | 128 | let mut throughput = ThroughputMonitoring::new(Duration::from_secs(1)); 129 | 130 | loop { 131 | socket.manual_poll(Instant::now()); 132 | if let Some(event) = socket.recv() { 133 | match event { 134 | SocketEvent::Packet(_) => { 135 | println!["Got a packet"]; 136 | throughput.tick(); 137 | } 138 | SocketEvent::Connect(address) => { 139 | socket.send(Packet::unreliable(address, vec![0])).unwrap(); 140 | } 141 | _ => error!("Event not handled yet."), 142 | } 143 | } 144 | 145 | info!("{}", throughput); 146 | } 147 | } 148 | 149 | fn run_client(config: ClientConfiguration) -> Result<()> { 150 | let socket = Socket::bind(config.listen_host)?; 151 | 152 | // see which test we want to run 153 | match config.test_name.as_str() { 154 | "steady-stream" => { 155 | test_steady_stream(config, socket); 156 | exit(0); 157 | } 158 | _ => { 159 | error!("Invalid test name"); 160 | exit(1); 161 | } 162 | } 163 | } 164 | 165 | // Basic test where the client sends packets at a steady rate to the server 166 | fn test_steady_stream(config: ClientConfiguration, mut socket: Socket) { 167 | info!("Beginning steady-state test"); 168 | 169 | let test_packet = Packet::reliable_unordered(config.destination, config.test_name.into_bytes()); 170 | 171 | let time_quantum = 1000 / config.packet_ps as u64; 172 | let start_time = Instant::now(); 173 | let mut packets_sent = 0; 174 | 175 | loop { 176 | socket.send(test_packet.clone()).unwrap(); 177 | socket.manual_poll(Instant::now()); 178 | while let Some(_) = socket.recv() {} 179 | 180 | packets_sent += 1; 181 | 182 | if start_time.elapsed() >= config.maximal_duration { 183 | info!("Ending test!"); 184 | info!("Sent: {} packets", packets_sent); 185 | return; 186 | } 187 | 188 | thread::sleep(Duration::from_millis(time_quantum)) 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /src/net/connection_impl.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | use std::time::Instant; 3 | 4 | use log::error; 5 | 6 | use crate::error::{ErrorKind, Result}; 7 | use crate::packet::{DeliveryGuarantee, OutgoingPackets, Packet, PacketInfo}; 8 | 9 | use super::{ 10 | events::SocketEvent, Connection, ConnectionEventAddress, ConnectionMessenger, VirtualConnection, 11 | }; 12 | 13 | /// Required by `ConnectionManager` to properly handle connection event. 14 | impl ConnectionEventAddress for SocketEvent { 15 | /// Returns event address. 16 | fn address(&self) -> SocketAddr { 17 | match self { 18 | SocketEvent::Packet(packet) => packet.addr(), 19 | SocketEvent::Connect(addr) => *addr, 20 | SocketEvent::Timeout(addr) => *addr, 21 | } 22 | } 23 | } 24 | 25 | /// Required by `ConnectionManager` to properly handle user event. 
26 | impl ConnectionEventAddress for Packet { 27 | /// Returns event address. 28 | fn address(&self) -> SocketAddr { 29 | self.addr() 30 | } 31 | } 32 | 33 | impl Connection for VirtualConnection { 34 | /// Defines a user event type. 35 | type SendEvent = Packet; 36 | /// Defines a connection event type. 37 | type ReceiveEvent = SocketEvent; 38 | 39 | /// Creates new connection and initialize it by sending an connection event to the user. 40 | /// * address - defines a address that connection is associated with. 41 | /// * time - creation time, used by connection, so that it doesn't get dropped immediately or send heartbeat packet. 42 | /// * initial_data - if initiated by remote host, this will hold that a packet data. 43 | fn create_connection( 44 | messenger: &mut impl ConnectionMessenger, 45 | address: SocketAddr, 46 | time: Instant, 47 | initial_data: Option<&[u8]>, 48 | ) -> VirtualConnection { 49 | // emit connect event if this is initiated by the remote host. 50 | if initial_data.is_some() { 51 | messenger.send_event(&address, SocketEvent::Connect(address)); 52 | } 53 | VirtualConnection::new(address, messenger.config(), time) 54 | } 55 | 56 | /// Determines if the given `Connection` should be dropped due to its state. 57 | fn should_drop( 58 | &mut self, 59 | messenger: &mut impl ConnectionMessenger, 60 | time: Instant, 61 | ) -> bool { 62 | let should_drop = self.packets_in_flight() > messenger.config().max_packets_in_flight 63 | || self.last_heard(time) >= messenger.config().idle_connection_timeout; 64 | if should_drop { 65 | messenger.send_event( 66 | &self.remote_address, 67 | SocketEvent::Timeout(self.remote_address), 68 | ); 69 | } 70 | should_drop 71 | } 72 | 73 | /// Processes a received packet: parse it and emit an event. 74 | fn process_packet( 75 | &mut self, 76 | messenger: &mut impl ConnectionMessenger, 77 | payload: &[u8], 78 | time: Instant, 79 | ) { 80 | if !payload.is_empty() { 81 | match self.process_incoming(payload, time) { 82 | Ok(packets) => { 83 | for incoming in packets { 84 | messenger.send_event(&self.remote_address, SocketEvent::Packet(incoming.0)); 85 | } 86 | } 87 | Err(err) => error!("Error occured processing incomming packet: {:?}", err), 88 | } 89 | } else { 90 | error!( 91 | "Error processing packet: {}", 92 | ErrorKind::ReceivedDataToShort 93 | ); 94 | } 95 | } 96 | 97 | /// Processes a received event and send a packet. 98 | fn process_event( 99 | &mut self, 100 | messenger: &mut impl ConnectionMessenger, 101 | event: Self::SendEvent, 102 | time: Instant, 103 | ) { 104 | let addr = self.remote_address; 105 | send_packets( 106 | messenger, 107 | &addr, 108 | self.process_outgoing( 109 | PacketInfo::user_packet( 110 | event.payload(), 111 | event.delivery_guarantee(), 112 | event.order_guarantee(), 113 | ), 114 | None, 115 | time, 116 | ), 117 | "user packet", 118 | ); 119 | } 120 | 121 | /// Processes various connection-related tasks: resend dropped packets, send heartbeat packet, etc... 122 | /// This function gets called very frequently. 
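    /// In outline (a summary of the body below, not additional behaviour): packets reported as
    /// dropped are re-sent with `DeliveryGuarantee::Reliable` together with their stored ordering
    /// data, and a heartbeat is only emitted when `heartbeat_interval` is configured and nothing
    /// was sent within that interval.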
123 | fn update( 124 | &mut self, 125 | messenger: &mut impl ConnectionMessenger, 126 | time: Instant, 127 | ) { 128 | // resend dropped packets 129 | for dropped in self.gather_dropped_packets() { 130 | let packets = self.process_outgoing( 131 | PacketInfo { 132 | packet_type: dropped.packet_type, 133 | payload: &dropped.payload, 134 | // because a delivery guarantee is only sent with reliable packets 135 | delivery: DeliveryGuarantee::Reliable, 136 | // this is stored with the dropped packet because they could be mixed 137 | ordering: dropped.ordering_guarantee, 138 | }, 139 | dropped.item_identifier, 140 | time, 141 | ); 142 | send_packets(messenger, &self.remote_address, packets, "dropped packets"); 143 | } 144 | 145 | // send heartbeat packets if required 146 | if let Some(heartbeat_interval) = messenger.config().heartbeat_interval { 147 | let addr = self.remote_address; 148 | if self.last_sent(time) >= heartbeat_interval { 149 | send_packets( 150 | messenger, 151 | &addr, 152 | self.process_outgoing(PacketInfo::heartbeat_packet(&[]), None, time), 153 | "heatbeat packet", 154 | ); 155 | } 156 | } 157 | } 158 | } 159 | 160 | // Sends multiple outgoing packets. 161 | fn send_packets( 162 | ctx: &mut impl ConnectionMessenger, 163 | address: &SocketAddr, 164 | packets: Result, 165 | err_context: &str, 166 | ) { 167 | match packets { 168 | Ok(packets) => { 169 | for outgoing in packets { 170 | ctx.send_packet(address, &outgoing.contents()); 171 | } 172 | } 173 | Err(error) => error!("Error occured processing {}: {:?}", err_context, error), 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /src/packet/outgoing.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | net::constants::{DEFAULT_ORDERING_STREAM, DEFAULT_SEQUENCING_STREAM}, 3 | packet::{ 4 | header::{ 5 | AckedPacketHeader, ArrangingHeader, FragmentHeader, HeaderWriter, StandardHeader, 6 | }, 7 | DeliveryGuarantee, OrderingGuarantee, PacketType, 8 | }, 9 | }; 10 | 11 | /// Builder that could be used to construct an outgoing laminar packet. 12 | pub struct OutgoingPacketBuilder<'p> { 13 | header: Vec, 14 | payload: &'p [u8], 15 | } 16 | 17 | impl<'p> OutgoingPacketBuilder<'p> { 18 | /// Construct a new builder from the given `payload`. 19 | pub fn new(payload: &'p [u8]) -> OutgoingPacketBuilder<'p> { 20 | OutgoingPacketBuilder { 21 | header: Vec::new(), 22 | payload, 23 | } 24 | } 25 | 26 | /// Adds the `FragmentHeader` to the header. 27 | pub fn with_fragment_header(mut self, packet_seq: u16, id: u8, num_fragments: u8) -> Self { 28 | let header = FragmentHeader::new(packet_seq, id, num_fragments); 29 | 30 | header 31 | .parse(&mut self.header) 32 | .expect("Could not write fragment header to buffer"); 33 | 34 | self 35 | } 36 | 37 | /// Adds the [`StandardHeader`](./headers/standard_header) to the header. 38 | pub fn with_default_header( 39 | mut self, 40 | packet_type: PacketType, 41 | delivery_guarantee: DeliveryGuarantee, 42 | ordering_guarantee: OrderingGuarantee, 43 | ) -> Self { 44 | let header = StandardHeader::new(delivery_guarantee, ordering_guarantee, packet_type); 45 | header 46 | .parse(&mut self.header) 47 | .expect("Could not write default header to buffer"); 48 | 49 | self 50 | } 51 | 52 | /// Adds the [`AckedPacketHeader`](./headers/acked_packet_header) to the header. 
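    /// Judging by the serialization test at the bottom of this file, the acknowledgment header is
    /// written big-endian as `seq_num: u16`, `last_seq: u16`, `bit_field: u32`, i.e. 8 bytes placed
    /// in front of the payload.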
53 | pub fn with_acknowledgment_header( 54 | mut self, 55 | seq_num: u16, 56 | last_seq: u16, 57 | bit_field: u32, 58 | ) -> Self { 59 | let header = AckedPacketHeader::new(seq_num, last_seq, bit_field); 60 | header 61 | .parse(&mut self.header) 62 | .expect("Could not write acknowledgment header to buffer"); 63 | 64 | self 65 | } 66 | 67 | /// Adds the [`ArrangingHeader`](./headers/arranging_header) if needed. 68 | /// 69 | /// - `arranging_id` = identifier for this packet that needs to be sequenced. 70 | /// - `stream_id` = stream on which this packet will be sequenced. If `None` than the a default stream will be used. 71 | pub fn with_sequencing_header(mut self, arranging_id: u16, stream_id: Option) -> Self { 72 | let header = 73 | ArrangingHeader::new(arranging_id, stream_id.unwrap_or(DEFAULT_SEQUENCING_STREAM)); 74 | 75 | header 76 | .parse(&mut self.header) 77 | .expect("Could not write arranging header to buffer"); 78 | 79 | self 80 | } 81 | 82 | /// Adds the [`ArrangingHeader`](./headers/arranging_header) if needed. 83 | /// 84 | /// - `arranging_id` = identifier for this packet that needs to be ordered. 85 | /// - `stream_id` = stream on which this packet will be ordered. If `None` than the a default stream will be used. 86 | pub fn with_ordering_header(mut self, arranging_id: u16, stream_id: Option) -> Self { 87 | let header = 88 | ArrangingHeader::new(arranging_id, stream_id.unwrap_or(DEFAULT_ORDERING_STREAM)); 89 | 90 | header 91 | .parse(&mut self.header) 92 | .expect("Could not write arranging header to buffer"); 93 | 94 | self 95 | } 96 | 97 | /// Constructs an `OutgoingPacket` from the contents constructed with this builder. 98 | pub fn build(self) -> OutgoingPacket<'p> { 99 | OutgoingPacket { 100 | header: self.header, 101 | payload: self.payload, 102 | } 103 | } 104 | } 105 | 106 | /// Packet that that contains data which is ready to be sent to a remote endpoint. 107 | #[derive(Debug)] 108 | pub struct OutgoingPacket<'p> { 109 | header: Vec, 110 | payload: &'p [u8], 111 | } 112 | 113 | impl<'p> OutgoingPacket<'p> { 114 | /// Return the contents of this packet; the content includes the header and payload bytes. 115 | /// 116 | /// # Remark 117 | /// - Until here we could use a reference to the outgoing data but here we need to do a hard copy. 118 | /// Because the header could vary in size but should be in front of the payload provided by the user. 
119 | pub fn contents(&self) -> Box<[u8]> { 120 | [self.header.as_slice(), &self.payload] 121 | .concat() 122 | .into_boxed_slice() 123 | } 124 | } 125 | 126 | #[cfg(test)] 127 | mod tests { 128 | use crate::packet::PacketType; 129 | use crate::packet::{DeliveryGuarantee, OrderingGuarantee, OutgoingPacketBuilder}; 130 | 131 | fn test_payload() -> Vec { 132 | b"test".to_vec() 133 | } 134 | 135 | #[test] 136 | fn assure_creation_fragment_header() { 137 | let payload = test_payload(); 138 | 139 | let outgoing = OutgoingPacketBuilder::new(&payload) 140 | .with_fragment_header(0, 0, 0) 141 | .build(); 142 | 143 | let expected: Vec = [vec![0, 0, 0, 0], test_payload()].concat().to_vec(); 144 | 145 | assert_eq!(outgoing.contents().to_vec(), expected); 146 | } 147 | 148 | #[test] 149 | fn assure_creation_arranging_header() { 150 | let payload = test_payload(); 151 | 152 | let outgoing = OutgoingPacketBuilder::new(&payload) 153 | .with_sequencing_header(1, Some(2)) 154 | .build(); 155 | 156 | let expected: Vec = [vec![0, 1, 2], test_payload()].concat().to_vec(); 157 | 158 | assert_eq!(outgoing.contents().to_vec(), expected); 159 | } 160 | 161 | #[test] 162 | fn assure_creation_acknowledgment_header() { 163 | let payload = test_payload(); 164 | 165 | let outgoing = OutgoingPacketBuilder::new(&payload) 166 | .with_acknowledgment_header(1, 2, 3) 167 | .build(); 168 | 169 | let expected: Vec = [vec![0, 1, 0, 2, 0, 0, 0, 3], test_payload()] 170 | .concat() 171 | .to_vec(); 172 | 173 | assert_eq!(outgoing.contents().to_vec(), expected); 174 | } 175 | 176 | #[test] 177 | fn assure_creation_default_header() { 178 | let payload = test_payload(); 179 | 180 | let outgoing = OutgoingPacketBuilder::new(&payload) 181 | .with_default_header( 182 | PacketType::Packet, 183 | DeliveryGuarantee::Reliable, 184 | OrderingGuarantee::Sequenced(None), 185 | ) 186 | .build(); 187 | 188 | let expected: Vec = [vec![0, 1, 1], test_payload()].concat().to_vec(); 189 | 190 | assert_eq!( 191 | outgoing.contents()[2..outgoing.contents().len()].to_vec(), 192 | expected 193 | ); 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Laminar 2 | 3 | [![Build Status][s2]][l2] [![Latest Version][s1]][l1] [![docs.rs][s4]][l4] [![Join us on Discord][s5]][l5] [![MIT/Apache][s3]][l3] ![Lines of Code][s6] ![Coverage][s7] 4 | 5 | [s1]: https://img.shields.io/crates/v/laminar.svg 6 | [l1]: https://crates.io/crates/laminar 7 | [s2]: https://jenkins.amethyst-engine.org/buildStatus/icon?job=laminar%2Fmaster 8 | [l2]: https://jenkins.amethyst-engine.org/job/laminar/job/master/badge/icon 9 | [s3]: https://img.shields.io/badge/license-MIT%2FApache-blue.svg 10 | [l3]: docs/LICENSE-MIT 11 | [s4]: https://docs.rs/laminar/badge.svg 12 | [l4]: https://docs.rs/laminar/ 13 | [s5]: https://img.shields.io/discord/425678876929163284.svg?logo=discord 14 | [l5]: https://discord.gg/GnP5Whs 15 | [s6]: https://tokei.rs/b1/github/amethyst/laminar?category=code 16 | [s7]: https://codecov.io/gh/amethyst/laminar/branch/master/graphs/badge.svg 17 | 18 | Laminar is an application-level transport protocol which provides configurable reliability and ordering guarantees built on top of UDP. 19 | It focuses on fast-paced fps-games and provides a lightweight, message-based interface. 20 | 21 | Laminar was designed to be used within the [Amethyst][amethyst] game engine but is usable without it. 
22 | 
23 | If you are new to laminar or to networking in general, we strongly recommend taking a look at the [laminar book][book].
24 | 
25 | [amethyst]: https://github.com/amethyst/amethyst
26 | 
27 | # Concepts
28 | 
29 | This library is loosely based on [Gaffer on Games][gog] and shares features with libraries such as RakNet, Steam Sockets and netcode.io.
30 | The idea is to provide a low-level UDP protocol, written in Rust, that supports the use cases of video games requiring multiplayer features.
31 | The library itself provides a few low-level packet types that come with different guarantees. The most
32 | basic are unreliable and reliable packets. Ordering and sequencing can also be done on multiple streams.
33 | For more information, read the project's [README.md][readme], [book][book], [docs][docs] or [examples][examples].
34 | 
35 | [gog]: https://gafferongames.com/
36 | [readme]: https://github.com/amethyst/laminar/blob/master/README.md
37 | [book]: https://amethyst.github.io/laminar/docs/index.html
38 | [docs]: https://docs.rs/laminar/
39 | [examples]: https://github.com/amethyst/laminar/tree/master/examples
40 | [amethyst]: https://github.com/amethyst/amethyst
41 | 
42 | ## Table of contents:
43 | - [Useful links](#useful-links)
44 | - [Features](#features)
45 | - [Getting Started](#getting-started)
46 | - [Examples](#examples)
47 | - [Note](#note)
48 | - [Contributing](#contribution)
49 | - [Authors](#authors)
50 | - [License](#license)
51 | 
52 | ## Features
53 | These are the features this crate provides:
54 | 
55 | * [x] Fragmentation
56 | * [x] Unreliable packets
57 | * [x] Unreliable sequenced packets
58 | * [x] Reliable unordered packets
59 | * [x] Reliable ordered packets
60 | * [x] Reliable sequenced packets
61 | * [x] RTT estimation
62 | * [x] Protocol version monitoring
63 | * [x] Basic connection management
64 | * [x] Heartbeat
65 | * [x] Basic DoS mitigation
66 | * [x] Fine-grained timing control
67 | * [x] Protocol versioning
68 | * [x] Well-tested by integration and unit tests
69 | * [x] Can be used by multiple threads (Sender, Receiver)
70 | 
71 | ### Planned
72 | 
73 | * [ ] Handshake Protocol
74 | * [ ] Advanced Connection Management
75 | * [ ] Cryptography
76 | * [ ] Congestion Control
77 | 
78 | ## Getting Started
79 | Add the laminar package to your `Cargo.toml` file.
80 | 
81 | ```toml
82 | [dependencies]
83 | laminar = "0.3"
84 | ```
85 | 
86 | ### Useful Links
87 | 
88 | - [Documentation](https://docs.rs/laminar/)
89 | - [Crates.io](https://crates.io/crates/laminar)
90 | - [Examples](https://github.com/amethyst/laminar/tree/master/examples)
91 | - [Contributing](https://github.com/amethyst/laminar/blob/master/docs/CONTRIBUTING)
92 | - [Book](https://amethyst.github.io/laminar/docs/index.html)
93 | 
94 | ## Examples
95 | Please check out our [examples](https://github.com/amethyst/laminar/tree/master/examples) for more information.
96 | 
97 | ### UDP API | [see more](https://github.com/amethyst/laminar/blob/master/examples/udp.rs)
98 | This is an example of how to use the UDP API.
99 | 
100 | _Send packets_
101 | 
102 | ```rust
103 | use std::thread;
104 | use laminar::{Packet, Socket};
105 | // Creates the socket
106 | let mut socket = Socket::bind("127.0.0.1:12345")?;
107 | let packet_sender = socket.get_packet_sender();
108 | // Starts the socket, which will start a poll mechanism to receive and send messages.
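// Note: `start_polling` loops forever, so it is moved onto its own thread below; for single-threaded
// use you can call `socket.manual_poll(Instant::now())` yourself instead.
// `destination` further down stands for the remote endpoint's `SocketAddr` (not shown in this snippet).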
109 | let _thread = thread::spawn(move || socket.start_polling());
110 | 
111 | // Bytes to send
112 | let bytes = vec![...];
113 | 
114 | // Creates packets with different reliabilities
115 | let unreliable = Packet::unreliable(destination, bytes.clone());
116 | let reliable = Packet::reliable_unordered(destination, bytes.clone());
117 | 
118 | // Specifies on which stream and how to order our packets; check out our book and documentation for more information
119 | let unreliable_sequenced = Packet::unreliable_sequenced(destination, bytes.clone(), Some(1));
120 | let reliable_sequenced = Packet::reliable_sequenced(destination, bytes.clone(), Some(2));
121 | let reliable_ordered = Packet::reliable_ordered(destination, bytes, Some(3));
122 | 
123 | // Sends the created packets
124 | packet_sender.send(unreliable).unwrap();
125 | packet_sender.send(reliable).unwrap();
126 | packet_sender.send(unreliable_sequenced).unwrap();
127 | packet_sender.send(reliable_sequenced).unwrap();
128 | packet_sender.send(reliable_ordered).unwrap();
129 | ```
130 | 
131 | _Receive Packets_
132 | ```rust
133 | use std::{net::SocketAddr, thread};
134 | use laminar::{Socket, SocketEvent};
135 | // Creates the socket
136 | let mut socket = Socket::bind("127.0.0.1:12346")?;
137 | let event_receiver = socket.get_event_receiver();
138 | // Starts the socket, which will start a poll mechanism to receive and send messages.
139 | let _thread = thread::spawn(move || socket.start_polling());
140 | 
141 | // Waits until a socket event occurs
142 | let result = event_receiver.recv();
143 | 
144 | match result {
145 |     Ok(socket_event) => {
146 |         match socket_event {
147 |             SocketEvent::Packet(packet) => {
148 |                 let endpoint: SocketAddr = packet.addr();
149 |                 let received_data: &[u8] = packet.payload();
150 |             },
151 |             SocketEvent::Connect(connect_event) => { /* a client connected */ },
152 |             SocketEvent::Timeout(timeout_event) => { /* a client timed out */ },
153 |         }
154 |     }
155 |     Err(e) => {
156 |         println!("Something went wrong when receiving, error: {:?}", e);
157 |     }
158 | }
159 | ```
160 | 
161 | ## Authors
162 | 
163 | - [Lucio Franco](https://github.com/LucioFranco)
164 | - [Fletcher Haynes](https://github.com/fhaynes)
165 | - [Timon Post](https://github.com/TimonPost)
166 | - [Justin LeFebvre](https://github.com/jstnlef)
167 | 
168 | ## Note
169 | 
170 | This library is not fully stable yet, and there may be breaking changes to the API.
171 | For more advanced examples of using laminar, you can check out the [Amethyst-Network](https://github.com/amethyst/amethyst/tree/master/amethyst_network) crate.
172 | 
173 | ## Contribution
174 | 
175 | Unless you explicitly state otherwise, any contribution intentionally submitted
176 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
177 | additional terms or conditions.
178 | 
179 | ## License
180 | 
181 | Licensed under either of
182 | * Apache License, Version 2.0 ([LICENSE-APACHE](docs/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
183 | * MIT license ([LICENSE-MIT](docs/LICENSE-MIT) or http://opensource.org/licenses/MIT)
184 | at your option.
185 | 
--------------------------------------------------------------------------------
/src/net/socket.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 |     self,
3 |     net::{Ipv4Addr, SocketAddr, SocketAddrV4, ToSocketAddrs, UdpSocket},
4 |     thread::{sleep, yield_now},
5 |     time::{Duration, Instant},
6 | };
7 | 
8 | use crossbeam_channel::{self, Receiver, Sender, TryRecvError};
9 | 
10 | use crate::{
11 |     config::Config,
12 |     error::Result,
13 |     net::{
14 |         events::SocketEvent, ConnectionManager, DatagramSocket, LinkConditioner, VirtualConnection,
15 |     },
16 |     packet::Packet,
17 | };
18 | 
19 | // Wraps `LinkConditioner` and `UdpSocket` together. The LinkConditioner is enabled when building with the "tester" feature.
20 | #[derive(Debug)]
21 | struct SocketWithConditioner {
22 |     is_blocking_mode: bool,
23 |     socket: UdpSocket,
24 |     link_conditioner: Option<LinkConditioner>,
25 | }
26 | 
27 | impl SocketWithConditioner {
28 |     pub fn new(socket: UdpSocket, is_blocking_mode: bool) -> Result<Self> {
29 |         socket.set_nonblocking(!is_blocking_mode)?;
30 |         Ok(SocketWithConditioner {
31 |             is_blocking_mode,
32 |             socket,
33 |             link_conditioner: None,
34 |         })
35 |     }
36 | 
37 |     #[cfg(feature = "tester")]
38 |     pub fn set_link_conditioner(&mut self, link_conditioner: Option<LinkConditioner>) {
39 |         self.link_conditioner = link_conditioner;
40 |     }
41 | }
42 | 
43 | /// Provides a `DatagramSocket` implementation for `SocketWithConditioner`
44 | impl DatagramSocket for SocketWithConditioner {
45 |     // Determines whether the packet will be sent or not, based on the `LinkConditioner` if enabled.
46 |     fn send_packet(&mut self, addr: &SocketAddr, payload: &[u8]) -> std::io::Result<usize> {
47 |         if cfg!(feature = "tester") {
48 |             if let Some(ref mut link) = &mut self.link_conditioner {
49 |                 if !link.should_send() {
50 |                     return Ok(0);
51 |                 }
52 |             }
53 |         }
54 |         self.socket.send_to(payload, addr)
55 |     }
56 | 
57 |     /// Receives a single packet from the UDP socket.
58 |     fn receive_packet<'a>(
59 |         &mut self,
60 |         buffer: &'a mut [u8],
61 |     ) -> std::io::Result<(&'a [u8], SocketAddr)> {
62 |         self.socket
63 |             .recv_from(buffer)
64 |             .map(move |(recv_len, address)| (&buffer[..recv_len], address))
65 |     }
66 | 
67 |     /// Returns the socket address that this socket was created from.
68 |     fn local_addr(&self) -> std::io::Result<SocketAddr> {
69 |         self.socket.local_addr()
70 |     }
71 | 
72 |     /// Returns whether the socket operates in blocking or non-blocking mode.
73 |     fn is_blocking_mode(&self) -> bool {
74 |         self.is_blocking_mode
75 |     }
76 | }
77 | 
78 | /// A reliable UDP socket implementation with configurable reliability and ordering guarantees.
79 | #[derive(Debug)]
80 | pub struct Socket {
81 |     handler: ConnectionManager<SocketWithConditioner, VirtualConnection>,
82 | }
83 | 
84 | impl Socket {
85 |     /// Binds to the socket and then sets up `ActiveConnections` to manage the "connections".
86 |     /// Because UDP connections are not persistent, we can only infer the status of the remote
87 |     /// endpoint by looking to see if they are still sending packets or not.
88 |     pub fn bind<A: ToSocketAddrs>(addresses: A) -> Result<Self> {
89 |         Self::bind_with_config(addresses, Config::default())
90 |     }
91 | 
92 |     /// Binds to any local port on the system, if available.
93 |     pub fn bind_any() -> Result<Self> {
94 |         Self::bind_any_with_config(Config::default())
95 |     }
96 | 
97 |     /// Binds to any local port on the system, if available, with a given config.
98 |     pub fn bind_any_with_config(config: Config) -> Result<Self> {
99 |         let loopback = Ipv4Addr::new(127, 0, 0, 1);
100 |         let address = SocketAddrV4::new(loopback, 0);
101 |         let socket = UdpSocket::bind(address)?;
102 |         Self::bind_internal(socket, config)
103 |     }
104 | 
105 |     /// Binds to the socket and then sets up `ActiveConnections` to manage the "connections".
106 |     /// Because UDP connections are not persistent, we can only infer the status of the remote
107 |     /// endpoint by looking to see if they are still sending packets or not.
108 |     ///
109 |     /// This function allows you to configure laminar with the passed configuration.
110 |     pub fn bind_with_config<A: ToSocketAddrs>(addresses: A, config: Config) -> Result<Self> {
111 |         let socket = UdpSocket::bind(addresses)?;
112 |         Self::bind_internal(socket, config)
113 |     }
114 | 
115 |     fn bind_internal(socket: UdpSocket, config: Config) -> Result<Self> {
116 |         Ok(Socket {
117 |             handler: ConnectionManager::new(
118 |                 SocketWithConditioner::new(socket, config.blocking_mode)?,
119 |                 config,
120 |             ),
121 |         })
122 |     }
123 | 
124 |     /// Returns a handle to the packet sender which provides a thread-safe way to enqueue packets
125 |     /// to be processed. This should be used when the socket is busy running its polling loop in a
126 |     /// separate thread.
127 |     pub fn get_packet_sender(&self) -> Sender<Packet> {
128 |         self.handler.event_sender().clone()
129 |     }
130 | 
131 |     /// Returns a handle to the event receiver which provides a thread-safe way to retrieve events
132 |     /// from the socket. This should be used when the socket is busy running its polling loop in
133 |     /// a separate thread.
134 |     pub fn get_event_receiver(&self) -> Receiver<SocketEvent> {
135 |         self.handler.event_receiver().clone()
136 |     }
137 | 
138 |     /// Sends a single packet.
139 |     pub fn send(&mut self, packet: Packet) -> Result<()> {
140 |         self.handler
141 |             .event_sender()
142 |             .send(packet)
143 |             .expect("Receiver must exist.");
144 |         Ok(())
145 |     }
146 | 
147 |     /// Receives a single packet.
148 |     pub fn recv(&mut self) -> Option<SocketEvent> {
149 |         match self.handler.event_receiver().try_recv() {
150 |             Ok(pkt) => Some(pkt),
151 |             Err(TryRecvError::Empty) => None,
152 |             Err(TryRecvError::Disconnected) => panic!["This can never happen"],
153 |         }
154 |     }
155 | 
156 |     /// Runs the polling loop with the default '1ms' sleep duration. This should run in a spawned thread
157 |     /// since calls to `self.manual_poll` are blocking.
158 |     pub fn start_polling(&mut self) {
159 |         self.start_polling_with_duration(Some(Duration::from_millis(1)))
160 |     }
161 | 
162 |     /// Runs the polling loop with a specified sleep duration. This should run in a spawned thread
163 |     /// since calls to `self.manual_poll` are blocking.
164 |     pub fn start_polling_with_duration(&mut self, sleep_duration: Option<Duration>) {
165 |         // nothing should break out of this loop!
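        // Each iteration processes all pending inbound/outbound work via `manual_poll`, then either
        // yields to the OS scheduler (`None`) or sleeps for the configured duration before polling again.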
166 |         loop {
167 |             self.manual_poll(Instant::now());
168 |             match sleep_duration {
169 |                 None => yield_now(),
170 |                 Some(duration) => sleep(duration),
171 |             };
172 |         }
173 |     }
174 | 
175 |     /// Processes any inbound/outbound packets and handles idle clients.
176 |     pub fn manual_poll(&mut self, time: Instant) {
177 |         self.handler.manual_poll(time);
178 |     }
179 | 
180 |     /// Returns the local socket address.
181 |     pub fn local_addr(&self) -> Result<SocketAddr> {
182 |         Ok(self.handler.socket().local_addr()?)
183 |     }
184 | 
185 |     /// Sets the link conditioner for this socket. See [LinkConditioner] for further details.
186 |     #[cfg(feature = "tester")]
187 |     pub fn set_link_conditioner(&mut self, link_conditioner: Option<LinkConditioner>) {
188 |         self.handler
189 |             .socket_mut()
190 |             .set_link_conditioner(link_conditioner);
191 |     }
192 | }
193 | 
--------------------------------------------------------------------------------
/src/error.rs:
--------------------------------------------------------------------------------
1 | //! This module contains the laminar error handling logic.
2 | 
3 | use std::{
4 |     error::Error,
5 |     fmt::{self, Display, Formatter},
6 |     io, result,
7 | };
8 | 
9 | use crossbeam_channel::SendError;
10 | 
11 | use crate::SocketEvent;
12 | 
13 | /// Wrapped result type for Laminar errors.
14 | pub type Result<T> = result::Result<T, ErrorKind>;
15 | 
16 | #[derive(Debug)]
17 | /// Enum with all possible network errors that could occur.
18 | pub enum ErrorKind {
19 |     /// Error in decoding the packet
20 |     DecodingError(DecodingErrorKind),
21 |     /// Error relating to receiving or parsing a fragment
22 |     FragmentError(FragmentErrorKind),
23 |     /// Error relating to receiving or parsing a packet
24 |     PacketError(PacketErrorKind),
25 |     /// Wrapper around a std io::Error
26 |     IOError(io::Error),
27 |     /// Did not receive enough data
28 |     ReceivedDataToShort,
29 |     /// Protocol versions did not match
30 |     ProtocolVersionMismatch,
31 |     /// Could not send on `SendChannel`.
32 |     SendError(SendError<SocketEvent>),
33 |     /// Expected header but could not be read from buffer.
34 |     CouldNotReadHeader(String),
35 | }
36 | 
37 | impl Display for ErrorKind {
38 |     fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
39 |         match self {
40 |             ErrorKind::DecodingError(e) => write!(
41 |                 fmt,
42 |                 "Something went wrong with parsing the header. Reason: {:?}.",
43 |                 e
44 |             ),
45 |             ErrorKind::FragmentError(e) => write!(
46 |                 fmt,
47 |                 "Something went wrong with receiving/parsing fragments. Reason: {:?}.",
48 |                 e
49 |             ),
50 |             ErrorKind::PacketError(e) => write!(
51 |                 fmt,
52 |                 "Something went wrong with receiving/parsing packets. Reason: {:?}.",
53 |                 e
54 |             ),
55 |             ErrorKind::IOError(e) => write!(fmt, "An IO Error occurred. Reason: {:?}.", e),
56 |             ErrorKind::ReceivedDataToShort => {
57 |                 write!(fmt, "The received data did not have any length.")
58 |             }
59 |             ErrorKind::ProtocolVersionMismatch => {
60 |                 write!(fmt, "The protocol versions do not match.")
61 |             }
62 |             ErrorKind::SendError(e) => write!(
63 |                 fmt,
64 |                 "Could not send on channel because it was closed. Reason: {:?}",
Reason: {:?}", 65 | e 66 | ), 67 | ErrorKind::CouldNotReadHeader(header) => write!( 68 | fmt, 69 | "Expected {} header but could not be read from buffer.", 70 | header 71 | ), 72 | } 73 | } 74 | } 75 | 76 | impl Error for ErrorKind {} 77 | 78 | /// Errors that could occur while parsing packet contents 79 | #[derive(Debug, PartialEq, Eq, Clone)] 80 | pub enum DecodingErrorKind { 81 | /// The [PacketType] could not be read 82 | PacketType, 83 | /// The [OrderingGuarantee] could not be read 84 | OrderingGuarantee, 85 | /// The [DeliveryGuarantee] could not be read 86 | DeliveryGuarantee, 87 | } 88 | 89 | impl Display for DecodingErrorKind { 90 | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { 91 | match *self { 92 | DecodingErrorKind::PacketType => write!(fmt, "The packet type could not be read."), 93 | DecodingErrorKind::OrderingGuarantee => { 94 | write!(fmt, "The ordering guarantee could not be read.") 95 | } 96 | DecodingErrorKind::DeliveryGuarantee => { 97 | write!(fmt, "The delivery guarantee could not be read.") 98 | } 99 | } 100 | } 101 | } 102 | 103 | /// Errors that could occur while parsing packet contents 104 | #[derive(Debug, PartialEq, Eq, Clone)] 105 | pub enum PacketErrorKind { 106 | /// The maximal allowed size of the packet was exceeded 107 | ExceededMaxPacketSize, 108 | /// Only `PacketType::Packet` can be fragmented 109 | PacketCannotBeFragmented, 110 | } 111 | 112 | impl Display for PacketErrorKind { 113 | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { 114 | match *self { 115 | PacketErrorKind::ExceededMaxPacketSize => { 116 | write!(fmt, "The packet size was bigger than the max allowed size.") 117 | } 118 | PacketErrorKind::PacketCannotBeFragmented => { 119 | write!(fmt, "The packet type cannot be fragmented.") 120 | } 121 | } 122 | } 123 | } 124 | 125 | /// Errors that could occur with constructing/parsing fragment contents 126 | #[derive(Debug, PartialEq, Eq, Clone)] 127 | pub enum FragmentErrorKind { 128 | /// PacketHeader was not found in the packet 129 | PacketHeaderNotFound, 130 | /// Max number of allowed fragments has been exceeded 131 | ExceededMaxFragments, 132 | /// This fragment was already processed 133 | AlreadyProcessedFragment, 134 | /// Attempted to fragment with an incorrect number of fragments 135 | FragmentWithUnevenNumberOfFragments, 136 | /// Fragment we expected to be able to find we couldn't 137 | CouldNotFindFragmentById, 138 | /// Multiple ack headers sent with these fragments 139 | MultipleAckHeaders, 140 | /// Ack header is missing from a finished set of fragments 141 | MissingAckHeader, 142 | } 143 | 144 | impl Display for FragmentErrorKind { 145 | fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { 146 | match *self { 147 | FragmentErrorKind::PacketHeaderNotFound => { 148 | write!(fmt, "Packet header was attached to fragment.") 149 | } 150 | FragmentErrorKind::ExceededMaxFragments => write!( 151 | fmt, 152 | "The total numbers of fragments are bigger than the allowed fragments." 153 | ), 154 | FragmentErrorKind::AlreadyProcessedFragment => { 155 | write!(fmt, "The fragment received was already processed.") 156 | } 157 | FragmentErrorKind::FragmentWithUnevenNumberOfFragments => write!( 158 | fmt, 159 | "The fragment header does not contain the right fragment count." 160 | ), 161 | FragmentErrorKind::CouldNotFindFragmentById => write!( 162 | fmt, 163 | "The fragment supposed to be in a the cache but it was not found." 
164 |             ),
165 |             FragmentErrorKind::MultipleAckHeaders => write!(
166 |                 fmt,
167 |                 "The fragment contains an ack header but a previous ack header has already been registered."
168 |             ),
169 |             FragmentErrorKind::MissingAckHeader => write!(
170 |                 fmt,
171 |                 "No ack headers were registered with any of the fragments."
172 |             ),
173 |         }
174 |     }
175 | }
176 | 
177 | impl From<io::Error> for ErrorKind {
178 |     fn from(inner: io::Error) -> ErrorKind {
179 |         ErrorKind::IOError(inner)
180 |     }
181 | }
182 | 
183 | impl From<PacketErrorKind> for ErrorKind {
184 |     fn from(inner: PacketErrorKind) -> Self {
185 |         ErrorKind::PacketError(inner)
186 |     }
187 | }
188 | 
189 | impl From<FragmentErrorKind> for ErrorKind {
190 |     fn from(inner: FragmentErrorKind) -> Self {
191 |         ErrorKind::FragmentError(inner)
192 |     }
193 | }
194 | 
195 | impl From<SendError<SocketEvent>> for ErrorKind {
196 |     fn from(inner: SendError<SocketEvent>) -> Self {
197 |         ErrorKind::SendError(inner)
198 |     }
199 | }
200 | 
201 | #[cfg(test)]
202 | mod tests {
203 |     use super::*;
204 | 
205 |     #[test]
206 |     fn able_to_box_errors() {
207 |         let _: Box<dyn Error> = Box::new(ErrorKind::CouldNotReadHeader("".into()));
208 |     }
209 | }
--------------------------------------------------------------------------------
/src/infrastructure/fragmenter.rs:
--------------------------------------------------------------------------------
1 | use std::io::Write;
2 | 
3 | use crate::{
4 |     config::Config,
5 |     error::{FragmentErrorKind, Result},
6 |     net::constants::FRAGMENT_HEADER_SIZE,
7 |     packet::header::{AckedPacketHeader, FragmentHeader},
8 |     sequence_buffer::{ReassemblyData, SequenceBuffer},
9 | };
10 | 
11 | /// Manages fragmentation of packets.
12 | pub struct Fragmentation {
13 |     fragments: SequenceBuffer<ReassemblyData>,
14 |     config: Config,
15 | }
16 | 
17 | impl Fragmentation {
18 |     /// Creates and returns a new Fragmentation.
19 |     pub fn new(config: &Config) -> Fragmentation {
20 |         Fragmentation {
21 |             fragments: SequenceBuffer::with_capacity(config.fragment_reassembly_buffer_size),
22 |             config: config.clone(),
23 |         }
24 |     }
25 | 
26 |     /// Checks how many times a number fits into another number, rounding up.
27 |     ///
28 |     /// For example, take these two numbers:
29 |     /// - number 1 = 4000;
30 |     /// - number 2 = 1024;
31 |     /// A plain division gives 4000 / 1024 = 3.90625,
32 |     /// but since we care about how many whole fragments are needed, the result has to be 4.
33 |     ///
34 |     /// Note that integer conversion in Rust always rounds towards zero (e.g. `3.456 as u32` = 3).
35 |     /// 1. Use the modulo to check whether `number 1` fits exactly into `number 2`.
36 |     /// 2. Divide `number 1` by `number 2` (this is rounded towards zero by Rust).
37 |     /// 3. If there is a remainder, add 1 to get the right number of fragments.
38 |     ///
39 |     /// Let's take an example:
40 |     ///
41 |     /// Calculate the remainder:
42 |     /// - number 1 % number 2 = 928
43 |     /// - this is bigger than 0, so the remainder term is 1.
44 |     ///
45 |     /// Calculate how many times `number 2` fits into `number 1`:
46 |     /// - number 1 / number 2 = 3.90625 (this will be rounded down to 3)
47 |     /// - add the remainder term from above to get 3 + 1 = 4.
48 |     ///
49 |     /// The method described above computes, for any two numbers, how many times one fits into the other, rounded up.
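    /// In code form this is simply `payload_length / fragment_size`, plus one if
    /// `payload_length % fragment_size` is non-zero (see the body below).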
50 |     ///
51 |     /// So when dividing a packet of 4000 bytes into fragments of 1024 bytes we get these fragments:
52 |     ///
53 |     /// So for 4000 bytes we need 4 fragments
54 |     /// [fragment: 1024] [fragment: 1024] [fragment: 1024] [fragment: 928]
55 |     pub fn fragments_needed(payload_length: u16, fragment_size: u16) -> u16 {
56 |         let remainder = if payload_length % fragment_size > 0 {
57 |             1
58 |         } else {
59 |             0
60 |         };
61 |         (payload_length / fragment_size) + remainder
62 |     }
63 | 
64 |     /// Splits the given payload into fragments and returns slices into the original payload, one per fragment.
65 |     pub fn spit_into_fragments<'a>(payload: &'a [u8], config: &Config) -> Result<Vec<&'a [u8]>> {
66 |         let mut fragments = Vec::new();
67 | 
68 |         let payload_length = payload.len() as u16;
69 |         let num_fragments =
70 |             // Safe cast: max fragments is u8
71 |             Fragmentation::fragments_needed(payload_length, config.fragment_size) as u8;
72 | 
73 |         if num_fragments > config.max_fragments {
74 |             return Err(FragmentErrorKind::ExceededMaxFragments.into());
75 |         }
76 | 
77 |         for fragment_id in 0..num_fragments {
78 |             // get the start and end position of this fragment in the payload buffer
79 |             let start_fragment_pos = u16::from(fragment_id) * config.fragment_size;
80 |             let mut end_fragment_pos = (u16::from(fragment_id) + 1) * config.fragment_size;
81 | 
82 |             // If the remaining buffer fits into one packet, just set the end position to the length of the packet payload.
83 |             if end_fragment_pos > payload_length {
84 |                 end_fragment_pos = payload_length;
85 |             }
86 | 
87 |             // get the specific slice of data for this fragment
88 |             let fragment_data = &payload[start_fragment_pos as usize..end_fragment_pos as usize];
89 | 
90 |             fragments.push(fragment_data);
91 |         }
92 | 
93 |         Ok(fragments)
94 |     }
95 | 
96 |     /// Reads fragment data and returns the complete packet payload once all fragments have been received.
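    ///
    /// Returns `Ok(Some((payload, acked_header)))` once the final fragment of a packet arrives,
    /// `Ok(None)` while fragments are still outstanding, and an error for duplicate, out-of-range
    /// or inconsistently numbered fragments (see `FragmentErrorKind`).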
97 |     pub fn handle_fragment(
98 |         &mut self,
99 |         fragment_header: FragmentHeader,
100 |         fragment_payload: &[u8],
101 |         acked_header: Option<AckedPacketHeader>,
102 |     ) -> Result<Option<(Vec<u8>, AckedPacketHeader)>> {
103 |         // read fragment packet
104 | 
105 |         self.create_fragment_if_not_exists(fragment_header);
106 | 
107 |         let num_fragments_received;
108 |         let num_fragments_total;
109 |         let sequence;
110 |         let total_buffer;
111 | 
112 |         {
113 |             // get the entry for the previously received fragments
114 |             let reassembly_data = match self.fragments.get_mut(fragment_header.sequence()) {
115 |                 Some(val) => val,
116 |                 None => return Err(FragmentErrorKind::CouldNotFindFragmentById.into()),
117 |             };
118 | 
119 |             // validate the fragment against the stored reassembly data
120 |             if reassembly_data.num_fragments_total != fragment_header.fragment_count() {
121 |                 return Err(FragmentErrorKind::FragmentWithUnevenNumberOfFragments.into());
122 |             }
123 | 
124 |             if usize::from(fragment_header.id()) >= reassembly_data.fragments_received.len() {
125 |                 return Err(FragmentErrorKind::ExceededMaxFragments.into());
126 |             }
127 | 
128 |             if reassembly_data.fragments_received[usize::from(fragment_header.id())] {
129 |                 return Err(FragmentErrorKind::AlreadyProcessedFragment.into());
130 |             }
131 | 
132 |             // increase the number of received fragments and mark this specific fragment as received
133 |             reassembly_data.num_fragments_received += 1;
134 |             reassembly_data.fragments_received[usize::from(fragment_header.id())] = true;
135 | 
136 |             // add the payload from the fragment to the buffer we have in cache
137 |             reassembly_data.buffer.write_all(&*fragment_payload)?;
138 | 
139 |             if let Some(acked_header) = acked_header {
140 |                 if reassembly_data.acked_header.is_none() {
141 |                     reassembly_data.acked_header = Some(acked_header);
142 |                 } else {
143 |                     return Err(FragmentErrorKind::MultipleAckHeaders.into());
144 |                 }
145 |             }
146 | 
147 |             num_fragments_received = reassembly_data.num_fragments_received;
148 |             num_fragments_total = reassembly_data.num_fragments_total;
149 |             sequence = reassembly_data.sequence as u16;
150 |             total_buffer = reassembly_data.buffer.clone();
151 |         }
152 | 
153 |         // if we received all fragments then remove the entry and return the total received bytes.
154 |         if num_fragments_received == num_fragments_total {
155 |             let sequence = sequence as u16;
156 |             if let Some(mut reassembly_data) = self.fragments.remove(sequence) {
157 |                 if reassembly_data.acked_header.is_none() {
158 |                     return Err(FragmentErrorKind::MissingAckHeader.into());
159 |                 }
160 | 
161 |                 let acked_header = reassembly_data.acked_header.take().unwrap();
162 |                 return Ok(Some((total_buffer, acked_header)));
163 |             } else {
164 |                 return Err(FragmentErrorKind::CouldNotFindFragmentById.into());
165 |             }
166 |         }
167 | 
168 |         Ok(None)
169 |     }
170 | 
171 |     /// If the fragment entry does not exist yet, insert a new one.
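    ///
    /// The new entry is created with `FRAGMENT_HEADER_SIZE + fragment_size` passed to
    /// `ReassemblyData::new`, presumably as a pre-allocation size for the reassembly buffer.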
172 |     fn create_fragment_if_not_exists(&mut self, fragment_header: FragmentHeader) {
173 |         if !self.fragments.exists(fragment_header.sequence()) {
174 |             let reassembly_data = ReassemblyData::new(
175 |                 fragment_header.sequence(),
176 |                 fragment_header.fragment_count(),
177 |                 (u16::from(FRAGMENT_HEADER_SIZE) + self.config.fragment_size) as usize,
178 |             );
179 | 
180 |             self.fragments
181 |                 .insert(fragment_header.sequence(), reassembly_data);
182 |         }
183 |     }
184 | }
185 | 
186 | #[cfg(test)]
187 | mod test {
188 |     use super::Fragmentation;
189 | 
190 |     #[test]
191 |     pub fn expect_right_number_of_fragments() {
192 |         let fragment_number = Fragmentation::fragments_needed(4000, 1024);
193 |         let fragment_number1 = Fragmentation::fragments_needed(500, 1024);
194 | 
195 |         assert_eq!(fragment_number, 4);
196 |         assert_eq!(fragment_number1, 1);
197 |     }
198 | }
199 | 
--------------------------------------------------------------------------------
/src/sequence_buffer.rs:
--------------------------------------------------------------------------------
1 | use std::clone::Clone;
2 | 
3 | use crate::packet::SequenceNumber;
4 | 
5 | pub use self::congestion_data::CongestionData;
6 | pub use self::reassembly_data::ReassemblyData;
7 | 
8 | mod congestion_data;
9 | mod reassembly_data;
10 | 
11 | /// Collection to store data of any kind.
12 | #[derive(Debug)]
13 | pub struct SequenceBuffer<T> {
14 |     sequence_num: SequenceNumber,
15 |     entry_sequences: Box<[Option<SequenceNumber>]>,
16 |     entries: Box<[T]>,
17 | }
18 | 
19 | impl<T: Clone + Default> SequenceBuffer<T> {
20 |     /// Creates a SequenceBuffer with a desired capacity.
21 |     pub fn with_capacity(size: u16) -> Self {
22 |         Self {
23 |             sequence_num: 0,
24 |             entry_sequences: vec![None; size as usize].into_boxed_slice(),
25 |             entries: vec![T::default(); size as usize].into_boxed_slice(),
26 |         }
27 |     }
28 | 
29 |     /// Returns the most recently stored sequence number.
30 |     pub fn sequence_num(&self) -> SequenceNumber {
31 |         self.sequence_num
32 |     }
33 | 
34 |     /// Returns a mutable reference to the entry with the given sequence number.
35 |     pub fn get_mut(&mut self, sequence_num: SequenceNumber) -> Option<&mut T> {
36 |         if self.exists(sequence_num) {
37 |             let index = self.index(sequence_num);
38 |             return Some(&mut self.entries[index]);
39 |         }
40 |         None
41 |     }
42 | 
43 |     /// Inserts the entry data into the sequence buffer. If the requested sequence number is "too
44 |     /// old", the entry will not be inserted and no reference will be returned.
45 |     pub fn insert(&mut self, sequence_num: SequenceNumber, entry: T) -> Option<&mut T> {
46 |         // sequence number is too old to insert into the buffer
47 |         if sequence_less_than(
48 |             sequence_num,
49 |             self.sequence_num
50 |                 .wrapping_sub(self.entry_sequences.len() as u16),
51 |         ) {
52 |             return None;
53 |         }
54 | 
55 |         self.advance_sequence(sequence_num);
56 | 
57 |         let index = self.index(sequence_num);
58 |         self.entry_sequences[index] = Some(sequence_num);
59 |         self.entries[index] = entry;
60 |         Some(&mut self.entries[index])
61 |     }
62 | 
63 |     /// Returns whether or not we have previously inserted an entry for the given sequence number.
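    /// Because the buffer is a ring indexed by `sequence % capacity`, a slot can be occupied by a
    /// different sequence number that maps to the same index; the stored sequence number is compared
    /// against the requested one to tell those cases apart.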
64 |     pub fn exists(&self, sequence_num: SequenceNumber) -> bool {
65 |         let index = self.index(sequence_num);
66 |         if let Some(s) = self.entry_sequences[index] {
67 |             return s == sequence_num;
68 |         }
69 |         false
70 |     }
71 | 
72 |     /// Removes an entry from the sequence buffer.
73 |     pub fn remove(&mut self, sequence_num: SequenceNumber) -> Option<T> {
74 |         if self.exists(sequence_num) {
75 |             let index = self.index(sequence_num);
76 |             let value = std::mem::replace(&mut self.entries[index], T::default());
77 |             self.entry_sequences[index] = None;
78 |             return Some(value);
79 |         }
80 |         None
81 |     }
82 | 
83 |     // Advances the sequence number while removing older entries.
84 |     fn advance_sequence(&mut self, sequence_num: SequenceNumber) {
85 |         if sequence_greater_than(sequence_num.wrapping_add(1), self.sequence_num) {
86 |             self.remove_entries(u32::from(sequence_num));
87 |             self.sequence_num = sequence_num.wrapping_add(1);
88 |         }
89 |     }
90 | 
91 |     fn remove_entries(&mut self, mut finish_sequence: u32) {
92 |         let start_sequence = u32::from(self.sequence_num);
93 |         if finish_sequence < start_sequence {
94 |             finish_sequence += 65536;
95 |         }
96 | 
97 |         if finish_sequence - start_sequence < self.entry_sequences.len() as u32 {
98 |             for sequence in start_sequence..=finish_sequence {
99 |                 self.remove(sequence as u16);
100 |             }
101 |         } else {
102 |             for index in 0..self.entry_sequences.len() {
103 |                 self.entries[index] = T::default();
104 |                 self.entry_sequences[index] = None;
105 |             }
106 |         }
107 |     }
108 | 
109 |     // Generates an index for use in `entry_sequences` and `entries`.
110 |     fn index(&self, sequence: SequenceNumber) -> usize {
111 |         sequence as usize % self.entry_sequences.len()
112 |     }
113 | }
114 | 
115 | pub fn sequence_greater_than(s1: u16, s2: u16) -> bool {
116 |     ((s1 > s2) && (s1 - s2 <= 32768)) || ((s1 < s2) && (s2 - s1 > 32768))
117 | }
118 | 
119 | pub fn sequence_less_than(s1: u16, s2: u16) -> bool {
120 |     sequence_greater_than(s2, s1)
121 | }
122 | 
123 | #[cfg(test)]
124 | mod tests {
125 |     use crate::packet::SequenceNumber;
126 |     use crate::sequence_buffer::sequence_greater_than;
127 |     use crate::sequence_buffer::sequence_less_than;
128 | 
129 |     use super::SequenceBuffer;
130 | 
131 |     #[derive(Clone, Default)]
132 |     struct DataStub;
133 | 
134 |     #[test]
135 |     fn test_sequence_comparisons_than() {
136 |         assert!(sequence_greater_than(1, 0));
137 |         assert!(sequence_less_than(0, 1));
138 | 
139 |         // right around the halfway point is where we cut over.
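        // a difference of exactly 32768 still counts as "greater"; a difference of 32769 no longer does.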
140 |         assert!(sequence_greater_than(32768, 0));
141 |         assert!(sequence_less_than(32769, 0));
142 | 
143 |         // in this case, 0 is greater than u16 max because we're likely at the wrapping case
144 |         assert!(sequence_greater_than(0, u16::max_value()));
145 |     }
146 | 
147 |     #[test]
148 |     fn max_sequence_number_should_not_exist_by_default() {
149 |         let buffer: SequenceBuffer<DataStub> = SequenceBuffer::with_capacity(2);
150 |         assert!(!buffer.exists(u16::max_value()));
151 |     }
152 | 
153 |     #[test]
154 |     fn ensure_entries_and_entry_sequences_are_the_same_size() {
155 |         let buffer: SequenceBuffer<DataStub> = SequenceBuffer::with_capacity(2);
156 |         assert_eq!(buffer.entry_sequences.len(), buffer.entries.len());
157 |     }
158 | 
159 |     #[test]
160 |     fn normal_inserts_should_fill_buffer() {
161 |         let mut buffer = SequenceBuffer::with_capacity(8);
162 |         for i in 0..8 {
163 |             buffer.insert(i, DataStub);
164 |         }
165 |         assert_eq!(count_entries(&buffer), 8);
166 |     }
167 | 
168 |     #[test]
169 |     fn insert_into_buffer_test() {
170 |         let mut buffer = SequenceBuffer::with_capacity(2);
171 |         buffer.insert(0, DataStub);
172 |         assert!(buffer.exists(0));
173 |     }
174 | 
175 |     #[test]
176 |     fn remove_from_buffer_test() {
177 |         let mut buffer = SequenceBuffer::with_capacity(2);
178 |         buffer.insert(0, DataStub);
179 |         buffer.remove(0);
180 |         assert!(!buffer.exists(0));
181 |     }
182 | 
183 |     #[test]
184 |     fn insert_into_buffer_old_entry_test() {
185 |         let mut buffer = SequenceBuffer::with_capacity(8);
186 |         buffer.insert(8, DataStub);
187 |         // this entry would overlap with sequence 8 based on the buffer size so we must ensure that
188 |         // it does not.
189 |         buffer.insert(0, DataStub);
190 |         assert!(!buffer.exists(0));
191 | 
192 |         // however, this one is more recent so it should definitely exist.
193 |         buffer.insert(16, DataStub);
194 |         assert!(buffer.exists(16));
195 | 
196 |         // since we are pretty far ahead at this point, there should only be 1 valid entry in here.
197 |         assert_eq!(count_entries(&buffer), 1);
198 |     }
199 | 
200 |     #[test]
201 |     fn new_sequence_nums_evict_old_ones() {
202 |         let mut buffer = SequenceBuffer::with_capacity(2);
203 |         for i in 0..3 {
204 |             buffer.insert(i, DataStub);
205 |             assert_eq!(buffer.sequence_num(), i + 1);
206 |         }
207 |         assert!(!buffer.exists(0));
208 |         assert!(buffer.exists(1));
209 |         assert!(buffer.exists(2));
210 |         assert_eq!(count_entries(&buffer), 2);
211 |     }
212 | 
213 |     #[test]
214 |     fn older_sequence_numbers_arent_inserted() {
215 |         let mut buffer = SequenceBuffer::with_capacity(8);
216 |         buffer.insert(10, DataStub);
217 | 
218 |         assert_eq!(buffer.sequence_num(), 11);
219 | 
220 |         // inserting 'older' should fail to insert
221 |         buffer.insert(2, DataStub);
222 |         assert!(!buffer.exists(2));
223 | 
224 |         // insert respects boundary wrap. Both of these would be earlier than 11
225 |         buffer.insert(u16::max_value(), DataStub);
226 |         buffer.insert(0, DataStub);
227 |         assert!(!buffer.exists(u16::max_value()));
228 |         assert!(!buffer.exists(0));
229 | 
230 |         assert_eq!(count_entries(&buffer), 1);
231 |     }
232 | 
233 |     fn count_entries(buffer: &SequenceBuffer<DataStub>) -> usize {
234 |         let nums: Vec<&SequenceNumber> = buffer.entry_sequences.iter().flatten().collect();
235 |         nums.len()
236 |     }
237 | }
238 | 
--------------------------------------------------------------------------------