├── .github ├── FUNDING.yml ├── dependabot.yml └── workflows │ └── ci.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── backroll ├── Cargo.toml ├── README.md └── src │ ├── backend │ ├── mod.rs │ └── p2p.rs │ ├── command.rs │ ├── input.rs │ ├── lib.rs │ ├── protocol │ ├── bitfield.rs │ ├── compression.rs │ ├── event.rs │ ├── input_buffer.rs │ ├── message.rs │ └── mod.rs │ ├── sync.rs │ └── time_sync.rs ├── backroll_transport ├── Cargo.toml ├── README.md └── src │ ├── channel.rs │ ├── lib.rs │ ├── peer.rs │ └── peers.rs ├── backroll_transport_steam ├── Cargo.toml ├── README.md └── src │ └── lib.rs ├── backroll_transport_udp ├── Cargo.toml ├── README.md └── src │ └── lib.rs └── bevy_backroll ├── Cargo.toml ├── README.md └── src ├── id.rs ├── lib.rs ├── save_state.rs └── steam.rs /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: james7132 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" 9 | directory: "/" 10 | schedule: 11 | interval: "weekly" 12 | - package-ecosystem: "github-actions" 13 | directory: "/" 14 | schedule: 15 | interval: "weekly" 16 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | 8 | jobs: 9 | build: 10 | strategy: 11 | matrix: 12 | os: [windows-latest, ubuntu-latest, macos-latest] 13 | toolchain: [stable] 14 | runs-on: ${{ matrix.os }} 15 | steps: 16 | - uses: actions/checkout@v3 17 | 18 | - uses: actions-rs/toolchain@v1 19 | id: toolchain 20 | with: 21 | toolchain: ${{ matrix.toolchain }} 22 | profile: minimal 23 | components: rustfmt, clippy 24 | override: true 25 | 26 | - name: Install alsa and udev 27 | run: sudo apt-get update; sudo apt-get install --no-install-recommends libasound2-dev libudev-dev 28 | if: runner.os == 'linux' 29 | 30 | - name: Setup cache 31 | uses: actions/cache@v3 32 | with: 33 | path: | 34 | ~/.cargo/registry 35 | ~/.cargo/git 36 | target 37 | key: ${{ runner.os }}-test-rustc-${{ steps.toolchain.outputs.rustc_hash }}-${{ hashFiles('**/Cargo.lock') }} 38 | 39 | - uses: actions-rs/cargo@v1 40 | if: runner.os == 'linux' 41 | with: 42 | command: fmt 43 | args: --all -- --check 44 | 45 | - uses: actions-rs/clippy-check@v1 46 | if: runner.os == 'linux' 47 | with: 48 | token: ${{ secrets.GITHUB_TOKEN }} 49 | args: --all-features 50 | 51 | - uses: actions-rs/cargo@v1 52 | with: 53 | command: test 54 | args: --workspace --exclude backroll_transport_steam 55 | env: 56 | CARGO_INCREMENTAL: 0 57 | RUSTFLAGS: "-C debuginfo=0" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock -------------------------------------------------------------------------------- /Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" # Needed by Bevy/wgpu 3 | members = [ 4 | "backroll", 5 | "bevy_backroll", 6 | "backroll_transport", 7 | "backroll_transport_udp", 8 | "backroll_transport_steam", 9 | ] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2021 Hourai Teahouse 2 | 3 | Permission to use, copy, modify, and/or distribute this software for any purpose 4 | with or without fee is hereby granted, provided that the above copyright notice 5 | and this permission notice appear in all copies. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH 8 | REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND 9 | FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, 10 | INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS 11 | OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 12 | TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF 13 | THIS SOFTWARE. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # backroll-rs 2 | 3 | [![crates.io](https://img.shields.io/crates/v/backroll.svg)](https://crates.io/crates/backroll) 4 | [![Documentation](https://docs.rs/backroll/badge.svg)](https://docs.rs/backroll) 5 | ![License](https://img.shields.io/crates/l/backroll) 6 | [![Discord](https://img.shields.io/discord/151219753434742784.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/VuZhs9V) 7 | 8 | Backroll is a pure Rust implementation of [GGPO](https://www.ggpo.net/) 9 | rollback networking library. 10 | 11 | ## Development Status 12 | This is still in an early beta stage. At time of writing, the public facing API 13 | is stable, and has undergone limited testing. There may still be notable bugs 14 | that have not been found yet. 15 | 16 | ## Differences with the C++ implementation 17 | 18 | * (Almost) 100% pure **safe** Rust. No unsafe pointer manipulation. 19 | * Type safety. backroll-rs heavily utilizes generics and associated types to 20 | avoid serialization overhead and potentially unsafe type conversions when 21 | saving and loading game state. 22 | * Abstracted transport layer protocols - integrate and use any transport layer 23 | library you need. Comes with a raw UDP socket based implementation. 24 | * Configurable at runtime - Many of the hard-coded constants in GGPO are exposed 25 | as configuration parameters during session initialization. 26 | * Reduced memory usage - Backroll's use of generics potentially shrinks down 27 | the sizes of many data types. 28 | * Vectorized input compression scheme - Backroll utilizes the same XOR + RLE 29 | encoding, but it's written to maximize CPU utilization. 30 | * Multithreaded I/O - All network communications run within an async task pool. 31 | I/O polling is no longer manual, nor blocks your game's execution. 32 | 33 | ## Repository Structure 34 | This repo contains the following crates: 35 | 36 | * backroll - the main Backroll interface, intended to be used as the original 37 | GGPO. 38 | * backroll\_transport - An isolated set of transport layer abstractions. 
39 | * backroll\_transport\_udp - A transport layer implementation using raw UDP 40 | sockets. 41 | * backroll\_transport\_steam - A transport layer implementation using the 42 | Steam provided networking utilities. Enables access to the Steam Datagram 43 | Relay service. 44 | * bevy\_backroll - a integration plugin for [bevy](https://bevyengine.org/). 45 | (Complete, untested). 46 | * lib - third-party linking dependencies (i.e. Steam) for easier local 47 | development -------------------------------------------------------------------------------- /backroll/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "backroll" 3 | version = "0.5.0" 4 | edition = "2021" 5 | authors = ["Hourai Teahouse Developers "] 6 | description = "A pure Rust async implementation of GGPO." 7 | repository = "https://github.com/HouraiTeahouse/backroll-rs" 8 | license = "ISC" 9 | 10 | [features] 11 | default = ["bevy"] 12 | bevy = ["bevy_tasks"] 13 | 14 | [dependencies] 15 | backroll_transport = { path = "../backroll_transport", version = "0.2" } 16 | async-channel = "1.6" 17 | bevy_tasks = { version = "0.9", optional = true } 18 | bincode = "1.3" 19 | bytemuck = "1.5" 20 | futures = { version = "0.3", default-features = false, features = ["std", "async-await"] } 21 | futures-timer = "3.0" 22 | parking_lot = "0.12" 23 | rand = "0.8" 24 | serde = { version = "1.0", features = ["derive"] } 25 | thiserror = "1.0" 26 | tracing = "0.1" 27 | varinteger = "1.0" 28 | smallvec = "1.0" 29 | -------------------------------------------------------------------------------- /backroll/README.md: -------------------------------------------------------------------------------- 1 | # backroll-rs 2 | 3 | [![crates.io](https://img.shields.io/crates/v/backroll.svg)](https://crates.io/crates/backroll) 4 | [![Documentation](https://docs.rs/backroll/badge.svg)](https://docs.rs/backroll) 5 | ![License](https://img.shields.io/crates/l/backroll) 6 | [![Discord](https://img.shields.io/discord/151219753434742784.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/VuZhs9V) 7 | 8 | Backroll is a pure Rust implementation of [GGPO](https://www.ggpo.net/)-style 9 | rollback networking. 10 | 11 | ## Development Status 12 | This is still in an early beta stage. At time of writing, the public facing API 13 | is stable, and has undergone limited testing. There may still be notable bugs 14 | that have not been found yet. 15 | 16 | ## Differences with the C++ implementation 17 | 18 | * (Almost) 100% pure **safe** Rust. No unsafe pointer manipulation. 19 | * Type safety. backroll-rs heavily utilizes generics and associated types to 20 | avoid serialization overhead and potentially unsafe type conversions when 21 | saving and loading game state. 22 | * Abstracted transport layer protocols - integrate and use any transport layer 23 | library you need. Comes with a raw UDP socket based implementation. 24 | * Configurable at runtime - Many of the hard-coded constants in GGPO are exposed 25 | as configuration parameters during session initialization. 26 | * Reduced memory usage - Backroll's use of generics potentially shrinks down 27 | the sizes of many data types. 28 | * Vectorized input compression scheme - Backroll utilizes the same XOR + RLE 29 | encoding, but it's written to maximize CPU utilization. 30 | * Multithreaded I/O - All network communications run within an async task pool. 
31 | I/O polling is no longer manual, nor blocks your game's execution. -------------------------------------------------------------------------------- /backroll/src/backend/mod.rs: -------------------------------------------------------------------------------- 1 | use super::{BackrollError, BackrollResult, Player, PlayerHandle}; 2 | 3 | mod p2p; 4 | 5 | pub use p2p::{P2PSession, P2PSessionBuilder}; 6 | -------------------------------------------------------------------------------- /backroll/src/backend/p2p.rs: -------------------------------------------------------------------------------- 1 | use super::{BackrollError, BackrollResult, Player, PlayerHandle}; 2 | use crate::{ 3 | command::{Command, Commands}, 4 | input::FrameInput, 5 | is_null, 6 | protocol::{ConnectionStatus, Event as ProtocolEvent, Peer, PeerConfig}, 7 | sync::{self, Sync}, 8 | transport::Peer as TransportPeer, 9 | Config, Event, Frame, NetworkStats, MAX_PLAYERS, 10 | }; 11 | use async_channel::TryRecvError; 12 | use parking_lot::RwLock; 13 | use std::sync::Arc; 14 | use std::time::Duration; 15 | use tracing::debug; 16 | 17 | const RECOMMENDATION_INTERVAL: Frame = 240; 18 | const DEFAULT_FRAME_DELAY: Frame = 3; 19 | const DEFAULT_DISCONNECT_TIMEOUT: Duration = Duration::from_millis(5000); 20 | const DEFAULT_DISCONNECT_NOTIFY_START: Duration = Duration::from_millis(750); 21 | 22 | enum PlayerType 23 | where 24 | T: Config, 25 | { 26 | Local, 27 | Remote { 28 | peer: Box>, 29 | rx: async_channel::Receiver>, 30 | }, 31 | } 32 | 33 | impl Clone for PlayerType { 34 | fn clone(&self) -> Self { 35 | match self { 36 | Self::Local => Self::Local, 37 | Self::Remote { peer, rx } => Self::Remote { 38 | peer: peer.clone(), 39 | rx: rx.clone(), 40 | }, 41 | } 42 | } 43 | } 44 | 45 | impl PlayerType { 46 | pub fn new( 47 | queue: usize, 48 | player: &Player, 49 | builder: &P2PSessionBuilder, 50 | connect: Arc<[RwLock]>, 51 | ) -> Self { 52 | match player { 53 | Player::Local => Self::Local, 54 | Player::Remote(peer) => { 55 | let (peer, rx) = Self::make_peer(queue, peer, builder, connect); 56 | PlayerType::::Remote { 57 | peer: Box::new(peer), 58 | rx, 59 | } 60 | } 61 | } 62 | } 63 | 64 | fn make_peer( 65 | queue: usize, 66 | peer: &TransportPeer, 67 | builder: &P2PSessionBuilder, 68 | connect: Arc<[RwLock]>, 69 | ) -> (Peer, async_channel::Receiver>) { 70 | let config = PeerConfig { 71 | peer: peer.clone(), 72 | disconnect_timeout: builder.disconnect_timeout, 73 | disconnect_notify_start: builder.disconnect_notify_start, 74 | }; 75 | 76 | Peer::::new(queue, config, connect) 77 | } 78 | 79 | pub fn peer(&self) -> Option<&Peer> { 80 | match self { 81 | Self::Local => None, 82 | Self::Remote { ref peer, .. } => Some(peer), 83 | } 84 | } 85 | 86 | pub fn is_local(&self) -> bool { 87 | self.peer().is_none() 88 | } 89 | 90 | pub fn is_remote_player(&self) -> bool { 91 | matches!(self, Self::Remote { .. }) 92 | } 93 | 94 | pub fn is_synchronized(&self) -> bool { 95 | if let Some(peer) = self.peer() { 96 | peer.is_running() 97 | } else { 98 | true 99 | } 100 | } 101 | 102 | pub fn send_input(&mut self, input: FrameInput) { 103 | if let Some(peer) = self.peer() { 104 | let _ = peer.send_input(input); 105 | } 106 | } 107 | 108 | pub fn disconnect(&mut self) { 109 | if let Some(peer) = self.peer() { 110 | peer.disconnect(); 111 | } 112 | } 113 | 114 | pub fn get_network_stats(&self) -> Option { 115 | self.peer().map(|peer| peer.get_network_stats()) 116 | } 117 | } 118 | 119 | /// A builder for [P2PSession]. 
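/// A minimal construction sketch (`MyConfig` and `peer` are illustrative: any [Config]
/// implementation and any already-established `backroll_transport::Peer` will do):
///
/// ```ignore
/// let mut builder = P2PSession::<MyConfig>::build();
/// let local = builder.add_player(Player::Local);
/// let remote = builder.add_player(Player::Remote(peer));
/// let session = builder.start()?;
/// ```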
120 | /// 121 | /// [P2PSession]: self::P2PSession 122 | pub struct P2PSessionBuilder<T> 123 | where 124 | T: Config, 125 | { 126 | players: Vec<Player>, 127 | frame_delay: Frame, 128 | disconnect_timeout: Duration, 129 | disconnect_notify_start: Duration, 130 | marker_: std::marker::PhantomData<T>, 131 | } 132 | 133 | impl<T> Default for P2PSessionBuilder<T> 134 | where 135 | T: Config, 136 | { 137 | fn default() -> Self { 138 | Self::new() 139 | } 140 | } 141 | 142 | impl<T> P2PSessionBuilder<T> 143 | where 144 | T: Config, 145 | { 146 | /// Creates a new builder. Identical to [P2PSession::build]. 147 | /// 148 | /// [P2PSession]: self::P2PSession 149 | pub fn new() -> Self { 150 | Self { 151 | players: Vec::new(), 152 | frame_delay: DEFAULT_FRAME_DELAY, 153 | disconnect_timeout: DEFAULT_DISCONNECT_TIMEOUT, 154 | disconnect_notify_start: DEFAULT_DISCONNECT_NOTIFY_START, 155 | marker_: Default::default(), 156 | } 157 | } 158 | 159 | /// Sets how much frame delay is used for all active players. 160 | /// Defaults to 3 frames. 161 | pub fn with_frame_delay(mut self, frame_delay: Frame) -> Self { 162 | self.frame_delay = frame_delay; 163 | self 164 | } 165 | 166 | /// Sets how long the client will wait for a packet from a remote player 167 | /// before considering the connection disconnected. Defaults to 5000ms. 168 | pub fn with_disconnect_timeout(mut self, timeout: Duration) -> Self { 169 | self.disconnect_timeout = timeout; 170 | self 171 | } 172 | 173 | /// Sets how long the client will wait for a packet from a remote player 174 | /// before firing an [Event::ConnectionInterrupted] event. Defaults to 750ms. 175 | /// 176 | /// [Event]: crate::Event 177 | pub fn with_disconnect_notify_start(mut self, timeout: Duration) -> Self { 178 | self.disconnect_notify_start = timeout; 179 | self 180 | } 181 | 182 | /// Adds a player to the session and returns the corresponding handle. 183 | pub fn add_player(&mut self, player: Player) -> PlayerHandle { 184 | let id = self.players.len(); 185 | self.players.push(player); 186 | PlayerHandle(id) 187 | } 188 | 189 | /// Constructs and starts the P2PSession. Consumes the builder. 190 | /// 191 | /// # Errors 192 | /// Returns [BackrollError::MultipleLocalPlayers] if there are multiple local players. 193 | /// Backroll currently only supports one local player.
194 | /// 195 | /// [BackrolLError]: crate::BackrolLError 196 | pub fn start(self) -> BackrollResult> { 197 | P2PSession::new_internal(self) 198 | } 199 | } 200 | 201 | struct P2PSessionRef 202 | where 203 | T: Config, 204 | { 205 | sync: Sync, 206 | players: Vec>, 207 | 208 | synchronizing: bool, 209 | next_recommended_sleep: Frame, 210 | 211 | local_connect_status: Arc<[RwLock]>, 212 | } 213 | 214 | impl P2PSessionRef { 215 | fn players(&self) -> impl Iterator> { 216 | self.players 217 | .iter() 218 | .filter(|player| player.is_remote_player()) 219 | .filter_map(|player| player.peer()) 220 | } 221 | 222 | fn player_handle_to_queue(&self, player: PlayerHandle) -> BackrollResult { 223 | let offset = player.0; 224 | if offset >= self.sync.player_count() { 225 | return Err(BackrollError::InvalidPlayer(player)); 226 | } 227 | Ok(offset) 228 | } 229 | 230 | fn check_initial_sync(&mut self, commands: &mut Commands) { 231 | if self.synchronizing && self.is_synchronized() { 232 | commands.push(Command::Event(Event::Running)); 233 | self.synchronizing = false; 234 | } 235 | } 236 | 237 | fn disconnect_player( 238 | &mut self, 239 | commands: &mut Commands, 240 | player: PlayerHandle, 241 | ) -> BackrollResult<()> { 242 | let queue = self.player_handle_to_queue(player)?; 243 | let (last_frame, disconnected) = { 244 | let status = self.local_connect_status[queue].read(); 245 | (status.last_frame, status.disconnected) 246 | }; 247 | 248 | if disconnected { 249 | return Err(BackrollError::PlayerDisconnected(player)); 250 | } 251 | 252 | if self.players[queue].is_local() { 253 | // The player is local. This should disconnect the local player from the rest 254 | // of the game. All other players need to be disconnected. 255 | // that if the endpoint is not initalized, this must be the local player. 256 | let current_frame = self.sync.frame_count(); 257 | debug!( 258 | "Disconnecting local player {} at frame {} by user request.", 259 | queue, last_frame 260 | ); 261 | for i in 0..self.players.len() { 262 | if !self.players[i].is_local() { 263 | self.disconnect_player_queue(commands, i, current_frame); 264 | } 265 | } 266 | } else { 267 | debug!( 268 | "Disconnecting queue {} at frame {} by user request.", 269 | queue, last_frame 270 | ); 271 | self.disconnect_player_queue(commands, queue, last_frame); 272 | } 273 | Ok(()) 274 | } 275 | 276 | fn disconnect_player_queue(&mut self, commands: &mut Commands, queue: usize, syncto: Frame) { 277 | let frame_count = self.sync.frame_count(); 278 | 279 | self.players[queue].disconnect(); 280 | 281 | debug!("Changing queue {} local connect status for last frame from {} to {} on disconnect request (current: {}).", 282 | queue, self.local_connect_status[queue].read().last_frame, syncto, frame_count); 283 | 284 | { 285 | let mut status = self.local_connect_status[queue].write(); 286 | status.disconnected = true; 287 | status.last_frame = syncto; 288 | } 289 | 290 | if syncto < frame_count { 291 | debug!( 292 | "Adjusting simulation to account for the fact that {} disconnected @ {}.", 293 | queue, syncto 294 | ); 295 | self.sync.adjust_simulation(commands, syncto); 296 | debug!("Finished adjusting simulation."); 297 | } 298 | 299 | commands.push(Command::Event(Event::Disconnected(PlayerHandle(queue)))); 300 | 301 | self.check_initial_sync(commands); 302 | } 303 | 304 | fn flush_events(&mut self, commands: &mut Commands) { 305 | for (queue, player) in self.players.clone().iter().enumerate() { 306 | if let PlayerType::::Remote { rx, .. 
} = player { 307 | self.flush_peer_events(commands, queue, rx.clone()); 308 | } 309 | } 310 | } 311 | 312 | fn flush_peer_events( 313 | &mut self, 314 | commands: &mut Commands, 315 | queue: usize, 316 | rx: async_channel::Receiver>, 317 | ) { 318 | loop { 319 | match rx.try_recv() { 320 | Ok(evt) => self.handle_event(commands, queue, evt), 321 | Err(TryRecvError::Empty) => break, 322 | Err(TryRecvError::Closed) => { 323 | self.disconnect_player(commands, PlayerHandle(queue)) 324 | .expect("Disconnecting should not error on closing connection"); 325 | break; 326 | } 327 | } 328 | } 329 | } 330 | 331 | fn handle_event( 332 | &mut self, 333 | commands: &mut Commands, 334 | queue: usize, 335 | evt: ProtocolEvent, 336 | ) { 337 | let player = PlayerHandle(queue); 338 | match evt { 339 | ProtocolEvent::::Connected => { 340 | commands.push(Command::Event(Event::Connected(PlayerHandle(queue)))); 341 | } 342 | ProtocolEvent::::Synchronizing { total, count } => { 343 | commands.push(Command::Event(Event::Synchronizing { 344 | player, 345 | total, 346 | count, 347 | })); 348 | } 349 | ProtocolEvent::::Inputs(inputs) => { 350 | let mut status = self.local_connect_status[queue].write(); 351 | if status.disconnected { 352 | return; 353 | } 354 | 355 | for input in inputs { 356 | let current_remote_frame = status.last_frame; 357 | let new_remote_frame = input.frame; 358 | debug_assert!( 359 | crate::is_null(current_remote_frame) 360 | || new_remote_frame == (current_remote_frame + 1) 361 | ); 362 | self.sync.add_remote_input(queue, input); 363 | 364 | // Notify the other endpoints which frame we received from a peer 365 | debug!( 366 | "setting remote connect status for queue {} to {}", 367 | queue, new_remote_frame 368 | ); 369 | 370 | status.last_frame = new_remote_frame; 371 | } 372 | } 373 | ProtocolEvent::::Synchronized => { 374 | commands.push(Command::Event(Event::Synchronized(player))); 375 | self.check_initial_sync(commands); 376 | } 377 | ProtocolEvent::::NetworkInterrupted { disconnect_timeout } => { 378 | commands.push(Command::Event(Event::ConnectionInterrupted { 379 | player, 380 | disconnect_timeout, 381 | })); 382 | } 383 | ProtocolEvent::::NetworkResumed => { 384 | commands.push(Command::Event(Event::Synchronized(player))); 385 | } 386 | } 387 | } 388 | 389 | fn do_poll(&mut self, commands: &mut Commands) { 390 | if self.sync.in_rollback() { 391 | return; 392 | } 393 | 394 | self.flush_events(commands); 395 | 396 | if self.synchronizing { 397 | return; 398 | } 399 | 400 | self.sync.check_simulation(commands); 401 | 402 | // notify all of our endpoints of their local frame number for their 403 | // next connection quality report 404 | let current_frame = self.sync.frame_count(); 405 | for player in self.players() { 406 | player.set_local_frame_number(current_frame); 407 | } 408 | 409 | let remote_player_count = self 410 | .players 411 | .iter() 412 | .filter(|player| !player.is_local()) 413 | .count(); 414 | 415 | let min_frame = if remote_player_count == 0 { 416 | current_frame 417 | } else if self.players().count() <= 2 { 418 | self.poll_2_players(commands) 419 | } else { 420 | self.poll_n_players(commands) 421 | }; 422 | 423 | debug!("last confirmed frame in p2p backend is {}.", min_frame); 424 | if min_frame >= 0 { 425 | debug_assert!(min_frame != Frame::MAX); 426 | debug!("setting confirmed frame in sync to {}.", min_frame); 427 | self.sync.set_last_confirmed_frame(min_frame); 428 | } 429 | 430 | // send timesync notifications if now is the proper time 431 | if current_frame > 
self.next_recommended_sleep { 432 | let interval = self 433 | .players() 434 | .map(|player| player.recommend_frame_delay()) 435 | .max(); 436 | if let Some(interval) = interval { 437 | commands.push(Command::Event(Event::TimeSync { 438 | frames_ahead: interval as u8, 439 | })); 440 | self.next_recommended_sleep = current_frame + RECOMMENDATION_INTERVAL; 441 | } 442 | } 443 | } 444 | 445 | fn poll_2_players(&mut self, commands: &mut Commands) -> Frame { 446 | // discard confirmed frames as appropriate 447 | let mut min_frame = Frame::MAX; 448 | for i in 0..self.players.len() { 449 | let player = &self.players[i]; 450 | let mut queue_connected = true; 451 | if let Some(peer) = player.peer() { 452 | if peer.is_running() { 453 | queue_connected = !peer.get_peer_connect_status(i).disconnected; 454 | } 455 | } 456 | let local_status = self.local_connect_status[i].read().clone(); 457 | if !local_status.disconnected { 458 | min_frame = std::cmp::min(local_status.last_frame, min_frame); 459 | } 460 | debug!( 461 | "local endp: connected = {}, last_received = {}, total_min_confirmed = {}.", 462 | !local_status.disconnected, local_status.last_frame, min_frame 463 | ); 464 | if !queue_connected && !local_status.disconnected { 465 | debug!("disconnecting player {} by remote request.", i); 466 | self.disconnect_player_queue(commands, i, min_frame); 467 | } 468 | debug!("min_frame = {}.", min_frame); 469 | } 470 | min_frame 471 | } 472 | 473 | fn poll_n_players(&mut self, commands: &mut Commands) -> Frame { 474 | // discard confirmed frames as appropriate 475 | let mut min_frame = Frame::MAX; 476 | for queue in 0..self.players.len() { 477 | let mut queue_connected = true; 478 | let mut queue_min_confirmed = Frame::MAX; 479 | debug!("considering queue {}.", queue); 480 | for (i, player) in self.players.iter().enumerate() { 481 | // we're going to do a lot of logic here in consideration of endpoint i. 482 | // keep accumulating the minimum confirmed point for all n*n packets and 483 | // throw away the rest. 484 | if player.peer().map(|peer| peer.is_running()).unwrap_or(false) { 485 | let peer = player.peer().unwrap(); 486 | let status = peer.get_peer_connect_status(queue); 487 | queue_connected = queue_connected && !status.disconnected; 488 | queue_min_confirmed = std::cmp::min(status.last_frame, queue_min_confirmed); 489 | debug!("endpoint {}: connected = {}, last_received = {}, queue_min_confirmed = {}.", 490 | i, queue_connected, status.last_frame, queue_min_confirmed); 491 | } else { 492 | debug!("endpoint {}: ignoring... not running.", i); 493 | } 494 | } 495 | 496 | let local_status = self.local_connect_status[queue].read().clone(); 497 | // merge in our local status only if we're still connected! 498 | if !local_status.disconnected { 499 | queue_min_confirmed = std::cmp::min(local_status.last_frame, queue_min_confirmed); 500 | } 501 | debug!( 502 | "local endp: connected = {}, last_received = {}, queue_min_confirmed = {}.", 503 | !local_status.disconnected, local_status.last_frame, queue_min_confirmed 504 | ); 505 | 506 | if queue_connected { 507 | min_frame = std::cmp::min(queue_min_confirmed, min_frame); 508 | } else { 509 | // check to see if this disconnect notification is further back than we've been before. If 510 | // so, we need to re-adjust. This can happen when we detect our own disconnect at frame n 511 | // and later receive a disconnect notification for frame n-1. 
512 | if !local_status.disconnected || local_status.last_frame > queue_min_confirmed { 513 | debug!("disconnecting queue {} by remote request.", queue); 514 | self.disconnect_player_queue(commands, queue, queue_min_confirmed); 515 | } 516 | } 517 | debug!("min_frame = {}.", min_frame); 518 | } 519 | min_frame 520 | } 521 | 522 | fn is_synchronized(&self) -> bool { 523 | // Check to see if everyone is now synchronized. If so, 524 | // go ahead and tell the client that we're ok to accept input. 525 | for (i, player) in self.players.iter().enumerate() { 526 | if !player.is_local() 527 | && !player.is_synchronized() 528 | && !self.local_connect_status[i].read().disconnected 529 | { 530 | return false; 531 | } 532 | } 533 | true 534 | } 535 | } 536 | 537 | /// The main peer-to-peer Backroll session. 538 | /// 539 | /// This type internally wraps an Arc>, so it is safe to 540 | /// send and access across threads, and is cheap to clone. 541 | pub struct P2PSession(Arc>>) 542 | where 543 | T: Config; 544 | 545 | impl Clone for P2PSession { 546 | fn clone(&self) -> Self { 547 | Self(self.0.clone()) 548 | } 549 | } 550 | 551 | impl P2PSession { 552 | pub fn build() -> P2PSessionBuilder { 553 | P2PSessionBuilder::new() 554 | } 555 | 556 | fn new_internal(builder: P2PSessionBuilder) -> BackrollResult { 557 | let local_player_count = builder 558 | .players 559 | .iter() 560 | .filter(|player| player.is_local()) 561 | .count(); 562 | let remote_player_count = builder.players.len() - local_player_count; 563 | 564 | if local_player_count > 1 && remote_player_count > 1 { 565 | return Err(BackrollError::MultipleLocalPlayers); 566 | } 567 | let player_count = builder.players.len(); 568 | let connect_status: Vec> = 569 | (0..player_count).map(|_| Default::default()).collect(); 570 | let connect_status: Arc<[RwLock]> = connect_status.into(); 571 | 572 | let players: Vec> = builder 573 | .players 574 | .iter() 575 | .enumerate() 576 | .map(|(i, player)| PlayerType::::new(i, player, &builder, connect_status.clone())) 577 | .collect(); 578 | 579 | let synchronizing = players.iter().any(|player| !player.is_local()); 580 | let config = sync::PlayerConfig { 581 | player_count, 582 | frame_delay: builder.frame_delay, 583 | }; 584 | let sync = Sync::::new(config, connect_status.clone()); 585 | Ok(Self(Arc::new(RwLock::new(P2PSessionRef:: { 586 | sync, 587 | players, 588 | synchronizing, 589 | next_recommended_sleep: 0, 590 | local_connect_status: connect_status, 591 | })))) 592 | } 593 | 594 | /// Gets the number of players in the current session. This includes 595 | /// users that are already disconnected. 596 | pub fn player_count(&self) -> usize { 597 | self.0.read().sync.player_count() 598 | } 599 | 600 | /// Checks if the session currently in the middle of a rollback. 601 | pub fn in_rollback(&self) -> bool { 602 | self.0.read().sync.in_rollback() 603 | } 604 | 605 | /// Gets the current frame of the game. 
606 | pub fn current_frame(&self) -> Frame { 607 | self.0.read().sync.frame_count() 608 | } 609 | 610 | pub fn local_players(&self) -> smallvec::SmallVec<[PlayerHandle; MAX_PLAYERS]> { 611 | self.0 612 | .read() 613 | .players 614 | .iter() 615 | .enumerate() 616 | .filter(|(_, player)| player.is_local()) 617 | .map(|(i, _)| PlayerHandle(i)) 618 | .collect() 619 | } 620 | 621 | pub fn remote_players(&self) -> smallvec::SmallVec<[PlayerHandle; MAX_PLAYERS]> { 622 | self.0 623 | .read() 624 | .players 625 | .iter() 626 | .enumerate() 627 | .filter(|(_, player)| player.is_remote_player()) 628 | .map(|(i, _)| PlayerHandle(i)) 629 | .collect() 630 | } 631 | 632 | /// Checks if all remote players are synchronized. If all players are 633 | /// local, this will always return true. 634 | pub fn is_synchronized(&self) -> bool { 635 | self.0.read().is_synchronized() 636 | } 637 | 638 | /// Adds a local input for the current frame. This will register the input in the local 639 | /// input queues, as well as queue the input to be sent to all remote players. If called multiple 640 | /// times for the same player without advancing the session with [advance_frame], the previously 641 | /// queued input for the frame will be overwritten. 642 | /// 643 | /// For a corrrect simulation, this must be called on all local players every frame before calling 644 | /// [advance_frame]. 645 | /// 646 | /// # Errors 647 | /// Returns [BackrollError::InRollback] if the session is currently in the middle of a rollback. 648 | /// 649 | /// Returns [BackrollError::NotSynchronized] if the all of the remote peers have not yet 650 | /// synchornized. 651 | /// 652 | /// Returns [BackrollError::InvalidPlayer] if the provided player handle does not point a vali 653 | /// player. 654 | /// 655 | /// # Panics 656 | /// This function will panic if the player is not a local player. 657 | /// 658 | /// [BackrollError]: crate::BackrollError 659 | /// [advance_frame]: self::P2PSession::advance_frame 660 | pub fn add_local_input(&self, player: PlayerHandle, input: T::Input) -> BackrollResult<()> { 661 | let mut session_ref = self.0.write(); 662 | if session_ref.sync.in_rollback() { 663 | return Err(BackrollError::InRollback); 664 | } 665 | if session_ref.synchronizing { 666 | return Err(BackrollError::NotSynchronized); 667 | } 668 | 669 | let queue = session_ref.player_handle_to_queue(player)?; 670 | assert!( 671 | session_ref.players[queue].is_local(), 672 | "{:?} is not a local player!", 673 | player 674 | ); 675 | let frame = session_ref.sync.add_local_input(queue, input)?; 676 | if !is_null(frame) { 677 | // Update the local connect status state to indicate that we've got a 678 | // confirmed local frame for this player. this must come first so it 679 | // gets incorporated into the next packet we send. 680 | 681 | debug!( 682 | "setting local connect status for local queue {} to {}", 683 | queue, frame 684 | ); 685 | session_ref.local_connect_status[queue].write().last_frame = frame; 686 | 687 | for player in session_ref.players.iter_mut() { 688 | player.send_input(FrameInput:: { frame, input }); 689 | } 690 | } 691 | 692 | Ok(()) 693 | } 694 | 695 | /// Advances the game simulation by a single frame. This will issue a [Command::AdvanceFrame] 696 | /// then check if the simulation is consistent with the inputs sent by remote players. If not, a 697 | /// rollback will be triggered, and the game will be resimulated from the point of rollback. 
698 | /// 699 | /// For a correct simulation, [add_local_input] must be called on all local players every frame before 700 | /// calling this. If any call to [add_local_input] fails, this should not be called. 701 | /// 702 | /// All of the provided commands must be executed in order, and must not be reordered or skipped. 703 | /// 704 | /// [add_local_input]: self::P2PSession::add_local_input 705 | /// [Command]: crate::command::Command 706 | pub fn advance_frame(&self) -> Commands<T> { 707 | let mut session_ref = self.0.write(); 708 | let mut commands = Commands::<T>::default(); 709 | debug!("End of frame ({})...", session_ref.sync.frame_count()); 710 | if !session_ref.synchronizing { 711 | session_ref.sync.increment_frame(&mut commands); 712 | } 713 | session_ref.do_poll(&mut commands); 714 | commands 715 | } 716 | 717 | /// Flushes lower level network events. This should always be called before adding local 718 | /// inputs every frame of the game, regardless of whether the game is advancing its state or 719 | /// not. 720 | /// 721 | /// All of the provided commands must be executed in order, and must not be reordered or skipped. 722 | pub fn poll(&self) -> Commands<T> { 723 | let mut session_ref = self.0.write(); 724 | let mut commands = Commands::default(); 725 | session_ref.do_poll(&mut commands); 726 | commands 727 | } 728 | 729 | /// Disconnects a player from the game. 730 | /// 731 | /// If called on a local player, this will disconnect the client from all remote peers. 732 | /// 733 | /// If called on a remote player, this will disconnect the connection with only that player. 734 | /// 735 | /// # Errors 736 | /// Returns [BackrollError::InvalidPlayer] if the provided player handle does not point to a valid 737 | /// player. 738 | /// 739 | /// Returns [BackrollError::PlayerDisconnected] if the provided player is already disconnected. 740 | pub fn disconnect_player(&self, player: PlayerHandle) -> BackrollResult<Commands<T>> { 741 | let mut session_ref = self.0.write(); 742 | let queue = session_ref.player_handle_to_queue(player)?; 743 | if session_ref.local_connect_status[queue].read().disconnected { 744 | return Err(BackrollError::PlayerDisconnected(player)); 745 | } 746 | 747 | let mut commands = Commands::<T>::default(); 748 | let last_frame = session_ref.local_connect_status[queue].read().last_frame; 749 | if session_ref.players[queue].is_local() { 750 | // The player is local. This should disconnect the local player from the rest 751 | // of the game. All other players need to be disconnected. 752 | // Note that if the endpoint is not initialized, this must be the local player. 753 | let current_frame = session_ref.sync.frame_count(); 754 | debug!( 755 | "Disconnecting local player {} at frame {} by user request.", 756 | queue, last_frame 757 | ); 758 | for i in 0..session_ref.players.len() { 759 | if !session_ref.players[i].is_local() { 760 | session_ref.disconnect_player_queue(&mut commands, i, current_frame); 761 | } 762 | } 763 | } else { 764 | debug!( 765 | "Disconnecting queue {} at frame {} by user request.", 766 | queue, last_frame 767 | ); 768 | session_ref.disconnect_player_queue(&mut commands, queue, last_frame); 769 | } 770 | Ok(commands) 771 | } 772 | 773 | /// Gets network statistics with a remote player. 774 | /// 775 | /// # Errors 776 | /// Returns [BackrollError::InvalidPlayer] if the provided player handle does not point to a valid 777 | /// player.
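///
/// A usage sketch (`session` is a running [P2PSession] and `remote` is a handle
/// returned from the builder; both names are illustrative):
///
/// ```ignore
/// let stats = session.get_network_stats(remote)?;
/// println!("ping: {:?}, kbps sent: {}", stats.ping, stats.kbps_sent);
/// ```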
778 | pub fn get_network_stats(&self, player: PlayerHandle) -> BackrollResult { 779 | let session_ref = self.0.read(); 780 | let queue = session_ref.player_handle_to_queue(player)?; 781 | Ok(session_ref.players[queue] 782 | .get_network_stats() 783 | .unwrap_or_default()) 784 | } 785 | 786 | /// Sets the frame delay for a given player. 787 | /// 788 | /// # Errors 789 | /// Returns [BackrollError::InvalidPlayer] if the provided player handle does not point a vali 790 | /// player. 791 | pub fn set_frame_delay(&self, player: PlayerHandle, delay: Frame) -> BackrollResult<()> { 792 | let mut session_ref = self.0.write(); 793 | let queue = session_ref.player_handle_to_queue(player)?; 794 | session_ref.sync.set_frame_delay(queue, delay); 795 | Ok(()) 796 | } 797 | } 798 | -------------------------------------------------------------------------------- /backroll/src/command.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | input::GameInput, 3 | sync::{SavedCell, SavedFrame}, 4 | Config, Event, Frame, 5 | }; 6 | use std::{ 7 | collections::hash_map::DefaultHasher, 8 | hash::{Hash, Hasher}, 9 | }; 10 | use tracing::{debug, error}; 11 | 12 | /// A singular command for a Backroll session client to execute. 13 | /// 14 | /// Proper execution of the command is not optional, and must be done in the exact 15 | /// order [Commands] returns it in. Filtering or altering the order in which commands 16 | /// are executed, or dropping commands without executing them may result incorrect 17 | /// simulation and/or panics. 18 | /// 19 | /// [Commands]: self::Commands 20 | pub enum Command 21 | where 22 | T: Config, 23 | { 24 | /// The client should copy the entire contents of the current game state into a 25 | /// new state struct and return it. 26 | /// 27 | /// Optionally, the client can compute a 64-bit checksum of the data and return it. 28 | Save(SaveState), 29 | 30 | /// Backroll will issue this command at the beginning of a rollback. The argument 31 | /// provided will be a previously saved state returned from the save_state function. 32 | /// The client should make the current game state match the state contained in the 33 | /// argument. 34 | Load(LoadState), 35 | 36 | /// Clients should advance the game state by exactly one frame. 37 | /// The provided inputs will contain the inputs you should use for the given frame. 38 | AdvanceFrame(GameInput), 39 | 40 | /// Notification that something has happened in the lower level protocols. See the 41 | /// `[Event]` struct for more information. 42 | Event(Event), 43 | } 44 | 45 | /// A command for saving the state of the game. 46 | /// 47 | /// Consumers MUST save before the command is dropped. Failure to do so will 48 | /// result in a panic. 49 | pub struct SaveState { 50 | pub(crate) cell: SavedCell, 51 | pub(crate) frame: Frame, 52 | } 53 | 54 | impl SaveState { 55 | /// Saves a single frame's state to the session's state buffer and uses 56 | /// the hash of the state as the checksum. This uses the 57 | /// [DefaultHasher] implementation. 58 | /// 59 | /// This consumes the SaveState, saving multiple times is not allowed. 60 | /// 61 | /// [DefaultHasher]: std::collections::hash_map::DefaultHasher 62 | pub fn save(self, state: T) 63 | where 64 | T: Hash, 65 | { 66 | let mut hasher = DefaultHasher::new(); 67 | state.hash(&mut hasher); 68 | self.save_with_hash(state, hasher.finish()); 69 | } 70 | 71 | /// Saves a single frame's state to the session's state buffer without 72 | /// a saved checksum. 
73 | /// 74 | /// This consumes the SaveState; saving multiple times is not allowed. 75 | pub fn save_without_hash(self, state: T) { 76 | self.save_state(state, None); 77 | } 78 | 79 | /// Saves a single frame's state to the session's state buffer with a 80 | /// provided checksum. 81 | /// 82 | /// This consumes the SaveState; saving multiple times is not allowed. 83 | pub fn save_with_hash(self, state: T, checksum: u64) { 84 | self.save_state(state, Some(checksum)); 85 | } 86 | 87 | fn save_state(self, state: T, checksum: Option<u64>) { 88 | debug!( 89 | "=== Saved frame state {} (checksum: {:08x}).", 90 | self.frame, 91 | checksum.unwrap_or(0) 92 | ); 93 | self.cell.save(SavedFrame::<T> { 94 | frame: self.frame, 95 | data: Some(Box::new(state)), 96 | checksum, 97 | }); 98 | assert!(self.cell.is_valid()); 99 | } 100 | } 101 | 102 | impl<T> Drop for SaveState<T> { 103 | fn drop(&mut self) { 104 | if !self.cell.is_valid() { 105 | error!("A SaveState command was dropped without saving a valid state."); 106 | } 107 | } 108 | } 109 | 110 | /// A command for loading a saved state of the game. 111 | pub struct LoadState<T> { 112 | pub(crate) cell: SavedCell<T>, 113 | } 114 | 115 | impl<T: Clone> LoadState<T> { 116 | /// Loads the saved state of the game. 117 | /// 118 | /// This will clone the internal copy of the save state. For games with 119 | /// potentially large save state, this might be expensive. 120 | /// 121 | /// Note this consumes the LoadState; loading multiple times is 122 | /// not allowed. 123 | pub fn load(self) -> T { 124 | self.cell.load() 125 | } 126 | } 127 | 128 | /// An ordered container of commands for clients to execute. 129 | /// 130 | /// Proper execution of the commands is not optional, and must be done in the exact 131 | /// order they are returned in. Filtering or altering the order in which commands 132 | /// are executed, or dropping commands without executing them may result in incorrect 133 | /// simulation and/or panics.
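///
/// A typical consumption loop looks like the following sketch (`session`, `game`,
/// and `handle_event` are illustrative names, not part of this crate):
///
/// ```ignore
/// for command in session.poll() {
///     match command {
///         Command::Save(save) => save.save_without_hash(game.clone()),
///         Command::Load(load) => game = load.load(),
///         Command::AdvanceFrame(inputs) => game.advance(&inputs),
///         Command::Event(event) => handle_event(event),
///     }
/// }
/// ```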
134 | pub struct Commands 135 | where 136 | T: Config, 137 | { 138 | commands: Vec>, 139 | } 140 | 141 | impl Commands { 142 | pub(crate) fn push(&mut self, command: Command) { 143 | self.commands.push(command); 144 | } 145 | } 146 | 147 | impl Default for Commands { 148 | fn default() -> Self { 149 | Self { 150 | commands: Vec::new(), 151 | } 152 | } 153 | } 154 | 155 | impl IntoIterator for Commands { 156 | type Item = Command; 157 | type IntoIter = std::vec::IntoIter>; 158 | fn into_iter(self) -> Self::IntoIter { 159 | self.commands.into_iter() 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /backroll/src/input.rs: -------------------------------------------------------------------------------- 1 | use crate::{BackrollError, Frame, PlayerHandle, MAX_PLAYERS, MAX_ROLLBACK_FRAMES}; 2 | use std::convert::TryFrom; 3 | use std::mem::MaybeUninit; 4 | use tracing::debug; 5 | 6 | #[inline] 7 | fn previous_frame(offset: usize) -> usize { 8 | if offset == 0 { 9 | MAX_ROLLBACK_FRAMES - 1 10 | } else { 11 | offset - 1 12 | } 13 | } 14 | 15 | #[derive(Clone, Debug, PartialEq, Eq)] 16 | pub struct FrameInput { 17 | pub frame: Frame, 18 | pub input: T, 19 | } 20 | 21 | impl Default for FrameInput { 22 | fn default() -> Self { 23 | Self { 24 | frame: super::NULL_FRAME, 25 | input: T::zeroed(), 26 | } 27 | } 28 | } 29 | 30 | impl FrameInput { 31 | pub fn clear(&mut self) { 32 | self.input = T::zeroed(); 33 | } 34 | } 35 | 36 | #[derive(Clone, Debug, PartialEq, Eq)] 37 | /// A container of inputs for all of the players for single frame of a game. 38 | pub struct GameInput { 39 | /// The frame number the inputs correspond to. 40 | pub frame: Frame, 41 | pub(crate) disconnected: u8, 42 | pub(crate) inputs: [T; MAX_PLAYERS], 43 | } 44 | 45 | impl Default for GameInput { 46 | fn default() -> Self { 47 | Self { 48 | frame: super::NULL_FRAME, 49 | disconnected: 0, 50 | inputs: unsafe { core::mem::zeroed() }, 51 | } 52 | } 53 | } 54 | 55 | impl GameInput { 56 | /// Gets the input for a specific player. Returns [InvalidPlayer] 57 | /// if the provided player handle does not correspond to a valid player. 58 | /// 59 | /// [InvalidPlayer]: crate::BackrollError::InvalidPlayer 60 | pub fn get(&self, player: PlayerHandle) -> Result<&T, BackrollError> { 61 | if player.0 >= MAX_PLAYERS { 62 | return Err(BackrollError::InvalidPlayer(player)); 63 | } 64 | Ok(&self.inputs[player.0]) 65 | } 66 | 67 | /// Checks if a given player is currently disconnected. Returns [InvalidPlayer] 68 | /// if the provided player handle does not correspond to a valid player. 
69 | /// 70 | /// [InvalidPlayer]: crate::BackrollError::InvalidPlayer 71 | pub fn is_disconnected(&self, player: PlayerHandle) -> Result { 72 | if player.0 >= MAX_PLAYERS { 73 | return Err(BackrollError::InvalidPlayer(player)); 74 | } 75 | Ok(self.disconnected & (1 << player.0) != 0) 76 | } 77 | } 78 | 79 | pub enum FetchedInput { 80 | Normal(FrameInput), 81 | Prediction(FrameInput), 82 | } 83 | 84 | impl FetchedInput { 85 | pub fn unwrap(self) -> FrameInput { 86 | match self { 87 | Self::Normal(input) => input, 88 | Self::Prediction(input) => input, 89 | } 90 | } 91 | } 92 | 93 | pub struct InputQueue { 94 | head: usize, 95 | tail: usize, 96 | length: usize, 97 | first_frame: bool, 98 | 99 | last_user_added_frame: Frame, 100 | last_added_frame: Frame, 101 | first_incorrect_frame: Frame, 102 | last_frame_requested: Frame, 103 | 104 | frame_delay: Frame, 105 | 106 | inputs: [FrameInput; MAX_ROLLBACK_FRAMES], 107 | prediction: FrameInput, 108 | } 109 | 110 | impl InputQueue { 111 | #[allow(clippy::uninit_assumed_init)] 112 | pub fn new(frame_delay: Frame) -> Self { 113 | // This is necessary as Default is not defined on arrays of more 114 | // than 32 without a Copy trait bound. 115 | // 116 | // SAFE: The entire buffer is initialized by the end of the for-loop. 117 | // Assuming Zeroable is implemented correctly, this should also never 118 | // panic, so a buffer will always correctly be allocated as a large 119 | // zeroed buffer. 120 | let inputs: [FrameInput; MAX_ROLLBACK_FRAMES] = unsafe { 121 | let mut inputs: [MaybeUninit>; MAX_ROLLBACK_FRAMES] = 122 | MaybeUninit::<[MaybeUninit>; MAX_ROLLBACK_FRAMES]>::uninit() 123 | .assume_init(); 124 | for input in inputs.iter_mut() { 125 | input.write(Default::default()); 126 | } 127 | inputs.map(|input| input.assume_init()) 128 | }; 129 | 130 | Self { 131 | head: 0, 132 | tail: 0, 133 | length: 0, 134 | frame_delay, 135 | first_frame: true, 136 | last_user_added_frame: super::NULL_FRAME, 137 | first_incorrect_frame: super::NULL_FRAME, 138 | last_frame_requested: super::NULL_FRAME, 139 | last_added_frame: super::NULL_FRAME, 140 | inputs, 141 | prediction: Default::default(), 142 | } 143 | } 144 | 145 | pub fn first_incorrect_frame(&self) -> Frame { 146 | self.first_incorrect_frame 147 | } 148 | 149 | pub fn set_frame_delay(&mut self, frame_delay: Frame) { 150 | debug_assert!(!super::is_null(frame_delay)); 151 | self.frame_delay = frame_delay; 152 | } 153 | 154 | pub fn discard_confirmed_frames(&mut self, mut frame: Frame) { 155 | debug_assert!(!super::is_null(frame)); 156 | if super::is_null(self.last_frame_requested) { 157 | frame = std::cmp::min(frame, self.last_frame_requested) 158 | } 159 | 160 | debug!( 161 | "discarding confirmed frames up to {} (last_added:{} length:{}).", 162 | frame, self.last_added_frame, self.length 163 | ); 164 | if frame >= self.last_added_frame { 165 | self.tail = self.head; 166 | self.length = 0; 167 | } else { 168 | let offset = frame - self.inputs[self.tail].frame + 1; 169 | let offset = usize::try_from(offset).unwrap(); 170 | 171 | debug!("difference of {} frames.", offset); 172 | 173 | self.tail = (self.tail + offset) % MAX_ROLLBACK_FRAMES; 174 | self.length -= offset; 175 | } 176 | } 177 | 178 | pub fn reset_prediction(&mut self, frame: Frame) { 179 | debug_assert!( 180 | super::is_null(self.first_incorrect_frame) || frame <= self.first_incorrect_frame 181 | ); 182 | 183 | debug!("resetting all prediction errors back to frame {}.", frame); 184 | 185 | // There's nothing really to do other than reset our 
prediction 186 | // state and the incorrect frame counter... 187 | self.prediction.frame = super::NULL_FRAME; 188 | self.first_incorrect_frame = super::NULL_FRAME; 189 | self.last_frame_requested = super::NULL_FRAME; 190 | } 191 | 192 | pub fn get_input(&mut self, frame: Frame) -> FetchedInput { 193 | debug!("requesting input frame {:?}.", frame); 194 | 195 | // No one should ever try to grab any input when we have a prediction 196 | // error. Doing so means that we're just going further down the wrong 197 | // path. Assert this to verify that it's true. 198 | debug_assert!(super::is_null(self.first_incorrect_frame)); 199 | 200 | // Remember the last requested frame number for later. We'll need 201 | // this in add_input() to drop out of prediction mode. 202 | self.last_frame_requested = frame; 203 | debug_assert!(frame >= self.inputs[self.tail].frame); 204 | 205 | if super::is_null(self.prediction.frame) { 206 | // If the frame requested is in our range, fetch it out of the queue and 207 | // return it. 208 | let offset = frame - self.inputs[self.tail].frame; 209 | let mut offset = usize::try_from(offset).unwrap(); 210 | if offset < self.len() { 211 | offset = (offset + self.tail) % MAX_ROLLBACK_FRAMES; 212 | let input = self.inputs[offset].clone(); 213 | debug_assert!(input.frame == frame); 214 | debug!("returning confirmed frame number {}.", input.frame); 215 | return FetchedInput::Normal(input); 216 | } 217 | 218 | // The requested frame isn't in the queue. Bummer. This means we need 219 | // to return a prediction frame. Predict that the user will do the 220 | // same thing they did last time. 221 | if frame == 0 { 222 | debug!("basing new prediction frame from nothing, you're client wants frame 0."); 223 | self.prediction.clear(); 224 | } else if super::is_null(self.last_added_frame) { 225 | debug!("basing new prediction frame from nothing, since we have no frames yet."); 226 | self.prediction.clear(); 227 | } else { 228 | debug!( 229 | "basing new prediction frame from previously added frame (frame: {}).", 230 | self.inputs[previous_frame(self.head)].frame 231 | ); 232 | self.prediction = self.inputs[previous_frame(self.head)].clone(); 233 | } 234 | self.prediction.frame += 1; 235 | } 236 | 237 | // If we've made it this far, we must be predicting. Go ahead and 238 | // forward the prediction frame contents. Be sure to return the 239 | // frame number requested by the client, though. 240 | let mut prediction = self.prediction.clone(); 241 | prediction.frame = frame; 242 | debug!( 243 | "returning prediction frame number {} ({}).", 244 | frame, self.prediction.frame 245 | ); 246 | FetchedInput::Prediction(prediction) 247 | } 248 | 249 | pub fn add_input(&mut self, input: FrameInput) -> Frame { 250 | // These next two lines simply verify that inputs are passed in 251 | // sequentially by the user, regardless of frame delay. 252 | debug_assert!( 253 | super::is_null(self.last_user_added_frame) 254 | || input.frame == self.last_user_added_frame + 1 255 | ); 256 | self.last_user_added_frame = input.frame; 257 | debug!("adding input frame number {} to queue.", input.frame); 258 | 259 | // Move the queue head to the correct point in preparation to 260 | // input the frame into the queue. 261 | let new_frame = self.advance_queue_head(input.frame); 262 | if !super::is_null(new_frame) { 263 | self.add_delayed_input(new_frame, input); 264 | } 265 | 266 | // Update the frame number for the input. This will also set the 267 | // frame to NULL_FRAME for frames that get dropped (by design). 
268 | new_frame 269 | } 270 | 271 | fn add_delayed_input(&mut self, frame: Frame, input: FrameInput) { 272 | debug!("adding delayed input frame number {} to queue.", frame); 273 | debug_assert!(super::is_null(self.last_added_frame) || frame == self.last_added_frame + 1); 274 | debug_assert!(frame == 0 || self.inputs[previous_frame(self.head)].frame == frame - 1); 275 | 276 | // Add the frame to the back of the queue 277 | self.inputs[self.head] = input.clone(); 278 | self.inputs[self.head].frame = frame; 279 | self.head = (self.head + 1) % MAX_ROLLBACK_FRAMES; 280 | self.length += 1; 281 | self.first_frame = false; 282 | self.last_added_frame = frame; 283 | 284 | if !super::is_null(self.prediction.frame) { 285 | debug_assert!(frame == self.prediction.frame); 286 | // We've been predicting... See if the inputs we've gotten match 287 | // what we've been predicting. If so, don't worry about it. If not, 288 | // remember the first input which was incorrect so we can report it 289 | // in first_incorrect_frame() 290 | if super::is_null(self.first_incorrect_frame) && self.prediction != input { 291 | debug!("frame {} does not match prediction. marking error.", frame); 292 | self.first_incorrect_frame = frame; 293 | } 294 | 295 | // If this input is the same frame as the last one requested and we 296 | // still haven't found any mis-predicted inputs, we can dump out 297 | // of predition mode entirely! Otherwise, advance the prediction frame 298 | // count up. 299 | if self.prediction.frame == self.last_frame_requested 300 | && super::is_null(self.first_incorrect_frame) 301 | { 302 | debug!("prediction is correct! dumping out of prediction mode."); 303 | self.prediction.frame = super::NULL_FRAME; 304 | } else { 305 | self.prediction.frame += 1; 306 | } 307 | } 308 | debug_assert!(self.len() <= MAX_ROLLBACK_FRAMES); 309 | } 310 | 311 | fn advance_queue_head(&mut self, mut frame: Frame) -> Frame { 312 | debug!("advancing queue head to frame {}.", frame); 313 | let mut expected_frame = if self.first_frame { 314 | 0 315 | } else { 316 | self.inputs[previous_frame(self.head)].frame + 1 317 | }; 318 | frame += self.frame_delay; 319 | 320 | if expected_frame > frame { 321 | // This can occur when the frame delay has dropped since the last 322 | // time we shoved a frame into the system. In this case, there's 323 | // no room on the queue. Toss it. 324 | debug!( 325 | "Dropping input frame {} (expected next frame to be {}).", 326 | frame, expected_frame 327 | ); 328 | return super::NULL_FRAME; 329 | } 330 | 331 | while expected_frame < frame { 332 | // This can occur when the frame delay has been increased since the last 333 | // time we shoved a frame into the system. We need to replicate the 334 | // last frame in the queue several times in order to fill the space 335 | // left. 
336 | debug!( 337 | "Adding padding frame {} to account for change in frame delay.", 338 | expected_frame 339 | ); 340 | self.add_delayed_input( 341 | expected_frame, 342 | self.inputs[previous_frame(self.head)].clone(), 343 | ); 344 | expected_frame += 1; 345 | } 346 | 347 | debug_assert!(frame == 0 || frame == self.inputs[previous_frame(self.head)].frame + 1); 348 | frame 349 | } 350 | 351 | pub fn len(&self) -> usize { 352 | self.length 353 | } 354 | } 355 | -------------------------------------------------------------------------------- /backroll/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | use thiserror::Error; 3 | 4 | mod backend; 5 | pub mod command; 6 | mod input; 7 | mod protocol; 8 | mod sync; 9 | mod time_sync; 10 | 11 | pub use backend::*; 12 | pub use backroll_transport as transport; 13 | pub use input::GameInput; 14 | 15 | /// The maximum number of players supported in a single game. 16 | pub const MAX_PLAYERS: usize = 8; 17 | // Approximately 2 seconds of frames. 18 | const MAX_ROLLBACK_FRAMES: usize = 120; 19 | 20 | type Frame = i32; 21 | const NULL_FRAME: Frame = -1; 22 | 23 | fn is_null(frame: Frame) -> bool { 24 | frame < 0 25 | } 26 | 27 | /// A handle for a player in a Backroll session. 28 | #[derive(Copy, Clone, Debug)] 29 | pub struct PlayerHandle(pub usize); 30 | 31 | /// Players within a Backroll session. 32 | #[derive(Clone)] 33 | pub enum Player { 34 | /// The local player. Backroll currently only supports one local player per machine. 35 | Local, 36 | /// A remote player that is not on the local session. 37 | Remote(transport::Peer), 38 | } 39 | 40 | impl Player { 41 | pub(crate) fn is_local(&self) -> bool { 42 | matches!(self, Self::Local) 43 | } 44 | } 45 | 46 | impl Default for Player { 47 | fn default() -> Self { 48 | Self::Local 49 | } 50 | } 51 | 52 | /// Compile time parameterization for Backroll sessions. 53 | pub trait Config: 'static { 54 | /// The input type for a Backroll session. This is the only game-related data 55 | /// transmitted over the network. 56 | /// 57 | /// Reminder: Types implementing [Pod] may not have the same byte representation 58 | /// on platforms with different endianness. Backroll assumes that all players are 59 | /// running with the same endianness when encoding and decoding inputs. It may be 60 | /// worthwhile to ensure that all players are running with the same endianess. 61 | /// 62 | /// [Pod]: bytemuck::Pod 63 | type Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync; 64 | 65 | /// The save state type for the session. This type must be safe to send across 66 | /// threads and have a 'static lifetime. This type is also responsible for 67 | /// dropping any internal linked state via [Drop]. 
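///
/// A minimal example of implementing [Config] as a whole (the input layout and the
/// state type are illustrative; `bytemuck`'s derive macros assume its "derive"
/// feature is enabled):
///
/// ```ignore
/// #[derive(Clone, Copy, PartialEq, bytemuck::Pod, bytemuck::Zeroable)]
/// #[repr(C)]
/// struct PlayerInput {
///     buttons: u16,
///     stick_x: i8,
///     stick_y: i8,
/// }
///
/// struct MyConfig;
/// impl backroll::Config for MyConfig {
///     type Input = PlayerInput;
///     type State = MyGameState; // any Clone + Send + Sync + 'static type
/// }
/// ```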
68 | /// 69 | /// [Drop]: std::ops::Drop 70 | type State: Clone + Send + Sync + 'static; 71 | } 72 | 73 | #[derive(Clone, Debug, Error)] 74 | pub enum BackrollError { 75 | #[error("Multiple local players are not currently supported.")] 76 | MultipleLocalPlayers, 77 | #[error("Action cannot be taken while in rollback.")] 78 | InRollback, 79 | #[error("The session has not been synchronized yet.")] 80 | NotSynchronized, 81 | #[error("The simulation has reached the prediction barrier.")] 82 | ReachedPredictionBarrier, 83 | #[error("Invalid player handle: {:?}", .0)] 84 | InvalidPlayer(PlayerHandle), 85 | #[error("Player already disconnected: {:?}", .0)] 86 | PlayerDisconnected(PlayerHandle), 87 | } 88 | 89 | pub type BackrollResult<T> = Result<T, BackrollError>; 90 | 91 | #[derive(Clone, Debug, Default)] 92 | /// Network statistics for the connection with a remote peer. 93 | pub struct NetworkStats { 94 | /// The round trip time between the local player and the 95 | /// remote. 96 | pub ping: Duration, 97 | /// The number of outgoing messages currently not sent. 98 | pub send_queue_len: usize, 99 | /// The number of incoming messages currently not processed. 100 | pub recv_queue_len: usize, 101 | /// The number of kilobytes sent per second, a rolling average. 102 | pub kbps_sent: u32, 103 | 104 | /// The local frame advantage relative to the associated peer. 105 | pub local_frames_behind: Frame, 106 | /// The remote frame advantage of the associated peer relative to the local player. 107 | pub remote_frames_behind: Frame, 108 | } 109 | 110 | #[derive(Clone, Debug)] 111 | /// Event that occurs during the course of a session. 112 | pub enum Event { 113 | /// An initial response packet from the remote player has been received. 114 | Connected(PlayerHandle), 115 | /// A response from a remote player has been received during the initial 116 | /// synchronization handshake. 117 | Synchronizing { 118 | player: PlayerHandle, 119 | count: u8, 120 | total: u8, 121 | }, 122 | /// The initial synchronization handshake has been completed. The connection 123 | /// is considered live now. 124 | Synchronized(PlayerHandle), 125 | /// All remote peers are now synchronized, and the session can now start 126 | /// running. 127 | Running, 128 | /// The connection with a remote player has been disconnected. 129 | Disconnected(PlayerHandle), 130 | /// The local client is several frames ahead of all other peers. Might need 131 | /// to stall a few frames to allow others to catch up. 132 | TimeSync { frames_ahead: u8 }, 133 | /// The connection with a remote player has been temporarily interrupted. 134 | ConnectionInterrupted { 135 | player: PlayerHandle, 136 | disconnect_timeout: Duration, 137 | }, 138 | /// The connection with a remote player has been resumed after being interrupted. 139 | ConnectionResumed(PlayerHandle), 140 | } 141 | -------------------------------------------------------------------------------- /backroll/src/protocol/bitfield.rs: -------------------------------------------------------------------------------- 1 | use super::compression::DecodeError; 2 | use varinteger as varint; 3 | 4 | /// Encode a bitfield.
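///
/// The scheme is run-length based: runs of all-zero or all-one bytes collapse
/// into a varint-encoded length, while mixed bytes are copied through verbatim.
/// A minimal round-trip sketch, reusing the exact byte values from this module's
/// tests further below:
///
/// ```ignore
/// let bitfield = vec![255, 255, 85, 84, 0, 0, 0, 183];
/// let enc = encode(&bitfield);
/// assert_eq!(enc, vec![11, 4, 85, 84, 13, 2, 183]);
/// assert_eq!(decode(&enc).unwrap(), bitfield);
/// ```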
5 | pub fn encode(buf: impl AsRef<[u8]>) -> Vec { 6 | let (enc, _) = encode_with_offset(&buf, 0); 7 | enc 8 | } 9 | 10 | /// Encode a bitfield at a specific offset 11 | pub fn encode_with_offset(buf: impl AsRef<[u8]>, offset: usize) -> (Vec, usize) { 12 | let buf = buf.as_ref(); 13 | let mut len = 0u64; 14 | let mut contiguous = false; 15 | let mut prev_bits = 0; 16 | let mut noncontiguous_bits = Vec::new(); 17 | let mut enc = Vec::with_capacity(encode_len_with_offset(buf, offset)); 18 | 19 | for (i, byte) in buf[offset..].iter().enumerate() { 20 | if contiguous && *byte == prev_bits { 21 | len += 1; 22 | continue; 23 | } else if contiguous { 24 | write_contiguous(&mut enc, len, prev_bits); 25 | } 26 | 27 | if *byte == 0 || *byte == 255 { 28 | if !contiguous && i > offset { 29 | write_noncontiguous(&mut enc, &mut noncontiguous_bits); 30 | } 31 | len = 1; 32 | prev_bits = *byte; 33 | contiguous = true; 34 | } else if !contiguous { 35 | noncontiguous_bits.push(*byte); 36 | } else { 37 | contiguous = false; 38 | noncontiguous_bits.push(*byte); 39 | } 40 | } 41 | 42 | if contiguous { 43 | write_contiguous(&mut enc, len, prev_bits); 44 | } else { 45 | write_noncontiguous(&mut enc, &mut noncontiguous_bits); 46 | } 47 | 48 | (enc, buf.len() - offset) 49 | } 50 | 51 | /// Writes a value for contiguous data to the encoded bitfield 52 | fn write_contiguous(enc: &mut Vec, mut len: u64, prev_bits: u8) { 53 | len <<= 2; 54 | len += 1; 55 | if prev_bits == 255 { 56 | len += 2; 57 | } 58 | let mut varint = vec![0u8; varint::length(len)]; 59 | varint::encode(len, &mut varint); 60 | enc.append(&mut varint); 61 | } 62 | 63 | /// Writes a value for noncontiguous data to the encoded bitfield 64 | fn write_noncontiguous(enc: &mut Vec, noncontiguous_bits: &mut Vec) { 65 | let mut len = noncontiguous_bits.len() as u64; 66 | len <<= 1; 67 | let mut varint = vec![0u8; varint::length(len)]; 68 | varint::encode(len, &mut varint); 69 | enc.append(&mut varint); 70 | enc.append(noncontiguous_bits); 71 | } 72 | 73 | /// Returns how many bytes an encoded bitfield will use, starting at a specific offset. 74 | pub fn encode_len_with_offset(buf: impl AsRef<[u8]>, offset: usize) -> usize { 75 | let buf = buf.as_ref(); 76 | let mut len = 0u64; 77 | let mut partial_len = 0u64; 78 | let mut contiguous = false; 79 | let mut prev_bits = 0; 80 | 81 | for (i, byte) in buf[offset..].iter().enumerate() { 82 | if contiguous && *byte == prev_bits { 83 | partial_len += 1; 84 | continue; 85 | } else if contiguous { 86 | len += varint::length(partial_len << 2) as u64; 87 | } 88 | 89 | if *byte == 0 || *byte == 255 { 90 | if !contiguous && i > offset { 91 | len += partial_len; 92 | len += varint::length(partial_len << 1) as u64; 93 | } 94 | partial_len = 1; 95 | prev_bits = *byte; 96 | contiguous = true; 97 | } else if !contiguous { 98 | partial_len += 1; 99 | } else { 100 | partial_len = 1; 101 | contiguous = false; 102 | } 103 | } 104 | 105 | if contiguous { 106 | len += varint::length(partial_len << 2) as u64; 107 | } else { 108 | len += partial_len; 109 | len += varint::length(partial_len << 1) as u64; 110 | } 111 | 112 | len as usize 113 | } 114 | 115 | /// Decode an encoded bitfield. 116 | pub fn decode(buf: impl AsRef<[u8]>) -> Result, DecodeError> { 117 | let (bitfield, _) = decode_with_offset(&buf, 0)?; 118 | Ok(bitfield) 119 | } 120 | 121 | /// Decode an encoded bitfield, starting at a specific offset. 
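///
/// Each run in the encoded stream begins with a varint header that mirrors
/// `write_contiguous`/`write_noncontiguous` above: bit 0 flags a contiguous
/// (run-length) run, bit 1 selects the fill byte (0x00 vs 0xFF) for contiguous
/// runs, and the remaining bits carry the run length in bytes. Non-contiguous
/// runs are followed by that many literal bytes.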
122 | pub fn decode_with_offset( 123 | buf: impl AsRef<[u8]>, 124 | mut offset: usize, 125 | ) -> Result<(Vec, usize), DecodeError> { 126 | let buf = buf.as_ref(); 127 | let mut bitfield = vec![0; decode_len_with_offset(buf, offset)?]; 128 | let mut next = 0u64; 129 | let mut ptr = 0; 130 | 131 | while offset < buf.len() { 132 | offset += varint::decode_with_offset(buf, offset, &mut next); 133 | let repeat = next & 1; 134 | let len = if repeat > 0 { 135 | (next >> 2) as usize 136 | } else { 137 | (next >> 1) as usize 138 | }; 139 | 140 | if repeat > 0 { 141 | if next & 2 > 0 { 142 | for i in 0..len { 143 | bitfield[ptr + i] = 255; 144 | } 145 | } 146 | } else { 147 | bitfield[ptr..(len + ptr)].clone_from_slice(&buf[offset..(len + offset)]); 148 | offset += len; 149 | } 150 | 151 | ptr += len; 152 | } 153 | 154 | Ok((bitfield, buf.len() - offset)) 155 | } 156 | 157 | /// Returns how many bytes a decoded bitfield will use. 158 | pub fn decode_len(buf: impl AsRef<[u8]>) -> Result { 159 | decode_len_with_offset(&buf, 0) 160 | } 161 | 162 | /// Returns how many bytes a decoded bitfield will use, starting at a specific offset. 163 | pub fn decode_len_with_offset( 164 | buf: impl AsRef<[u8]>, 165 | mut offset: usize, 166 | ) -> Result { 167 | let buf = buf.as_ref(); 168 | let mut len = 0; 169 | let mut next = 0u64; 170 | 171 | while offset < buf.len() { 172 | offset += varint::decode_with_offset(buf, offset, &mut next); 173 | let repeat = next & 1; 174 | 175 | let slice = if repeat > 0 { 176 | (next >> 2) as usize 177 | } else { 178 | (next >> 1) as usize 179 | }; 180 | 181 | len += slice; 182 | if repeat == 0 { 183 | offset += slice; 184 | } 185 | } 186 | 187 | if offset > buf.len() { 188 | return Err(DecodeError::InvalidRLEBitfield { 189 | offset, 190 | len: buf.len(), 191 | }); 192 | } 193 | 194 | Ok(len) 195 | } 196 | 197 | #[cfg(test)] 198 | mod test { 199 | use super::*; 200 | 201 | #[test] 202 | fn test_should_encode_decode() { 203 | let mut bits: Vec = vec![0; 16]; 204 | bits[8] = 0b00000001; 205 | 206 | let enc = encode(&bits); 207 | assert_eq!(enc.len(), 4); 208 | 209 | let res = decode(enc).unwrap(); 210 | 211 | assert_eq!(res[8], 0b00000001); 212 | assert_eq!(res, bits); 213 | } 214 | 215 | #[test] 216 | fn test_encode() { 217 | let bitfield = vec![255, 255, 85, 84, 0, 0, 0, 183]; 218 | let enc = encode(&bitfield); 219 | let correct = vec![11, 4, 85, 84, 13, 2, 183]; 220 | assert_eq!(enc.len(), correct.len()); 221 | assert_eq!(enc, correct); 222 | } 223 | 224 | #[test] 225 | fn test_decode_len() { 226 | let enc = [11, 4, 85, 84, 13, 2, 183]; 227 | assert_eq!(8, decode_len(enc).unwrap()); 228 | } 229 | 230 | #[test] 231 | fn test_decode() { 232 | let enc = [11, 4, 85, 84, 13, 2, 183]; 233 | let res = decode(enc).unwrap(); 234 | let correct = vec![255, 255, 85, 84, 0, 0, 0, 183]; 235 | assert_eq!(res, correct); 236 | } 237 | 238 | #[test] 239 | fn test_not_power_of_two() { 240 | let deflated = encode(vec![255, 255, 255, 240]); 241 | let inflated = decode(deflated).unwrap(); 242 | assert_eq!(inflated, vec![255, 255, 255, 240]); 243 | } 244 | 245 | #[test] 246 | /// Differs on NodeJS: node trims final bits when 0 and returns a smaller payload 247 | /// Decoding returns the same result, but encoding the result is smaller 248 | /// Both are interoperable, with the different on the payload size when reading from node. 
249 | /// 250 | /// ```js 251 | /// require('bitfield-rle').encode(Buffer.from([])) // => 252 | /// require('bitfield-rle').decode(Buffer.from([])) // => 253 | /// require('bitfield-rle').decode(Buffer.from([0])) // => 254 | /// ``` 255 | fn test_encodes_empty_bitfield() { 256 | assert_eq!(decode(encode(vec![])).unwrap(), vec![]); 257 | assert_eq!(decode(vec![]).unwrap(), vec![]); 258 | assert_eq!(decode(vec![0]).unwrap(), vec![]); 259 | assert_eq!(encode(vec![]), vec![0]); 260 | } 261 | 262 | #[test] 263 | /// Differs on NodeJS: node trims final bits when 0 and returns a smaller payload 264 | /// Decoding returns the same result, but encoding the result is smaller. 265 | /// Both are interoperable, with the different on the payload size when reading from node. 266 | /// 267 | /// ```js 268 | /// var data = require('bitfield-rle').decode(Buffer.from([2, 64, 253, 31])) // => 269 | /// var data = require('bitfield-rle').encode(data) // => 270 | /// var data = require('bitfield-rle').encode(data) // => skipping the last bits 271 | /// ``` 272 | fn test_does_not_trims_remaining_bytes() { 273 | let mut bitfield = vec![0; 1024]; 274 | bitfield[0] = 64; 275 | assert_eq!(encode(&bitfield), vec![2, 64, 253, 31]); 276 | } 277 | } 278 | -------------------------------------------------------------------------------- /backroll/src/protocol/compression.rs: -------------------------------------------------------------------------------- 1 | use super::bitfield; 2 | use bytemuck::Pod; 3 | use thiserror::Error; 4 | 5 | /// The maximum supported size of the raw buffer. 6 | const MAX_BUFFER_SIZE: usize = u16::MAX as usize; 7 | 8 | /// Encodes a set of `[Pod]` values into a byte buffer relative to a reference snapshot. 9 | /// 10 | /// # Security 11 | /// This function fails if the delta encoded output is bigger than `[MAX_BUFFER_SIZE]` to prevent 12 | /// memory exhaustion. 13 | /// 14 | /// [Pod](bytemuck::Pod) 15 | pub fn encode<'a, T: Pod>( 16 | base: &'a T, 17 | data: impl Iterator, 18 | ) -> Result, EncodeError> { 19 | let bytes = delta_encode(base, data)?; 20 | // Bitfield RLE the result 21 | Ok(bitfield::encode(bytes)) 22 | } 23 | 24 | fn delta_encode<'a, T: bytemuck::Pod>( 25 | base: &'a T, 26 | data: impl Iterator, 27 | ) -> Result, EncodeError> { 28 | let mut base = *base; 29 | let bits = bytemuck::bytes_of_mut(&mut base); 30 | let (lower, upper) = data.size_hint(); 31 | let capacity = std::cmp::min(MAX_BUFFER_SIZE, upper.unwrap_or(lower) * bits.len()); 32 | let mut bytes = Vec::with_capacity(capacity); 33 | 34 | // Create buffer of delta encoded bytes via XOR. 35 | for datum in data { 36 | let datum_bytes = bytemuck::bytes_of(datum); 37 | debug_assert!(bits.len() == datum_bytes.len()); 38 | for (b1, b2) in bits.iter_mut().zip(datum_bytes.iter()) { 39 | bytes.push(*b1 ^ *b2); 40 | *b1 = *b2; 41 | } 42 | 43 | if bytes.len() >= MAX_BUFFER_SIZE { 44 | return Err(EncodeError::TooBig { len: bytes.len() }); 45 | } 46 | } 47 | 48 | Ok(bytes) 49 | } 50 | 51 | /// Decodes a set of delta encoded bytes into a buffer of `[Pod]` values relative to a 52 | /// reference snapshot. 53 | /// 54 | /// # Security 55 | /// This function fails if the delta encoded output is bigger than `[MAX_BUFFER_SIZE]` to prevent 56 | /// memory exhaustion. Also fails if the provided bytes cannot be safely converted via 57 | /// `[bytemuck::bytes_of]`. 
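///
/// A minimal round-trip sketch (mirroring this module's tests below; `Input`
/// stands in for any `Pod` + `PartialEq` type, e.g. a `#[repr(C)]` struct of
/// two `i32` fields):
///
/// ```ignore
/// let base = Input { x: 120, y: 120 };
/// let inputs = vec![Input { x: 420, y: 1337 }; 100];
/// let encoded = encode(&base, inputs.iter()).unwrap();
/// let decoded = decode(&base, &encoded).unwrap();
/// assert_eq!(decoded, inputs);
/// ```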
58 | /// 59 | /// [Pod](bytemuck::Pod) 60 | pub fn decode(base: &T, data: impl AsRef<[u8]>) -> Result, DecodeError> { 61 | let mut base = *base; 62 | let bits = bytemuck::bytes_of_mut(&mut base); 63 | let stride = bits.len(); 64 | debug_assert!(stride > 0); 65 | 66 | let delta_len = bitfield::decode_len(data.as_ref())?; 67 | 68 | // Ensure that the size of the buffer is not too big. 69 | if delta_len > MAX_BUFFER_SIZE { 70 | return Err(DecodeError::TooBig { len: delta_len }); 71 | } 72 | 73 | let delta = bitfield::decode(data)?; 74 | debug_assert!(delta.len() % stride == 0); 75 | let output_size = delta.len() / stride; 76 | let mut output = Vec::with_capacity(output_size); 77 | 78 | for idx in 0..output_size { 79 | for (local_idx, byte) in bits.iter_mut().enumerate() { 80 | *byte ^= delta[idx * stride + local_idx]; 81 | } 82 | output.push(*bytemuck::try_from_bytes::(bits)?) 83 | } 84 | 85 | Ok(output) 86 | } 87 | 88 | #[derive(Error, Debug)] 89 | pub enum EncodeError { 90 | #[error("Input buffer is too big: {}", .len)] 91 | TooBig { len: usize }, 92 | } 93 | 94 | #[derive(Error, Debug)] 95 | pub enum DecodeError { 96 | #[error("Cannot be cast {:?}", .0)] 97 | Cast(bytemuck::PodCastError), 98 | #[error("RLE decode error: offset: {}, len: {}", .offset, .len)] 99 | InvalidRLEBitfield { offset: usize, len: usize }, 100 | #[error("Output buffer is too big: {}", .len)] 101 | TooBig { len: usize }, 102 | } 103 | 104 | impl From for DecodeError { 105 | fn from(value: bytemuck::PodCastError) -> Self { 106 | Self::Cast(value) 107 | } 108 | } 109 | 110 | #[cfg(test)] 111 | mod test { 112 | use super::*; 113 | use bytemuck::{Pod, Zeroable}; 114 | use rand::RngCore; 115 | 116 | #[repr(C)] 117 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 118 | struct Input { 119 | x: i32, 120 | y: i32, 121 | } 122 | 123 | unsafe impl Pod for Input {} 124 | unsafe impl Zeroable for Input {} 125 | 126 | #[test] 127 | pub fn test_same_input_compresses_down() { 128 | let mut buf: Vec = Vec::new(); 129 | let base = Input { x: 120, y: 120 }; 130 | for _ in 0..100 { 131 | buf.push(Input { x: 420, y: 1337 }); 132 | } 133 | 134 | let encoded = encode(&base, buf.iter()).unwrap(); 135 | let decoded = decode(&base, encoded.iter()).unwrap(); 136 | assert_eq!(encoded, vec![4, 220, 1, 9, 4, 65, 5, 233, 24]); 137 | assert_eq!(decoded, buf); 138 | } 139 | 140 | #[test] 141 | pub fn test_empty_buffer() { 142 | let buf: Vec = Vec::new(); 143 | let base = Input { x: 120, y: 120 }; 144 | 145 | let encoded = encode(&base, buf.iter()).unwrap(); 146 | let decoded = decode(&base, encoded.iter()).unwrap(); 147 | assert_eq!(encoded, vec![0]); 148 | assert_eq!(decoded, buf); 149 | } 150 | 151 | #[test] 152 | pub fn test_random_data() { 153 | let mut rng = rand::thread_rng(); 154 | for _ in 0..100 { 155 | let mut buf: Vec = Vec::new(); 156 | let base = Input { 157 | x: rng.next_u32() as i32, 158 | y: rng.next_u32() as i32, 159 | }; 160 | for _ in 0..100 { 161 | if rng.next_u32() > u32::MAX / 4 { 162 | buf.push(base); 163 | } else { 164 | buf.push(Input { 165 | x: rng.next_u32() as i32, 166 | y: rng.next_u32() as i32, 167 | }); 168 | } 169 | } 170 | 171 | let encoded = encode(&base, buf.iter()).unwrap(); 172 | let decoded = decode(&base, encoded.iter()).unwrap(); 173 | assert!(encoded.len() <= std::mem::size_of::() * buf.len()); 174 | assert_eq!(decoded, buf); 175 | } 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /backroll/src/protocol/event.rs: 
-------------------------------------------------------------------------------- 1 | use crate::input::FrameInput; 2 | use std::time::Duration; 3 | 4 | pub(crate) enum Event { 5 | Connected, 6 | Synchronizing { total: u8, count: u8 }, 7 | Synchronized, 8 | Inputs(Vec>), 9 | NetworkInterrupted { disconnect_timeout: Duration }, 10 | NetworkResumed, 11 | } 12 | -------------------------------------------------------------------------------- /backroll/src/protocol/input_buffer.rs: -------------------------------------------------------------------------------- 1 | use super::compression; 2 | use crate::{input::FrameInput, Frame}; 3 | use parking_lot::RwLock; 4 | use std::collections::VecDeque; 5 | use std::sync::Arc; 6 | 7 | struct InputEncoderRef 8 | where 9 | T: bytemuck::Zeroable, 10 | { 11 | pending: VecDeque>, 12 | 13 | last_acked: Frame, 14 | last_encoded: Frame, 15 | } 16 | 17 | /// A buffer of all inputs that have not yet been acknowledged by a connected remote peer. 18 | /// 19 | /// This struct wraps an Arc, so it's safe to make clones and pass it around. 20 | #[derive(Clone)] 21 | pub(super) struct InputEncoder(Arc>>) 22 | where 23 | T: bytemuck::Zeroable + bytemuck::Pod; 24 | 25 | impl Default for InputEncoder { 26 | fn default() -> Self { 27 | Self(Arc::new(RwLock::new(InputEncoderRef:: { 28 | pending: VecDeque::new(), 29 | 30 | last_acked: crate::NULL_FRAME, 31 | last_encoded: crate::NULL_FRAME, 32 | }))) 33 | } 34 | } 35 | 36 | impl InputEncoder { 37 | /// Adds an input as the latest element in the queue. 38 | pub fn push(&self, input: FrameInput) { 39 | self.0.write().pending.push_back(input); 40 | } 41 | 42 | /// Gets the frame of the last input that was encoded via `[encode]`. 43 | pub fn last_encoded_frame(&self) -> Frame { 44 | self.0.read().last_encoded 45 | } 46 | } 47 | 48 | impl InputEncoder { 49 | /// Acknowledges a given frame. All inputs from prior frames will be dropped. 50 | /// 51 | /// This will update the reference input that is used to delta-encode. 52 | pub fn acknowledge_frame(&self, ack_frame: Frame) { 53 | let mut queue = self.0.write(); 54 | // Get rid of our buffered input 55 | let last = queue.pending.iter().filter(|i| i.frame < ack_frame).last(); 56 | if let Some(last) = last { 57 | queue.last_acked = last.frame; 58 | queue.pending.retain(|i| i.frame >= ack_frame); 59 | } 60 | } 61 | 62 | /// Encodes all pending output as a byte buffer. 63 | /// 64 | /// To minimize the size of the produced buffer, the sequence of inputs is delta 65 | /// encoded by `[compression::encode]` relative to the last acknowledged 66 | /// input, which is updated via `[acknowledge_frame]`. 67 | /// 68 | /// This will not remove any of the inputs in the queue, but will update 69 | /// the value returned by `[last_encoded_frame]` to reflect the highest 70 | /// frame that has been encoded.
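///
/// A rough usage sketch of the encoder/decoder pair (mirroring the tests at the
/// end of this module; `Input` is an illustrative `Pod` input type):
///
/// ```ignore
/// let encoder = InputEncoder::<Input>::default();
/// let decoder = InputDecoder::<Input>::default();
/// for frame in 0..100 {
///     encoder.push(FrameInput { frame, input: Input { x: 420, y: 1337 } });
/// }
/// // The (start_frame, bits) pair is what gets sent to the remote peer...
/// let (start_frame, bits) = encoder.encode()?;
/// // ...and the remote side feeds it into its own decoder.
/// let inputs = decoder.decode(start_frame, &bits)?;
/// assert_eq!(decoder.last_decoded_frame(), 99);
/// ```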
71 | pub fn encode(&self) -> Result<(Frame, Vec), compression::EncodeError> { 72 | let zeroed = T::zeroed(); 73 | let mut queue = self.0.write(); 74 | let pending = &queue.pending; 75 | if !pending.is_empty() { 76 | let start_frame = pending.front().unwrap().frame; 77 | let inputs = pending.iter().map(|f| &f.input); 78 | let bits = compression::encode(&zeroed, inputs)?; 79 | queue.last_encoded = queue.pending.back().unwrap().frame; 80 | Ok((start_frame, bits)) 81 | } else { 82 | Ok((queue.last_acked, Vec::new())) 83 | } 84 | } 85 | } 86 | 87 | struct InputDecoderRef 88 | where 89 | T: bytemuck::Zeroable, 90 | { 91 | last_decoded: Frame, 92 | phantom: std::marker::PhantomData, 93 | } 94 | 95 | /// A stateful decoder that decodes delta patches created by `[InputEncoder]`. 96 | /// 97 | /// This struct wraps an Arc, so it's safe to make clones and pass it around. 98 | #[derive(Clone)] 99 | pub(super) struct InputDecoder(Arc>>) 100 | where 101 | T: bytemuck::Zeroable + bytemuck::Pod; 102 | 103 | impl Default for InputDecoder { 104 | fn default() -> Self { 105 | Self(Arc::new(RwLock::new(InputDecoderRef:: { 106 | last_decoded: crate::NULL_FRAME, 107 | phantom: Default::default(), 108 | }))) 109 | } 110 | } 111 | 112 | impl InputDecoder { 113 | /// Gets the frame of the most recently decoded input if available. 114 | /// 115 | /// If no input has been decoded yet, this will be the NULL_FRAME. 116 | pub fn last_decoded_frame(&self) -> Frame { 117 | self.0.read().last_decoded 118 | } 119 | } 120 | 121 | impl InputDecoder { 122 | pub fn decode( 123 | &self, 124 | start_frame: Frame, 125 | bits: impl AsRef<[u8]>, 126 | ) -> Result>, compression::DecodeError> { 127 | let mut decoder = self.0.write(); 128 | let last_decoded_frame = decoder.last_decoded; 129 | let current_frame = if crate::is_null(decoder.last_decoded) { 130 | start_frame - 1 131 | } else { 132 | decoder.last_decoded 133 | }; 134 | let zeroed = T::zeroed(); 135 | let frame_inputs = compression::decode(&zeroed, bits)? 
136 | .into_iter() 137 | .enumerate() 138 | .map(|(i, input)| FrameInput:: { 139 | frame: start_frame + i as Frame, 140 | input, 141 | }) 142 | .skip_while(|input| input.frame <= current_frame) 143 | .collect::>(); 144 | 145 | if let Some(latest) = frame_inputs.last() { 146 | decoder.last_decoded = latest.clone().frame; 147 | } 148 | 149 | debug_assert!(decoder.last_decoded >= last_decoded_frame); 150 | 151 | Ok(frame_inputs) 152 | } 153 | } 154 | 155 | #[cfg(test)] 156 | mod test { 157 | use super::*; 158 | use bytemuck::{Pod, Zeroable}; 159 | use rand::RngCore; 160 | 161 | #[repr(C)] 162 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 163 | struct Input { 164 | x: i32, 165 | y: i32, 166 | } 167 | 168 | unsafe impl Pod for Input {} 169 | unsafe impl Zeroable for Input {} 170 | 171 | #[test] 172 | pub fn test_same_input_compresses_down() { 173 | let encoder = InputEncoder::::default(); 174 | let decoder = InputDecoder::::default(); 175 | let mut buf: Vec = Vec::new(); 176 | for frame in 0..100 { 177 | let input = Input { x: 420, y: 1337 }; 178 | buf.push(input); 179 | encoder.push(FrameInput:: { frame, input }); 180 | } 181 | 182 | let (start, encoded) = encoder.encode().unwrap(); 183 | let decoded = decoder.decode(start, &encoded).unwrap(); 184 | assert_eq!(start, 0); 185 | assert_eq!(encoded, vec![4, 164, 1, 9, 4, 57, 5, 233, 24]); 186 | assert_eq!( 187 | decoded.into_iter().map(|f| f.input).collect::>(), 188 | buf 189 | ); 190 | assert_eq!(decoder.last_decoded_frame(), 99); 191 | } 192 | 193 | #[test] 194 | pub fn test_empty_buffer() { 195 | let encoder = InputEncoder::::default(); 196 | let decoder = InputDecoder::::default(); 197 | let buf: Vec = Vec::new(); 198 | 199 | let (start, encoded) = encoder.encode().unwrap(); 200 | let decoded = decoder.decode(start, encoded.clone()).unwrap(); 201 | assert_eq!(start, -1); 202 | assert_eq!( 203 | decoded.into_iter().map(|f| f.input).collect::>(), 204 | buf 205 | ); 206 | assert_eq!(decoder.last_decoded_frame(), -1); 207 | } 208 | 209 | #[test] 210 | pub fn test_encodes_the_same_until_acknowledged() { 211 | let mut rng = rand::thread_rng(); 212 | let encoder = InputEncoder::::default(); 213 | let mut buf: Vec = Vec::new(); 214 | let base = Input { 215 | x: rng.next_u32() as i32, 216 | y: rng.next_u32() as i32, 217 | }; 218 | for frame in 0..100 { 219 | let input = if rng.next_u32() > u32::MAX / 4 { 220 | base 221 | } else { 222 | Input { 223 | x: rng.next_u32() as i32, 224 | y: rng.next_u32() as i32, 225 | } 226 | }; 227 | buf.push(input); 228 | encoder.push(FrameInput:: { frame, input }); 229 | } 230 | 231 | let (start_1, encoded_1) = encoder.encode().unwrap(); 232 | let (start_2, encoded_2) = encoder.encode().unwrap(); 233 | assert_eq!(start_1, start_2); 234 | assert_eq!(encoded_1, encoded_2); 235 | encoder.acknowledge_frame(53); 236 | let (start_3, encoded_3) = encoder.encode().unwrap(); 237 | assert!(start_3 != start_1); 238 | assert!(encoded_3 != encoded_1); 239 | assert!(start_3 != start_2); 240 | assert!(encoded_3 != encoded_2); 241 | } 242 | 243 | #[test] 244 | pub fn test_random_data() { 245 | let mut rng = rand::thread_rng(); 246 | for _ in 0..100 { 247 | let encoder = InputEncoder::::default(); 248 | let decoder = InputDecoder::::default(); 249 | let mut buf: Vec = Vec::new(); 250 | let base = Input { 251 | x: rng.next_u32() as i32, 252 | y: rng.next_u32() as i32, 253 | }; 254 | for frame in 0..100 { 255 | let input = if rng.next_u32() > u32::MAX / 4 { 256 | base 257 | } else { 258 | Input { 259 | x: rng.next_u32() as i32, 260 | y: 
rng.next_u32() as i32, 261 | } 262 | }; 263 | buf.push(input); 264 | encoder.push(FrameInput:: { frame, input }); 265 | } 266 | 267 | let (start, encoded) = encoder.encode().unwrap(); 268 | let decoded = decoder.decode(start, &encoded).unwrap(); 269 | assert_eq!(start, 0); 270 | assert!(encoded.len() <= std::mem::size_of::() * buf.len()); 271 | assert_eq!(decoded.len(), buf.len()); 272 | assert_eq!(decoder.last_decoded_frame(), 99); 273 | assert_eq!( 274 | decoded.into_iter().map(|f| f.input).collect::>(), 275 | buf 276 | ); 277 | } 278 | } 279 | } 280 | -------------------------------------------------------------------------------- /backroll/src/protocol/message.rs: -------------------------------------------------------------------------------- 1 | use super::ConnectionStatus; 2 | use crate::{time_sync::UnixMillis, Frame}; 3 | use serde::{Deserialize, Serialize}; 4 | use std::num::Wrapping; 5 | 6 | #[derive(Clone, Debug, Serialize, Deserialize)] 7 | pub(super) struct Message { 8 | pub magic: u16, 9 | pub sequence_number: Wrapping, 10 | pub data: MessageData, 11 | } 12 | 13 | #[derive(Clone, Debug, Serialize, Deserialize)] 14 | pub(super) enum MessageData { 15 | KeepAlive, 16 | SyncRequest(SyncRequest), 17 | SyncReply(SyncReply), 18 | Input(Input), 19 | InputAck(InputAck), 20 | QualityReport(QualityReport), 21 | QualityReply(QualityReply), 22 | } 23 | 24 | impl MessageData { 25 | pub fn is_sync_message(&self) -> bool { 26 | matches!(self, Self::SyncRequest(_) | Self::SyncReply(_)) 27 | } 28 | } 29 | 30 | impl From for MessageData { 31 | fn from(value: SyncRequest) -> Self { 32 | Self::SyncRequest(value) 33 | } 34 | } 35 | 36 | impl From for MessageData { 37 | fn from(value: SyncReply) -> Self { 38 | Self::SyncReply(value) 39 | } 40 | } 41 | 42 | impl From for MessageData { 43 | fn from(value: Input) -> Self { 44 | Self::Input(value) 45 | } 46 | } 47 | 48 | impl From for MessageData { 49 | fn from(value: InputAck) -> Self { 50 | Self::InputAck(value) 51 | } 52 | } 53 | 54 | impl From for MessageData { 55 | fn from(value: QualityReport) -> Self { 56 | Self::QualityReport(value) 57 | } 58 | } 59 | 60 | impl From for MessageData { 61 | fn from(value: QualityReply) -> Self { 62 | Self::QualityReply(value) 63 | } 64 | } 65 | 66 | #[derive(Clone, Debug, Serialize, Deserialize)] 67 | pub(super) struct Input { 68 | pub peer_connect_status: Vec, 69 | pub start_frame: Frame, 70 | pub ack_frame: Frame, 71 | pub bits: Vec, 72 | } 73 | 74 | #[derive(Clone, Debug, Serialize, Deserialize)] 75 | pub(super) struct InputAck { 76 | pub ack_frame: Frame, 77 | } 78 | 79 | #[derive(Clone, Debug, Serialize, Deserialize)] 80 | pub(super) struct SyncRequest { 81 | pub random: u32, 82 | } 83 | 84 | #[derive(Clone, Debug, Serialize, Deserialize)] 85 | pub(super) struct SyncReply { 86 | pub random: u32, 87 | } 88 | 89 | #[derive(Clone, Debug, Serialize, Deserialize)] 90 | pub(super) struct QualityReport { 91 | pub frame_advantage: i32, 92 | pub ping: UnixMillis, 93 | } 94 | 95 | #[derive(Clone, Debug, Serialize, Deserialize)] 96 | pub(super) struct QualityReply { 97 | pub pong: UnixMillis, 98 | } 99 | -------------------------------------------------------------------------------- /backroll/src/protocol/mod.rs: -------------------------------------------------------------------------------- 1 | use self::input_buffer::*; 2 | use self::message::*; 3 | use crate::{ 4 | input::FrameInput, 5 | time_sync::{TimeSync, UnixMillis}, 6 | Config, Frame, NetworkStats, 7 | }; 8 | use async_channel::TrySendError; 9 | use 
backroll_transport::Peer as TransportPeer; 10 | use bevy_tasks::IoTaskPool; 11 | use bincode::config::Options; 12 | use futures::FutureExt; 13 | use futures_timer::Delay; 14 | use parking_lot::RwLock; 15 | use rand::RngCore; 16 | use serde::{Deserialize, Serialize}; 17 | use std::num::Wrapping; 18 | use std::sync::Arc; 19 | use std::time::Duration; 20 | use tracing::{debug, error}; 21 | 22 | pub(crate) use event::Event; 23 | 24 | mod bitfield; 25 | mod compression; 26 | mod event; 27 | mod input_buffer; 28 | mod message; 29 | 30 | pub enum PeerError { 31 | LocalDisconnected, 32 | RemoteDisconnected, 33 | InvalidMessage, 34 | } 35 | 36 | const UDP_HEADER_SIZE: usize = 28; // Size of IP + UDP headers 37 | const MAX_TRANSMISSION_UNIT: u64 = 1450; // A sane common packet size. 38 | const NUM_SYNC_PACKETS: u8 = 5; 39 | const TARGET_TPS: u64 = 60; 40 | const POLL_INTERVAL: Duration = Duration::from_millis(1000 / TARGET_TPS); 41 | const SYNC_RETRY_INTERVAL: Duration = Duration::from_millis(2000); 42 | const SYNC_FIRST_RETRY_INTERVAL: Duration = Duration::from_millis(500); 43 | const RUNNING_RETRY_INTERVAL: Duration = Duration::from_millis(200); 44 | const KEEP_ALIVE_INTERVAL: Duration = Duration::from_millis(200); 45 | const QUALITY_REPORT_INTERVAL: Duration = Duration::from_millis(1000); 46 | const NETWORK_STATS_INTERVAL: Duration = Duration::from_millis(1000); 47 | const MAX_SEQ_DISTANCE: Wrapping = Wrapping(1 << 15); 48 | 49 | fn random() -> u32 { 50 | let mut rng = rand::thread_rng(); 51 | loop { 52 | let random = rng.next_u32(); 53 | if random != 0 { 54 | return random; 55 | } 56 | } 57 | } 58 | 59 | #[derive(Clone, Copy, Debug)] 60 | pub enum PeerState { 61 | Connecting { 62 | random: u32, 63 | }, 64 | Syncing { 65 | random: u32, 66 | roundtrips_remaining: u8, 67 | }, 68 | Running { 69 | remote_magic: u16, 70 | }, 71 | Interrupted { 72 | remote_magic: u16, 73 | }, 74 | Disconnected, 75 | } 76 | 77 | impl PeerState { 78 | pub fn random(&self) -> Option { 79 | match *self { 80 | Self::Connecting { random } => Some(random), 81 | Self::Syncing { random, .. } => Some(random), 82 | _ => None, 83 | } 84 | } 85 | 86 | pub fn is_running(&self) -> bool { 87 | matches!(self, Self::Running { .. } | Self::Interrupted { .. }) 88 | } 89 | 90 | fn create_sync_request(&self) -> SyncRequest { 91 | if let PeerState::Connecting { random, .. } | PeerState::Syncing { random, .. } = self { 92 | SyncRequest { random: *random } 93 | } else { 94 | panic!("Sending sync request while not syncing.") 95 | } 96 | } 97 | 98 | pub fn is_interrupted(&self) -> bool { 99 | matches!(self, Self::Interrupted { .. 
}) 100 | } 101 | 102 | pub fn start_syncing(&mut self, round_trips: u8) { 103 | if let Self::Connecting { random } = *self { 104 | *self = Self::Syncing { 105 | random, 106 | roundtrips_remaining: round_trips, 107 | }; 108 | } 109 | } 110 | 111 | pub fn interrupt(&mut self) -> bool { 112 | if let Self::Running { remote_magic } = *self { 113 | *self = Self::Interrupted { remote_magic }; 114 | true 115 | } else { 116 | false 117 | } 118 | } 119 | 120 | pub fn resume(&mut self) -> bool { 121 | if let Self::Interrupted { remote_magic } = *self { 122 | *self = Self::Running { remote_magic }; 123 | true 124 | } else { 125 | false 126 | } 127 | } 128 | } 129 | 130 | impl Default for PeerState { 131 | fn default() -> Self { 132 | Self::Connecting { random: random() } 133 | } 134 | } 135 | 136 | #[derive(Default)] 137 | struct PeerStats { 138 | pub packets_sent: usize, 139 | pub bytes_sent: usize, 140 | pub last_send_time: Option, 141 | pub last_input_packet_recv_time: UnixMillis, 142 | pub round_trip_time: Duration, 143 | pub kbps_sent: u32, 144 | 145 | pub local_frame_advantage: Frame, 146 | pub remote_frame_advantage: Frame, 147 | } 148 | 149 | #[derive(Clone)] 150 | pub(crate) struct PeerConfig { 151 | pub peer: TransportPeer, 152 | pub disconnect_timeout: Duration, 153 | pub disconnect_notify_start: Duration, 154 | } 155 | 156 | pub(crate) struct Peer 157 | where 158 | T: Config, 159 | { 160 | queue: usize, 161 | config: PeerConfig, 162 | timesync: TimeSync, 163 | state: Arc>, 164 | 165 | stats: Arc>, 166 | local_connect_status: Arc<[RwLock]>, 167 | peer_connect_status: Vec, 168 | 169 | input_encoder: InputEncoder, 170 | input_decoder: InputDecoder, 171 | 172 | message_in: async_channel::Receiver, 173 | message_out: async_channel::Sender, 174 | events: async_channel::Sender>, 175 | } 176 | 177 | impl Clone for Peer { 178 | fn clone(&self) -> Self { 179 | Self { 180 | queue: self.queue, 181 | config: self.config.clone(), 182 | timesync: self.timesync.clone(), 183 | state: self.state.clone(), 184 | 185 | stats: self.stats.clone(), 186 | local_connect_status: self.local_connect_status.clone(), 187 | peer_connect_status: self.peer_connect_status.clone(), 188 | 189 | input_encoder: self.input_encoder.clone(), 190 | input_decoder: self.input_decoder.clone(), 191 | 192 | message_in: self.message_in.clone(), 193 | message_out: self.message_out.clone(), 194 | events: self.events.clone(), 195 | } 196 | } 197 | } 198 | 199 | impl Peer { 200 | pub fn new( 201 | queue: usize, 202 | config: PeerConfig, 203 | local_connect_status: Arc<[RwLock]>, 204 | ) -> (Self, async_channel::Receiver>) { 205 | let (deserialize_send, message_in) = async_channel::unbounded::(); 206 | let (message_out, serialize_recv) = async_channel::unbounded::(); 207 | let (events, events_rx) = async_channel::unbounded(); 208 | let peer_connect_status = local_connect_status 209 | .iter() 210 | .map(|status| status.read().clone()) 211 | .collect(); 212 | 213 | let peer = Self { 214 | queue, 215 | config, 216 | timesync: Default::default(), 217 | state: Default::default(), 218 | 219 | stats: Default::default(), 220 | local_connect_status, 221 | peer_connect_status, 222 | 223 | input_encoder: Default::default(), 224 | input_decoder: Default::default(), 225 | 226 | message_in, 227 | message_out, 228 | events, 229 | }; 230 | 231 | let task_pool = IoTaskPool::get(); 232 | 233 | // Start the base subtasks on the provided executor 234 | task_pool 235 | .spawn(peer.clone().serialize_outgoing(serialize_recv)) 236 | .detach(); 237 | task_pool 238 | 
.spawn(peer.clone().deserialize_incoming(deserialize_send)) 239 | .detach(); 240 | task_pool.spawn(peer.clone().run()).detach(); 241 | 242 | (peer, events_rx) 243 | } 244 | 245 | pub fn is_running(&self) -> bool { 246 | self.state.read().is_running() 247 | } 248 | 249 | pub fn disconnect(&self) { 250 | *self.state.write() = PeerState::Disconnected; 251 | self.message_in.close(); 252 | self.message_out.close(); 253 | self.events.close(); 254 | } 255 | 256 | fn push_event(&self, evt: Event) -> Result<(), PeerError> { 257 | // Failure to send just means the local side has already disconnected. 258 | match self.events.try_send(evt) { 259 | Ok(()) => Ok(()), 260 | Err(TrySendError::Full(_)) => { 261 | panic!("This channel should never be full, it should be unbounded") 262 | } 263 | Err(TrySendError::Closed(_)) => Err(PeerError::LocalDisconnected), 264 | } 265 | } 266 | 267 | fn send(&self, msg: impl Into) -> Result<(), PeerError> { 268 | match self.message_out.try_send(msg.into()) { 269 | Ok(()) => Ok(()), 270 | Err(TrySendError::Full(_)) => { 271 | panic!("This channel should never be full, it should be unbounded") 272 | } 273 | Err(TrySendError::Closed(_)) => Err(PeerError::RemoteDisconnected), 274 | } 275 | } 276 | 277 | pub fn get_network_stats(&self) -> NetworkStats { 278 | let stats = self.stats.read(); 279 | NetworkStats { 280 | ping: stats.round_trip_time, 281 | send_queue_len: self.message_out.len(), 282 | recv_queue_len: self.message_in.len(), 283 | kbps_sent: stats.kbps_sent, 284 | 285 | local_frames_behind: stats.local_frame_advantage, 286 | remote_frames_behind: stats.remote_frame_advantage, 287 | } 288 | } 289 | 290 | pub fn send_input(&self, input: FrameInput) -> Result<(), PeerError> { 291 | if self.state.read().is_running() { 292 | let stats = self.stats.read(); 293 | // Check to see if this is a good time to adjust for the rift... 294 | self.timesync.advance_frame( 295 | input.clone(), 296 | stats.local_frame_advantage, 297 | stats.remote_frame_advantage, 298 | ); 299 | 300 | // Save this input packet 301 | // 302 | // XXX: This queue may fill up for spectators who do not ack input packets in a timely 303 | // manner. When this happens, we can either resize the queue (ug) or disconnect them 304 | // (better, but still ug). For the meantime, make this queue really big to decrease 305 | // the odds of this happening... 306 | self.input_encoder.push(input); 307 | } 308 | self.send_pending_output() 309 | } 310 | 311 | fn send_pending_output(&self) -> Result<(), PeerError> { 312 | let (start_frame, bits) = self.input_encoder.encode().expect( 313 | "The Backroll client has somehow created an input \ 314 | queue of 65,535 bytes or more. This is ill advised.
\ 315 | Consider further compressing your inputs.", 316 | ); 317 | self.send(Input { 318 | peer_connect_status: self 319 | .local_connect_status 320 | .iter() 321 | .map(|status| status.read().clone()) 322 | .collect(), 323 | start_frame, 324 | ack_frame: self.input_decoder.last_decoded_frame(), 325 | bits, 326 | }) 327 | } 328 | 329 | async fn heartbeat(self, interval: Duration) { 330 | while let Ok(()) = self.send(MessageData::KeepAlive) { 331 | debug!("Sent keep alive packet"); 332 | Delay::new(interval).await; 333 | } 334 | } 335 | 336 | async fn send_quality_reports(self, interval: Duration) -> Result<(), PeerError> { 337 | debug!("Starting quality reports to queue: {}", self.queue); 338 | let mut result = Ok(()); 339 | while self.is_running() { 340 | let frame_advantage = self.stats.read().local_frame_advantage; 341 | let msg = QualityReport { 342 | ping: UnixMillis::now(), 343 | frame_advantage, 344 | }; 345 | // Erroring means disconnection. 346 | if let Err(err) = self.send(msg) { 347 | result = Err(err); 348 | break; 349 | } 350 | Delay::new(interval).await; 351 | } 352 | debug!("Stopped sending quality reports to: {}", self.queue); 353 | result 354 | } 355 | 356 | async fn resend_inputs(self, interval: Duration) -> Result<(), PeerError> { 357 | while self.is_running() { 358 | { 359 | let mut stats = self.stats.write(); 360 | let now = UnixMillis::now(); 361 | // xxx: rig all this up with a timer wrapper 362 | if stats.last_input_packet_recv_time + RUNNING_RETRY_INTERVAL < now { 363 | debug!("Haven't exchanged packets in a while (last received: {} last sent: {}). Resending.", 364 | self.input_decoder.last_decoded_frame(), 365 | self.input_encoder.last_encoded_frame()); 366 | stats.last_input_packet_recv_time = now; 367 | self.send_pending_output()?; 368 | } 369 | } 370 | Delay::new(interval).await; 371 | } 372 | Ok(()) 373 | } 374 | 375 | async fn run(mut self) -> Result<(), PeerError> { 376 | let mut last_recv_time = UnixMillis::now(); 377 | loop { 378 | futures::select! { 379 | message = self.message_in.recv().fuse() => { 380 | let message = message.map_err(|_| PeerError::RemoteDisconnected)?; 381 | match self.handle_message(message).await { 382 | Ok(()) => { 383 | last_recv_time = UnixMillis::now(); 384 | if self.state.write().resume() { 385 | self.push_event(Event::::NetworkResumed)?; 386 | } 387 | }, 388 | Err(PeerError::InvalidMessage) => { 389 | error!("Invalid incoming message"); 390 | }, 391 | err => { 392 | self.disconnect(); 393 | return err; 394 | } 395 | } 396 | }, 397 | _ = Delay::new(POLL_INTERVAL).fuse() => { 398 | let timeout = self.config.disconnect_timeout; 399 | let notify_start = self.config.disconnect_notify_start; 400 | let now = UnixMillis::now(); 401 | 402 | { 403 | let mut state = self.state.write(); 404 | if !state.is_interrupted() && (last_recv_time + notify_start < now) { 405 | state.interrupt(); 406 | debug!("Endpoint has stopped receiving packets for {} ms. Sending notification.", 407 | notify_start.as_millis()); 408 | self.push_event(Event::::NetworkInterrupted { 409 | disconnect_timeout: timeout - notify_start 410 | })?; 411 | } 412 | } 413 | 414 | if last_recv_time + timeout < now { 415 | debug!( 416 | "Endpoint has stopped receiving packets for {} ms. 
Disconnecting.", 417 | timeout.as_millis() 418 | ); 419 | self.disconnect(); 420 | return Err(PeerError::RemoteDisconnected); 421 | } 422 | self.poll()?; 423 | }, 424 | } 425 | } 426 | } 427 | 428 | fn poll(&mut self) -> Result<(), PeerError> { 429 | let state = self.state.read(); 430 | let next_interval = match *state { 431 | PeerState::Connecting { .. } => SYNC_FIRST_RETRY_INTERVAL, 432 | PeerState::Syncing { .. } => SYNC_RETRY_INTERVAL, 433 | _ => return Ok(()), 434 | }; 435 | let now = UnixMillis::now(); 436 | if let Some(last_send_time) = self.stats.read().last_send_time { 437 | if last_send_time + next_interval < now { 438 | debug!( 439 | "No luck syncing after {:?} ms... Re-queueing sync packet.", 440 | next_interval 441 | ); 442 | self.send(state.create_sync_request())?; 443 | } 444 | } else { 445 | // If we have not sent anything yet, kick off the connection with a 446 | // sync request. 447 | self.send(state.create_sync_request())?; 448 | } 449 | 450 | Ok(()) 451 | } 452 | 453 | async fn serialize_outgoing(self, messages: async_channel::Receiver) { 454 | let magic = random() as u16; 455 | let mut next_send_seq = Wrapping(0); 456 | while let Ok(data) = messages.recv().await { 457 | let message = Message { 458 | magic, 459 | sequence_number: next_send_seq, 460 | data, 461 | }; 462 | next_send_seq += Wrapping(1); 463 | 464 | let mut bytes = Vec::new(); 465 | { 466 | let mut bincode = bincode::Serializer::new( 467 | &mut bytes, 468 | bincode::options().with_limit(MAX_TRANSMISSION_UNIT), 469 | ); 470 | if let Err(err) = message.serialize(&mut bincode) { 471 | error!( 472 | "Dropping outgoing packet. Error while serializing outgoing message: {:?}", 473 | err 474 | ); 475 | continue; 476 | } 477 | } 478 | 479 | let msg_size = bytes.len(); 480 | if let Ok(()) = self.config.peer.send(bytes.into()).await { 481 | let mut stats = self.stats.write(); 482 | stats.packets_sent += 1; 483 | stats.last_send_time = Some(UnixMillis::now()); 484 | stats.bytes_sent += msg_size; 485 | } else { 486 | break; 487 | } 488 | } 489 | debug!("Stopping sending of messages for queue: {}", self.queue); 490 | } 491 | 492 | async fn deserialize_incoming( 493 | self, 494 | messages: async_channel::Sender, 495 | ) -> Result<(), PeerError> { 496 | let mut next_recv_seq = Wrapping(0); 497 | 498 | while let Ok(bytes) = self.config.peer.recv().await { 499 | let mut bincode = bincode::de::Deserializer::with_reader( 500 | &*bytes, 501 | bincode::options().with_limit(MAX_TRANSMISSION_UNIT), 502 | ); 503 | let message = match Message::deserialize(&mut bincode) { 504 | Ok(message) => message, 505 | Err(err) => { 506 | error!("Dropping incoming message. 
Error while deserialilzing incoming message: {:?}", err); 507 | continue; 508 | } 509 | }; 510 | 511 | let seq = message.sequence_number; 512 | if message.data.is_sync_message() { 513 | if let PeerState::Running { remote_magic } = *self.state.read() { 514 | if message.magic != remote_magic { 515 | continue; 516 | } 517 | } 518 | 519 | // filter out out-of-order packets 520 | let skipped = seq - next_recv_seq; 521 | if skipped > MAX_SEQ_DISTANCE { 522 | debug!( 523 | "dropping out of order packet (seq: {}, last seq: {})", 524 | seq, next_recv_seq 525 | ); 526 | continue; 527 | } 528 | } 529 | 530 | next_recv_seq = message.sequence_number; 531 | messages 532 | .send(message) 533 | .await 534 | .map_err(|_| PeerError::LocalDisconnected)?; 535 | } 536 | 537 | debug!("Stopped receiving messages for queue: {}", self.queue); 538 | Ok(()) 539 | } 540 | 541 | async fn handle_message(&mut self, message: Message) -> Result<(), PeerError> { 542 | match message.data { 543 | MessageData::KeepAlive => Ok(()), 544 | MessageData::SyncRequest(data) => self.on_sync_request(message.magic, data), 545 | MessageData::SyncReply(data) => self.on_sync_reply(message.magic, data), 546 | MessageData::Input(input) => self.on_input(input), 547 | MessageData::InputAck(data) => { 548 | self.input_encoder.acknowledge_frame(data.ack_frame); 549 | Ok(()) 550 | } 551 | MessageData::QualityReport(data) => self.on_quality_report(data), 552 | MessageData::QualityReply(data) => { 553 | self.stats.write().round_trip_time = UnixMillis::now() - data.pong; 554 | Ok(()) 555 | } 556 | } 557 | } 558 | 559 | async fn update_network_stats(self, interval: Duration) { 560 | let mut start_time: Option = None; 561 | 562 | loop { 563 | Delay::new(interval).await; 564 | 565 | if !self.is_running() { 566 | start_time = None; 567 | continue; 568 | } 569 | 570 | let now = UnixMillis::now(); 571 | if start_time.is_none() { 572 | start_time = Some(now); 573 | } 574 | 575 | let mut stats = self.stats.write(); 576 | let total_bytes_sent = 577 | (stats.bytes_sent + (UDP_HEADER_SIZE * stats.packets_sent)) as f32; 578 | let seconds = (now - start_time.unwrap()).as_millis() as f32 / 1000.0; 579 | let bps = total_bytes_sent / seconds; 580 | let udp_overhead = 581 | 100.0 * (UDP_HEADER_SIZE * stats.packets_sent) as f32 / stats.bytes_sent as f32; 582 | stats.kbps_sent = (bps / 1024.0) as u32; 583 | 584 | debug!( 585 | "Network Stats -- Bandwidth: {} KBps Packets Sent: {} ({} pps) \ 586 | KB Sent: {} UDP Overhead: {:.2}.", 587 | stats.kbps_sent, 588 | stats.packets_sent, 589 | stats.packets_sent as f32 * 1000.0 / (now - start_time.unwrap()).as_millis() as f32, 590 | total_bytes_sent / 1024.0, 591 | udp_overhead 592 | ); 593 | } 594 | } 595 | 596 | pub fn get_peer_connect_status(&self, id: usize) -> &ConnectionStatus { 597 | &self.peer_connect_status[id] 598 | } 599 | 600 | fn on_sync_request(&mut self, magic: u16, data: SyncRequest) -> Result<(), PeerError> { 601 | let SyncRequest { random } = data; 602 | if let PeerState::Running { remote_magic } = *self.state.read() { 603 | if magic != remote_magic { 604 | debug!( 605 | "Ignoring sync request from unknown endpoint ({} != {:?}).", 606 | magic, remote_magic 607 | ); 608 | return Err(PeerError::InvalidMessage); 609 | } 610 | } 611 | self.send(SyncReply { random })?; 612 | Ok(()) 613 | } 614 | 615 | fn on_sync_reply(&self, magic: u16, data: SyncReply) -> Result<(), PeerError> { 616 | let mut state = self.state.write(); 617 | if let Some(random) = state.random() { 618 | if data.random != random { 619 | 
debug!("sync reply {} != {}. Keep looking...", data.random, random); 620 | return Err(PeerError::InvalidMessage); 621 | } 622 | } 623 | 624 | match *state { 625 | PeerState::Connecting { .. } => { 626 | self.push_event(Event::::Connected)?; 627 | state.start_syncing(NUM_SYNC_PACKETS); 628 | self.send(state.create_sync_request())?; 629 | Ok(()) 630 | } 631 | PeerState::Syncing { 632 | ref mut roundtrips_remaining, 633 | .. 634 | } => { 635 | debug!( 636 | "Checking sync state ({} round trips remaining).", 637 | *roundtrips_remaining 638 | ); 639 | debug_assert!(*roundtrips_remaining > 0); 640 | *roundtrips_remaining -= 1; 641 | if *roundtrips_remaining == 0 { 642 | debug!("Synchronized queue {}!", self.queue); 643 | self.push_event(Event::::Synchronized)?; 644 | self.stats.write().last_input_packet_recv_time = UnixMillis::now(); 645 | *state = PeerState::Running { 646 | remote_magic: magic, 647 | }; 648 | 649 | // FIXME(james7132): If the network is interrupted and a reconnection is completed 650 | // if these tasks do not die before they get reevaluated, there will be multiple 651 | // alive tasks. This is not the end of the world, but will use extra queue space 652 | // and bandwidth. 653 | let task_pool = IoTaskPool::get(); 654 | task_pool 655 | .spawn(self.clone().heartbeat(KEEP_ALIVE_INTERVAL)) 656 | .detach(); 657 | task_pool 658 | .spawn(self.clone().send_quality_reports(QUALITY_REPORT_INTERVAL)) 659 | .detach(); 660 | task_pool 661 | .spawn(self.clone().resend_inputs(QUALITY_REPORT_INTERVAL)) 662 | .detach(); 663 | task_pool 664 | .spawn(self.clone().update_network_stats(NETWORK_STATS_INTERVAL)) 665 | .detach(); 666 | } else { 667 | self.push_event(Event::::Synchronizing { 668 | total: NUM_SYNC_PACKETS, 669 | count: NUM_SYNC_PACKETS - *roundtrips_remaining as u8, 670 | })?; 671 | self.send(state.create_sync_request())?; 672 | } 673 | Ok(()) 674 | } 675 | PeerState::Running { remote_magic } if magic == remote_magic => Ok(()), 676 | _ => { 677 | debug!("Ignoring SyncReply while not syncing."); 678 | Err(PeerError::InvalidMessage) 679 | } 680 | } 681 | } 682 | 683 | fn on_input(&mut self, msg: Input) -> Result<(), PeerError> { 684 | let Input { 685 | peer_connect_status, 686 | start_frame, 687 | ack_frame, 688 | bits, 689 | } = msg; 690 | 691 | // Update the peer connection status if this peer is still considered to be part 692 | // of the network. 693 | for (i, remote_status) in peer_connect_status.iter().enumerate() { 694 | if i < self.peer_connect_status.len() { 695 | debug_assert!(remote_status.last_frame >= self.peer_connect_status[i].last_frame); 696 | self.peer_connect_status[i].disconnected |= remote_status.disconnected; 697 | self.peer_connect_status[i].last_frame = std::cmp::max( 698 | self.peer_connect_status[i].last_frame, 699 | remote_status.last_frame, 700 | ); 701 | } else { 702 | self.peer_connect_status.push(remote_status.clone()); 703 | } 704 | } 705 | 706 | // Decompress the input. 707 | match self.input_decoder.decode(start_frame, bits) { 708 | Ok(inputs) => { 709 | if !inputs.is_empty() { 710 | self.push_event(Event::::Inputs(inputs))?; 711 | self.stats.write().last_input_packet_recv_time = UnixMillis::now(); 712 | self.send(InputAck { 713 | ack_frame: self.input_decoder.last_decoded_frame(), 714 | })?; 715 | } 716 | } 717 | Err(err) => { 718 | error!( 719 | "Error while decoding recieved inputs. 
discarding: {:?}", 720 | err 721 | ); 722 | return Err(PeerError::InvalidMessage); 723 | } 724 | } 725 | 726 | // Get rid of our buffered input 727 | self.input_encoder.acknowledge_frame(ack_frame); 728 | Ok(()) 729 | } 730 | 731 | fn on_quality_report(&self, data: QualityReport) -> Result<(), PeerError> { 732 | self.stats.write().remote_frame_advantage = data.frame_advantage; 733 | self.send(QualityReply { pong: data.ping })?; 734 | Ok(()) 735 | } 736 | 737 | pub fn set_local_frame_number(&self, local_frame: Frame) { 738 | let mut stats = self.stats.write(); 739 | // Estimate which frame the other guy is one by looking at the 740 | // last frame they gave us plus some delta for the one-way packet 741 | // trip time. 742 | let remote_frame = self.input_decoder.last_decoded_frame() 743 | + ((stats.round_trip_time.as_secs() / 2) * TARGET_TPS) as i32; 744 | 745 | // Our frame advantage is how many frames *behind* the other guy 746 | // we are. Counter-intuative, I know. It's an advantage because 747 | // it means they'll have to predict more often and our moves will 748 | // pop more frequently. 749 | stats.local_frame_advantage = remote_frame - local_frame; 750 | } 751 | 752 | pub fn recommend_frame_delay(&self) -> Frame { 753 | // XXX: require idle input should be a configuration parameter 754 | self.timesync.recommend_frame_wait_duration(false) 755 | } 756 | } 757 | 758 | #[derive(Clone, Debug, Deserialize, Serialize)] 759 | pub struct ConnectionStatus { 760 | pub disconnected: bool, 761 | pub last_frame: Frame, 762 | } 763 | 764 | impl Default for ConnectionStatus { 765 | fn default() -> Self { 766 | Self { 767 | disconnected: false, 768 | last_frame: super::NULL_FRAME, 769 | } 770 | } 771 | } 772 | -------------------------------------------------------------------------------- /backroll/src/sync.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | command::Command, 3 | command::{Commands, LoadState, SaveState}, 4 | input::{FrameInput, GameInput, InputQueue}, 5 | protocol::ConnectionStatus, 6 | BackrollError, BackrollResult, Config, Frame, NULL_FRAME, 7 | }; 8 | use parking_lot::{Mutex, RwLock}; 9 | 10 | use std::ops::Deref; 11 | use std::sync::Arc; 12 | use tracing::{debug, warn}; 13 | 14 | const MAX_PREDICTION_FRAMES: usize = 8; 15 | 16 | pub struct PlayerConfig { 17 | pub player_count: usize, 18 | pub frame_delay: Frame, 19 | } 20 | 21 | #[derive(Clone)] 22 | pub(crate) struct SavedFrame { 23 | pub frame: super::Frame, 24 | pub data: Option>, 25 | pub checksum: Option, 26 | } 27 | 28 | impl Default for SavedFrame { 29 | fn default() -> Self { 30 | Self { 31 | frame: NULL_FRAME, 32 | data: None, 33 | checksum: None, 34 | } 35 | } 36 | } 37 | 38 | pub(crate) struct SavedCell(Arc>>); 39 | 40 | impl SavedCell { 41 | pub fn reset(&self, frame: Frame) { 42 | *self.0.lock() = SavedFrame:: { 43 | frame, 44 | ..Default::default() 45 | }; 46 | } 47 | 48 | pub fn save(&self, new_frame: SavedFrame) { 49 | debug_assert!(new_frame.data.is_some()); 50 | let mut saved_frame = self.0.lock(); 51 | saved_frame.data = new_frame.data; 52 | saved_frame.checksum = new_frame.checksum; 53 | } 54 | 55 | pub fn is_valid(&self) -> bool { 56 | let frame = self.0.lock(); 57 | frame.data.is_some() && !crate::is_null(frame.frame) 58 | } 59 | } 60 | 61 | impl SavedCell { 62 | pub fn load(&self) -> T { 63 | let frame = self.0.lock(); 64 | debug!( 65 | "=== Loading frame info (checksum: {:08x}).", 66 | frame.checksum.unwrap_or(0) 67 | ); 68 | if let Some(data) = 
&frame.data { 69 | data.deref().clone() 70 | } else { 71 | panic!("Trying to load data that wasn't saved to.") 72 | } 73 | } 74 | } 75 | 76 | impl Default for SavedCell { 77 | fn default() -> Self { 78 | Self(Arc::new(Mutex::new(Default::default()))) 79 | } 80 | } 81 | 82 | impl Clone for SavedCell { 83 | fn clone(&self) -> Self { 84 | Self(self.0.clone()) 85 | } 86 | } 87 | 88 | pub(crate) struct SavedState { 89 | head: usize, 90 | frames: [SavedCell; MAX_PREDICTION_FRAMES + 2], 91 | } 92 | 93 | impl SavedState { 94 | pub fn push(&mut self, frame: Frame) -> SavedCell { 95 | let saved_frame = self.frames[self.head].clone(); 96 | saved_frame.reset(frame); 97 | self.head = (self.head + 1) % self.frames.len(); 98 | debug_assert!(self.head < self.frames.len()); 99 | saved_frame 100 | } 101 | 102 | /// Finds a saved state for a frame. 103 | fn find_index(&self, frame: Frame) -> Option { 104 | self.frames 105 | .iter() 106 | .enumerate() 107 | .find(|(_, saved)| saved.0.lock().frame == frame) 108 | .map(|(i, _)| i) 109 | } 110 | 111 | pub fn reset_to(&mut self, frame: Frame) -> SavedCell { 112 | self.head = self 113 | .find_index(frame) 114 | .unwrap_or_else(|| panic!("Could not find saved frame index for frame: {}", frame)); 115 | self.frames[self.head].clone() 116 | } 117 | } 118 | 119 | impl Default for SavedState { 120 | fn default() -> Self { 121 | Self { 122 | head: 0, 123 | frames: Default::default(), 124 | } 125 | } 126 | } 127 | 128 | pub(crate) struct Sync 129 | where 130 | T: Config, 131 | { 132 | saved_state: SavedState, 133 | input_queues: Vec>, 134 | config: PlayerConfig, 135 | rolling_back: bool, 136 | 137 | last_confirmed_frame: Frame, 138 | frame_count: Frame, 139 | local_connect_status: Arc<[RwLock]>, 140 | } 141 | 142 | impl Sync { 143 | pub fn new( 144 | config: PlayerConfig, 145 | local_connect_status: Arc<[RwLock]>, 146 | ) -> Self { 147 | let input_queues = Self::create_queues(&config); 148 | Self { 149 | saved_state: Default::default(), 150 | local_connect_status, 151 | input_queues, 152 | config, 153 | 154 | rolling_back: false, 155 | last_confirmed_frame: super::NULL_FRAME, 156 | frame_count: 0, 157 | } 158 | } 159 | 160 | pub fn player_count(&self) -> usize { 161 | self.config.player_count 162 | } 163 | 164 | pub fn frame_count(&self) -> Frame { 165 | self.frame_count 166 | } 167 | 168 | pub fn in_rollback(&self) -> bool { 169 | self.rolling_back 170 | } 171 | 172 | pub fn set_last_confirmed_frame(&mut self, frame: Frame) { 173 | self.last_confirmed_frame = frame; 174 | if frame > 0 { 175 | for queue in self.input_queues.iter_mut() { 176 | queue.discard_confirmed_frames(frame - 1); 177 | } 178 | } 179 | } 180 | 181 | pub fn set_frame_delay(&mut self, queue: usize, delay: Frame) { 182 | self.input_queues[queue].set_frame_delay(delay); 183 | } 184 | 185 | pub fn increment_frame(&mut self, commands: &mut Commands) { 186 | if self.frame_count == 0 { 187 | self.save_current_frame(commands); 188 | } 189 | let inputs = self.synchronize_inputs(); 190 | commands.push(Command::AdvanceFrame(inputs)); 191 | self.frame_count += 1; 192 | self.save_current_frame(commands); 193 | } 194 | 195 | pub fn add_local_input(&mut self, queue: usize, input: T::Input) -> BackrollResult { 196 | let frames_behind = self.frame_count - self.last_confirmed_frame; 197 | if self.frame_count >= MAX_PREDICTION_FRAMES as i32 198 | && frames_behind >= MAX_PREDICTION_FRAMES as i32 199 | { 200 | warn!("Rejecting input: reached prediction barrier."); 201 | return Err(BackrollError::ReachedPredictionBarrier); 
202 | } 203 | 204 | debug!( 205 | "Sending undelayed local frame {} to queue {}.", 206 | self.frame_count, queue 207 | ); 208 | 209 | self.input_queues[queue].add_input(FrameInput:: { 210 | frame: self.frame_count, 211 | input, 212 | }); 213 | 214 | Ok(self.frame_count) 215 | } 216 | 217 | pub fn add_remote_input(&mut self, queue: usize, input: FrameInput) { 218 | self.input_queues[queue].add_input(input); 219 | } 220 | 221 | pub fn synchronize_inputs(&mut self) -> GameInput { 222 | let mut output = GameInput:: { 223 | frame: self.frame_count, 224 | ..Default::default() 225 | }; 226 | for idx in 0..self.config.player_count { 227 | if self.is_disconnected(idx) { 228 | output.disconnected |= 1 << idx; 229 | } else { 230 | output.inputs[idx] = self.input_queues[idx] 231 | .get_input(self.frame_count) 232 | .unwrap() 233 | .input; 234 | } 235 | } 236 | output 237 | } 238 | 239 | pub fn check_simulation(&mut self, commands: &mut Commands) { 240 | if let Some(seek_to) = self.check_simulation_consistency() { 241 | self.adjust_simulation(commands, seek_to); 242 | } 243 | } 244 | 245 | pub fn load_frame(&mut self, commands: &mut Commands, frame: Frame) { 246 | // find the frame in question 247 | if frame == self.frame_count { 248 | debug!("Skipping NOP."); 249 | return; 250 | } 251 | 252 | let cell = self.saved_state.reset_to(frame); 253 | self.frame_count = cell.0.lock().frame; 254 | commands.push(Command::Load(LoadState:: { cell })); 255 | 256 | self.saved_state.head += 1; 257 | self.saved_state.head %= self.saved_state.frames.len(); 258 | } 259 | 260 | pub fn save_current_frame(&mut self, commands: &mut Commands) { 261 | let cell = self.saved_state.push(self.frame_count); 262 | commands.push(Command::Save(SaveState:: { 263 | cell, 264 | frame: self.frame_count, 265 | })); 266 | } 267 | 268 | pub fn adjust_simulation(&mut self, commands: &mut Commands, seek_to: Frame) { 269 | let frame_count = self.frame_count; 270 | let count = self.frame_count - seek_to; 271 | 272 | debug!("Catching up"); 273 | self.rolling_back = true; 274 | 275 | // Flush our input queue and load the last frame. 276 | self.load_frame(commands, seek_to); 277 | debug_assert!(self.frame_count == seek_to); 278 | 279 | // Advance frame by frame (stuffing notifications back to 280 | // the master). 
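        // Each increment_frame() call below emits an AdvanceFrame command
        // with the corrected inputs and re-saves that frame, so after the
        // loop frame_count is back at its pre-rollback value and every
        // intermediate save state has been rewritten.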
281 | self.reset_prediction(self.frame_count); 282 | for _ in 0..count { 283 | self.increment_frame(commands); 284 | } 285 | debug_assert!(self.frame_count == frame_count); 286 | 287 | self.rolling_back = false; 288 | } 289 | 290 | pub fn check_simulation_consistency(&self) -> Option { 291 | self.input_queues 292 | .iter() 293 | .map(|queue| queue.first_incorrect_frame()) 294 | .filter(|frame| !super::is_null(*frame)) 295 | .min() 296 | } 297 | 298 | fn reset_prediction(&mut self, frame: Frame) { 299 | for queue in self.input_queues.iter_mut() { 300 | queue.reset_prediction(frame); 301 | } 302 | } 303 | 304 | fn is_disconnected(&self, player: usize) -> bool { 305 | let status = self.local_connect_status[player].read(); 306 | status.disconnected && status.last_frame < self.frame_count() 307 | } 308 | 309 | fn create_queues(config: &PlayerConfig) -> Vec> { 310 | (0..config.player_count) 311 | .map(|_| InputQueue::new(config.frame_delay)) 312 | .collect() 313 | } 314 | } 315 | -------------------------------------------------------------------------------- /backroll/src/time_sync.rs: -------------------------------------------------------------------------------- 1 | use super::{input::FrameInput, Frame}; 2 | use parking_lot::Mutex; 3 | use serde::{Deserialize, Serialize}; 4 | use std::convert::TryFrom; 5 | use std::ops::{Add, Sub}; 6 | use std::sync::Arc; 7 | use std::time::{Duration, SystemTime, UNIX_EPOCH}; 8 | use tracing::debug; 9 | 10 | const FRAME_WINDOW_SIZE: usize = 40; 11 | const MIN_UNIQUE_FRAMES: usize = 10; 12 | const MIN_FRAME_ADVANTAGE: super::Frame = 3; 13 | const MAX_FRAME_ADVANTAGE: super::Frame = 9; 14 | 15 | struct TimeSyncRef { 16 | local: [Frame; FRAME_WINDOW_SIZE], 17 | remote: [Frame; FRAME_WINDOW_SIZE], 18 | last_inputs: [FrameInput; MIN_UNIQUE_FRAMES], 19 | iteration: u32, 20 | } 21 | 22 | #[derive(Clone)] 23 | pub struct TimeSync(Arc>>); 24 | 25 | impl Default for TimeSync { 26 | fn default() -> Self { 27 | Self(Arc::new(Mutex::new(TimeSyncRef { 28 | local: [0; FRAME_WINDOW_SIZE], 29 | remote: [0; FRAME_WINDOW_SIZE], 30 | last_inputs: Default::default(), 31 | iteration: 0, 32 | }))) 33 | } 34 | } 35 | 36 | impl TimeSync { 37 | pub fn advance_frame(&self, input: FrameInput, advantage: Frame, radvantage: Frame) { 38 | let frame = usize::try_from(input.frame).unwrap(); 39 | let mut sync = self.0.lock(); 40 | // Remember the last frame and frame advantage 41 | sync.last_inputs[frame % MIN_UNIQUE_FRAMES] = input; 42 | sync.local[frame % FRAME_WINDOW_SIZE] = advantage; 43 | sync.remote[frame % FRAME_WINDOW_SIZE] = radvantage; 44 | } 45 | 46 | pub fn recommend_frame_wait_duration(&self, require_idle_input: bool) -> super::Frame { 47 | let mut sync = self.0.lock(); 48 | 49 | // Average our local and remote frame advantages 50 | let sum = sync.local.iter().sum::() as f32; 51 | let advantage = sum / (sync.local.len() as f32); 52 | 53 | let sum = sync.remote.iter().sum::() as f32; 54 | let radvantage = sum / (sync.remote.len() as f32); 55 | 56 | sync.iteration += 1; 57 | 58 | // See if someone should take action. The person furthest ahead 59 | // needs to slow down so the other user can catch up. 60 | // Only do this if both clients agree on who's ahead!! 61 | if advantage >= radvantage { 62 | return 0; 63 | } 64 | 65 | // Both clients agree that we're the one ahead. Split 66 | // the difference between the two to figure out how long to 67 | // sleep for. 
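        // For example, with an averaged local advantage of 1.0 frames and a
        // remote advantage of 5.0, sleep_frames is ((5.0 - 1.0) / 2.0 + 0.5)
        // truncated to 2, which falls below MIN_FRAME_ADVANTAGE and is
        // ignored; a remote advantage of 9.0 would instead recommend 4
        // frames of sleep (clamped to at most MAX_FRAME_ADVANTAGE).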
68 | let sleep_frames = (((radvantage - advantage) / 2.0) + 0.5) as Frame; 69 | 70 | debug!( 71 | "iteration {}: sleep frames is {}", 72 | sync.iteration, sleep_frames 73 | ); 74 | 75 | // Some things just aren't worth correcting for. Make sure 76 | // the difference is relevant before proceeding. 77 | if sleep_frames < MIN_FRAME_ADVANTAGE { 78 | return 0; 79 | } 80 | 81 | // Make sure our input had been "idle enough" before recommending 82 | // a sleep. This tries to make the emulator sleep while the 83 | // user's input isn't sweeping in arcs (e.g. fireball motions in 84 | // Street Fighter), which could cause the player to miss moves. 85 | if require_idle_input { 86 | for idx in 0..sync.last_inputs.len() { 87 | if sync.last_inputs[idx] != sync.last_inputs[0] { 88 | debug!( 89 | "iteration {}: rejecting due to input stuff at position {}...!!!", 90 | sync.iteration, idx 91 | ); 92 | return 0; 93 | } 94 | } 95 | } 96 | 97 | // Success!!! Recommend the number of frames to sleep and adjust 98 | std::cmp::min(sleep_frames, MAX_FRAME_ADVANTAGE) 99 | } 100 | } 101 | 102 | #[derive(Debug, Default, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)] 103 | pub struct UnixMillis(u64); 104 | 105 | impl UnixMillis { 106 | pub fn now() -> Self { 107 | Self( 108 | SystemTime::now() 109 | .duration_since(UNIX_EPOCH) 110 | .unwrap() 111 | .as_millis() as u64, 112 | ) 113 | } 114 | } 115 | 116 | impl Add for UnixMillis { 117 | type Output = UnixMillis; 118 | fn add(self, other: Duration) -> Self::Output { 119 | Self(self.0 + other.as_millis() as u64) 120 | } 121 | } 122 | 123 | impl Sub for UnixMillis { 124 | type Output = UnixMillis; 125 | fn sub(self, other: Duration) -> Self::Output { 126 | Self(self.0 - other.as_millis() as u64) 127 | } 128 | } 129 | 130 | impl Sub for UnixMillis { 131 | type Output = Duration; 132 | fn sub(self, other: Self) -> Self::Output { 133 | Duration::from_millis(self.0 - other.0) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /backroll_transport/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "backroll_transport" 3 | version = "0.2.0" 4 | edition = "2021" 5 | authors = ["Hourai Teahouse Developers "] 6 | description = "An async transport abstraction layer." 7 | repository = "https://github.com/HouraiTeahouse/backroll-rs" 8 | license = "ISC" 9 | 10 | [dependencies] 11 | async-channel = "1.6" 12 | dashmap = "5.0" 13 | 14 | [dev-dependencies] 15 | static_assertions = "1.1" 16 | -------------------------------------------------------------------------------- /backroll_transport/README.md: -------------------------------------------------------------------------------- 1 | # backroll-transport 2 | 3 | [![crates.io](https://img.shields.io/crates/v/backroll-transport.svg)](https://crates.io/crates/backroll-transport) 4 | [![Documentation](https://docs.rs/backroll-transport/badge.svg)](https://docs.rs/backroll-transport) 5 | ![License](https://img.shields.io/crates/l/backroll-transport) 6 | [![Discord](https://img.shields.io/discord/151219753434742784.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/VuZhs9V) 7 | 8 | A async transport layer abstraction built atop async-channel. 
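The core primitive is a pair of connected `Peer` endpoints backed by async channels. A minimal, purely in-memory sketch (no real network transport involved), using only the API shown in this crate:

```rust
use backroll_transport::Peer;

fn main() {
    // Two connected endpoints; anything sent on one side arrives on the other.
    let (a, b) = Peer::create_unbounded_pair();

    a.try_send(b"ping"[0..].into()).unwrap();
    let msg = b.try_recv().unwrap();
    assert_eq!(&*msg, b"ping");

    // Disconnecting either endpoint closes both sides of the pair.
    a.disconnect();
    assert!(!b.is_connected());
}
```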
9 | 10 | ## Implementations 11 | 12 | * Raw UDP Sockets - [backroll-transport-udp](https://crates.io/crates/backroll-transport-udp) 13 | * Steamworks P2P - [backroll-transport-steam](https://crates.io/crates/backroll-transport-steam) 14 | -------------------------------------------------------------------------------- /backroll_transport/src/channel.rs: -------------------------------------------------------------------------------- 1 | use async_channel::{TryRecvError, TrySendError}; 2 | 3 | #[derive(Clone)] 4 | pub struct BidirectionalAsyncChannel { 5 | incoming: async_channel::Receiver, 6 | outgoing: async_channel::Sender, 7 | } 8 | 9 | impl BidirectionalAsyncChannel { 10 | /// Creates a pair of connected Peers without limitations on 11 | /// how many messages can be buffered. 12 | pub fn create_unbounded_pair() -> (Self, Self) { 13 | Self::create_pair(async_channel::unbounded(), async_channel::unbounded()) 14 | } 15 | 16 | /// Creates a pair of connected Peers with a limited capacity 17 | /// for many messages can be buffered in either direction. 18 | pub fn create_bounded_pair(capacity: usize) -> (Self, Self) { 19 | Self::create_pair( 20 | async_channel::bounded(capacity), 21 | async_channel::bounded(capacity), 22 | ) 23 | } 24 | 25 | /// Sends a message to the connected peer. 26 | /// 27 | /// If the send buffer is full, this method waits until there is 28 | /// space for a message. 29 | /// 30 | /// If the peer is disconnected, this method returns an error. 31 | #[inline] 32 | pub fn send(&self, message: T) -> async_channel::Send<'_, T> { 33 | self.outgoing.send(message) 34 | } 35 | 36 | /// Receives a message from the connected peer. 37 | /// 38 | /// If there is no pending messages, this method waits until there is a 39 | /// message. 40 | /// 41 | /// If the peer is disconnected, this method receives a message or returns 42 | /// an error if there are no more messages. 43 | #[inline] 44 | pub fn recv(&self) -> async_channel::Recv<'_, T> { 45 | self.incoming.recv() 46 | } 47 | 48 | /// Attempts to send a message to the connected peer. 49 | #[inline] 50 | pub fn try_send(&self, message: T) -> Result<(), TrySendError> { 51 | self.outgoing.try_send(message) 52 | } 53 | 54 | /// Attempts to receive a message from the connected peer. 55 | #[inline] 56 | pub fn try_recv(&self) -> Result { 57 | self.incoming.try_recv() 58 | } 59 | 60 | /// Returns true if the associated peer is still connected. 61 | pub fn is_connected(&self) -> bool { 62 | !self.incoming.is_closed() && !self.outgoing.is_closed() 63 | } 64 | 65 | /// Disconnects the paired Peers from either end. Any future attempts 66 | /// to send messages in either direction will fail, but any messages 67 | /// not yet recieved. 68 | /// 69 | /// If the Peer, or it's constituent channels were cloned, all of the 70 | /// cloned instances will appear disconnected. 71 | pub fn disconnect(&self) { 72 | self.outgoing.close(); 73 | self.incoming.close(); 74 | } 75 | 76 | /// Gets the raw sender for the peer. 77 | pub fn sender(&self) -> async_channel::Sender { 78 | self.outgoing.clone() 79 | } 80 | 81 | /// Gets the raw reciever for the peer. 82 | pub fn reciever(&self) -> async_channel::Receiver { 83 | self.incoming.clone() 84 | } 85 | 86 | /// The number of messages that are currently buffered in the 87 | /// send queue. Returns 0 if the Peer is disconnected. 88 | pub fn pending_send_count(&self) -> usize { 89 | self.outgoing.len() 90 | } 91 | 92 | /// The number of messages that are currently buffered in the 93 | /// recieve queue. 
Returns 0 if the Peer is disconnected. 94 | pub fn pending_recv_count(&self) -> usize { 95 | self.incoming.len() 96 | } 97 | 98 | fn create_pair( 99 | a: (async_channel::Sender, async_channel::Receiver), 100 | b: (async_channel::Sender, async_channel::Receiver), 101 | ) -> (Self, Self) { 102 | let (a_send, a_recv) = a; 103 | let (b_send, b_recv) = b; 104 | let a = Self { 105 | incoming: a_recv, 106 | outgoing: b_send, 107 | }; 108 | let b = Self { 109 | incoming: b_recv, 110 | outgoing: a_send, 111 | }; 112 | (a, b) 113 | } 114 | } 115 | 116 | #[cfg(test)] 117 | mod test { 118 | use super::*; 119 | 120 | static_assertions::assert_impl_all!(BidirectionalAsyncChannel: Clone); 121 | 122 | #[test] 123 | pub fn send_works_both_ways() { 124 | let (a, b) = BidirectionalAsyncChannel::::create_unbounded_pair(); 125 | 126 | assert!(a.try_send(1).is_ok()); 127 | assert!(b.try_send(4).is_ok()); 128 | assert!(a.try_send(2).is_ok()); 129 | assert!(b.try_send(5).is_ok()); 130 | assert!(a.try_send(3).is_ok()); 131 | assert!(b.try_send(6).is_ok()); 132 | 133 | assert_eq!(a.pending_send_count(), 3); 134 | assert_eq!(b.pending_send_count(), 3); 135 | assert_eq!(a.pending_recv_count(), 3); 136 | assert_eq!(b.pending_recv_count(), 3); 137 | 138 | assert_eq!(a.try_recv(), Ok(4)); 139 | assert_eq!(a.try_recv(), Ok(5)); 140 | assert_eq!(a.try_recv(), Ok(6)); 141 | 142 | assert_eq!(b.try_recv(), Ok(1)); 143 | assert_eq!(b.try_recv(), Ok(2)); 144 | assert_eq!(b.try_recv(), Ok(3)); 145 | } 146 | 147 | #[test] 148 | pub fn bounded_pairs_error_on_being_full() { 149 | let (a, b) = BidirectionalAsyncChannel::::create_bounded_pair(2); 150 | 151 | assert!(a.try_send(1).is_ok()); 152 | assert!(a.try_send(2).is_ok()); 153 | assert!(matches!(a.try_send(3), Err(TrySendError::Full(3)))); 154 | assert!(b.try_send(4).is_ok()); 155 | assert!(b.try_send(5).is_ok()); 156 | assert!(matches!(b.try_send(6), Err(TrySendError::Full(6)))); 157 | 158 | assert_eq!(a.try_recv(), Ok(4)); 159 | assert_eq!(a.try_recv(), Ok(5)); 160 | assert_eq!(a.try_recv(), Err(TryRecvError::Empty)); 161 | 162 | assert_eq!(b.try_recv(), Ok(1)); 163 | assert_eq!(b.try_recv(), Ok(2)); 164 | assert_eq!(a.try_recv(), Err(TryRecvError::Empty)); 165 | } 166 | 167 | #[test] 168 | pub fn disconnecting_closes_both_sides() { 169 | let (a, b) = BidirectionalAsyncChannel::::create_bounded_pair(2); 170 | 171 | a.disconnect(); 172 | assert!(!a.is_connected()); 173 | assert!(!b.is_connected()); 174 | 175 | let (a, b) = BidirectionalAsyncChannel::::create_bounded_pair(2); 176 | 177 | b.disconnect(); 178 | assert!(!a.is_connected()); 179 | assert!(!b.is_connected()); 180 | } 181 | 182 | #[test] 183 | pub fn disconnecting_stop_any_future_sends() { 184 | let (a, b) = BidirectionalAsyncChannel::::create_bounded_pair(2); 185 | 186 | a.disconnect(); 187 | assert!(!a.is_connected()); 188 | assert!(!b.is_connected()); 189 | 190 | assert!(matches!(a.try_send(1), Err(TrySendError::Closed(1)))); 191 | assert!(matches!(b.try_send(1), Err(TrySendError::Closed(1)))); 192 | assert!(matches!(a.try_recv(), Err(TryRecvError::Closed))); 193 | assert!(matches!(b.try_recv(), Err(TryRecvError::Closed))); 194 | } 195 | 196 | #[test] 197 | pub fn disconnecting_allows_existing_items_to_be_flushed() { 198 | let (a, b) = BidirectionalAsyncChannel::::create_unbounded_pair(); 199 | 200 | assert!(a.try_send(1).is_ok()); 201 | assert!(a.try_send(2).is_ok()); 202 | a.disconnect(); 203 | assert!(matches!(a.try_send(3), Err(TrySendError::Closed(3)))); 204 | 205 | assert_eq!(b.try_recv(), Ok(1)); 206 | 
assert_eq!(b.try_recv(), Ok(2)); 207 | assert_eq!(b.try_recv(), Err(TryRecvError::Closed)); 208 | } 209 | 210 | #[test] 211 | pub fn dropping_leads_to_disconnect() { 212 | let (a, b) = BidirectionalAsyncChannel::::create_unbounded_pair(); 213 | 214 | assert!(a.is_connected()); 215 | drop(b); 216 | assert!(!a.is_connected()); 217 | 218 | let (a, b) = BidirectionalAsyncChannel::::create_unbounded_pair(); 219 | let c = b.clone(); 220 | 221 | assert!(a.is_connected()); 222 | drop(b); 223 | assert!(a.is_connected()); 224 | drop(c); 225 | assert!(!a.is_connected()); 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /backroll_transport/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod channel; 2 | mod peer; 3 | mod peers; 4 | 5 | pub use channel::*; 6 | pub use peer::*; 7 | pub use peers::*; 8 | -------------------------------------------------------------------------------- /backroll_transport/src/peer.rs: -------------------------------------------------------------------------------- 1 | use super::channel::BidirectionalAsyncChannel; 2 | use std::fmt; 3 | use std::ops::Deref; 4 | 5 | /// A bidirectional channel for binary messages. 6 | #[derive(Clone)] 7 | pub struct Peer(BidirectionalAsyncChannel>); 8 | 9 | impl Peer { 10 | /// Creates a pair of connected Peers without limitations on 11 | /// how many messages can be buffered. 12 | pub fn create_unbounded_pair() -> (Self, Self) { 13 | let (a, b) = BidirectionalAsyncChannel::create_unbounded_pair(); 14 | (Self(a), Self(b)) 15 | } 16 | 17 | /// Creates a pair of connected Peers with a limited capacity 18 | /// for many messages can be buffered in either direction. 19 | pub fn create_bounded_pair(capacity: usize) -> (Self, Self) { 20 | let (a, b) = BidirectionalAsyncChannel::create_bounded_pair(capacity); 21 | (Self(a), Self(b)) 22 | } 23 | } 24 | 25 | impl fmt::Debug for Peer { 26 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 27 | write!(f, "Peer {{ connected: {} }}", self.is_connected()) 28 | } 29 | } 30 | 31 | impl Deref for Peer { 32 | type Target = BidirectionalAsyncChannel>; 33 | fn deref(&self) -> &Self::Target { 34 | &self.0 35 | } 36 | } 37 | 38 | #[cfg(test)] 39 | mod test { 40 | use super::*; 41 | static_assertions::assert_impl_all!(Peer: Deref, Clone, Send, Sync); 42 | } 43 | -------------------------------------------------------------------------------- /backroll_transport/src/peers.rs: -------------------------------------------------------------------------------- 1 | use crate::Peer; 2 | use core::hash::Hash; 3 | use dashmap::DashMap; 4 | 5 | /// A keyed mapping of [Peer]s with ownership semantics. 6 | /// 7 | /// Dropping will disconnect all owned peers. 8 | /// 9 | /// [Peer]: crate::Peer 10 | #[derive(Debug)] 11 | pub struct Peers(DashMap) 12 | where 13 | T: Eq + Hash; 14 | 15 | impl Peers { 16 | /// Gets a [Peer] by it's ID, if available. 17 | /// 18 | /// [Peer]: crate::Peer 19 | pub fn get(&self, id: &T) -> Option { 20 | self.0.get(id).and_then(|kv| { 21 | let peer = kv.value().clone(); 22 | if peer.is_connected() { 23 | Some(peer) 24 | } else { 25 | None 26 | } 27 | }) 28 | } 29 | 30 | /// Gets the number of active connections managed by it. 31 | #[allow(clippy::len_without_is_empty)] 32 | pub fn len(&self) -> usize { 33 | self.0.iter().filter(|kv| kv.value().is_connected()).count() 34 | } 35 | 36 | /// Checks if the store has a connection to the given ID. 
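    ///
    /// An entry whose peer has already been disconnected (but not yet
    /// flushed) is reported as absent.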
37 | pub fn contains(&self, id: &T) -> bool { 38 | self.0 39 | .get(id) 40 | .map(|kv| kv.value().is_connected()) 41 | .unwrap_or(false) 42 | } 43 | 44 | /// Creates a new unbounded peer pair and stores one end, mapping it to 45 | /// the provided ID, returning the other end. 46 | /// 47 | /// If a peer was previous stored at the given ID, it will be replaced and 48 | /// disconnected. 49 | #[must_use] 50 | pub fn create_unbounded(&self, id: T) -> Peer { 51 | let (a, b) = Peer::create_unbounded_pair(); 52 | if let Some(prior) = self.0.insert(id, a) { 53 | prior.disconnect(); 54 | } 55 | b 56 | } 57 | 58 | /// Creates an bounded peer pair and stores one end, mapping it to the 59 | /// provided ID, returning the other end. 60 | /// 61 | /// If a peer was previous stored at the given ID, it will be dropped and 62 | /// replaced. 63 | #[must_use] 64 | pub fn create_bounded(&self, id: T, capacity: usize) -> Peer { 65 | let (a, b) = Peer::create_bounded_pair(capacity); 66 | self.0.insert(id, a); 67 | b 68 | } 69 | 70 | /// Disconnects and removes a connection by it's ID 71 | /// 72 | /// A no-op if there no Peer with the given ID. 73 | pub fn disconnect(&self, id: &T) { 74 | if let Some((_, peer)) = self.0.remove(id) { 75 | peer.disconnect(); 76 | } 77 | } 78 | 79 | /// Removes all peers that are disconnected. 80 | pub fn flush_disconnected(&self) { 81 | self.0.retain(|_, peer| peer.is_connected()) 82 | } 83 | } 84 | 85 | impl Default for Peers { 86 | fn default() -> Self { 87 | Self(DashMap::::new()) 88 | } 89 | } 90 | 91 | impl Drop for Peers { 92 | fn drop(&mut self) { 93 | for kv in self.0.iter() { 94 | kv.value().disconnect(); 95 | } 96 | } 97 | } 98 | 99 | #[cfg(test)] 100 | mod test { 101 | use super::*; 102 | 103 | static_assertions::assert_impl_all!(Peers: Default, Drop, Send, Sync); 104 | 105 | #[test] 106 | pub fn test_contains_works() { 107 | const ID: i32 = 420; 108 | let peers = Peers::::default(); 109 | let _peer = peers.create_unbounded(ID); 110 | assert!(peers.contains(&ID)); 111 | assert!(peers.get(&ID).is_some()); 112 | } 113 | 114 | #[test] 115 | pub fn disconnecting_removes_peer() { 116 | const ID: i32 = 420; 117 | let peers = Peers::::default(); 118 | let peer = peers.create_unbounded(ID); 119 | assert!(peers.contains(&ID)); 120 | assert!(peers.get(&ID).is_some()); 121 | peer.disconnect(); 122 | assert!(!peers.contains(&ID)); 123 | assert!(peers.get(&ID).is_none()); 124 | } 125 | 126 | #[test] 127 | pub fn disconnecting_via_drop_removes_peer() { 128 | const ID: i32 = 420; 129 | let peers = Peers::::default(); 130 | let peer = peers.create_unbounded(ID); 131 | assert!(peers.contains(&ID)); 132 | assert!(peers.get(&ID).is_some()); 133 | drop(peer); 134 | assert!(!peers.contains(&ID)); 135 | assert!(peers.get(&ID).is_none()); 136 | } 137 | 138 | #[test] 139 | pub fn disconnecting_local_disconnects_remote() { 140 | const ID: i32 = 420; 141 | let peers = Peers::::default(); 142 | let peer_remote = peers.create_unbounded(ID); 143 | peers.disconnect(&ID); 144 | assert!(!peer_remote.is_connected()); 145 | } 146 | 147 | #[test] 148 | pub fn dropping_disconnects_all_remotes() { 149 | let peers = Peers::::default(); 150 | let a = peers.create_unbounded(1); 151 | let b = peers.create_unbounded(2); 152 | let c = peers.create_unbounded(3); 153 | 154 | assert!(a.is_connected()); 155 | assert!(b.is_connected()); 156 | assert!(c.is_connected()); 157 | drop(peers); 158 | assert!(!a.is_connected()); 159 | assert!(!b.is_connected()); 160 | assert!(!c.is_connected()); 161 | } 162 | } 163 | 
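`Peers` is the piece the concrete transports below build on: the map owns one endpoint of every pair and hands the other endpoint back to the caller. A minimal sketch of those ownership semantics, mirroring the tests above:

```rust
use backroll_transport::Peers;

fn main() {
    let peers = Peers::<u32>::default();

    // The map keeps one endpoint keyed by 7; we hold the remote endpoint.
    let remote = peers.create_unbounded(7);
    assert!(peers.contains(&7));

    // Disconnecting by ID closes the remote endpoint as well.
    peers.disconnect(&7);
    assert!(!remote.is_connected());

    // Dropping the map disconnects every endpoint it still owns.
    let remote = peers.create_unbounded(8);
    drop(peers);
    assert!(!remote.is_connected());
}
```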
-------------------------------------------------------------------------------- /backroll_transport_steam/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "backroll_transport_steam" 3 | version = "0.4.0" 4 | edition = "2021" 5 | authors = ["Hourai Teahouse Developers "] 6 | description = "A Steamworks implementation for backroll-transport." 7 | repository = "https://github.com/HouraiTeahouse/backroll-rs" 8 | license = "ISC" 9 | 10 | [dependencies] 11 | backroll_transport = { path = "../backroll_transport", version = "0.2" } 12 | async-channel = "1.6" 13 | bevy_tasks = "0.9" 14 | tracing = "0.1" 15 | steamworks = "0.9.0" -------------------------------------------------------------------------------- /backroll_transport_steam/README.md: -------------------------------------------------------------------------------- 1 | # backroll-transport-steam 2 | 3 | [![crates.io](https://img.shields.io/crates/v/backroll-transport-steam.svg)](https://crates.io/crates/backroll-transport-steam) 4 | [![Documentation](https://docs.rs/backroll-transport-steam/badge.svg)](https://docs.rs/backroll-transport-steam) 5 | ![License](https://img.shields.io/crates/l/backroll-transport-steam) 6 | [![Discord](https://img.shields.io/discord/151219753434742784.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/VuZhs9V) 7 | 8 | A Steamworks SDK based implementation for [backroll-transport](https://crates.io/crates/backroll-transport). 9 | -------------------------------------------------------------------------------- /backroll_transport_steam/src/lib.rs: -------------------------------------------------------------------------------- 1 | use async_channel::TrySendError; 2 | use backroll_transport::{Peer, Peers}; 3 | use bevy_tasks::IoTaskPool; 4 | use std::sync::{Arc, Weak}; 5 | use std::time::{Duration, Instant}; 6 | use steamworks::{CallbackHandle, Client, ClientManager, P2PSessionRequest, SendType, SteamId}; 7 | use tracing::{debug, error, warn}; 8 | 9 | /// The maximum size of unreliable packet that can be sent or recieved, 10 | /// in bytes. 11 | pub const UNRELIABLE_MTU: usize = 1200; 12 | 13 | // High cleanup interval since the P2P socket layer may take a while to 14 | // initialize and connect. 15 | const CLEANUP_INTERVAL: Duration = Duration::from_secs(20); 16 | 17 | pub struct SteamConnectionConfig { 18 | pub remote: SteamId, 19 | pub max_queue_size: Option, 20 | } 21 | 22 | impl SteamConnectionConfig { 23 | /// Shorthand for creating unbounded connections. Unbounded connections 24 | /// will never drop a recieved packet. However, because it will not drop 25 | /// packets, malicious actors can flood the connection. 26 | pub fn unbounded(remote: SteamId) -> Self { 27 | Self { 28 | remote, 29 | max_queue_size: None, 30 | } 31 | } 32 | 33 | /// Shorthand for creating bounded connections. Bounded connections 34 | /// will drop a recieved packet if the recieve queue is full. 35 | pub fn bounded(remote: SteamId, limit: usize) -> Self { 36 | Self { 37 | remote, 38 | max_queue_size: Some(limit), 39 | } 40 | } 41 | } 42 | 43 | pub struct SteamP2PManager { 44 | peers: Arc>, 45 | client: Client, 46 | _session_request: CallbackHandle, 47 | } 48 | 49 | impl SteamP2PManager { 50 | /// Starts a new thread to listen for P2P messages from Steam. 
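    ///
    /// The receive thread holds only a `Weak` handle to the internal peer
    /// map, so it shuts down on its own once the returned manager (which
    /// owns the map) is dropped.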
51 | pub fn bind(client: Client) -> Self { 52 | let peers = Arc::new(Peers::default()); 53 | 54 | let peer_p2p = Arc::downgrade(&peers); 55 | let client_p2p = client.clone(); 56 | 57 | let manager = Self { 58 | peers: peers.clone(), 59 | client: client.clone(), 60 | 61 | // Register a P2P session request handler. 62 | _session_request: client.register_callback(move |request: P2PSessionRequest| { 63 | if let Some(peers) = peer_p2p.upgrade() { 64 | if peers.contains(&request.remote) { 65 | client_p2p.networking().accept_p2p_session(request.remote); 66 | debug!( 67 | "Accepted P2P session request from remote: {:?}", 68 | request.remote 69 | ); 70 | } else { 71 | client_p2p.networking().close_p2p_session(request.remote); 72 | warn!( 73 | "Recieved P2P session request from uknown remote: {:?}. Dropping.", 74 | request.remote 75 | ); 76 | } 77 | } 78 | }), 79 | }; 80 | 81 | let peers = Arc::downgrade(&peers); 82 | std::thread::spawn(move || Self::recv(peers, client)); 83 | 84 | manager 85 | } 86 | 87 | /// Creates a [Peer] bound to a specific target [SteamId]. 88 | /// 89 | /// Note this does not block or send any I/O. It simply creates the 90 | /// tasks for reading and sending. 91 | /// 92 | /// [Peer]: backroll_transport::Peer 93 | /// [SteamId]: steamworks::SteamId 94 | pub fn connect(&self, config: SteamConnectionConfig) -> Peer { 95 | let peer = if let Some(limit) = config.max_queue_size { 96 | self.peers.create_bounded(config.remote, limit) 97 | } else { 98 | self.peers.create_unbounded(config.remote) 99 | }; 100 | let other = self.peers.get(&config.remote).unwrap(); 101 | let client = self.client.clone(); 102 | let task = Self::send(other, config.remote, client); 103 | IoTaskPool::get().spawn(task).detach(); 104 | peer 105 | } 106 | 107 | /// Disconnects the connection to a given [SteamId] if available. 108 | /// 109 | /// [SteamId]: steamworks::SteamId 110 | pub fn disconnect(&self, remote: SteamId) { 111 | self.peers.disconnect(&remote); 112 | } 113 | 114 | async fn send(peer: Peer, remote: SteamId, client: Client) { 115 | while let Ok(message) = peer.recv().await { 116 | if message.len() > UNRELIABLE_MTU { 117 | error!( 118 | "Failed to send unreliable message to {:?}: Too big, size ({}) exceeds MTU of {}", 119 | remote, message.len(), UNRELIABLE_MTU, 120 | ); 121 | continue; 122 | } 123 | if !client 124 | .networking() 125 | .send_p2p_packet(remote, SendType::Unreliable, message.as_ref()) 126 | { 127 | error!("Error while sending message to {:?}", remote); 128 | } 129 | } 130 | client.networking().close_p2p_session(remote); 131 | } 132 | 133 | fn recv(peers: Weak>, client: Client) { 134 | let mut read_buf = vec![0u8; UNRELIABLE_MTU]; 135 | let last_flush = Instant::now(); 136 | while let Some(peers) = peers.upgrade() { 137 | if let Some(size) = client.networking().is_p2p_packet_available() { 138 | if size >= read_buf.len() { 139 | read_buf.resize(size, 0u8); 140 | } 141 | 142 | let (remote, len) = client 143 | .networking() 144 | .read_p2p_packet(read_buf.as_mut()) 145 | .unwrap(); 146 | if let Some(peer) = peers.get(&remote) { 147 | Self::forward_packet(remote, peer, &read_buf[0..len]); 148 | } 149 | } 150 | 151 | // Periodically cleanup the peers. 
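            // flush_disconnected() removes map entries whose remote endpoint
            // has been disconnected or dropped.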
152 | if Instant::now() - last_flush > CLEANUP_INTERVAL { 153 | peers.flush_disconnected(); 154 | } 155 | } 156 | } 157 | 158 | fn forward_packet(remote: SteamId, peer: Peer, data: &[u8]) { 159 | match peer.try_send(data.into()) { 160 | Ok(()) => {} 161 | Err(TrySendError::Full(_)) => { 162 | debug!( 163 | "Dropped packet due to the packet queue for {:?} being full", 164 | remote 165 | ); 166 | } 167 | Err(TrySendError::Closed(_)) => { 168 | debug!("Dropped packet for disconnected packet queue: {:?}", remote); 169 | } 170 | } 171 | } 172 | } 173 | -------------------------------------------------------------------------------- /backroll_transport_udp/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "backroll_transport_udp" 3 | version = "0.5.0" 4 | edition = "2021" 5 | authors = ["Hourai Teahouse Developers "] 6 | description = "A raw async UDP implementation for backroll-transport." 7 | repository = "https://github.com/HouraiTeahouse/backroll-rs" 8 | license = "ISC" 9 | 10 | [dependencies] 11 | backroll_transport = { path = "../backroll_transport", version = "0.2" } 12 | async-channel = "1.6" 13 | async-net = "1.6.0" 14 | bevy_tasks = "0.9" 15 | tracing = "0.1" 16 | 17 | [dev-dependencies] 18 | futures = "0.3" 19 | serial_test = "0.9" -------------------------------------------------------------------------------- /backroll_transport_udp/README.md: -------------------------------------------------------------------------------- 1 | # backroll-transport-udp 2 | 3 | [![crates.io](https://img.shields.io/crates/v/backroll-transport-udp.svg)](https://crates.io/crates/backroll-transport-udp) 4 | [![Documentation](https://docs.rs/backroll-transport-udp/badge.svg)](https://docs.rs/backroll-transport-udp) 5 | ![License](https://img.shields.io/crates/l/backroll-transport-udp) 6 | [![Discord](https://img.shields.io/discord/151219753434742784.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/VuZhs9V) 7 | 8 | A raw async UDP implementation for [backroll-transport](https://crates.io/crates/backroll-transport). 9 | -------------------------------------------------------------------------------- /backroll_transport_udp/src/lib.rs: -------------------------------------------------------------------------------- 1 | use async_channel::TrySendError; 2 | use async_net::{SocketAddr, UdpSocket}; 3 | use backroll_transport::{Peer, Peers}; 4 | use bevy_tasks::IoTaskPool; 5 | use std::convert::TryFrom; 6 | use std::net::{ToSocketAddrs, UdpSocket as BlockingUdpSocket}; 7 | use std::sync::{Arc, Weak}; 8 | use std::time::{Duration, Instant}; 9 | use tracing::{debug, error}; 10 | 11 | const CLEANUP_INTERVAL: Duration = Duration::from_millis(1000); 12 | 13 | /// The maximum size of packet that can be sent or recieved, in bytes. 14 | /// 15 | /// This is based on the ethernet standard MTU (1500 bytes), the size of the 16 | /// IPv4/6 header (20/40 bytes), and the UDP header (8 bytes). 17 | pub const MAX_TRANSMISSION_UNIT: usize = 1452; 18 | 19 | pub struct UdpConnectionConfig { 20 | pub addr: SocketAddr, 21 | pub max_queue_size: Option, 22 | } 23 | 24 | impl UdpConnectionConfig { 25 | /// Shorthand for creating unbounded connections. Unbounded connections 26 | /// will never drop a recieved packet. However, because it will not drop 27 | /// packets, malicious actors can flood the connection. 
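    ///
    /// A minimal sketch of binding a local socket and connecting to a remote
    /// (the addresses and ports here are placeholders):
    ///
    /// ```no_run
    /// use backroll_transport_udp::{UdpConnectionConfig, UdpManager};
    /// use bevy_tasks::{IoTaskPool, TaskPool};
    ///
    /// IoTaskPool::init(TaskPool::default);
    /// let manager = UdpManager::bind("127.0.0.1:4000").unwrap();
    /// let remote = "127.0.0.1:4001".parse().unwrap();
    /// let _peer = manager.connect(UdpConnectionConfig::unbounded(remote));
    /// ```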
28 | pub fn unbounded(addr: SocketAddr) -> UdpConnectionConfig { 29 | Self { 30 | addr, 31 | max_queue_size: None, 32 | } 33 | } 34 | 35 | /// Shorthand for creating bounded connections. Bounded connections 36 | /// will drop a recieved packet if the recieve queue is full. 37 | pub fn bounded(addr: SocketAddr, limit: usize) -> UdpConnectionConfig { 38 | Self { 39 | addr, 40 | max_queue_size: Some(limit), 41 | } 42 | } 43 | } 44 | 45 | pub struct UdpManager { 46 | peers: Arc>, 47 | socket: UdpSocket, 48 | } 49 | 50 | impl UdpManager { 51 | /// Binds a [UdpSocket] and starts listening on it. 52 | /// 53 | /// # Errors 54 | /// Returns a [std::io::Error] if it fails to bind to the provided socket addresses 55 | /// or start an async poll on the socket. 56 | /// 57 | /// [UdpSocket]: async_net::UdpSocket 58 | pub fn bind(addrs: impl ToSocketAddrs) -> std::io::Result { 59 | let blocking = BlockingUdpSocket::bind(addrs)?; 60 | let socket = UdpSocket::try_from(blocking)?; 61 | let peers = Arc::new(Peers::default()); 62 | let manager = Self { 63 | peers: peers.clone(), 64 | socket: socket.clone(), 65 | }; 66 | 67 | IoTaskPool::get() 68 | .spawn(Self::recv(Arc::downgrade(&peers), socket)) 69 | .detach(); 70 | 71 | Ok(manager) 72 | } 73 | 74 | /// Creates a [Peer] bound to a specific target [SocketAddr]. 75 | /// 76 | /// Note this does not block or send any I/O. It simply creates the 77 | /// tasks for reading and sending. 78 | /// 79 | /// [Peer]: backroll_transport::Peer 80 | /// [SocketAddr]: std::net::SocketAddr 81 | pub fn connect(&self, config: UdpConnectionConfig) -> Peer { 82 | let peer = if let Some(limit) = config.max_queue_size { 83 | self.peers.create_bounded(config.addr, limit) 84 | } else { 85 | self.peers.create_unbounded(config.addr) 86 | }; 87 | let other = self.peers.get(&config.addr).unwrap(); 88 | let socket = self.socket.clone(); 89 | let task = Self::send(other, config.addr, socket); 90 | 91 | IoTaskPool::get().spawn(task).detach(); 92 | peer 93 | } 94 | 95 | /// Disconnects the connection to a given [SocketAddr] if available. 96 | /// 97 | /// [SocketAddr]: std::net::SocketAddr 98 | pub fn disconnect(&self, addr: SocketAddr) { 99 | self.peers.disconnect(&addr); 100 | } 101 | 102 | async fn send(peer: Peer, target_addr: SocketAddr, socket: UdpSocket) { 103 | while let Ok(message) = peer.recv().await { 104 | if let Err(err) = socket.send_to(message.as_ref(), target_addr).await { 105 | error!( 106 | "Error while sending message to {:?}: {:?}", 107 | target_addr, err 108 | ); 109 | } 110 | } 111 | } 112 | 113 | async fn recv(peers: Weak>, socket: UdpSocket) { 114 | let mut read_buf = [0u8; MAX_TRANSMISSION_UNIT]; 115 | let last_flush = Instant::now(); 116 | while let Some(peers) = peers.upgrade() { 117 | match socket.recv_from(&mut read_buf).await { 118 | Ok((len, addr)) => { 119 | debug_assert!(len < MAX_TRANSMISSION_UNIT); 120 | if let Some(peer) = peers.get(&addr) { 121 | Self::forward_packet(addr, peer, &read_buf[0..len]); 122 | } 123 | } 124 | Err(err) => { 125 | error!("Error while receiving UDP packets: {:?}", err); 126 | } 127 | } 128 | 129 | // Periodically cleanup the peers. 
130 | if Instant::now() - last_flush > CLEANUP_INTERVAL { 131 | peers.flush_disconnected(); 132 | } 133 | } 134 | } 135 | 136 | fn forward_packet(addr: SocketAddr, peer: Peer, data: &[u8]) { 137 | match peer.try_send(data.into()) { 138 | Ok(()) => {} 139 | Err(TrySendError::Full(_)) => { 140 | debug!( 141 | "Dropped packet due to the packet queue for {} being full", 142 | addr 143 | ); 144 | } 145 | Err(TrySendError::Closed(_)) => { 146 | debug!("Dropped packet for disconnected packet queue: {} ", addr); 147 | } 148 | } 149 | } 150 | } 151 | 152 | #[cfg(test)] 153 | mod test { 154 | use super::*; 155 | use bevy_tasks::{IoTaskPool, TaskPool}; 156 | 157 | #[test] 158 | #[serial_test::serial] 159 | pub fn test_basic_connect() { 160 | IoTaskPool::init(TaskPool::default); 161 | 162 | const ADDR_A: &str = "127.0.0.1:10000"; 163 | const ADDR_B: &str = "127.0.0.1:10001"; 164 | 165 | let socket_a = UdpManager::bind(ADDR_A).unwrap(); 166 | let socket_b = UdpManager::bind(ADDR_B).unwrap(); 167 | 168 | let peer_a = socket_b.connect(UdpConnectionConfig::unbounded(ADDR_A.parse().unwrap())); 169 | let peer_b = socket_a.connect(UdpConnectionConfig::unbounded(ADDR_B.parse().unwrap())); 170 | 171 | let msg_a: Box<[u8]> = b"Hello A!"[0..].into(); 172 | let msg_b: Box<[u8]> = b"Hello B!"[0..].into(); 173 | 174 | peer_a.try_send(msg_b.clone()).unwrap(); 175 | peer_b.try_send(msg_a.clone()).unwrap(); 176 | 177 | let recv_msg_a = futures::executor::block_on(peer_a.recv()).unwrap(); 178 | let recv_msg_b = futures::executor::block_on(peer_b.recv()).unwrap(); 179 | 180 | assert_eq!(msg_a, recv_msg_a); 181 | assert_eq!(msg_b, recv_msg_b); 182 | } 183 | 184 | #[test] 185 | #[serial_test::serial] 186 | pub fn test_multiple_send() { 187 | IoTaskPool::init(TaskPool::default); 188 | 189 | const ADDR_A: &str = "127.0.0.1:10002"; 190 | const ADDR_B: &str = "127.0.0.1:10003"; 191 | 192 | let socket_a = UdpManager::bind(ADDR_A).unwrap(); 193 | let socket_b = UdpManager::bind(ADDR_B).unwrap(); 194 | 195 | let peer_a = socket_b.connect(UdpConnectionConfig::unbounded(ADDR_A.parse().unwrap())); 196 | let peer_b = socket_a.connect(UdpConnectionConfig::unbounded(ADDR_B.parse().unwrap())); 197 | 198 | peer_a.try_send(b"100"[0..].into()).unwrap(); 199 | peer_a.try_send(b"101"[0..].into()).unwrap(); 200 | peer_a.try_send(b"102"[0..].into()).unwrap(); 201 | peer_a.try_send(b"103"[0..].into()).unwrap(); 202 | peer_a.try_send(b"104"[0..].into()).unwrap(); 203 | peer_a.try_send(b"105"[0..].into()).unwrap(); 204 | 205 | assert_eq!( 206 | futures::executor::block_on(peer_b.recv()), 207 | Ok(b"100"[0..].into()) 208 | ); 209 | assert_eq!( 210 | futures::executor::block_on(peer_b.recv()), 211 | Ok(b"101"[0..].into()) 212 | ); 213 | assert_eq!( 214 | futures::executor::block_on(peer_b.recv()), 215 | Ok(b"102"[0..].into()) 216 | ); 217 | assert_eq!( 218 | futures::executor::block_on(peer_b.recv()), 219 | Ok(b"103"[0..].into()) 220 | ); 221 | assert_eq!( 222 | futures::executor::block_on(peer_b.recv()), 223 | Ok(b"104"[0..].into()) 224 | ); 225 | assert_eq!( 226 | futures::executor::block_on(peer_b.recv()), 227 | Ok(b"105"[0..].into()) 228 | ); 229 | } 230 | } 231 | -------------------------------------------------------------------------------- /bevy_backroll/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bevy_backroll" 3 | version = "0.6.0" 4 | edition = "2021" 5 | authors = ["Hourai Teahouse Developers "] 6 | description = "A Bevy engine integration plugin for the 
backroll rollback networking library." 7 | repository = "https://github.com/HouraiTeahouse/backroll-rs" 8 | license = "ISC" 9 | 10 | [package.metadata.docs.rs] 11 | features = [ "docs-only" ] 12 | 13 | [features] 14 | default = [] 15 | steam = ["bevy-steamworks", "backroll_transport_steam"] 16 | 17 | [dependencies] 18 | backroll = { path = "../backroll", version = "0.5" } 19 | bytemuck = "1.5" 20 | bevy_ecs = "0.9" 21 | bevy_core = "0.9" 22 | bevy_tasks = "0.9" 23 | bevy_app = "0.9" 24 | bevy_log = "0.9" 25 | dashmap = "5.0" 26 | roaring = "0.10" 27 | 28 | # Optional dependencies 29 | bevy-steamworks = { version = "0.6", optional = true } 30 | backroll_transport_steam = { path = "../backroll_transport_steam", version = "0.4", optional = true } 31 | 32 | [dev-dependencies] 33 | bevy = "0.9" -------------------------------------------------------------------------------- /bevy_backroll/README.md: -------------------------------------------------------------------------------- 1 | # bevy-backroll 2 | 3 | [![crates.io](https://img.shields.io/crates/v/bevy-backroll.svg)](https://crates.io/crates/bevy-backroll) 4 | [![Documentation](https://docs.rs/bevy-backroll/badge.svg)](https://docs.rs/bevy-backroll) 5 | ![License](https://img.shields.io/crates/l/bevy-backroll) 6 | [![Discord](https://img.shields.io/discord/151219753434742784.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/VuZhs9V) 7 | [![Bevy tracking](https://img.shields.io/badge/Bevy%20tracking-released%20version-lightblue)](https://github.com/bevyengine/bevy/blob/main/docs/plugins_guidelines.md#main-branch-tracking) 8 | 9 | A [Bevy](https://bevyengine.com) engine integration plugin for [backroll](https://crates.io/crates/backroll) 10 | rollback networking library. 11 | 12 | ## Bevy Version Supported 13 | 14 | |Bevy Version|bevy\_backroll| 15 | |:-----------|:-------------| 16 | |0.8 |0.5 | 17 | |0.7 |0.4 | 18 | |0.6 |0.2, 0.3 | 19 | |0.5 |0.1 | 20 | 21 | ## Feature Flags 22 | 23 | - `steam` - Enables Steamworks SDK support, adds the `SteamP2PManager` from 24 | [backroll-transport-steam](https://crates.io/crates/bevy-backroll) as a 25 | resource to the app. -------------------------------------------------------------------------------- /bevy_backroll/src/id.rs: -------------------------------------------------------------------------------- 1 | use bevy_ecs::{component::Component, system::Resource}; 2 | 3 | /// A marker [`Component`]. Required to mark entities with network state. 4 | /// 5 | /// Registered network components will only be saved or loaded with this 6 | /// marker component present. 7 | #[derive(Debug, Component, Copy, Clone, Eq, Hash, PartialEq)] 8 | #[repr(transparent)] 9 | pub struct NetworkId(pub(crate) u32); 10 | 11 | /// A provider resource of new, globally unique [`NetworkId`] components. 12 | /// 13 | /// This resource itself is registered as a saveable resource and is guarenteed 14 | /// to deterministically produce IDs across rollbacks. 15 | /// 16 | /// This resource is reset upon starting a new session via 17 | /// [`BackrollCommands::start_backroll_session`][start_backroll_session]. 18 | /// 19 | /// [start_backroll_session]: crate::BackrollCommands::start_backroll_session 20 | #[derive(Resource, Debug, Clone)] 21 | #[repr(transparent)] 22 | pub struct NetworkIdProvider(u32); 23 | 24 | impl NetworkIdProvider { 25 | pub(crate) fn new() -> Self { 26 | Self(0) 27 | } 28 | 29 | /// Creates a new, unique [`NetworkId`]. 
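    ///
    /// IDs are handed out sequentially starting from zero, so peers that
    /// spawn their networked entities in the same order will agree on the
    /// assigned IDs. The provider panics on `u32` overflow.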
30 | pub fn new_id(&mut self) -> NetworkId { 31 | let id = NetworkId(self.0); 32 | self.0 = self 33 | .0 34 | .checked_add(1) 35 | .expect("NetworkId has overflowed u32::MAX."); 36 | id 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /bevy_backroll/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![warn(missing_docs)] 2 | 3 | //! A [Bevy](https://bevyengine.org) plugin that adds support for running 4 | //! [Backroll](https://crates.io/crates/backroll) sessions. 5 | //! 6 | //! Installing the plugin: 7 | //! ```rust no_run 8 | //! use backroll::*; 9 | //! use bytemuck::*; 10 | //! use bevy::prelude::*; 11 | //! use bevy_backroll::*; 12 | //! 13 | //! // Create your Backroll input type 14 | //! #[repr(C)] 15 | //! #[derive(Clone, Copy, Eq, PartialEq, Pod, Zeroable)] 16 | //! pub struct PlayerInput { 17 | //! // Input data... 18 | //! pub buttons_pressed: u8, 19 | //! } 20 | //! 21 | //! // Create your state. Must implement Clone. 22 | //! #[derive(Component, Clone)] 23 | //! pub struct PlayerState { 24 | //! pub handle: PlayerHandle, 25 | //! pub current_value: u64, 26 | //! } 27 | //! 28 | //! // Sample input from the local player's controller. 29 | //! fn sample_player_input(player: In) -> PlayerInput { 30 | //! // Sample input data... 31 | //! PlayerInput { 32 | //! buttons_pressed: 1, 33 | //! } 34 | //! } 35 | //! 36 | //! // Use input to advance the game simulation. 37 | //! fn simulate_game( 38 | //! input: Res>, 39 | //! mut query: Query<&mut PlayerState> 40 | //! ) { 41 | //! for mut player in query.iter_mut() { 42 | //! if let Ok(input) = input.get(player.handle) { 43 | //! player.current_value += input.buttons_pressed as u64; 44 | //! } 45 | //! } 46 | //! } 47 | //! 48 | //! fn main() { 49 | //! App::new() 50 | //! .add_plugin(BackrollPlugin) 51 | //! .register_rollback_input(sample_player_input) 52 | //! .register_rollback_component::() 53 | //! .add_rollback_system(simulate_game) 54 | //! .run(); 55 | //! } 56 | //! ``` 57 | use backroll::{ 58 | command::{Command, Commands}, 59 | Config, Event, GameInput, PlayerHandle, 60 | }; 61 | use bevy_app::{App, CoreStage, Plugin}; 62 | use bevy_ecs::{ 63 | event::Events, 64 | prelude::*, 65 | schedule::{IntoSystemDescriptor, ShouldRun, Stage, SystemSet, SystemStage}, 66 | system::{Commands as BevyCommands, System}, 67 | world::World, 68 | }; 69 | use bevy_log::{debug, error}; 70 | use std::{ 71 | marker::PhantomData, 72 | ops::{Deref, DerefMut}, 73 | }; 74 | 75 | mod id; 76 | mod save_state; 77 | #[cfg(feature = "steam")] 78 | mod steam; 79 | 80 | pub use backroll; 81 | pub use id::*; 82 | use save_state::*; 83 | 84 | /// A [`P2PSession`] alias for bevy_backroll sessions. Uses [`BevyBackrollConfig`] 85 | /// as the config type. 86 | pub type P2PSession = backroll::P2PSession>; 87 | 88 | /// A Bevy-compatible wrapper around [`GameInput`]. Implements [`Resource`]. 89 | #[derive(Resource, Debug, Clone)] 90 | pub struct BackrollInput(GameInput); 91 | 92 | impl Deref for BackrollInput { 93 | type Target = GameInput; 94 | fn deref(&self) -> &Self::Target { 95 | &self.0 96 | } 97 | } 98 | 99 | impl DerefMut for BackrollInput { 100 | fn deref_mut(&mut self) -> &mut Self::Target { 101 | &mut self.0 102 | } 103 | } 104 | 105 | #[derive(Resource, Clone)] 106 | struct BackrollSession(P2PSession); 107 | 108 | /// The [SystemLabel] used by the [BackrollStage] added by [BackrollPlugin]. 
109 | /// 110 | /// [SystemLabel]: bevy_ecs::schedule::SystemLabel 111 | /// [BackrollStage]: self::BackrollStage 112 | /// [BackrollPlugin]: self::BackrollPlugin 113 | #[derive(Debug, Clone, Eq, Hash, StageLabel, PartialEq)] 114 | pub struct BackrollUpdate; 115 | 116 | /// Manages when to inject frame stalls to keep in sync with remote players. 117 | struct FrameStaller { 118 | frames_ahead: u8, 119 | frames_until_stall: u8, 120 | } 121 | 122 | impl FrameStaller { 123 | pub fn new() -> Self { 124 | Self { 125 | frames_ahead: 0, 126 | frames_until_stall: 0, 127 | } 128 | } 129 | 130 | pub fn reset(&mut self, frames_ahead: u8) { 131 | self.frames_ahead = frames_ahead; 132 | self.frames_until_stall = self.stall_cadence(); 133 | } 134 | 135 | pub fn should_stall(&mut self) -> bool { 136 | if self.frames_ahead == 0 { 137 | return false; 138 | } 139 | if self.frames_until_stall == 0 { 140 | self.frames_ahead -= 1; 141 | self.frames_until_stall = self.stall_cadence(); 142 | true 143 | } else { 144 | self.frames_until_stall -= 1; 145 | false 146 | } 147 | } 148 | 149 | fn stall_cadence(&self) -> u8 { 150 | // Linearly decay the cadence based on how many frames ahead 151 | // the is. This will result in fast initial catch up and then 152 | // slowly smooth out small hitches. 153 | if self.frames_ahead > 9 { 154 | 1 155 | } else { 156 | 11 - self.frames_ahead 157 | } 158 | } 159 | } 160 | 161 | #[derive(Resource)] 162 | struct BackrollStages { 163 | save: SystemStage, 164 | simulate: SystemStage, 165 | before_load: SystemStage, 166 | load: SystemStage, 167 | run_criteria: Option>>, 168 | } 169 | 170 | /// The Backroll config type for bevy_backroll sessions. 171 | pub struct BevyBackrollConfig { 172 | _marker: PhantomData, 173 | } 174 | 175 | impl Config 176 | for BevyBackrollConfig 177 | { 178 | type Input = Input; 179 | type State = SaveState; 180 | } 181 | 182 | /// A [Stage] that transparently runs and handles Backroll sessions. 183 | /// 184 | /// Each time the stage runs, it will poll the Backroll session, sample local player 185 | /// inputs for the session, then advance the frame. 186 | /// 187 | /// The stage will automatically handle Backroll commands by doing the following: 188 | /// 189 | /// - [Command::Save]: Saves an immutable copy of the components and resoures from the 190 | /// main app [`World`] into a save state. 191 | /// - [Command::Load]: Loads a prior saved World state into the main app [`World`]. 192 | /// - [Command::AdvanceFrame]: Injects the provided [GameInput] as a resource then 193 | /// runs all simulation based systems once (see: [add_rollback_system]) 194 | /// - [Command::Event]: Forwards all events to Bevy. Can be read out via [EventReader]. 195 | /// Automatically handles time synchronization by smoothly injecting stall frames when 196 | /// ahead of remote players. 197 | /// 198 | /// This stage is best used with a [FixedTimestep] run criteria to ensure that the systems 199 | /// are running at a consistent rate on all players in the game. 200 | /// 201 | /// This stage will only run when there is a [P2PSession] with the same [Config] parameter 202 | /// registered as a resource within the running World. If the stage was added via 203 | /// [BackrollPlugin], [BackrollCommands::start_backroll_session] and [BackrollCommands::end_backroll_session] 204 | /// can be used to start or end a session. 
205 | /// 206 | /// [Stage]: bevy_ecs::schedule::Stage 207 | /// [World]: bevy_ecs::world::World 208 | /// [Command]: backroll::Command 209 | /// [BackrollCommands]: self::BackrollCommands 210 | /// [FixedTimestep]: bevy_core::FixedTimestep 211 | /// [EventReader]: bevy_app::EventReader 212 | /// [add_rollback_system]: self::BackrollApp::add_rollback_system 213 | pub struct BackrollStage 214 | where 215 | Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync, 216 | { 217 | staller: FrameStaller, 218 | input_sample_fn: Box + Send + Sync + 'static>, 219 | } 220 | 221 | impl BackrollStage 222 | where 223 | Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync, 224 | { 225 | fn run_commands(&mut self, commands: Commands>, world: &mut World) { 226 | world.resource_scope(|world, mut stages: Mut| { 227 | for command in commands { 228 | match command { 229 | Command::Save(save_state) => { 230 | world.insert_resource(SaveStateBuilder::new()); 231 | stages.save.run(world); 232 | // TODO(james7132): Find a way to hash the state here generically. 233 | save_state.save_without_hash( 234 | world.remove_resource::().unwrap().build(), 235 | ); 236 | } 237 | Command::Load(load_state) => { 238 | world.insert_resource(load_state.load()); 239 | stages.before_load.run(world); 240 | stages.load.run(world); 241 | world.remove_resource::(); 242 | } 243 | Command::AdvanceFrame(inputs) => { 244 | // Insert input via Resource 245 | *world.get_resource_mut::>().unwrap() = 246 | BackrollInput(inputs); 247 | stages.simulate.run(world); 248 | } 249 | Command::Event(evt) => { 250 | debug!("Received Backroll Event: {:?}", evt); 251 | 252 | // Update time sync stalls properly. 253 | if let Event::TimeSync { frames_ahead } = &evt { 254 | self.staller.reset(*frames_ahead); 255 | } 256 | 257 | let mut events = world.get_resource_mut::>().unwrap(); 258 | events.send(evt.clone()); 259 | } 260 | } 261 | } 262 | }); 263 | } 264 | } 265 | 266 | impl Stage for BackrollStage 267 | where 268 | Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync, 269 | { 270 | fn run(&mut self, world: &mut World) { 271 | loop { 272 | let should_run = world.resource_scope(|world, mut stages: Mut| { 273 | if let Some(ref mut run_criteria) = stages.run_criteria { 274 | run_criteria.run((), world) 275 | } else { 276 | ShouldRun::Yes 277 | } 278 | }); 279 | 280 | if let ShouldRun::No = should_run { 281 | return; 282 | } 283 | 284 | let session = if let Some(session) = world.get_resource_mut::>() 285 | { 286 | session.0.clone() 287 | } else { 288 | // No ongoing session, don't run. 289 | return; 290 | }; 291 | 292 | self.run_commands(session.poll(), world); 293 | 294 | if self.staller.should_stall() { 295 | continue; 296 | } 297 | 298 | for player_handle in session.local_players() { 299 | let input = self.input_sample_fn.run(player_handle, world); 300 | if let Err(err) = session.add_local_input(player_handle, input) { 301 | error!( 302 | "Error while adding local input for {:?}: {:?}", 303 | player_handle, err 304 | ); 305 | return; 306 | } 307 | } 308 | 309 | self.run_commands(session.advance_frame(), world); 310 | 311 | if let ShouldRun::Yes = should_run { 312 | return; 313 | } 314 | } 315 | } 316 | } 317 | 318 | /// A Bevy plugin that adds a [BackrollStage] to the app. 319 | /// 320 | /// **Note:** This stage does not enforce any specific system execution order. 
321 | /// Users of this stage should ensure that their included systems have a strict 322 | /// deterministic execution order, otherwise simulation may result in desyncs. 323 | /// 324 | /// Also registers Backroll's [Event] as an event type, which the stage will 325 | /// forward to Bevy. 326 | /// 327 | /// If the feature is enabled, this will also register the associated transport 328 | /// layer implementations for Steam. 329 | /// 330 | /// [BackrolLStage]: self::BackrollStage 331 | /// [Event]: backroll::Event 332 | #[derive(Default)] 333 | pub struct BackrollPlugin; 334 | 335 | impl Plugin for BackrollPlugin { 336 | fn build(&self, app: &mut App) { 337 | let mut save = SystemStage::parallel(); 338 | save.add_system(save_network_ids); 339 | let mut before_load = SystemStage::parallel(); 340 | before_load.add_system(sync_network_ids); 341 | app.add_event::() 342 | .insert_resource(NetworkIdProvider::new()) 343 | .insert_resource(BackrollStages { 344 | save, 345 | simulate: SystemStage::parallel(), 346 | before_load, 347 | load: SystemStage::parallel(), 348 | run_criteria: None, 349 | }) 350 | .register_rollback_resource::(); 351 | 352 | #[cfg(feature = "steam")] 353 | app.add_plugin(steam::BackrollSteamPlugin); 354 | } 355 | } 356 | 357 | /// Extension trait for configuring [App]s using a [BackrollPlugin]. 358 | /// 359 | /// [App]: bevy_app::App 360 | /// [BackrollPlugin]: self::BackrollPlugin 361 | pub trait BackrollApp { 362 | /// Sets the input sampler system for Backroll. This is required. Backroll will 363 | /// not start without this being set. 364 | fn register_rollback_input(&mut self, system: S) -> &mut Self 365 | where 366 | Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync, 367 | S: IntoSystem + Send + Sync + 'static; 368 | 369 | /// Registers a specific component type for saving into Backroll's save states. 370 | /// Any game simulation state stored in components should be registered here. 371 | fn register_rollback_component(&mut self) -> &mut Self; 372 | 373 | /// Registers a specific resource type for saving into Backroll's save states. 374 | /// Any game simulation state stored in resources should be registered here. 375 | fn register_rollback_resource(&mut self) -> &mut Self; 376 | 377 | /// Sets the [RunCriteria] for the [BackrollStage]. By default this uses a [FixedTimestep] 378 | /// set to 60 ticks per second. 379 | /// 380 | /// [RunCriteria]: bevy_ecs::schedule::RunCriteria 381 | /// [BackrollStage]: self::BackrollStage 382 | /// [FixedTimestep]: bevy_core::FixedTimestep 383 | fn with_rollback_run_criteria(&mut self, system: S) -> &mut Self 384 | where 385 | S: System; 386 | 387 | /// Adds a system to the Backroll stage. 388 | fn add_rollback_system(&mut self, system: S) -> &mut Self 389 | where 390 | S: IntoSystemDescriptor; 391 | 392 | /// Adds a [SystemSet] to the BackrollStage. 
393 | /// 394 | /// [SystemSet]: bevy_ecs::schedule::SystemSet 395 | fn add_rollback_system_set(&mut self, system: impl Into) -> &mut Self; 396 | } 397 | 398 | impl BackrollApp for App { 399 | fn register_rollback_input(&mut self, system: S) -> &mut Self 400 | where 401 | Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync, 402 | S: IntoSystem + Send + Sync + 'static, 403 | { 404 | self.insert_resource(BackrollInput(GameInput::::default())); 405 | self.add_stage_before( 406 | CoreStage::Update, 407 | BackrollUpdate, 408 | BackrollStage:: { 409 | staller: FrameStaller::new(), 410 | input_sample_fn: Box::new(S::into_system(system)), 411 | }, 412 | ); 413 | 414 | self 415 | } 416 | 417 | fn register_rollback_component(&mut self) -> &mut Self { 418 | let mut stages = self 419 | .world 420 | .get_resource_mut::() 421 | .expect("No BackrollStages found! Did you install the plugin?"); 422 | stages.load.add_system(load_components::); 423 | stages.save.add_system(save_components::); 424 | self 425 | } 426 | 427 | fn register_rollback_resource(&mut self) -> &mut Self { 428 | let mut stages = self 429 | .world 430 | .get_resource_mut::() 431 | .expect("No BackrollStages found! Did you install the plugin?"); 432 | stages.load.add_system(load_resource::); 433 | stages.save.add_system(save_resource::); 434 | self 435 | } 436 | 437 | fn with_rollback_run_criteria(&mut self, run_criteria: S) -> &mut Self 438 | where 439 | S: System, 440 | { 441 | self.world 442 | .get_resource_mut::() 443 | .expect("No BackrollStages found! Did you install the plugin?") 444 | .run_criteria = Some(Box::new(run_criteria)); 445 | self 446 | } 447 | 448 | fn add_rollback_system(&mut self, system: S) -> &mut Self 449 | where 450 | S: IntoSystemDescriptor, 451 | { 452 | self.world 453 | .get_resource_mut::() 454 | .expect("No BackrollStages found! Did you install the plugin?") 455 | .simulate 456 | .add_system(system); 457 | self 458 | } 459 | 460 | fn add_rollback_system_set(&mut self, system: impl Into) -> &mut Self { 461 | self.world 462 | .get_resource_mut::() 463 | .expect("No BackrollStages found! Did you install the plugin?") 464 | .simulate 465 | .add_system_set(system.into()); 466 | self 467 | } 468 | } 469 | 470 | /// Extension trait for [Commands] to start and stop Backroll sessions. 471 | /// 472 | /// [Commands]: bevy_ecs::system::Commands 473 | pub trait BackrollCommands { 474 | /// Starts a new Backroll session. If one is already in progress, it will be replaced 475 | /// and the old session will be dropped. This will add the session as a resource, which 476 | /// can be accessed via [Res]. 477 | /// 478 | /// [Res]: bevy_ecs::system::Res 479 | fn start_backroll_session(&mut self, session: P2PSession) 480 | where 481 | Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync; 482 | 483 | /// Ends the ongoing Backroll session. This will remove the associated resource and drop 484 | /// the session. 485 | /// 486 | /// Does nothing if there is no ongoing session. 487 | fn end_backroll_session(&mut self) 488 | where 489 | Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync; 490 | } 491 | 492 | impl<'w, 's> BackrollCommands for BevyCommands<'w, 's> { 493 | fn start_backroll_session(&mut self, session: P2PSession) 494 | where 495 | Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync, 496 | { 497 | // Reset the NetworkIdProvider on the start of a new session. 
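        // so that ID allocation starts from zero again for the new session.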
498 |         self.insert_resource(NetworkIdProvider::new());
499 |         self.insert_resource(BackrollSession(session));
500 |     }
501 | 
502 |     fn end_backroll_session<Input>(&mut self)
503 |     where
504 |         Input: PartialEq + bytemuck::Pod + bytemuck::Zeroable + Send + Sync,
505 |     {
506 |         self.remove_resource::<BackrollSession<Input>>();
507 |     }
508 | }
509 | 
--------------------------------------------------------------------------------
/bevy_backroll/src/save_state.rs:
--------------------------------------------------------------------------------
1 | use crate::NetworkId;
2 | use bevy_ecs::prelude::*;
3 | use dashmap::DashMap;
4 | use roaring::RoaringBitmap;
5 | use std::any::*;
6 | use std::collections::HashMap;
7 | use std::sync::Arc;
8 | 
9 | #[derive(Clone)]
10 | struct SavedComponents<T> {
11 |     components: HashMap<NetworkId, T>,
12 | }
13 | 
14 | /// A mutable builder for [`SaveState`]s.
15 | #[derive(Resource)]
16 | pub(crate) struct SaveStateBuilder {
17 |     ids: RoaringBitmap,
18 |     state: DashMap<TypeId, Box<dyn Any + Send + Sync>>,
19 | }
20 | 
21 | impl SaveStateBuilder {
22 |     pub fn new() -> Self {
23 |         Self {
24 |             ids: RoaringBitmap::new(),
25 |             state: DashMap::new(),
26 |         }
27 |     }
28 | 
29 |     pub fn build(self) -> SaveState {
30 |         SaveState(Arc::new(SaveStateRef {
31 |             ids: self.ids,
32 |             state: self.state.into_iter().collect(),
33 |         }))
34 |     }
35 | }
36 | 
37 | struct SaveStateRef {
38 |     ids: RoaringBitmap,
39 |     state: HashMap<TypeId, Box<dyn Any + Send + Sync>>,
40 | }
41 | 
42 | /// A read-only save state of a Bevy world.
43 | #[derive(Resource, Clone)]
44 | pub struct SaveState(Arc<SaveStateRef>);
45 | 
46 | pub(crate) fn save_resource<T: Resource + Clone>(
47 |     save_state: Res<SaveStateBuilder>,
48 |     resource: Option<Res<T>>,
49 | ) {
50 |     if let Some(resource) = resource {
51 |         save_state
52 |             .state
53 |             .insert(TypeId::of::<T>(), Box::new(resource.clone()));
54 |     }
55 | }
56 | 
57 | pub(crate) fn load_resource<T: Resource + Clone>(
58 |     save_state: Res<SaveState>,
59 |     resource: Option<ResMut<T>>,
60 |     mut commands: Commands,
61 | ) {
62 |     // HACK: This is REALLY going to screw with any change detection on these types.
63 |     let saved = save_state.0.state.get(&TypeId::of::<T>());
64 |     match (saved, resource) {
65 |         (Some(saved), Some(mut resource)) => {
66 |             *resource = saved.downcast_ref::<T>().unwrap().clone();
67 |         }
68 |         (Some(saved), None) => {
69 |             commands.insert_resource(saved.downcast_ref::<T>().unwrap().clone());
70 |         }
71 |         (None, Some(_)) => {
72 |             commands.remove_resource::<T>();
73 |         }
74 |         (None, None) => {}
75 |     }
76 | }
77 | 
78 | pub(crate) fn save_network_ids(mut save_state: ResMut<SaveStateBuilder>, query: Query<&NetworkId>) {
79 |     save_state.ids = query.iter().map(|id| id.0).collect();
80 | }
81 | 
82 | pub(crate) fn save_components<T: Component + Clone>(
83 |     save_state: Res<SaveStateBuilder>,
84 |     query: Query<(&NetworkId, &T)>,
85 | ) {
86 |     let components: HashMap<NetworkId, T> = query
87 |         .iter()
88 |         .map(|(id, component)| (*id, component.clone()))
89 |         .collect();
90 |     if !components.is_empty() {
91 |         save_state.state.insert(
92 |             TypeId::of::<SavedComponents<T>>(),
93 |             Box::new(SavedComponents { components }),
94 |         );
95 |     }
96 | }
97 | 
98 | pub(crate) fn sync_network_ids(
99 |     save_state: Res<SaveState>,
100 |     query: Query<(Entity, &NetworkId)>,
101 |     mut commands: Commands,
102 | ) {
103 |     // Despawn all network identities that shouldn't exist this frame.
104 |     let mut ids = save_state.0.ids.clone();
105 |     for (entity, network_id) in query.iter() {
106 |         if !ids.remove(network_id.0) {
107 |             commands.entity(entity).despawn();
108 |         }
109 |     }
110 | 
111 |     // All IDs that remain need to be re-spawned.
112 |     for network_id in ids {
113 |         commands.spawn((NetworkId(network_id),));
114 |     }
115 | }
116 | 
117 | pub(crate) fn load_components<T: Component + Clone>(
118 |     save_state: Res<SaveState>,
119 |     mut query: Query<(Entity, &NetworkId, Option<&mut T>)>,
120 |     mut commands: Commands,
121 | ) {
122 |     let saved = save_state.0.state.get(&TypeId::of::<SavedComponents<T>>());
123 |     let slab = if let Some(slab) = saved {
124 |         slab.downcast_ref::<SavedComponents<T>>().unwrap()
125 |     } else {
126 |         for (entity, _, comp) in query.iter() {
127 |             if comp.is_some() {
128 |                 commands.entity(entity).remove::<T>();
129 |             }
130 |         }
131 |         return;
132 |     };
133 | 
134 |     // HACK: This is REALLY going to screw with any change detection on these types.
135 |     for (entity, network_id, comp) in query.iter_mut() {
136 |         match (slab.components.get(network_id), comp) {
137 |             (Some(saved), Some(mut comp)) => {
138 |                 *comp = saved.clone();
139 |             }
140 |             (Some(saved), None) => {
141 |                 commands.entity(entity).insert(saved.clone());
142 |             }
143 |             (None, Some(_)) => {
144 |                 commands.entity(entity).remove::<T>();
145 |             }
146 |             (None, None) => {}
147 |         }
148 |     }
149 | }
150 | 
--------------------------------------------------------------------------------
/bevy_backroll/src/steam.rs:
--------------------------------------------------------------------------------
1 | use bevy_app::{App, Plugin};
2 | use bevy_ecs::prelude::*;
3 | use bevy_steamworks::Client;
4 | use std::ops::Deref;
5 | 
6 | #[derive(Resource)]
7 | pub struct SteamP2PManager(backroll_transport_steam::SteamP2PManager);
8 | 
9 | fn initialize_steam_socket(client: Option<Res<Client>>, mut commands: Commands) {
10 |     if let Some(client) = client {
11 |         let client = client.deref().deref();
12 |         commands.insert_resource(SteamP2PManager(
13 |             backroll_transport_steam::SteamP2PManager::bind(client.clone()),
14 |         ));
15 |     }
16 | }
17 | 
18 | pub struct BackrollSteamPlugin;
19 | 
20 | impl Plugin for BackrollSteamPlugin {
21 |     fn build(&self, builder: &mut App) {
22 |         builder.add_startup_system(initialize_steam_socket);
23 |     }
24 | }
25 | 
--------------------------------------------------------------------------------
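
Taken together, `BackrollPlugin`, the `BackrollApp` extension trait, and the `BackrollCommands` extension trait define how a game wires itself up: install the plugin, register an input sampler, register the rollback components and resources, add deterministic simulation systems, and start a `P2PSession` from a command context. The sketch below is not from the repository; the `PlayerInput` struct, `Position` component, `player_movement` system, and `start_session` helper are illustrative placeholders, and it assumes (per the signatures above) that the input sampler is a Bevy system taking the local `PlayerHandle` as system input and returning the sampled input. It also assumes the `bevy` facade crate and `bytemuck` with its derive feature.

```rust
use backroll::{P2PSession, PlayerHandle};
use bevy::prelude::*;
use bevy_backroll::{BackrollApp, BackrollCommands, BackrollPlugin};

// Hypothetical per-frame input; any `PartialEq + Pod + Zeroable` type works.
#[derive(Clone, Copy, PartialEq, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
struct PlayerInput {
    buttons: u8,
}

// Hypothetical rollback-managed component; must be `Component + Clone`.
#[derive(Component, Clone)]
struct Position(Vec2);

// Input sampler: invoked for each local player every simulation frame.
fn sample_input(_player: In<PlayerHandle>, keys: Res<Input<KeyCode>>) -> PlayerInput {
    PlayerInput {
        buttons: keys.pressed(KeyCode::Space) as u8,
    }
}

// Deterministic simulation system that runs inside the Backroll stage.
fn player_movement(mut query: Query<&mut Position>) {
    for mut position in query.iter_mut() {
        position.0.x += 1.0;
    }
}

// Hypothetical helper: call from whatever system finishes connecting peers
// (e.g. over backroll_transport_udp) once the session has been built.
fn start_session(commands: &mut Commands, session: P2PSession<PlayerInput>) {
    commands.start_backroll_session(session);
}

fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_plugin(BackrollPlugin)
        .register_rollback_input(sample_input)
        .register_rollback_component::<Position>()
        .add_rollback_system(player_movement)
        .run();
}
```

Building the session itself (adding local and remote players, binding a transport) goes through backroll's own session API and the transport crates, and is intentionally left out of this sketch.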
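
The save-state module above captures the world without serialization by keying a type-erased map on `TypeId`: the save systems insert one `Box<dyn Any + Send + Sync>` per registered type, and the load systems `downcast_ref` back to the concrete type. A minimal standalone sketch of that pattern (plain Rust, illustrative names, no Bevy types):

```rust
use std::any::{Any, TypeId};
use std::collections::HashMap;

/// Minimal type-erased snapshot store, mirroring SaveStateBuilder/SaveState.
#[derive(Default)]
struct Snapshot {
    state: HashMap<TypeId, Box<dyn Any + Send + Sync>>,
}

impl Snapshot {
    /// Store a clone of `value` under its concrete type.
    fn save<T: Clone + Send + Sync + 'static>(&mut self, value: &T) {
        self.state.insert(TypeId::of::<T>(), Box::new(value.clone()));
    }

    /// Fetch the stored value for `T`, if any, by downcasting the erased box.
    fn load<T: Clone + Send + Sync + 'static>(&self) -> Option<T> {
        self.state
            .get(&TypeId::of::<T>())
            .and_then(|boxed| boxed.downcast_ref::<T>())
            .cloned()
    }
}

fn main() {
    #[derive(Clone, Debug, PartialEq)]
    struct Score(u32);

    let mut snapshot = Snapshot::default();
    snapshot.save(&Score(42));

    // Rolling back is just cloning the stored value back out.
    assert_eq!(snapshot.load::<Score>(), Some(Score(42)));
}
```

Cloning into and out of the map is what keeps rollback cheap to trigger: restoring a frame clones the stored values back over the live ones, which is also why every registered component and resource type must be `Clone`.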