├── .gitignore
├── src
│   ├── lib.rs
│   ├── util.rs
│   ├── runtime.rs
│   ├── tokio.rs
│   ├── buffer_pool.rs
│   ├── sdp.rs
│   ├── crypto.rs
│   ├── stun.rs
│   ├── sctp.rs
│   ├── server.rs
│   └── client.rs
├── LICENSE
├── Cargo.toml
├── .circleci
│   └── config.yml
├── README.md
├── CHANGELOG.md
└── examples
    ├── echo-server.html
    └── echo-server.rs

/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | Cargo.lock
3 | shell.nix
4 | .envrc
5 | .direnv
6 | .dir-locals.el
7 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | mod buffer_pool;
2 | mod client;
3 | mod crypto;
4 | pub mod runtime;
5 | mod sctp;
6 | mod sdp;
7 | mod server;
8 | mod stun;
9 | mod util;
10 |
11 | pub use client::{MessageType, MAX_MESSAGE_LEN};
12 | pub use crypto::SslConfig;
13 | pub use server::{MessageBuffer, MessageResult, SendError, Server, SessionEndpoint};
14 |
15 | #[cfg(feature = "tokio")]
16 | pub mod tokio;
--------------------------------------------------------------------------------
/src/util.rs:
--------------------------------------------------------------------------------
1 | use rand::Rng;
2 |
3 | pub fn rand_string<R: Rng>(rng: &mut R, size: usize) -> String {
4 | const RAND_CHAR_TABLE: &[u8; 62] =
5 | b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
6 |
7 | let mut s = String::new();
8 | s.reserve(size);
9 | for _ in 0..size {
10 | s.push(RAND_CHAR_TABLE[rng.gen_range(0..RAND_CHAR_TABLE.len())] as char);
11 | }
12 | s
13 | }
--------------------------------------------------------------------------------
/src/runtime.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | future::Future,
3 | io,
4 | net::SocketAddr,
5 | task::{Context, Poll},
6 | time::Duration,
7 | };
8 |
9 | pub trait UdpSocket {
10 | fn poll_recv_from(
11 | &mut self,
12 | cx: &mut Context,
13 | buf: &mut [u8],
14 | ) -> Poll<io::Result<(usize, SocketAddr)>>;
15 |
16 | fn poll_send_to(
17 | &mut self,
18 | cx: &mut Context,
19 | buf: &[u8],
20 | addr: SocketAddr,
21 | ) -> Poll<io::Result<usize>>;
22 | }
23 |
24 | pub trait Runtime {
25 | type Timer: Future;
26 | type UdpSocket: UdpSocket;
27 |
28 | fn bind_udp(&self, listen_addr: SocketAddr) -> Result<Self::UdpSocket, io::Error>;
29 | fn timer(&self, after: Duration) -> Self::Timer;
30 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | This work is derived in part from the WebUDP project, Copyright (c) 2017 Siim
4 | Kallas
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "webrtc-unreliable" 3 | version = "0.6.0" 4 | authors = ["kyren "] 5 | edition = "2018" 6 | description = "UDP-like (unreliable, unordered) communication between Javascript/WebAssembly and native Rust via WebRTC" 7 | repository = "https://github.com/kyren/webrtc-unreliable" 8 | documentation = "https://docs.rs/webrtc-unreliable" 9 | readme = "README.md" 10 | keywords = ["wasm", "webrtc"] 11 | license = "MIT" 12 | 13 | [dependencies] 14 | byteorder = "1.3" 15 | crc = "3.0" 16 | futures-channel = { version = "0.3", features = ["sink"] } 17 | futures-core = { version = "0.3" } 18 | futures-util = { version = "0.3", features = ["sink"] } 19 | http = "1.0" 20 | log = "0.4" 21 | openssl = "0.10" 22 | pin-project = "1.0" 23 | rand = "0.8" 24 | tokio = { version = "1.0", features = ["net"], optional = true } 25 | 26 | [dev-dependencies] 27 | clap = "3.0" 28 | env_logger = "0.9" 29 | futures = { version = "0.3" } 30 | hyper = { version = "1.0", features = ["full"] } 31 | hyper-util = { version = "0.1", features = ["tokio"] } 32 | http-body-util = "0.1" 33 | tokio = { version = "1.0", features = ["full"] } 34 | 35 | [[example]] 36 | name = "echo-server" 37 | required-features = ["tokio"] 38 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | jobs: 4 | build: 5 | docker: 6 | - image: cimg/rust:1.70.0 7 | steps: 8 | - checkout 9 | - run: 10 | name: Setup Rust 11 | command: | 12 | rustup component add rustfmt 13 | - run: 14 | name: Version information 15 | command: | 16 | rustc --version 17 | cargo --version 18 | rustfmt --version 19 | rustup --version 20 | - run: 21 | name: Calculate dependencies 22 | command: cargo generate-lockfile 23 | - restore_cache: 24 | keys: 25 | - cargo-cache-{{ arch }}-{{ checksum "Cargo.lock" }} 26 | - run: 27 | name: Check formatting 28 | command: | 29 | rustfmt --version 30 | cargo fmt --all -- --check --color=auto 31 | - run: 32 | name: Build all targets 33 | command: cargo build --all --all-targets 34 | - save_cache: 35 | paths: 36 | - /usr/local/cargo/registry 37 | - target/debug/.fingerprint 38 | - target/debug/build 39 | - target/debug/deps 40 | key: cargo-cache-{{ arch }}-{{ checksum "Cargo.lock" }} 41 | -------------------------------------------------------------------------------- /src/tokio.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | future::Future, 3 | io, 4 | net::SocketAddr, 5 | pin::Pin, 6 | task::{Context, Poll}, 7 | time::Duration, 8 | }; 9 | 10 | use futures_util::ready; 11 | 12 | #[pin_project::pin_project] 13 | pub struct Timer(#[pin] tokio::time::Sleep); 14 | 15 | impl Future for Timer { 16 | type Output = (); 17 | 18 | fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> { 19 | self.project().0.poll(cx) 20 | } 21 | } 22 | 23 | pub struct UdpSocket(tokio::net::UdpSocket); 24 | 25 | impl crate::runtime::UdpSocket for UdpSocket { 26 | fn poll_recv_from( 27 | 
&mut self,
28 | cx: &mut Context,
29 | buf: &mut [u8],
30 | ) -> Poll<io::Result<(usize, SocketAddr)>> {
31 | let mut buf = tokio::io::ReadBuf::new(buf);
32 | let socket_addr = ready!(self.0.poll_recv_from(cx, &mut buf))?;
33 | Poll::Ready(Ok((buf.filled().len(), socket_addr)))
34 | }
35 |
36 | fn poll_send_to(
37 | &mut self,
38 | cx: &mut Context,
39 | buf: &[u8],
40 | addr: SocketAddr,
41 | ) -> Poll<io::Result<usize>> {
42 | self.0.poll_send_to(cx, buf, addr)
43 | }
44 | }
45 |
46 | pub struct Runtime;
47 |
48 | impl crate::runtime::Runtime for Runtime {
49 | type Timer = Timer;
50 | type UdpSocket = UdpSocket;
51 |
52 | fn bind_udp(&self, listen_addr: SocketAddr) -> Result<UdpSocket, io::Error> {
53 | let socket = std::net::UdpSocket::bind(listen_addr)?;
54 | socket.set_nonblocking(true)?;
55 | Ok(UdpSocket(tokio::net::UdpSocket::from_std(socket)?))
56 | }
57 |
58 | fn timer(&self, after: Duration) -> Timer {
59 | Timer(tokio::time::sleep(after))
60 | }
61 | }
62 |
63 | pub type Server = crate::server::Server<Runtime>;
64 |
65 | pub fn new_server(
66 | listen_addr: SocketAddr,
67 | public_addr: SocketAddr,
68 | ) -> Result<Server, io::Error> {
69 | crate::server::Server::new(Runtime, listen_addr, public_addr)
70 | }
--------------------------------------------------------------------------------
/src/buffer_pool.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | ops::{Deref, DerefMut},
3 | sync::{Arc, Mutex},
4 | };
5 |
6 | /// Shared pool of reusable Vec<u8> buffers.
7 | ///
8 | /// Send and non-blocking, but will panic if accessed from multiple threads at the same time.
9 | #[derive(Clone, Debug)]
10 | pub struct BufferPool(Arc<Mutex<Vec<OwnedBuffer>>>);
11 |
12 | impl BufferPool {
13 | pub fn new() -> BufferPool {
14 | BufferPool(Arc::new(Mutex::new(Vec::new())))
15 | }
16 |
17 | /// Acquire a buffer from the pool and return a handle to it; the buffer is guaranteed to have
18 | /// length zero.
19 | ///
20 | /// The buffer will be returned to the pool when the handle is dropped, unless it is converted
21 | /// to an `OwnedBuffer`.
22 | pub fn acquire(&self) -> BufferHandle {
23 | let mut buffer = self.0.try_lock().unwrap().pop().unwrap_or_default();
24 | buffer.0.clear();
25 | BufferHandle(self, Some(buffer))
26 | }
27 |
28 | /// Adopt an owned buffer, returning a handle that will return the owned buffer to the pool on
29 | /// drop.
30 | pub fn adopt(&self, buffer: OwnedBuffer) -> BufferHandle {
31 | BufferHandle(self, Some(buffer))
32 | }
33 |
34 | fn release(&self, buffer: OwnedBuffer) {
35 | self.0.try_lock().unwrap().push(buffer);
36 | }
37 | }
38 |
39 | /// A handle to a pooled buffer which will return the buffer to the pool on drop.
40 | pub struct BufferHandle<'a>(&'a BufferPool, Option<OwnedBuffer>);
41 |
42 | impl<'a> BufferHandle<'a> {
43 | /// Convert this buffer handle into an `OwnedBuffer`, which does not borrow the `BufferPool` and
44 | /// will not automatically return the buffer to the pool on drop.
45 | pub fn into_owned(mut self) -> OwnedBuffer {
46 | self.1.take().unwrap()
47 | }
48 | }
49 |
50 | impl<'a> Deref for BufferHandle<'a> {
51 | type Target = Vec<u8>;
52 |
53 | fn deref(&self) -> &Vec<u8> {
54 | &self.1.as_ref().unwrap().0
55 | }
56 | }
57 |
58 | impl<'a> DerefMut for BufferHandle<'a> {
59 | fn deref_mut(&mut self) -> &mut Vec<u8> {
60 | &mut self.1.as_mut().unwrap().0
61 | }
62 | }
63 |
64 | impl<'a> Drop for BufferHandle<'a> {
65 | fn drop(&mut self) {
66 | if let Some(owned) = self.1.take() {
67 | self.0.release(owned);
68 | }
69 | }
70 | }
71 |
72 | /// A buffer that has been taken out of a `BufferPool` and is no longer owned by it.
73 | ///
74 | /// It is an opaque type for transferring ownership of buffers; in order to access the inner buffer,
75 | /// it must first be returned to the pool. By preventing the use of the buffer without returning
76 | /// ownership to the pool first, this wrapper type makes it less likely that owned buffers will be
77 | /// dropped without being returned to the pool.
78 | #[derive(Debug, Default)]
79 | pub struct OwnedBuffer(Vec<u8>);
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## webrtc-unreliable
2 |
3 | ---
4 |
5 | [![Build Status](https://img.shields.io/circleci/project/github/kyren/webrtc-unreliable.svg)](https://circleci.com/gh/kyren/webrtc-unreliable)
6 | [![Latest Version](https://img.shields.io/crates/v/webrtc-unreliable.svg)](https://crates.io/crates/webrtc-unreliable)
7 | [![API Documentation](https://docs.rs/webrtc-unreliable/badge.svg)](https://docs.rs/webrtc-unreliable)
8 |
9 | This is a Rust library which allows you to write a game server with browser-based
10 | clients and UDP-like networking.
11 |
12 | This crate is not meant as a general-purpose WebRTC data channel system; it is
13 | instead designed to be used as the bottom-level, UDP-like transport layer of a
14 | higher-level protocol in an environment where UDP is not available. It provides
15 | an async API for accepting WebRTC connections from browsers and sending and
16 | receiving WebRTC unreliable data channel messages from multiple clients.
17 |
18 | The full set of protocols needed to implement WebRTC is daunting. This
19 | crate implements only the bare minimum subset of WebRTC required to support
20 | unreliable, unordered data channel messages. Because the WebRTC support is
21 | so minimal, this crate does not need to depend on a pre-existing heavyweight
22 | WebRTC implementation, but as such the protocol support is extremely limited.
23 |
24 | You should expect *only* WebRTC data channels to function, and *only* in
25 | unreliable, unordered mode. Additionally, there is a stricter limit on WebRTC
26 | message lengths than what would be supported in a full WebRTC implementation.
27 | Only *unfragmented* SCTP packets are handled, so any message large enough to
28 | cause an SCTP packet to need fragmentation causes an error on write and is
29 | simply dropped on read. The maximum message length depends on the particular
30 | browser you connect with, but in my testing it is currently slightly smaller
31 | than 1200 bytes.
32 |
33 | This crate is agnostic to the async runtime: all required functionality comes
34 | from an implementation of a `Runtime` trait. The requirements are minimal: a
35 | `Sleep`-style timer future and a `UdpSocket` with polled recv / send. A tokio
36 | implementation is provided behind the `tokio` feature.
37 |
38 | ## Running the example
39 |
40 | In a terminal:
41 |
42 | ```
43 | $ cargo run --features tokio --example echo-server -- --data 127.0.0.1:42424 --http 127.0.0.1:8080 --public 127.0.0.1:42424
44 | ```
45 |
46 | Then, using a web browser, go to 'http://127.0.0.1:8080/index.html'. Open the
47 | debug console; if everything is working correctly, you should see messages being
48 | sent and received.
49 |
50 | Please note that Firefox does not accept WebRTC connections to 127.0.0.1, so you
51 | may need to use a different IP address.
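For a rough sense of the server-side API that the example exercises, here is a minimal sketch of the receive/echo loop, adapted from `examples/echo-server.rs` below. It assumes the `tokio` feature; the HTTP signaling that exposes the `SessionEndpoint` to browsers is elided, and the addresses are placeholders.

```rust
// Sketch only: see examples/echo-server.rs for the complete, runnable version.
#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
    let listen_addr = "127.0.0.1:42424".parse().unwrap();
    let public_addr = "127.0.0.1:42424".parse().unwrap();
    let mut rtc_server = webrtc_unreliable::tokio::new_server(listen_addr, public_addr)?;

    // The `SessionEndpoint` answers browser SDP offers; it must be exposed over
    // some external channel (e.g. an HTTP endpoint) so clients can connect.
    let _session_endpoint = rtc_server.session_endpoint();

    loop {
        // Receive one unreliable, unordered data channel message, copying it out
        // of the internal receive buffer.
        let received = match rtc_server.recv().await {
            Ok(received) => Some((
                received.message.as_ref().to_vec(),
                received.message_type,
                received.remote_addr,
            )),
            Err(err) => {
                eprintln!("could not receive RTC message: {:?}", err);
                None
            }
        };

        // Echo the message back to the client that sent it.
        if let Some((payload, message_type, remote_addr)) = received {
            if let Err(err) = rtc_server.send(&payload, message_type, &remote_addr).await {
                eprintln!("could not send message to {}: {:?}", remote_addr, err);
            }
        }
    }
}
```

The `recv` future yields a `MessageResult` that borrows an internal buffer, which is why the payload is copied out before `send` takes a fresh mutable borrow of the server.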
52 | 53 | ## Credit 54 | 55 | This was originally a Rust / Tokio port of the 56 | [WebUDP](https://github.com/seemk/WebUdp) project, so the credit for the 57 | original design goes there. 58 | 59 | ## License 60 | 61 | This project is licensed under the [MIT license](LICENSE) 62 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## [0.6.0] 2 | - Support a shared `Crypto` instance between servers, useful on Firefox for 3 | multiple connections per page. 4 | - API incompatible change: Make `Server::new` sync. 5 | - API incompatible change: Add `Runtime` trait to be reactor agnostic, adds an optional `tokio` feature 6 | to provide the previous tokio implementation. 7 | 8 | ## [0.5.3] 9 | - Handle FORWARD_TSN support in SCTP when it is specified as an extension 10 | 11 | ## [0.5.2] 12 | - Fix a bug in STUN parsing that causes failures if the User attribute is the 13 | last attribute in the packet. 14 | - Update internal crc dependency 15 | 16 | ## [0.5.1] 17 | - *Attempt* to handle DTLS shutdown correctly at least when there is no packet 18 | loss. 19 | - Fix some bugs causing at least firefox to complain about ice attributes being 20 | incorrectly at the media level. 21 | - Don't log "starting shutdown" if the client has already shutdown. 22 | - Update rand dependency. 23 | - Add a method to check whether any clients are not fully shutdown. This should 24 | *theoretically* make it possible to implement an *attempt* at clean shutdown 25 | for all clients. See issue #15. 26 | 27 | ## [0.5] 28 | - Change crc32c dependency to crc to unbreak ARM build (thanks @tchamelot!), 29 | - Remove crc32fast in favor of only using crc dependency. 30 | - Handle better clients which have errored and are shutting down without 31 | spamming log warnings. 32 | - Don't deliver incoming messages in fragmented SCTP packets as whole messages, 33 | we do not support fragmented SCTP at all yet. 34 | - API incompatible change: Simplify the API for receiving messages, returning a 35 | borrowed buffer for incoming messages, eliminating both `RecvError` and a 36 | needless memcpy. 37 | - API incompatible change: There is no longer a distinction between a client 38 | that is not fully connected and a client that has been disconnected, both are 39 | now just `NotConnected`. 40 | - Add a method on the server to list all currently established connections. 41 | - Dependency change from tokio to async-io, no longer requires a tokio runtime. 42 | 43 | ## [0.4.1] 44 | - Remove crossbeam dependency, use a new buffer pooling strategy that should be 45 | much faster 46 | 47 | ## [0.4.0] 48 | - API incompatible change: depend on futures 0.3, tokio 0.2, and refactor API to 49 | use stable async / await. 50 | 51 | ## [0.3.0] 52 | - Sign x509 certificates with sha256 instead of sha1 53 | - API changes: don't stutter with `Rtc` prefix, include more precise error types 54 | - Fix message type for received binary messages (thanks @slugalisk!) 55 | - Properly handle SCTP unreliability negotiation in init, better error logging 56 | to catch protocol errors faster in the future. Fixes major brokenness with 57 | firefox (huge thanks @Healthire!) 
58 | - Don't generate errors for what is indicative of logic bugs; simplifies the error
59 | API somewhat
60 |
61 | ## [0.2.1]
62 | - Small doc fixes
63 |
64 | ## [0.2.0]
65 | - Remove internal hyper server; the API now requires an external channel for session
66 | initiation (more flexible, only a small amount of server code required to
67 | exchange sessions using hyper; see the echo_server.rs example).
68 | - Fix several SCTP handling bugs; marginally more informative SCTP errors
69 | - Easier to run the example (no longer requires nix, uses hyper to serve index page)
70 | - Remove some unnecessary dependencies
71 | - Fix some error handling bugs around SSL errors
72 |
73 | ## [0.1.1]
74 | - Change SCTP_COOKIE value to a more informative one
75 | - Add IPv6 support to SDP
76 |
77 | ## [0.1.0]
78 | - Initial release
79 |
--------------------------------------------------------------------------------
/examples/echo-server.html:
--------------------------------------------------------------------------------
(The HTML and JavaScript of this file were lost during text extraction; only the page title "WebRTC Data Channel Echo Server Example" survives. The file is the browser-side page for the echo example, served as the index page by examples/echo-server.rs.)
--------------------------------------------------------------------------------
/src/sdp.rs:
--------------------------------------------------------------------------------
1 | use std::{error, str};
2 |
3 | use futures_core::Stream;
4 | use futures_util::{pin_mut, StreamExt};
5 | use rand::Rng;
6 |
7 | pub type Error = Box<dyn error::Error + Send + Sync>;
8 |
9 | #[derive(Debug)]
10 | pub struct SdpFields {
11 | pub ice_ufrag: String,
12 | pub ice_passwd: String,
13 | pub mid: String,
14 | }
15 |
16 | pub async fn parse_sdp_fields<S, I, E>(body: S) -> Result<SdpFields, Error>
17 | where
18 | I: AsRef<[u8]>,
19 | E: Into<Error>,
20 | S: Stream<Item = Result<I, E>>,
21 | {
22 | const MAX_SDP_LINE: usize = 512;
23 |
24 | fn after_prefix<'a>(s: &'a [u8], prefix: &[u8]) -> Option<&'a [u8]> {
25 | if s.starts_with(prefix) {
26 | Some(&s[prefix.len()..])
27 | } else {
28 | None
29 | }
30 | }
31 |
32 | let mut line_buf = Vec::new();
33 | line_buf.reserve(MAX_SDP_LINE);
34 |
35 | let mut found_ice_ufrag = None;
36 | let mut found_ice_passwd = None;
37 | let mut found_mid = None;
38 |
39 | pin_mut!(body);
40 | while let Some(res) = body.next().await {
41 | let chunk = res.map_err(Into::into)?;
42 | for &c in chunk.as_ref() {
43 | if c == b'\r' || c == b'\n' {
44 | if !line_buf.is_empty() {
45 | if let Some(ice_ufrag) = after_prefix(&line_buf, b"a=ice-ufrag:") {
46 | found_ice_ufrag = Some(String::from_utf8(ice_ufrag.to_vec())?);
47 | }
48 | if let Some(ice_passwd) = after_prefix(&line_buf, b"a=ice-pwd:") {
49 | found_ice_passwd = Some(String::from_utf8(ice_passwd.to_vec())?);
50 | }
51 | if let Some(mid) = after_prefix(&line_buf, b"a=mid:") {
52 | found_mid = Some(String::from_utf8(mid.to_vec())?);
53 | }
54 | line_buf.clear();
55 | }
56 | } else {
57 | if line_buf.len() < MAX_SDP_LINE {
58 | line_buf.push(c);
59 | }
60 | }
61 | }
62 | }
63 |
64 | match (found_ice_ufrag, found_ice_passwd, found_mid) {
65 | (Some(ice_ufrag), Some(ice_passwd), Some(mid)) => Ok(SdpFields {
66 | ice_ufrag,
67 | ice_passwd,
68 | mid,
69 | }),
70 | _ => Err("not all SDP fields provided".into()),
71 | }
72 | }
73 |
74 | pub fn gen_sdp_response<R: Rng>(
75 | rng: &mut R,
76 | cert_fingerprint: &str,
77 | server_ip: &str,
78 | server_is_ipv6: bool,
79 | server_port: u16,
80 | ufrag: &str,
81 | pass: &str,
82 | remote_mid: &str,
83 | ) -> String {
84 | format!(
85 | "{{\"answer\":{{\"sdp\":\"v=0\\r\\n\
86 | o=- {rand1} 1 IN {ipv} {port}\\r\\n\
87 | s=-\\r\\n\
88 | c=IN {ipv} {ip}\\r\\n\
89 | t=0 0\\r\\n\
90 | a=ice-lite\\r\\n\
91 | a=ice-ufrag:{ufrag}\\r\\n\
92 | a=ice-pwd:{pass}\\r\\n\
93 | m=application {port} UDP/DTLS/SCTP webrtc-datachannel\\r\\n\
94 | a=fingerprint:sha-256 {fingerprint}\\r\\n\
95 | a=ice-options:trickle\\r\\n\
96 | a=setup:passive\\r\\n\
97 | a=mid:{mid}\\r\\n\
98 | a=sctp-port:{port}\\r\\n\",\
99 | \"type\":\"answer\"}},\"candidate\":{{\"sdpMLineIndex\":0,\
100 | \"sdpMid\":\"{mid}\",\"candidate\":\"candidate:1 1 UDP {rand2} {ip} {port} \
101 | typ host\"}}}}",
102 | rand1 = rng.gen::<u32>(),
103 | rand2 = rng.gen::<u32>(),
104 | fingerprint = cert_fingerprint,
105 | ip = server_ip,
106 | port = server_port,
107 | ufrag = ufrag,
108 | pass = pass,
109 | mid = remote_mid,
110 | ipv = if server_is_ipv6 { "IP6" } else { "IP4" },
111 | )
112 | }
113 |
--------------------------------------------------------------------------------
/src/crypto.rs:
--------------------------------------------------------------------------------
1 | use std::{fmt::Write as _, sync::Arc};
2 |
3 | use openssl::{
4 | asn1::Asn1Time,
5 | bn::BigNum,
6 | bn::MsbOption,
7 | error::ErrorStack,
8 | hash::MessageDigest,
9 | nid::Nid,
10 | pkey::PKey,
11 | rsa::Rsa,
12 | ssl::{SslAcceptor, SslMethod, SslVerifyMode},
13 | x509::{X509NameBuilder, X509},
14 | };
15 |
16 | /// A TLS private / public key pair and certificate.
17 | #[derive(Clone)]
18 | pub struct SslConfig {
19 | pub(crate) fingerprint: String,
20 | pub(crate) ssl_acceptor: Arc<SslAcceptor>,
21 | }
22 |
23 | impl SslConfig {
24 | /// Generates an anonymous private / public key pair and self-signed certificate.
25 | ///
26 | /// The certificate can be self-signed because the trust in the `webrtc-unreliable` server comes
27 | /// from the certificate fingerprint embedded in the session response. If the session response
28 | /// descriptor is delivered over a trusted channel (such as HTTPS with a valid server
29 | /// certificate), the client will verify that the self-signed certificate matches
30 | /// the fingerprint, and so the resulting DTLS connection will have the same level of
31 | /// authentication.
32 | ///
33 | /// Client connections are assumed to be anonymous and are unverified; authentication can be
34 | /// handled through the resulting WebRTC data channel.
35 | pub fn create() -> Result<SslConfig, ErrorStack> {
36 | const X509_DAYS_NOT_BEFORE: u32 = 0;
37 | const X509_DAYS_NOT_AFTER: u32 = 365;
38 |
39 | // TODO: Let the user pick the crypto settings?
40 | let rsa = Rsa::generate(4096)?;
41 | let key = PKey::from_rsa(rsa)?;
42 | let x509_sign_digest = MessageDigest::sha256();
43 |
44 | // TODO: Fingerprint digest is hard-coded to 'sha-256' in SDP.
45 | let x509_fingerprint_digest = MessageDigest::sha256(); 46 | 47 | let mut name_builder = X509NameBuilder::new()?; 48 | name_builder.append_entry_by_nid(Nid::COMMONNAME, "webrtc-unreliable")?; 49 | let name = name_builder.build(); 50 | 51 | let mut x509_builder = X509::builder()?; 52 | x509_builder.set_version(2)?; 53 | x509_builder.set_subject_name(&name)?; 54 | x509_builder.set_issuer_name(&name)?; 55 | let mut serial = BigNum::new().unwrap(); 56 | serial.rand(128, MsbOption::MAYBE_ZERO, false).unwrap(); 57 | x509_builder.set_serial_number(&serial.to_asn1_integer().unwrap())?; 58 | let not_before = Asn1Time::days_from_now(X509_DAYS_NOT_BEFORE)?; 59 | let not_after = Asn1Time::days_from_now(X509_DAYS_NOT_AFTER)?; 60 | x509_builder.set_not_before(¬_before)?; 61 | x509_builder.set_not_after(¬_after)?; 62 | x509_builder.set_pubkey(&key)?; 63 | x509_builder.sign(&key, x509_sign_digest)?; 64 | let x509 = x509_builder.build(); 65 | 66 | let x509_digest = x509.digest(x509_fingerprint_digest)?; 67 | let mut fingerprint = String::new(); 68 | for i in 0..x509_digest.len() { 69 | write!(fingerprint, "{:02X}", x509_digest[i]).unwrap(); 70 | if i != x509_digest.len() - 1 { 71 | write!(fingerprint, ":").unwrap(); 72 | } 73 | } 74 | 75 | let mut ssl_acceptor_builder = SslAcceptor::mozilla_intermediate(SslMethod::dtls())?; 76 | 77 | // `webrtc-unreliable` does not bother to verify client certificates because it is designed 78 | // to be used as a dedicated server with arbitrary clients. The client will verify the 79 | // server's certificate via the fingerprint provided inside the SDP descriptor, so if the 80 | // descriptor is delivered over a verified channel (such as HTTPS with a valid server 81 | // certificate), the resulting DTLS connection should have the same level of verification. 82 | // This should prevent MITM attacks against the DTLS connection (tricking the client to 83 | // connect to some other server than the verified one). Client authentication (such as 84 | // username / password) can then be handled through the resulting WebRTC data channel. 85 | // 86 | // TODO: Somebody who is actually good at this stuff should verify this. 
87 | ssl_acceptor_builder.set_verify(SslVerifyMode::NONE); 88 | 89 | ssl_acceptor_builder.set_private_key(&key)?; 90 | ssl_acceptor_builder.set_certificate(&x509)?; 91 | let ssl_acceptor = Arc::new(ssl_acceptor_builder.build()); 92 | 93 | Ok(SslConfig { 94 | fingerprint, 95 | ssl_acceptor, 96 | }) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /examples/echo-server.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | 3 | use clap::{Arg, Command}; 4 | use futures::stream::TryStreamExt; 5 | use http_body_util::BodyStream; 6 | use hyper::{ 7 | header::{self, HeaderValue}, 8 | server::conn::http1, 9 | service::service_fn, 10 | Method, Response, StatusCode, 11 | }; 12 | use hyper_util::rt::TokioIo; 13 | use tokio::net::TcpListener; 14 | 15 | #[tokio::main] 16 | async fn main() { 17 | env_logger::init_from_env(env_logger::Env::new().default_filter_or("debug")); 18 | 19 | let matches = Command::new("echo-server") 20 | .arg( 21 | Arg::new("data") 22 | .short('d') 23 | .long("data") 24 | .takes_value(true) 25 | .required(true) 26 | .help("listen on the specified address/port for UDP WebRTC data channels"), 27 | ) 28 | .arg( 29 | Arg::new("public") 30 | .short('p') 31 | .long("public") 32 | .takes_value(true) 33 | .required(true) 34 | .help("advertise the given address/port as the public WebRTC address/port"), 35 | ) 36 | .arg( 37 | Arg::new("http") 38 | .short('h') 39 | .long("http") 40 | .takes_value(true) 41 | .required(true) 42 | .help("listen on the specified address/port for incoming HTTP (session reqeusts and test page"), 43 | ) 44 | .get_matches(); 45 | 46 | let webrtc_listen_addr: SocketAddr = matches 47 | .value_of("data") 48 | .unwrap() 49 | .parse() 50 | .expect("could not parse WebRTC data address/port"); 51 | 52 | let public_webrtc_addr: SocketAddr = matches 53 | .value_of("public") 54 | .unwrap() 55 | .parse() 56 | .expect("could not parse advertised public WebRTC data address/port"); 57 | 58 | let session_listen_addr: SocketAddr = matches 59 | .value_of("http") 60 | .unwrap() 61 | .parse() 62 | .expect("could not parse HTTP address/port"); 63 | 64 | let mut rtc_server = 65 | webrtc_unreliable::tokio::new_server(webrtc_listen_addr, public_webrtc_addr) 66 | .expect("could not start RTC server"); 67 | 68 | let session_endpoint = rtc_server.session_endpoint(); 69 | 70 | tokio::spawn(async move { 71 | let listener = TcpListener::bind(session_listen_addr) 72 | .await 73 | .expect("could not listen on HTTP address/port"); 74 | 75 | loop { 76 | match listener.accept().await { 77 | Err(err) => { 78 | log::warn!("error accepting incoming HTTP connection: {:?}", err); 79 | } 80 | Ok((stream, remote_addr)) => { 81 | let io = TokioIo::new(stream); 82 | let session_endpoint = session_endpoint.clone(); 83 | tokio::spawn(async move { 84 | if let Err(err) = http1::Builder::new() 85 | .serve_connection( 86 | io, 87 | service_fn(|req| async { 88 | let mut session_endpoint = session_endpoint.clone(); 89 | if req.uri().path() == "/" 90 | || req.uri().path() == "/index.html" 91 | && req.method() == Method::GET 92 | { 93 | log::info!("serving example index HTML to {}", remote_addr); 94 | Response::builder() 95 | .body(include_str!("./echo-server.html").to_owned()) 96 | } else if req.uri().path() == "/new_rtc_session" 97 | && req.method() == Method::POST 98 | { 99 | log::info!("WebRTC session request from {}", remote_addr); 100 | match session_endpoint 101 | .http_session_request( 102 | 
BodyStream::new(req.into_body()).try_filter_map( 103 | |f| async { Ok(f.into_data().ok()) }, 104 | ), 105 | ) 106 | .await 107 | { 108 | Ok(mut resp) => { 109 | resp.headers_mut().insert( 110 | header::ACCESS_CONTROL_ALLOW_ORIGIN, 111 | HeaderValue::from_static("*"), 112 | ); 113 | Ok(resp) 114 | } 115 | Err(err) => { 116 | log::warn!("bad rtc session request: {:?}", err); 117 | Response::builder() 118 | .status(StatusCode::BAD_REQUEST) 119 | .body(format!("error: {:?}", err)) 120 | } 121 | } 122 | } else { 123 | Response::builder() 124 | .status(StatusCode::NOT_FOUND) 125 | .body("not found".to_owned()) 126 | } 127 | }), 128 | ) 129 | .await 130 | { 131 | log::warn!("error serving connection: {:?}", err); 132 | } 133 | }); 134 | } 135 | } 136 | } 137 | }); 138 | 139 | let mut message_buf = Vec::new(); 140 | loop { 141 | let received = match rtc_server.recv().await { 142 | Ok(received) => { 143 | message_buf.clear(); 144 | message_buf.extend(received.message.as_ref()); 145 | Some((received.message_type, received.remote_addr)) 146 | } 147 | Err(err) => { 148 | log::warn!("could not receive RTC message: {:?}", err); 149 | None 150 | } 151 | }; 152 | 153 | if let Some((message_type, remote_addr)) = received { 154 | if let Err(err) = rtc_server 155 | .send(&message_buf, message_type, &remote_addr) 156 | .await 157 | { 158 | log::warn!("could not send message to {}: {:?}", remote_addr, err); 159 | } 160 | } 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /src/stun.rs: -------------------------------------------------------------------------------- 1 | use std::{error, net::SocketAddr, str}; 2 | 3 | use byteorder::{ByteOrder, NetworkEndian}; 4 | use crc::{Crc, CRC_32_ISO_HDLC}; 5 | use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer}; 6 | 7 | pub type Error = Box; 8 | 9 | #[derive(Debug)] 10 | pub struct StunBindingRequest { 11 | pub transaction_id: [u8; STUN_TRANSACTION_ID_LEN], 12 | pub remote_user: String, 13 | pub server_user: String, 14 | } 15 | 16 | pub fn parse_stun_binding_request(bytes: &[u8]) -> Option { 17 | if bytes.len() < STUN_HEADER_LEN { 18 | return None; 19 | } 20 | 21 | let stun_type = NetworkEndian::read_u16(&bytes[0..2]); 22 | if stun_type != StunType::BindingRequest as u16 { 23 | return None; 24 | } 25 | 26 | let length = NetworkEndian::read_u16(&bytes[2..4]) as usize; 27 | if length < 4 || STUN_HEADER_LEN + length > bytes.len() { 28 | return None; 29 | } 30 | 31 | if bytes[4..8] != STUN_COOKIE { 32 | return None; 33 | } 34 | 35 | let mut transaction_id = [0; STUN_TRANSACTION_ID_LEN]; 36 | transaction_id.copy_from_slice(&bytes[8..STUN_HEADER_LEN]); 37 | 38 | let mut offset = STUN_HEADER_LEN; 39 | while offset < STUN_HEADER_LEN + length - 4 { 40 | let payload_type = NetworkEndian::read_u16(&bytes[offset..offset + 2]); 41 | let payload_len = NetworkEndian::read_u16(&bytes[offset + 2..offset + 4]) as usize; 42 | offset += 4; 43 | let padded_len = (payload_len + STUN_ALIGNMENT - 1) & !(STUN_ALIGNMENT - 1); 44 | if offset + padded_len > STUN_HEADER_LEN + length { 45 | return None; 46 | } 47 | if payload_type == StunAttributeType::User as u16 { 48 | let server_and_remote_user = &bytes[offset..offset + payload_len]; 49 | let colon = server_and_remote_user.iter().position(|&c| c == b':')?; 50 | let server_user = &server_and_remote_user[0..colon]; 51 | let remote_user = &server_and_remote_user[colon + 1..]; 52 | if server_user.len() > STUN_MAX_IDENTIFIER_LEN 53 | || remote_user.len() > STUN_MAX_IDENTIFIER_LEN 54 | { 55 | 
return None; 56 | } 57 | let server_user = str::from_utf8(server_user).ok()?.to_owned(); 58 | let remote_user = str::from_utf8(remote_user).ok()?.to_owned(); 59 | 60 | return Some(StunBindingRequest { 61 | transaction_id, 62 | remote_user, 63 | server_user, 64 | }); 65 | } 66 | offset += padded_len; 67 | } 68 | None 69 | } 70 | 71 | pub fn write_stun_success_response( 72 | transaction_id: [u8; STUN_TRANSACTION_ID_LEN], 73 | remote_addr: SocketAddr, 74 | passwd: &[u8], 75 | out: &mut [u8], 76 | ) -> Result { 77 | const CRC: Crc = Crc::::new(&CRC_32_ISO_HDLC); 78 | 79 | const ATTRIBUTE_MARKER_LEN: usize = 4; 80 | const IPV4_ADDR_ATTRIBUTE_LEN: usize = 8; 81 | const IPV6_ADDR_ATTRIBUTE_LEN: usize = 20; 82 | const INTEGRITY_ATTRIBUTE_LEN: usize = 20; 83 | const FINGERPRINT_ATTRIBUTE_LEN: usize = 4; 84 | 85 | let addr_attribute_len = if remote_addr.is_ipv4() { 86 | IPV4_ADDR_ATTRIBUTE_LEN 87 | } else { 88 | IPV6_ADDR_ATTRIBUTE_LEN 89 | }; 90 | let content_len_integrity = 91 | ATTRIBUTE_MARKER_LEN * 2 + addr_attribute_len + INTEGRITY_ATTRIBUTE_LEN; 92 | let content_len = content_len_integrity + ATTRIBUTE_MARKER_LEN + FINGERPRINT_ATTRIBUTE_LEN; 93 | 94 | if STUN_HEADER_LEN + content_len > out.len() { 95 | return Err("output buffer too small for STUN response".into()); 96 | } 97 | 98 | let (header, rest) = out.split_at_mut(STUN_HEADER_LEN); 99 | let (addr_attribute, rest) = rest.split_at_mut(ATTRIBUTE_MARKER_LEN + addr_attribute_len); 100 | let (integrity_attribute, fingerprint_attribute) = 101 | rest.split_at_mut(ATTRIBUTE_MARKER_LEN + INTEGRITY_ATTRIBUTE_LEN); 102 | 103 | NetworkEndian::write_u16(&mut header[0..2], StunType::SuccessResponse as u16); 104 | NetworkEndian::write_u16(&mut header[2..4], content_len_integrity as u16); 105 | header[4..8].copy_from_slice(&STUN_COOKIE); 106 | header[8..20].copy_from_slice(&transaction_id); 107 | 108 | NetworkEndian::write_u16( 109 | &mut addr_attribute[0..2], 110 | StunAttributeType::XorMappedAddress as u16, 111 | ); 112 | NetworkEndian::write_u16(&mut addr_attribute[2..4], addr_attribute_len as u16); 113 | match remote_addr { 114 | SocketAddr::V4(remote_addr) => { 115 | addr_attribute[4] = 0; 116 | addr_attribute[5] = StunAddressFamily::IPV4 as u8; 117 | NetworkEndian::write_u16(&mut addr_attribute[6..8], remote_addr.port()); 118 | xor_range(&mut addr_attribute[6..8], &STUN_COOKIE); 119 | addr_attribute[8..12].copy_from_slice(&remote_addr.ip().octets()); 120 | xor_range(&mut addr_attribute[8..12], &STUN_COOKIE); 121 | } 122 | SocketAddr::V6(remote_addr) => { 123 | addr_attribute[4] = 0; 124 | addr_attribute[5] = StunAddressFamily::IPV6 as u8; 125 | NetworkEndian::write_u16(&mut addr_attribute[6..8], remote_addr.port()); 126 | xor_range(&mut addr_attribute[6..8], &STUN_COOKIE); 127 | addr_attribute[8..24].copy_from_slice(&remote_addr.ip().octets()); 128 | xor_range(&mut addr_attribute[8..12], &STUN_COOKIE); 129 | xor_range(&mut addr_attribute[12..24], &transaction_id); 130 | } 131 | } 132 | 133 | let key = PKey::hmac(passwd)?; 134 | let mut signer = Signer::new(MessageDigest::sha1(), &key)?; 135 | signer.update(header)?; 136 | signer.update(addr_attribute)?; 137 | let mut hmac = [0; INTEGRITY_ATTRIBUTE_LEN]; 138 | signer.sign(&mut hmac)?; 139 | 140 | NetworkEndian::write_u16( 141 | &mut integrity_attribute[0..2], 142 | StunAttributeType::MessageIntegrity as u16, 143 | ); 144 | NetworkEndian::write_u16( 145 | &mut integrity_attribute[2..4], 146 | INTEGRITY_ATTRIBUTE_LEN as u16, 147 | ); 148 | integrity_attribute[4..].copy_from_slice(&hmac); 149 | 150 | 
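// The MESSAGE-INTEGRITY HMAC above was computed while the header's length field
// still covered only the attributes up to and including MESSAGE-INTEGRITY;
// rewrite the length to the full content length so that the FINGERPRINT CRC
// below is calculated over the final header.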
NetworkEndian::write_u16(&mut header[2..4], content_len as u16); 151 | 152 | let mut digest = CRC.digest(); 153 | digest.update(&header); 154 | digest.update(&addr_attribute); 155 | digest.update(&integrity_attribute); 156 | let crc = digest.finalize(); 157 | 158 | NetworkEndian::write_u16( 159 | &mut fingerprint_attribute[0..2], 160 | StunAttributeType::Fingerprint as u16, 161 | ); 162 | NetworkEndian::write_u16( 163 | &mut fingerprint_attribute[2..4], 164 | FINGERPRINT_ATTRIBUTE_LEN as u16, 165 | ); 166 | NetworkEndian::write_u32(&mut fingerprint_attribute[4..8], crc ^ STUN_CRC_XOR); 167 | 168 | Ok(STUN_HEADER_LEN + content_len) 169 | } 170 | 171 | enum StunType { 172 | BindingRequest = 0x0001, 173 | SuccessResponse = 0x0101, 174 | } 175 | 176 | enum StunAttributeType { 177 | User = 0x06, 178 | MessageIntegrity = 0x08, 179 | XorMappedAddress = 0x20, 180 | Fingerprint = 0x8028, 181 | } 182 | 183 | enum StunAddressFamily { 184 | IPV4 = 0x01, 185 | IPV6 = 0x02, 186 | } 187 | 188 | const STUN_TRANSACTION_ID_LEN: usize = 12; 189 | const STUN_MAX_IDENTIFIER_LEN: usize = 128; 190 | const STUN_HEADER_LEN: usize = 20; 191 | const STUN_ALIGNMENT: usize = 4; 192 | const STUN_COOKIE: [u8; 4] = [0x21, 0x12, 0xa4, 0x42]; 193 | const STUN_CRC_XOR: u32 = 0x5354554e; 194 | 195 | fn xor_range(target: &mut [u8], xor: &[u8]) { 196 | for i in 0..target.len() { 197 | target[i] ^= xor[i]; 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /src/sctp.rs: -------------------------------------------------------------------------------- 1 | use std::{convert::TryInto, error::Error, fmt}; 2 | 3 | use byteorder::{ByteOrder, LittleEndian, NetworkEndian}; 4 | use crc::{Crc, CRC_32_ISCSI}; 5 | 6 | pub const SCTP_FLAG_END_FRAGMENT: u8 = 0x01; 7 | pub const SCTP_FLAG_BEGIN_FRAGMENT: u8 = 0x02; 8 | pub const SCTP_FLAG_UNRELIABLE: u8 = 0x04; 9 | 10 | pub const SCTP_FLAG_COMPLETE_UNRELIABLE: u8 = 11 | SCTP_FLAG_BEGIN_FRAGMENT | SCTP_FLAG_END_FRAGMENT | SCTP_FLAG_UNRELIABLE; 12 | 13 | #[derive(Debug, Copy, Clone)] 14 | pub enum SctpChunk<'a> { 15 | Data { 16 | chunk_flags: u8, 17 | tsn: u32, 18 | stream_id: u16, 19 | stream_seq: u16, 20 | proto_id: u32, 21 | user_data: &'a [u8], 22 | }, 23 | Init { 24 | initiate_tag: u32, 25 | window_credit: u32, 26 | num_outbound_streams: u16, 27 | num_inbound_streams: u16, 28 | initial_tsn: u32, 29 | support_unreliable: bool, 30 | }, 31 | InitAck { 32 | initiate_tag: u32, 33 | window_credit: u32, 34 | num_outbound_streams: u16, 35 | num_inbound_streams: u16, 36 | initial_tsn: u32, 37 | state_cookie: &'a [u8], 38 | }, 39 | SAck { 40 | cumulative_tsn_ack: u32, 41 | adv_recv_window: u32, 42 | num_gap_ack_blocks: u16, 43 | num_dup_tsn: u16, 44 | }, 45 | Heartbeat { 46 | heartbeat_info: Option<&'a [u8]>, 47 | }, 48 | HeartbeatAck { 49 | heartbeat_info: Option<&'a [u8]>, 50 | }, 51 | Abort, 52 | Shutdown { 53 | cumulative_tsn_ack: u32, 54 | }, 55 | ShutdownAck, 56 | Error { 57 | first_param_type: u16, 58 | first_param_data: &'a [u8], 59 | }, 60 | CookieEcho { 61 | state_cookie: &'a [u8], 62 | }, 63 | CookieAck, 64 | ShutdownComplete, 65 | Auth, 66 | AsConfAck, 67 | ReConfig, 68 | Pad, 69 | IData, 70 | ForwardTsn { 71 | new_cumulative_tsn: u32, 72 | }, 73 | AsConf, 74 | IForwardTsn, 75 | } 76 | 77 | #[derive(Debug)] 78 | pub struct SctpPacket<'a> { 79 | pub source_port: u16, 80 | pub dest_port: u16, 81 | pub verification_tag: u32, 82 | pub chunks: &'a [SctpChunk<'a>], 83 | } 84 | 85 | #[derive(Debug)] 86 | pub enum SctpReadError { 87 | BadPacket, 
88 | BadChecksum, 89 | TooManyChunks, 90 | } 91 | 92 | impl fmt::Display for SctpReadError { 93 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 94 | match self { 95 | SctpReadError::BadPacket => write!(f, "bad sctp packet"), 96 | SctpReadError::BadChecksum => write!(f, "bad sctp checksum"), 97 | SctpReadError::TooManyChunks => write!(f, "too many sctp chunks for provided buffer"), 98 | } 99 | } 100 | } 101 | 102 | impl Error for SctpReadError {} 103 | 104 | pub fn read_sctp_packet<'a>( 105 | src: &'a [u8], 106 | check_crc: bool, 107 | chunk_space: &'a mut [SctpChunk<'a>], 108 | ) -> Result, SctpReadError> { 109 | if src.len() < 16 { 110 | return Err(SctpReadError::BadPacket); 111 | } 112 | 113 | let source_port = NetworkEndian::read_u16(&src[0..2]); 114 | let dest_port = NetworkEndian::read_u16(&src[2..4]); 115 | let verification_tag = NetworkEndian::read_u32(&src[4..8]); 116 | let checksum = LittleEndian::read_u32(&src[8..12]); 117 | 118 | if check_crc { 119 | let mut digest = CRC.digest(); 120 | digest.update(&src[0..8]); 121 | digest.update(&[0, 0, 0, 0]); 122 | digest.update(&src[12..]); 123 | if checksum != digest.finalize() { 124 | return Err(SctpReadError::BadChecksum); 125 | } 126 | } 127 | 128 | let mut remaining_chunks = &src[12..]; 129 | let mut chunk_count = 0; 130 | while remaining_chunks.len() > 4 { 131 | if chunk_count >= chunk_space.len() { 132 | return Err(SctpReadError::TooManyChunks); 133 | } 134 | let chunk = &mut chunk_space[chunk_count]; 135 | 136 | let chunk_type = remaining_chunks[0]; 137 | let chunk_flags = remaining_chunks[1]; 138 | let chunk_len = NetworkEndian::read_u16(&remaining_chunks[2..4]); 139 | 140 | let chunk_next = next_multiple(chunk_len as usize, 4); 141 | if chunk_next > remaining_chunks.len() || chunk_len < 4 { 142 | return Err(SctpReadError::BadPacket); 143 | } 144 | 145 | let chunk_data = &remaining_chunks[4..chunk_len as usize]; 146 | match chunk_type { 147 | CHUNK_TYPE_DATA => { 148 | if chunk_data.len() < 12 { 149 | return Err(SctpReadError::BadPacket); 150 | } 151 | 152 | let tsn = NetworkEndian::read_u32(&chunk_data[0..4]); 153 | let stream_id = NetworkEndian::read_u16(&chunk_data[4..6]); 154 | let stream_seq = NetworkEndian::read_u16(&chunk_data[6..8]); 155 | let proto_id = NetworkEndian::read_u32(&chunk_data[8..12]); 156 | let user_data = &chunk_data[12..]; 157 | *chunk = SctpChunk::Data { 158 | chunk_flags, 159 | tsn, 160 | stream_id, 161 | stream_seq, 162 | proto_id, 163 | user_data, 164 | }; 165 | } 166 | CHUNK_TYPE_INIT | CHUNK_TYPE_INIT_ACK => { 167 | if chunk_data.len() < 16 { 168 | return Err(SctpReadError::BadPacket); 169 | } 170 | 171 | let initiate_tag = NetworkEndian::read_u32(&chunk_data[0..4]); 172 | let window_credit = NetworkEndian::read_u32(&chunk_data[4..8]); 173 | let num_outbound_streams = NetworkEndian::read_u16(&chunk_data[8..10]); 174 | let num_inbound_streams = NetworkEndian::read_u16(&chunk_data[10..12]); 175 | let initial_tsn = NetworkEndian::read_u32(&chunk_data[12..16]); 176 | 177 | if chunk_type == CHUNK_TYPE_INIT { 178 | let mut support_unreliable = false; 179 | for param in iter_params(&chunk_data, 16) { 180 | match param { 181 | Err(_) => return Err(SctpReadError::BadPacket), 182 | Ok((param_type, param_data)) => match param_type { 183 | INIT_PARAM_FORWARD_TSN => { 184 | support_unreliable = true; 185 | break; 186 | } 187 | INIT_PARAM_SUPPORTED_EXTENSIONS => { 188 | let mut index = 0; 189 | 'inner: loop { 190 | if index >= param_data.len() { 191 | break 'inner; 192 | } 193 | if 
param_data[index] == INIT_PARAM_EXT_FORWARD_TSN { 194 | support_unreliable = true; 195 | break 'inner; 196 | } 197 | index += 1; 198 | } 199 | if support_unreliable { 200 | break; 201 | } 202 | } 203 | _ => {} 204 | }, 205 | } 206 | } 207 | 208 | *chunk = SctpChunk::Init { 209 | initiate_tag, 210 | window_credit, 211 | num_outbound_streams, 212 | num_inbound_streams, 213 | initial_tsn, 214 | support_unreliable, 215 | }; 216 | } else { 217 | let (param_type, param_data) = iter_params(&chunk_data, 16) 218 | .next() 219 | .ok_or_else(|| SctpReadError::BadPacket) 220 | .and_then(|v| v.map_err(|_| SctpReadError::BadPacket))?; 221 | // first parameter must be the state cookie 222 | if param_type != INIT_ACK_PARAM_STATE_COOKIE { 223 | return Err(SctpReadError::BadPacket); 224 | } 225 | 226 | *chunk = SctpChunk::InitAck { 227 | initiate_tag, 228 | window_credit, 229 | num_outbound_streams, 230 | num_inbound_streams, 231 | initial_tsn, 232 | state_cookie: param_data, 233 | }; 234 | } 235 | } 236 | CHUNK_TYPE_SACK => { 237 | if chunk_data.len() < 12 { 238 | return Err(SctpReadError::BadPacket); 239 | } 240 | 241 | let cumulative_tsn_ack = NetworkEndian::read_u32(&chunk_data[0..4]); 242 | let adv_recv_window = NetworkEndian::read_u32(&chunk_data[4..8]); 243 | let num_gap_ack_blocks = NetworkEndian::read_u16(&chunk_data[8..10]); 244 | let num_dup_tsn = NetworkEndian::read_u16(&chunk_data[10..12]); 245 | 246 | *chunk = SctpChunk::SAck { 247 | cumulative_tsn_ack, 248 | adv_recv_window, 249 | num_gap_ack_blocks, 250 | num_dup_tsn, 251 | }; 252 | } 253 | CHUNK_TYPE_HEARTBEAT | CHUNK_TYPE_HEARTBEAT_ACK => { 254 | let mut heartbeat_info = None; 255 | if chunk_data.len() > 4 { 256 | let param_type = NetworkEndian::read_u16(&chunk_data[0..2]); 257 | let param_len = NetworkEndian::read_u16(&chunk_data[2..4]); 258 | if param_type == HEARTBEAT_PARAM_INFO && (param_len as usize) < chunk_data.len() 259 | { 260 | heartbeat_info = Some(&chunk_data[4..param_len as usize]); 261 | } 262 | } 263 | 264 | if chunk_type == CHUNK_TYPE_HEARTBEAT { 265 | *chunk = SctpChunk::Heartbeat { heartbeat_info }; 266 | } else { 267 | *chunk = SctpChunk::HeartbeatAck { heartbeat_info }; 268 | } 269 | } 270 | CHUNK_TYPE_ABORT => { 271 | *chunk = SctpChunk::Abort; 272 | } 273 | CHUNK_TYPE_SHUTDOWN => { 274 | if chunk_data.len() < 4 { 275 | return Err(SctpReadError::BadPacket); 276 | } 277 | 278 | let cumulative_tsn_ack = NetworkEndian::read_u32(&chunk_data[0..4]); 279 | 280 | *chunk = SctpChunk::Shutdown { cumulative_tsn_ack }; 281 | } 282 | CHUNK_TYPE_SHUTDOWN_ACK => { 283 | *chunk = SctpChunk::ShutdownAck; 284 | } 285 | CHUNK_TYPE_ERROR => { 286 | let (first_param_type, first_param_data) = iter_params(&chunk_data, 0) 287 | .next() 288 | .ok_or_else(|| SctpReadError::BadPacket) 289 | .and_then(|v| v.map_err(|_| SctpReadError::BadPacket))?; 290 | 291 | *chunk = SctpChunk::Error { 292 | first_param_type, 293 | first_param_data, 294 | }; 295 | } 296 | CHUNK_TYPE_COOKIE_ECHO => { 297 | *chunk = SctpChunk::CookieEcho { 298 | state_cookie: chunk_data, 299 | } 300 | } 301 | CHUNK_TYPE_COOKIE_ACK => { 302 | *chunk = SctpChunk::CookieAck; 303 | } 304 | CHUNK_TYPE_SHUTDOWN_COMPLETE => { 305 | *chunk = SctpChunk::ShutdownComplete; 306 | } 307 | CHUNK_TYPE_AUTH => { 308 | *chunk = SctpChunk::Auth; 309 | } 310 | CHUNK_TYPE_ASCONF_ACK => { 311 | *chunk = SctpChunk::AsConfAck; 312 | } 313 | CHUNK_TYPE_RE_CONFIG => { 314 | *chunk = SctpChunk::ReConfig; 315 | } 316 | CHUNK_TYPE_PAD => { 317 | *chunk = SctpChunk::Pad; 318 | } 319 | CHUNK_TYPE_IDATA => { 
320 | *chunk = SctpChunk::IData; 321 | } 322 | CHUNK_TYPE_FORWARD_TSN => { 323 | if chunk_data.len() < 4 { 324 | return Err(SctpReadError::BadPacket); 325 | } 326 | 327 | let new_cumulative_tsn = NetworkEndian::read_u32(&chunk_data[0..4]); 328 | *chunk = SctpChunk::ForwardTsn { new_cumulative_tsn }; 329 | } 330 | CHUNK_TYPE_ASCONF => { 331 | *chunk = SctpChunk::AsConf; 332 | } 333 | CHUNK_TYPE_I_FORWARD_TSN => { 334 | *chunk = SctpChunk::IForwardTsn; 335 | } 336 | _ => return Err(SctpReadError::BadPacket), 337 | } 338 | 339 | remaining_chunks = &remaining_chunks[chunk_next..]; 340 | chunk_count += 1; 341 | } 342 | 343 | Ok(SctpPacket { 344 | source_port, 345 | dest_port, 346 | verification_tag, 347 | chunks: &chunk_space[0..chunk_count], 348 | }) 349 | } 350 | 351 | #[derive(Debug)] 352 | pub enum SctpWriteError { 353 | BufferSize, 354 | NoChunks, 355 | OutOfRange, 356 | } 357 | 358 | impl fmt::Display for SctpWriteError { 359 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 360 | match self { 361 | SctpWriteError::BufferSize => write!(f, "insufficient output buffer size"), 362 | SctpWriteError::NoChunks => write!(f, "SCTP packet must have at least one chunk"), 363 | SctpWriteError::OutOfRange => write!(f, "SCTP packet field is too large"), 364 | } 365 | } 366 | } 367 | 368 | impl Error for SctpWriteError {} 369 | 370 | pub fn write_sctp_packet(dest: &mut [u8], packet: SctpPacket) -> Result { 371 | if packet.chunks.is_empty() { 372 | return Err(SctpWriteError::NoChunks); 373 | } 374 | 375 | if dest.len() < 12 { 376 | return Err(SctpWriteError::BufferSize); 377 | } 378 | 379 | NetworkEndian::write_u16(&mut dest[0..2], packet.source_port); 380 | NetworkEndian::write_u16(&mut dest[2..4], packet.dest_port); 381 | NetworkEndian::write_u32(&mut dest[4..8], packet.verification_tag); 382 | dest[8..12].copy_from_slice(&[0, 0, 0, 0]); 383 | 384 | let mut rest = &mut dest[12..]; 385 | 386 | for &chunk in packet.chunks { 387 | if rest.len() < 8 { 388 | return Err(SctpWriteError::BufferSize); 389 | } 390 | 391 | let (chunk_header, chunk_data) = rest.split_at_mut(4); 392 | let (chunk_type, chunk_flags, data_len) = match chunk { 393 | SctpChunk::Data { 394 | chunk_flags, 395 | tsn, 396 | stream_id, 397 | stream_seq, 398 | proto_id, 399 | user_data, 400 | } => { 401 | let data_len = 12 + user_data.len(); 402 | if chunk_data.len() < data_len { 403 | return Err(SctpWriteError::BufferSize); 404 | } 405 | 406 | NetworkEndian::write_u32(&mut chunk_data[0..4], tsn); 407 | NetworkEndian::write_u16(&mut chunk_data[4..6], stream_id); 408 | NetworkEndian::write_u16(&mut chunk_data[6..8], stream_seq); 409 | NetworkEndian::write_u32(&mut chunk_data[8..12], proto_id); 410 | chunk_data[12..12 + user_data.len()].copy_from_slice(user_data); 411 | 412 | (CHUNK_TYPE_DATA, chunk_flags, data_len) 413 | } 414 | SctpChunk::Init { 415 | initiate_tag, 416 | window_credit, 417 | num_outbound_streams, 418 | num_inbound_streams, 419 | initial_tsn, 420 | support_unreliable, 421 | } => { 422 | let data_len = 16 + if support_unreliable { 4 } else { 0 }; 423 | if chunk_data.len() < data_len { 424 | return Err(SctpWriteError::BufferSize); 425 | } 426 | 427 | NetworkEndian::write_u32(&mut chunk_data[0..4], initiate_tag); 428 | NetworkEndian::write_u32(&mut chunk_data[4..8], window_credit); 429 | NetworkEndian::write_u16(&mut chunk_data[8..10], num_outbound_streams); 430 | NetworkEndian::write_u16(&mut chunk_data[10..12], num_inbound_streams); 431 | NetworkEndian::write_u32(&mut chunk_data[12..16], initial_tsn); 432 | 
433 | // forward tsn parameter 434 | if support_unreliable { 435 | NetworkEndian::write_u16(&mut chunk_data[16..18], INIT_PARAM_FORWARD_TSN); 436 | NetworkEndian::write_u16(&mut chunk_data[18..20], 4); 437 | } 438 | 439 | (CHUNK_TYPE_INIT, 0, data_len) 440 | } 441 | SctpChunk::InitAck { 442 | initiate_tag, 443 | window_credit, 444 | num_outbound_streams, 445 | num_inbound_streams, 446 | initial_tsn, 447 | state_cookie, 448 | } => { 449 | let data_len = 24 + state_cookie.len(); 450 | if chunk_data.len() < data_len { 451 | return Err(SctpWriteError::BufferSize); 452 | } 453 | 454 | NetworkEndian::write_u32(&mut chunk_data[0..4], initiate_tag); 455 | NetworkEndian::write_u32(&mut chunk_data[4..8], window_credit); 456 | NetworkEndian::write_u16(&mut chunk_data[8..10], num_outbound_streams); 457 | NetworkEndian::write_u16(&mut chunk_data[10..12], num_inbound_streams); 458 | NetworkEndian::write_u32(&mut chunk_data[12..16], initial_tsn); 459 | 460 | NetworkEndian::write_u16(&mut chunk_data[16..18], INIT_PARAM_FORWARD_TSN); 461 | NetworkEndian::write_u16(&mut chunk_data[18..20], 4); 462 | 463 | NetworkEndian::write_u16(&mut chunk_data[20..22], INIT_ACK_PARAM_STATE_COOKIE); 464 | NetworkEndian::write_u16( 465 | &mut chunk_data[22..24], 466 | (state_cookie.len() + 4) 467 | .try_into() 468 | .map_err(|_| SctpWriteError::OutOfRange)?, 469 | ); 470 | 471 | chunk_data[24..data_len].copy_from_slice(state_cookie); 472 | 473 | (CHUNK_TYPE_INIT_ACK, 0, data_len) 474 | } 475 | SctpChunk::SAck { 476 | cumulative_tsn_ack, 477 | adv_recv_window, 478 | num_gap_ack_blocks, 479 | num_dup_tsn, 480 | } => { 481 | let data_len = 12; 482 | if chunk_data.len() < data_len { 483 | return Err(SctpWriteError::BufferSize); 484 | } 485 | 486 | NetworkEndian::write_u32(&mut chunk_data[0..4], cumulative_tsn_ack); 487 | NetworkEndian::write_u32(&mut chunk_data[4..8], adv_recv_window); 488 | NetworkEndian::write_u16(&mut chunk_data[8..10], num_gap_ack_blocks); 489 | NetworkEndian::write_u16(&mut chunk_data[10..12], num_dup_tsn); 490 | 491 | (CHUNK_TYPE_SACK, 0, data_len) 492 | } 493 | SctpChunk::Heartbeat { heartbeat_info } 494 | | SctpChunk::HeartbeatAck { heartbeat_info } => { 495 | let chunk_type = if let SctpChunk::Heartbeat { .. 
} = chunk { 496 | CHUNK_TYPE_HEARTBEAT 497 | } else { 498 | CHUNK_TYPE_HEARTBEAT_ACK 499 | }; 500 | 501 | let data_len = if let Some(heartbeat_info) = heartbeat_info { 502 | let data_len = 4 + heartbeat_info.len(); 503 | if chunk_data.len() < data_len { 504 | return Err(SctpWriteError::BufferSize); 505 | } 506 | 507 | NetworkEndian::write_u16(&mut chunk_data[0..2], HEARTBEAT_PARAM_INFO); 508 | NetworkEndian::write_u16( 509 | &mut chunk_data[2..4], 510 | (heartbeat_info.len() + 4) 511 | .try_into() 512 | .map_err(|_| SctpWriteError::OutOfRange)?, 513 | ); 514 | chunk_data[4..4 + heartbeat_info.len()].copy_from_slice(heartbeat_info); 515 | data_len 516 | } else { 517 | 0 518 | }; 519 | 520 | (chunk_type, 0, data_len) 521 | } 522 | SctpChunk::Abort => (CHUNK_TYPE_ABORT, 0, 0), 523 | SctpChunk::Shutdown { cumulative_tsn_ack } => { 524 | let data_len = 4; 525 | if chunk_data.len() < 4 { 526 | return Err(SctpWriteError::BufferSize); 527 | } 528 | NetworkEndian::write_u32(&mut chunk_data[0..4], cumulative_tsn_ack); 529 | (CHUNK_TYPE_SHUTDOWN, 0, data_len) 530 | } 531 | SctpChunk::ShutdownAck => (CHUNK_TYPE_SHUTDOWN_ACK, 0, 0), 532 | SctpChunk::CookieEcho { state_cookie } => { 533 | if chunk_data.len() < state_cookie.len() { 534 | return Err(SctpWriteError::BufferSize); 535 | } 536 | chunk_data[0..state_cookie.len()].copy_from_slice(state_cookie); 537 | (CHUNK_TYPE_COOKIE_ECHO, 0, state_cookie.len()) 538 | } 539 | SctpChunk::CookieAck => (CHUNK_TYPE_COOKIE_ACK, 0, 0), 540 | SctpChunk::ShutdownComplete => (CHUNK_TYPE_SHUTDOWN_COMPLETE, 0, 0), 541 | SctpChunk::ForwardTsn { new_cumulative_tsn } => { 542 | let data_len = 4; 543 | if chunk_data.len() < 4 { 544 | return Err(SctpWriteError::BufferSize); 545 | } 546 | NetworkEndian::write_u32(&mut chunk_data[0..4], new_cumulative_tsn); 547 | (CHUNK_TYPE_FORWARD_TSN, 0, data_len) 548 | } 549 | chunk => unimplemented!("write for SCTP chunk {:?} not implemented", chunk), 550 | }; 551 | 552 | let data_padded_len = next_multiple(data_len, 4); 553 | for i in data_len..data_padded_len { 554 | chunk_data[i] = 0; 555 | } 556 | let chunk_len = data_len + 4; 557 | let chunk_padded_len = data_padded_len + 4; 558 | 559 | chunk_header[0] = chunk_type; 560 | chunk_header[1] = chunk_flags; 561 | NetworkEndian::write_u16( 562 | &mut chunk_header[2..4], 563 | chunk_len 564 | .try_into() 565 | .map_err(|_| SctpWriteError::OutOfRange)?, 566 | ); 567 | 568 | rest = &mut rest[chunk_padded_len..]; 569 | } 570 | 571 | let remainder = rest.len(); 572 | let len = dest.len() - remainder; 573 | let crc = CRC.checksum(&dest[0..len]); 574 | LittleEndian::write_u32(&mut dest[8..12], crc); 575 | 576 | Ok(len) 577 | } 578 | 579 | const CRC: Crc = Crc::::new(&CRC_32_ISCSI); 580 | 581 | const CHUNK_TYPE_DATA: u8 = 0x00; 582 | const CHUNK_TYPE_INIT: u8 = 0x01; 583 | const CHUNK_TYPE_INIT_ACK: u8 = 0x02; 584 | const CHUNK_TYPE_SACK: u8 = 0x03; 585 | const CHUNK_TYPE_HEARTBEAT: u8 = 0x04; 586 | const CHUNK_TYPE_HEARTBEAT_ACK: u8 = 0x05; 587 | const CHUNK_TYPE_ABORT: u8 = 0x06; 588 | const CHUNK_TYPE_SHUTDOWN: u8 = 0x07; 589 | const CHUNK_TYPE_SHUTDOWN_ACK: u8 = 0x08; 590 | const CHUNK_TYPE_ERROR: u8 = 0x09; 591 | const CHUNK_TYPE_COOKIE_ECHO: u8 = 0x0a; 592 | const CHUNK_TYPE_COOKIE_ACK: u8 = 0x0b; 593 | const CHUNK_TYPE_SHUTDOWN_COMPLETE: u8 = 0x0e; 594 | const CHUNK_TYPE_AUTH: u8 = 0x0f; 595 | const CHUNK_TYPE_ASCONF_ACK: u8 = 0x80; 596 | const CHUNK_TYPE_RE_CONFIG: u8 = 0x82; 597 | const CHUNK_TYPE_PAD: u8 = 0x84; 598 | const CHUNK_TYPE_IDATA: u8 = 0x40; 599 | const CHUNK_TYPE_FORWARD_TSN: 
u8 = 0xc0; 600 | const CHUNK_TYPE_ASCONF: u8 = 0xc1; 601 | const CHUNK_TYPE_I_FORWARD_TSN: u8 = 0xc2; 602 | 603 | const INIT_ACK_PARAM_STATE_COOKIE: u16 = 0x07; 604 | const INIT_PARAM_FORWARD_TSN: u16 = 0xc000; 605 | const INIT_PARAM_SUPPORTED_EXTENSIONS: u16 = 0x8008; 606 | const INIT_PARAM_EXT_FORWARD_TSN: u8 = 0xc0; 607 | const HEARTBEAT_PARAM_INFO: u16 = 0x07; 608 | 609 | enum IterParamsError { 610 | BufferSize, 611 | } 612 | 613 | fn iter_params<'a>( 614 | data: &'a [u8], 615 | start: usize, 616 | ) -> impl Iterator> + 'a { 617 | struct ParamIterator<'a> { 618 | data: &'a [u8], 619 | index: usize, 620 | } 621 | 622 | impl<'a> Iterator for ParamIterator<'a> { 623 | type Item = Result<(u16, &'a [u8]), IterParamsError>; 624 | 625 | fn next(&mut self) -> Option { 626 | if self.data.len() < self.index + 4 { 627 | return None; 628 | } 629 | 630 | let ty = NetworkEndian::read_u16(&self.data[self.index..self.index + 2]); 631 | let len = NetworkEndian::read_u16(&self.data[self.index + 2..self.index + 4]) as usize; 632 | 633 | if self.data.len() < self.index + len { 634 | return Some(Err(IterParamsError::BufferSize)); 635 | } 636 | 637 | let res = Some(Ok((ty, &self.data[self.index + 4..self.index + len]))); 638 | self.index = next_multiple(self.index + len, 4); 639 | res 640 | } 641 | } 642 | 643 | ParamIterator { data, index: start } 644 | } 645 | 646 | fn next_multiple(s: usize, m: usize) -> usize { 647 | if s % m == 0 { 648 | s 649 | } else { 650 | s - s % m + m 651 | } 652 | } 653 | -------------------------------------------------------------------------------- /src/server.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{hash_map::Entry as HashMapEntry, HashMap, VecDeque}, 3 | convert::AsRef, 4 | error::Error, 5 | fmt, 6 | future::Future, 7 | io::{Error as IoError, ErrorKind as IoErrorKind}, 8 | net::SocketAddr, 9 | ops::Deref, 10 | pin::Pin, 11 | sync::Arc, 12 | task::Poll, 13 | time::{Duration, Instant}, 14 | }; 15 | 16 | use futures_channel::mpsc; 17 | use futures_core::Stream; 18 | use futures_util::{future::poll_fn, ready, select, FutureExt, SinkExt, StreamExt}; 19 | use http::{header, Response}; 20 | use openssl::ssl::SslAcceptor; 21 | use rand::thread_rng; 22 | 23 | use crate::{ 24 | buffer_pool::{BufferHandle, BufferPool, OwnedBuffer}, 25 | client::{Client, ClientError, MessageType, MAX_UDP_PAYLOAD_SIZE}, 26 | crypto::SslConfig, 27 | runtime::{Runtime, UdpSocket}, 28 | sdp::{gen_sdp_response, parse_sdp_fields, SdpFields}, 29 | stun::{parse_stun_binding_request, write_stun_success_response}, 30 | util::rand_string, 31 | }; 32 | 33 | #[derive(Debug)] 34 | pub enum SendError { 35 | /// Non-fatal error trying to send a message to an unknown, disconnected, or not fully 36 | /// established client. 37 | ClientNotConnected, 38 | /// Non-fatal error writing a WebRTC Data Channel message that is too large to fit in the 39 | /// maximum message length. 40 | IncompleteMessageWrite, 41 | /// I/O error on the underlying socket. May or may not be fatal, depending on the specific 42 | /// error. 
43 | Io(IoError), 44 | } 45 | 46 | impl fmt::Display for SendError { 47 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 48 | match self { 49 | SendError::ClientNotConnected => write!(f, "client is not connected"), 50 | SendError::IncompleteMessageWrite => { 51 | write!(f, "incomplete write of WebRTC Data Channel message") 52 | } 53 | SendError::Io(err) => fmt::Display::fmt(err, f), 54 | } 55 | } 56 | } 57 | 58 | impl Error for SendError {} 59 | 60 | impl From for SendError { 61 | fn from(err: IoError) -> SendError { 62 | SendError::Io(err) 63 | } 64 | } 65 | 66 | #[derive(Debug)] 67 | pub enum SessionError { 68 | /// `SessionEndpoint` has beeen disconnected from its `Server` (the `Server` has been dropped). 69 | Disconnected, 70 | /// An error streaming the SDP descriptor 71 | StreamError(Box), 72 | } 73 | 74 | impl fmt::Display for SessionError { 75 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 76 | match self { 77 | SessionError::Disconnected => write!(f, "`SessionEndpoint` disconnected from `Server`"), 78 | SessionError::StreamError(e) => { 79 | write!(f, "error streaming the incoming SDP descriptor: {}", e) 80 | } 81 | } 82 | } 83 | } 84 | 85 | impl Error for SessionError { 86 | fn source(&self) -> Option<&(dyn Error + 'static)> { 87 | match self { 88 | SessionError::Disconnected => None, 89 | SessionError::StreamError(e) => Some(e.as_ref()), 90 | } 91 | } 92 | } 93 | 94 | /// A reference to an internal buffer containing a received message. 95 | pub struct MessageBuffer<'a>(BufferHandle<'a>); 96 | 97 | impl<'a> Deref for MessageBuffer<'a> { 98 | type Target = Vec; 99 | 100 | fn deref(&self) -> &Vec { 101 | &self.0 102 | } 103 | } 104 | 105 | impl<'a> AsRef<[u8]> for MessageBuffer<'a> { 106 | fn as_ref(&self) -> &[u8] { 107 | &self.0 108 | } 109 | } 110 | 111 | pub struct MessageResult<'a> { 112 | pub message: MessageBuffer<'a>, 113 | pub message_type: MessageType, 114 | pub remote_addr: SocketAddr, 115 | } 116 | 117 | #[derive(Clone)] 118 | pub struct SessionEndpoint { 119 | public_addr: SocketAddr, 120 | cert_fingerprint: Arc, 121 | session_sender: mpsc::Sender, 122 | } 123 | 124 | impl SessionEndpoint { 125 | /// Receives an incoming SDP descriptor of an `RTCSessionDescription` from a browser, informs 126 | /// the corresponding `Server` of the new WebRTC session, and returns a JSON object 127 | /// containing two fields: 128 | /// 1) An `answer` field which is an SDP descriptor that can be used to construct an 129 | /// `RTCSessionDescription`. 130 | /// 2) a `candidate` field which is a configuration object for an `RTCIceCandidate`. 131 | /// 132 | /// The returned SDP descriptor contains a digest of the x509 certificate the server will use 133 | /// for DTLS, and the browser will ensure that this digest matches before starting a WebRTC 134 | /// connection. 135 | pub async fn session_request( 136 | &mut self, 137 | sdp_descriptor: S, 138 | ) -> Result 139 | where 140 | I: AsRef<[u8]>, 141 | E: Into>, 142 | S: Stream>, 143 | { 144 | const SERVER_USER_LEN: usize = 12; 145 | const SERVER_PASSWD_LEN: usize = 24; 146 | 147 | let SdpFields { ice_ufrag, mid, .. 
} = parse_sdp_fields(sdp_descriptor) 148 | .await 149 | .map_err(|e| SessionError::StreamError(e.into()))?; 150 | 151 | let (incoming_session, response) = { 152 | let mut rng = thread_rng(); 153 | let server_user = rand_string(&mut rng, SERVER_USER_LEN); 154 | let server_passwd = rand_string(&mut rng, SERVER_PASSWD_LEN); 155 | 156 | let incoming_session = IncomingSession { 157 | server_user: server_user.clone(), 158 | server_passwd: server_passwd.clone(), 159 | remote_user: ice_ufrag, 160 | }; 161 | 162 | let response = gen_sdp_response( 163 | &mut rng, 164 | &self.cert_fingerprint, 165 | &self.public_addr.ip().to_string(), 166 | self.public_addr.ip().is_ipv6(), 167 | self.public_addr.port(), 168 | &server_user, 169 | &server_passwd, 170 | &mid, 171 | ); 172 | 173 | (incoming_session, response) 174 | }; 175 | 176 | self.session_sender 177 | .send(incoming_session) 178 | .await 179 | .map_err(|_| SessionError::Disconnected)?; 180 | Ok(response) 181 | } 182 | 183 | /// Convenience method which returns an `http::Response` rather than a JSON string, with the 184 | /// correct format headers. 185 | pub async fn http_session_request( 186 | &mut self, 187 | sdp_descriptor: S, 188 | ) -> Result, SessionError> 189 | where 190 | I: AsRef<[u8]>, 191 | E: Into>, 192 | S: Stream>, 193 | { 194 | let r = self.session_request(sdp_descriptor).await?; 195 | Ok(Response::builder() 196 | .header(header::CONTENT_TYPE, "application/json") 197 | .body(r) 198 | .expect("could not construct session response")) 199 | } 200 | } 201 | 202 | pub struct Server { 203 | runtime: R, 204 | udp_socket: R::UdpSocket, 205 | session_endpoint: SessionEndpoint, 206 | incoming_session_stream: mpsc::Receiver, 207 | ssl_acceptor: Arc, 208 | outgoing_udp: VecDeque<(OwnedBuffer, SocketAddr)>, 209 | incoming_rtc: VecDeque<(OwnedBuffer, SocketAddr, MessageType)>, 210 | buffer_pool: BufferPool, 211 | sessions: HashMap, 212 | clients: HashMap, 213 | last_generate_periodic: Instant, 214 | last_cleanup: Instant, 215 | periodic_timer: Pin>, 216 | } 217 | 218 | impl Server { 219 | /// Start a new WebRTC data channel server listening on `listen_addr` and advertising its 220 | /// publicly available address as `public_addr`. 221 | /// 222 | /// WebRTC connections must be started via an external communication channel from a browser via 223 | /// the `SessionEndpoint`, after which a WebRTC data channel can be opened. 224 | pub fn new( 225 | runtime: R, 226 | listen_addr: SocketAddr, 227 | public_addr: SocketAddr, 228 | ) -> Result { 229 | Server::with_ssl_config( 230 | runtime, 231 | listen_addr, 232 | public_addr, 233 | SslConfig::create().expect("WebRTC server could not initialize OpenSSL primitives"), 234 | ) 235 | } 236 | 237 | /// Start a new WebRTC data channel server with the given `SslConfig`. 238 | /// 239 | /// This can be used to share self-signed TLS certificates between different `Server` instances, 240 | /// which is important in certain browsers (Firefox) when connecting to multiple WebRTC 241 | /// endpoints from the same page. 
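///
/// A rough sketch of sharing one certificate between two servers. It assumes the
/// `SslConfig` value can be reused for several servers (for example via `Clone`);
/// check `crypto.rs` for what is actually supported, and substitute your own
/// runtime values and socket addresses for the placeholder names below:
///
/// ```ignore
/// let ssl_config = SslConfig::create()?;
/// let server_a = Server::with_ssl_config(runtime_a, listen_a, public_a, ssl_config.clone())?;
/// let server_b = Server::with_ssl_config(runtime_b, listen_b, public_b, ssl_config)?;
/// // Both servers now advertise the same DTLS certificate fingerprint in their SDP answers.
/// ```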
242 | pub fn with_ssl_config( 243 | runtime: R, 244 | listen_addr: SocketAddr, 245 | public_addr: SocketAddr, 246 | crypto: SslConfig, 247 | ) -> Result { 248 | const SESSION_BUFFER_SIZE: usize = 8; 249 | 250 | let udp_socket = runtime.bind_udp(listen_addr)?; 251 | 252 | let (session_sender, session_receiver) = mpsc::channel(SESSION_BUFFER_SIZE); 253 | 254 | log::info!( 255 | "new WebRTC data channel server listening on {}, public addr {}", 256 | listen_addr, 257 | public_addr 258 | ); 259 | 260 | let session_endpoint = SessionEndpoint { 261 | public_addr, 262 | cert_fingerprint: Arc::new(crypto.fingerprint), 263 | session_sender, 264 | }; 265 | 266 | let periodic_timer = Box::pin(runtime.timer(PERIODIC_TIMER_INTERVAL)); 267 | 268 | Ok(Server { 269 | runtime, 270 | udp_socket, 271 | session_endpoint, 272 | incoming_session_stream: session_receiver, 273 | ssl_acceptor: crypto.ssl_acceptor, 274 | outgoing_udp: VecDeque::new(), 275 | incoming_rtc: VecDeque::new(), 276 | buffer_pool: BufferPool::new(), 277 | sessions: HashMap::new(), 278 | clients: HashMap::new(), 279 | last_generate_periodic: Instant::now(), 280 | last_cleanup: Instant::now(), 281 | periodic_timer, 282 | }) 283 | } 284 | 285 | /// Returns a `SessionEndpoint` which can be used to start new WebRTC sessions. 286 | /// 287 | /// WebRTC connections must be started via an external communication channel from a browser via 288 | /// the returned `SessionEndpoint`, and this communication channel will be used to exchange 289 | /// session descriptions in SDP format. 290 | /// 291 | /// The returned `SessionEndpoint` will notify this `Server` of new sessions via a shared async 292 | /// channel. This is done so that the `SessionEndpoint` is easy to use in a separate server 293 | /// task (such as a `hyper` HTTP server). 294 | pub fn session_endpoint(&self) -> SessionEndpoint { 295 | self.session_endpoint.clone() 296 | } 297 | 298 | /// The total count of clients in any active state, whether still starting up, fully 299 | /// established, or still shutting down. 300 | pub fn active_clients(&self) -> usize { 301 | self.clients.values().filter(|c| !c.is_shutdown()).count() 302 | } 303 | 304 | /// List all the currently fully established client connections. 305 | pub fn connected_clients(&self) -> impl Iterator + '_ { 306 | self.clients.iter().filter_map(|(addr, client)| { 307 | if client.is_established() { 308 | Some(addr) 309 | } else { 310 | None 311 | } 312 | }) 313 | } 314 | 315 | /// Returns true if the client has a completely established WebRTC data channel connection and 316 | /// can send messages back and forth. Returns false for disconnected clients as well as those 317 | /// that are still starting up or are in the process of shutting down. 318 | pub fn is_connected(&self, remote_addr: &SocketAddr) -> bool { 319 | if let Some(client) = self.clients.get(remote_addr) { 320 | client.is_established() 321 | } else { 322 | false 323 | } 324 | } 325 | 326 | /// Disconect the given client, does nothing if the client is not currently connected. 
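///
/// For example, to cleanly drop every fully established client, collect the addresses
/// first (since `connected_clients` borrows the server immutably while `disconnect`
/// needs it mutably); a sketch, assuming a `Server` value named `server`:
///
/// ```ignore
/// let addrs: Vec<SocketAddr> = server.connected_clients().copied().collect();
/// for addr in addrs {
///     server.disconnect(&addr).await?;
/// }
/// ```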
327 | pub async fn disconnect(&mut self, remote_addr: &SocketAddr) -> Result<(), IoError> { 328 | if let Some(client) = self.clients.get_mut(remote_addr) { 329 | match client.start_shutdown() { 330 | Ok(true) => { 331 | log::info!("starting shutdown for client {}", remote_addr); 332 | } 333 | Ok(false) => {} 334 | Err(err) => { 335 | log::warn!( 336 | "error starting shutdown for client {}: {}", 337 | remote_addr, 338 | err 339 | ); 340 | } 341 | } 342 | 343 | self.outgoing_udp 344 | .extend(client.take_outgoing_packets().map(|p| (p, *remote_addr))); 345 | self.send_outgoing().await? 346 | } 347 | 348 | Ok(()) 349 | } 350 | 351 | /// Send the given message to the given remote client, if they are connected. 352 | /// 353 | /// The given message must be less than `MAX_MESSAGE_LEN`. 354 | pub async fn send( 355 | &mut self, 356 | message: &[u8], 357 | message_type: MessageType, 358 | remote_addr: &SocketAddr, 359 | ) -> Result<(), SendError> { 360 | let client = self 361 | .clients 362 | .get_mut(remote_addr) 363 | .ok_or(SendError::ClientNotConnected)?; 364 | 365 | match client.send_message(message_type, message) { 366 | Err(ClientError::NotConnected) | Err(ClientError::NotEstablished) => { 367 | return Err(SendError::ClientNotConnected).into(); 368 | } 369 | Err(ClientError::IncompletePacketWrite) => { 370 | return Err(SendError::IncompleteMessageWrite).into(); 371 | } 372 | Err(err) => { 373 | log::warn!( 374 | "message send for client {} generated unexpected error, shutting down: {}", 375 | remote_addr, 376 | err 377 | ); 378 | let _ = client.start_shutdown(); 379 | return Err(SendError::ClientNotConnected).into(); 380 | } 381 | Ok(()) => {} 382 | } 383 | 384 | self.outgoing_udp 385 | .extend(client.take_outgoing_packets().map(|p| (p, *remote_addr))); 386 | Ok(self.send_outgoing().await?) 387 | } 388 | 389 | /// Receive a WebRTC data channel message from any connected client. 390 | /// 391 | /// `Server::recv` *must* be called for proper operation of the server, as it also handles 392 | /// background tasks such as responding to STUN packets and timing out existing sessions. 393 | /// 394 | /// If the provided buffer is not large enough to hold the received message, the received 395 | /// message will be truncated, and the original length will be returned as part of 396 | /// `MessageResult`. 397 | pub async fn recv(&mut self) -> Result, IoError> { 398 | while self.incoming_rtc.is_empty() { 399 | self.process().await?; 400 | } 401 | 402 | let (message, remote_addr, message_type) = self.incoming_rtc.pop_front().unwrap(); 403 | let message = MessageBuffer(self.buffer_pool.adopt(message)); 404 | return Ok(MessageResult { 405 | message, 406 | message_type, 407 | remote_addr, 408 | }); 409 | } 410 | 411 | // Accepts new incoming WebRTC sessions, times out existing WebRTC sessions, sends outgoing UDP 412 | // packets, receives incoming UDP packets, and responds to STUN packets. 
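// All of this work is driven indirectly through `Server::recv`, so a typical caller only
// loops on `recv` and `send`. A minimal echo-style loop might look like the sketch below,
// with server construction and error handling omitted (see examples/echo-server.rs for a
// complete program); the pooled `message` buffer is copied and dropped before sending
// because it borrows the server:
//
//     loop {
//         let MessageResult { message, message_type, remote_addr } = server.recv().await?;
//         let payload = message.as_ref().to_vec();
//         drop(message);
//         server.send(&payload, message_type, &remote_addr).await?;
//     }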
413 | async fn process(&mut self) -> Result<(), IoError> { 414 | enum Next { 415 | IncomingSession(IncomingSession), 416 | IncomingPacket(usize, SocketAddr), 417 | PeriodicTimer, 418 | } 419 | 420 | let mut packet_buffer = self.buffer_pool.acquire(); 421 | packet_buffer.resize(MAX_UDP_PAYLOAD_SIZE, 0); 422 | let next = { 423 | let recv_udp = { 424 | let udp_socket = &mut self.udp_socket; 425 | let packet_buffer = &mut packet_buffer; 426 | poll_fn(move |cx| udp_socket.poll_recv_from(cx, packet_buffer)).fuse() 427 | }; 428 | 429 | let next_timer = { 430 | let runtime = &self.runtime; 431 | let periodic_timer = &mut self.periodic_timer; 432 | poll_fn(move |cx| { 433 | ready!(periodic_timer.as_mut().poll(cx)); 434 | periodic_timer.set(runtime.timer(PERIODIC_TIMER_INTERVAL)); 435 | Poll::Ready(()) 436 | }) 437 | .fuse() 438 | }; 439 | 440 | select! { 441 | incoming_session = self.incoming_session_stream.next() => { 442 | Next::IncomingSession( 443 | incoming_session.expect("connection to SessionEndpoint has closed") 444 | ) 445 | } 446 | res = { recv_udp } => { 447 | let (len, remote_addr) = res?; 448 | Next::IncomingPacket(len, remote_addr) 449 | } 450 | _ = { next_timer } => { 451 | Next::PeriodicTimer 452 | } 453 | } 454 | }; 455 | 456 | match next { 457 | Next::IncomingSession(incoming_session) => { 458 | drop(packet_buffer); 459 | self.accept_session(incoming_session) 460 | } 461 | Next::IncomingPacket(len, remote_addr) => { 462 | if len > MAX_UDP_PAYLOAD_SIZE { 463 | return Err(IoError::new( 464 | IoErrorKind::Other, 465 | "failed to read entire datagram from socket", 466 | )); 467 | } 468 | packet_buffer.truncate(len); 469 | let packet_buffer = packet_buffer.into_owned(); 470 | self.receive_packet(remote_addr, packet_buffer); 471 | self.send_outgoing().await?; 472 | } 473 | Next::PeriodicTimer => { 474 | drop(packet_buffer); 475 | self.timeout_clients(); 476 | self.generate_periodic_packets(); 477 | self.send_outgoing().await?; 478 | } 479 | } 480 | 481 | Ok(()) 482 | } 483 | 484 | // Send all pending outgoing UDP packets 485 | async fn send_outgoing(&mut self) -> Result<(), IoError> { 486 | while let Some((packet, remote_addr)) = self.outgoing_udp.pop_front() { 487 | let packet = self.buffer_pool.adopt(packet); 488 | let len = poll_fn({ 489 | let udp_socket = &mut self.udp_socket; 490 | let packet = &packet; 491 | move |cx| udp_socket.poll_send_to(cx, packet, remote_addr) 492 | }) 493 | .await?; 494 | let packet_len = packet.len(); 495 | if len != packet_len { 496 | return Err(IoError::new( 497 | IoErrorKind::Other, 498 | "failed to write entire datagram to socket", 499 | )); 500 | } 501 | } 502 | Ok(()) 503 | } 504 | 505 | // Handle a single incoming UDP packet, either by responding to it as a STUN binding request or 506 | // by handling it as part of an existing WebRTC connection. 
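// In more detail, the flow is roughly:
// 1. If the datagram parses as a STUN binding request, look up the pending session by its
//    (server user, remote user) ICE ufrag pair, refresh its TTL, queue a binding success
//    response authenticated with the session password, and lazily create a `Client` for
//    this remote address if one does not already exist.
// 2. Otherwise, hand the datagram to the existing `Client` for this address, which runs it
//    through DTLS/SCTP and surfaces any data channel messages via `receive_messages`.
// Datagrams from unknown addresses that are not STUN binding requests are silently dropped.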
507 | fn receive_packet(&mut self, remote_addr: SocketAddr, packet_buffer: OwnedBuffer) { 508 | let mut packet_buffer = self.buffer_pool.adopt(packet_buffer); 509 | if let Some(stun_binding_request) = parse_stun_binding_request(&packet_buffer[..]) { 510 | if let Some(session) = self.sessions.get_mut(&SessionKey { 511 | server_user: stun_binding_request.server_user, 512 | remote_user: stun_binding_request.remote_user, 513 | }) { 514 | session.ttl = Instant::now(); 515 | packet_buffer.resize(MAX_UDP_PAYLOAD_SIZE, 0); 516 | let resp_len = write_stun_success_response( 517 | stun_binding_request.transaction_id, 518 | remote_addr, 519 | session.server_passwd.as_bytes(), 520 | &mut packet_buffer, 521 | ) 522 | .expect("could not write stun response"); 523 | 524 | packet_buffer.truncate(resp_len); 525 | self.outgoing_udp 526 | .push_back((packet_buffer.into_owned(), remote_addr)); 527 | 528 | match self.clients.entry(remote_addr) { 529 | HashMapEntry::Vacant(vacant) => { 530 | log::info!( 531 | "beginning client data channel connection with {}", 532 | remote_addr, 533 | ); 534 | 535 | vacant.insert( 536 | Client::new(&self.ssl_acceptor, self.buffer_pool.clone(), remote_addr) 537 | .expect("could not create new client instance"), 538 | ); 539 | } 540 | HashMapEntry::Occupied(_) => {} 541 | } 542 | } 543 | } else { 544 | if let Some(client) = self.clients.get_mut(&remote_addr) { 545 | if let Err(err) = client.receive_incoming_packet(packet_buffer.into_owned()) { 546 | if !client.shutdown_started() { 547 | log::warn!( 548 | "client {} had unexpected error receiving UDP packet, shutting down: {}", 549 | remote_addr, err 550 | ); 551 | let _ = client.start_shutdown(); 552 | } 553 | } 554 | self.outgoing_udp 555 | .extend(client.take_outgoing_packets().map(|p| (p, remote_addr))); 556 | self.incoming_rtc.extend( 557 | client 558 | .receive_messages() 559 | .map(|(message_type, message)| (message, remote_addr, message_type)), 560 | ); 561 | } 562 | } 563 | } 564 | 565 | // Call `Client::generate_periodic` on all clients, if we are due to do so. 566 | fn generate_periodic_packets(&mut self) { 567 | if self.last_generate_periodic.elapsed() >= PERIODIC_PACKET_INTERVAL { 568 | self.last_generate_periodic = Instant::now(); 569 | 570 | for (remote_addr, client) in &mut self.clients { 571 | if let Err(err) = client.generate_periodic() { 572 | if !client.shutdown_started() { 573 | log::warn!("error for client {}, shutting down: {}", remote_addr, err); 574 | let _ = client.start_shutdown(); 575 | } 576 | } 577 | self.outgoing_udp 578 | .extend(client.take_outgoing_packets().map(|p| (p, *remote_addr))); 579 | } 580 | } 581 | } 582 | 583 | // Clean up all client sessions / connections, if we are due to do so. 
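// Two independent timeouts apply here (see the constants defined below): pending sessions
// are dropped once no STUN binding request has refreshed them within `RTC_SESSION_TIMEOUT`,
// and clients are dropped once they have fully shut down or have shown no activity for
// `RTC_CONNECTION_TIMEOUT`.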
584 | fn timeout_clients(&mut self) { 585 | if self.last_cleanup.elapsed() >= CLEANUP_INTERVAL { 586 | self.last_cleanup = Instant::now(); 587 | self.sessions.retain(|session_key, session| { 588 | if session.ttl.elapsed() < RTC_SESSION_TIMEOUT { 589 | true 590 | } else { 591 | log::info!( 592 | "session timeout for server user '{}' and remote user '{}'", 593 | session_key.server_user, 594 | session_key.remote_user 595 | ); 596 | false 597 | } 598 | }); 599 | 600 | self.clients.retain(|remote_addr, client| { 601 | if !client.is_shutdown() 602 | && client.last_activity().elapsed() < RTC_CONNECTION_TIMEOUT 603 | { 604 | true 605 | } else { 606 | if !client.is_shutdown() { 607 | log::info!("connection timeout for client {}", remote_addr); 608 | } 609 | log::info!("client {} removed", remote_addr); 610 | false 611 | } 612 | }); 613 | } 614 | } 615 | 616 | fn accept_session(&mut self, incoming_session: IncomingSession) { 617 | log::info!( 618 | "session initiated with server user: '{}' and remote user: '{}'", 619 | incoming_session.server_user, 620 | incoming_session.remote_user 621 | ); 622 | 623 | self.sessions.insert( 624 | SessionKey { 625 | server_user: incoming_session.server_user, 626 | remote_user: incoming_session.remote_user, 627 | }, 628 | Session { 629 | server_passwd: incoming_session.server_passwd, 630 | ttl: Instant::now(), 631 | }, 632 | ); 633 | } 634 | } 635 | 636 | const RTC_CONNECTION_TIMEOUT: Duration = Duration::from_secs(30); 637 | const RTC_SESSION_TIMEOUT: Duration = Duration::from_secs(30); 638 | const CLEANUP_INTERVAL: Duration = Duration::from_secs(10); 639 | const PERIODIC_PACKET_INTERVAL: Duration = Duration::from_secs(1); 640 | const PERIODIC_TIMER_INTERVAL: Duration = Duration::from_secs(1); 641 | 642 | #[derive(Eq, PartialEq, Hash, Clone, Debug)] 643 | struct SessionKey { 644 | server_user: String, 645 | remote_user: String, 646 | } 647 | 648 | struct Session { 649 | server_passwd: String, 650 | ttl: Instant, 651 | } 652 | 653 | struct IncomingSession { 654 | pub server_user: String, 655 | pub server_passwd: String, 656 | pub remote_user: String, 657 | } 658 | -------------------------------------------------------------------------------- /src/client.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::VecDeque, 3 | error::Error, 4 | fmt, 5 | io::{Error as IoError, ErrorKind as IoErrorKind, Read, Write}, 6 | iter::Iterator, 7 | mem, 8 | net::SocketAddr, 9 | time::{Duration, Instant}, 10 | }; 11 | 12 | use openssl::{ 13 | error::ErrorStack as OpenSslErrorStack, 14 | ssl::{ 15 | Error as SslError, ErrorCode, HandshakeError, MidHandshakeSslStream, ShutdownResult, 16 | SslAcceptor, SslStream, 17 | }, 18 | }; 19 | use rand::{thread_rng, Rng}; 20 | 21 | use crate::buffer_pool::{BufferPool, OwnedBuffer}; 22 | use crate::sctp::{ 23 | read_sctp_packet, write_sctp_packet, SctpChunk, SctpPacket, SctpWriteError, 24 | SCTP_FLAG_BEGIN_FRAGMENT, SCTP_FLAG_COMPLETE_UNRELIABLE, SCTP_FLAG_END_FRAGMENT, 25 | }; 26 | 27 | /// Heartbeat packets will be generated at a maximum of this rate (if the connection is otherwise 28 | /// idle). 29 | pub const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(3); 30 | 31 | // Maximum theoretical UDP payload size 32 | pub const MAX_UDP_PAYLOAD_SIZE: usize = 65507; 33 | 34 | // Derived through experimentation, any larger and openssl reports 'dtls message too big'. 
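// For reference, the 28 byte `SCTP_MESSAGE_OVERHEAD` below is the 12 byte SCTP common header
// (source port, destination port, verification tag, checksum) plus the 16 byte DATA chunk
// header (type, flags, length, TSN, stream id, stream sequence, protocol id), so
// MAX_MESSAGE_LEN works out to 16384 - 28 = 16356 bytes. Real browsers and network paths
// tolerate far less than this; as the MAX_MESSAGE_LEN docs note, starting near a 1200 byte
// payload and testing upward is the safer approach.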
35 | pub const MAX_DTLS_MESSAGE_SIZE: usize = 16384; 36 | 37 | pub const MAX_SCTP_PACKET_SIZE: usize = MAX_DTLS_MESSAGE_SIZE; 38 | 39 | // The overhead of sending a single SCTP packet with a single data message. 40 | pub const SCTP_MESSAGE_OVERHEAD: usize = 28; 41 | 42 | /// Maximum supported theoretical size of a single WebRTC message, based on DTLS and SCTP packet 43 | /// size limits. 44 | /// 45 | /// WebRTC makes no attempt at packet fragmentation and re-assembly or to support fragmented 46 | /// received messages, all sent and received unreliable messages must fit into a single SCTP packet. 47 | /// As such, this maximum size is almost certainly too large for browsers to actually support. 48 | /// Start with a much lower MTU (around 1200) and test it. 49 | pub const MAX_MESSAGE_LEN: usize = MAX_SCTP_PACKET_SIZE - SCTP_MESSAGE_OVERHEAD; 50 | 51 | #[derive(Debug)] 52 | pub enum ClientError { 53 | TlsError(SslError), 54 | OpenSslError(OpenSslErrorStack), 55 | NotConnected, 56 | NotEstablished, 57 | IncompletePacketRead, 58 | IncompletePacketWrite, 59 | } 60 | 61 | impl fmt::Display for ClientError { 62 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 63 | match self { 64 | ClientError::TlsError(err) => fmt::Display::fmt(err, f), 65 | ClientError::OpenSslError(err) => fmt::Display::fmt(err, f), 66 | ClientError::NotConnected => write!(f, "client is not connected"), 67 | ClientError::NotEstablished => { 68 | write!(f, "client does not have an established WebRTC data channel") 69 | } 70 | ClientError::IncompletePacketRead => { 71 | write!(f, "WebRTC connection packet not completely read") 72 | } 73 | ClientError::IncompletePacketWrite => { 74 | write!(f, "WebRTC connection packet not completely written") 75 | } 76 | } 77 | } 78 | } 79 | 80 | impl Error for ClientError {} 81 | 82 | #[derive(Copy, Clone, Eq, PartialEq, Debug)] 83 | pub enum MessageType { 84 | Text, 85 | Binary, 86 | } 87 | 88 | pub struct Client { 89 | buffer_pool: BufferPool, 90 | remote_addr: SocketAddr, 91 | ssl_state: ClientSslState, 92 | client_state: ClientState, 93 | } 94 | 95 | impl Client { 96 | pub fn new( 97 | ssl_acceptor: &SslAcceptor, 98 | buffer_pool: BufferPool, 99 | remote_addr: SocketAddr, 100 | ) -> Result { 101 | match ssl_acceptor.accept(ClientSslPackets { 102 | buffer_pool: buffer_pool.clone(), 103 | incoming_udp: VecDeque::new(), 104 | outgoing_udp: VecDeque::new(), 105 | }) { 106 | Ok(_) => unreachable!("handshake cannot finish with no incoming packets"), 107 | Err(HandshakeError::SetupFailure(err)) => return Err(err), 108 | Err(HandshakeError::Failure(_)) => { 109 | unreachable!("handshake cannot fail before starting") 110 | } 111 | Err(HandshakeError::WouldBlock(mid_handshake)) => Ok(Client { 112 | buffer_pool, 113 | remote_addr, 114 | ssl_state: ClientSslState::Handshake(mid_handshake), 115 | client_state: ClientState { 116 | last_activity: Instant::now(), 117 | last_sent: Instant::now(), 118 | received_messages: Vec::new(), 119 | sctp_state: SctpState::Shutdown, 120 | sctp_local_port: 0, 121 | sctp_remote_port: 0, 122 | sctp_local_verification_tag: 0, 123 | sctp_remote_verification_tag: 0, 124 | sctp_local_tsn: 0, 125 | sctp_remote_tsn: 0, 126 | }, 127 | }), 128 | } 129 | } 130 | 131 | /// DTLS and SCTP states are established, and RTC messages may be sent 132 | pub fn is_established(&self) -> bool { 133 | match (&self.ssl_state, self.client_state.sctp_state) { 134 | (ClientSslState::Established(_), SctpState::Established) => true, 135 | _ => false, 136 | } 137 | } 138 | 139 | 
/// Time of last activity that indicates a working connection 140 | pub fn last_activity(&self) -> Instant { 141 | self.client_state.last_activity 142 | } 143 | 144 | /// Request SCTP and DTLS shutdown, connection immediately becomes un-established 145 | pub fn start_shutdown(&mut self) -> Result { 146 | let started; 147 | self.ssl_state = match mem::replace(&mut self.ssl_state, ClientSslState::Shutdown) { 148 | ClientSslState::Established(mut ssl_stream) => { 149 | started = true; 150 | if self.client_state.sctp_state != SctpState::Shutdown { 151 | // TODO: For now, we just do an immediate one-sided SCTP abort 152 | send_sctp_packet( 153 | &self.buffer_pool, 154 | &mut ssl_stream, 155 | SctpPacket { 156 | source_port: self.client_state.sctp_local_port, 157 | dest_port: self.client_state.sctp_remote_port, 158 | verification_tag: self.client_state.sctp_remote_verification_tag, 159 | chunks: &[SctpChunk::Abort], 160 | }, 161 | )?; 162 | self.client_state.last_sent = Instant::now(); 163 | self.client_state.sctp_state = SctpState::Shutdown; 164 | } 165 | match ssl_stream.shutdown() { 166 | Err(err) => { 167 | if err.code() == ErrorCode::ZERO_RETURN { 168 | ClientSslState::Shutdown 169 | } else { 170 | return Err(ssl_err_to_client_err(err)); 171 | } 172 | } 173 | Ok(res) => ClientSslState::ShuttingDown(ssl_stream, res), 174 | } 175 | } 176 | prev_state => { 177 | started = false; 178 | prev_state 179 | } 180 | }; 181 | Ok(started) 182 | } 183 | 184 | /// Returns true if the shutdown process has been started or has already finished. 185 | pub fn shutdown_started(&self) -> bool { 186 | match &self.ssl_state { 187 | ClientSslState::ShuttingDown(_, _) | ClientSslState::Shutdown => true, 188 | _ => false, 189 | } 190 | } 191 | 192 | /// Connection has finished shutting down. 193 | pub fn is_shutdown(&self) -> bool { 194 | match &self.ssl_state { 195 | ClientSslState::ShuttingDown(_, ShutdownResult::Received) 196 | | ClientSslState::Shutdown => true, 197 | _ => false, 198 | } 199 | } 200 | 201 | /// Generate any periodic packets, currently only heartbeat packets. 202 | pub fn generate_periodic(&mut self) -> Result<(), ClientError> { 203 | // We send heartbeat packets if the last sent packet was more than HEARTBEAT_INTERVAL ago 204 | if self.client_state.last_sent.elapsed() > HEARTBEAT_INTERVAL { 205 | match &mut self.ssl_state { 206 | ClientSslState::Established(ssl_stream) => { 207 | if self.client_state.sctp_state == SctpState::Established { 208 | send_sctp_packet( 209 | &self.buffer_pool, 210 | ssl_stream, 211 | SctpPacket { 212 | source_port: self.client_state.sctp_local_port, 213 | dest_port: self.client_state.sctp_remote_port, 214 | verification_tag: self.client_state.sctp_remote_verification_tag, 215 | chunks: &[SctpChunk::Heartbeat { 216 | heartbeat_info: Some(SCTP_HEARTBEAT), 217 | }], 218 | }, 219 | )?; 220 | self.client_state.last_sent = Instant::now(); 221 | } 222 | } 223 | _ => {} 224 | } 225 | } 226 | Ok(()) 227 | } 228 | 229 | /// Pushes an available UDP packet. Will error if called when the client is currently in the 230 | /// shutdown state. 
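///
/// After feeding a packet in, the caller is expected to drain the results, mirroring what
/// `Server::receive_packet` does. A sketch, where `outgoing_udp` and `incoming_rtc` stand in
/// for whatever queues the caller maintains:
///
/// ```ignore
/// client.receive_incoming_packet(udp_packet)?;
/// outgoing_udp.extend(client.take_outgoing_packets().map(|p| (p, remote_addr)));
/// incoming_rtc.extend(
///     client
///         .receive_messages()
///         .map(|(message_type, message)| (message, remote_addr, message_type)),
/// );
/// ```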
231 | pub fn receive_incoming_packet(&mut self, udp_packet: OwnedBuffer) -> Result<(), ClientError> { 232 | self.ssl_state = match mem::replace(&mut self.ssl_state, ClientSslState::Shutdown) { 233 | ClientSslState::Handshake(mut mid_handshake) => { 234 | mid_handshake.get_mut().incoming_udp.push_back(udp_packet); 235 | match mid_handshake.handshake() { 236 | Ok(ssl_stream) => { 237 | log::info!("DTLS handshake finished for remote {}", self.remote_addr); 238 | ClientSslState::Established(ssl_stream) 239 | } 240 | Err(handshake_error) => match handshake_error { 241 | HandshakeError::SetupFailure(err) => { 242 | return Err(ClientError::OpenSslError(err)); 243 | } 244 | HandshakeError::Failure(mid_handshake) => { 245 | log::warn!( 246 | "SSL handshake failure with remote {}: {}", 247 | self.remote_addr, 248 | mid_handshake.error() 249 | ); 250 | ClientSslState::Handshake(mid_handshake) 251 | } 252 | HandshakeError::WouldBlock(mid_handshake) => { 253 | ClientSslState::Handshake(mid_handshake) 254 | } 255 | }, 256 | } 257 | } 258 | ClientSslState::Established(mut ssl_stream) => { 259 | ssl_stream.get_mut().incoming_udp.push_back(udp_packet); 260 | ClientSslState::Established(ssl_stream) 261 | } 262 | ClientSslState::ShuttingDown(mut ssl_stream, shutdown_result) => { 263 | ssl_stream.get_mut().incoming_udp.push_back(udp_packet); 264 | match ssl_stream.shutdown() { 265 | Err(err) => { 266 | if err.code() == ErrorCode::WANT_READ { 267 | ClientSslState::ShuttingDown(ssl_stream, shutdown_result) 268 | } else if err.code() == ErrorCode::ZERO_RETURN { 269 | ClientSslState::Shutdown 270 | } else { 271 | return Err(ssl_err_to_client_err(err)); 272 | } 273 | } 274 | Ok(res) => ClientSslState::ShuttingDown(ssl_stream, res), 275 | } 276 | } 277 | ClientSslState::Shutdown => ClientSslState::Shutdown, 278 | }; 279 | 280 | while let ClientSslState::Established(ssl_stream) = &mut self.ssl_state { 281 | let mut ssl_buffer = self.buffer_pool.acquire(); 282 | ssl_buffer.resize(MAX_SCTP_PACKET_SIZE, 0); 283 | match ssl_stream.ssl_read(&mut ssl_buffer) { 284 | Ok(size) => { 285 | let mut sctp_chunks = [SctpChunk::Abort; SCTP_MAX_CHUNKS]; 286 | match read_sctp_packet(&ssl_buffer[0..size], false, &mut sctp_chunks) { 287 | Ok(sctp_packet) => { 288 | if !receive_sctp_packet( 289 | &self.buffer_pool, 290 | ssl_stream, 291 | &mut self.client_state, 292 | &sctp_packet, 293 | )? 
{ 294 | drop(ssl_buffer); 295 | self.start_shutdown()?; 296 | } 297 | } 298 | Err(err) => { 299 | log::debug!("sctp read error on packet received over DTLS: {}", err); 300 | } 301 | } 302 | } 303 | Err(err) => { 304 | if err.code() == ErrorCode::WANT_READ { 305 | break; 306 | } else if err.code() == ErrorCode::ZERO_RETURN { 307 | log::info!("DTLS received close notify"); 308 | drop(ssl_buffer); 309 | self.start_shutdown()?; 310 | } else { 311 | return Err(ssl_err_to_client_err(err)); 312 | } 313 | } 314 | } 315 | } 316 | 317 | Ok(()) 318 | } 319 | 320 | pub fn take_outgoing_packets<'a>(&'a mut self) -> impl Iterator + 'a { 321 | (match &mut self.ssl_state { 322 | ClientSslState::Handshake(mid_handshake) => { 323 | Some(mid_handshake.get_mut().outgoing_udp.drain(..)) 324 | } 325 | ClientSslState::Established(ssl_stream) 326 | | ClientSslState::ShuttingDown(ssl_stream, _) => { 327 | Some(ssl_stream.get_mut().outgoing_udp.drain(..)) 328 | } 329 | ClientSslState::Shutdown => None, 330 | }) 331 | .into_iter() 332 | .flatten() 333 | } 334 | 335 | pub fn send_message( 336 | &mut self, 337 | message_type: MessageType, 338 | message: &[u8], 339 | ) -> Result<(), ClientError> { 340 | let ssl_stream = match &mut self.ssl_state { 341 | ClientSslState::Established(ssl_stream) => ssl_stream, 342 | _ => { 343 | return Err(ClientError::NotConnected); 344 | } 345 | }; 346 | 347 | if self.client_state.sctp_state != SctpState::Established { 348 | return Err(ClientError::NotEstablished); 349 | } 350 | 351 | let proto_id = if message_type == MessageType::Text { 352 | DATA_CHANNEL_PROTO_STRING 353 | } else { 354 | DATA_CHANNEL_PROTO_BINARY 355 | }; 356 | 357 | send_sctp_packet( 358 | &self.buffer_pool, 359 | ssl_stream, 360 | SctpPacket { 361 | source_port: self.client_state.sctp_local_port, 362 | dest_port: self.client_state.sctp_remote_port, 363 | verification_tag: self.client_state.sctp_remote_verification_tag, 364 | chunks: &[SctpChunk::Data { 365 | chunk_flags: SCTP_FLAG_COMPLETE_UNRELIABLE, 366 | tsn: self.client_state.sctp_local_tsn, 367 | stream_id: 0, 368 | stream_seq: 0, 369 | proto_id, 370 | user_data: message, 371 | }], 372 | }, 373 | )?; 374 | self.client_state.sctp_local_tsn = self.client_state.sctp_local_tsn.wrapping_add(1); 375 | 376 | Ok(()) 377 | } 378 | 379 | pub fn receive_messages<'a>( 380 | &'a mut self, 381 | ) -> impl Iterator + 'a { 382 | self.client_state.received_messages.drain(..) 
383 | } 384 | } 385 | 386 | pub struct ClientState { 387 | last_activity: Instant, 388 | last_sent: Instant, 389 | 390 | received_messages: Vec<(MessageType, OwnedBuffer)>, 391 | 392 | sctp_state: SctpState, 393 | 394 | sctp_local_port: u16, 395 | sctp_remote_port: u16, 396 | 397 | sctp_local_verification_tag: u32, 398 | sctp_remote_verification_tag: u32, 399 | 400 | sctp_local_tsn: u32, 401 | sctp_remote_tsn: u32, 402 | } 403 | 404 | enum ClientSslState { 405 | Handshake(MidHandshakeSslStream), 406 | Established(SslStream), 407 | ShuttingDown(SslStream, ShutdownResult), 408 | Shutdown, 409 | } 410 | 411 | #[derive(Debug)] 412 | struct ClientSslPackets { 413 | buffer_pool: BufferPool, 414 | incoming_udp: VecDeque, 415 | outgoing_udp: VecDeque, 416 | } 417 | 418 | impl Read for ClientSslPackets { 419 | fn read(&mut self, buf: &mut [u8]) -> Result { 420 | if let Some(next_packet) = self.incoming_udp.pop_front() { 421 | let next_packet = self.buffer_pool.adopt(next_packet); 422 | if next_packet.len() > buf.len() { 423 | return Err(IoError::new( 424 | IoErrorKind::Other, 425 | ClientError::IncompletePacketRead, 426 | )); 427 | } 428 | buf[0..next_packet.len()].copy_from_slice(&next_packet); 429 | Ok(next_packet.len()) 430 | } else { 431 | Err(IoErrorKind::WouldBlock.into()) 432 | } 433 | } 434 | } 435 | 436 | impl Write for ClientSslPackets { 437 | fn write(&mut self, buf: &[u8]) -> Result { 438 | let mut buffer = self.buffer_pool.acquire(); 439 | buffer.extend_from_slice(buf); 440 | self.outgoing_udp.push_back(buffer.into_owned()); 441 | Ok(buf.len()) 442 | } 443 | 444 | fn flush(&mut self) -> Result<(), IoError> { 445 | Ok(()) 446 | } 447 | } 448 | 449 | const SCTP_COOKIE: &[u8] = b"WEBRTC-UNRELIABLE-COOKIE"; 450 | const SCTP_HEARTBEAT: &[u8] = b"WEBRTC-UNRELIABLE-HEARTBEAT"; 451 | const SCTP_MAX_CHUNKS: usize = 16; 452 | const SCTP_BUFFER_SIZE: u32 = 0x40000; 453 | 454 | const DATA_CHANNEL_PROTO_CONTROL: u32 = 50; 455 | const DATA_CHANNEL_PROTO_STRING: u32 = 51; 456 | const DATA_CHANNEL_PROTO_BINARY: u32 = 53; 457 | 458 | const DATA_CHANNEL_MESSAGE_ACK: u8 = 2; 459 | const DATA_CHANNEL_MESSAGE_OPEN: u8 = 3; 460 | 461 | #[derive(Debug, Eq, PartialEq, Copy, Clone)] 462 | enum SctpState { 463 | Shutdown, 464 | InitAck, 465 | Established, 466 | } 467 | 468 | fn ssl_err_to_client_err(err: SslError) -> ClientError { 469 | if let Some(io_err) = err.io_error() { 470 | if let Some(inner) = io_err.get_ref() { 471 | if inner.is::() { 472 | return *err 473 | .into_io_error() 474 | .unwrap() 475 | .into_inner() 476 | .unwrap() 477 | .downcast() 478 | .unwrap(); 479 | } 480 | } 481 | } 482 | 483 | ClientError::TlsError(err) 484 | } 485 | 486 | fn max_tsn(a: u32, b: u32) -> u32 { 487 | if a > b { 488 | if a - b < (1 << 31) { 489 | a 490 | } else { 491 | b 492 | } 493 | } else { 494 | if b - a < (1 << 31) { 495 | b 496 | } else { 497 | a 498 | } 499 | } 500 | } 501 | 502 | fn send_sctp_packet( 503 | buffer_pool: &BufferPool, 504 | ssl_stream: &mut SslStream, 505 | sctp_packet: SctpPacket, 506 | ) -> Result<(), ClientError> { 507 | let mut sctp_buffer = buffer_pool.acquire(); 508 | sctp_buffer.resize(MAX_SCTP_PACKET_SIZE, 0); 509 | 510 | let packet_len = match write_sctp_packet(&mut sctp_buffer, sctp_packet) { 511 | Ok(len) => len, 512 | Err(SctpWriteError::BufferSize) => { 513 | return Err(ClientError::IncompletePacketWrite); 514 | } 515 | Err(err) => panic!("error writing SCTP packet: {}", err), 516 | }; 517 | 518 | assert_eq!( 519 | ssl_stream 520 | .ssl_write(&sctp_buffer[0..packet_len]) 521 | 
.map_err(ssl_err_to_client_err)?, 522 | packet_len 523 | ); 524 | 525 | Ok(()) 526 | } 527 | 528 | fn receive_sctp_packet( 529 | buffer_pool: &BufferPool, 530 | ssl_stream: &mut SslStream, 531 | client_state: &mut ClientState, 532 | sctp_packet: &SctpPacket, 533 | ) -> Result { 534 | for chunk in sctp_packet.chunks { 535 | match *chunk { 536 | SctpChunk::Init { 537 | initiate_tag, 538 | window_credit: _, 539 | num_outbound_streams, 540 | num_inbound_streams, 541 | initial_tsn, 542 | support_unreliable, 543 | } => { 544 | if !support_unreliable { 545 | log::warn!("peer does not support selective unreliability, abort connection"); 546 | client_state.sctp_state = SctpState::Shutdown; 547 | return Ok(false); 548 | } 549 | 550 | let mut rng = thread_rng(); 551 | 552 | client_state.sctp_local_port = sctp_packet.dest_port; 553 | client_state.sctp_remote_port = sctp_packet.source_port; 554 | 555 | client_state.sctp_local_verification_tag = rng.gen(); 556 | client_state.sctp_remote_verification_tag = initiate_tag; 557 | 558 | client_state.sctp_local_tsn = rng.gen(); 559 | client_state.sctp_remote_tsn = initial_tsn; 560 | 561 | send_sctp_packet( 562 | &buffer_pool, 563 | ssl_stream, 564 | SctpPacket { 565 | source_port: client_state.sctp_local_port, 566 | dest_port: client_state.sctp_remote_port, 567 | verification_tag: client_state.sctp_remote_verification_tag, 568 | chunks: &[SctpChunk::InitAck { 569 | initiate_tag: client_state.sctp_local_verification_tag, 570 | window_credit: SCTP_BUFFER_SIZE, 571 | num_outbound_streams: num_outbound_streams, 572 | num_inbound_streams: num_inbound_streams, 573 | initial_tsn: client_state.sctp_local_tsn, 574 | state_cookie: SCTP_COOKIE, 575 | }], 576 | }, 577 | )?; 578 | 579 | client_state.sctp_state = SctpState::InitAck; 580 | client_state.last_activity = Instant::now(); 581 | client_state.last_sent = Instant::now(); 582 | } 583 | SctpChunk::CookieEcho { state_cookie } => { 584 | if state_cookie == SCTP_COOKIE && client_state.sctp_state != SctpState::Shutdown { 585 | send_sctp_packet( 586 | &buffer_pool, 587 | ssl_stream, 588 | SctpPacket { 589 | source_port: client_state.sctp_local_port, 590 | dest_port: client_state.sctp_remote_port, 591 | verification_tag: client_state.sctp_remote_verification_tag, 592 | chunks: &[SctpChunk::CookieAck], 593 | }, 594 | )?; 595 | client_state.last_sent = Instant::now(); 596 | 597 | if client_state.sctp_state == SctpState::InitAck { 598 | client_state.sctp_state = SctpState::Established; 599 | client_state.last_activity = Instant::now(); 600 | } 601 | } 602 | } 603 | SctpChunk::Data { 604 | chunk_flags, 605 | tsn, 606 | stream_id, 607 | stream_seq: _, 608 | proto_id, 609 | user_data, 610 | } => { 611 | if chunk_flags & SCTP_FLAG_BEGIN_FRAGMENT == 0 612 | || chunk_flags & SCTP_FLAG_END_FRAGMENT == 0 613 | { 614 | log::debug!("received fragmented SCTP packet, dropping"); 615 | } else { 616 | client_state.sctp_remote_tsn = max_tsn(client_state.sctp_remote_tsn, tsn); 617 | 618 | if proto_id == DATA_CHANNEL_PROTO_CONTROL { 619 | if !user_data.is_empty() { 620 | if user_data[0] == DATA_CHANNEL_MESSAGE_OPEN { 621 | send_sctp_packet( 622 | &buffer_pool, 623 | ssl_stream, 624 | SctpPacket { 625 | source_port: client_state.sctp_local_port, 626 | dest_port: client_state.sctp_remote_port, 627 | verification_tag: client_state.sctp_remote_verification_tag, 628 | chunks: &[SctpChunk::Data { 629 | chunk_flags: SCTP_FLAG_COMPLETE_UNRELIABLE, 630 | tsn: client_state.sctp_local_tsn, 631 | stream_id, 632 | stream_seq: 0, 633 | proto_id: 
DATA_CHANNEL_PROTO_CONTROL, 634 | user_data: &[DATA_CHANNEL_MESSAGE_ACK], 635 | }], 636 | }, 637 | )?; 638 | client_state.sctp_local_tsn = 639 | client_state.sctp_local_tsn.wrapping_add(1); 640 | } 641 | } 642 | } else if proto_id == DATA_CHANNEL_PROTO_STRING { 643 | let mut msg_buffer = buffer_pool.acquire(); 644 | msg_buffer.extend(user_data); 645 | client_state 646 | .received_messages 647 | .push((MessageType::Text, msg_buffer.into_owned())); 648 | } else if proto_id == DATA_CHANNEL_PROTO_BINARY { 649 | let mut msg_buffer = buffer_pool.acquire(); 650 | msg_buffer.extend(user_data); 651 | client_state 652 | .received_messages 653 | .push((MessageType::Binary, msg_buffer.into_owned())); 654 | } 655 | 656 | send_sctp_packet( 657 | &buffer_pool, 658 | ssl_stream, 659 | SctpPacket { 660 | source_port: client_state.sctp_local_port, 661 | dest_port: client_state.sctp_remote_port, 662 | verification_tag: client_state.sctp_remote_verification_tag, 663 | chunks: &[SctpChunk::SAck { 664 | cumulative_tsn_ack: client_state.sctp_remote_tsn, 665 | adv_recv_window: SCTP_BUFFER_SIZE, 666 | num_gap_ack_blocks: 0, 667 | num_dup_tsn: 0, 668 | }], 669 | }, 670 | )?; 671 | 672 | client_state.last_activity = Instant::now(); 673 | client_state.last_sent = Instant::now(); 674 | } 675 | } 676 | SctpChunk::Heartbeat { heartbeat_info } => { 677 | send_sctp_packet( 678 | &buffer_pool, 679 | ssl_stream, 680 | SctpPacket { 681 | source_port: client_state.sctp_local_port, 682 | dest_port: client_state.sctp_remote_port, 683 | verification_tag: client_state.sctp_remote_verification_tag, 684 | chunks: &[SctpChunk::HeartbeatAck { heartbeat_info }], 685 | }, 686 | )?; 687 | client_state.last_activity = Instant::now(); 688 | client_state.last_sent = Instant::now(); 689 | } 690 | SctpChunk::HeartbeatAck { .. } => { 691 | client_state.last_activity = Instant::now(); 692 | } 693 | SctpChunk::SAck { 694 | cumulative_tsn_ack: _, 695 | adv_recv_window: _, 696 | num_gap_ack_blocks, 697 | num_dup_tsn: _, 698 | } => { 699 | if num_gap_ack_blocks > 0 { 700 | send_sctp_packet( 701 | &buffer_pool, 702 | ssl_stream, 703 | SctpPacket { 704 | source_port: client_state.sctp_local_port, 705 | dest_port: client_state.sctp_remote_port, 706 | verification_tag: client_state.sctp_remote_verification_tag, 707 | chunks: &[SctpChunk::ForwardTsn { 708 | new_cumulative_tsn: client_state.sctp_local_tsn, 709 | }], 710 | }, 711 | )?; 712 | client_state.last_sent = Instant::now(); 713 | } 714 | client_state.last_activity = Instant::now(); 715 | } 716 | SctpChunk::Shutdown { .. } => { 717 | send_sctp_packet( 718 | &buffer_pool, 719 | ssl_stream, 720 | SctpPacket { 721 | source_port: client_state.sctp_local_port, 722 | dest_port: client_state.sctp_remote_port, 723 | verification_tag: client_state.sctp_remote_verification_tag, 724 | chunks: &[SctpChunk::ShutdownAck], 725 | }, 726 | )?; 727 | } 728 | SctpChunk::ShutdownAck { .. } | SctpChunk::Abort => { 729 | client_state.sctp_state = SctpState::Shutdown; 730 | return Ok(false); 731 | } 732 | SctpChunk::ForwardTsn { new_cumulative_tsn } => { 733 | client_state.sctp_remote_tsn = new_cumulative_tsn; 734 | } 735 | SctpChunk::InitAck { .. 
} | SctpChunk::CookieAck => {} 736 | SctpChunk::Error { 737 | first_param_type, 738 | first_param_data, 739 | } => { 740 | log::warn!( 741 | "SCTP error chunk received: {} {:?}", 742 | first_param_type, 743 | first_param_data 744 | ); 745 | } 746 | chunk => log::debug!("unhandled SCTP chunk {:?}", chunk), 747 | } 748 | } 749 | 750 | Ok(true) 751 | } 752 | --------------------------------------------------------------------------------