├── .github └── workflows │ └── semgrep.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── cli ├── Cargo.toml └── src │ ├── aim_report.rs │ ├── args │ ├── mod.rs │ ├── packet_loss.rs │ ├── rpm.rs │ └── up_down.rs │ ├── latency.rs │ ├── main.rs │ ├── packet_loss.rs │ ├── report.rs │ ├── rpm.rs │ ├── up_down.rs │ └── util.rs └── crates ├── nq-core ├── Cargo.toml └── src │ ├── body │ ├── counting_body.rs │ ├── mod.rs │ └── upload_body.rs │ ├── client.rs │ ├── connection │ ├── http.rs │ ├── map.rs │ └── mod.rs │ ├── lib.rs │ ├── network │ └── mod.rs │ ├── time.rs │ ├── upgraded.rs │ └── util.rs ├── nq-latency ├── Cargo.toml └── src │ └── lib.rs ├── nq-load-generator ├── Cargo.toml └── src │ └── lib.rs ├── nq-packetloss ├── Cargo.toml └── src │ ├── lib.rs │ └── webrtc_data_channel.rs ├── nq-rpm ├── Cargo.toml └── src │ └── lib.rs ├── nq-stats ├── Cargo.toml └── src │ ├── counter.rs │ └── lib.rs └── nq-tokio-network ├── Cargo.toml └── src └── lib.rs /.github/workflows/semgrep.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: {} 3 | workflow_dispatch: {} 4 | push: 5 | branches: 6 | - main 7 | - master 8 | schedule: 9 | - cron: '0 0 * * *' 10 | name: Semgrep config 11 | jobs: 12 | semgrep: 13 | name: semgrep/ci 14 | runs-on: ubuntu-latest 15 | env: 16 | SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} 17 | SEMGREP_URL: https://cloudflare.semgrep.dev 18 | SEMGREP_APP_URL: https://cloudflare.semgrep.dev 19 | SEMGREP_VERSION_CHECK_URL: https://cloudflare.semgrep.dev/api/check-version 20 | container: 21 | image: returntocorp/semgrep 22 | steps: 23 | - uses: actions/checkout@v4 24 | - run: semgrep ci 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["crates/*", "cli"] 3 | resolver = "2" 4 | 5 | [workspace.package] 6 | version = "3.3.0" 7 | repository = "https://github.com/cloudflare/foundations" 8 | edition = "2021" 9 | authors = [ 10 | "Fisher Darling ", 11 | "Lina Baquero ", 12 | "Cloudflare" 13 | ] 14 | license = "BSD-3-Clause" 15 | 16 | [profile.release] 17 | debug = 1 18 | 19 | [workspace.dependencies] 20 | nq-core = { path = "./crates/nq-core" } 21 | nq-stats = { path = "./crates/nq-stats" } 22 | nq-packetloss = { path = "./crates/nq-packetloss" } 23 | nq-proxy-network = { path = "./crates/nq-proxy-network" } 24 | nq-rpm = { path = "./crates/nq-rpm" } 25 | nq-latency = { path = "./crates/nq-latency" } 26 | nq-load-generator = { path = "./crates/nq-load-generator" } 27 | nq-tokio-network = { path = "./crates/nq-tokio-network" } 28 | 29 | anyhow = "1.0" 30 | async-trait = { version = "0.1" } 31 | boring = "4.11.0" 32 | clap = "4.3" 33 | clap-verbosity-flag = "2.1" 34 | http = "1.0" 35 | http-body-util = "0.1.2" 36 | hyper = "1.0" 37 | hyper-util = "0.1" 38 | pin-project-lite = "0.2" 39 | rand = "0.8" 40 | serde = "1.0" 41 | serde_json = { version = "1.0", features = ["preserve_order"] } 42 | tracing = "0.1" 43 | tracing-subscriber = "0.3" 44 | tokio = "1.0" 45 | tokio-util = "0.7" 46 | tokio-boring = "4.11.0" 47 | url = "2.4" 48 | webrtc = "0.12.0" 49 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2023-2024, Cloudflare Inc. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this 9 | list of conditions and the following disclaimer. 10 | 11 | 2. 
Redistributions in binary form must reproduce the above copyright notice, 12 | this list of conditions and the following disclaimer in the documentation 13 | and/or other materials provided with the distribution. 14 | 15 | 3. Neither the name of the copyright holder nor the names of its 16 | contributors may be used to endorse or promote products derived from 17 | this software without specific prior written permission. 18 | 19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 23 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 26 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 27 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # networkquality-rs 2 | 3 | networkquality-rs is a collection of tools for measuring the quality of a 4 | network. This repo provides a CLI tool `mach` which can be used to run multiple 5 | different tests. The main focus of `mach` and this repo is to implement the IETF 6 | draft: ["Responsiveness under Working Conditions"](draft). 7 | 8 | The draft defines "responsiveness", measured in **R**ound trips **P**er 9 | **M**inute (RPM), as a useful measurement of network quality. 
`mach`'s default 10 | operation is to measure the responsiveness of a network using Cloudflare's 11 | responsiveness servers. 12 | 13 | # Installing 14 | 15 | First, [install rust](https://www.rust-lang.org/tools/install). 16 | 17 | Then build and run the binary at `./target/release/mach`: 18 | 19 | ```shell 20 | cargo build --release 21 | 22 | # run an rpm test 23 | ./target/release/mach 24 | ``` 25 | 26 | Or install it with cargo: 27 | 28 | ```shell 29 | cargo install --path ./cli 30 | 31 | # run an RPM test 32 | mach 33 | ``` 34 | 35 | # Running `mach` 36 | 37 | `mach` defaults to running a responsiveness test when given no arguments, the 38 | equivalent to `mach rpm`. 39 | 40 | Use `mach help` to see a list of subcommands and `mach help ` or 41 | `mach help` to see options for that command. 42 | 43 | ## Examples 44 | 45 | Running a responsiveness test: 46 | 47 | ```shell 48 | mach rpm 49 | { 50 | "unloaded_latency_ms": 10.819, 51 | "jitter_ms": 6.945, 52 | "download": { 53 | "throughput": 104846062, 54 | "loaded_latency_ms": 86.936, 55 | "rpm": 446 56 | }, 57 | "upload": { 58 | "throughput": 48758784, 59 | "loaded_latency_ms": 206.837, 60 | "rpm": 433 61 | } 62 | } 63 | ``` 64 | 65 | > RPM reports are automatically uploaded to Cloudflare's aim database and are 66 | > anonymous. See https://blog.cloudflare.com/aim-database-for-internet-quality/ 67 | > for more information. 68 | > 69 | > Use `--disable-aim-scores` to disable uploading reports. 
trait is built in a composable manner; multiple proxy networks can be
and running simple statistics on those series.
Look into 175 | calculating a better `CountingBody` update rate. 176 | - [x] Properly signal the connections on a network to shutdown. 177 | 178 | [draft]: https://datatracker.ietf.org/doc/html/draft-ietf-ippm-responsiveness-03 179 | -------------------------------------------------------------------------------- /cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mach-cli" 3 | version = "0.1.0" 4 | authors = ["Fisher Darling "] 5 | edition = "2021" 6 | 7 | [[bin]] 8 | name = "mach" 9 | path = "./src/main.rs" 10 | 11 | [dependencies] 12 | nq-core = { workspace = true } 13 | nq-latency = { workspace = true } 14 | nq-load-generator = { workspace = true } 15 | nq-packetloss = { workspace = true } 16 | nq-rpm = { workspace = true } 17 | nq-tokio-network = { workspace = true } 18 | 19 | anyhow = { workspace = true } 20 | clap = { workspace = true, features = ["derive"] } 21 | clap-verbosity-flag = { workspace = true } 22 | http = { workspace = true } 23 | http-body-util = { workspace = true } 24 | hyper = { workspace = true } 25 | serde = { workspace = true, features = ["derive"] } 26 | serde_json = { workspace = true } 27 | tokio = { workspace = true, features = ["rt-multi-thread", "time", "net", "macros",] } 28 | tokio-util = { workspace = true } 29 | tracing = { workspace = true } 30 | tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] } -------------------------------------------------------------------------------- /cli/src/aim_report.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | //! Structures and utilities for reporting data to Cloudflare's AIM aggregation. 
5 | 6 | use std::sync::Arc; 7 | 8 | use http::{HeaderMap, HeaderValue}; 9 | use http_body_util::BodyExt; 10 | use nq_core::client::Client; 11 | use nq_core::{Time, TokioTime}; 12 | use nq_latency::LatencyResult; 13 | use nq_load_generator::LoadedConnection; 14 | use nq_rpm::ResponsivenessResult; 15 | use nq_tokio_network::TokioNetwork; 16 | use serde::{Deserialize, Serialize}; 17 | use tokio_util::sync::CancellationToken; 18 | use tracing::debug; 19 | 20 | use crate::util::{pretty_ms, pretty_secs_to_ms}; 21 | 22 | /// Describes the format of Cloudflare AIM results uploaded with test runs. 23 | #[derive(Debug, Clone, Serialize, Deserialize)] 24 | #[serde(rename_all = "camelCase")] 25 | pub struct CloudflareAimResults { 26 | pub(crate) latency_ms: Vec, 27 | pub(crate) download: Vec, 28 | pub(crate) upload: Vec, 29 | pub(crate) down_loaded_latency_ms: Vec, 30 | pub(crate) up_loaded_latency_ms: Vec, 31 | pub(crate) packet_loss: PacketLossMeasurement, 32 | pub(crate) responsiveness: f64, 33 | #[serde(skip)] 34 | origin: String, 35 | } 36 | 37 | impl CloudflareAimResults { 38 | pub fn from_rpm_results( 39 | rtt_result: &LatencyResult, 40 | download_result: &ResponsivenessResult, 41 | upload_result: &ResponsivenessResult, 42 | config_url: Option, 43 | ) -> CloudflareAimResults { 44 | let latency_ms = rtt_result 45 | .measurements 46 | .values() 47 | .map(pretty_secs_to_ms) 48 | .collect(); 49 | 50 | let mut download = 51 | BpsMeasurement::from_loaded_connections(&download_result.loaded_connections); 52 | download.push(BpsMeasurement::from_rpm_result(download_result)); 53 | 54 | let mut upload = BpsMeasurement::from_loaded_connections(&upload_result.loaded_connections); 55 | upload.push(BpsMeasurement::from_rpm_result(upload_result)); 56 | 57 | let down_loaded_latency_ms = download_result 58 | .self_probe_latencies 59 | .values() 60 | .map(pretty_ms) 61 | .collect(); 62 | let up_loaded_latency_ms = upload_result 63 | .self_probe_latencies 64 | .values() 65 | 
.map(pretty_ms) 66 | .collect(); 67 | 68 | let packet_loss = PacketLossMeasurement { 69 | num_messages: 0, 70 | loss_ratio: 0.0, 71 | }; 72 | 73 | CloudflareAimResults { 74 | latency_ms, 75 | download, 76 | upload, 77 | down_loaded_latency_ms, 78 | up_loaded_latency_ms, 79 | packet_loss, 80 | responsiveness: download_result.rpm, 81 | origin: config_url.unwrap_or_else(|| "https://rpm.speed.cloudflare.com".to_string()), 82 | } 83 | } 84 | 85 | pub async fn upload(&self) -> anyhow::Result<()> { 86 | let results = self.clone(); 87 | let origin = self.origin.clone(); 88 | 89 | let shutdown = CancellationToken::new(); 90 | let time = Arc::new(TokioTime::new()); 91 | let network = Arc::new(TokioNetwork::new( 92 | Arc::clone(&time) as Arc, 93 | shutdown, 94 | )); 95 | 96 | let mut headers = HeaderMap::new(); 97 | headers.append("Origin", HeaderValue::from_str(origin.as_str()).unwrap()); 98 | headers.append("Content-Type", HeaderValue::from_static("application/json")); 99 | let body = serde_json::to_string(&results).unwrap(); 100 | 101 | let response = Client::default() 102 | .new_connection(nq_core::ConnectionType::H2) 103 | .new_connection(nq_core::ConnectionType::H2) 104 | .headers(headers) 105 | .method("POST") 106 | .send( 107 | "https://aim.cloudflare.com/__log".parse().unwrap(), 108 | body, 109 | network, 110 | time, 111 | )? 112 | .await?; 113 | 114 | let (status, status_text) = (response.status(), response.status().to_string()); 115 | let body = response.into_body().collect().await?.to_bytes(); 116 | 117 | debug!( 118 | "aim upload response: {status} ({status_text}); {}", 119 | String::from_utf8_lossy(&body) 120 | ); 121 | 122 | if status != 200 { 123 | anyhow::bail!("error uploading aim results"); 124 | } 125 | 126 | Ok(()) 127 | } 128 | } 129 | 130 | /// Describes the bits/s of some transfer. 131 | #[derive(Debug, Clone, Serialize, Deserialize)] 132 | #[serde(rename_all = "camelCase")] 133 | pub struct BpsMeasurement { 134 | /// The total number of bytes. 
135 | bytes: usize, 136 | /// The bits per second of the transfer. 137 | bps: usize, 138 | } 139 | 140 | impl BpsMeasurement { 141 | fn from_loaded_connections(connections: &[LoadedConnection]) -> Vec { 142 | connections 143 | .iter() 144 | .map(|connection| { 145 | let bytes = connection.total_bytes_series().sum(); 146 | let bps = connection.total_bytes_series().average().unwrap_or(0.0); 147 | 148 | BpsMeasurement { 149 | bytes: bytes as usize, 150 | bps: bps as usize, 151 | } 152 | }) 153 | .collect() 154 | } 155 | 156 | /// Use the test duration and network capacity to create a synthetic bps result. 157 | fn from_rpm_result(rpm_result: &ResponsivenessResult) -> BpsMeasurement { 158 | let throughput = rpm_result.throughput().unwrap_or(0) as f64; 159 | 160 | let bytes = throughput * rpm_result.duration.as_secs_f64(); 161 | let bps = throughput as usize; 162 | 163 | BpsMeasurement { 164 | bytes: bytes as usize, 165 | bps, 166 | } 167 | } 168 | } 169 | 170 | /// A measure of packet loss. 171 | #[derive(Debug, Clone, Serialize, Deserialize)] 172 | #[serde(rename_all = "camelCase")] 173 | pub struct PacketLossMeasurement { 174 | num_messages: usize, 175 | loss_ratio: f64, 176 | } 177 | 178 | /// Default to 0% packet loss 179 | // todo(fisher): add network statistics as a part of the `Network` trait. 
180 | impl Default for PacketLossMeasurement { 181 | fn default() -> Self { 182 | Self { 183 | num_messages: 1000, 184 | loss_ratio: 0.0, 185 | } 186 | } 187 | } 188 | 189 | /// Calculated AIM scores: 190 | /// https://developers.cloudflare.com/speed/aim/ 191 | #[derive(Serialize, Deserialize)] 192 | #[serde(rename_all = "camelCase")] 193 | #[allow(missing_docs)] 194 | pub enum AimScore { 195 | Streaming { 196 | points: usize, 197 | classification: String, 198 | }, 199 | Gaming { 200 | points: usize, 201 | classification: String, 202 | }, 203 | Rtc { 204 | points: usize, 205 | classification: String, 206 | }, 207 | } 208 | -------------------------------------------------------------------------------- /cli/src/args/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | pub(crate) mod packet_loss; 5 | pub(crate) mod rpm; 6 | pub(crate) mod up_down; 7 | 8 | use clap::{Parser, Subcommand, ValueEnum}; 9 | use packet_loss::PacketLossArgs; 10 | 11 | use crate::args::rpm::RpmArgs; 12 | use crate::args::up_down::DownloadArgs; 13 | 14 | /// mach runs multiple different network performance tests. The main focus of 15 | /// mach and this tool is to implement the IETF draft: "Responsiveness under 16 | /// Working Conditions". 17 | #[derive(Debug, Parser)] 18 | #[command(version, about, long_about = None)] 19 | pub struct Cli { 20 | #[command(flatten)] 21 | pub verbosity: clap_verbosity_flag::Verbosity, 22 | #[clap(subcommand)] 23 | pub command: Option, 24 | // todo(fisher): figure out proxies 25 | // #[clap(short = 'p', long = "proxy")] 26 | // proxies: Vec, 27 | } 28 | 29 | #[derive(Debug, Subcommand)] 30 | pub enum Command { 31 | /// Measure the network's responsiveness and report the download and upload 32 | /// capacity. 
// todo(fisher): figure out proxy chaining. Preparsing args or using the -- sentinel?
78 | #[derive(Debug, Clone, ValueEnum)] 79 | pub enum ConnType { 80 | H1, 81 | H2, 82 | H3, 83 | } 84 | -------------------------------------------------------------------------------- /cli/src/args/packet_loss.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | //! Arguments for running simple packet loss test. 5 | 6 | use clap::Args; 7 | 8 | /// Send UDP packets to a TURN server, reporting lost packets. 9 | #[derive(Debug, Args)] 10 | pub struct PacketLossArgs { 11 | /// The target TURN server URI to send UDP packets. User's will have to provide a TURN server. 12 | #[clap(short = 't', long)] 13 | pub turn_uri: String, 14 | /// The URL to send the request to for TURN server credentials 15 | #[clap(short = 'c', long)] 16 | pub turn_cred_url: String, 17 | /// Total number of messages/packets to send 18 | #[clap(default_value = "1000")] 19 | #[clap(short = 'p', long)] 20 | pub num_packets: usize, 21 | /// Total number of messages to send in a batch before waiting 22 | #[clap(default_value = "10")] 23 | #[clap(short = 's', long)] 24 | pub batch_size: usize, 25 | /// Time to wait between batch sends in milliseconds (ms). 26 | #[clap(default_value = "10")] 27 | #[clap(short = 'w', long)] 28 | pub batch_wait_time_ms: u64, 29 | /// Time to wait for receiving messages after all messages have been sent in milliseconds (ms). 30 | #[clap(default_value = "3000")] 31 | #[clap(short = 'r', long)] 32 | pub response_wait_time_ms: u64, 33 | 34 | /// The download file endpoint used for load generation which should be multiple GBs. 
35 | #[clap( 36 | short = 'd', 37 | long = "download", 38 | default_value = "https://h3.speed.cloudflare.com/__down?bytes=10000000000" 39 | )] 40 | pub download_url: String, 41 | /// The upload url used for load generation which accepts an arbitrary amount of data. 42 | #[clap( 43 | short = 'u', 44 | long = "upload", 45 | default_value = "https://h3.speed.cloudflare.com/__up" 46 | )] 47 | pub upload_url: String, 48 | } 49 | -------------------------------------------------------------------------------- /cli/src/args/rpm.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | //! Arguments for running responsiveness tests. 5 | 6 | use clap::Args; 7 | 8 | #[derive(Debug, Args)] 9 | pub struct RpmArgs { 10 | /// The endpoint to get the responsiveness config from. Should be JSON in 11 | /// the form: 12 | /// 13 | /// { 14 | /// "version": number, 15 | /// "test_endpoint": string?, 16 | /// "urls": { 17 | /// "small_https_download_url": string, 18 | /// "large_https_download_url": string, 19 | /// "https_upload_url": string 20 | /// } 21 | /// } 22 | #[clap(short = 'c', long = "config")] 23 | pub config: Option, 24 | /// The large file endpoint which should be multiple GBs. 25 | #[clap( 26 | short = 'l', 27 | long = "large", 28 | default_value = "https://h3.speed.cloudflare.com/__down?bytes=10000000000" 29 | )] 30 | pub large_download_url: String, 31 | /// The small file endpoint which should be very small, only a few bytes. 32 | #[clap( 33 | short = 's', 34 | long = "small", 35 | default_value = "https://h3.speed.cloudflare.com/__down?bytes=10" 36 | )] 37 | pub small_download_url: String, 38 | /// The upload url which accepts an arbitrary amount of data. 
39 | #[clap( 40 | short = 'u', 41 | long = "upload", 42 | default_value = "https://h3.speed.cloudflare.com/__up" 43 | )] 44 | pub upload_url: String, 45 | /// The number of intervals to use when calculating the moving average. 46 | #[clap(long = "mad", default_value = "4")] 47 | pub moving_average_distance: usize, 48 | /// How far a measurement is allowed to be from the previous moving average 49 | /// before the measurement is considered unstable. 50 | #[clap(long = "std", default_value = "0.05")] 51 | pub std_tolerance: f64, 52 | /// Determines which percentile to use for averaging when calculating the 53 | /// trimmed mean of throughputs or RPM scores. A value of `0.95` means to 54 | /// only use values in the 95th percentile to calculate an average. 55 | #[clap(long = "trim", default_value = "0.95")] 56 | pub trimmed_mean_percent: f64, 57 | /// The maximum number of loaded connections that the test can use to 58 | /// saturate the network. 59 | #[clap(long = "max-load", default_value = "16")] 60 | pub max_loaded_connections: usize, 61 | /// The duration between interval updates in milliseconds (ms). 62 | #[clap(long = "interval-duration", default_value = "1000")] 63 | pub interval_duration_ms: u64, 64 | /// The overall test duration in milliseconds (ms). 65 | #[clap(long = "test-duration", default_value = "12000")] 66 | pub test_duration_ms: u64, 67 | /// Disable AIM score reporting. 
68 | /// 69 | /// https://blog.cloudflare.com/aim-database-for-internet-quality/ 70 | #[clap(long)] 71 | pub disable_aim_scores: bool, 72 | } 73 | 74 | impl Default for RpmArgs { 75 | fn default() -> Self { 76 | Self { 77 | config: None, 78 | large_download_url: "https://h3.speed.cloudflare.com/__down?bytes=10000000000" 79 | .to_string(), 80 | small_download_url: "https://h3.speed.cloudflare.com/__down?bytes=10".to_string(), 81 | upload_url: "https://h3.speed.cloudflare.com/__up".to_string(), 82 | moving_average_distance: 4, 83 | std_tolerance: 0.05, 84 | trimmed_mean_percent: 0.95, 85 | max_loaded_connections: 16, 86 | interval_duration_ms: 1000, // 1s 87 | test_duration_ms: 12_000, // 12s 88 | disable_aim_scores: false, 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /cli/src/args/up_down.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | //! Arguments for running simple upload and download tests. 5 | 6 | use clap::Args; 7 | 8 | use crate::args::ConnType; 9 | 10 | /// Download data (GET) from an endpoint, reporting latency measurements and 11 | /// total throughput. 12 | #[derive(Debug, Args)] 13 | pub struct DownloadArgs { 14 | /// The URL to download data from. 15 | #[clap(default_value = "https://h3.speed.cloudflare.com/__down?bytes=10")] 16 | pub(crate) url: String, 17 | #[clap(default_value = "h2")] 18 | pub(crate) conn_type: ConnType, 19 | #[clap(short = 'H', long = "header")] 20 | pub(crate) headers: Vec, 21 | } 22 | 23 | /// Upload data (POST) to an endpoint, reporting latency measurements and total 24 | /// throughput. 25 | #[derive(Debug, Args)] 26 | pub struct UploadArgs { 27 | /// The URL to upload data to. 
28 | #[clap(default_value = "https://h3.speed.cloudflare.com/__up")] 29 | pub(crate) url: String, 30 | /// The type of the connection. 31 | #[clap(default_value = "h2")] 32 | pub(crate) conn_type: ConnType, 33 | /// The number of arbitrary bytes to upload. Only one of `bytes` or `file` 34 | /// can be set. 35 | #[clap(short, long)] 36 | pub(crate) bytes: Option, 37 | /// Upload the contents of a file. Only one of `bytes` or `file` can be set. 38 | // #[clap(short, long)] 39 | // pub(crate) file: Option, 40 | /// Headers to add to the request. 41 | #[clap(short = 'H', long = "header")] 42 | pub(crate) headers: Vec, 43 | } 44 | -------------------------------------------------------------------------------- /cli/src/latency.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::sync::Arc; 5 | 6 | use anyhow::Context; 7 | use nq_core::{Network, Time, TokioTime}; 8 | use nq_latency::{Latency, LatencyConfig, LatencyResult}; 9 | use nq_tokio_network::TokioNetwork; 10 | use tokio_util::sync::CancellationToken; 11 | use tracing::info; 12 | 13 | use crate::util::pretty_secs_to_ms; 14 | 15 | pub async fn run(url: String, runs: usize) -> anyhow::Result<()> { 16 | info!("measuring rtt with {runs} runs against {url}"); 17 | 18 | if runs == 0 { 19 | anyhow::bail!("latency runs must be >= 1"); 20 | } 21 | 22 | let result = run_test(&LatencyConfig { 23 | url: url.parse()?, 24 | runs, 25 | }) 26 | .await?; 27 | 28 | let latency_ms = result 29 | .median() 30 | .map(pretty_secs_to_ms) 31 | .context("no measurements found, median rtt is null")?; 32 | 33 | let jitter_ms = result.jitter().map(pretty_secs_to_ms).unwrap_or(0.0); 34 | 35 | let json = serde_json::json!({ 36 | "jitter_ms": jitter_ms, 37 | "latency_ms": latency_ms, 38 | }); 39 | 40 | println!("{:#}", json); 41 
| 42 | Ok(()) 43 | } 44 | 45 | pub async fn run_test(config: &LatencyConfig) -> anyhow::Result { 46 | let shutdown = CancellationToken::new(); 47 | let time = Arc::new(TokioTime::new()) as Arc; 48 | let network = Arc::new(TokioNetwork::new( 49 | Arc::clone(&time), 50 | shutdown.clone(), 51 | )) as Arc; 52 | 53 | let rtt = Latency::new(config.clone()); 54 | let results = rtt.run_test(network, time, shutdown).await?; 55 | 56 | Ok(results) 57 | } 58 | -------------------------------------------------------------------------------- /cli/src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | mod aim_report; 5 | pub(crate) mod args; 6 | mod latency; 7 | mod packet_loss; 8 | mod report; 9 | mod rpm; 10 | mod up_down; 11 | mod util; 12 | 13 | use clap::Parser; 14 | use clap_verbosity_flag::LevelFilter; 15 | 16 | use crate::args::rpm::RpmArgs; 17 | use crate::args::Command; 18 | 19 | #[tokio::main] 20 | async fn main() -> anyhow::Result<()> { 21 | let args = args::Cli::parse(); 22 | 23 | setup_logging(args.verbosity)?; 24 | 25 | // default to RPM 26 | let command = args 27 | .command 28 | .unwrap_or_else(|| Command::Rpm(RpmArgs::default())); 29 | 30 | match command { 31 | Command::Rpm(config) => rpm::run(config).await?, 32 | Command::Download(config) => up_down::download(config).await?, 33 | // Command::Upload(config) => up_down::upload(config).await?, 34 | Command::Rtt { url, runs } => latency::run(url, runs).await?, 35 | Command::PacketLoss(config) => packet_loss::run(config).await?, 36 | } 37 | 38 | Ok(()) 39 | } 40 | 41 | fn setup_logging(verbosity: clap_verbosity_flag::Verbosity) -> anyhow::Result<()> { 42 | let filter = if let Ok(log) = std::env::var("RUST_LOG") { 43 | log 44 | } else { 45 | match verbosity.log_level_filter() { 46 | LevelFilter::Off => 
"error", 47 | LevelFilter::Error => "mach=info,error", 48 | LevelFilter::Warn => "mach=info,nq_rpm=info,nq_latency=info,nq_core=error", 49 | LevelFilter::Info => "mach=info,nq_rpm=info,nq_latency=info,nq_core=info", 50 | LevelFilter::Debug => "debug", 51 | LevelFilter::Trace => "trace", 52 | } 53 | .to_string() 54 | }; 55 | 56 | tracing_subscriber::fmt() 57 | .with_env_filter(filter) 58 | .with_writer(std::io::stderr) 59 | .init(); 60 | 61 | Ok(()) 62 | } 63 | -------------------------------------------------------------------------------- /cli/src/packet_loss.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use anyhow::{bail, Context}; 5 | use http::{HeaderMap, HeaderValue}; 6 | use http_body_util::BodyExt; 7 | use nq_core::{client::Client, ConnectionType, Time, TokioTime}; 8 | use nq_packetloss::{PacketLoss, PacketLossConfig, TurnServerCreds}; 9 | use nq_tokio_network::TokioNetwork; 10 | use std::{sync::Arc, time::Duration}; 11 | use tokio_util::sync::CancellationToken; 12 | use tracing::info; 13 | 14 | use crate::args::packet_loss::PacketLossArgs; 15 | 16 | pub async fn run(args: PacketLossArgs) -> anyhow::Result<()> { 17 | info!("running packet loss test"); 18 | 19 | let shutdown = CancellationToken::new(); 20 | let config = PacketLossConfig { 21 | turn_server_uri: args.turn_uri, 22 | turn_cred_request_url: args.turn_cred_url.parse()?, 23 | num_packets: args.num_packets, 24 | batch_size: args.batch_size, 25 | batch_wait_time: Duration::from_millis(args.batch_wait_time_ms), 26 | response_wait_time: Duration::from_millis(args.response_wait_time_ms), 27 | download_url: args.download_url.parse()?, 28 | upload_url: args.upload_url.parse()?, 29 | }; 30 | 31 | info!("fetching TURN server credentials"); 32 | let turn_server_creds = 
fetch_turn_server_creds(&config, shutdown.clone()).await?; 33 | 34 | info!("sending {} UDP packets to TURN server", config.num_packets); 35 | let packet_loss = PacketLoss::new_with_config(config)?; 36 | let packet_loss_result = packet_loss.run_test(turn_server_creds, shutdown).await?; 37 | 38 | println!("{}", serde_json::to_string_pretty(&packet_loss_result)?); 39 | Ok(()) 40 | } 41 | 42 | /// Fetch the TURN creds from the configured HTTP server 43 | async fn fetch_turn_server_creds( 44 | config: &PacketLossConfig, 45 | shutdown: CancellationToken, 46 | ) -> anyhow::Result { 47 | let request_url = config.turn_cred_request_url.clone(); 48 | let time = Arc::new(TokioTime::new()); 49 | let network = Arc::new(TokioNetwork::new( 50 | Arc::clone(&time) as Arc, 51 | shutdown.clone(), 52 | )); 53 | 54 | let host = config 55 | .turn_cred_request_url 56 | .host_str() 57 | .ok_or(anyhow::anyhow!("url has no host"))?; 58 | let mut headers = HeaderMap::new(); 59 | headers.append(hyper::header::HOST, HeaderValue::from_str(host)?); 60 | 61 | let response = Client::default() 62 | .new_connection(ConnectionType::H1) 63 | .method("GET") 64 | .headers(headers) 65 | .send( 66 | request_url.to_string().parse()?, 67 | http_body_util::Empty::new(), 68 | network, 69 | time, 70 | )? 71 | .await?; 72 | 73 | if !response.status().is_success() { 74 | bail!( 75 | "could not fetch turn credentials from: {request_url} {}", 76 | response.status() 77 | ); 78 | } 79 | 80 | let creds = serde_json::from_slice(&response.into_body().collect().await?.to_bytes()) 81 | .context("parsing json creds from turn server url")?; 82 | Ok(creds) 83 | } -------------------------------------------------------------------------------- /cli/src/report.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 
2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use anyhow::Context; 5 | use nq_latency::LatencyResult; 6 | use nq_rpm::ResponsivenessResult; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use crate::util::{pretty_ms, pretty_secs_to_ms}; 10 | 11 | #[derive(Serialize, Deserialize)] 12 | pub struct Report { 13 | unloaded_latency_ms: f64, 14 | // todo(fisher): implement packet loss from tcp info. 15 | // packet_loss: f64, 16 | jitter_ms: f64, 17 | 18 | download: RpmReport, 19 | upload: RpmReport, 20 | } 21 | 22 | impl Report { 23 | pub fn from_rtt_and_rpm_results( 24 | rtt_result: &LatencyResult, 25 | download_rpm_result: &ResponsivenessResult, 26 | upload_rpm_result: &ResponsivenessResult, 27 | ) -> anyhow::Result { 28 | let unloaded_latency_ms = rtt_result 29 | .median() 30 | .map(pretty_secs_to_ms) 31 | .context("no unloaded latency measurements")?; 32 | 33 | let jitter_ms = rtt_result.jitter().map(pretty_secs_to_ms).unwrap_or(0.0); 34 | 35 | let download = 36 | RpmReport::from_rpm_result(download_rpm_result).context("building download report")?; 37 | let upload = 38 | RpmReport::from_rpm_result(upload_rpm_result).context("building upload report")?; 39 | 40 | Ok(Report { 41 | unloaded_latency_ms, 42 | jitter_ms, 43 | download, 44 | upload, 45 | }) 46 | } 47 | } 48 | 49 | #[derive(Serialize, Deserialize)] 50 | struct RpmReport { 51 | throughput: usize, 52 | loaded_latency_ms: f64, 53 | rpm: usize, 54 | } 55 | 56 | impl RpmReport { 57 | pub fn from_rpm_result(result: &ResponsivenessResult) -> anyhow::Result { 58 | Ok(RpmReport { 59 | throughput: result.throughput().context("no throughputs available")?, 60 | loaded_latency_ms: result 61 | .self_probe_latencies 62 | .quantile(0.5) 63 | .map(pretty_ms) 64 | .context("no loaded latency measurements")?, 65 | rpm: result.rpm as usize, 66 | }) 67 | } 68 | } 69 | 
-------------------------------------------------------------------------------- /cli/src/rpm.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::sync::Arc; 5 | use std::time::Duration; 6 | 7 | use anyhow::{bail, Context}; 8 | use http_body_util::BodyExt; 9 | use nq_core::client::Client; 10 | use nq_core::{ConnectionType, Network, Time, TokioTime}; 11 | use nq_latency::LatencyConfig; 12 | use nq_rpm::{Responsiveness, ResponsivenessConfig, ResponsivenessResult}; 13 | use nq_tokio_network::TokioNetwork; 14 | use serde::{Deserialize, Serialize}; 15 | use tokio::time::timeout; 16 | use tokio_util::sync::CancellationToken; 17 | use tracing::{debug, error, info}; 18 | 19 | use crate::aim_report::CloudflareAimResults; 20 | use crate::args::rpm::RpmArgs; 21 | use crate::report::Report; 22 | use crate::util::pretty_secs_to_ms; 23 | 24 | /// Run a responsiveness test. 
25 | pub async fn run(cli_config: RpmArgs) -> anyhow::Result<()> { 26 | info!("running responsiveness test"); 27 | 28 | let rpm_urls = match cli_config.config.clone() { 29 | Some(endpoint) => { 30 | info!("fetching configuration from {endpoint}"); 31 | let urls = get_rpm_config(endpoint).await?.urls; 32 | info!("retrieved configuration urls: {urls:?}"); 33 | 34 | urls 35 | } 36 | None => { 37 | let urls = RpmUrls { 38 | small_https_download_url: cli_config.small_download_url, 39 | large_https_download_url: cli_config.large_download_url, 40 | https_upload_url: cli_config.upload_url, 41 | }; 42 | info!("using default configuration urls: {urls:?}"); 43 | 44 | urls 45 | } 46 | }; 47 | 48 | // first get unloaded RTT measurements 49 | info!("determining unloaded latency"); 50 | let rtt_result = crate::latency::run_test(&LatencyConfig { 51 | url: rpm_urls.small_https_download_url.parse()?, 52 | runs: 20, 53 | }) 54 | .await?; 55 | info!( 56 | "unloaded latency: {} ms. jitter: {} ms", 57 | rtt_result 58 | .median() 59 | .map(pretty_secs_to_ms) 60 | .unwrap_or_default(), 61 | rtt_result 62 | .jitter() 63 | .map(pretty_secs_to_ms) 64 | .unwrap_or_default(), 65 | ); 66 | 67 | let config = ResponsivenessConfig { 68 | large_download_url: rpm_urls.large_https_download_url.parse()?, 69 | small_download_url: rpm_urls.small_https_download_url.parse()?, 70 | upload_url: rpm_urls.https_upload_url.parse()?, 71 | moving_average_distance: cli_config.moving_average_distance, 72 | interval_duration: Duration::from_millis(cli_config.interval_duration_ms), 73 | test_duration: Duration::from_millis(cli_config.test_duration_ms), 74 | trimmed_mean_percent: cli_config.trimmed_mean_percent, 75 | std_tolerance: cli_config.std_tolerance, 76 | max_loaded_connections: cli_config.max_loaded_connections, 77 | }; 78 | 79 | info!("running download test"); 80 | let download_result = run_test(&config, true).await?; 81 | debug!("download result={download_result:?}"); 82 | 83 | info!("running upload test"); 
84 | let upload_result = run_test(&config, false).await?; 85 | debug!("upload result={upload_result:?}"); 86 | 87 | let aim_results = CloudflareAimResults::from_rpm_results( 88 | &rtt_result, 89 | &download_result, 90 | &upload_result, 91 | cli_config.config, 92 | ); 93 | 94 | let upload_handle = tokio::spawn(async move { 95 | if !cli_config.disable_aim_scores { 96 | debug!("uploading aim report"); 97 | if let Err(e) = aim_results.upload().await { 98 | error!("error uploading aim results: {e}"); 99 | } 100 | } 101 | }); 102 | 103 | info!("generating rpm report"); 104 | let report = Report::from_rtt_and_rpm_results(&rtt_result, &download_result, &upload_result) 105 | .context("building RPM report")?; 106 | 107 | println!("{}", serde_json::to_string_pretty(&report)?); 108 | 109 | let _ = timeout(Duration::from_secs(1), upload_handle).await; 110 | 111 | Ok(()) 112 | } 113 | /* Runs a single responsiveness test; `download` selects direction (true = download, false = upload). Builds a fresh TokioNetwork tied to a per-test CancellationToken and cancels it once the test returns. */ 114 | async fn run_test( 115 | config: &ResponsivenessConfig, 116 | download: bool, 117 | ) -> anyhow::Result { 118 | let shutdown = CancellationToken::new(); 119 | let time = Arc::new(TokioTime::new()) as Arc; 120 | let network = Arc::new(TokioNetwork::new( 121 | Arc::clone(&time), 122 | shutdown.clone(), 123 | )) as Arc; 124 | 125 | let rpm = Responsiveness::new(config.clone(), download)?; 126 | let result = rpm.run_test(network, time, shutdown.clone()).await?; 127 | 128 | debug!("shutting down rpm test"); 129 | let _ = tokio::time::timeout(tokio::time::Duration::from_secs(1), async { 130 | shutdown.cancel(); 131 | }) 132 | .await; 133 | 134 | Ok(result) 135 | } 136 | 137 | #[derive(Debug, Serialize, Deserialize)] 138 | pub struct RpmServerConfig { 139 | urls: RpmUrls, 140 | } 141 | 142 | #[derive(Debug, Serialize, Deserialize)] 143 | pub struct RpmUrls { 144 | #[serde(alias = "small_download_url")] 145 | small_https_download_url: String, 146 | #[serde(alias = "large_download_url")] 147 | large_https_download_url: String, 148 | #[serde(alias = "upload_url")] 149 | https_upload_url: String, 
150 | } 151 | 152 | pub async fn get_rpm_config(config_url: String) -> anyhow::Result { 153 | let shutdown = CancellationToken::new(); 154 | let time = Arc::new(TokioTime::new()); 155 | let network = Arc::new(TokioNetwork::new( 156 | Arc::clone(&time) as Arc, 157 | shutdown.clone(), 158 | )); 159 | 160 | let response = Client::default() 161 | .new_connection(ConnectionType::H2) 162 | .method("GET") 163 | .send( 164 | config_url.parse().context("parsing rpm config url")?, 165 | http_body_util::Empty::new(), 166 | network, 167 | time, 168 | )? 169 | .await?; 170 | 171 | if !response.status().is_success() { 172 | bail!("could not fetch rpm config from: {config_url}"); 173 | } 174 | 175 | let json = serde_json::from_slice(&response.into_body().collect().await?.to_bytes()) 176 | .context("parsing json config from rpm url")?; 177 | 178 | Ok(json) 179 | } 180 | -------------------------------------------------------------------------------- /cli/src/up_down.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::sync::Arc; 5 | 6 | use anyhow::Context; 7 | use nq_core::client::{wait_for_finish, ThroughputClient}; 8 | use nq_core::{ConnectionType, Network, Time, TokioTime}; 9 | use nq_tokio_network::TokioNetwork; 10 | use tokio_util::sync::CancellationToken; 11 | use tracing::info; 12 | 13 | use crate::args::up_down::{DownloadArgs, UploadArgs}; 14 | use crate::args::ConnType; 15 | use crate::util::pretty_secs; 16 | 17 | use serde_json::json; 18 | 19 | /// Run a download test. 
20 | pub async fn download(args: DownloadArgs) -> anyhow::Result<()> { 21 | let shutdown = CancellationToken::new(); 22 | let time = Arc::new(TokioTime::new()) as Arc; 23 | let network = 24 | Arc::new(TokioNetwork::new(Arc::clone(&time), shutdown.clone())) as Arc; 25 | 26 | let conn_type = match args.conn_type { 27 | ConnType::H1 => ConnectionType::H1, 28 | ConnType::H2 => ConnectionType::H2, 29 | ConnType::H3 => unimplemented!("H3 is not yet implemented"), // ConnectionType::H3, 30 | }; 31 | 32 | info!("downloading: {}", args.url); 33 | 34 | let inflight_body = ThroughputClient::download() 35 | .new_connection(conn_type) 36 | .send( 37 | args.url.parse().context("parsing download url")?, 38 | Arc::clone(&network), 39 | Arc::clone(&time), 40 | shutdown.clone(), 41 | )? 42 | .await?; 43 | 44 | let timing = inflight_body 45 | .timing 46 | .context("expected inflight body to have connection timing data")?; 47 | 48 | let body_start = time.now(); 49 | let finished_result = wait_for_finish(inflight_body.events).await?; 50 | let finished = time.now(); 51 | 52 | let time_body_raw = finished.duration_since(body_start); 53 | let time_total_raw = timing.time_secure() + time_body_raw; 54 | 55 | let dns_time = pretty_secs(timing.dns_time().as_secs_f64()); 56 | let time_connect = pretty_secs(timing.time_connect().as_secs_f64()); 57 | let time_secure = pretty_secs(timing.time_secure().as_secs_f64()); 58 | let time_body = pretty_secs(time_body_raw.as_secs_f64()); 59 | let time_total = pretty_secs(time_total_raw.as_secs_f64()); 60 | let bytes_total = finished_result.total; 61 | let throughput = ((finished_result.total as f64 * 8.0) / time_total_raw.as_secs_f64()) as u64; 62 | 63 | let json = json!({ 64 | "dns_time": dns_time, 65 | "time_connect": time_connect, 66 | "time_secure": time_secure, 67 | "time_body": time_body, 68 | "time_total": time_total, 69 | "bytes_total": bytes_total, 70 | "throughput": throughput, 71 | }); 72 | 73 | println!("{:#}", json); 74 | 75 | let _ = 
tokio::time::timeout(tokio::time::Duration::from_secs(1), async { 76 | shutdown.cancel(); 77 | }) 78 | .await; 79 | 80 | Ok(()) 81 | } 82 | 83 | /// Run an upload test. 84 | // todo(fisher): investigate body completion events. Moving to Socket stats is 85 | // likely the best option. 86 | #[allow(dead_code)] 87 | pub async fn upload(args: UploadArgs) -> anyhow::Result<()> { 88 | let shutdown = CancellationToken::new(); 89 | let time = Arc::new(TokioTime::new()) as Arc; 90 | let network = 91 | Arc::new(TokioNetwork::new(Arc::clone(&time), shutdown.clone())) as Arc; 92 | 93 | let conn_type = match args.conn_type { 94 | ConnType::H1 => ConnectionType::H1, // ConnectionType::H1, 95 | ConnType::H2 => ConnectionType::H2, 96 | ConnType::H3 => unimplemented!("H3 is not yet implemented"), // ConnectionType::H3, 97 | }; 98 | 99 | let bytes = args.bytes.unwrap_or(10_000_000); 100 | 101 | println!("{}\n", args.url); 102 | 103 | let inflight_body = ThroughputClient::upload(bytes) 104 | .new_connection(conn_type) 105 | .send( 106 | args.url.parse()?, 107 | Arc::clone(&network), 108 | Arc::clone(&time), 109 | shutdown.clone(), 110 | )? 
111 | .await?; 112 | 113 | let timing = inflight_body 114 | .timing 115 | .context("expected inflight body to have connection timing data")?; 116 | 117 | let body_start = time.now(); 118 | let finished_result = wait_for_finish(inflight_body.events).await?; 119 | let finished = time.now(); 120 | 121 | let time_body_raw = finished.duration_since(body_start); 122 | let time_total_raw = timing.time_secure() + time_body_raw; 123 | 124 | let dns_time = pretty_secs(timing.dns_time().as_secs_f64()); 125 | let time_connect = pretty_secs(timing.time_connect().as_secs_f64()); 126 | let time_secure = pretty_secs(timing.time_secure().as_secs_f64()); 127 | let time_body = pretty_secs(time_body_raw.as_secs_f64()); 128 | let time_total = pretty_secs(time_total_raw.as_secs_f64()); 129 | let bytes_total = finished_result.total; 130 | let throughput = ((finished_result.total as f64 * 8.0) / time_total_raw.as_secs_f64()) as u64; 131 | 132 | let json = json!({ 133 | "dns_time": dns_time, 134 | "time_connect": time_connect, 135 | "time_secure": time_secure, 136 | "time_body": time_body, 137 | "time_total": time_total, 138 | "bytes_total": bytes_total, 139 | "throughput": throughput, 140 | }); 141 | 142 | println!("{:#}", json); 143 | 144 | let _ = tokio::time::timeout(tokio::time::Duration::from_secs(1), async { 145 | shutdown.cancel(); 146 | }) 147 | .await; 148 | 149 | Ok(()) 150 | } 151 | -------------------------------------------------------------------------------- /cli/src/util.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | /// Converts f64 seconds to f64 ms rounded to 3 decimals. 5 | pub fn pretty_secs_to_ms(secs: f64) -> f64 { 6 | (secs * 1_000_000.0).trunc() / 1_000.0 7 | } 8 | 9 | /// Rounds f64 ms to 3 decimals. 
10 | pub fn pretty_ms(ms: f64) -> f64 { 11 | (ms * 1_000.0).trunc() / 1_000.0 12 | } 13 | 14 | /// Round f64 seconds to 3 decimals. 15 | pub fn pretty_secs(secs: f64) -> f64 { 16 | (secs * 1_000.0).trunc() / 1_000.0 17 | } 18 | -------------------------------------------------------------------------------- /crates/nq-core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nq-core" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | repository = { workspace = true } 6 | authors = { workspace = true } 7 | license = { workspace = true } 8 | readme = "../README.md" 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | boring = { workspace = true } 13 | http = { workspace = true } 14 | http-body-util = { workspace = true } 15 | hyper = { workspace = true, features = ["client", "http1", "http2"] } 16 | hyper-util = { workspace = true, features = ["tokio"] } 17 | pin-project-lite = { workspace = true } 18 | rustls-native-certs = "0.7.0" 19 | tokio = { workspace = true, features = ["rt-multi-thread", "time", "net", "test-util"] } 20 | tokio-boring = { workspace = true } 21 | tokio-util = { workspace = true } 22 | tracing = { workspace = true } 23 | rand = "0.9.0-beta.1" 24 | bytes = "1.6.0" -------------------------------------------------------------------------------- /crates/nq-core/src/body/counting_body.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 
2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::{sync::Arc, task::Poll, time::Duration}; 5 | 6 | use hyper::body::{Body, Bytes}; 7 | use tokio::sync::mpsc; 8 | use tracing::{debug, error, trace}; 9 | 10 | use crate::{Time, Timestamp}; 11 | 12 | /// [`BodyEvent`]s are generated by a [`CountingBody`] and describe the number 13 | /// of total bytes seen or that the body has finished. 14 | #[derive(Debug)] 15 | pub enum BodyEvent { 16 | /// The number of bytes sent by the wrapped body at the given [`Timestamp`]. 17 | ByteCount { 18 | /// When the event was generated. 19 | at: Timestamp, 20 | /// The total number of bytes seen. 21 | total: usize, 22 | }, 23 | /// The [`CountingBody`] has finished sending the body it wraps. 24 | Finished { 25 | /// When the body finished. 26 | at: Timestamp, 27 | }, 28 | } 29 | 30 | pin_project_lite::pin_project! { 31 | #[allow(missing_docs)] 32 | pub struct CountingBody { 33 | #[pin] 34 | inner: B, 35 | time: Arc, 36 | last_sent: Timestamp, 37 | update_every: Duration, 38 | total: usize, 39 | events_tx: mpsc::UnboundedSender, 40 | sent_finished: bool, 41 | } 42 | } 43 | 44 | impl CountingBody { 45 | /// Create a [`CountingBody`] by wrapping the given body. Updates are sent 46 | /// every `update_every` duration and timestamps are taken with the given 47 | /// [`Arc`]. 
48 | pub fn new( 49 | inner: B, 50 | update_every: Duration, 51 | time: Arc, 52 | ) -> (Self, mpsc::UnboundedReceiver) { 53 | let (events_tx, events_rx) = mpsc::unbounded_channel(); 54 | let last_sent = time.now(); 55 | 56 | events_tx 57 | .send(BodyEvent::ByteCount { 58 | at: last_sent, 59 | total: 0, 60 | }) 61 | .expect("no data buffered"); 62 | 63 | ( 64 | Self { 65 | inner, 66 | time, 67 | last_sent, 68 | update_every, 69 | total: 0, 70 | events_tx, 71 | sent_finished: false, 72 | }, 73 | events_rx, 74 | ) 75 | } 76 | } 77 | 78 | impl Body for CountingBody 79 | where 80 | B: Body, 81 | B::Error: std::fmt::Debug, 82 | { 83 | type Data = B::Data; 84 | 85 | type Error = B::Error; 86 | 87 | #[inline(always)] 88 | fn poll_frame( 89 | self: std::pin::Pin<&mut Self>, 90 | cx: &mut std::task::Context<'_>, 91 | ) -> Poll, Self::Error>>> { 92 | let this = self.project(); 93 | 94 | // stop the body if there's no event sender. 95 | if this.events_tx.is_closed() { 96 | debug!("events_tx is closed, stopping"); 97 | return Poll::Ready(None); 98 | } 99 | 100 | trace!("polling frame"); 101 | match this.inner.poll_frame(cx) { 102 | Poll::Ready(Some(Ok(frame))) => { 103 | if let Some(data) = frame.data_ref() { 104 | *this.total += data.len(); 105 | } 106 | 107 | let now = this.time.now(); 108 | 109 | // We've waited long enough, send an update. 110 | if now.duration_since(*this.last_sent) >= *this.update_every { 111 | let event = BodyEvent::ByteCount { 112 | at: now, 113 | total: *this.total, 114 | }; 115 | 116 | *this.last_sent = now; 117 | 118 | debug!(?event, "sending event"); 119 | 120 | // We can drop the error here since this is an 121 | // increasing counter. The next send will hopefully 122 | // capture it. 
123 | let _ = this.events_tx.send(event); 124 | } 125 | 126 | Poll::Ready(Some(Ok(frame))) 127 | } 128 | // Stream finished, send the last count 129 | Poll::Ready(None) => { 130 | let now = this.time.now(); 131 | 132 | debug!("body finished"); 133 | let event = BodyEvent::ByteCount { 134 | at: now, 135 | total: *this.total, 136 | }; 137 | 138 | debug!(?event, "sending event"); 139 | let _ = this.events_tx.send(event); 140 | 141 | if !*this.sent_finished { 142 | debug!(at=?now, "sending finished"); 143 | let _ = this.events_tx.send(BodyEvent::Finished { at: now }); 144 | *this.sent_finished = true; 145 | } else { 146 | debug!("already sent finish"); 147 | } 148 | 149 | Poll::Ready(None) 150 | } 151 | Poll::Ready(Some(Err(e))) => { 152 | error!(error=?e, "body errored"); 153 | Poll::Ready(Some(Err(e))) 154 | } 155 | Poll::Pending => { 156 | trace!("body pending"); 157 | Poll::Pending 158 | } 159 | } 160 | } 161 | 162 | fn is_end_stream(&self) -> bool { 163 | self.inner.is_end_stream() 164 | } 165 | 166 | fn size_hint(&self) -> hyper::body::SizeHint { 167 | self.inner.size_hint() 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /crates/nq-core/src/body/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | mod counting_body; 5 | mod upload_body; 6 | 7 | use std::convert::Infallible; 8 | use std::sync::Arc; 9 | use tokio::sync::RwLock; 10 | 11 | use http::{HeaderMap, HeaderValue}; 12 | use http_body_util::{combinators::BoxBody, Empty}; 13 | use hyper::body::Bytes; 14 | use tokio::sync::mpsc; 15 | 16 | /// A simple boxed body. 17 | pub type NqBody = BoxBody; 18 | 19 | /// Creates an empty body. 
20 | pub fn empty() -> Empty { 21 | Empty::new() 22 | } 23 | 24 | use crate::{connection::ConnectionTiming, EstablishedConnection, Timestamp}; 25 | 26 | pub use self::{ 27 | counting_body::{BodyEvent, CountingBody}, 28 | upload_body::UploadBody, 29 | }; 30 | 31 | /// A body that is currently being sent or received. 32 | pub struct InflightBody { 33 | pub start: Timestamp, 34 | pub connection: Arc>, 35 | pub timing: Option, 36 | pub events: mpsc::UnboundedReceiver, 37 | pub headers: HeaderMap, 38 | } 39 | -------------------------------------------------------------------------------- /crates/nq-core/src/body/upload_body.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use bytes::BytesMut; 5 | use std::{ 6 | convert::Infallible, 7 | pin::Pin, 8 | task::{Context, Poll}, 9 | }; 10 | 11 | use hyper::body::{Body, Bytes, Frame, SizeHint}; 12 | use rand::rngs::StdRng; 13 | use rand::{Rng, SeedableRng}; 14 | use tracing::trace; 15 | 16 | /// A body that continually uploads a chunk of random bytes until 17 | /// it has sent a given number of bytes. 
18 | #[derive(Debug)] 19 | pub struct UploadBody { 20 | remaining: usize, 21 | chunk: Bytes, 22 | rng: StdRng, 23 | } 24 | 25 | impl UploadBody { 26 | pub fn new(size: usize) -> Self { 27 | const CHUNK_SIZE: usize = 256 * 1024; // 256 KB 28 | 29 | let mut rng = StdRng::from_os_rng(); 30 | let chunk_size = std::cmp::min(CHUNK_SIZE, size); 31 | let mut chunk = vec![0u8; chunk_size]; 32 | rng.fill(&mut chunk[..]); 33 | 34 | UploadBody { 35 | remaining: size, 36 | chunk: Bytes::from(chunk), 37 | rng, 38 | } 39 | } 40 | } 41 | 42 | impl Body for UploadBody { 43 | type Data = Bytes; 44 | type Error = Infallible; 45 | 46 | fn poll_frame( 47 | mut self: Pin<&mut Self>, 48 | _cx: &mut Context<'_>, 49 | ) -> Poll, Self::Error>>> { 50 | trace!( 51 | "upload body poll_frame: remaining={}, chunk.len={}", 52 | self.remaining, 53 | self.chunk.len() 54 | ); 55 | 56 | Poll::Ready(match self.remaining { 57 | 0 => None, 58 | remaining if remaining > self.chunk.len() => { 59 | self.remaining -= self.chunk.len(); 60 | // Use BytesMut for in-place modifications 61 | let mut chunk = BytesMut::with_capacity(self.chunk.len()); 62 | chunk.resize(self.chunk.len(), 0); 63 | self.rng.fill(&mut chunk[..]); 64 | // Convert to Bytes 65 | self.chunk = chunk.freeze(); 66 | Some(Ok(Frame::data(self.chunk.clone()))) 67 | } 68 | remaining => { 69 | self.remaining = 0; 70 | Some(Ok(Frame::data(self.chunk.slice(..remaining)))) 71 | } 72 | }) 73 | } 74 | 75 | fn is_end_stream(&self) -> bool { 76 | self.remaining == 0 77 | } 78 | 79 | fn size_hint(&self) -> SizeHint { 80 | SizeHint::with_exact(self.remaining as u64) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /crates/nq-core/src/client.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | //! 
Defines two clients, a [`ThroughputClient`] and a normal [`Client`]. The 5 | //! [`ThroughputClient`] trackes the sending or receiving of body data and sends 6 | //! byte count updates to a listener. This is useful for determining the 7 | //! throughput of a flow. 8 | 9 | use std::{convert::Infallible, net::ToSocketAddrs, sync::Arc, time::Duration}; 10 | use tokio::sync::RwLock; 11 | 12 | use anyhow::Context; 13 | use http::{HeaderMap, HeaderValue, Uri}; 14 | use http_body_util::BodyExt; 15 | use hyper::body::{Body, Bytes, Incoming}; 16 | use tokio::select; 17 | use tokio::sync::mpsc; 18 | use tokio_util::sync::CancellationToken; 19 | use tracing::{debug, error, info, Instrument}; 20 | 21 | use crate::{ 22 | body::{empty, BodyEvent, CountingBody, InflightBody, NqBody, UploadBody}, 23 | oneshot_result, ConnectionType, EstablishedConnection, Network, OneshotResult, Time, Timestamp, 24 | }; 25 | 26 | /// The default user agent for networkquality requests 27 | pub const MACH_USER_AGENT: &str = "mach/0.1.0"; 28 | 29 | /// Describes the direction of the client. This determines if the client times 30 | /// the upload or download of a body. 31 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 32 | pub enum Direction { 33 | /// Download the response body. 34 | Down, 35 | /// Upload the given number of bytes. 36 | Up(usize), 37 | } 38 | 39 | /// A [`ThroughputClient`] is a simple client which drives a request/response pair 40 | /// and returns an [`InflightBody`]. 41 | /// 42 | /// This should be used if you do not care about the request or response, and just 43 | /// need to load a connection. 44 | /// 45 | /// The returned [`InflightBody`] can be used to track the progress of an upload 46 | /// or download and when it finishes. 47 | pub struct ThroughputClient { 48 | connection: Option>>, 49 | new_connection_type: Option, 50 | headers: Option, 51 | direction: Direction, 52 | } 53 | 54 | impl ThroughputClient { 55 | /// Create an download oriented [`ThroughputClient`]. 
56 | pub fn download() -> Self { 57 | Self { 58 | connection: None, 59 | new_connection_type: None, 60 | headers: None, 61 | direction: Direction::Down, 62 | } 63 | } 64 | 65 | /// Create an upload oriented [`ThroughputClient`]. 66 | pub fn upload(size: usize) -> Self { 67 | Self { 68 | connection: None, 69 | new_connection_type: None, 70 | headers: None, 71 | direction: Direction::Up(size), 72 | } 73 | } 74 | 75 | /// Send requests on the given [`EstablishedConnection`]. 76 | pub fn with_connection(mut self, connection: Arc>) -> Self { 77 | self.connection = Some(connection); 78 | self 79 | } 80 | 81 | /// Create a new connection for each request. 82 | pub fn new_connection(mut self, conn_type: ConnectionType) -> Self { 83 | self.new_connection_type = Some(conn_type); 84 | self 85 | } 86 | 87 | /// Set the headers for the upload or download request. 88 | pub fn headers(mut self, headers: HeaderMap) -> Self { 89 | self.headers = Some(headers); 90 | self 91 | } 92 | 93 | /// Execute a download or upload request against the given [`Uri`]. 
#[tracing::instrument(skip(self, network, time, shutdown))] 95 | pub fn send( 96 | self, 97 | uri: Uri, 98 | network: Arc, 99 | time: Arc, 100 | shutdown: CancellationToken, 101 | ) -> anyhow::Result> { 102 | let mut headers = self.headers.unwrap_or_default(); 103 | 104 | if !headers.contains_key("User-Agent") { 105 | /* default to the crate-wide UA constant rather than duplicating the literal */ headers.insert("User-Agent", HeaderValue::from_static(MACH_USER_AGENT)); 106 | } 107 | 108 | let host = uri.host().context("uri is missing a host")?.to_string(); 109 | let host_with_port = format!("{}:{}", host, uri.port_u16().unwrap_or(443)); 110 | 111 | let method = match self.direction { 112 | Direction::Down => "GET", 113 | Direction::Up(_) => "POST", 114 | }; 115 | 116 | let (tx, rx) = oneshot_result(); 117 | let mut events = None; 118 | 119 | let body: NqBody = match self.direction { 120 | Direction::Up(size) => { 121 | tracing::debug!("tracking upload body"); 122 | let dummy_body = UploadBody::new(size); 123 | 124 | let (body, events_rx) = 125 | CountingBody::new(dummy_body, Duration::from_millis(50), Arc::clone(&time)); 126 | events = Some(events_rx); 127 | 128 | headers.insert("Content-Length", size.into()); 129 | headers.insert("Content-Type", HeaderValue::from_static("text/plain")); 130 | 131 | body.boxed() 132 | } 133 | Direction::Down => { 134 | tracing::debug!("created empty download body"); 135 | empty().boxed() 136 | } 137 | }; 138 | 139 | let mut request = http::Request::builder() 140 | .method(method) 141 | .uri(uri) 142 | .body(body)?; 143 | 144 | tracing::debug!("created request"); 145 | 146 | *request.headers_mut() = headers.clone(); 147 | 148 | tokio::spawn( 149 | async move { 150 | let start = time.now(); 151 | 152 | let connection = if let Some(connection) = self.connection { 153 | connection 154 | } else if let Some(conn_type) = self.new_connection_type { 155 | info!("creating new connection to {host_with_port}"); 156 | 157 | let addrs = network 158 | .resolve(host_with_port) 159 | .await 160 | .context("unable to resolve 
host")?; 161 | 162 | debug!("addrs: {addrs:?}"); 163 | 164 | network 165 | .new_connection(start, addrs[0], host, conn_type) 166 | .await 167 | .context("creating new connection")? 168 | } else { 169 | todo!() 170 | }; 171 | 172 | let conn_timing = { 173 | let conn = connection.read().await; 174 | conn.timing() 175 | }; 176 | 177 | debug!("connection used"); 178 | let response_fut = network.send_request(connection.clone(), request); 179 | 180 | let mut response_body = match self.direction { 181 | Direction::Up(_) => { 182 | debug!("sending upload events"); 183 | if tx 184 | .send(Ok(InflightBody { 185 | connection: connection.clone(), 186 | timing: Some(conn_timing), 187 | events: events.expect("events were set above"), 188 | start, 189 | headers, 190 | })) 191 | .is_err() 192 | { 193 | error!("error sending upload events"); 194 | } 195 | 196 | let (parts, incoming) = response_fut.await?.into_parts(); 197 | info!("upload response parts: {:?}", parts); 198 | 199 | incoming.boxed() 200 | } 201 | Direction::Down => { 202 | let (parts, incoming) = response_fut.await?.into_parts(); 203 | 204 | let (counting_body, events) = CountingBody::new( 205 | incoming, 206 | Duration::from_millis(100), 207 | Arc::clone(&time), 208 | ); 209 | 210 | debug!("sending download events"); 211 | if tx 212 | .send(Ok(InflightBody { 213 | connection: connection.clone(), 214 | timing: Some(conn_timing), 215 | start, 216 | events, 217 | headers: parts.headers, 218 | })) 219 | .is_err() 220 | { 221 | error!("error sending download events"); 222 | } 223 | 224 | counting_body.boxed() 225 | } 226 | }; 227 | 228 | tokio::spawn( 229 | async move { 230 | // Consume the response body and keep the connection alive. Stop if we hit an error. 231 | info!("waiting for response body"); 232 | 233 | loop { 234 | select! 
{ 235 | Some(res) = response_body.frame() => if let Err(e) = res { 236 | error!("body closing: {e}"); 237 | break; 238 | }, 239 | _ = shutdown.cancelled() => break, 240 | } 241 | } 242 | } 243 | .in_current_span(), 244 | ); 245 | 246 | Ok::<_, anyhow::Error>(()) 247 | } 248 | .in_current_span(), 249 | ); 250 | 251 | Ok(rx) 252 | } 253 | } 254 | 255 | /// A [`Client`] is a simple client which sends a request and returns a response. 256 | /// 257 | /// The connection timing, e.g. TCP/TLS overhead, will be inserted into the response 258 | /// if it exits. 259 | #[derive(Default)] 260 | pub struct Client { 261 | connection: Option>>, 262 | new_connection_type: Option, 263 | headers: Option, 264 | method: Option, 265 | } 266 | 267 | impl Client { 268 | /// Send requests on the given [`EstablishedConnection`]. 269 | pub fn with_connection(mut self, connection: Arc>) -> Self { 270 | self.connection = Some(connection); 271 | self 272 | } 273 | 274 | /// Create a new connection for each request. 275 | pub fn new_connection(mut self, conn_type: ConnectionType) -> Self { 276 | self.new_connection_type = Some(conn_type); 277 | self 278 | } 279 | 280 | /// Set the headers for the upload or download request. 281 | pub fn headers(mut self, headers: HeaderMap) -> Self { 282 | self.headers = Some(headers); 283 | self 284 | } 285 | 286 | /// Set the method used by the client. 287 | pub fn method(mut self, method: &str) -> Self { 288 | self.method = Some(method.to_string()); 289 | self 290 | } 291 | 292 | /// Send a request to the given uri with the given body, timing how long it 293 | /// took. 
294 | #[tracing::instrument(skip(self, body, network, time))] 295 | pub fn send( 296 | self, 297 | uri: Uri, 298 | body: B, 299 | network: Arc, 300 | time: Arc, 301 | ) -> anyhow::Result>> 302 | where 303 | B: Body + Send + Sync + 'static, 304 | { 305 | let mut headers = self.headers.unwrap_or_default(); 306 | 307 | if !headers.contains_key("User-Agent") { 308 | headers.insert("User-Agent", HeaderValue::from_static(MACH_USER_AGENT)); 309 | } 310 | 311 | let host = uri.host().context("uri is missing a host")?.to_string(); 312 | 313 | let remote_addr = (host.as_str(), uri.port_u16().unwrap_or(443)) 314 | .to_socket_addrs()? 315 | .next() 316 | .context("could not resolve large download url")?; 317 | 318 | let method: http::Method = self.method.as_deref().unwrap_or("GET").parse()?; 319 | 320 | let mut request = http::Request::builder() 321 | .method(method) 322 | .uri(uri) 323 | .body(body.boxed())?; 324 | 325 | *request.headers_mut() = headers.clone(); 326 | 327 | debug!("sending request"); 328 | 329 | let (tx, rx) = oneshot_result(); 330 | tokio::spawn( 331 | async move { 332 | let start = time.now(); 333 | 334 | let connection = if let Some(connection) = self.connection { 335 | connection 336 | } else if let Some(conn_type) = self.new_connection_type { 337 | info!("creating new connection"); 338 | network 339 | .new_connection(start, remote_addr, host, conn_type) 340 | .await? 
341 | } else { 342 | todo!() 343 | }; 344 | 345 | // todo(fisher): fine-grained send timings for requests 346 | let mut response = network.send_request(connection.clone(), request).await?; 347 | 348 | let timing = { 349 | let conn = connection.read().await; 350 | conn.timing() 351 | }; 352 | 353 | debug!(?connection, "connection used"); 354 | 355 | response.extensions_mut().insert(timing); 356 | 357 | if tx.send(Ok(response)).is_err() { 358 | error!("unable to send response"); 359 | } 360 | 361 | Ok::<_, anyhow::Error>(()) 362 | } 363 | .in_current_span(), 364 | ); 365 | 366 | Ok(rx) 367 | } 368 | } 369 | 370 | /// Consumes body events until the body is finished and returns 371 | /// the time at which the body finished. 372 | pub async fn wait_for_finish( 373 | mut body_events: mpsc::UnboundedReceiver, 374 | ) -> anyhow::Result { 375 | let mut body_total = 0; 376 | 377 | while let Some(event) = body_events.recv().await { 378 | match event { 379 | BodyEvent::ByteCount { total, .. } => body_total = total, 380 | BodyEvent::Finished { at } => { 381 | return Ok(FinishResult { 382 | total: body_total, 383 | finished_at: at, 384 | }); 385 | } 386 | } 387 | } 388 | 389 | Err(anyhow::anyhow!("body did not finish")) 390 | } 391 | 392 | /// The result of [`wait_for_finish`] 393 | #[derive(Debug)] 394 | pub struct FinishResult { 395 | /// The total number of bytes seen by the body. 396 | pub total: usize, 397 | /// When the body finished. 398 | pub finished_at: Timestamp, 399 | } 400 | -------------------------------------------------------------------------------- /crates/nq-core/src/connection/http.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 
2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::fmt::Debug; 5 | use std::future::Future; 6 | use std::net::SocketAddr; 7 | use std::pin::Pin; 8 | 9 | use boring::ssl::{SslConnector, SslMethod, SslVerifyMode}; 10 | use boring::x509::store::X509StoreBuilder; 11 | use boring::x509::X509; 12 | use http::{Request, Response}; 13 | use hyper::body::Incoming; 14 | use hyper::client::conn::{http1, http2}; 15 | use hyper_util::rt::TokioIo; 16 | use tokio::select; 17 | use tokio_util::sync::CancellationToken; 18 | use tracing::{debug, error, info, Instrument}; 19 | 20 | use crate::body::NqBody; 21 | use crate::util::ByteStream; 22 | use crate::{ConnectionTiming, ConnectionType, ResponseFuture, Time}; 23 | 24 | pub type TlsStream = tokio_boring::SslStream>; 25 | 26 | /// An [`EstablishedConnection`] contains the connection's timing and a handle 27 | /// to send HTTP requests with. 28 | #[derive(Debug)] 29 | pub struct EstablishedConnection { 30 | timing: ConnectionTiming, 31 | send_request: Option, 32 | } 33 | 34 | /// Represents an established connection with timing information and a send request handler. 35 | impl EstablishedConnection { 36 | /// Creates a new `EstablishedConnection`. 37 | pub fn new(timing: ConnectionTiming, send_request: SendRequest) -> Self { 38 | Self { 39 | timing, 40 | send_request: Some(send_request), 41 | } 42 | } 43 | 44 | /// Sends a request using the connection. 45 | pub fn send_request(&mut self, req: Request) -> Option { 46 | self.send_request.as_mut().map(|s| s.send_request(req)) 47 | } 48 | 49 | /// Returns the timing information of the connection. 50 | pub fn timing(&self) -> ConnectionTiming { 51 | self.timing 52 | } 53 | 54 | /// Drops the send request handler. 
55 | pub fn drop_send_request(&mut self) { 56 | self.send_request = None; 57 | } 58 | } 59 | 60 | #[tracing::instrument(skip(io, time))] 61 | pub async fn tls_connection( 62 | conn_type: ConnectionType, 63 | domain: &str, 64 | timing: &mut ConnectionTiming, 65 | io: impl ByteStream, 66 | time: &dyn Time, 67 | ) -> anyhow::Result { 68 | let mut builder = SslConnector::builder(SslMethod::tls_client())?; 69 | 70 | // Use platform CA certs 71 | let mut store_builder = X509StoreBuilder::new()?; 72 | if let Ok(ca_certs) = rustls_native_certs::load_native_certs() { 73 | for root in ca_certs { 74 | let _ = store_builder.add_cert(X509::from_der(&root)?); 75 | } 76 | } 77 | builder.set_verify_cert_store(store_builder.build())?; 78 | builder.set_verify(SslVerifyMode::PEER); 79 | 80 | let alpn: &[u8] = match conn_type { 81 | ConnectionType::H1 => b"\x08http/1.1", 82 | ConnectionType::H2 => b"\x02h2", 83 | ConnectionType::H3 => b"\x02h3", 84 | }; 85 | 86 | builder.set_alpn_protos(alpn)?; 87 | let config = builder.build().configure()?; 88 | 89 | let ssl_stream = tokio_boring::connect(config, domain, Box::new(io) as Box) 90 | .await 91 | .map_err(|e| anyhow::anyhow!("unable to create tls stream: {e}"))?; 92 | 93 | timing.set_secure(time.now()); 94 | 95 | debug!("created tls connection"); 96 | 97 | Ok(ssl_stream) 98 | } 99 | 100 | #[tracing::instrument(skip(io, time, shutdown))] 101 | pub async fn start_h1_conn( 102 | domain: String, 103 | mut timing: ConnectionTiming, 104 | io: impl ByteStream, 105 | time: &dyn Time, 106 | shutdown: CancellationToken, 107 | ) -> anyhow::Result { 108 | let (send_request, connection) = http1::handshake(TokioIo::new(io)).await?; 109 | timing.set_application(time.now()); 110 | 111 | tokio::spawn( 112 | async move { 113 | select! 
{ 114 | Err(e) = connection => { 115 | error!(error=%e, "error running h1 connection"); 116 | } 117 | _ = shutdown.cancelled() => { 118 | debug!("shutting down h1 connection"); 119 | } 120 | } 121 | 122 | info!("connection finished"); 123 | } 124 | .in_current_span(), 125 | ); 126 | 127 | let established_connection = EstablishedConnection::new( 128 | timing, 129 | SendRequest::H1 { 130 | dispatch: send_request, 131 | }, 132 | ); 133 | 134 | Ok(established_connection) 135 | } 136 | 137 | #[tracing::instrument(skip(timing, io, time, shutdown))] 138 | pub async fn start_h2_conn( 139 | addr: SocketAddr, 140 | domain: String, 141 | mut timing: ConnectionTiming, 142 | io: impl ByteStream, 143 | time: &dyn Time, 144 | shutdown: CancellationToken, 145 | ) -> anyhow::Result { 146 | let (dispatch, connection) = http2::handshake(TokioExecutor, TokioIo::new(io)).await?; 147 | timing.set_application(time.now()); 148 | 149 | debug!("finished h2 handshake"); 150 | 151 | tokio::spawn( 152 | async move { 153 | select! 
{ 154 | Err(e) = connection => { 155 | error!(error=%e, "error running h2 connection"); 156 | } 157 | _ = shutdown.cancelled() => { 158 | debug!("shutting down h2 connection"); 159 | } 160 | } 161 | 162 | info!("connection finished"); 163 | } 164 | .in_current_span(), 165 | ); 166 | 167 | info!(?timing, "established connection"); 168 | let established_connection = EstablishedConnection::new(timing, SendRequest::H2 { dispatch }); 169 | 170 | Ok(established_connection) 171 | } 172 | 173 | #[derive(Debug)] 174 | pub enum SendRequest { 175 | #[allow(unused)] 176 | H1 { 177 | dispatch: http1::SendRequest, 178 | }, 179 | H2 { 180 | dispatch: http2::SendRequest, 181 | }, 182 | } 183 | 184 | impl SendRequest { 185 | fn send_request( 186 | &mut self, 187 | req: Request, 188 | ) -> Pin>> + Send>> { 189 | match self { 190 | SendRequest::H1 { 191 | dispatch: send_request, 192 | } => Box::pin(send_request.send_request(req)), 193 | SendRequest::H2 { 194 | dispatch: send_request, 195 | } => Box::pin(send_request.send_request(req)), 196 | } 197 | } 198 | } 199 | 200 | #[derive(Clone)] 201 | struct TokioExecutor; 202 | 203 | impl hyper::rt::Executor for TokioExecutor 204 | where 205 | F: Future + Send + 'static, 206 | F::Output: Send + 'static, 207 | { 208 | fn execute(&self, future: F) { 209 | tokio::spawn(future); 210 | } 211 | } 212 | -------------------------------------------------------------------------------- /crates/nq-core/src/connection/map.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 
2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::collections::VecDeque; 5 | use std::convert::Infallible; 6 | use std::net::SocketAddr; 7 | use std::sync::Arc; 8 | 9 | use anyhow::Result; 10 | use http::Request; 11 | use http_body_util::combinators::BoxBody; 12 | use hyper::body::Bytes; 13 | use tokio::sync::RwLock; 14 | use tokio_util::sync::CancellationToken; 15 | use tracing::info; 16 | 17 | use crate::connection::http::{ 18 | start_h1_conn, start_h2_conn, tls_connection, EstablishedConnection, 19 | }; 20 | use crate::util::ByteStream; 21 | use crate::{ConnectionTiming, ConnectionType, ResponseFuture, Time}; 22 | 23 | /// Creates and holds [`EstablishedConnection`]s in a VecDeque. 24 | #[derive(Default, Debug)] 25 | pub struct ConnectionManager { 26 | connections: RwLock>>>, 27 | } 28 | 29 | impl ConnectionManager { 30 | /// Creates a new connection on the given io. 31 | #[allow(clippy::too_many_arguments)] 32 | pub async fn new_connection( 33 | &self, 34 | mut timing: ConnectionTiming, 35 | remote_addr: SocketAddr, 36 | domain: String, 37 | conn_type: ConnectionType, 38 | io: Box, 39 | time: &dyn Time, 40 | shutdown: CancellationToken, 41 | ) -> Result>> { 42 | let connection = match conn_type { 43 | ConnectionType::H1 => { 44 | let stream = tls_connection(conn_type, &domain, &mut timing, io, time).await?; 45 | start_h1_conn(domain, timing, stream, time, shutdown).await? 46 | } 47 | ConnectionType::H2 => { 48 | let stream = tls_connection(conn_type, &domain, &mut timing, io, time).await?; 49 | start_h2_conn(remote_addr, domain, timing, stream, time, shutdown).await? 50 | } 51 | ConnectionType::H3 => todo!(), 52 | }; 53 | 54 | let connection = Arc::new(RwLock::new(connection)); 55 | self.connections.write().await.push_back(connection.clone()); 56 | Ok(connection) 57 | } 58 | 59 | /// Sends a request on the given connection. 
60 | pub async fn send_request( 61 | &self, 62 | connection: Arc>, 63 | request: Request>, 64 | ) -> Option { 65 | info!("Sending request on the specified connection"); 66 | let mut conn = connection.write().await; 67 | conn.send_request(request) 68 | } 69 | 70 | /// The number of [`EstablishedConnection`]s being held in the manager. 71 | pub async fn len(&self) -> usize { 72 | self.connections.read().await.len() 73 | } 74 | 75 | /// Returns if the [`ConnectionManager`] is empty. 76 | pub async fn is_empty(&self) -> bool { 77 | self.connections.read().await.is_empty() 78 | } 79 | 80 | /// Drop all `SendRequest` structs, effectively cancelling all connections. 81 | pub async fn shutdown(&self) { 82 | for connection in self.connections.write().await.iter_mut() { 83 | let mut conn = connection.write().await; 84 | conn.drop_send_request(); 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /crates/nq-core/src/connection/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | mod http; 5 | mod map; 6 | 7 | use std::time::Duration; 8 | 9 | use crate::Timestamp; 10 | 11 | pub use self::http::EstablishedConnection; 12 | pub use self::map::ConnectionManager; 13 | 14 | /// The L7 type of a connection. 15 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 16 | pub enum ConnectionType { 17 | /// Create an HTTP/1.1 connection. 18 | H1, 19 | /// Create an HTTP/2 connection. 20 | H2, 21 | /// Create an HTTP/3 connection. 22 | H3, 23 | } 24 | 25 | /// Timing stats for the establishment of a connection. All durations 26 | /// are calculated from the start of the connection. 27 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 28 | pub struct ConnectionTiming { 29 | /// When the connection was started. 
30 | start: Timestamp, 31 | /// How long it took to resolve the host to an IP. 32 | time_lookup: Duration, 33 | /// How long it took for the transport to handshake. 34 | /// 35 | /// If this was a TCP connection, this is the time 36 | /// until the first SYN+ACK. 37 | /// 38 | /// If this is a QUIC connection, this is the time until 39 | /// the QUIC handshake completes. 40 | time_connect: Duration, 41 | /// How long it took to secure the stream after the transport 42 | /// connected. 43 | /// 44 | /// For TCP streams, this is the time to perform the TLS handshake. 45 | /// 46 | /// For QUIC streams, this is 0, since the QUIC connection implies 47 | /// a secured connection. 48 | time_secure: Duration, 49 | /// How long it took to setup the L7 protocol, H1/2/3. 50 | time_application: Duration, 51 | 52 | // Duration of the DNS lookup 53 | dns_time: Duration, 54 | } 55 | 56 | impl ConnectionTiming { 57 | /// Creates a new [`ConnectionTiming`]. 58 | pub fn new(start: Timestamp) -> Self { 59 | Self { 60 | start, 61 | time_lookup: Duration::ZERO, 62 | time_connect: Duration::ZERO, 63 | time_secure: Duration::ZERO, 64 | time_application: Duration::ZERO, 65 | dns_time: Duration::ZERO, 66 | } 67 | } 68 | 69 | /// Set the time it took to perform DNS resolution of the peer's host. 70 | pub fn set_lookup(&mut self, at: Timestamp) { 71 | self.time_lookup = at.duration_since(self.start); 72 | } 73 | 74 | /// Set the time it took to create the connection with the remote peer. 75 | pub fn set_connect(&mut self, at: Timestamp) { 76 | self.time_connect = at.duration_since(self.start); 77 | } 78 | 79 | /// Set the time it took to secure a connection. 80 | pub fn set_secure(&mut self, at: Timestamp) { 81 | self.time_secure = at.duration_since(self.start); 82 | } 83 | 84 | /// Set the time it took to setup the L7 protocol, H1/2/3. 
85 | pub fn set_application(&mut self, at: Timestamp) { 86 | self.time_application = at.duration_since(self.start); 87 | } 88 | 89 | /// Returns when the connection started. 90 | pub fn start(&self) -> Timestamp { 91 | self.start 92 | } 93 | 94 | /// Returns how long it took for DNS to resolve. 95 | pub fn time_lookup(&self) -> Duration { 96 | self.time_lookup 97 | } 98 | 99 | /// Returns how long it took for the transport to connect. 100 | pub fn time_connect(&self) -> Duration { 101 | self.time_connect 102 | } 103 | 104 | /// Set the duration of the DNS lookup 105 | pub fn set_dns_lookup(&mut self, duration: Duration) { 106 | self.dns_time = duration; 107 | } 108 | 109 | /// Returns the DNS lookup duration. 110 | pub fn dns_time(&self) -> Duration { 111 | self.dns_time 112 | } 113 | 114 | /// Returns how long it took for the security handshake to complete. 115 | pub fn time_secure(&self) -> Duration { 116 | self.time_secure 117 | } 118 | 119 | /// Returns how long it took for the H/{1,2,3} handshake to complete. 120 | pub fn time_application(&self) -> Duration { 121 | self.time_application 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /crates/nq-core/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | //! The core abstraction for networkquality. 5 | //! 6 | //! Defines the main traits: 7 | //! - [`Network`]: for abstracting over connections and http requests. 8 | //! - [`Time`]: for abstracting over different implementations of time. 
9 | 10 | #![deny(missing_docs)] 11 | 12 | mod body; 13 | pub mod client; 14 | mod connection; 15 | mod network; 16 | mod time; 17 | mod upgraded; 18 | mod util; 19 | 20 | pub use crate::{ 21 | body::{BodyEvent, CountingBody, NqBody}, 22 | connection::{ConnectionManager, ConnectionTiming, ConnectionType, EstablishedConnection}, 23 | network::Network, 24 | time::{Time, Timestamp, TokioTime}, 25 | upgraded::ConnectUpgraded, 26 | util::{oneshot_result, OneshotResult, ResponseFuture}, 27 | }; 28 | 29 | pub use anyhow::Error; 30 | pub use anyhow::Result; 31 | -------------------------------------------------------------------------------- /crates/nq-core/src/network/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use crate::{body::NqBody, ConnectionType, EstablishedConnection, OneshotResult, Timestamp}; 5 | use http::Response; 6 | use hyper::body::Incoming; 7 | use std::net::SocketAddr; 8 | use std::sync::Arc; 9 | use tokio::sync::RwLock; 10 | 11 | /// A network abstraction for resolving hosts, creating connections, and sending requests. 12 | pub trait Network: Send + Sync + 'static { 13 | /// Resolves a host to a list of socket addresses. 14 | fn resolve(&self, host: String) -> OneshotResult>; 15 | 16 | /// Creates a new connection to the given domain. 17 | fn new_connection( 18 | &self, 19 | start: Timestamp, 20 | remote_addr: SocketAddr, 21 | domain: String, 22 | conn_type: ConnectionType, 23 | ) -> OneshotResult>>; 24 | 25 | /// Sends a request over the specified connection. 
26 | fn send_request( 27 | &self, 28 | connection: Arc>, 29 | request: http::Request, 30 | ) -> OneshotResult>; 31 | } 32 | 33 | impl Network for Arc { 34 | fn resolve(&self, host: String) -> OneshotResult> { 35 | self.as_ref().resolve(host) 36 | } 37 | 38 | fn new_connection( 39 | &self, 40 | start: Timestamp, 41 | remote_addr: SocketAddr, 42 | domain: String, 43 | conn_type: ConnectionType, 44 | ) -> OneshotResult>> { 45 | self.as_ref() 46 | .new_connection(start, remote_addr, domain, conn_type) 47 | } 48 | 49 | fn send_request( 50 | &self, 51 | connection: Arc>, 52 | request: http::Request, 53 | ) -> OneshotResult> { 54 | self.as_ref().send_request(connection, request) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /crates/nq-core/src/time.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | //! Defines a [`Time`] trait used to abstract over the different ways a 5 | //! timestamp can be created or a process slept. This lets us switch between 6 | //! tokio, system and in the future, wasi/wasm based time implementations. 7 | 8 | use std::ops::{Add, Sub}; 9 | use std::sync::Arc; 10 | use std::time::Duration; 11 | use tokio::time::Instant; 12 | 13 | /// A timestamp with `Instant` for precise time measurement. 14 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 15 | pub struct Timestamp(Instant); 16 | 17 | impl Timestamp { 18 | /// Calculate the saturating duration since an earlier timestamp. 19 | pub fn duration_since(&self, earlier: Timestamp) -> Duration { 20 | self.0 21 | .checked_duration_since(earlier.0) 22 | .unwrap_or_else(|| Duration::from_secs(0)) 23 | } 24 | 25 | /// Calculate the duration elapsed since the creation of this timestamp. 
26 | pub fn elapsed(&self) -> Duration { 27 | self.0.elapsed() 28 | } 29 | 30 | /// Create a new `Timestamp` from the current `Instant`. 31 | pub fn now() -> Self { 32 | Timestamp(Instant::now()) 33 | } 34 | 35 | /// Create a new `Timestamp` from the current `Instant` relative to a base `Instant`. 36 | pub fn now_instant(base_instant: Instant) -> Self { 37 | let now = Instant::now(); 38 | let duration = now.duration_since(base_instant); 39 | Timestamp(base_instant + duration) 40 | } 41 | 42 | /// Create a new `Timestamp` from a relative duration in microseconds 43 | pub fn from_duration_micros(micros: u64) -> Self { 44 | Timestamp(Instant::now() + Duration::from_micros(micros)) 45 | } 46 | } 47 | 48 | impl Add for Timestamp { 49 | type Output = Timestamp; 50 | 51 | fn add(self, duration: Duration) -> Self::Output { 52 | Timestamp(self.0 + duration) 53 | } 54 | } 55 | 56 | impl Sub for Timestamp { 57 | type Output = Timestamp; 58 | 59 | fn sub(self, duration: Duration) -> Self::Output { 60 | Timestamp(self.0 - duration) 61 | } 62 | } 63 | 64 | /// An abstraction over time. Provides the ability to create a timestamp. 65 | pub trait Time: Send + Sync { 66 | /// The current time. 67 | fn now(&self) -> Timestamp; 68 | } 69 | 70 | impl Time for Arc 71 | where 72 | T: Time, 73 | { 74 | fn now(&self) -> Timestamp { 75 | ::now(self) 76 | } 77 | } 78 | 79 | impl Time for Box 80 | where 81 | T: Time, 82 | { 83 | fn now(&self) -> Timestamp { 84 | ::now(self) 85 | } 86 | } 87 | 88 | impl Time for &T 89 | where 90 | T: Time, 91 | { 92 | fn now(&self) -> Timestamp { 93 | ::now(self) 94 | } 95 | } 96 | 97 | /// An implementation of `Time` based on `tokio::Instant`. 98 | #[derive(Debug, Clone, Copy)] 99 | pub struct TokioTime { 100 | base_instant: Instant, 101 | base_timestamp: Timestamp, 102 | } 103 | 104 | impl TokioTime { 105 | /// Creates a new `TokioTime`. 
106 | pub fn new() -> Self { 107 | let base_instant = tokio::time::Instant::now(); 108 | let base_timestamp = Timestamp::now(); // Use the current timestamp 109 | 110 | Self { 111 | base_instant, 112 | base_timestamp, 113 | } 114 | } 115 | } 116 | 117 | impl Default for TokioTime { 118 | fn default() -> Self { 119 | Self::new() 120 | } 121 | } 122 | 123 | impl Time for TokioTime { 124 | fn now(&self) -> Timestamp { 125 | let now = Instant::now(); 126 | let elapsed = now.duration_since(self.base_instant); 127 | self.base_timestamp + elapsed 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /crates/nq-core/src/upgraded.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | //! An upgraded connection object which implements AsyncRead + AsyncWrite. 5 | //! 6 | //! Mostly copied from rxtx, thanks Ivan for the great abstractions :) 7 | 8 | use std::{ 9 | pin::Pin, 10 | task::{Context, Poll}, 11 | }; 12 | 13 | use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; 14 | 15 | pub trait ConnectUpgradedInner: AsyncRead + AsyncWrite + Send + Unpin + 'static {} 16 | impl ConnectUpgradedInner for hyper_util::rt::TokioIo {} 17 | 18 | /// A byte stream for CONNECT request upgrade communication. 19 | pub struct ConnectUpgraded(Box); 20 | 21 | // SAFETY: it's safe to assume that `Upgraded` is `Sync` as all the transports that we use 22 | // within the library (`TcpStream`, `tokio_boring::SslStream`) are `Sync`. 23 | unsafe impl Sync for ConnectUpgraded {} 24 | 25 | impl ConnectUpgraded { 26 | /// Create a [`ConnectUpgrade`] from something that implements [`ConnectUpgradedInner`]. 
27 | pub fn new(upgraded: impl ConnectUpgradedInner) -> Self { 28 | Self(Box::new(upgraded)) 29 | } 30 | } 31 | 32 | impl AsyncRead for ConnectUpgraded { 33 | #[inline] 34 | fn poll_read( 35 | self: Pin<&mut Self>, 36 | cx: &mut Context<'_>, 37 | buf: &mut ReadBuf, 38 | ) -> Poll> { 39 | AsyncRead::poll_read(Pin::new(&mut self.get_mut().0), cx, buf) 40 | } 41 | } 42 | 43 | impl AsyncWrite for ConnectUpgraded { 44 | #[inline] 45 | fn poll_write( 46 | self: Pin<&mut Self>, 47 | cx: &mut Context, 48 | buf: &[u8], 49 | ) -> Poll> { 50 | AsyncWrite::poll_write(Pin::new(&mut self.get_mut().0), cx, buf) 51 | } 52 | 53 | #[inline] 54 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 55 | AsyncWrite::poll_flush(Pin::new(&mut self.get_mut().0), cx) 56 | } 57 | 58 | #[inline] 59 | fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 60 | AsyncWrite::poll_shutdown(Pin::new(&mut self.get_mut().0), cx) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /crates/nq-core/src/util.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::{future::Future, pin::Pin, task::Poll}; 5 | 6 | use http::Response; 7 | use hyper::body::Incoming; 8 | use tokio::{ 9 | io::{AsyncRead, AsyncWrite}, 10 | sync::oneshot, 11 | }; 12 | 13 | /// A future that resolves to an http::Response. 14 | pub type ResponseFuture = Pin>> + Send>>; 15 | 16 | /// Create a oneshot channel with a custom implementation to make `Result` 17 | /// handling more ergonomic. 18 | pub fn oneshot_result() -> (oneshot::Sender>, OneshotResult) { 19 | let (tx, rx) = oneshot::channel(); 20 | 21 | (tx, OneshotResult { inner: rx }) 22 | } 23 | 24 | /// An abstraction over a [`oneshot::Receiver`] of a result. 
25 | pub struct OneshotResult { 26 | inner: oneshot::Receiver>, 27 | } 28 | 29 | impl Future for OneshotResult { 30 | type Output = crate::Result; 31 | 32 | fn poll( 33 | self: std::pin::Pin<&mut Self>, 34 | cx: &mut std::task::Context<'_>, 35 | ) -> std::task::Poll { 36 | match Pin::new(&mut self.get_mut().inner).poll(cx) { 37 | Poll::Ready(Ok(t)) => Poll::Ready(t), 38 | Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())), 39 | Poll::Pending => Poll::Pending, 40 | } 41 | } 42 | } 43 | 44 | /// Trait representing a generic readable and writable byte stream. 45 | /// Mostly copied from rxtx, thanks Ivan for the great abstractions :) 46 | pub trait ByteStream: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {} 47 | 48 | impl ByteStream for T where T: AsyncRead + AsyncWrite + Sync + Send + Unpin + 'static {} 49 | -------------------------------------------------------------------------------- /crates/nq-latency/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nq-latency" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | repository = { workspace = true } 6 | authors = { workspace = true } 7 | license = { workspace = true } 8 | readme = "../README.md" 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | nq-core = { workspace = true } 13 | nq-stats = { workspace = true } 14 | 15 | anyhow = { workspace = true, features = ["backtrace"] } 16 | http = { workspace = true } 17 | http-body-util = { workspace = true } 18 | tokio-util = { workspace = true } 19 | tracing = { workspace = true } 20 | url = { workspace = true, features = ["serde"] } -------------------------------------------------------------------------------- /crates/nq-latency/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 
2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::{fmt::Debug, sync::Arc}; 5 | 6 | use anyhow::Context; 7 | use http::Request; 8 | use http_body_util::BodyExt; 9 | use nq_core::Network; 10 | use nq_core::{ConnectionType, Time, Timestamp}; 11 | use nq_stats::TimeSeries; 12 | use tokio_util::sync::CancellationToken; 13 | use tracing::info; 14 | use url::Url; 15 | 16 | #[derive(Debug, Clone)] 17 | pub struct LatencyConfig { 18 | pub url: Url, 19 | pub runs: usize, 20 | } 21 | 22 | impl Default for LatencyConfig { 23 | fn default() -> Self { 24 | Self { 25 | url: "https://h3.speed.cloudflare.com/__down?bytes=10" 26 | .parse() 27 | .unwrap(), 28 | runs: 20, 29 | } 30 | } 31 | } 32 | 33 | pub struct Latency { 34 | start: Timestamp, 35 | config: LatencyConfig, 36 | probe_results: TimeSeries, 37 | } 38 | 39 | impl Latency { 40 | pub fn new(config: LatencyConfig) -> Self { 41 | Self { 42 | start: Timestamp::now(), 43 | config, 44 | probe_results: TimeSeries::new(), 45 | } 46 | } 47 | 48 | pub async fn run_test( 49 | mut self, 50 | network: Arc, 51 | time: Arc, 52 | _shutdown: CancellationToken, 53 | ) -> anyhow::Result { 54 | self.start = time.now(); 55 | 56 | for run in 0..self.config.runs { 57 | let url = self.config.url.to_owned(); 58 | let network = Arc::clone(&network); 59 | let time = Arc::clone(&time); 60 | 61 | let host = url 62 | .host_str() 63 | .context("small download url must have a domain")?; 64 | let host_with_port = format!("{}:{}", host, url.port_or_known_default().unwrap_or(443)); 65 | 66 | let conn_start = time.now(); 67 | 68 | let addrs = network 69 | .resolve(host_with_port) 70 | .await 71 | .context("unable to resolve host")?; 72 | let time_lookup = time.now(); 73 | 74 | let connection = network 75 | .new_connection(conn_start, addrs[0], host.to_string(), ConnectionType::H1) 76 | .await 77 | .context("unable to create new connection")?; 78 | { 79 | let conn = 
connection.write().await; 80 | conn.timing().set_lookup(time_lookup); 81 | } 82 | 83 | let tcp_handshake_duration = { 84 | let conn = connection.read().await; 85 | conn.timing() 86 | .time_connect() 87 | .saturating_sub(conn.timing().time_lookup()) 88 | }; 89 | 90 | info!( 91 | "latency run {run}: {:2.4} s.", 92 | tcp_handshake_duration.as_secs_f32() 93 | ); 94 | 95 | // perform a simple GET to do some amount of work 96 | let response = network 97 | .send_request( 98 | connection, 99 | Request::get(url.as_str()).body(Default::default())?, 100 | ) 101 | .await 102 | .context("GET request failed")?; 103 | 104 | let _ = response 105 | .into_body() 106 | .collect() 107 | .await 108 | .context("unable to read GET request body")?; 109 | 110 | self.probe_results 111 | .add(conn_start, tcp_handshake_duration.as_secs_f64()); 112 | } 113 | 114 | // while let Some(res) = task_set.join_next().await { 115 | // let (conn_start, tcp_handshake_duration) = res??; 116 | // self.probe_results.add(conn_start, tcp_handshake_duration); 117 | // } 118 | 119 | Ok(LatencyResult { 120 | measurements: self.probe_results, 121 | }) 122 | } 123 | } 124 | 125 | #[derive(Default, Debug)] 126 | pub struct LatencyResult { 127 | pub measurements: TimeSeries, 128 | } 129 | 130 | impl LatencyResult { 131 | pub fn median(&self) -> Option { 132 | self.measurements.quantile(0.50) 133 | } 134 | 135 | /// Jitter as calculated as the average distance between consecutive rtt 136 | /// measurments. 
137 | pub fn jitter(&self) -> Option { 138 | let values: Vec<_> = self.measurements.values().collect(); 139 | 140 | let distances = values.windows(2).filter_map(|window| { 141 | window 142 | .last() 143 | .zip(window.first()) 144 | .map(|(last, first)| (last - first).abs()) 145 | }); 146 | 147 | let sum: f64 = distances.clone().sum(); 148 | let count = distances.count(); 149 | 150 | if count > 0 { 151 | Some(sum / count as f64) 152 | } else { 153 | None 154 | } 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /crates/nq-load-generator/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nq-load-generator" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | repository = { workspace = true } 6 | authors = { workspace = true } 7 | license = { workspace = true } 8 | readme = "../README.md" 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | nq-core = { workspace = true } 13 | nq-stats = { workspace = true } 14 | 15 | anyhow = { workspace = true, features = ["backtrace"] } 16 | http = { workspace = true } 17 | rand = { workspace = true } 18 | serde = { workspace = true, features = ["derive"] } 19 | tokio = { workspace = true } 20 | tokio-util = { workspace = true } 21 | tracing = { workspace = true } 22 | url = { workspace = true, features = ["serde"] } -------------------------------------------------------------------------------- /crates/nq-load-generator/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 
2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::{collections::HashMap, sync::Arc}; 5 | 6 | use anyhow::Context; 7 | use http::{HeaderMap, HeaderName, HeaderValue}; 8 | use nq_core::client::{Direction, ThroughputClient}; 9 | use nq_core::{ 10 | oneshot_result, BodyEvent, ConnectionType, EstablishedConnection, Network, OneshotResult, Time, 11 | Timestamp, 12 | }; 13 | use nq_stats::CounterSeries; 14 | use rand::seq::SliceRandom; 15 | use serde::Deserialize; 16 | use tokio::sync::mpsc::UnboundedReceiver; 17 | use tokio::sync::RwLock; 18 | use tokio_util::sync::CancellationToken; 19 | use tracing::Instrument; 20 | 21 | #[derive(Debug, Deserialize)] 22 | pub struct LoadConfig { 23 | pub headers: HashMap, 24 | pub download_url: url::Url, 25 | pub upload_url: url::Url, 26 | pub upload_size: usize, 27 | } 28 | 29 | pub struct LoadGenerator { 30 | headers: HeaderMap, 31 | config: LoadConfig, 32 | loads: Vec, 33 | } 34 | 35 | impl LoadGenerator { 36 | pub fn new(config: LoadConfig) -> anyhow::Result { 37 | let mut headers = HeaderMap::new(); 38 | 39 | for (key, value) in config.headers.iter() { 40 | headers.insert( 41 | HeaderName::from_bytes(key.as_bytes())?, 42 | HeaderValue::from_bytes(value.as_bytes())?, 43 | ); 44 | } 45 | 46 | Ok(Self { 47 | headers, 48 | config, 49 | loads: Vec::new(), 50 | }) 51 | } 52 | 53 | #[tracing::instrument(skip(self, network, time, shutdown))] 54 | pub fn new_loaded_connection( 55 | &self, 56 | direction: Direction, 57 | conn_type: ConnectionType, 58 | network: Arc, 59 | time: Arc, 60 | shutdown: CancellationToken, 61 | ) -> anyhow::Result> { 62 | let (tx, rx) = oneshot_result(); 63 | 64 | let client = match direction { 65 | Direction::Down => ThroughputClient::download(), 66 | Direction::Up(size) => ThroughputClient::upload(size), 67 | }; 68 | 69 | let response_fut = client 70 | .new_connection(conn_type) 71 | .headers(self.headers.clone()) 72 | 
.send( 73 | match direction { 74 | Direction::Up(_) => self.config.upload_url.as_str().parse()?, 75 | Direction::Down => self.config.download_url.as_str().parse()?, 76 | }, 77 | network, 78 | time, 79 | shutdown, 80 | )?; 81 | 82 | tracing::debug!("got loaded connection response future"); 83 | 84 | tokio::spawn( 85 | async move { 86 | let inflight_body = response_fut 87 | .await 88 | .context("could not await response for loaded connection")?; 89 | 90 | tracing::debug!("sending loaded connection"); 91 | 92 | let _ = tx.send(Ok(LoadedConnection { 93 | connection: inflight_body.connection, 94 | events_rx: inflight_body.events, 95 | total_bytes_series: CounterSeries::new(), 96 | finished_at: None, 97 | })); 98 | 99 | Ok::<_, anyhow::Error>(()) 100 | } 101 | .in_current_span(), 102 | ); 103 | 104 | Ok(rx) 105 | } 106 | 107 | pub fn connections(&self) -> impl Iterator { 108 | self.loads.iter() 109 | } 110 | 111 | pub fn random_connection(&self) -> Option>> { 112 | let loads: Vec<_> = self.ongoing_loads().collect(); 113 | loads 114 | .choose(&mut rand::thread_rng()) 115 | .map(|c| c.connection.clone()) 116 | } 117 | 118 | pub fn push(&mut self, loaded_connection: LoadedConnection) { 119 | self.loads.push(loaded_connection); 120 | } 121 | 122 | pub fn update(&mut self) { 123 | for load in &mut self.loads { 124 | load.update(); 125 | } 126 | } 127 | 128 | pub fn ongoing_loads(&self) -> impl Iterator { 129 | self.loads.iter().filter(|load| load.finished_at.is_none()) 130 | } 131 | 132 | pub fn count_loads(&self) -> usize { 133 | self.ongoing_loads().count() 134 | } 135 | 136 | pub fn into_connections(self) -> Vec { 137 | self.loads 138 | } 139 | } 140 | 141 | #[derive(Debug)] 142 | pub struct LoadedConnection { 143 | connection: Arc>, 144 | events_rx: UnboundedReceiver, 145 | total_bytes_series: CounterSeries, 146 | finished_at: Option, 147 | } 148 | 149 | impl LoadedConnection { 150 | pub fn update(&mut self) { 151 | while let Ok(event) = self.events_rx.try_recv() { 152 | 
match event { 153 | BodyEvent::ByteCount { at, total } => self.total_bytes_series.add(at, total as f64), 154 | BodyEvent::Finished { at } => self.finished_at = Some(at), 155 | } 156 | } 157 | } 158 | 159 | pub fn total_bytes_series(&self) -> &CounterSeries { 160 | &self.total_bytes_series 161 | } 162 | 163 | pub fn stop(&mut self) { 164 | self.events_rx.close(); 165 | self.update(); 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /crates/nq-packetloss/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nq-packetloss" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | repository = { workspace = true } 6 | authors = { workspace = true } 7 | license = { workspace = true } 8 | readme = "../README.md" 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | nq-core = { workspace = true } 13 | nq-load-generator = { workspace = true } 14 | nq-tokio-network = { workspace = true } 15 | 16 | anyhow = { workspace = true, features = ["backtrace"] } 17 | http = { workspace = true } 18 | http-body-util = { workspace = true } 19 | hyper = { workspace = true } 20 | rustls = { version = "0.23.10", default-features = false, features = ["std", "ring"] } 21 | serde = { workspace = true, features = ["derive"] } 22 | serde_json = { workspace = true } 23 | tokio = { workspace = true } 24 | tokio-util = { workspace = true } 25 | tracing = { workspace = true } 26 | url = { workspace = true, features = ["serde"] } 27 | webrtc = { workspace = true } -------------------------------------------------------------------------------- /crates/nq-packetloss/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. 
2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | //! This crate is a Rust implementation the Packet Loss measurement as performed by the javascript project 5 | //! at https://github.com/cloudflare/speedtest. 6 | //! 7 | //! As stated by that project, Packet loss is measured by submitting a set of UDP packets to a WebRTC TURN server 8 | //! in a round-trip fashion, and determining how many packets do not arrive. 9 | 10 | mod webrtc_data_channel; 11 | 12 | use nq_core::{ 13 | client::Direction, 14 | ConnectionType, Network, Time, TokioTime, 15 | }; 16 | use nq_load_generator::{LoadConfig, LoadGenerator, LoadedConnection}; 17 | use nq_tokio_network::TokioNetwork; 18 | use serde::{Deserialize, Serialize}; 19 | use std::{cmp::min, collections::HashMap, fmt::Display, sync::Arc, time::Duration}; 20 | use tokio::sync::{mpsc, RwLock}; 21 | use tokio_util::sync::CancellationToken; 22 | use tracing::Instrument; 23 | use url::Url; 24 | use webrtc_data_channel::{DataChannelEvent, WebRTCDataChannel}; 25 | 26 | #[derive(Clone, Debug)] 27 | pub struct PacketLossConfig { 28 | /// The target TURN server URI to send UDP packets 29 | pub turn_server_uri: String, 30 | /// The URL to send the request to for TURN server credentials 31 | pub turn_cred_request_url: Url, 32 | /// Total number of messages/packets to send 33 | pub num_packets: usize, 34 | /// Total number of messages to send in a batch before waiting 35 | pub batch_size: usize, 36 | /// Time to wait between batch sends 37 | pub batch_wait_time: Duration, 38 | /// Time to wait for receiving messages after all messages have been sent 39 | pub response_wait_time: Duration, 40 | /// Download URL to use for for the [`LoadGenerator`] 41 | pub download_url: Url, 42 | /// Upload URL to use for for the [`LoadGenerator`] 43 | pub upload_url: Url, 44 | } 45 | 46 | impl Default for PacketLossConfig { 47 | fn default() -> Self { 48 | Self { 49 | 
turn_server_uri: "turn:turn.speed.cloudflare.com:50000?transport=udp".to_owned(), 50 | turn_cred_request_url: "https://speed.cloudflare.com/turn-creds".parse().unwrap(), 51 | num_packets: 1000, 52 | batch_size: 10, 53 | batch_wait_time: Duration::from_millis(10), 54 | response_wait_time: Duration::from_millis(3000), 55 | download_url: "https://h3.speed.cloudflare.com/__down?bytes=10000000000" 56 | .parse() 57 | .unwrap(), 58 | upload_url: "https://h3.speed.cloudflare.com/__up" 59 | .parse() 60 | .unwrap(), 61 | } 62 | } 63 | } 64 | 65 | impl PacketLossConfig { 66 | pub fn load_config(&self) -> LoadConfig { 67 | LoadConfig { 68 | headers: HashMap::default(), 69 | download_url: self.download_url.clone(), 70 | upload_url: self.upload_url.clone(), 71 | upload_size: 4_000_000_000, // 4 GB 72 | } 73 | } 74 | } 75 | 76 | /// A [`PacketLoss`] is a test harness around a WebRTC connection that sends UDP packets to a 77 | /// TURN server and count the returned packets to calculate loss ratio. 78 | pub struct PacketLoss { 79 | config: Arc, 80 | load_generator: LoadGenerator, 81 | /// Used to track the messages received 82 | /// A sent message is stored as false until a response is received setting it to true 83 | message_tracker: Arc>>, 84 | } 85 | 86 | impl PacketLoss { 87 | pub fn new_with_config(config: PacketLossConfig) -> anyhow::Result { 88 | let load_generator = LoadGenerator::new(config.load_config())?; 89 | let message_vec = vec![false; config.num_packets]; 90 | let message_tracker: Arc>> = Arc::new(RwLock::new(message_vec)); 91 | Ok(Self { 92 | config: Arc::new(config), 93 | load_generator, 94 | message_tracker, 95 | }) 96 | } 97 | 98 | // Bi-directional load generators 99 | fn add_load_generators( 100 | &self, 101 | packet_event_tx: mpsc::Sender, 102 | shutdown: CancellationToken, 103 | ) -> anyhow::Result<()> { 104 | // Start generating load on the network in both directions 105 | let time = Arc::new(TokioTime::new()) as Arc; 106 | let network = 107 | 
Arc::new(TokioNetwork::new(Arc::clone(&time), shutdown.clone())) as Arc; 108 | 109 | self.new_load_generating_connection( 110 | packet_event_tx.clone(), 111 | Direction::Down, 112 | network.clone(), 113 | time.clone(), 114 | shutdown.clone(), 115 | )?; 116 | self.new_load_generating_connection( 117 | packet_event_tx.clone(), 118 | Direction::Up(4_000_000_000), 119 | network, 120 | time, 121 | shutdown.clone(), 122 | )?; 123 | Ok(()) 124 | } 125 | 126 | /// Run the Packet Loss test against the configured TURN server 127 | pub async fn run_test( 128 | mut self, 129 | turn_server_creds: TurnServerCreds, 130 | shutdown: CancellationToken, 131 | ) -> anyhow::Result { 132 | let (webrtc_event_tx, mut webrtc_event_rx) = 133 | tokio::sync::mpsc::unbounded_channel::(); 134 | let (packet_event_tx, mut packet_event_rx) = 135 | tokio::sync::mpsc::channel::(3); 136 | 137 | #[cfg(not(test))] 138 | self.add_load_generators(packet_event_tx.clone(), shutdown.clone())?; 139 | 140 | let mut webrtc_data_channel = WebRTCDataChannel::create_with_config( 141 | &self.config.turn_server_uri, 142 | &turn_server_creds, 143 | webrtc_event_tx.clone(), 144 | ) 145 | .await?; 146 | 147 | // Establishing the data channel starts the flow of messages to the TURN server 148 | webrtc_data_channel.establish_data_channel().await?; 149 | 150 | loop { 151 | tokio::select! { 152 | Some(event) = webrtc_event_rx.recv() => { 153 | match event { 154 | // Received when the WebRTC data channel has been established. 
Inidicates the data channel is ready for traffic 155 | DataChannelEvent::OnOpenChannel => { 156 | self.send_messages(webrtc_data_channel.clone(), packet_event_tx.clone(), shutdown.clone()); 157 | } 158 | 159 | // Each message that is received on the data channel is notified and handled here 160 | DataChannelEvent::OnReceivedMessage(message) => { 161 | self.message_tracker 162 | .write() 163 | .await 164 | .insert(message, true); 165 | } 166 | 167 | // A failure occured during setup of the data channel 168 | DataChannelEvent::ConnectionError(err) => { 169 | tracing::warn!("Failed to complete packet loss test {}", err); 170 | break; 171 | } 172 | } 173 | } 174 | 175 | Some(event) = packet_event_rx.recv() => { 176 | match event { 177 | // All of the messages / UDP packets have been submitted and the wait timeout has completed. 178 | PacketLossEvent::AllMessagesSent => { 179 | break; 180 | } 181 | 182 | // Successfully created a load generator 183 | PacketLossEvent::NewLoadedConnection(connection) => { 184 | self.load_generator.push(connection); 185 | } 186 | 187 | // Failed to create a load generator 188 | PacketLossEvent::Error(err) => { 189 | tracing::warn!("Failed to complete packet loss test {}", err); 190 | break; 191 | } 192 | } 193 | } 194 | 195 | _ = shutdown.cancelled() => { 196 | tracing::debug!("Shutdown requested"); 197 | break; 198 | } 199 | } 200 | } 201 | 202 | // stop all on-going loads generators 203 | let mut loads = self.load_generator.into_connections(); 204 | loads.iter_mut().for_each(|load| load.stop()); 205 | 206 | webrtc_data_channel.close_channel().await; 207 | 208 | // Only count the messages if they are flagged as true (i.e. 
received) 209 | let num_messages = self 210 | .message_tracker 211 | .read() 212 | .await 213 | .iter() 214 | .filter(|val| **val) 215 | .count(); 216 | let loss_ratio = (((self.config.num_packets - num_messages) as f64 217 | / self.config.num_packets as f64) 218 | * 10_000.0) 219 | .trunc() 220 | / 100.0; 221 | Ok(PacketLossResult { 222 | num_messages, 223 | loss_ratio, 224 | }) 225 | } 226 | 227 | /// Write messages to the data channel of an incrementing sequence of numbers used to identify unique messages/packets. 228 | fn send_messages( 229 | &self, 230 | mut webrtc_data_channel: WebRTCDataChannel, 231 | send_message_tx: mpsc::Sender, 232 | shutdown: CancellationToken, 233 | ) { 234 | let config = self.config.clone(); 235 | tokio::spawn(async move { 236 | let mut message_count = 0; 237 | loop { 238 | let batch_start = message_count; 239 | let batch_count = if config.batch_size == 0 { 240 | config.num_packets 241 | } else { 242 | min(message_count + config.batch_size, config.num_packets) 243 | }; 244 | // Send messages in batches of a configured size 245 | for i in batch_start..batch_count { 246 | // Send the message to the TURN server 247 | if let Err(err) = webrtc_data_channel.send_message(&i.to_be_bytes()).await { 248 | tracing::warn!("Send message failed: {}", err); 249 | } 250 | message_count += 1; 251 | } 252 | 253 | // Sleep for the configured amount of time then send noficiation that all messages have been sent 254 | if (message_count + 1) >= config.num_packets { 255 | tokio::time::sleep(config.response_wait_time).await; 256 | let _ = send_message_tx.send(PacketLossEvent::AllMessagesSent).await; 257 | break; 258 | } 259 | 260 | // Sleep the designated time between batch 261 | tokio::time::sleep(config.batch_wait_time).await; 262 | 263 | // Note, shutdown may be delayed by the configured sleeps 264 | if shutdown.is_cancelled() { 265 | tracing::debug!("Shutdown requested"); 266 | break; 267 | } 268 | } 269 | }); 270 | } 271 | 272 | /// A GET/POST to an 
endpoint which sends/receives a large number of bytes 273 | /// as quickly as possible. The intent of these connections is to saturate 274 | /// a single connection's flow. 275 | #[tracing::instrument(skip_all)] 276 | fn new_load_generating_connection( 277 | &self, 278 | event_tx: mpsc::Sender, 279 | direction: Direction, 280 | network: Arc, 281 | time: Arc, 282 | shutdown: CancellationToken, 283 | ) -> anyhow::Result<()> { 284 | let oneshot_res = self.load_generator.new_loaded_connection( 285 | direction, 286 | ConnectionType::H2, 287 | network, 288 | time, 289 | shutdown, 290 | )?; 291 | 292 | tokio::spawn( 293 | async move { 294 | let _ = match oneshot_res.await { 295 | Ok(conn) => event_tx.send(PacketLossEvent::NewLoadedConnection(conn)), 296 | Err(err) => event_tx.send(PacketLossEvent::Error(err)), 297 | } 298 | .await; 299 | } 300 | .in_current_span(), 301 | ); 302 | 303 | Ok(()) 304 | } 305 | } 306 | 307 | /// The result describes the number of UDP packets received and the loss ratio. 308 | #[derive(Debug, Serialize, Deserialize, PartialEq)] 309 | pub struct PacketLossResult { 310 | /// Number of messages received during test 311 | pub num_messages: usize, 312 | /// Calculated ratio based on expected returned messages and actual 313 | pub loss_ratio: f64, 314 | } 315 | 316 | impl Display for PacketLossResult { 317 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 318 | writeln!( 319 | f, 320 | "messages: {} loss ratio: {:.2}", 321 | self.num_messages, self.loss_ratio, 322 | ) 323 | } 324 | } 325 | 326 | pub enum PacketLossEvent { 327 | AllMessagesSent, 328 | NewLoadedConnection(LoadedConnection), 329 | Error(anyhow::Error), 330 | } 331 | 332 | /// A [`TurnServerCreds`] stores the fetched credentials required to communicat with the TURN server. 
333 | #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] 334 | pub struct TurnServerCreds { 335 | pub username: String, 336 | pub credential: String, 337 | } 338 | 339 | #[cfg(test)] 340 | mod tests { 341 | use crate::{ 342 | webrtc_data_channel::tests::TestTurnServer, PacketLoss, PacketLossConfig, PacketLossResult, 343 | }; 344 | use std::time::Duration; 345 | use tokio_util::sync::CancellationToken; 346 | 347 | async fn run_test( 348 | num_packets: usize, 349 | batch_size: usize, 350 | batch_wait_time: u64, 351 | response_wait_time: u64, 352 | ) -> anyhow::Result { 353 | let shutdown = CancellationToken::new(); 354 | let server = TestTurnServer::start_turn_server().await?; 355 | 356 | let config = PacketLossConfig { 357 | turn_server_uri: format!("turn:127.0.0.1:{}?transport=udp", server.server_port), 358 | turn_cred_request_url: "https://127.0.0.1/creds".parse().unwrap(), 359 | num_packets, 360 | batch_size, 361 | batch_wait_time: Duration::from_millis(batch_wait_time), 362 | response_wait_time: Duration::from_millis(response_wait_time), 363 | download_url: "https://h3.speed.cloudflare.com/__down?bytes=10000000000" 364 | .parse() 365 | .unwrap(), 366 | upload_url: "https://h3.speed.cloudflare.com/__up" 367 | .parse() 368 | .unwrap(), 369 | }; 370 | let packet_loss = PacketLoss::new_with_config(config)?; 371 | let packet_loss_result = packet_loss 372 | .run_test(server.get_test_creds(), shutdown) 373 | .await; 374 | server.close().await?; 375 | packet_loss_result 376 | } 377 | 378 | #[tokio::test] 379 | async fn test_with_server() -> anyhow::Result<()> { 380 | // Basic packet loss test against the test server 381 | let packet_loss_result = run_test(1000, 10, 10, 100).await?; 382 | assert_eq!( 383 | PacketLossResult { 384 | num_messages: 1000, 385 | loss_ratio: 0.0, 386 | }, 387 | packet_loss_result 388 | ); 389 | println!("Results: {:?}", packet_loss_result); 390 | 391 | Ok(()) 392 | } 393 | 394 | #[tokio::test] 395 | async fn test_no_batch_size() -> 
anyhow::Result<()> { 396 | // Test with zero batch size which results in one big batch 397 | let packet_loss_result = run_test(1000, 0, 10, 100).await?; 398 | assert_eq!( 399 | PacketLossResult { 400 | num_messages: 1000, 401 | loss_ratio: 0.0, 402 | }, 403 | packet_loss_result 404 | ); 405 | println!("Results: {:?}", packet_loss_result); 406 | 407 | Ok(()) 408 | } 409 | 410 | #[tokio::test] 411 | async fn test_too_large_batch_size() -> anyhow::Result<()> { 412 | // Test batch size larger than total size 413 | let packet_loss_result = run_test(100, 1000, 10, 100).await?; 414 | assert_eq!( 415 | PacketLossResult { 416 | num_messages: 100, 417 | loss_ratio: 0.0, 418 | }, 419 | packet_loss_result 420 | ); 421 | println!("Results: {:?}", packet_loss_result); 422 | 423 | Ok(()) 424 | } 425 | 426 | #[tokio::test] 427 | async fn test_equal_batch_size() -> anyhow::Result<()> { 428 | // Test one big batch 429 | let packet_loss_result = run_test(100, 100, 10, 100).await?; 430 | assert_eq!( 431 | PacketLossResult { 432 | num_messages: 100, 433 | loss_ratio: 0.0, 434 | }, 435 | packet_loss_result 436 | ); 437 | println!("Results: {:?}", packet_loss_result); 438 | 439 | Ok(()) 440 | } 441 | 442 | #[tokio::test] 443 | async fn test_zero_batch_wait() -> anyhow::Result<()> { 444 | // Test without any waits between batches 445 | let packet_loss_result = run_test(100, 10, 0, 100).await?; 446 | assert_eq!( 447 | PacketLossResult { 448 | num_messages: 100, 449 | loss_ratio: 0.0, 450 | }, 451 | packet_loss_result 452 | ); 453 | println!("Results: {:?}", packet_loss_result); 454 | 455 | Ok(()) 456 | } 457 | 458 | #[tokio::test] 459 | async fn test_cancel() -> anyhow::Result<()> { 460 | let shutdown = CancellationToken::new(); 461 | let server = TestTurnServer::start_turn_server().await?; 462 | 463 | // Configure a large enough batch with enough batch wait to allow the test runs a bit longer 464 | let config = PacketLossConfig { 465 | turn_server_uri: 
format!("turn:127.0.0.1:{}?transport=udp", server.server_port), 466 | turn_cred_request_url: "https://127.0.0.1/creds".parse().unwrap(), 467 | num_packets: 5000, 468 | batch_size: 10, 469 | batch_wait_time: Duration::from_millis(25), 470 | response_wait_time: Duration::from_millis(100), 471 | download_url: "https://h3.speed.cloudflare.com/__down?bytes=10000000000" 472 | .parse() 473 | .unwrap(), 474 | upload_url: "https://h3.speed.cloudflare.com/__up" 475 | .parse() 476 | .unwrap(), 477 | }; 478 | let packet_loss = PacketLoss::new_with_config(config)?; 479 | 480 | // Sleep before invoking cancel/shutdown 481 | let shutdown_clone = shutdown.clone(); 482 | tokio::spawn(async move { 483 | tokio::time::sleep(Duration::from_millis(5000)).await; 484 | shutdown_clone.cancel(); 485 | }); 486 | 487 | // Start the loss test 488 | let packet_loss_result = packet_loss 489 | .run_test(server.get_test_creds(), shutdown) 490 | .await; 491 | server.close().await?; 492 | let packet_loss_result = packet_loss_result?; 493 | assert_ne!( 494 | PacketLossResult { 495 | num_messages: 5000, 496 | loss_ratio: 0.0, 497 | }, 498 | packet_loss_result 499 | ); 500 | println!("Results: {:?}", packet_loss_result); 501 | 502 | Ok(()) 503 | } 504 | } 505 | -------------------------------------------------------------------------------- /crates/nq-packetloss/src/webrtc_data_channel.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2025 Cloudflare, Inc. 
2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use hyper::body::{Buf, Bytes}; 5 | use std::sync::{Arc, Weak}; 6 | use tokio::sync::mpsc::UnboundedSender; 7 | use webrtc::{ 8 | api::APIBuilder, 9 | data_channel::{data_channel_init::RTCDataChannelInit, RTCDataChannel}, 10 | ice_transport::{ 11 | ice_candidate::RTCIceCandidate, ice_protocol::RTCIceProtocol, ice_server::RTCIceServer, 12 | }, 13 | peer_connection::{ 14 | configuration::RTCConfiguration, peer_connection_state::RTCPeerConnectionState, 15 | policy::ice_transport_policy::RTCIceTransportPolicy, RTCPeerConnection, 16 | }, 17 | }; 18 | 19 | use crate::TurnServerCreds; 20 | 21 | /// A [`WebRTCDataChannel`] is a simple wrapper around a WebRTC peer connection and data channel used to 22 | /// send UDP packets over a TURN data connection. 23 | #[derive(Clone)] 24 | pub struct WebRTCDataChannel { 25 | sender_peer_connection: Arc, 26 | sender_data_channel: Arc, 27 | receiver_peer_connection: Arc, 28 | } 29 | 30 | /// Message results reported to run_test loop 31 | #[derive(Debug, PartialEq)] 32 | pub enum DataChannelEvent { 33 | /// Sent when sender data channel has been opened 34 | OnOpenChannel, 35 | /// Sent for each message received and the contents 36 | OnReceivedMessage(usize), 37 | /// A failure occured 38 | ConnectionError(String), 39 | } 40 | 41 | impl WebRTCDataChannel { 42 | /// Create the peer connections and configure callback handlers 43 | pub async fn create_with_config( 44 | turn_server_uri: &String, 45 | turn_server_creds: &TurnServerCreds, 46 | event_tx: UnboundedSender, 47 | ) -> anyhow::Result { 48 | // WebRTC uses rustls "ring" 49 | let _ = rustls::crypto::ring::default_provider().install_default(); 50 | 51 | // Create the sender peer and data connection used to generate messages 52 | let (sender_peer_connection, sender_data_channel) = 53 | create_sender_data_connection(turn_server_uri, turn_server_creds, 
event_tx.clone()) 54 | .await?; 55 | 56 | // Create the peer connection to receive the incoming messages 57 | let receiver_peer_connection = 58 | create_receiver_connection(turn_server_uri, turn_server_creds, event_tx.clone()) 59 | .await?; 60 | 61 | add_ice_candidate_handler( 62 | sender_peer_connection.clone(), 63 | receiver_peer_connection.clone(), 64 | ); 65 | add_ice_candidate_handler( 66 | receiver_peer_connection.clone(), 67 | sender_peer_connection.clone(), 68 | ); 69 | 70 | add_peer_state_failed_handler(sender_peer_connection.clone(), event_tx.clone()); 71 | add_peer_state_failed_handler(receiver_peer_connection.clone(), event_tx); 72 | 73 | Ok(Self { 74 | sender_peer_connection, 75 | sender_data_channel, 76 | receiver_peer_connection, 77 | }) 78 | } 79 | 80 | /// Initiate the handshake to establish the data channel 81 | pub async fn establish_data_channel(&self) -> anyhow::Result<()> { 82 | let reqs = self.sender_peer_connection.create_offer(None).await?; 83 | self.sender_peer_connection 84 | .set_local_description(reqs.clone()) 85 | .await?; 86 | self.receiver_peer_connection 87 | .set_remote_description(reqs) 88 | .await?; 89 | let resp = self.receiver_peer_connection.create_answer(None).await?; 90 | self.receiver_peer_connection 91 | .set_local_description(resp.clone()) 92 | .await?; 93 | self.sender_peer_connection 94 | .set_remote_description(resp) 95 | .await?; 96 | 97 | Ok(()) 98 | } 99 | 100 | /// Send the provided message over the data channel 101 | pub async fn send_message(&mut self, message: &[u8]) -> anyhow::Result { 102 | Ok(self 103 | .sender_data_channel 104 | .send(&Bytes::copy_from_slice(message)) 105 | .await?) 106 | } 107 | /// Closes the peer connections. The data channels will be closed internally to the peer connection. 
108 | pub async fn close_channel(&mut self) { 109 | let _ = self.sender_peer_connection.close().await; 110 | let _ = self.receiver_peer_connection.close().await; 111 | } 112 | } 113 | 114 | // The message sender creates a [`RTCPeerConnection`] followed by a [`RTCDataChannel`] used to send the 115 | // test messages. The creation of the data channel is notified via the [`Sender`]. 116 | async fn create_sender_data_connection( 117 | turn_server_uri: &String, 118 | creds: &TurnServerCreds, 119 | event_tx: UnboundedSender, 120 | ) -> anyhow::Result<(Arc, Arc)> { 121 | let peer_connection = Arc::new(create_peer_connection(turn_server_uri, creds).await?); 122 | 123 | // Since this is a packet loss test make sure retransmits are set to 0 124 | let options = Some(RTCDataChannelInit { 125 | ordered: Some(false), 126 | max_retransmits: Some(0u16), 127 | ..Default::default() 128 | }); 129 | 130 | let data_channel = peer_connection 131 | .create_data_channel("channel", options) 132 | .await?; 133 | 134 | data_channel.on_open(Box::new(|| { 135 | Box::pin({ 136 | async move { 137 | let _ = event_tx.send(DataChannelEvent::OnOpenChannel); 138 | } 139 | }) 140 | })); 141 | 142 | Ok((peer_connection, data_channel)) 143 | } 144 | 145 | /// The message receiver creates a [`RTCPeerConnection`] and opens the receiving end of the senders [`RTCDataChannel`]. 146 | /// The data channel is used to receive the sent messages and forward the contents via a [`Sender`]. 
147 | async fn create_receiver_connection( 148 | turn_server_uri: &String, 149 | creds: &TurnServerCreds, 150 | event_tx: UnboundedSender, 151 | ) -> anyhow::Result> { 152 | let peer_connection = Arc::new(create_peer_connection(turn_server_uri, creds).await?); 153 | 154 | peer_connection.on_data_channel(Box::new(move |data_channel| { 155 | Box::pin({ 156 | let event_tx = event_tx.clone(); 157 | async move { 158 | data_channel.on_message(Box::new(move |mut message| { 159 | Box::pin({ 160 | let event_tx = event_tx.clone(); 161 | async move { 162 | const USIZE_BYTES: usize = (usize::BITS / 8) as usize; 163 | if message.data.len() == USIZE_BYTES { 164 | let received_number = message.data.get_u64() as usize; 165 | let _ = event_tx 166 | .send(DataChannelEvent::OnReceivedMessage(received_number)); 167 | } else { 168 | tracing::warn!("message unexpected length! {}", message.data.len()); 169 | } 170 | } 171 | }) 172 | })); 173 | } 174 | }) 175 | })); 176 | 177 | Ok(peer_connection) 178 | } 179 | 180 | /// Allocates a new [`RTCPeerConnection`] 181 | async fn create_peer_connection( 182 | turn_server_uri: &String, 183 | creds: &TurnServerCreds, 184 | ) -> anyhow::Result { 185 | // Create API without any media options 186 | let api = APIBuilder::new().build(); 187 | 188 | // Configure the ice server 189 | let ice_servers = vec![RTCIceServer { 190 | urls: vec![turn_server_uri.to_string()], 191 | username: creds.username.clone(), 192 | credential: creds.credential.clone(), 193 | }]; 194 | 195 | // Configure the relay transport policy 196 | let config = RTCConfiguration { 197 | ice_servers, 198 | ice_transport_policy: RTCIceTransportPolicy::Relay, 199 | ..Default::default() 200 | }; 201 | 202 | Ok(api.new_peer_connection(config).await?) 
203 | } 204 | 205 | /// Sends a notification upon connection establishment failure 206 | fn add_peer_state_failed_handler( 207 | peer_connection: Arc, 208 | event_tx: UnboundedSender, 209 | ) { 210 | peer_connection.on_peer_connection_state_change(Box::new( 211 | move |state: RTCPeerConnectionState| { 212 | let connection_tx = event_tx.clone(); 213 | 214 | Box::pin(async move { 215 | if state == RTCPeerConnectionState::Failed { 216 | let _ = connection_tx.send(DataChannelEvent::ConnectionError( 217 | "Data connection failed".to_owned(), 218 | )); 219 | } 220 | }) 221 | }, 222 | )); 223 | } 224 | 225 | /// Adds a peer connection as an ice candidate to the second peer connection 226 | fn add_ice_candidate_handler( 227 | first_connection: Arc, 228 | second_connection: Arc, 229 | ) { 230 | // Make sure the candidates match UDP protocol 231 | let maybe_first_connection = Arc::downgrade(&first_connection); 232 | second_connection.on_ice_candidate(Box::new(move |candidate: Option| { 233 | let maybe_first_connection = maybe_first_connection.clone(); 234 | 235 | Box::pin(async move { 236 | if let Some(candidate) = candidate { 237 | if let Err(err) = add_ice_candidate(&candidate, &maybe_first_connection).await { 238 | tracing::warn!(?err, "Failed to add sender ICE candidate") 239 | } 240 | } 241 | }) 242 | })); 243 | } 244 | 245 | /// Ensure the candidate is UDP 246 | async fn add_ice_candidate( 247 | candidate: &RTCIceCandidate, 248 | maybe_first_connection: &Weak, 249 | ) -> anyhow::Result<()> { 250 | if candidate.protocol == RTCIceProtocol::Udp { 251 | let candidate = candidate.to_json()?; 252 | let first = maybe_first_connection 253 | .upgrade() 254 | .ok_or(anyhow::anyhow!("Expected a peer connection"))?; 255 | if let Err(err) = first.add_ice_candidate(candidate.clone()).await { 256 | tracing::warn!(?err, "Failed to add sender ICE candidate"); 257 | } 258 | } 259 | Ok(()) 260 | } 261 | 262 | #[cfg(test)] 263 | pub(crate) mod tests { 264 | use std::{net::SocketAddr, 
sync::Arc, time::Duration}; 265 | 266 | use tokio::net::UdpSocket; 267 | use webrtc::{ 268 | turn::{self, auth::AuthHandler}, 269 | util, 270 | }; 271 | 272 | use crate::{ 273 | webrtc_data_channel::{DataChannelEvent, WebRTCDataChannel}, 274 | TurnServerCreds, 275 | }; 276 | 277 | // Canned password for the test TURN server 278 | pub(crate) struct TestAuthHandler {} 279 | 280 | impl AuthHandler for TestAuthHandler { 281 | fn auth_handle( 282 | &self, 283 | _username: &str, 284 | _realm: &str, 285 | _src_addr: SocketAddr, 286 | ) -> Result, turn::Error> { 287 | Ok(turn::auth::generate_auth_key( 288 | "username", 289 | "webrtc.rs", 290 | "password", 291 | )) 292 | } 293 | } 294 | 295 | // Test TURN server used by unit tests 296 | pub(crate) struct TestTurnServer { 297 | pub server: turn::server::Server, 298 | pub server_port: u16, 299 | } 300 | 301 | impl TestTurnServer { 302 | pub async fn start_turn_server() -> anyhow::Result { 303 | let sock_addr: SocketAddr = "127.0.0.1:0".parse()?; 304 | let udp_socket = Arc::new(UdpSocket::bind(sock_addr).await?); 305 | let server_port = udp_socket.local_addr()?.port(); 306 | 307 | let server = turn::server::Server::new(turn::server::config::ServerConfig { 308 | realm: "webrtc.rs".to_owned(), 309 | auth_handler: Arc::new(TestAuthHandler {}), 310 | conn_configs: vec![turn::server::config::ConnConfig { 311 | conn: udp_socket, 312 | relay_addr_generator: Box::new( 313 | turn::relay::relay_none::RelayAddressGeneratorNone { 314 | address: "127.0.0.1".to_owned(), 315 | net: Arc::new(util::vnet::net::Net::new(None)), 316 | }, 317 | ), 318 | }], 319 | channel_bind_timeout: Duration::from_secs(0), 320 | alloc_close_notify: None, 321 | }) 322 | .await?; 323 | 324 | Ok(Self { 325 | server, 326 | server_port, 327 | }) 328 | } 329 | 330 | pub fn get_test_creds(&self) -> TurnServerCreds { 331 | TurnServerCreds { 332 | username: "username".to_owned(), 333 | credential: "password".to_owned(), 334 | } 335 | } 336 | 337 | pub async fn 
close(&self) -> anyhow::Result<()> { 338 | Ok(self.server.close().await?) 339 | } 340 | } 341 | 342 | #[tokio::test] 343 | async fn test_channel_open() -> anyhow::Result<()> { 344 | let server = TestTurnServer::start_turn_server().await?; 345 | let (event_tx, mut event_rx) = tokio::sync::mpsc::unbounded_channel::(); 346 | let turn_server_creds = server.get_test_creds(); 347 | let turn_server_uri = format!("turn:127.0.0.1:{}?transport=udp", server.server_port); 348 | let webrtc_data_channel = WebRTCDataChannel::create_with_config( 349 | &turn_server_uri, 350 | &turn_server_creds, 351 | event_tx.clone(), 352 | ) 353 | .await?; 354 | 355 | // Establishing the data channel starts the flow of messages to the TURN server 356 | webrtc_data_channel.establish_data_channel().await?; 357 | let event = event_rx.recv().await; 358 | server.close().await?; 359 | 360 | assert_eq!(event.unwrap(), DataChannelEvent::OnOpenChannel); 361 | 362 | Ok(()) 363 | } 364 | 365 | #[tokio::test] 366 | async fn test_bad_port_connection_error() -> anyhow::Result<()> { 367 | let server = TestTurnServer::start_turn_server().await?; 368 | let (event_tx, mut event_rx) = tokio::sync::mpsc::unbounded_channel::(); 369 | let turn_server_creds = TurnServerCreds { 370 | username: "username".to_owned(), 371 | credential: "password".to_owned(), 372 | }; 373 | let turn_server_uri = "turn:127.0.0.1:0?transport=udp".to_string(); 374 | let webrtc_data_channel = WebRTCDataChannel::create_with_config( 375 | &turn_server_uri, 376 | &turn_server_creds, 377 | event_tx.clone(), 378 | ) 379 | .await?; 380 | 381 | // Establishing the data channel starts the flow of messages to the TURN server 382 | webrtc_data_channel.establish_data_channel().await?; 383 | let event = event_rx.recv().await; 384 | server.close().await?; 385 | 386 | assert_eq!( 387 | event.unwrap(), 388 | DataChannelEvent::ConnectionError("Data connection failed".to_string()) 389 | ); 390 | 391 | Ok(()) 392 | } 393 | 394 | #[tokio::test] 395 | async fn 
test_bad_creds_connection_error() -> anyhow::Result<()> { 396 | let server = TestTurnServer::start_turn_server().await?; 397 | let (event_tx, mut event_rx) = tokio::sync::mpsc::unbounded_channel::(); 398 | let turn_server_creds = TurnServerCreds { 399 | username: "bad_username".to_owned(), 400 | credential: "bad_password".to_owned(), 401 | }; 402 | let turn_server_uri = format!("turn:127.0.0.1:{}?transport=udp", server.server_port); 403 | let webrtc_data_channel = WebRTCDataChannel::create_with_config( 404 | &turn_server_uri, 405 | &turn_server_creds, 406 | event_tx.clone(), 407 | ) 408 | .await?; 409 | 410 | // Establishing the data channel starts the flow of messages to the TURN server 411 | webrtc_data_channel.establish_data_channel().await?; 412 | let event = event_rx.recv().await; 413 | server.close().await?; 414 | 415 | assert_eq!( 416 | event.unwrap(), 417 | DataChannelEvent::ConnectionError("Data connection failed".to_string()) 418 | ); 419 | 420 | Ok(()) 421 | } 422 | } 423 | -------------------------------------------------------------------------------- /crates/nq-rpm/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nq-rpm" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | repository = { workspace = true } 6 | authors = { workspace = true } 7 | license = { workspace = true } 8 | readme = "../README.md" 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | nq-core = { workspace = true } 13 | nq-load-generator = { workspace = true } 14 | nq-stats = { workspace = true } 15 | 16 | anyhow = { workspace = true, features = ["backtrace"] } 17 | humansize = "*" 18 | tokio = { workspace = true } 19 | tokio-util = { workspace = true } 20 | tracing = { workspace = true } 21 | url = { workspace = true, features = ["serde"] } 
-------------------------------------------------------------------------------- /crates/nq-rpm/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::{ 5 | collections::HashMap, 6 | fmt::{Debug, Display}, 7 | future::Future, 8 | ops::Div, 9 | sync::Arc, 10 | time::Duration, 11 | }; 12 | 13 | use humansize::{format_size, DECIMAL}; 14 | use nq_core::{ 15 | client::{wait_for_finish, Direction, ThroughputClient}, 16 | ConnectionType, Network, Time, Timestamp, 17 | }; 18 | use nq_load_generator::{LoadConfig, LoadGenerator, LoadedConnection}; 19 | use nq_stats::{instant_minus_intervals, TimeSeries}; 20 | use tokio::{select, sync::mpsc}; 21 | use tokio_util::sync::CancellationToken; 22 | use tracing::{debug, error, info, Instrument}; 23 | use url::Url; 24 | 25 | #[derive(Debug, Clone)] 26 | pub struct ResponsivenessConfig { 27 | pub large_download_url: Url, 28 | pub small_download_url: Url, 29 | pub upload_url: Url, 30 | pub moving_average_distance: usize, 31 | pub interval_duration: Duration, 32 | pub test_duration: Duration, 33 | pub trimmed_mean_percent: f64, 34 | pub std_tolerance: f64, 35 | pub max_loaded_connections: usize, 36 | } 37 | 38 | impl ResponsivenessConfig { 39 | pub fn load_config(&self) -> LoadConfig { 40 | LoadConfig { 41 | headers: HashMap::default(), 42 | download_url: self.large_download_url.clone(), 43 | upload_url: self.upload_url.clone(), 44 | upload_size: 4_000_000_000, // 4 GB 45 | } 46 | } 47 | } 48 | 49 | impl Default for ResponsivenessConfig { 50 | fn default() -> Self { 51 | Self { 52 | large_download_url: "https://h3.speed.cloudflare.com/__down?bytes=10000000000" 53 | .parse() 54 | .unwrap(), 55 | small_download_url: "https://h3.speed.cloudflare.com/__down?bytes=10" 56 | .parse() 57 | .unwrap(), 58 | upload_url: 
"https://h3.speed.cloudflare.com/__up" 59 | .parse() 60 | .unwrap(), 61 | moving_average_distance: 4, 62 | interval_duration: Duration::from_millis(1000), 63 | test_duration: Duration::from_secs(20), 64 | trimmed_mean_percent: 0.95, 65 | std_tolerance: 0.05, 66 | max_loaded_connections: 16, 67 | } 68 | } 69 | } 70 | 71 | pub struct Responsiveness { 72 | start: Timestamp, 73 | config: ResponsivenessConfig, 74 | load_generator: LoadGenerator, 75 | foreign_probe_results: ForeignProbeResults, 76 | self_probe_results: SelfProbeResults, 77 | average_goodput_series: TimeSeries, 78 | rpm_series: TimeSeries, 79 | goodput_saturated: bool, 80 | rpm_saturated: bool, 81 | direction: Direction, 82 | rpm: f64, 83 | capacity: f64, 84 | } 85 | 86 | impl Responsiveness { 87 | pub fn new(config: ResponsivenessConfig, download: bool) -> anyhow::Result { 88 | let load_generator = LoadGenerator::new(config.load_config())?; 89 | 90 | Ok(Self { 91 | start: Timestamp::now(), 92 | config, 93 | load_generator, 94 | foreign_probe_results: Default::default(), 95 | self_probe_results: Default::default(), 96 | average_goodput_series: TimeSeries::new(), 97 | rpm_series: TimeSeries::new(), 98 | goodput_saturated: false, 99 | rpm_saturated: false, 100 | direction: if download { 101 | Direction::Down 102 | } else { 103 | Direction::Up(std::cmp::min(32u64 * 1024 * 1024 * 1024, usize::MAX as u64) as usize) 104 | }, 105 | rpm: 0.0, 106 | capacity: 0.0, 107 | }) 108 | } 109 | } 110 | 111 | impl Responsiveness { 112 | /// Run the responsiveness tests. This is a simple event loop which: 113 | /// - executes an interval of the RPM algorithm every `interval_duration` 114 | /// seconds. 115 | /// - sends alternating self and foreign probes. todo(fisher): need to limit 116 | /// to 100 probes/sec. (simple semaphore enough?). 117 | /// 118 | /// When the test completes or the test has been running too long, the test 119 | /// completes and the results are reported. 
120 | pub async fn run_test( 121 | mut self, 122 | network: Arc, 123 | time: Arc, 124 | shutdown: CancellationToken, 125 | ) -> anyhow::Result { 126 | let env = Env { time, network }; 127 | self.start = env.time.now(); 128 | 129 | info!("running responsiveness test: {:?}", self.config); 130 | 131 | let mut interval = None; 132 | 133 | // todo(fisher): switch to `Time` trait based sleep/interval impl to not 134 | // rely on tokio for rpm tests. 135 | let mut interval_timer = tokio::time::interval(self.config.interval_duration); 136 | 137 | let (event_tx, mut event_rx) = mpsc::channel(1024); 138 | 139 | self.new_load_generating_connection(event_tx.clone(), &env, shutdown.clone())?; 140 | self.send_foreign_probe(event_tx.clone(), &env, shutdown.clone())?; 141 | 142 | loop { 143 | select! { 144 | Some(event) = event_rx.recv() => { 145 | match event { 146 | Event::NewLoadedConnection(connection) => { 147 | self.load_generator.push(connection); 148 | } 149 | Event::ForeignProbe(f) => { 150 | self.foreign_probe_results.add(f); 151 | 152 | // There might not be an available load generating 153 | // connection to send a self probe on. If that's the 154 | // case, send another foreign probe. 155 | if !self.send_self_probe(event_tx.clone(), &env, shutdown.clone())? { 156 | self.send_foreign_probe(event_tx.clone(), &env, shutdown.clone())?; 157 | } 158 | } 159 | Event::SelfProbe(s) => { 160 | self.self_probe_results.add(s); 161 | 162 | self.send_foreign_probe(event_tx.clone(), &env, shutdown.clone())?; 163 | } 164 | Event::Error(e) => { 165 | error!("error: {e}"); 166 | } 167 | } 168 | } 169 | _ = interval_timer.tick() => { 170 | // updated the load generating connection state. 171 | self.load_generator.update(); 172 | 173 | if let Some(interval) = interval.as_mut() { 174 | if self.on_interval(*interval, event_tx.clone(), &env, shutdown.clone()).await? 
{ 175 | break; 176 | } 177 | 178 | *interval += 1; 179 | } else { 180 | interval = Some(0); 181 | } 182 | } 183 | _ = shutdown.cancelled() => { 184 | debug!("shutdown requested"); 185 | break; 186 | } 187 | }; 188 | 189 | if env.time.now().duration_since(self.start) > self.config.test_duration { 190 | break; 191 | } 192 | } 193 | 194 | let now = env.time.now(); 195 | if self.rpm == 0.0 { 196 | self.rpm = self 197 | .rpm_series 198 | .interval_average(now - Duration::from_secs(2), now) 199 | .unwrap_or(0.0); 200 | } 201 | 202 | // stop all on-going loads. 203 | let mut loads = self.load_generator.into_connections(); 204 | loads.iter_mut().for_each(|load| load.stop()); 205 | 206 | Ok(ResponsivenessResult { 207 | capacity: self.capacity, 208 | rpm: self.rpm, 209 | foreign_loaded_latencies: self.foreign_probe_results.http, 210 | self_probe_latencies: self.self_probe_results.http, 211 | loaded_connections: loads, 212 | duration: now.duration_since(self.start), 213 | average_goodput_series: self.average_goodput_series, 214 | }) 215 | } 216 | 217 | /// Execute a single iteration of the responsiveness algorithm: 218 | /// 219 | /// * Create a load-generating connection. 220 | /// 221 | /// * At each interval: 222 | /// 223 | /// - Create an additional load-generating connection. 224 | /// 225 | /// - If goodput has not saturated: 226 | /// 227 | /// - Compute the moving average aggregate goodput at interval i as 228 | /// current_average. 229 | /// 230 | /// - If the standard deviation of the past MAD average goodput values is less 231 | /// than SDT of the current_average, declare goodput saturation and move on 232 | /// to probe responsiveness. 233 | /// 234 | /// - If goodput saturation has been declared: 235 | /// 236 | /// - Compute the responsiveness at interval i as current_responsiveness. 
237 | /// 238 | /// - If the standard deviation of the past MAD responsiveness values is less 239 | /// than SDT of the current_responsiveness, declare responsiveness 240 | /// saturation and report current_responsiveness as the final test result. 241 | async fn on_interval( 242 | &mut self, 243 | interval: usize, 244 | event_tx: mpsc::Sender, 245 | env: &Env, 246 | shutdown: CancellationToken, 247 | ) -> anyhow::Result { 248 | // Determine the currently interval and round it to the interval duration. 249 | let end_data_interval = self.start + self.config.interval_duration * interval as u32; 250 | let start_data_interval = instant_minus_intervals( 251 | end_data_interval, 252 | self.config.moving_average_distance, 253 | self.config.interval_duration, 254 | ); 255 | 256 | // always start a load generating connection 257 | // TODO: only if goodput is not saturated? 258 | if self.load_generator.count_loads() < self.config.max_loaded_connections 259 | && interval % 2 == 0 260 | { 261 | self.new_load_generating_connection(event_tx, env, shutdown)?; 262 | } 263 | 264 | let current_goodput = self.current_average_throughput(end_data_interval); 265 | self.average_goodput_series 266 | .add(end_data_interval, current_goodput); 267 | 268 | let std_goodput = self 269 | .average_goodput_series 270 | .interval_std(start_data_interval, end_data_interval) 271 | .unwrap_or(f64::MAX); 272 | 273 | // Goodput is saturated if the std of the last MAD goodputs is within 274 | // tolerance % of the current_average. 275 | let goodput_saturated = std_goodput < current_goodput * self.config.std_tolerance; 276 | if goodput_saturated { 277 | // Goodput has stabilized, set the capacity to the average 278 | // throughput of the last interval. 
279 | self.capacity = current_goodput; 280 | self.goodput_saturated = true; 281 | } 282 | 283 | let current_rpm = compute_responsiveness( 284 | &self.foreign_probe_results, 285 | &self.self_probe_results, 286 | start_data_interval, 287 | end_data_interval, 288 | self.config.trimmed_mean_percent, 289 | ) 290 | .unwrap_or(0.0); 291 | 292 | if current_rpm.is_nan() { 293 | panic!("NaN rpm!"); 294 | } 295 | 296 | self.rpm_series.add(end_data_interval, current_rpm); 297 | 298 | let std_rpm = self 299 | .rpm_series 300 | .interval_std(start_data_interval, end_data_interval); 301 | 302 | let is_rpm_saturated = if let Some(std_rpm) = std_rpm { 303 | // RPM is saturated if the std of the last MAD RPMs is 304 | // within tolerance % of the current_rpm. 305 | if std_rpm < current_rpm * self.config.std_tolerance { 306 | self.rpm = current_rpm; 307 | self.rpm_saturated = true; 308 | true 309 | } else { 310 | false 311 | } 312 | } else { 313 | false 314 | }; 315 | 316 | self.log_interval( 317 | interval, 318 | current_goodput, 319 | std_goodput, 320 | goodput_saturated, 321 | current_rpm, 322 | std_rpm, 323 | is_rpm_saturated, 324 | ); 325 | 326 | // stop testing if both goodput and RPM saturated: 327 | Ok(self.goodput_saturated && self.rpm_saturated) 328 | } 329 | 330 | #[allow(clippy::too_many_arguments)] 331 | fn log_interval( 332 | &mut self, 333 | interval: usize, 334 | current_goodput: f64, 335 | std_goodput: f64, 336 | goodput_saturated: bool, 337 | current_rpm: f64, 338 | std_rpm: Option, 339 | is_rpm_saturated: bool, 340 | ) { 341 | // pretty print the results of the interval 342 | let custom_options = humansize::FormatSizeOptions::from(DECIMAL) 343 | .base_unit(humansize::BaseUnit::Bit) 344 | .long_units(false) 345 | .decimal_places(2); 346 | 347 | info!( 348 | interval, 349 | loads = self.load_generator.count_loads(), 350 | throughput = format_size(current_goodput as usize, custom_options), 351 | rpm = current_rpm, 352 | throughput_saturated = goodput_saturated, 353 | 
rpm_saturated = is_rpm_saturated, 354 | "interval finished" 355 | ); 356 | 357 | info!( 358 | interval, 359 | throughput_std = format_size(std_goodput as usize, custom_options), 360 | throughput_target_std = format_size( 361 | (current_goodput * self.config.std_tolerance) as usize, 362 | custom_options 363 | ), 364 | rpm_std = std_rpm.unwrap_or(f64::NAN), 365 | rpm_target_std = current_rpm * self.config.std_tolerance, 366 | "interval stats" 367 | ); 368 | } 369 | 370 | /// moving average aggregate goodput at interval p: The number of total 371 | /// bytes of data transferred within interval p and the MAD (Moving Average Distance) - 1 immediately 372 | /// preceding intervals, divided by MAD times ID (Interval Duration). 373 | /// 374 | /// https://datatracker.ietf.org/doc/html/draft-ietf-ippm-responsiveness-03#section-4.4-5.2.1 375 | fn current_average_throughput(&self, end_data_interval: Timestamp) -> f64 { 376 | let start_data_interval = 377 | instant_minus_intervals(end_data_interval, 4, self.config.interval_duration); 378 | 379 | let mut bytes_seen = 0.0; 380 | 381 | for connection in self.load_generator.connections() { 382 | bytes_seen += connection 383 | .total_bytes_series() 384 | .interval_sum(start_data_interval, end_data_interval); 385 | } 386 | 387 | let total_time = end_data_interval 388 | .duration_since(start_data_interval) 389 | .as_secs_f64(); 390 | 391 | 8.0 * bytes_seen / total_time 392 | } 393 | 394 | /// A GET/POST to an endpoint which sends/receives a large number of bytes 395 | /// as quickly as possible. The intent of these connections is to saturate 396 | /// a single connection's flow. 
397 | #[tracing::instrument(skip_all)] 398 | fn new_load_generating_connection( 399 | &self, 400 | event_tx: mpsc::Sender, 401 | env: &Env, 402 | shutdown: CancellationToken, 403 | ) -> anyhow::Result<()> { 404 | let oneshot_res = self.load_generator.new_loaded_connection( 405 | self.direction, 406 | ConnectionType::H2, 407 | Arc::clone(&env.network), 408 | Arc::clone(&env.time), 409 | shutdown, 410 | )?; 411 | 412 | tokio::spawn( 413 | async move { 414 | let _ = match oneshot_res.await { 415 | Ok(conn) => event_tx.send(Event::NewLoadedConnection(conn)), 416 | Err(e) => event_tx.send(Event::Error(e)), 417 | } 418 | .await; 419 | } 420 | .in_current_span(), 421 | ); 422 | 423 | Ok(()) 424 | } 425 | 426 | /// Sends a foreign probe which is a GET on a newly created connection. 427 | /// 428 | /// > An HTTP GET request on a connection separate from the load-generating 429 | /// > connections ("foreign probes"). This probe type mimics the time it 430 | /// > takes for a web browser to connect to a new web server and request the 431 | /// > first element of a web page (e.g., "index.html"), or the startup time 432 | /// > for a video streaming client to launch and begin fetching media. 
433 | /// 434 | /// https://datatracker.ietf.org/doc/html/draft-ietf-ippm-responsiveness-03#section-4.3-3.1.1 435 | fn send_foreign_probe( 436 | &mut self, 437 | event_tx: mpsc::Sender, 438 | env: &Env, 439 | shutdown: CancellationToken, 440 | ) -> anyhow::Result<()> { 441 | let inflight_body_fut = ThroughputClient::download() 442 | .new_connection(ConnectionType::H2) 443 | .send( 444 | self.config.small_download_url.as_str().parse()?, 445 | Arc::clone(&env.network), 446 | Arc::clone(&env.time), 447 | shutdown, 448 | )?; 449 | 450 | tokio::spawn(report_err( 451 | event_tx.clone(), 452 | async move { 453 | let inflight_body = inflight_body_fut.await?; 454 | 455 | let finished_result = wait_for_finish(inflight_body.events).await?; 456 | 457 | let Some(connection_timing) = inflight_body.timing else { 458 | anyhow::bail!("a new connection with timing should have been created"); 459 | }; 460 | 461 | if event_tx 462 | .send(Event::ForeignProbe(ForeignProbeResult { 463 | start: connection_timing.start(), 464 | time_connect: connection_timing.time_connect(), 465 | time_secure: connection_timing.time_secure(), 466 | time_body: finished_result 467 | .finished_at 468 | .duration_since(connection_timing.start()), 469 | })) 470 | .await 471 | .is_err() 472 | { 473 | anyhow::bail!("unable to send foreign probe result"); 474 | } 475 | 476 | Ok(()) 477 | } 478 | .in_current_span(), 479 | )); 480 | 481 | Ok(()) 482 | } 483 | 484 | /// Sends a self probe which is a GET on a load-generating connection. 485 | /// 486 | /// 487 | /// > An HTTP GET request multiplexed on the load-generating connections 488 | /// > ("self probes"). This probe type mimics the time it takes for a video 489 | /// > streaming client to skip ahead to a different chapter in the same 490 | /// > video stream, or for a navigation mapping application to react and 491 | /// > fetch new map tiles when the user scrolls the map to view a different 492 | /// > area. 
In a well functioning system, fetching new data over an existing 493 | /// > connection should take less time than creating a brand new TLS 494 | /// > connection from scratch to do the same thing. 495 | /// 496 | /// https://datatracker.ietf.org/doc/html/draft-ietf-ippm-responsiveness-03#section-4.3-3.2.1 497 | fn send_self_probe( 498 | &mut self, 499 | event_tx: mpsc::Sender, 500 | env: &Env, 501 | shutdown: CancellationToken, 502 | ) -> anyhow::Result { 503 | // The test client should uniformly and randomly select from the active 504 | // load-generating connections on which to send self probes. 505 | let Some(connection) = self.load_generator.random_connection() else { 506 | return Ok(false); 507 | }; 508 | 509 | let inflight_body_fut = ThroughputClient::download() 510 | .with_connection(connection) 511 | .send( 512 | self.config.small_download_url.as_str().parse()?, 513 | Arc::clone(&env.network), 514 | Arc::clone(&env.time), 515 | shutdown, 516 | )?; 517 | 518 | tokio::spawn(report_err( 519 | event_tx.clone(), 520 | async move { 521 | let inflight_body = inflight_body_fut.await?; 522 | 523 | let finish_result = wait_for_finish(inflight_body.events).await?; 524 | debug!("self_probe_finished: {finish_result:?}"); 525 | 526 | if event_tx 527 | .send(Event::SelfProbe(SelfProbeResult { 528 | start: inflight_body.start, 529 | time_body: finish_result 530 | .finished_at 531 | .duration_since(inflight_body.start), 532 | })) 533 | .await 534 | .is_err() 535 | { 536 | anyhow::bail!("unable to send self probe result"); 537 | } 538 | 539 | Ok(()) 540 | } 541 | .in_current_span(), 542 | )); 543 | 544 | Ok(true) 545 | } 546 | } 547 | 548 | async fn report_err(event_tx: mpsc::Sender, f: impl Future>) { 549 | if let Err(e) = f.await { 550 | let _ = event_tx.send(Event::Error(e)).await; 551 | } 552 | } 553 | 554 | #[derive(Default)] 555 | pub struct ForeignProbeResults { 556 | connect: TimeSeries, 557 | secure: TimeSeries, 558 | http: TimeSeries, 559 | } 560 | 561 | impl 
ForeignProbeResults { 562 | pub fn add(&mut self, result: ForeignProbeResult) { 563 | self.connect 564 | .add(result.start, result.time_connect.as_secs_f64() * 1000.0); 565 | self.secure 566 | .add(result.start, result.time_secure.as_secs_f64() * 1000.0); 567 | self.http 568 | .add(result.start, result.time_body.as_secs_f64() * 1000.0); 569 | } 570 | 571 | pub fn connect(&self) -> &TimeSeries { 572 | &self.connect 573 | } 574 | 575 | pub fn secure(&self) -> &TimeSeries { 576 | &self.secure 577 | } 578 | 579 | pub fn http(&self) -> &TimeSeries { 580 | &self.http 581 | } 582 | } 583 | 584 | #[derive(Default)] 585 | pub struct SelfProbeResults { 586 | http: TimeSeries, 587 | } 588 | 589 | impl SelfProbeResults { 590 | pub fn add(&mut self, result: SelfProbeResult) { 591 | self.http 592 | .add(result.start, result.time_body.as_secs_f64() * 1000.0); 593 | } 594 | 595 | pub fn http(&self) -> &TimeSeries { 596 | &self.http 597 | } 598 | } 599 | 600 | /// The responsiveness is then calculated as the weighted mean: 601 | /// 602 | /// Responsiveness = 60000 / 603 | /// (1/6*(TM(tcp_f) + TM(tls_f) + TM(http_f)) + 1/2*TM(http_s)) 604 | /// https://datatracker.ietf.org/doc/html/draft-ietf-ippm-responsiveness-03#section-4.3.1-4 605 | fn compute_responsiveness( 606 | foreign_results: &ForeignProbeResults, 607 | self_results: &SelfProbeResults, 608 | from: Timestamp, 609 | to: Timestamp, 610 | percentile: f64, 611 | ) -> Option { 612 | let tm = |ts: &TimeSeries| ts.interval_trimmed_mean(from, to, percentile); 613 | 614 | let tcp_f = tm(foreign_results.connect())?; 615 | let tls_f = tm(foreign_results.secure())?; 616 | let http_f = tm(foreign_results.http())?; 617 | let http_s = tm(self_results.http())?; 618 | 619 | let foreign_sum = tcp_f + tls_f + http_f; 620 | 621 | Some(60_000.0 / (foreign_sum.div(6.0) + http_s.div(2.0))) 622 | } 623 | 624 | #[derive(Debug)] 625 | pub struct ForeignProbeResult { 626 | start: Timestamp, 627 | time_connect: Duration, 628 | time_secure: Duration, 
629 | time_body: Duration, 630 | } 631 | 632 | #[derive(Debug)] 633 | pub struct SelfProbeResult { 634 | start: Timestamp, 635 | time_body: Duration, 636 | } 637 | 638 | enum Event { 639 | ForeignProbe(ForeignProbeResult), 640 | SelfProbe(SelfProbeResult), 641 | NewLoadedConnection(LoadedConnection), 642 | Error(anyhow::Error), 643 | } 644 | 645 | impl Debug for Event { 646 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 647 | match self { 648 | Self::ForeignProbe(_) => f.debug_tuple("ForeignProbe").finish(), 649 | Self::SelfProbe(_) => f.debug_tuple("SelfProbe").finish(), 650 | Self::NewLoadedConnection(_) => f.debug_tuple("NewLoadedConnection").finish(), 651 | Self::Error(_) => f.debug_tuple("Error").finish(), 652 | } 653 | } 654 | } 655 | 656 | #[derive(Clone)] 657 | struct Env { 658 | time: Arc, 659 | network: Arc, 660 | } 661 | 662 | #[derive(Default, Debug)] 663 | pub struct ResponsivenessResult { 664 | pub duration: Duration, 665 | pub capacity: f64, 666 | pub rpm: f64, 667 | pub foreign_loaded_latencies: TimeSeries, 668 | pub self_probe_latencies: TimeSeries, 669 | pub loaded_connections: Vec, 670 | pub average_goodput_series: TimeSeries, 671 | } 672 | 673 | impl ResponsivenessResult { 674 | pub fn throughput(&self) -> Option { 675 | self.average_goodput_series 676 | .quantile(0.90) 677 | .map(|t| t as usize) 678 | } 679 | } 680 | 681 | impl Display for ResponsivenessResult { 682 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 683 | let custom_options = humansize::FormatSizeOptions::from(DECIMAL) 684 | .base_unit(humansize::BaseUnit::Bit) 685 | .long_units(false) 686 | .decimal_places(2); 687 | writeln!( 688 | f, 689 | "{:8}: {}/s", 690 | "capacity", 691 | format_size(self.capacity as usize, custom_options) 692 | )?; 693 | write!(f, "{:>8}: {}", "rpm", self.rpm.round() as usize) 694 | } 695 | } 696 | -------------------------------------------------------------------------------- /crates/nq-stats/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "nq-stats" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | repository = { workspace = true } 6 | authors = { workspace = true } 7 | license = { workspace = true } 8 | readme = "../README.md" 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [dependencies] 12 | nq-core = { workspace = true } 13 | tracing = { workspace = true } -------------------------------------------------------------------------------- /crates/nq-stats/src/counter.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use nq_core::Timestamp; 5 | use tracing::debug; 6 | 7 | #[derive(Debug, Default)] 8 | pub struct CounterSeries { 9 | // windows: Option, 10 | timestamps: Vec, 11 | samples: Vec, 12 | } 13 | 14 | pub struct SampleRange { 15 | start: (Timestamp, f64), 16 | end: (Timestamp, f64), 17 | } 18 | 19 | impl CounterSeries { 20 | pub fn new() -> Self { 21 | Self { 22 | timestamps: Vec::new(), 23 | samples: Vec::new(), 24 | } 25 | } 26 | 27 | pub fn add(&mut self, timestamp: Timestamp, sample: f64) { 28 | if let Some(&last_timestamp) = self.timestamps.last() { 29 | if timestamp > last_timestamp { 30 | self.timestamps.push(timestamp); 31 | self.samples.push(sample); 32 | return; 33 | } 34 | } 35 | let idx = self.timestamps.partition_point(|&p| p < timestamp); 36 | 37 | if idx == self.samples.len() { 38 | self.timestamps.push(timestamp); 39 | self.samples.push(sample); 40 | } else { 41 | self.timestamps.insert(idx, timestamp); 42 | self.samples.insert(idx, sample); 43 | } 44 | } 45 | 46 | pub fn sample_interval(&self, from: Timestamp, to: Timestamp) -> Option { 47 | let start_idx = 
self.timestamps.partition_point(|&t| t < from); 48 | let end_idx = self.timestamps.partition_point(|&t| t <= to); 49 | 50 | // Adjust start_idx to include the sample at the boundary of `from`. 51 | // This ensures the interval calculation correctly reflects the moving average nature 52 | // by capturing the cumulative metric up to the start of the interval. 53 | // For example, to calculate the sum for the last 4 intervals 54 | // the start sample should be at the index before the `from` timestamp. 55 | let start_idx = if start_idx > 0 { 56 | start_idx - 1 57 | } else { 58 | start_idx 59 | }; 60 | 61 | if start_idx >= end_idx || start_idx >= self.timestamps.len() { 62 | return None; 63 | } 64 | 65 | let start = self 66 | .timestamps 67 | .get(start_idx) 68 | .copied() 69 | .zip(self.samples.get(start_idx).copied())?; 70 | let end = self 71 | .timestamps 72 | .get(end_idx.saturating_sub(1)) 73 | .copied() 74 | .zip(self.samples.get(end_idx.saturating_sub(1)).copied())?; 75 | 76 | debug!("sample interval: from={from:?}, to={to:?}, start_idx={start_idx}, end_idx={end_idx}, start={start:?}, end={end:?}"); 77 | 78 | Some(SampleRange { start, end }) 79 | } 80 | 81 | pub fn average(&self) -> Option { 82 | self.interval_average(*self.timestamps.first()?, *self.timestamps.last()?) 
83 | } 84 | 85 | pub fn sum(&self) -> f64 { 86 | self.samples.last().copied().unwrap_or_default() 87 | } 88 | 89 | pub fn interval_sum(&self, from: Timestamp, to: Timestamp) -> f64 { 90 | let Some(SampleRange { 91 | start: (_start_ts, start_sample), 92 | end: (_end_ts, end_sample), 93 | }) = self.sample_interval(from, to) 94 | else { 95 | return 0.0; 96 | }; 97 | 98 | end_sample - start_sample 99 | } 100 | 101 | pub fn interval_average(&self, from: Timestamp, to: Timestamp) -> Option { 102 | let SampleRange { 103 | start: (start_ts, _), 104 | end: (end_ts, _), 105 | } = self.sample_interval(from, to)?; 106 | 107 | let sum = self.interval_sum(from, to); 108 | let duration = end_ts.duration_since(start_ts).as_secs_f64(); 109 | 110 | if duration == 0.0 { 111 | return None; 112 | } 113 | 114 | Some(sum / duration) 115 | } 116 | 117 | pub fn samples(&self) -> impl Iterator + Clone + '_ { 118 | self.samples.iter().copied() 119 | } 120 | } 121 | 122 | #[cfg(test)] 123 | mod tests { 124 | use std::time::Duration; 125 | 126 | use nq_core::Timestamp; 127 | 128 | use crate::{counter::CounterSeries, instant_minus_intervals}; 129 | 130 | fn avg_first_n(n: f64) -> f64 { 131 | (n + 1.0) / 2.0 132 | } 133 | 134 | #[test] 135 | fn average_simple() { 136 | let mut ts = CounterSeries::new(); 137 | assert_eq!(ts.average(), None); 138 | 139 | let now = Timestamp::now(); 140 | 141 | for n in 0..10 { 142 | ts.add(now + n * Duration::from_secs(1), n as f64); 143 | } 144 | 145 | assert_eq!(ts.average(), Some(1.0)) 146 | } 147 | 148 | #[test] 149 | fn average_intervaled() { 150 | let mut ts = CounterSeries::new(); 151 | let start = Timestamp::now(); 152 | 153 | let intervals = 4; 154 | let interval_length = Duration::from_secs(1); 155 | 156 | // Ase cumulative samples 157 | for n in 0..=10 { 158 | ts.add(start + interval_length * n as u32, (n * (n + 1) / 2) as f64); 159 | } 160 | 161 | let total_avg = ts.average().unwrap(); 162 | assert_eq!(total_avg, avg_first_n(10.0)); 163 | 164 | let to 
= start + interval_length * 10 + Duration::from_millis(1); 165 | let from = instant_minus_intervals(to, intervals, interval_length); 166 | let interval_avg = ts.interval_average(from, to).unwrap(); 167 | assert_eq!(interval_avg, 8.5); // (10 + 9 + 8 + 7) / 4 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /crates/nq-stats/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2023-2024 Cloudflare, Inc. 2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | mod counter; 5 | 6 | use std::cmp::Ordering; 7 | use std::time::Duration; 8 | 9 | use nq_core::Timestamp; 10 | 11 | pub use crate::counter::CounterSeries; 12 | 13 | pub fn instant_minus_intervals( 14 | to: Timestamp, 15 | intervals: usize, 16 | interval_length: Duration, 17 | ) -> Timestamp { 18 | to - (interval_length * intervals as u32) 19 | } 20 | 21 | #[derive(Debug, Default)] 22 | pub struct TimeSeries { 23 | // windows: Option, 24 | samples: Vec<(Timestamp, f64)>, 25 | } 26 | 27 | impl TimeSeries { 28 | pub fn new() -> Self { 29 | Self { 30 | samples: Vec::new(), 31 | } 32 | } 33 | 34 | pub fn add(&mut self, time: Timestamp, sample: f64) { 35 | match self.samples.binary_search_by_key(&time, |(time, _)| *time) { 36 | Ok(index) => self.samples.insert(index, (time, sample)), 37 | Err(index) => { 38 | if index == self.samples.len() { 39 | self.samples.push((time, sample)) 40 | } else { 41 | self.samples.insert(index, (time, sample)); 42 | } 43 | } 44 | } 45 | } 46 | 47 | pub fn sample_interval( 48 | &self, 49 | from: Timestamp, 50 | to: Timestamp, 51 | ) -> impl Iterator + Clone + '_ { 52 | self.samples 53 | .iter() 54 | .rev() 55 | .skip_while(move |(ts, _)| *ts > to) 56 | .take_while(move |(ts, _)| *ts >= from) 57 | .map(|(_, s)| s) 58 | .copied() 59 | } 60 | 61 | pub fn average(&self) -> Option { 62 | 
average(self.values()) 63 | } 64 | 65 | pub fn std(&self) -> Option { 66 | std(self.values()) 67 | } 68 | 69 | pub fn trimmed_mean(&self, percentile: f64) -> Option { 70 | trimmed_mean(self.values(), percentile) 71 | } 72 | 73 | pub fn sum(&self) -> f64 { 74 | self.values().sum::() 75 | } 76 | 77 | pub fn interval_sum(&self, from: Timestamp, to: Timestamp) -> f64 { 78 | self.sample_interval(from, to).sum() 79 | } 80 | 81 | pub fn interval_average(&self, from: Timestamp, to: Timestamp) -> Option { 82 | average(self.sample_interval(from, to)) 83 | } 84 | 85 | pub fn interval_std(&self, from: Timestamp, to: Timestamp) -> Option { 86 | std(self.sample_interval(from, to)) 87 | } 88 | 89 | pub fn interval_trimmed_mean( 90 | &self, 91 | from: Timestamp, 92 | to: Timestamp, 93 | percentile: f64, 94 | ) -> Option { 95 | trimmed_mean(self.sample_interval(from, to), percentile) 96 | } 97 | 98 | pub fn values(&self) -> impl Iterator + Clone + '_ { 99 | self.samples.iter().map(|(_, sample)| sample).copied() 100 | } 101 | 102 | pub fn samples(&self) -> &[(Timestamp, f64)] { 103 | &self.samples 104 | } 105 | 106 | pub fn quantile(&self, quantile: f64) -> Option { 107 | let mut values: Vec = self.values().collect(); 108 | values.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Less)); 109 | 110 | values 111 | .get((values.len() as f64 * quantile) as usize) 112 | .copied() 113 | } 114 | } 115 | 116 | pub fn average(samples: impl Iterator + Clone) -> Option { 117 | let n = samples.clone().count(); 118 | 119 | if n == 0 { 120 | None 121 | } else { 122 | Some(samples.sum::() / n as f64) 123 | } 124 | } 125 | 126 | pub fn std(samples: impl Iterator + Clone) -> Option { 127 | let avg = average(samples.clone())?; 128 | let n = samples.clone().count(); 129 | 130 | if n == 0 { 131 | None 132 | } else { 133 | let sum_of_squares = samples.map(|s| (avg - s).powi(2)).sum::(); 134 | Some((sum_of_squares / n as f64).sqrt()) 135 | } 136 | } 137 | 138 | /// Calculate the trimmed mean 
of a series. We keep all values 139 | /// less than the given percentile and the calculate the average. 140 | fn trimmed_mean(samples: impl Iterator + Clone, percentile: f64) -> Option { 141 | let mut samples: Vec<_> = samples.collect(); 142 | samples.sort_by(|a, b| a.partial_cmp(b).unwrap()); 143 | 144 | let to_keep = (samples.len() as f64 * percentile) as usize; 145 | samples.truncate(to_keep); 146 | 147 | average(samples.into_iter()) 148 | } 149 | 150 | #[cfg(test)] 151 | mod tests { 152 | use std::time::Duration; 153 | 154 | use nq_core::Timestamp; 155 | 156 | use crate::{instant_minus_intervals, TimeSeries}; 157 | 158 | fn avg_first_n(n: f64) -> f64 { 159 | (n + 1.0) / 2.0 160 | } 161 | 162 | fn std_first_n(n: f64) -> f64 { 163 | ((n.powi(2) - 1.0) / 12.0).sqrt() 164 | } 165 | 166 | #[test] 167 | fn average_simple() { 168 | let mut ts = TimeSeries::new(); 169 | assert_eq!(ts.average(), None); 170 | 171 | for n in 1..10 { 172 | ts.add(Timestamp::now(), n as f64); 173 | assert_eq!(ts.average(), Some(avg_first_n(n as f64))); 174 | } 175 | } 176 | 177 | #[test] 178 | fn average_intervaled() { 179 | let mut ts = TimeSeries::new(); 180 | let start = Timestamp::now(); 181 | 182 | let intervals = 4; 183 | let interval_length = Duration::from_secs(1); 184 | 185 | for n in 1..=10 { 186 | ts.add(start + interval_length * n as u32, n as f64); 187 | } 188 | 189 | let total_avg = ts.average().unwrap(); 190 | assert_eq!(total_avg, avg_first_n(10.0)); 191 | 192 | let to = start + interval_length * 10 + Duration::from_millis(1); 193 | let from = instant_minus_intervals(to, intervals, interval_length); 194 | let interval_avg = ts.interval_average(from, to).unwrap(); 195 | assert_eq!(interval_avg, 8.5); // (10 + 9 + 8 + 7) / 4 196 | } 197 | 198 | #[test] 199 | fn std_simple() { 200 | let mut ts = TimeSeries::new(); 201 | 202 | for n in 1..10 { 203 | ts.add(Timestamp::now(), n as f64); 204 | assert_eq!(ts.std(), Some(std_first_n(n as f64))); 205 | } 206 | } 207 | 208 | #[test] 
209 | fn std_intervaled() { 210 | let mut ts = TimeSeries::new(); 211 | let start = Timestamp::now(); 212 | 213 | let intervals = 4; 214 | let interval_length = Duration::from_secs(1); 215 | 216 | for n in 1..=10 { 217 | ts.add(start + interval_length * n as u32, n as f64); 218 | } 219 | 220 | let total_avg = ts.std().unwrap(); 221 | assert_eq!(total_avg, std_first_n(10.0)); 222 | 223 | let to = start + interval_length * 10 + Duration::from_millis(1); 224 | let from = instant_minus_intervals(to, intervals, interval_length); 225 | let interval_std = ts.interval_std(from, to).unwrap(); 226 | 227 | // avg = (10 + 9 + 8 + 7) / 4 = 8.5 228 | // dist = 1.5, 0.5, -0.5, 1.5 229 | // sq = 2.25 + 0.25 + 0.25 + 2.25 = 5.0 230 | // div = 5.0 / 4.0 = 1.25 231 | // sqrt = 1.118033988749895 232 | assert_eq!(interval_std, 1.118033988749895); 233 | } 234 | 235 | #[test] 236 | fn trimmed_mean() { 237 | let mut ts = TimeSeries::new(); 238 | let start = Timestamp::now(); 239 | 240 | let intervals = 10; 241 | let interval_length = Duration::from_secs(1); 242 | 243 | // keep the 90th percentile of values; 244 | let percentile = 0.90; 245 | 246 | for n in 1..=20 { 247 | ts.add(start + interval_length * n as u32, n as f64); 248 | } 249 | 250 | // The 90% of 1..=20 == 18. So we should see the average 251 | // of the first 18 natural numbers. 252 | let trimmed_mean = ts.trimmed_mean(percentile).unwrap(); 253 | assert_eq!(trimmed_mean, avg_first_n(18.0)); 254 | 255 | // The 90% of the last 10 intervals, [11, 20] is 19, so the trimmed 256 | // mean should drop the last value, 20. 
Average of [11, 19] is 14.5 257 | let to = start + interval_length * 20 + Duration::from_millis(1); 258 | let from = instant_minus_intervals(to, intervals, interval_length); 259 | 260 | let trimmed_mean = ts.interval_trimmed_mean(from, to, percentile).unwrap(); 261 | assert_eq!(trimmed_mean, 15.0); 262 | } 263 | } 264 | -------------------------------------------------------------------------------- /crates/nq-tokio-network/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nq-tokio-network" 3 | version = { workspace = true } 4 | edition = { workspace = true } 5 | repository = { workspace = true } 6 | authors = { workspace = true } 7 | license = { workspace = true } 8 | readme = "../README.md" 9 | 10 | [dependencies] 11 | anyhow = { workspace = true } 12 | http = { workspace = true } 13 | hyper = { workspace = true } 14 | nq-core = { workspace = true } 15 | tokio = { workspace = true, features = ["net"] } 16 | tokio-util = { workspace = true } 17 | tracing = { workspace = true } -------------------------------------------------------------------------------- /crates/nq-tokio-network/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2017-2020 Cloudflare, Inc. 
2 | // Licensed under the BSD-3-Clause license found in the LICENSE file or at https://opensource.org/licenses/BSD-3-Clause 3 | 4 | use std::fmt::Debug; 5 | use std::net::SocketAddr; 6 | use std::sync::Arc; 7 | use tokio::sync::RwLock; 8 | 9 | use http::{Request, Response}; 10 | use hyper::body::Incoming; 11 | use nq_core::{ 12 | oneshot_result, ConnectionManager, ConnectionTiming, ConnectionType, EstablishedConnection, 13 | Network, NqBody, OneshotResult, ResponseFuture, Time, Timestamp, 14 | }; 15 | 16 | use tokio::net::TcpStream; 17 | use tokio_util::sync::CancellationToken; 18 | use tracing::{error, info, Instrument}; 19 | 20 | #[derive(Debug, Clone)] 21 | pub struct TokioNetwork { 22 | inner: TokioNetworkInner, 23 | } 24 | 25 | impl TokioNetwork { 26 | pub fn new(time: Arc, shutdown: CancellationToken) -> Self { 27 | Self { 28 | inner: TokioNetworkInner::new(time, shutdown), 29 | } 30 | } 31 | } 32 | 33 | impl Network for TokioNetwork { 34 | fn resolve(&self, host: String) -> OneshotResult> { 35 | let (tx, rx) = oneshot_result(); 36 | let time = self.inner.time.clone(); 37 | 38 | tokio::spawn(async move { 39 | match timed_lookup_host(host, time).await { 40 | Ok(addrs) => { 41 | if tx.send(Ok(addrs)).is_err() { 42 | error!("Failed to send resolved addresses"); 43 | } 44 | } 45 | Err(e) => { 46 | if tx.send(Err(e)).is_err() { 47 | error!("Failed to send error"); 48 | } 49 | } 50 | } 51 | }); 52 | 53 | rx 54 | } 55 | 56 | fn new_connection( 57 | &self, 58 | start: Timestamp, 59 | remote_addr: SocketAddr, 60 | domain: String, 61 | conn_type: ConnectionType, 62 | ) -> OneshotResult>> { 63 | let (tx, rx) = oneshot_result(); 64 | 65 | let inner = self.inner.clone(); 66 | tokio::spawn(async move { 67 | let new_connection = inner 68 | .new_connection(start, remote_addr, domain, conn_type) 69 | .await; 70 | 71 | if tx.send(new_connection).is_err() { 72 | error!("unable to create connection"); 73 | } 74 | }); 75 | 76 | rx 77 | } 78 | 79 | #[tracing::instrument(skip(self, 
request), fields(uri=%request.uri()))] 80 | fn send_request( 81 | &self, 82 | connection: Arc>, 83 | request: Request, 84 | ) -> OneshotResult> { 85 | let (tx, rx) = oneshot_result(); 86 | 87 | let inner = self.inner.clone(); 88 | tokio::spawn( 89 | async move { 90 | info!("sending request"); 91 | 92 | let response_result = match inner.send_request(connection, request).await { 93 | Ok(fut) => fut.await, 94 | Err(error) => { 95 | let _ = tx.send(Err(error)); 96 | return; 97 | } 98 | }; 99 | 100 | let response = match response_result { 101 | Ok(response) => response, 102 | Err(error) => { 103 | let _ = tx.send(Err(error.into())); 104 | return; 105 | } 106 | }; 107 | 108 | info!("sending response future"); 109 | let _ = tx.send(Ok(response)); 110 | } 111 | .in_current_span(), 112 | ); 113 | 114 | rx 115 | } 116 | } 117 | 118 | #[derive(Clone)] 119 | pub struct TokioNetworkInner { 120 | connections: Arc, 121 | time: Arc, 122 | shutdown: CancellationToken, 123 | } 124 | 125 | impl TokioNetworkInner { 126 | pub fn new(time: Arc, shutdown: CancellationToken) -> Self { 127 | let connections: Arc = Default::default(); 128 | 129 | tokio::spawn({ 130 | let connections = Arc::clone(&connections); 131 | let cloned_shutdown = shutdown.clone(); 132 | 133 | async move { 134 | cloned_shutdown.cancelled().await; 135 | info!("shutting down connections"); 136 | connections.shutdown().await; 137 | } 138 | }); 139 | 140 | Self { 141 | connections, 142 | time, 143 | shutdown, 144 | } 145 | } 146 | 147 | async fn new_connection( 148 | &self, 149 | start: Timestamp, 150 | remote_addr: SocketAddr, 151 | domain: String, 152 | conn_type: ConnectionType, 153 | ) -> anyhow::Result>> { 154 | let mut timing = ConnectionTiming::new(start); 155 | 156 | let tcp_stream = TcpStream::connect(remote_addr).await?; 157 | timing.set_connect(self.time.now()); 158 | 159 | tcp_stream.set_nodelay(false).unwrap(); 160 | 161 | let connection = self 162 | .connections 163 | .new_connection( 164 | timing, 165 | 
remote_addr, 166 | domain, 167 | conn_type, 168 | Box::new(tcp_stream), 169 | &*self.time, 170 | self.shutdown.clone(), 171 | ) 172 | .await?; 173 | 174 | Ok(connection) 175 | } 176 | 177 | #[tracing::instrument(skip(self, request), fields(uri=%request.uri()))] 178 | async fn send_request( 179 | &self, 180 | connection: Arc>, 181 | request: http::Request, 182 | ) -> anyhow::Result { 183 | info!("sending request"); 184 | 185 | let mut conn = connection.write().await; 186 | let response_fut = conn 187 | .send_request(request) 188 | .ok_or_else(|| anyhow::anyhow!("Failed to send request"))?; 189 | 190 | Ok(response_fut) 191 | } 192 | } 193 | 194 | impl Debug for TokioNetworkInner { 195 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 196 | f.debug_struct("TokioNetworkInner") 197 | .field("connections", &self.connections) 198 | .field("time", &"Arc") 199 | .finish() 200 | } 201 | } 202 | 203 | async fn timed_lookup_host(host: String, time: Arc) -> anyhow::Result> { 204 | let dns_start = time.now(); 205 | let addrs = tokio::net::lookup_host(host).await?.collect(); 206 | let dns_end = time.now(); 207 | let dns_duration = dns_end.duration_since(dns_start); 208 | 209 | let mut timing = ConnectionTiming::new(dns_start); 210 | timing.set_dns_lookup(dns_duration); 211 | 212 | Ok(addrs) 213 | } 214 | --------------------------------------------------------------------------------