├── .github ├── actions │ ├── build-docs │ │ └── action.yml │ ├── ci │ │ └── action.yml │ └── publish │ │ └── action.yml └── workflows │ ├── ci.yml │ ├── lint-pr-title.yml │ ├── manual-publish.yml │ ├── release-please.yml │ └── stale.yml ├── .gitignore ├── .release-please-manifest.json ├── CODEOWNERS ├── CONTRIBUTORS ├── Cargo.toml ├── DEVELOPING.md ├── LICENSE.txt ├── Makefile ├── README.md ├── SECURITY.md ├── contract-tests ├── Cargo.toml └── src │ └── bin │ └── sse-test-api │ ├── main.rs │ └── stream_entity.rs ├── coverage.sh ├── eventsource-client ├── CHANGELOG.md ├── Cargo.toml ├── README.md ├── examples │ └── tail.rs ├── src │ ├── client.rs │ ├── config.rs │ ├── error.rs │ ├── event_parser.rs │ ├── lib.rs │ ├── response.rs │ └── retry.rs └── test-data │ ├── .gitattributes │ ├── big-event-followed-by-another-crlf.sse │ ├── big-event-followed-by-another.sse │ ├── one-event-crlf.sse │ ├── one-event.sse │ ├── persistent-event-id.sse │ ├── two-events-crlf.sse │ └── two-events.sse └── release-please-config.json /.github/actions/build-docs/action.yml: -------------------------------------------------------------------------------- 1 | name: Build Documentation 2 | description: 'Build Documentation.' 3 | 4 | runs: 5 | using: composite 6 | steps: 7 | - name: Build Documentation 8 | shell: bash 9 | run: cargo doc --no-deps -p eventsource-client 10 | -------------------------------------------------------------------------------- /.github/actions/ci/action.yml: -------------------------------------------------------------------------------- 1 | name: CI Workflow 2 | description: 'Shared CI workflow.' 3 | 4 | runs: 5 | using: composite 6 | steps: 7 | - name: Check formatting 8 | shell: bash 9 | run: cargo fmt --check 10 | 11 | - name: Run tests 12 | shell: bash 13 | run: cargo test --all-features -p eventsource-client 14 | 15 | - name: Run clippy checks 16 | shell: bash 17 | run: cargo clippy --all-features -p eventsource-client -- -D warnings 18 | 19 | - name: Build contract tests 20 | shell: bash 21 | run: make build-contract-tests 22 | 23 | - name: Start contract test service 24 | shell: bash 25 | run: make start-contract-test-service-bg 26 | 27 | - name: Run contract tests 28 | shell: bash 29 | run: make run-contract-tests 30 | -------------------------------------------------------------------------------- /.github/actions/publish/action.yml: -------------------------------------------------------------------------------- 1 | name: Publish Package 2 | description: 'Publish the package to crates.io' 3 | inputs: 4 | dry_run: 5 | description: 'Is this a dry run. If so no package will be published.' 6 | required: true 7 | 8 | runs: 9 | using: composite 10 | steps: 11 | - name: Publish Library 12 | shell: bash 13 | if: ${{ inputs.dry_run == 'false' }} 14 | run: cargo publish -p eventsource-client 15 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Run CI 2 | on: 3 | push: 4 | branches: [ main ] 5 | paths-ignore: 6 | - '**.md' # Do not need to run CI for markdown changes. 7 | pull_request: 8 | branches: [ main ] 9 | paths-ignore: 10 | - '**.md' 11 | 12 | jobs: 13 | ci-build: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 # If you only need the current version keep this. 
20 | 21 | - name: Setup rust tooling 22 | run: | 23 | rustup override set 1.81 24 | rustup component add rustfmt clippy 25 | 26 | - uses: ./.github/actions/ci 27 | - uses: ./.github/actions/build-docs 28 | -------------------------------------------------------------------------------- /.github/workflows/lint-pr-title.yml: -------------------------------------------------------------------------------- 1 | name: Lint PR title 2 | 3 | on: 4 | pull_request_target: 5 | types: 6 | - opened 7 | - edited 8 | - synchronize 9 | 10 | jobs: 11 | lint-pr-title: 12 | uses: launchdarkly/gh-actions/.github/workflows/lint-pr-title.yml@main 13 | -------------------------------------------------------------------------------- /.github/workflows/manual-publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish Package 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | dry_run: 6 | description: 'Is this a dry run. If so no package will be published.' 7 | type: boolean 8 | required: true 9 | 10 | jobs: 11 | build-publish: 12 | runs-on: ubuntu-latest 13 | # Needed to get tokens during publishing. 14 | permissions: 15 | id-token: write 16 | contents: read 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - name: Setup rust tooling 21 | run: | 22 | rustup override set 1.81 23 | rustup component add rustfmt clippy 24 | 25 | - uses: ./.github/actions/ci 26 | - uses: ./.github/actions/build-docs 27 | 28 | - uses: launchdarkly/gh-actions/actions/release-secrets@release-secrets-v1.2.0 29 | name: 'Get crates.io token' 30 | with: 31 | aws_assume_role: ${{ vars.AWS_ROLE_ARN }} 32 | ssm_parameter_pairs: '/production/common/releasing/cratesio/api_token = CARGO_REGISTRY_TOKEN' 33 | 34 | - uses: ./.github/actions/publish 35 | with: 36 | dry_run: ${{ inputs.dry_run }} 37 | -------------------------------------------------------------------------------- /.github/workflows/release-please.yml: -------------------------------------------------------------------------------- 1 | name: Run Release Please 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | release-package: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | id-token: write # Needed if using OIDC to get release secrets. 13 | contents: write # Contents and pull-requests are for release-please to make releases. 14 | pull-requests: write 15 | steps: 16 | - uses: googleapis/release-please-action@v4 17 | id: release 18 | 19 | - uses: actions/checkout@v4 20 | if: ${{ steps.release.outputs['eventsource-client--release_created'] == 'true' }} 21 | with: 22 | fetch-depth: 0 # If you only need the current version keep this. 
23 | 24 | - name: Setup rust tooling 25 | if: ${{ steps.release.outputs['eventsource-client--release_created'] == 'true' }} 26 | run: | 27 | rustup override set 1.81 28 | rustup component add rustfmt clippy 29 | 30 | - uses: launchdarkly/gh-actions/actions/release-secrets@release-secrets-v1.2.0 31 | name: 'Get crates.io token' 32 | if: ${{ steps.release.outputs['eventsource-client--release_created'] == 'true' }} 33 | with: 34 | aws_assume_role: ${{ vars.AWS_ROLE_ARN }} 35 | ssm_parameter_pairs: '/production/common/releasing/cratesio/api_token = CARGO_REGISTRY_TOKEN' 36 | 37 | - uses: ./.github/actions/ci 38 | if: ${{ steps.release.outputs['eventsource-client--release_created'] == 'true' }} 39 | 40 | - uses: ./.github/actions/build-docs 41 | if: ${{ steps.release.outputs['eventsource-client--release_created'] == 'true' }} 42 | 43 | - uses: ./.github/actions/publish 44 | if: ${{ steps.release.outputs['eventsource-client--release_created'] == 'true' }} 45 | with: 46 | dry_run: false 47 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: 'Close stale issues and PRs' 2 | on: 3 | workflow_dispatch: 4 | schedule: 5 | # Happen once per day at 1:30 AM 6 | - cron: '30 1 * * *' 7 | 8 | jobs: 9 | sdk-close-stale: 10 | uses: launchdarkly/gh-actions/.github/workflows/sdk-stale.yml@main 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | .idea 5 | .DS_Store 6 | -------------------------------------------------------------------------------- /.release-please-manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventsource-client": "0.15.0" 3 | } 4 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Repository Maintainers 2 | * @launchdarkly/team-sdk-rust 3 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | Sam Stokes 2 | Kyle LeNeau 3 | Harpo Roeder 4 | Marcus Weiner 5 | JiggyDown 6 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "contract-tests", 5 | "eventsource-client" 6 | ] 7 | 8 | resolver = "2" 9 | -------------------------------------------------------------------------------- /DEVELOPING.md: -------------------------------------------------------------------------------- 1 | # Guide to developing rust-eventsource-client 2 | 3 | Incomplete. 4 | 5 | ## Get detailed logging 6 | 7 | eventsource-client uses the standard [log crate](https://crates.io/crates/log) for logging. It will log additional detail about the protocol implementation at `trace` level. 8 | 9 | e.g. if using [env_logger](https://crates.io/crates/env_logger) (as the example script does), set `RUST_LOG=eventsource_client=trace`. 
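If you prefer to set the filter in code rather than through the `RUST_LOG` environment variable, a minimal sketch using the same `env_logger` crate (this is just its standard builder API, nothing specific to this project) looks like:

```rust
fn main() {
    // Equivalent to running with RUST_LOG=eventsource_client=trace.
    env_logger::Builder::new()
        .parse_filters("eventsource_client=trace")
        .init();

    // ... start the client as in the examples directory ...
}
```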
10 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright 2019 Catamorphic, Co. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TEMP_TEST_OUTPUT=/tmp/contract-test-service.log 2 | 3 | build-contract-tests: 4 | @cargo build 5 | 6 | start-contract-test-service: build-contract-tests 7 | @./target/debug/sse-test-api 8 | 9 | start-contract-test-service-bg: 10 | @echo "Test service output will be captured in $(TEMP_TEST_OUTPUT)" 11 | @make start-contract-test-service >$(TEMP_TEST_OUTPUT) 2>&1 & 12 | 13 | run-contract-tests: 14 | @curl -s https://raw.githubusercontent.com/launchdarkly/sse-contract-tests/v2.0.0/downloader/run.sh \ 15 | | VERSION=v2 PARAMS="-url http://localhost:8080 -debug -stop-service-at-end $(SKIPFLAGS) $(TEST_HARNESS_PARAMS)" sh 16 | 17 | contract-tests: build-contract-tests start-contract-test-service-bg run-contract-tests 18 | 19 | .PHONY: build-contract-tests start-contract-test-service run-contract-tests contract-tests 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # eventsource-client 2 | 3 | [![Run CI](https://github.com/launchdarkly/rust-eventsource-client/actions/workflows/ci.yml/badge.svg)](https://github.com/launchdarkly/rust-eventsource-client/actions/workflows/ci.yml) 4 | 5 | Client for the [Server-Sent Events] protocol (aka [EventSource]). 6 | 7 | [Server-Sent Events]: https://html.spec.whatwg.org/multipage/server-sent-events.html 8 | [EventSource]: https://developer.mozilla.org/en-US/docs/Web/API/EventSource 9 | 10 | ## Requirements 11 | 12 | Requires tokio. 13 | 14 | ## Usage 15 | 16 | Example that just prints the type of each event received: 17 | 18 | ```rust 19 | use eventsource_client as es; 20 | 21 | let mut client = es::ClientBuilder::for_url("https://example.com/stream")? 22 | .header("Authorization", "Basic username:password")? 23 | .build(); 24 | 25 | client 26 | .stream() 27 | .map_ok(|event| println!("got event: {:?}", event)) 28 | .map_err(|err| eprintln!("error streaming events: {:?}", err)); 29 | ``` 30 | 31 | (Some boilerplate omitted for clarity; see [examples directory] for complete, 32 | working code.) 33 | 34 | [examples directory]: https://github.com/launchdarkly/rust-eventsource-client/tree/main/eventsource-client/examples 35 | ## Features 36 | 37 | * tokio-based streaming client. 38 | * Supports setting custom headers on the HTTP request (e.g. for endpoints 39 | requiring authorization). 40 | * Retry for failed connections. 41 | * Reconnection if connection is interrupted, with exponential backoff. 42 | 43 | ## Stability 44 | 45 | Early stage release for feedback purposes. 
May contain bugs or performance 46 | issues. API subject to change. 47 | 48 | ## Minimum Supported Rust Version 49 | 50 | This project aims to maintain compatibility with a Rust version that is at least six months old. 51 | 52 | Version updates may occur more frequently than the policy guideline states if external forces require it. For example, a CVE in a downstream dependency requiring an MSRV bump would be considered an acceptable reason to violate the six month guideline. 53 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Reporting and Fixing Security Issues 2 | 3 | Please report all security issues to the LaunchDarkly security team by submitting a bug bounty report to our [HackerOne program](https://hackerone.com/launchdarkly?type=team). LaunchDarkly will triage and address all valid security issues following the response targets defined in our program policy. Valid security issues may be eligible for a bounty. 4 | 5 | Please do not open issues or pull requests for security issues. This makes the problem immediately visible to everyone, including potentially malicious actors. 6 | -------------------------------------------------------------------------------- /contract-tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "contract-tests" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "Apache-2.0" 6 | 7 | [dependencies] 8 | futures = { version = "0.3.21" } 9 | serde = { version = "1.0", features = ["derive"] } 10 | eventsource-client = { path = "../eventsource-client" } 11 | serde_json = { version = "1.0.39"} 12 | actix = { version = "0.13.1"} 13 | actix-web = { version = "4"} 14 | reqwest = { version = "0.11.6", default-features = false, features = ["json", "rustls-tls"] } 15 | env_logger = { version = "0.10.0" } 16 | hyper = { version = "0.14.19", features = ["client", "http1", "tcp"] } 17 | log = "0.4.6" 18 | 19 | [[bin]] 20 | name = "sse-test-api" 21 | -------------------------------------------------------------------------------- /contract-tests/src/bin/sse-test-api/main.rs: -------------------------------------------------------------------------------- 1 | mod stream_entity; 2 | 3 | use actix_web::{guard, web, App, HttpRequest, HttpResponse, HttpServer, Responder}; 4 | use eventsource_client as es; 5 | use futures::executor; 6 | use serde::{self, Deserialize, Serialize}; 7 | use std::collections::HashMap; 8 | use std::sync::{mpsc, Mutex}; 9 | use std::thread; 10 | use stream_entity::StreamEntity; 11 | 12 | #[derive(Serialize)] 13 | struct Status { 14 | capabilities: Vec<String>, 15 | } 16 | 17 | #[derive(Deserialize, Debug)] 18 | #[serde(rename_all = "camelCase")] 19 | struct Config { 20 | /// The URL of an SSE endpoint created by the test harness. 21 | stream_url: String, 22 | /// The URL of a callback endpoint created by the test harness. 23 | callback_url: String, 24 | /// An optional integer specifying the initial reconnection delay parameter, in 25 | /// milliseconds. Not all SSE client implementations allow this to be configured, but the 26 | /// test harness will send a value anyway in an attempt to avoid having reconnection tests 27 | /// run unnecessarily slowly. 28 | initial_delay_ms: Option<u64>, 29 | /// A JSON object containing additional HTTP header names and string values.
The SSE 30 | /// client should be configured to add these headers to its HTTP requests; the test harness 31 | /// will then verify that it receives those headers. The test harness will only set this 32 | /// property if the test service has the "headers" capability. Header names can be assumed 33 | /// to all be lowercase. 34 | headers: Option<HashMap<String, String>>, 35 | /// An optional integer specifying the read timeout for the connection, in 36 | /// milliseconds. 37 | read_timeout_ms: Option<u64>, 38 | /// An optional string which should be sent as the Last-Event-Id header in the initial 39 | /// HTTP request. The test harness will only set this property if the test service has the 40 | /// "last-event-id" capability. 41 | last_event_id: Option<String>, 42 | /// A string specifying an HTTP method to use instead of GET. The test harness will only 43 | /// set this property if the test service has the "post" or "report" capability. 44 | method: Option<String>, 45 | /// A string specifying data to be sent in the HTTP request body. The test harness will 46 | /// only set this property if the test service has the "post" or "report" capability. 47 | body: Option<String>, 48 | } 49 | 50 | #[derive(Serialize, Debug)] 51 | #[serde(tag = "kind", rename_all = "camelCase")] 52 | enum EventType { 53 | Connected {}, 54 | Event { event: Event }, 55 | Comment { comment: String }, 56 | Error { error: String }, 57 | } 58 | 59 | impl From<es::SSE> for EventType { 60 | fn from(event: es::SSE) -> Self { 61 | match event { 62 | es::SSE::Connected(_) => Self::Connected {}, 63 | es::SSE::Event(evt) => Self::Event { 64 | event: Event { 65 | event_type: evt.event_type, 66 | data: evt.data, 67 | id: evt.id, 68 | }, 69 | }, 70 | es::SSE::Comment(comment) => Self::Comment { comment }, 71 | } 72 | } 73 | } 74 | 75 | #[derive(Serialize, Debug)] 76 | struct Event { 77 | #[serde(rename = "type")] 78 | event_type: String, 79 | data: String, 80 | id: Option<String>, 81 | } 82 | 83 | async fn status() -> impl Responder { 84 | web::Json(Status { 85 | capabilities: vec![ 86 | "comments".to_string(), 87 | "headers".to_string(), 88 | "last-event-id".to_string(), 89 | "read-timeout".to_string(), 90 | ], 91 | }) 92 | } 93 | 94 | async fn stream( 95 | req: HttpRequest, 96 | config: web::Json<Config>, 97 | app_state: web::Data<AppState>, 98 | ) -> HttpResponse { 99 | let mut stream_entity = match StreamEntity::new(config.into_inner()) { 100 | Ok(se) => se, 101 | Err(e) => return HttpResponse::InternalServerError().body(e), 102 | }; 103 | 104 | let mut counter = match app_state.counter.lock() { 105 | Ok(c) => c, 106 | Err(_) => return HttpResponse::InternalServerError().body("Unable to retrieve counter"), 107 | }; 108 | 109 | let mut entities = match app_state.stream_entities.lock() { 110 | Ok(h) => h, 111 | Err(_) => return HttpResponse::InternalServerError().body("Unable to retrieve handles"), 112 | }; 113 | 114 | let stream_resource = match req.url_for("stop_stream", [counter.to_string()]) { 115 | Ok(sr) => sr, 116 | Err(_) => { 117 | return HttpResponse::InternalServerError() 118 | .body("Unable to generate stream response URL") 119 | } 120 | }; 121 | 122 | *counter += 1; 123 | stream_entity.start(); 124 | entities.insert(*counter, stream_entity); 125 | 126 | let mut response = HttpResponse::Ok(); 127 | response.insert_header(("Location", stream_resource.to_string())); 128 | response.finish() 129 | } 130 | 131 | async fn shutdown(stopper: web::Data<mpsc::Sender<()>>) -> HttpResponse { 132 | match stopper.send(()) { 133 | Ok(_) => HttpResponse::NoContent().finish(), 134 | Err(_) =>
HttpResponse::InternalServerError().body("Unable to send shutdown signal"), 135 | } 136 | } 137 | 138 | async fn stop_stream(req: HttpRequest, app_state: web::Data<AppState>) -> HttpResponse { 139 | if let Some(stream_id) = req.match_info().get("id") { 140 | let stream_id: u32 = match stream_id.parse() { 141 | Ok(id) => id, 142 | Err(_) => return HttpResponse::BadRequest().body("Unable to parse stream id"), 143 | }; 144 | 145 | match app_state.stream_entities.lock() { 146 | Ok(mut entities) => { 147 | if let Some(mut entity) = entities.remove(&stream_id) { 148 | entity.stop(); 149 | } 150 | } 151 | Err(_) => { 152 | return HttpResponse::InternalServerError().body("Unable to retrieve handles") 153 | } 154 | }; 155 | 156 | HttpResponse::NoContent().finish() 157 | } else { 158 | HttpResponse::BadRequest().body("No stream id was provided in the URL") 159 | } 160 | } 161 | 162 | struct AppState { 163 | counter: Mutex<u32>, 164 | stream_entities: Mutex<HashMap<u32, StreamEntity>>, 165 | } 166 | 167 | #[actix_web::main] 168 | async fn main() -> std::io::Result<()> { 169 | env_logger::init(); 170 | 171 | let (tx, rx) = mpsc::channel::<()>(); 172 | 173 | let state = web::Data::new(AppState { 174 | counter: Mutex::new(0), 175 | stream_entities: Mutex::new(HashMap::new()), 176 | }); 177 | 178 | let server = HttpServer::new(move || { 179 | App::new() 180 | .app_data(web::Data::new(tx.clone())) 181 | .app_data(state.clone()) 182 | .route("/", web::get().to(status)) 183 | .route("/", web::post().to(stream)) 184 | .route("/", web::delete().to(shutdown)) 185 | .service( 186 | web::resource("/stream/{id}") 187 | .name("stop_stream") 188 | .guard(guard::Delete()) 189 | .to(stop_stream), 190 | ) 191 | }) 192 | .bind("127.0.0.1:8080")? 193 | .run(); 194 | 195 | let handle = server.handle(); 196 | 197 | thread::spawn(move || { 198 | // wait for shutdown signal 199 | if let Ok(()) = rx.recv() { 200 | executor::block_on(handle.stop(true)) 201 | } 202 | }); 203 | 204 | // run server 205 | server.await 206 | } 207 | -------------------------------------------------------------------------------- /contract-tests/src/bin/sse-test-api/stream_entity.rs: -------------------------------------------------------------------------------- 1 | use actix_web::rt::task::JoinHandle; 2 | use futures::TryStreamExt; 3 | use log::error; 4 | use std::{ 5 | sync::{Arc, Mutex}, 6 | time::Duration, 7 | }; 8 | 9 | use eventsource_client as es; 10 | 11 | use crate::{Config, EventType}; 12 | 13 | pub(crate) struct Inner { 14 | callback_counter: Mutex<u32>, 15 | callback_url: String, 16 | client: Box<dyn es::Client>, 17 | } 18 | 19 | impl Inner { 20 | pub(crate) fn new(config: Config) -> Result<Self, String> { 21 | let client = Inner::build_client(&config)?; 22 | 23 | Ok(Self { 24 | callback_counter: Mutex::new(0), 25 | callback_url: config.callback_url, 26 | client, 27 | }) 28 | } 29 | 30 | pub(crate) async fn start(&self) { 31 | let mut stream = self.client.stream(); 32 | 33 | let client = reqwest::Client::new(); 34 | 35 | loop { 36 | match stream.try_next().await { 37 | Ok(Some(event)) => { 38 | let event_type: EventType = event.into(); 39 | if matches!(event_type, EventType::Connected { ..
}) { 40 | continue; 41 | } 42 | 43 | if !self.send_message(event_type, &client).await { 44 | break; 45 | } 46 | } 47 | Ok(None) => continue, 48 | Err(e) => { 49 | let failure = EventType::Error { 50 | error: format!("Error: {:?}", e), 51 | }; 52 | 53 | if !self.send_message(failure, &client).await { 54 | break; 55 | } 56 | 57 | match e { 58 | es::Error::StreamClosed => break, 59 | _ => continue, 60 | } 61 | } 62 | }; 63 | } 64 | } 65 | 66 | async fn send_message(&self, event_type: EventType, client: &reqwest::Client) -> bool { 67 | let json = match serde_json::to_string(&event_type) { 68 | Ok(s) => s, 69 | Err(e) => { 70 | error!("Failed to json encode event type {:?}", e); 71 | return false; 72 | } 73 | }; 74 | 75 | // send_message is only invoked via the event loop, so this access and following 76 | // update will be serialized. The usage of a mutex is for the interior mutability. 77 | let counter_val = *self.callback_counter.lock().unwrap(); 78 | 79 | match client 80 | .post(format!("{}/{}", self.callback_url, counter_val)) 81 | .body(format!("{}\n", json)) 82 | .send() 83 | .await 84 | { 85 | Ok(_) => { 86 | let mut counter = self.callback_counter.lock().unwrap(); 87 | *counter = counter_val + 1 88 | } 89 | Err(e) => { 90 | error!("Failed to send post back to test harness {:?}", e); 91 | return false; 92 | } 93 | }; 94 | 95 | true 96 | } 97 | 98 | fn build_client(config: &Config) -> Result<Box<dyn es::Client>, String> { 99 | let mut client_builder = match es::ClientBuilder::for_url(&config.stream_url) { 100 | Ok(cb) => cb, 101 | Err(e) => return Err(format!("Failed to create client builder {:?}", e)), 102 | }; 103 | 104 | let mut reconnect_options = es::ReconnectOptions::reconnect(true); 105 | 106 | if let Some(delay_ms) = config.initial_delay_ms { 107 | reconnect_options = reconnect_options.delay(Duration::from_millis(delay_ms)); 108 | } 109 | 110 | if let Some(read_timeout_ms) = config.read_timeout_ms { 111 | client_builder = client_builder.read_timeout(Duration::from_millis(read_timeout_ms)); 112 | } 113 | 114 | if let Some(last_event_id) = &config.last_event_id { 115 | client_builder = client_builder.last_event_id(last_event_id.clone()); 116 | } 117 | 118 | if let Some(method) = &config.method { 119 | client_builder = client_builder.method(method.to_string()); 120 | } 121 | 122 | if let Some(body) = &config.body { 123 | client_builder = client_builder.body(body.to_string()); 124 | } 125 | 126 | if let Some(headers) = &config.headers { 127 | for (name, value) in headers { 128 | client_builder = match client_builder.header(name, value) { 129 | Ok(cb) => cb, 130 | Err(e) => return Err(format!("Unable to set header {:?}", e)), 131 | }; 132 | } 133 | } 134 | 135 | Ok(Box::new( 136 | client_builder.reconnect(reconnect_options.build()).build(), 137 | )) 138 | } 139 | } 140 | 141 | pub(crate) struct StreamEntity { 142 | inner: Arc<Inner>, 143 | handle: Option<JoinHandle<()>>, 144 | } 145 | 146 | impl StreamEntity { 147 | pub(crate) fn new(config: Config) -> Result<Self, String> { 148 | let inner = Inner::new(config)?; 149 | 150 | Ok(Self { 151 | inner: Arc::new(inner), 152 | handle: None, 153 | }) 154 | } 155 | 156 | pub(crate) fn start(&mut self) { 157 | let inner = self.inner.clone(); 158 | 159 | self.handle = Some(actix_web::rt::spawn(async move { 160 | inner.start().await; 161 | })); 162 | } 163 | 164 | pub(crate) fn stop(&mut self) { 165 | if let Some(handle) = &self.handle { 166 | handle.abort(); 167 | } 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /coverage.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o pipefail 5 | 6 | rustup component add llvm-tools-preview 7 | cargo install cargo-llvm-cov 8 | 9 | # generate coverage report to command line by default; otherwise allow 10 | # CI to pass in '--html' (or other formats). 11 | 12 | if [ -n "$1" ]; then 13 | cargo llvm-cov --all-features --workspace "$1" 14 | else 15 | cargo llvm-cov --all-features --workspace 16 | fi 17 | -------------------------------------------------------------------------------- /eventsource-client/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change log 2 | 3 | All notable changes to the project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org). 4 | 5 | ## [0.15.0](https://github.com/launchdarkly/rust-eventsource-client/compare/0.14.0...0.15.0) (2025-03-27) 6 | 7 | 8 | ### Features 9 | 10 | * Add connect and write timeout configurations options ([#93](https://github.com/launchdarkly/rust-eventsource-client/issues/93)) ([cfb9321](https://github.com/launchdarkly/rust-eventsource-client/commit/cfb9321633f71f95116a61fe4d7a14c0aca4f982)) 11 | 12 | 13 | ### Bug Fixes 14 | 15 | * Bump MSRV to 1.81 ([#91](https://github.com/launchdarkly/rust-eventsource-client/issues/91)) ([3b63d64](https://github.com/launchdarkly/rust-eventsource-client/commit/3b63d644b1d9475ed7ec69bb3f386badb77abc63)) 16 | 17 | ## [0.14.0](https://github.com/launchdarkly/rust-eventsource-client/compare/0.13.0...0.14.0) (2025-02-24) 18 | 19 | 20 | ### Features 21 | 22 | * Add `basic_auth` method to `ClientBuilder` ([#88](https://github.com/launchdarkly/rust-eventsource-client/issues/88)) ([526b03f](https://github.com/launchdarkly/rust-eventsource-client/commit/526b03ffeb5b0ce7f42c531b1b579e2f9a4e9662)) 23 | 24 | ## [0.13.0](https://github.com/launchdarkly/rust-eventsource-client/compare/0.12.2...0.13.0) (2024-07-30) 25 | 26 | 27 | ### Features 28 | 29 | * Emit `SSE::Connected` event when stream is established ([#79](https://github.com/launchdarkly/rust-eventsource-client/issues/79)) ([791faf4](https://github.com/launchdarkly/rust-eventsource-client/commit/791faf4f2cda2165cf9df50a181344979d43429c)) 30 | * Update `Error::UnexpectedResponse` to include failed connection details ([#79](https://github.com/launchdarkly/rust-eventsource-client/issues/79)) ([791faf4](https://github.com/launchdarkly/rust-eventsource-client/commit/791faf4f2cda2165cf9df50a181344979d43429c)) 31 | 32 | ## [0.12.2](https://github.com/launchdarkly/rust-eventsource-client/compare/0.12.1...0.12.2) (2023-12-20) 33 | 34 | 35 | ### Bug Fixes 36 | 37 | * **deps:** Bump hyper to fix CVE-2022-31394 ([#72](https://github.com/launchdarkly/rust-eventsource-client/issues/72)) ([48d9555](https://github.com/launchdarkly/rust-eventsource-client/commit/48d955541dc29695a81b2535dafd7dec2fdb59d8)) 38 | 39 | ## [0.12.1](https://github.com/launchdarkly/rust-eventsource-client/compare/0.12.0...0.12.1) (2023-12-12) 40 | 41 | 42 | ### Bug Fixes 43 | 44 | * logify could panic if truncating mid-code point ([#70](https://github.com/launchdarkly/rust-eventsource-client/issues/70)) ([37316c4](https://github.com/launchdarkly/rust-eventsource-client/commit/37316c4f0e8c015db118dc1d082281838e88e522)) 45 | 46 | ## [0.12.0](https://github.com/launchdarkly/rust-eventsource-client/compare/0.11.0...0.12.0) (2023-11-15) 47 | 48 | 49 | ### ⚠ BREAKING CHANGES 50 | 51 | * Remove re-export of 
hyper_rustls types ([#59](https://github.com/launchdarkly/rust-eventsource-client/issues/59)) 52 | * Bump dependencies ([#58](https://github.com/launchdarkly/rust-eventsource-client/issues/58)) 53 | 54 | ### deps 55 | 56 | * Bump dependencies ([#58](https://github.com/launchdarkly/rust-eventsource-client/issues/58)) ([a7174e3](https://github.com/launchdarkly/rust-eventsource-client/commit/a7174e328f168af0a96f8c9671453a29c028d0f0)) 57 | 58 | 59 | ### Features 60 | 61 | * make Error implement std::fmt::Display, std::error::Error` and Sync ([#47](https://github.com/launchdarkly/rust-eventsource-client/issues/47)) ([0eaab6e](https://github.com/launchdarkly/rust-eventsource-client/commit/0eaab6eefb8d69aac01ded4ab53c527c84084ba6)) 62 | 63 | 64 | ### Bug Fixes 65 | 66 | * Remove re-export of hyper_rustls types ([#59](https://github.com/launchdarkly/rust-eventsource-client/issues/59)) ([ec24970](https://github.com/launchdarkly/rust-eventsource-client/commit/ec24970d4a9ed875a44fb9c84c67b587d46ca23d)) 67 | 68 | ## [0.11.0] - 2022-11-07 69 | ### Fixed: 70 | - Add missing retry interval reset behavior. 71 | - Add missing jitter to retry strategy. 72 | 73 | ## [0.10.2] - 2022-10-28 74 | ### Fixed: 75 | - Correctly handle comment payloads. 76 | 77 | ## [0.10.1] - 2022-04-14 78 | ### Fixed: 79 | - Comment events were incorrectly consuming non-comment event data. Now comment events are emitted as they are parsed and can no longer affect non-comment event data. 80 | 81 | ## [0.10.0] - 2022-03-23 82 | ### Added: 83 | - Added support for following 301 & 307 redirects with configurable redirect limit. 84 | 85 | ### Fixed: 86 | - Fixed `Last-Event-ID` handling when server sends explicit empty ID. 87 | 88 | ## [0.9.0] - 2022-03-15 89 | ### Added: 90 | - Added support for SSE test harness. 91 | 92 | ### Changed: 93 | - Change `ClientBuilder` to return `impl Client`, where `Client` is a sealed trait that exposes a `stream()` method. 94 | 95 | ### Fixed: 96 | - Fixed various bugs related to SSE protocol. 97 | 98 | ## [0.8.2] - 2022-02-03 99 | ### Added: 100 | - Support for creating an event source client with a pre-configured connection. 
101 | 102 | ## [0.8.1] - 2022-01-19 103 | ### Changed: 104 | - Added missing changelog 105 | - Fixed keyword for crates.io publishing 106 | 107 | ## [0.8.0] - 2022-01-19 108 | ### Changed: 109 | - Introduced new Error variant 110 | -------------------------------------------------------------------------------- /eventsource-client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eventsource-client" 3 | version = "0.15.0" 4 | description = "Client for the Server-Sent Events protocol (aka EventSource)" 5 | repository = "https://github.com/launchdarkly/rust-eventsource-client" 6 | authors = ["LaunchDarkly"] 7 | edition = "2021" 8 | rust-version = "1.81.0" 9 | license = "Apache-2.0" 10 | keywords = ["launchdarkly", "feature-flags", "feature-toggles", "eventsource", "server-sent-events"] 11 | exclude = ["CHANGELOG.md"] 12 | 13 | [dependencies] 14 | futures = "0.3.21" 15 | hyper = { version = "0.14.19", features = ["client", "http1", "tcp"] } 16 | hyper-rustls = { version = "0.24.1", optional = true } 17 | log = "0.4.6" 18 | pin-project = "1.0.10" 19 | tokio = { version = "1.17.0", features = ["time"] } 20 | hyper-timeout = "0.4.1" 21 | rand = "0.8.5" 22 | base64 = "0.22.1" 23 | 24 | [dev-dependencies] 25 | env_logger = "0.10.0" 26 | maplit = "1.0.1" 27 | simplelog = "0.12.1" 28 | tokio = { version = "1.2.0", features = ["macros", "rt-multi-thread"] } 29 | test-case = "3.2.1" 30 | proptest = "1.0.0" 31 | 32 | 33 | [features] 34 | default = ["rustls"] 35 | rustls = ["hyper-rustls", "hyper-rustls/http2"] 36 | 37 | [[example]] 38 | name = "tail" 39 | required-features = ["rustls"] 40 | -------------------------------------------------------------------------------- /eventsource-client/README.md: -------------------------------------------------------------------------------- 1 | # eventsource-client 2 | 3 | [![Run CI](https://github.com/launchdarkly/rust-eventsource-client/actions/workflows/ci.yml/badge.svg)](https://github.com/launchdarkly/rust-eventsource-client/actions/workflows/ci.yml) 4 | 5 | Client for the [Server-Sent Events] protocol (aka [EventSource]). 6 | 7 | [Server-Sent Events]: https://html.spec.whatwg.org/multipage/server-sent-events.html 8 | [EventSource]: https://developer.mozilla.org/en-US/docs/Web/API/EventSource 9 | 10 | ## Requirements 11 | 12 | Requires tokio. 13 | 14 | ## Usage 15 | 16 | Example that just prints the type of each event received: 17 | 18 | ```rust 19 | use eventsource_client as es; 20 | 21 | let mut client = es::ClientBuilder::for_url("https://example.com/stream")? 22 | .header("Authorization", "Basic username:password")? 23 | .build(); 24 | 25 | client 26 | .stream() 27 | .map_ok(|event| println!("got event: {:?}", event)) 28 | .map_err(|err| eprintln!("error streaming events: {:?}", err)); 29 | ``` 30 | 31 | (Some boilerplate omitted for clarity; see [examples directory] for complete, 32 | working code.) 33 | 34 | [examples directory]: https://github.com/launchdarkly/rust-eventsource-client/tree/main/eventsource-client/examples 35 | ## Features 36 | 37 | * tokio-based streaming client. 38 | * Supports setting custom headers on the HTTP request (e.g. for endpoints 39 | requiring authorization). 40 | * Retry for failed connections. 41 | * Reconnection if connection is interrupted, with exponential backoff. 42 | 43 | ## Stability 44 | 45 | Early stage release for feedback purposes. May contain bugs or performance 46 | issues. API subject to change. 
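## Reconnect configuration

The retry and reconnection behaviour listed under Features is configurable through `ReconnectOptions`. A minimal sketch, mirroring the `tail` example in this repository (the delays and backoff factor shown here are illustrative, not recommendations):

```rust
use std::time::Duration;
use eventsource_client as es;

let client = es::ClientBuilder::for_url("https://example.com/stream")?
    .reconnect(
        es::ReconnectOptions::reconnect(true)
            .retry_initial(false)
            .delay(Duration::from_secs(1))
            .backoff_factor(2)
            .delay_max(Duration::from_secs(60))
            .build(),
    )
    .build();
```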
47 | -------------------------------------------------------------------------------- /eventsource-client/examples/tail.rs: -------------------------------------------------------------------------------- 1 | use futures::{Stream, TryStreamExt}; 2 | use std::{env, process, time::Duration}; 3 | 4 | use eventsource_client as es; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), es::Error> { 8 | env_logger::init(); 9 | 10 | let args: Vec = env::args().collect(); 11 | 12 | if args.len() != 3 { 13 | eprintln!("Please pass args: "); 14 | process::exit(1); 15 | } 16 | 17 | let url = &args[1]; 18 | let auth_header = &args[2]; 19 | 20 | let client = es::ClientBuilder::for_url(url)? 21 | .header("Authorization", auth_header)? 22 | .reconnect( 23 | es::ReconnectOptions::reconnect(true) 24 | .retry_initial(false) 25 | .delay(Duration::from_secs(1)) 26 | .backoff_factor(2) 27 | .delay_max(Duration::from_secs(60)) 28 | .build(), 29 | ) 30 | .build(); 31 | 32 | let mut stream = tail_events(client); 33 | 34 | while let Ok(Some(_)) = stream.try_next().await {} 35 | 36 | Ok(()) 37 | } 38 | 39 | fn tail_events(client: impl es::Client) -> impl Stream> { 40 | client 41 | .stream() 42 | .map_ok(|event| match event { 43 | es::SSE::Connected(connection) => { 44 | println!("got connected: \nstatus={}", connection.response().status()) 45 | } 46 | es::SSE::Event(ev) => { 47 | println!("got an event: {}\n{}", ev.event_type, ev.data) 48 | } 49 | es::SSE::Comment(comment) => { 50 | println!("got a comment: \n{}", comment) 51 | } 52 | }) 53 | .map_err(|err| eprintln!("error streaming events: {:?}", err)) 54 | } 55 | -------------------------------------------------------------------------------- /eventsource-client/src/client.rs: -------------------------------------------------------------------------------- 1 | use base64::prelude::*; 2 | 3 | use futures::{ready, Stream}; 4 | use hyper::{ 5 | body::HttpBody, 6 | client::{ 7 | connect::{Connect, Connection}, 8 | ResponseFuture, 9 | }, 10 | header::{HeaderMap, HeaderName, HeaderValue}, 11 | service::Service, 12 | Body, Request, Uri, 13 | }; 14 | use log::{debug, info, trace, warn}; 15 | use pin_project::pin_project; 16 | use std::{ 17 | boxed, 18 | fmt::{self, Debug, Formatter}, 19 | future::Future, 20 | io::ErrorKind, 21 | pin::Pin, 22 | str::FromStr, 23 | task::{Context, Poll}, 24 | time::{Duration, Instant}, 25 | }; 26 | 27 | use tokio::{ 28 | io::{AsyncRead, AsyncWrite}, 29 | time::Sleep, 30 | }; 31 | 32 | use crate::{ 33 | config::ReconnectOptions, 34 | response::{ErrorBody, Response}, 35 | }; 36 | use crate::{ 37 | error::{Error, Result}, 38 | event_parser::ConnectionDetails, 39 | }; 40 | 41 | use hyper::client::HttpConnector; 42 | use hyper_timeout::TimeoutConnector; 43 | 44 | use crate::event_parser::EventParser; 45 | use crate::event_parser::SSE; 46 | 47 | use crate::retry::{BackoffRetry, RetryStrategy}; 48 | use std::error::Error as StdError; 49 | 50 | #[cfg(feature = "rustls")] 51 | use hyper_rustls::HttpsConnectorBuilder; 52 | 53 | type BoxError = Box; 54 | 55 | /// Represents a [`Pin`]'d [`Send`] + [`Sync`] stream, returned by [`Client`]'s stream method. 56 | pub type BoxStream = Pin + Send + Sync>>; 57 | 58 | /// Client is the Server-Sent-Events interface. 59 | /// This trait is sealed and cannot be implemented for types outside this crate. 60 | pub trait Client: Send + Sync + private::Sealed { 61 | fn stream(&self) -> BoxStream>; 62 | } 63 | 64 | /* 65 | * TODO remove debug output 66 | * TODO specify list of stati to not retry (e.g. 
204) 67 | */ 68 | 69 | /// Maximum amount of redirects that the client will follow before 70 | /// giving up, if not overridden via [ClientBuilder::redirect_limit]. 71 | pub const DEFAULT_REDIRECT_LIMIT: u32 = 16; 72 | 73 | /// ClientBuilder provides a series of builder methods to easily construct a [`Client`]. 74 | pub struct ClientBuilder { 75 | url: Uri, 76 | headers: HeaderMap, 77 | reconnect_opts: ReconnectOptions, 78 | connect_timeout: Option, 79 | read_timeout: Option, 80 | write_timeout: Option, 81 | last_event_id: Option, 82 | method: String, 83 | body: Option, 84 | max_redirects: Option, 85 | } 86 | 87 | impl ClientBuilder { 88 | /// Create a builder for a given URL. 89 | pub fn for_url(url: &str) -> Result { 90 | let url = url 91 | .parse() 92 | .map_err(|e| Error::InvalidParameter(Box::new(e)))?; 93 | 94 | let mut header_map = HeaderMap::new(); 95 | header_map.insert("Accept", HeaderValue::from_static("text/event-stream")); 96 | header_map.insert("Cache-Control", HeaderValue::from_static("no-cache")); 97 | 98 | Ok(ClientBuilder { 99 | url, 100 | headers: header_map, 101 | reconnect_opts: ReconnectOptions::default(), 102 | connect_timeout: None, 103 | read_timeout: None, 104 | write_timeout: None, 105 | last_event_id: None, 106 | method: String::from("GET"), 107 | max_redirects: None, 108 | body: None, 109 | }) 110 | } 111 | 112 | /// Set the request method used for the initial connection to the SSE endpoint. 113 | pub fn method(mut self, method: String) -> ClientBuilder { 114 | self.method = method; 115 | self 116 | } 117 | 118 | /// Set the request body used for the initial connection to the SSE endpoint. 119 | pub fn body(mut self, body: String) -> ClientBuilder { 120 | self.body = Some(body); 121 | self 122 | } 123 | 124 | /// Set the last event id for a stream when it is created. If it is set, it will be sent to the 125 | /// server in case it can replay missed events. 126 | pub fn last_event_id(mut self, last_event_id: String) -> ClientBuilder { 127 | self.last_event_id = Some(last_event_id); 128 | self 129 | } 130 | 131 | /// Set a HTTP header on the SSE request. 132 | pub fn header(mut self, name: &str, value: &str) -> Result { 133 | let name = HeaderName::from_str(name).map_err(|e| Error::InvalidParameter(Box::new(e)))?; 134 | 135 | let value = 136 | HeaderValue::from_str(value).map_err(|e| Error::InvalidParameter(Box::new(e)))?; 137 | 138 | self.headers.insert(name, value); 139 | Ok(self) 140 | } 141 | 142 | /// Set the Authorization header with the calculated basic authentication value. 143 | pub fn basic_auth(self, username: &str, password: &str) -> Result { 144 | let auth = format!("{}:{}", username, password); 145 | let encoded = BASE64_STANDARD.encode(auth); 146 | let value = format!("Basic {}", encoded); 147 | 148 | self.header("Authorization", &value) 149 | } 150 | 151 | /// Set a connect timeout for the underlying connection. There is no connect timeout by 152 | /// default. 153 | pub fn connect_timeout(mut self, connect_timeout: Duration) -> ClientBuilder { 154 | self.connect_timeout = Some(connect_timeout); 155 | self 156 | } 157 | 158 | /// Set a read timeout for the underlying connection. There is no read timeout by default. 159 | pub fn read_timeout(mut self, read_timeout: Duration) -> ClientBuilder { 160 | self.read_timeout = Some(read_timeout); 161 | self 162 | } 163 | 164 | /// Set a write timeout for the underlying connection. There is no write timeout by default. 
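    ///
    /// A sketch of how the three timeouts can be combined on one builder (the values
    /// shown are illustrative only):
    ///
    /// ```ignore
    /// let client = ClientBuilder::for_url("https://example.com/stream")?
    ///     .connect_timeout(Duration::from_secs(5))
    ///     .read_timeout(Duration::from_secs(30))
    ///     .write_timeout(Duration::from_secs(5))
    ///     .build();
    /// ```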
165 | pub fn write_timeout(mut self, write_timeout: Duration) -> ClientBuilder { 166 | self.write_timeout = Some(write_timeout); 167 | self 168 | } 169 | 170 | /// Configure the client's reconnect behaviour according to the supplied 171 | /// [`ReconnectOptions`]. 172 | /// 173 | /// [`ReconnectOptions`]: struct.ReconnectOptions.html 174 | pub fn reconnect(mut self, opts: ReconnectOptions) -> ClientBuilder { 175 | self.reconnect_opts = opts; 176 | self 177 | } 178 | 179 | /// Customize the client's following behavior when served a redirect. 180 | /// To disable following redirects, pass `0`. 181 | /// By default, the limit is [`DEFAULT_REDIRECT_LIMIT`]. 182 | pub fn redirect_limit(mut self, limit: u32) -> ClientBuilder { 183 | self.max_redirects = Some(limit); 184 | self 185 | } 186 | 187 | /// Build with a specific client connector. 188 | pub fn build_with_conn(self, conn: C) -> impl Client 189 | where 190 | C: Service + Clone + Send + Sync + 'static, 191 | C::Response: Connection + AsyncRead + AsyncWrite + Send + Unpin, 192 | C::Future: Send + 'static, 193 | C::Error: Into, 194 | { 195 | let mut connector = TimeoutConnector::new(conn); 196 | connector.set_connect_timeout(self.connect_timeout); 197 | connector.set_read_timeout(self.read_timeout); 198 | connector.set_write_timeout(self.write_timeout); 199 | 200 | let client = hyper::Client::builder().build::<_, hyper::Body>(connector); 201 | 202 | ClientImpl { 203 | http: client, 204 | request_props: RequestProps { 205 | url: self.url, 206 | headers: self.headers, 207 | method: self.method, 208 | body: self.body, 209 | reconnect_opts: self.reconnect_opts, 210 | max_redirects: self.max_redirects.unwrap_or(DEFAULT_REDIRECT_LIMIT), 211 | }, 212 | last_event_id: self.last_event_id, 213 | } 214 | } 215 | 216 | /// Build with an HTTP client connector. 217 | pub fn build_http(self) -> impl Client { 218 | self.build_with_conn(HttpConnector::new()) 219 | } 220 | 221 | #[cfg(feature = "rustls")] 222 | /// Build with an HTTPS client connector, using the OS root certificate store. 223 | pub fn build(self) -> impl Client { 224 | let conn = HttpsConnectorBuilder::new() 225 | .with_native_roots() 226 | .https_or_http() 227 | .enable_http1() 228 | .enable_http2() 229 | .build(); 230 | 231 | self.build_with_conn(conn) 232 | } 233 | 234 | /// Build with the given [`hyper::client::Client`]. 235 | pub fn build_with_http_client(self, http: hyper::Client) -> impl Client 236 | where 237 | C: Connect + Clone + Send + Sync + 'static, 238 | { 239 | ClientImpl { 240 | http, 241 | request_props: RequestProps { 242 | url: self.url, 243 | headers: self.headers, 244 | method: self.method, 245 | body: self.body, 246 | reconnect_opts: self.reconnect_opts, 247 | max_redirects: self.max_redirects.unwrap_or(DEFAULT_REDIRECT_LIMIT), 248 | }, 249 | last_event_id: self.last_event_id, 250 | } 251 | } 252 | } 253 | 254 | #[derive(Clone)] 255 | struct RequestProps { 256 | url: Uri, 257 | headers: HeaderMap, 258 | method: String, 259 | body: Option, 260 | reconnect_opts: ReconnectOptions, 261 | max_redirects: u32, 262 | } 263 | 264 | /// A client implementation that connects to a server using the Server-Sent Events protocol 265 | /// and consumes the event stream indefinitely. 266 | /// Can be parameterized with different hyper Connectors, such as HTTP or HTTPS. 
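// For callers, the connector is selected through `ClientBuilder` rather than by naming this
// type directly: `build_http()` uses hyper's plain `HttpConnector`, `build()` (behind the
// `rustls` feature) uses an HTTPS connector, and `build_with_conn(...)` or
// `build_with_http_client(...)` accept a custom connector or preconfigured client. A rough
// sketch (illustrative URL):
//
//     let client = ClientBuilder::for_url("http://localhost:8080/stream")?.build_http();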
267 | struct ClientImpl { 268 | http: hyper::Client, 269 | request_props: RequestProps, 270 | last_event_id: Option, 271 | } 272 | 273 | impl Client for ClientImpl 274 | where 275 | C: Connect + Clone + Send + Sync + 'static, 276 | { 277 | /// Connect to the server and begin consuming the stream. Produces a 278 | /// [`Stream`] of [`Event`](crate::Event)s wrapped in [`Result`]. 279 | /// 280 | /// Do not use the stream after it returned an error! 281 | /// 282 | /// After the first successful connection, the stream will 283 | /// reconnect for retryable errors. 284 | fn stream(&self) -> BoxStream> { 285 | Box::pin(ReconnectingRequest::new( 286 | self.http.clone(), 287 | self.request_props.clone(), 288 | self.last_event_id.clone(), 289 | )) 290 | } 291 | } 292 | 293 | #[allow(clippy::large_enum_variant)] // false positive 294 | #[pin_project(project = StateProj)] 295 | enum State { 296 | New, 297 | Connecting { 298 | retry: bool, 299 | #[pin] 300 | resp: ResponseFuture, 301 | }, 302 | Connected(#[pin] hyper::Body), 303 | WaitingToReconnect(#[pin] Sleep), 304 | FollowingRedirect(Option), 305 | StreamClosed, 306 | } 307 | 308 | impl State { 309 | fn name(&self) -> &'static str { 310 | match self { 311 | State::New => "new", 312 | State::Connecting { retry: false, .. } => "connecting(no-retry)", 313 | State::Connecting { retry: true, .. } => "connecting(retry)", 314 | State::Connected(_) => "connected", 315 | State::WaitingToReconnect(_) => "waiting-to-reconnect", 316 | State::FollowingRedirect(_) => "following-redirect", 317 | State::StreamClosed => "closed", 318 | } 319 | } 320 | } 321 | 322 | impl Debug for State { 323 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 324 | write!(f, "{}", self.name()) 325 | } 326 | } 327 | 328 | #[must_use = "streams do nothing unless polled"] 329 | #[pin_project] 330 | pub struct ReconnectingRequest { 331 | http: hyper::Client, 332 | props: RequestProps, 333 | #[pin] 334 | state: State, 335 | retry_strategy: Box, 336 | current_url: Uri, 337 | redirect_count: u32, 338 | event_parser: EventParser, 339 | last_event_id: Option, 340 | } 341 | 342 | impl ReconnectingRequest { 343 | fn new( 344 | http: hyper::Client, 345 | props: RequestProps, 346 | last_event_id: Option, 347 | ) -> ReconnectingRequest { 348 | let reconnect_delay = props.reconnect_opts.delay; 349 | let delay_max = props.reconnect_opts.delay_max; 350 | let backoff_factor = props.reconnect_opts.backoff_factor; 351 | 352 | let url = props.url.clone(); 353 | ReconnectingRequest { 354 | props, 355 | http, 356 | state: State::New, 357 | retry_strategy: Box::new(BackoffRetry::new( 358 | reconnect_delay, 359 | delay_max, 360 | backoff_factor, 361 | true, 362 | )), 363 | redirect_count: 0, 364 | current_url: url, 365 | event_parser: EventParser::new(), 366 | last_event_id, 367 | } 368 | } 369 | 370 | fn send_request(&self) -> Result 371 | where 372 | C: Connect + Clone + Send + Sync + 'static, 373 | { 374 | let mut request_builder = Request::builder() 375 | .method(self.props.method.as_str()) 376 | .uri(&self.current_url); 377 | 378 | for (name, value) in &self.props.headers { 379 | request_builder = request_builder.header(name, value); 380 | } 381 | 382 | if let Some(id) = self.last_event_id.as_ref() { 383 | if !id.is_empty() { 384 | let id_as_header = 385 | HeaderValue::from_str(id).map_err(|e| Error::InvalidParameter(Box::new(e)))?; 386 | 387 | request_builder = request_builder.header("last-event-id", id_as_header); 388 | } 389 | } 390 | 391 | let body = match &self.props.body { 392 | Some(body) 
=> Body::from(body.to_string()), 393 | None => Body::empty(), 394 | }; 395 | 396 | let request = request_builder 397 | .body(body) 398 | .map_err(|e| Error::InvalidParameter(Box::new(e)))?; 399 | 400 | Ok(self.http.request(request)) 401 | } 402 | 403 | fn reset_redirects(self: Pin<&mut Self>) { 404 | let url = self.props.url.clone(); 405 | let this = self.project(); 406 | *this.current_url = url; 407 | *this.redirect_count = 0; 408 | } 409 | 410 | fn increment_redirect_counter(self: Pin<&mut Self>) -> bool { 411 | if self.redirect_count == self.props.max_redirects { 412 | return false; 413 | } 414 | *self.project().redirect_count += 1; 415 | true 416 | } 417 | } 418 | 419 | impl Stream for ReconnectingRequest 420 | where 421 | C: Connect + Clone + Send + Sync + 'static, 422 | { 423 | type Item = Result; 424 | 425 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 426 | trace!("ReconnectingRequest::poll({:?})", &self.state); 427 | 428 | loop { 429 | let this = self.as_mut().project(); 430 | if let Some(event) = this.event_parser.get_event() { 431 | return match event { 432 | SSE::Connected(_) => Poll::Ready(Some(Ok(event))), 433 | SSE::Event(ref evt) => { 434 | this.last_event_id.clone_from(&evt.id); 435 | 436 | if let Some(retry) = evt.retry { 437 | this.retry_strategy 438 | .change_base_delay(Duration::from_millis(retry)); 439 | } 440 | Poll::Ready(Some(Ok(event))) 441 | } 442 | SSE::Comment(_) => Poll::Ready(Some(Ok(event))), 443 | }; 444 | } 445 | 446 | trace!("ReconnectingRequest::poll loop({:?})", &this.state); 447 | 448 | let state = this.state.project(); 449 | match state { 450 | StateProj::StreamClosed => return Poll::Ready(Some(Err(Error::StreamClosed))), 451 | // New immediately transitions to Connecting, and exists only 452 | // to ensure that we only connect when polled. 453 | StateProj::New => { 454 | *self.as_mut().project().event_parser = EventParser::new(); 455 | match self.send_request() { 456 | Ok(resp) => { 457 | let retry = self.props.reconnect_opts.retry_initial; 458 | self.as_mut() 459 | .project() 460 | .state 461 | .set(State::Connecting { resp, retry }) 462 | } 463 | Err(e) => { 464 | // This error seems to be unrecoverable. So we should just shut down the 465 | // stream. 
466 | self.as_mut().project().state.set(State::StreamClosed); 467 | return Poll::Ready(Some(Err(e))); 468 | } 469 | } 470 | } 471 | StateProj::Connecting { retry, resp } => match ready!(resp.poll(cx)) { 472 | Ok(resp) => { 473 | debug!("HTTP response: {:#?}", resp); 474 | 475 | if resp.status().is_success() { 476 | self.as_mut().project().retry_strategy.reset(Instant::now()); 477 | self.as_mut().reset_redirects(); 478 | 479 | let status = resp.status(); 480 | let headers = resp.headers().clone(); 481 | 482 | self.as_mut() 483 | .project() 484 | .state 485 | .set(State::Connected(resp.into_body())); 486 | 487 | return Poll::Ready(Some(Ok(SSE::Connected(ConnectionDetails::new( 488 | Response::new(status, headers), 489 | ))))); 490 | } 491 | 492 | if resp.status() == 301 || resp.status() == 307 { 493 | debug!("got redirected ({})", resp.status()); 494 | 495 | if self.as_mut().increment_redirect_counter() { 496 | debug!("following redirect {}", self.redirect_count); 497 | 498 | self.as_mut().project().state.set(State::FollowingRedirect( 499 | resp.headers().get(hyper::header::LOCATION).cloned(), 500 | )); 501 | continue; 502 | } else { 503 | debug!("redirect limit reached ({})", self.props.max_redirects); 504 | 505 | self.as_mut().project().state.set(State::StreamClosed); 506 | return Poll::Ready(Some(Err(Error::MaxRedirectLimitReached( 507 | self.props.max_redirects, 508 | )))); 509 | } 510 | } 511 | 512 | self.as_mut().reset_redirects(); 513 | self.as_mut().project().state.set(State::New); 514 | 515 | return Poll::Ready(Some(Err(Error::UnexpectedResponse( 516 | Response::new(resp.status(), resp.headers().clone()), 517 | ErrorBody::new(resp.into_body()), 518 | )))); 519 | } 520 | Err(e) => { 521 | // This seems basically impossible. AFAIK we can only get this way if we 522 | // poll after it was already ready 523 | warn!("request returned an error: {}", e); 524 | if !*retry { 525 | self.as_mut().project().state.set(State::New); 526 | return Poll::Ready(Some(Err(Error::HttpStream(Box::new(e))))); 527 | } 528 | let duration = self 529 | .as_mut() 530 | .project() 531 | .retry_strategy 532 | .next_delay(Instant::now()); 533 | self.as_mut() 534 | .project() 535 | .state 536 | .set(State::WaitingToReconnect(delay(duration, "retrying"))) 537 | } 538 | }, 539 | StateProj::FollowingRedirect(maybe_header) => match uri_from_header(maybe_header) { 540 | Ok(uri) => { 541 | *self.as_mut().project().current_url = uri; 542 | self.as_mut().project().state.set(State::New); 543 | } 544 | Err(e) => { 545 | self.as_mut().project().state.set(State::StreamClosed); 546 | return Poll::Ready(Some(Err(e))); 547 | } 548 | }, 549 | StateProj::Connected(body) => match ready!(body.poll_data(cx)) { 550 | Some(Ok(result)) => { 551 | this.event_parser.process_bytes(result)?; 552 | continue; 553 | } 554 | Some(Err(e)) => { 555 | if self.props.reconnect_opts.reconnect { 556 | let duration = self 557 | .as_mut() 558 | .project() 559 | .retry_strategy 560 | .next_delay(Instant::now()); 561 | self.as_mut() 562 | .project() 563 | .state 564 | .set(State::WaitingToReconnect(delay(duration, "reconnecting"))); 565 | } 566 | 567 | if let Some(cause) = e.source() { 568 | if let Some(downcast) = cause.downcast_ref::() { 569 | if let std::io::ErrorKind::TimedOut = downcast.kind() { 570 | return Poll::Ready(Some(Err(Error::TimedOut))); 571 | } 572 | } 573 | } else { 574 | return Poll::Ready(Some(Err(Error::HttpStream(Box::new(e))))); 575 | } 576 | } 577 | None => { 578 | let duration = self 579 | .as_mut() 580 | .project() 581 | 
.retry_strategy 582 | .next_delay(Instant::now()); 583 | self.as_mut() 584 | .project() 585 | .state 586 | .set(State::WaitingToReconnect(delay(duration, "retrying"))); 587 | 588 | if self.event_parser.was_processing() { 589 | return Poll::Ready(Some(Err(Error::UnexpectedEof))); 590 | } 591 | return Poll::Ready(Some(Err(Error::Eof))); 592 | } 593 | }, 594 | StateProj::WaitingToReconnect(delay) => { 595 | ready!(delay.poll(cx)); 596 | info!("Reconnecting"); 597 | self.as_mut().project().state.set(State::New); 598 | } 599 | }; 600 | } 601 | } 602 | } 603 | 604 | fn uri_from_header(maybe_header: &Option) -> Result { 605 | let header = maybe_header.as_ref().ok_or_else(|| { 606 | Error::MalformedLocationHeader(Box::new(std::io::Error::new( 607 | ErrorKind::NotFound, 608 | "missing Location header", 609 | ))) 610 | })?; 611 | 612 | let header_string = header 613 | .to_str() 614 | .map_err(|e| Error::MalformedLocationHeader(Box::new(e)))?; 615 | 616 | header_string 617 | .parse::() 618 | .map_err(|e| Error::MalformedLocationHeader(Box::new(e))) 619 | } 620 | 621 | fn delay(dur: Duration, description: &str) -> Sleep { 622 | info!("Waiting {:?} before {}", dur, description); 623 | tokio::time::sleep(dur) 624 | } 625 | 626 | mod private { 627 | use crate::client::ClientImpl; 628 | 629 | pub trait Sealed {} 630 | impl Sealed for ClientImpl {} 631 | } 632 | 633 | #[cfg(test)] 634 | mod tests { 635 | use crate::ClientBuilder; 636 | use hyper::http::HeaderValue; 637 | use test_case::test_case; 638 | 639 | #[test_case("user", "pass", "dXNlcjpwYXNz")] 640 | #[test_case("user1", "password123", "dXNlcjE6cGFzc3dvcmQxMjM=")] 641 | #[test_case("user2", "", "dXNlcjI6")] 642 | #[test_case("user@name", "pass#word!", "dXNlckBuYW1lOnBhc3Mjd29yZCE=")] 643 | #[test_case("user3", "my pass", "dXNlcjM6bXkgcGFzcw==")] 644 | #[test_case( 645 | "weird@-/:stuff", 646 | "goes@-/:here", 647 | "d2VpcmRALS86c3R1ZmY6Z29lc0AtLzpoZXJl" 648 | )] 649 | fn basic_auth_generates_correct_headers(username: &str, password: &str, expected: &str) { 650 | let builder = ClientBuilder::for_url("http://example.com") 651 | .expect("failed to build client") 652 | .basic_auth(username, password) 653 | .expect("failed to add authentication"); 654 | 655 | let actual = builder.headers.get("Authorization"); 656 | let expected = HeaderValue::from_str(format!("Basic {}", expected).as_str()) 657 | .expect("unable to create expected header"); 658 | 659 | assert_eq!(Some(&expected), actual); 660 | } 661 | } 662 | -------------------------------------------------------------------------------- /eventsource-client/src/config.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | /// Configuration for a [`Client`]'s reconnect behaviour. 4 | /// 5 | /// ``` 6 | /// # use std::time::Duration; 7 | /// # use eventsource_client::ReconnectOptions; 8 | /// # 9 | /// let reconnect_options = ReconnectOptions::reconnect(true) 10 | /// .retry_initial(false) 11 | /// .delay(Duration::from_secs(1)) 12 | /// .backoff_factor(2) 13 | /// .delay_max(Duration::from_secs(60)) 14 | /// .build(); 15 | /// ``` 16 | /// 17 | /// See [`default()`] for a description of the default behaviour. See 18 | /// [`ReconnectOptionsBuilder`] for descriptions of each configurable parameter. 
19 | /// 20 | /// [`Client`]: struct.Client.html 21 | /// [`default()`]: #method.default 22 | /// [`ReconnectOptionsBuilder`]: struct.ReconnectOptionsBuilder.html 23 | #[derive(Clone, Debug)] 24 | pub struct ReconnectOptions { 25 | pub(crate) retry_initial: bool, 26 | pub(crate) reconnect: bool, 27 | pub(crate) delay: Duration, 28 | pub(crate) backoff_factor: u32, 29 | pub(crate) delay_max: Duration, 30 | } 31 | 32 | impl ReconnectOptions { 33 | /// Start building a `ReconnectOptions`, by enabling or disabling 34 | /// reconnection on stream error. 35 | /// 36 | /// If `reconnect` is `true` (the [default]), the client will automatically 37 | /// try to reconnect if the stream ends due to an error. If it is `false`, 38 | /// the client will stop receiving events after an error. 39 | /// 40 | /// [default]: #method.default 41 | pub fn reconnect(reconnect: bool) -> ReconnectOptionsBuilder { 42 | ReconnectOptionsBuilder::new(reconnect) 43 | } 44 | } 45 | 46 | impl Default for ReconnectOptions { 47 | /// The default reconnect behaviour is to automatically try to reconnect if 48 | /// the stream ends due to an error, but not to retry if the initial 49 | /// connection fails. 50 | /// 51 | /// The client will wait before each reconnect attempt, to allow time for 52 | /// the error condition to be resolved (e.g. for the SSE server to restart 53 | /// if it went down). It will wait 1 second before the first attempt, and 54 | /// then back off exponentially, up to a maximum wait of 1 minute. 55 | fn default() -> ReconnectOptions { 56 | ReconnectOptions { 57 | retry_initial: false, 58 | reconnect: true, 59 | delay: Duration::from_secs(1), 60 | backoff_factor: 2, 61 | delay_max: Duration::from_secs(60), 62 | } 63 | } 64 | } 65 | 66 | /// Builder for [`ReconnectOptions`]. 67 | /// 68 | /// [`ReconnectOptions`]: struct.ReconnectOptions.html 69 | pub struct ReconnectOptionsBuilder { 70 | opts: ReconnectOptions, 71 | } 72 | 73 | impl ReconnectOptionsBuilder { 74 | pub fn new(reconnect: bool) -> Self { 75 | let opts = ReconnectOptions { 76 | reconnect, 77 | ..Default::default() 78 | }; 79 | Self { opts } 80 | } 81 | 82 | /// Configure whether to retry if the initial connection to the server 83 | /// fails. 84 | /// 85 | /// If `true`, the client will automatically retry the connection, with the 86 | /// same delay and backoff behaviour as for reconnects due to stream error. 87 | /// If `false` (the [default]), the client will not retry the initial 88 | /// connection. 89 | /// 90 | /// [default]: struct.ReconnectOptions.html#method.default 91 | pub fn retry_initial(mut self, retry: bool) -> Self { 92 | self.opts.retry_initial = retry; 93 | self 94 | } 95 | 96 | /// Configure the initial delay before trying to reconnect (the [default] is 97 | /// 1 second). 98 | /// 99 | /// After an error, the client will wait this long before the first attempt 100 | /// to reconnect. Subsequent reconnect attempts may wait longer, depending 101 | /// on the [`backoff_factor`]. 102 | /// 103 | /// [default]: struct.ReconnectOptions.html#method.default 104 | /// [`backoff_factor`]: #method.backoff_factor 105 | pub fn delay(mut self, delay: Duration) -> Self { 106 | self.opts.delay = delay; 107 | self 108 | } 109 | 110 | /// Configure the factor by which delays between reconnect attempts will 111 | /// exponentially increase, up to [`delay_max`]. The [default] factor is 2, 112 | /// so each reconnect attempt will wait twice as long as the previous one. 113 | /// 114 | /// Set this to 1 to disable exponential backoff (i.e. 
to make reconnect 115 | /// attempts at regular intervals equal to the configured [`delay`]). 116 | /// 117 | /// [`delay_max`]: #method.delay_max 118 | /// [default]: struct.ReconnectOptions.html#method.default 119 | /// [`delay`]: #method.delay 120 | pub fn backoff_factor(mut self, factor: u32) -> Self { 121 | self.opts.backoff_factor = factor; 122 | self 123 | } 124 | 125 | /// Configure the maximum delay between reconnects (the [default] is 1 126 | /// minute). The exponential backoff configured by [`backoff_factor`] will 127 | /// not cause a delay greater than this value. 128 | /// 129 | /// [default]: struct.ReconnectOptions.html#method.default 130 | /// [`backoff_factor`]: #method.backoff_factor 131 | pub fn delay_max(mut self, max: Duration) -> Self { 132 | self.opts.delay_max = max; 133 | self 134 | } 135 | 136 | /// Finish building the `ReconnectOptions`. 137 | pub fn build(self) -> ReconnectOptions { 138 | self.opts 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /eventsource-client/src/error.rs: -------------------------------------------------------------------------------- 1 | use crate::response::{ErrorBody, Response}; 2 | 3 | /// Error type for invalid response headers encountered in ResponseDetails. 4 | #[derive(Debug)] 5 | pub struct HeaderError { 6 | /// Wrapped inner error providing details about the header issue. 7 | inner_error: Box, 8 | } 9 | 10 | impl HeaderError { 11 | /// Constructs a new `HeaderError` wrapping an existing error. 12 | pub fn new(err: Box) -> Self { 13 | HeaderError { inner_error: err } 14 | } 15 | } 16 | 17 | impl std::fmt::Display for HeaderError { 18 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 19 | write!(f, "Invalid response header: {}", self.inner_error) 20 | } 21 | } 22 | 23 | impl std::error::Error for HeaderError { 24 | fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 25 | Some(self.inner_error.as_ref()) 26 | } 27 | } 28 | 29 | /// Error type returned from this library's functions. 30 | #[derive(Debug)] 31 | pub enum Error { 32 | TimedOut, 33 | StreamClosed, 34 | /// An invalid request parameter 35 | InvalidParameter(Box), 36 | /// The HTTP response could not be handled. 37 | UnexpectedResponse(Response, ErrorBody), 38 | /// An error reading from the HTTP response body. 39 | HttpStream(Box), 40 | /// The HTTP response stream ended 41 | Eof, 42 | /// The HTTP response stream ended unexpectedly (e.g. in the 43 | /// middle of an event). 44 | UnexpectedEof, 45 | /// Encountered a line not conforming to the SSE protocol. 46 | InvalidLine(String), 47 | InvalidEvent, 48 | /// Encountered a malformed Location header. 49 | MalformedLocationHeader(Box), 50 | /// Reached maximum redirect limit after encountering Location headers. 
51 | MaxRedirectLimitReached(u32), 52 | } 53 | 54 | impl std::fmt::Display for Error { 55 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 56 | use Error::*; 57 | match self { 58 | TimedOut => write!(f, "timed out"), 59 | StreamClosed => write!(f, "stream closed"), 60 | InvalidParameter(err) => write!(f, "invalid parameter: {err}"), 61 | UnexpectedResponse(r, _) => { 62 | let status = r.status(); 63 | write!(f, "unexpected response: {status}") 64 | } 65 | HttpStream(err) => write!(f, "http error: {err}"), 66 | Eof => write!(f, "eof"), 67 | UnexpectedEof => write!(f, "unexpected eof"), 68 | InvalidLine(line) => write!(f, "invalid line: {line}"), 69 | InvalidEvent => write!(f, "invalid event"), 70 | MalformedLocationHeader(err) => write!(f, "malformed header: {err}"), 71 | MaxRedirectLimitReached(limit) => write!(f, "maximum redirect limit reached: {limit}"), 72 | } 73 | } 74 | } 75 | 76 | impl std::error::Error for Error {} 77 | 78 | impl PartialEq for Error { 79 | fn eq(&self, other: &Error) -> bool { 80 | use Error::*; 81 | if let (InvalidLine(msg1), InvalidLine(msg2)) = (self, other) { 82 | return msg1 == msg2; 83 | } else if let (UnexpectedEof, UnexpectedEof) = (self, other) { 84 | return true; 85 | } 86 | false 87 | } 88 | } 89 | 90 | impl Error { 91 | pub fn is_http_stream_error(&self) -> bool { 92 | if let Error::HttpStream(_) = self { 93 | return true; 94 | } 95 | false 96 | } 97 | 98 | pub fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { 99 | match self { 100 | Error::HttpStream(err) => Some(err.as_ref()), 101 | _ => None, 102 | } 103 | } 104 | } 105 | 106 | pub type Result<T> = std::result::Result<T, Error>; 107 | -------------------------------------------------------------------------------- /eventsource-client/src/event_parser.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::VecDeque, convert::TryFrom, str::from_utf8}; 2 | 3 | use hyper::body::Bytes; 4 | use log::{debug, log_enabled, trace}; 5 | use pin_project::pin_project; 6 | 7 | use crate::response::Response; 8 | 9 | use super::error::{Error, Result}; 10 | 11 | #[derive(Default, PartialEq)] 12 | struct EventData { 13 | pub event_type: String, 14 | pub data: String, 15 | pub id: Option<String>, 16 | pub retry: Option<u64>, 17 | } 18 | 19 | impl EventData { 20 | fn new() -> Self { 21 | Self::default() 22 | } 23 | 24 | pub fn append_data(&mut self, value: &str) { 25 | self.data.push_str(value); 26 | self.data.push('\n'); 27 | } 28 | 29 | pub fn with_id(mut self, value: Option<String>) -> Self { 30 | self.id = value; 31 | self 32 | } 33 | } 34 | 35 | #[derive(Debug, Eq, PartialEq)] 36 | pub enum SSE { 37 | Connected(ConnectionDetails), 38 | Event(Event), 39 | Comment(String), 40 | } 41 | 42 | impl TryFrom<EventData> for Option<SSE> { 43 | type Error = Error; 44 | 45 | fn try_from(event_data: EventData) -> std::result::Result<Self, Self::Error> { 46 | if event_data == EventData::default() { 47 | return Err(Error::InvalidEvent); 48 | } 49 | 50 | if event_data.data.is_empty() { 51 | return Ok(None); 52 | } 53 | 54 | let event_type = if event_data.event_type.is_empty() { 55 | String::from("message") 56 | } else { 57 | event_data.event_type 58 | }; 59 | 60 | let mut data = event_data.data.clone(); 61 | data.truncate(data.len() - 1); 62 | 63 | let id = event_data.id.clone(); 64 | 65 | let retry = event_data.retry; 66 | 67 | Ok(Some(SSE::Event(Event { 68 | event_type, 69 | data, 70 | id, 71 | retry, 72 | }))) 73 | } 74 | } 75 | 76 | #[derive(Clone, Debug, Eq, PartialEq)] 77 | pub struct ConnectionDetails { 78 | response: Response, 79 | } 80 | 81 | impl ConnectionDetails { 82 | pub(crate) fn new(response: Response) -> Self { 83 | Self { response } 84 | } 85 | 86 | /// Returns information describing the response at the time of connection. 87 | pub fn response(&self) -> &Response { 88 | &self.response 89 | } 90 | } 91 | 92 | #[derive(Clone, Debug, Eq, PartialEq)] 93 | pub struct Event { 94 | pub event_type: String, 95 | pub data: String, 96 | pub id: Option<String>, 97 | pub retry: Option<u64>, 98 | } 99 | 100 | const LOGIFY_MAX_CHARS: usize = 100; 101 | fn logify(bytes: &[u8]) -> String { 102 | let stringified = from_utf8(bytes).unwrap_or(""); 103 | stringified.chars().take(LOGIFY_MAX_CHARS).collect() 104 | } 105 | 106 | fn parse_field(line: &[u8]) -> Result<Option<(&str, &str)>> { 107 | if line.is_empty() { 108 | return Err(Error::InvalidLine( 109 | "should never try to parse an empty line (probably a bug)".into(), 110 | )); 111 | } 112 | 113 | match line.iter().position(|&b| b':' == b) { 114 | Some(0) => { 115 | let value = &line[1..]; 116 | debug!("comment: {}", logify(value)); 117 | Ok(Some(("comment", parse_value(value)?))) 118 | } 119 | Some(colon_pos) => { 120 | let key = &line[0..colon_pos]; 121 | let key = parse_key(key)?; 122 | 123 | let mut value = &line[colon_pos + 1..]; 124 | // remove the first initial space character if any (but remove no other whitespace) 125 | if value.starts_with(b" ") { 126 | value = &value[1..]; 127 | } 128 | 129 | debug!("key: {}, value: {}", key, logify(value)); 130 | 131 | Ok(Some((key, parse_value(value)?))) 132 | } 133 | None => Ok(Some((parse_key(line)?, ""))), 134 | } 135 | } 136 | 137 | fn parse_key(key: &[u8]) -> Result<&str> { 138 | from_utf8(key).map_err(|e| Error::InvalidLine(format!("malformed key: {e:?}"))) 139 | } 140 | 141 | fn parse_value(value: &[u8]) -> Result<&str> { 142 | from_utf8(value).map_err(|e| Error::InvalidLine(format!("malformed value: {e:?}"))) 143 | } 144 | 145 | #[pin_project] 146 | #[must_use = "streams do nothing unless polled"] 147 | pub struct EventParser { 148 | /// buffer for lines we know are complete (terminated) but not yet parsed into event fields, in 149 | /// the order received 150 | complete_lines: VecDeque<Vec<u8>>, 151 | /// buffer for the most-recently received line, pending completion (by a newline terminator) or 152 | /// extension (by more non-newline bytes) 153 | incomplete_line: Option<Vec<u8>>, 154 | /// flagged if the last character processed was a carriage return; used to help process CRLF 155 | /// pairs 156 | last_char_was_cr: bool, 157 | /// the event data currently being decoded 158 | event_data: Option<EventData>, 159 | /// the last-seen event ID; events without an ID will take on this value until it is updated. 160 | last_event_id: Option<String>, 161 | sse: VecDeque<SSE>, 162 | } 163 | 164 | impl EventParser { 165 | pub fn new() -> Self { 166 | Self { 167 | complete_lines: VecDeque::with_capacity(10), 168 | incomplete_line: None, 169 | last_char_was_cr: false, 170 | event_data: None, 171 | last_event_id: None, 172 | sse: VecDeque::with_capacity(3), 173 | } 174 | } 175 | 176 | pub fn was_processing(&self) -> bool { 177 | if self.incomplete_line.is_some() || !self.complete_lines.is_empty() { 178 | true 179 | } else { 180 | !self.sse.is_empty() 181 | } 182 | } 183 | 184 | pub fn get_event(&mut self) -> Option<SSE> { 185 | self.sse.pop_front() 186 | } 187 | 188 | pub fn process_bytes(&mut self, bytes: Bytes) -> Result<()> { 189 | trace!("Parsing bytes {:?}", bytes); 190 | // We get bytes from the underlying stream in chunks.
Decoding a chunk has two phases: 191 | // decode the chunk into lines, and decode the lines into events. 192 | // 193 | // We counterintuitively do these two phases in reverse order. Because both lines and 194 | // events may be split across chunks, we need to ensure we have a complete 195 | // (newline-terminated) line before parsing it, and a complete event 196 | // (empty-line-terminated) before returning it. So we buffer lines between poll() 197 | // invocations, and begin by processing any incomplete events from previous invocations, 198 | // before requesting new input from the underlying stream and processing that. 199 | 200 | self.decode_and_buffer_lines(bytes); 201 | self.parse_complete_lines_into_event()?; 202 | 203 | Ok(()) 204 | } 205 | 206 | // Populate the event fields from the complete lines already seen, until we either encounter an 207 | // empty line - indicating we've decoded a complete event - or we run out of complete lines to 208 | // process. 209 | // 210 | // Returns the event for dispatch if it is complete. 211 | fn parse_complete_lines_into_event(&mut self) -> Result<()> { 212 | loop { 213 | let mut seen_empty_line = false; 214 | 215 | while let Some(line) = self.complete_lines.pop_front() { 216 | if line.is_empty() && self.event_data.is_some() { 217 | seen_empty_line = true; 218 | break; 219 | } else if line.is_empty() { 220 | continue; 221 | } 222 | 223 | if let Some((key, value)) = parse_field(&line)? { 224 | if key == "comment" { 225 | self.sse.push_back(SSE::Comment(value.to_string())); 226 | continue; 227 | } 228 | 229 | let id = &self.last_event_id; 230 | let event_data = self 231 | .event_data 232 | .get_or_insert_with(|| EventData::new().with_id(id.clone())); 233 | 234 | if key == "event" { 235 | event_data.event_type = value.to_string() 236 | } else if key == "data" { 237 | event_data.append_data(value); 238 | } else if key == "id" { 239 | // If id contains a null byte, it is a non-fatal error and the rest of 240 | // the event should be parsed if possible. 241 | if value.chars().any(|c| c == '\0') { 242 | debug!("Ignoring event ID containing null byte"); 243 | continue; 244 | } 245 | 246 | if value.is_empty() { 247 | self.last_event_id = Some("".to_string()); 248 | } else { 249 | self.last_event_id = Some(value.to_string()); 250 | } 251 | 252 | event_data.id.clone_from(&self.last_event_id) 253 | } else if key == "retry" { 254 | match value.parse::() { 255 | Ok(retry) => { 256 | event_data.retry = Some(retry); 257 | } 258 | _ => debug!("Failed to parse {:?} into retry value", value), 259 | }; 260 | } 261 | } 262 | } 263 | 264 | if seen_empty_line { 265 | let event_data = self.event_data.take(); 266 | 267 | trace!( 268 | "seen empty line, event_data is {:?})", 269 | event_data.as_ref().map(|event_data| &event_data.event_type) 270 | ); 271 | 272 | if let Some(event_data) = event_data { 273 | match Option::::try_from(event_data) { 274 | Err(e) => return Err(e), 275 | Ok(None) => (), 276 | Ok(Some(event)) => self.sse.push_back(event), 277 | }; 278 | } 279 | 280 | continue; 281 | } else { 282 | trace!("processed all complete lines but event_data not yet complete"); 283 | } 284 | 285 | break; 286 | } 287 | 288 | Ok(()) 289 | } 290 | 291 | // Decode a chunk into lines and buffer them for subsequent parsing, taking account of 292 | // incomplete lines from previous chunks. 
293 | fn decode_and_buffer_lines(&mut self, chunk: Bytes) { 294 | let mut lines = chunk.split_inclusive(|&b| b == b'\n' || b == b'\r'); 295 | // The first and last elements in this split are special. The spec requires lines to be 296 | // terminated. But lines may span chunks, so: 297 | // * the last line, if non-empty (i.e. if chunk didn't end with a line terminator), 298 | // should be buffered as an incomplete line 299 | // * the first line should be appended to the incomplete line, if any 300 | 301 | if let Some(incomplete_line) = self.incomplete_line.as_mut() { 302 | if let Some(line) = lines.next() { 303 | trace!( 304 | "extending line from previous chunk: {:?}+{:?}", 305 | logify(incomplete_line), 306 | logify(line) 307 | ); 308 | 309 | self.last_char_was_cr = false; 310 | if !line.is_empty() { 311 | // Checking the last character handles lines where the last character is a 312 | // terminator, but also where the entire line is a terminator. 313 | match line.last().unwrap() { 314 | b'\r' => { 315 | incomplete_line.extend_from_slice(&line[..line.len() - 1]); 316 | let il = self.incomplete_line.take(); 317 | self.complete_lines.push_back(il.unwrap()); 318 | self.last_char_was_cr = true; 319 | } 320 | b'\n' => { 321 | incomplete_line.extend_from_slice(&line[..line.len() - 1]); 322 | let il = self.incomplete_line.take(); 323 | self.complete_lines.push_back(il.unwrap()); 324 | } 325 | _ => incomplete_line.extend_from_slice(line), 326 | }; 327 | } 328 | } 329 | } 330 | 331 | let mut lines = lines.peekable(); 332 | while let Some(line) = lines.next() { 333 | if let Some(actually_complete_line) = self.incomplete_line.take() { 334 | // we saw the next line, so the previous one must have been complete after all 335 | trace!( 336 | "previous line was complete: {:?}", 337 | logify(&actually_complete_line) 338 | ); 339 | self.complete_lines.push_back(actually_complete_line); 340 | } 341 | 342 | if self.last_char_was_cr && line == [b'\n'] { 343 | // This is a continuation of a \r\n pair, so we can ignore this line. We do need to 344 | // reset our flag though. 345 | self.last_char_was_cr = false; 346 | continue; 347 | } 348 | 349 | self.last_char_was_cr = false; 350 | if line.ends_with(b"\r") { 351 | self.complete_lines 352 | .push_back(line[..line.len() - 1].to_vec()); 353 | self.last_char_was_cr = true; 354 | } else if line.ends_with(b"\n") { 355 | // self isn't a continuation, but rather a line ending with a LF terminator. 
356 | self.complete_lines 357 | .push_back(line[..line.len() - 1].to_vec()); 358 | } else if line.is_empty() { 359 | // this is the last line and it's empty, no need to buffer it 360 | trace!("chunk ended with a line terminator"); 361 | } else if lines.peek().is_some() { 362 | // this line isn't the last and we know from previous checks it doesn't end in a 363 | // terminator, so we can consider it complete 364 | self.complete_lines.push_back(line.to_vec()); 365 | } else { 366 | // last line needs to be buffered as it may be incomplete 367 | trace!("buffering incomplete line: {:?}", logify(line)); 368 | self.incomplete_line = Some(line.to_vec()); 369 | } 370 | } 371 | 372 | if log_enabled!(log::Level::Trace) { 373 | for line in &self.complete_lines { 374 | trace!("complete line: {:?}", logify(line)); 375 | } 376 | if let Some(line) = &self.incomplete_line { 377 | trace!("incomplete line: {:?}", logify(line)); 378 | } 379 | } 380 | } 381 | } 382 | 383 | #[cfg(test)] 384 | mod tests { 385 | use std::str::FromStr; 386 | 387 | use super::{Error::*, *}; 388 | use proptest::proptest; 389 | use test_case::test_case; 390 | 391 | fn field<'a>(key: &'a str, value: &'a str) -> Result> { 392 | Ok(Some((key, value))) 393 | } 394 | 395 | /// Requires an event to be popped from the given parser. 396 | /// Event properties can be asserted using a closure. 397 | fn require_pop_event(parser: &mut EventParser, f: F) 398 | where 399 | F: FnOnce(Event), 400 | { 401 | if let Some(SSE::Event(event)) = parser.get_event() { 402 | f(event) 403 | } else { 404 | panic!("Event should have been received") 405 | } 406 | } 407 | 408 | #[test] 409 | fn test_logify_handles_code_point_boundaries() { 410 | let phase = String::from_str( 411 | "这是一条很长的消息,最初导致我们的代码出现恐慌。我希望情况不再如此。这是一条很长的消息,最初导致我们的代码出现恐慌。我希望情况不再如此。这是一条很长的消息,最初导致我们的代码出现恐慌。我希望情况不再如此。这是一条很长的消息,最初导致我们的代码出现恐慌。我希望情况不再如此。", 412 | ) 413 | .expect("Invalid sample string"); 414 | 415 | let input: &[u8] = phase.as_bytes(); 416 | let result = logify(input); 417 | 418 | assert!(result == "这是一条很长的消息,最初导致我们的代码出现恐慌。我希望情况不再如此。这是一条很长的消息,最初导致我们的代码出现恐慌。我希望情况不再如此。这是一条很长的消息,最初导致我们的代码出现恐慌。我希望情况不再如"); 419 | } 420 | 421 | #[test] 422 | fn test_parse_field_invalid() { 423 | assert!(parse_field(b"").is_err()); 424 | 425 | match parse_field(b"\x80: invalid UTF-8") { 426 | Err(InvalidLine(msg)) => assert!(msg.contains("Utf8Error")), 427 | res => panic!("expected InvalidLine error, got {:?}", res), 428 | } 429 | } 430 | 431 | #[test] 432 | fn test_event_id_error_if_invalid_utf8() { 433 | let mut bytes = Vec::from("id: "); 434 | let mut invalid = vec![b'\xf0', b'\x28', b'\x8c', b'\xbc']; 435 | bytes.append(&mut invalid); 436 | bytes.push(b'\n'); 437 | let mut parser = EventParser::new(); 438 | assert!(parser.process_bytes(Bytes::from(bytes)).is_err()); 439 | } 440 | 441 | #[test] 442 | fn test_parse_field_comments() { 443 | assert_eq!(parse_field(b":"), field("comment", "")); 444 | assert_eq!( 445 | parse_field(b":hello \0 world"), 446 | field("comment", "hello \0 world") 447 | ); 448 | assert_eq!(parse_field(b":event: foo"), field("comment", "event: foo")); 449 | } 450 | 451 | #[test] 452 | fn test_parse_field_valid() { 453 | assert_eq!(parse_field(b"event:foo"), field("event", "foo")); 454 | assert_eq!(parse_field(b"event: foo"), field("event", "foo")); 455 | assert_eq!(parse_field(b"event: foo"), field("event", " foo")); 456 | assert_eq!(parse_field(b"event:\tfoo"), field("event", "\tfoo")); 457 | assert_eq!(parse_field(b"event: foo "), field("event", "foo ")); 458 | 459 | 
assert_eq!(parse_field(b"disconnect:"), field("disconnect", "")); 460 | assert_eq!(parse_field(b"disconnect: "), field("disconnect", "")); 461 | assert_eq!(parse_field(b"disconnect: "), field("disconnect", " ")); 462 | assert_eq!(parse_field(b"disconnect:\t"), field("disconnect", "\t")); 463 | 464 | assert_eq!(parse_field(b"disconnect"), field("disconnect", "")); 465 | 466 | assert_eq!(parse_field(b" : foo"), field(" ", "foo")); 467 | assert_eq!(parse_field(b"\xe2\x98\x83: foo"), field("☃", "foo")); 468 | } 469 | 470 | fn event(typ: &str, data: &str) -> SSE { 471 | SSE::Event(Event { 472 | data: data.to_string(), 473 | id: None, 474 | event_type: typ.to_string(), 475 | retry: None, 476 | }) 477 | } 478 | 479 | fn event_with_id(typ: &str, data: &str, id: &str) -> SSE { 480 | SSE::Event(Event { 481 | data: data.to_string(), 482 | id: Some(id.to_string()), 483 | event_type: typ.to_string(), 484 | retry: None, 485 | }) 486 | } 487 | 488 | #[test] 489 | fn test_event_without_data_yields_no_event() { 490 | let mut parser = EventParser::new(); 491 | assert!(parser.process_bytes(Bytes::from("id: abc\n\n")).is_ok()); 492 | assert!(parser.get_event().is_none()); 493 | } 494 | 495 | #[test] 496 | fn test_ignore_id_containing_null() { 497 | let mut parser = EventParser::new(); 498 | assert!(parser 499 | .process_bytes(Bytes::from("id: a\x00bc\nevent: add\ndata: abc\n\n")) 500 | .is_ok()); 501 | 502 | if let Some(SSE::Event(event)) = parser.get_event() { 503 | assert!(event.id.is_none()); 504 | } else { 505 | panic!("Event should have been received"); 506 | } 507 | } 508 | 509 | #[test_case("event: add\ndata: hello\n\n", "add".into())] 510 | #[test_case("data: hello\n\n", "message".into())] 511 | fn test_event_can_parse_type_correctly(chunk: &'static str, event_type: String) { 512 | let mut parser = EventParser::new(); 513 | 514 | assert!(parser.process_bytes(Bytes::from(chunk)).is_ok()); 515 | 516 | require_pop_event(&mut parser, |e| assert_eq!(event_type, e.event_type)); 517 | } 518 | 519 | #[test_case("data: hello\n\n", event("message", "hello"); "parses event body with LF")] 520 | #[test_case("data: hello\n\r", event("message", "hello"); "parses event body with LF and trailing CR")] 521 | #[test_case("data: hello\r\n\n", event("message", "hello"); "parses event body with CRLF")] 522 | #[test_case("data: hello\r\n\r", event("message", "hello"); "parses event body with CRLF and trailing CR")] 523 | #[test_case("data: hello\r\r", event("message", "hello"); "parses event body with CR")] 524 | #[test_case("data: hello\r\r\n", event("message", "hello"); "parses event body with CR and trailing CRLF")] 525 | #[test_case("id: 1\ndata: hello\n\n", event_with_id("message", "hello", "1"))] 526 | #[test_case("id: 😀\ndata: hello\n\n", event_with_id("message", "hello", "😀"))] 527 | fn test_decode_chunks_simple(chunk: &'static str, event: SSE) { 528 | let mut parser = EventParser::new(); 529 | assert!(parser.process_bytes(Bytes::from(chunk)).is_ok()); 530 | assert_eq!(parser.get_event().unwrap(), event); 531 | assert!(parser.get_event().is_none()); 532 | } 533 | 534 | #[test_case("persistent-event-id.sse"; "persistent-event-id.sse")] 535 | fn test_last_id_persists_if_not_overridden(file: &str) { 536 | let contents = read_contents_from_file(file); 537 | let mut parser = EventParser::new(); 538 | assert!(parser.process_bytes(Bytes::from(contents)).is_ok()); 539 | 540 | require_pop_event(&mut parser, |e| assert_eq!(e.id, Some("1".into()))); 541 | require_pop_event(&mut parser, |e| assert_eq!(e.id, Some("1".into()))); 
542 | require_pop_event(&mut parser, |e| assert_eq!(e.id, Some("3".into()))); 543 | require_pop_event(&mut parser, |e| assert_eq!(e.id, Some("3".into()))); 544 | } 545 | 546 | #[test_case(b":hello\n"; "with LF")] 547 | #[test_case(b":hello\r"; "with CR")] 548 | #[test_case(b":hello\r\n"; "with CRLF")] 549 | fn test_decode_chunks_comments_are_generated(chunk: &'static [u8]) { 550 | let mut parser = EventParser::new(); 551 | assert!(parser.process_bytes(Bytes::from(chunk)).is_ok()); 552 | assert!(parser.get_event().is_some()); 553 | } 554 | 555 | #[test] 556 | fn test_comment_is_separate_from_event() { 557 | let mut parser = EventParser::new(); 558 | let result = parser.process_bytes(Bytes::from(":comment\ndata:hello\n\n")); 559 | assert!(result.is_ok()); 560 | 561 | let comment = parser.get_event(); 562 | assert!(matches!(comment, Some(SSE::Comment(_)))); 563 | 564 | let event = parser.get_event(); 565 | assert!(matches!(event, Some(SSE::Event(_)))); 566 | 567 | assert!(parser.get_event().is_none()); 568 | } 569 | 570 | #[test] 571 | fn test_comment_with_trailing_blank_line() { 572 | let mut parser = EventParser::new(); 573 | let result = parser.process_bytes(Bytes::from(":comment\n\r\n\r")); 574 | assert!(result.is_ok()); 575 | 576 | let comment = parser.get_event(); 577 | assert!(matches!(comment, Some(SSE::Comment(_)))); 578 | 579 | assert!(parser.get_event().is_none()); 580 | } 581 | 582 | #[test_case(&["data:", "hello\n\n"], event("message", "hello"); "data split")] 583 | #[test_case(&["data:hell", "o\n\n"], event("message", "hello"); "data truncated")] 584 | fn test_decode_message_split_across_chunks(chunks: &[&'static str], event: SSE) { 585 | let mut parser = EventParser::new(); 586 | 587 | if let Some((last, chunks)) = chunks.split_last() { 588 | for chunk in chunks { 589 | assert!(parser.process_bytes(Bytes::from(*chunk)).is_ok()); 590 | assert!(parser.get_event().is_none()); 591 | } 592 | 593 | assert!(parser.process_bytes(Bytes::from(*last)).is_ok()); 594 | assert_eq!(parser.get_event(), Some(event)); 595 | assert!(parser.get_event().is_none()); 596 | } else { 597 | panic!("Failed to split last"); 598 | } 599 | } 600 | 601 | #[test_case(&["data:hell", "o\n\ndata:", "world\n\n"], &[event("message", "hello"), event("message", "world")]; "with lf")] 602 | #[test_case(&["data:hell", "o\r\rdata:", "world\r\r"], &[event("message", "hello"), event("message", "world")]; "with cr")] 603 | #[test_case(&["data:hell", "o\r\n\ndata:", "world\r\n\n"], &[event("message", "hello"), event("message", "world")]; "with crlf")] 604 | fn test_decode_multiple_messages_split_across_chunks(chunks: &[&'static str], events: &[SSE]) { 605 | let mut parser = EventParser::new(); 606 | 607 | for chunk in chunks { 608 | assert!(parser.process_bytes(Bytes::from(*chunk)).is_ok()); 609 | } 610 | 611 | for event in events { 612 | assert_eq!(parser.get_event().unwrap(), *event); 613 | } 614 | 615 | assert!(parser.get_event().is_none()); 616 | } 617 | 618 | #[test] 619 | fn test_decode_line_split_across_chunks() { 620 | let mut parser = EventParser::new(); 621 | assert!(parser.process_bytes(Bytes::from("data:foo")).is_ok()); 622 | assert!(parser.process_bytes(Bytes::from("")).is_ok()); 623 | assert!(parser.process_bytes(Bytes::from("baz\n\n")).is_ok()); 624 | assert_eq!(parser.get_event(), Some(event("message", "foobaz"))); 625 | assert!(parser.get_event().is_none()); 626 | 627 | assert!(parser.process_bytes(Bytes::from("data:foo")).is_ok()); 628 | assert!(parser.process_bytes(Bytes::from("bar")).is_ok()); 629 | 
assert!(parser.process_bytes(Bytes::from("baz\n\n")).is_ok()); 630 | assert_eq!(parser.get_event(), Some(event("message", "foobarbaz"))); 631 | assert!(parser.get_event().is_none()); 632 | } 633 | 634 | #[test] 635 | fn test_decode_concatenates_multiple_values_for_same_field() { 636 | let mut parser = EventParser::new(); 637 | assert!(parser.process_bytes(Bytes::from("data:hello\n")).is_ok()); 638 | assert!(parser.process_bytes(Bytes::from("data:world\n\n")).is_ok()); 639 | assert_eq!(parser.get_event(), Some(event("message", "hello\nworld"))); 640 | assert!(parser.get_event().is_none()); 641 | } 642 | 643 | #[test_case("\n\n\n\n" ; "all LFs")] 644 | #[test_case("\r\r\r\r" ; "all CRs")] 645 | #[test_case("\r\n\r\n\r\n\r\n" ; "all CRLFs")] 646 | fn test_decode_repeated_terminators(chunk: &'static str) { 647 | let mut parser = EventParser::new(); 648 | assert!(parser.process_bytes(Bytes::from(chunk)).is_ok()); 649 | 650 | // spec seems unclear on whether this should actually dispatch empty events, but that seems 651 | // unhelpful for all practical purposes 652 | assert!(parser.get_event().is_none()); 653 | } 654 | 655 | #[test] 656 | fn test_decode_extra_terminators_between_events() { 657 | let mut parser = EventParser::new(); 658 | assert!(parser 659 | .process_bytes(Bytes::from("data: abc\n\n\ndata: def\n\n")) 660 | .is_ok()); 661 | 662 | assert_eq!(parser.get_event(), Some(event("message", "abc"))); 663 | assert_eq!(parser.get_event(), Some(event("message", "def"))); 664 | assert!(parser.get_event().is_none()); 665 | } 666 | 667 | #[test_case("one-event.sse"; "one-event.sse")] 668 | #[test_case("one-event-crlf.sse"; "one-event-crlf.sse")] 669 | fn test_decode_one_event(file: &str) { 670 | let contents = read_contents_from_file(file); 671 | let mut parser = EventParser::new(); 672 | assert!(parser.process_bytes(Bytes::from(contents)).is_ok()); 673 | 674 | require_pop_event(&mut parser, |e| { 675 | assert_eq!(e.event_type, "patch"); 676 | assert!(e 677 | .data 678 | .contains(r#"path":"/flags/goals.02.featureWithGoals"#)); 679 | }); 680 | } 681 | 682 | #[test_case("two-events.sse"; "two-events.sse")] 683 | #[test_case("two-events-crlf.sse"; "two-events-crlf.sse")] 684 | fn test_decode_two_events(file: &str) { 685 | let contents = read_contents_from_file(file); 686 | let mut parser = EventParser::new(); 687 | assert!(parser.process_bytes(Bytes::from(contents)).is_ok()); 688 | 689 | require_pop_event(&mut parser, |e| { 690 | assert_eq!(e.event_type, "one"); 691 | assert_eq!(e.data, "One"); 692 | }); 693 | 694 | require_pop_event(&mut parser, |e| { 695 | assert_eq!(e.event_type, "two"); 696 | assert_eq!(e.data, "Two"); 697 | }); 698 | } 699 | 700 | #[test_case("big-event-followed-by-another.sse"; "big-event-followed-by-another.sse")] 701 | #[test_case("big-event-followed-by-another-crlf.sse"; "big-event-followed-by-another-crlf.sse")] 702 | fn test_decode_big_event_followed_by_another(file: &str) { 703 | let contents = read_contents_from_file(file); 704 | let mut parser = EventParser::new(); 705 | assert!(parser.process_bytes(Bytes::from(contents)).is_ok()); 706 | 707 | require_pop_event(&mut parser, |e| { 708 | assert_eq!(e.event_type, "patch"); 709 | assert!(e.data.len() > 10_000); 710 | assert!(e.data.contains(r#"path":"/flags/big.00.bigFeatureKey"#)); 711 | }); 712 | 713 | require_pop_event(&mut parser, |e| { 714 | assert_eq!(e.event_type, "patch"); 715 | assert!(e 716 | .data 717 | .contains(r#"path":"/flags/goals.02.featureWithGoals"#)); 718 | }); 719 | } 720 | 721 | fn 
read_contents_from_file(name: &str) -> Vec { 722 | std::fs::read(format!("test-data/{}", name)) 723 | .unwrap_or_else(|_| panic!("couldn't read {}", name)) 724 | } 725 | 726 | proptest! { 727 | #[test] 728 | fn test_decode_and_buffer_lines_does_not_crash(next in "(\r\n|\r|\n)*event: [^\n\r:]*(\r\n|\r|\n)", previous in "(\r\n|\r|\n)*event: [^\n\r:]*(\r\n|\r|\n)") { 729 | let mut parser = EventParser::new(); 730 | parser.incomplete_line = Some(previous.as_bytes().to_vec()); 731 | parser.decode_and_buffer_lines(Bytes::from(next)); 732 | } 733 | } 734 | } 735 | -------------------------------------------------------------------------------- /eventsource-client/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![warn(rust_2018_idioms)] 2 | //! Client for the [Server-Sent Events] protocol (aka [EventSource]). 3 | //! 4 | //! ``` 5 | //! use futures::{TryStreamExt}; 6 | //! # use eventsource_client::Error; 7 | //! use eventsource_client::{Client, SSE}; 8 | //! # #[tokio::main] 9 | //! # async fn main() -> Result<(), eventsource_client::Error> { 10 | //! let mut client = eventsource_client::ClientBuilder::for_url("https://example.com/stream")? 11 | //! .header("Authorization", "Basic username:password")? 12 | //! .build(); 13 | //! 14 | //! let mut stream = Box::pin(client.stream()) 15 | //! .map_ok(|event| match event { 16 | //! SSE::Comment(comment) => println!("got a comment event: {:?}", comment), 17 | //! SSE::Event(evt) => println!("got an event: {}", evt.event_type), 18 | //! SSE::Connected(_) => println!("got connected") 19 | //! }) 20 | //! .map_err(|e| println!("error streaming events: {:?}", e)); 21 | //! # while let Ok(Some(_)) = stream.try_next().await {} 22 | //! # 23 | //! # Ok(()) 24 | //! # } 25 | //! ``` 26 | //! 27 | //![Server-Sent Events]: https://html.spec.whatwg.org/multipage/server-sent-events.html 28 | //![EventSource]: https://developer.mozilla.org/en-US/docs/Web/API/EventSource 29 | 30 | mod client; 31 | mod config; 32 | mod error; 33 | mod event_parser; 34 | mod response; 35 | mod retry; 36 | 37 | pub use client::*; 38 | pub use config::*; 39 | pub use error::*; 40 | pub use event_parser::Event; 41 | pub use event_parser::SSE; 42 | pub use response::Response; 43 | -------------------------------------------------------------------------------- /eventsource-client/src/response.rs: -------------------------------------------------------------------------------- 1 | use hyper::body::Buf; 2 | use hyper::{header::HeaderValue, Body, HeaderMap, StatusCode}; 3 | 4 | use crate::{Error, HeaderError}; 5 | 6 | pub struct ErrorBody { 7 | body: Body, 8 | } 9 | 10 | impl ErrorBody { 11 | pub fn new(body: Body) -> Self { 12 | Self { body } 13 | } 14 | 15 | /// Returns the body of the response as a vector of bytes. 16 | /// 17 | /// Caution: This method reads the entire body into memory. You should only use this method if 18 | /// you know the response is of a reasonable size. 
19 | pub async fn body_bytes(self) -> Result<Vec<u8>, Error> { 20 | let buf = match hyper::body::aggregate(self.body).await { 21 | Ok(buf) => buf, 22 | Err(err) => return Err(Error::HttpStream(Box::new(err))), 23 | }; 24 | 25 | Ok(buf.chunk().to_vec()) 26 | } 27 | } 28 | 29 | impl std::fmt::Debug for ErrorBody { 30 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 31 | f.debug_struct("ErrorBody").finish() 32 | } 33 | } 34 | 35 | #[derive(Clone, Debug, Eq, PartialEq)] 36 | pub struct Response { 37 | status_code: StatusCode, 38 | headers: HeaderMap, 39 | } 40 | 41 | impl Response { 42 | pub fn new(status_code: StatusCode, headers: HeaderMap) -> Self { 43 | Self { 44 | status_code, 45 | headers, 46 | } 47 | } 48 | 49 | /// Returns the status code of this response. 50 | pub fn status(&self) -> u16 { 51 | self.status_code.as_u16() 52 | } 53 | 54 | /// Returns the list of header keys present in this response. 55 | pub fn get_header_keys(&self) -> Vec<&str> { 56 | self.headers.keys().map(|key| key.as_str()).collect() 57 | } 58 | 59 | /// Returns the value of a header. 60 | /// 61 | /// If the header contains more than one value, only the first value is returned. Refer to 62 | /// [`get_header_values`] for a method that returns all values. 63 | pub fn get_header_value(&self, key: &str) -> std::result::Result<Option<&str>, HeaderError> { 64 | if let Some(value) = self.headers.get(key) { 65 | value 66 | .to_str() 67 | .map(Some) 68 | .map_err(|e| HeaderError::new(Box::new(e))) 69 | } else { 70 | Ok(None) 71 | } 72 | } 73 | 74 | /// Returns all values for a header. 75 | /// 76 | /// If the header contains only one value, it will be returned as a single-element vector. 77 | /// Refer to [`get_header_value`] for a method that returns only a single value. 78 | pub fn get_header_values(&self, key: &str) -> std::result::Result<Vec<&str>, HeaderError> { 79 | self.headers 80 | .get_all(key) 81 | .iter() 82 | .map(|value| value.to_str().map_err(|e| HeaderError::new(Box::new(e)))) 83 | .collect() 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /eventsource-client/src/retry.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | use rand::{thread_rng, Rng}; 4 | 5 | pub(crate) trait RetryStrategy { 6 | /// Return the next amount of time a failed request should delay before re-attempting. 7 | fn next_delay(&mut self, current_time: Instant) -> Duration; 8 | 9 | /// Modify the strategy's default base delay. 10 | fn change_base_delay(&mut self, base_delay: Duration); 11 | 12 | /// Used to indicate to the strategy that it can reset as a successful connection has been made.
13 | fn reset(&mut self, current_time: Instant); 14 | } 15 | 16 | const DEFAULT_RESET_RETRY_INTERVAL: Duration = Duration::from_secs(60); 17 | 18 | pub(crate) struct BackoffRetry { 19 | base_delay: Duration, 20 | max_delay: Duration, 21 | backoff_factor: u32, 22 | include_jitter: bool, 23 | 24 | reset_interval: Duration, 25 | next_delay: Duration, 26 | good_since: Option, 27 | } 28 | 29 | impl BackoffRetry { 30 | pub fn new( 31 | base_delay: Duration, 32 | max_delay: Duration, 33 | backoff_factor: u32, 34 | include_jitter: bool, 35 | ) -> Self { 36 | Self { 37 | base_delay, 38 | max_delay, 39 | backoff_factor, 40 | include_jitter, 41 | reset_interval: DEFAULT_RESET_RETRY_INTERVAL, 42 | next_delay: base_delay, 43 | good_since: None, 44 | } 45 | } 46 | } 47 | 48 | impl RetryStrategy for BackoffRetry { 49 | fn next_delay(&mut self, current_time: Instant) -> Duration { 50 | let mut current_delay = self.next_delay; 51 | 52 | if let Some(good_since) = self.good_since { 53 | if current_time - good_since >= self.reset_interval { 54 | current_delay = self.base_delay; 55 | } 56 | } 57 | 58 | self.good_since = None; 59 | self.next_delay = std::cmp::min(self.max_delay, current_delay * self.backoff_factor); 60 | 61 | if self.include_jitter { 62 | thread_rng().gen_range(current_delay / 2..=current_delay) 63 | } else { 64 | current_delay 65 | } 66 | } 67 | 68 | fn change_base_delay(&mut self, base_delay: Duration) { 69 | self.base_delay = base_delay; 70 | self.next_delay = self.base_delay; 71 | } 72 | 73 | fn reset(&mut self, current_time: Instant) { 74 | // While the external application has indicated success, we don't actually want to reset the 75 | // retry policy just yet. Instead, we want to record the time it was successful. Then when 76 | // we calculate the next delay, we can reset the strategy ONLY when it has been at least 77 | // DEFAULT_RESET_RETRY_INTERVAL seconds. 
78 | self.good_since = Some(current_time); 79 | } 80 | } 81 | 82 | #[cfg(test)] 83 | mod tests { 84 | use std::ops::Add; 85 | use std::time::{Duration, Instant}; 86 | 87 | use crate::retry::{BackoffRetry, RetryStrategy}; 88 | 89 | #[test] 90 | fn test_fixed_retry() { 91 | let base = Duration::from_secs(10); 92 | let mut retry = BackoffRetry::new(base, Duration::from_secs(30), 1, false); 93 | let start = Instant::now() - Duration::from_secs(60); 94 | 95 | assert_eq!(retry.next_delay(start), base); 96 | assert_eq!(retry.next_delay(start.add(Duration::from_secs(1))), base); 97 | assert_eq!(retry.next_delay(start.add(Duration::from_secs(2))), base); 98 | } 99 | 100 | #[test] 101 | fn test_able_to_reset_base_delay() { 102 | let base = Duration::from_secs(10); 103 | let mut retry = BackoffRetry::new(base, Duration::from_secs(30), 1, false); 104 | let start = Instant::now(); 105 | 106 | assert_eq!(retry.next_delay(start), base); 107 | assert_eq!(retry.next_delay(start.add(Duration::from_secs(1))), base); 108 | 109 | let base = Duration::from_secs(3); 110 | retry.change_base_delay(base); 111 | assert_eq!(retry.next_delay(start.add(Duration::from_secs(2))), base); 112 | } 113 | 114 | #[test] 115 | fn test_with_backoff() { 116 | let base = Duration::from_secs(10); 117 | let max = Duration::from_secs(60); 118 | let mut retry = BackoffRetry::new(base, max, 2, false); 119 | let start = Instant::now() - Duration::from_secs(60); 120 | 121 | assert_eq!(retry.next_delay(start), base); 122 | assert_eq!( 123 | retry.next_delay(start.add(Duration::from_secs(1))), 124 | base * 2 125 | ); 126 | assert_eq!( 127 | retry.next_delay(start.add(Duration::from_secs(2))), 128 | base * 4 129 | ); 130 | assert_eq!(retry.next_delay(start.add(Duration::from_secs(3))), max); 131 | } 132 | 133 | #[test] 134 | fn test_with_jitter() { 135 | let base = Duration::from_secs(10); 136 | let max = Duration::from_secs(60); 137 | let mut retry = BackoffRetry::new(base, max, 1, true); 138 | let start = Instant::now() - Duration::from_secs(60); 139 | 140 | let delay = retry.next_delay(start); 141 | assert!(base / 2 <= delay && delay <= base); 142 | } 143 | 144 | #[test] 145 | fn test_retry_holds_at_max() { 146 | let base = Duration::from_secs(20); 147 | let max = Duration::from_secs(30); 148 | 149 | let mut retry = BackoffRetry::new(base, max, 2, false); 150 | let start = Instant::now(); 151 | retry.reset(start); 152 | 153 | let time = start.add(Duration::from_secs(20)); 154 | let delay = retry.next_delay(time); 155 | assert_eq!(delay, base); 156 | 157 | let time = time.add(Duration::from_secs(20)); 158 | let delay = retry.next_delay(time); 159 | assert_eq!(delay, max); 160 | 161 | let time = time.add(Duration::from_secs(20)); 162 | let delay = retry.next_delay(time); 163 | assert_eq!(delay, max); 164 | } 165 | 166 | #[test] 167 | fn test_reset_interval() { 168 | let base = Duration::from_secs(10); 169 | let max = Duration::from_secs(60); 170 | let reset_interval = Duration::from_secs(45); 171 | 172 | // Prepare a retry strategy that has succeeded at a specific point. 
173 | let mut retry = BackoffRetry::new(base, max, 2, false); 174 | retry.reset_interval = reset_interval; 175 | let start = Instant::now() - Duration::from_secs(60); 176 | retry.reset(start); 177 | 178 | // Verify that calculating the next delay returns as expected 179 | let time = start.add(Duration::from_secs(1)); 180 | let delay = retry.next_delay(time); 181 | assert_eq!(delay, base); 182 | 183 | // Verify resetting the last known good time doesn't change the retry policy since it hasn't 184 | // exceeded the retry interval. 185 | let time = time.add(delay); 186 | retry.reset(time); 187 | 188 | let time = time.add(Duration::from_secs(10)); 189 | let delay = retry.next_delay(time); 190 | assert_eq!(delay, base * 2); 191 | 192 | // And finally check that if we exceed the reset interval, the retry strategy will default 193 | // back to base. 194 | let time = time.add(delay); 195 | retry.reset(time); 196 | 197 | let time = time.add(reset_interval); 198 | let delay = retry.next_delay(time); 199 | assert_eq!(delay, base); 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /eventsource-client/test-data/.gitattributes: -------------------------------------------------------------------------------- 1 | big-event-followed-by-another.sse -diff 2 | -------------------------------------------------------------------------------- /eventsource-client/test-data/one-event-crlf.sse: -------------------------------------------------------------------------------- 1 | event:patch 2 | data:{"data":{"clientSide":true,"clientSideAvailability":{"usingEnvironmentId":true,"usingMobileKey":true},"debugEventsUntilDate":null,"deleted":false,"fallthrough":{"variation":0},"key":"goals.02.featureWithGoals","offVariation":null,"on":false,"prerequisites":[],"rules":[],"salt":"e51f55941194456da34ce93dd39badb1","sel":"a4806819a2b640d3a275b83606956bc4","targets":[],"trackEvents":true,"trackEventsFallthrough":false,"variations":[true,false],"version":52},"path":"/flags/goals.02.featureWithGoals"} 3 | 4 | -------------------------------------------------------------------------------- /eventsource-client/test-data/one-event.sse: -------------------------------------------------------------------------------- 1 | event:patch 2 | data:{"data":{"clientSide":true,"clientSideAvailability":{"usingEnvironmentId":true,"usingMobileKey":true},"debugEventsUntilDate":null,"deleted":false,"fallthrough":{"variation":0},"key":"goals.02.featureWithGoals","offVariation":null,"on":false,"prerequisites":[],"rules":[],"salt":"e51f55941194456da34ce93dd39badb1","sel":"a4806819a2b640d3a275b83606956bc4","targets":[],"trackEvents":true,"trackEventsFallthrough":false,"variations":[true,false],"version":52},"path":"/flags/goals.02.featureWithGoals"} 3 | 4 | -------------------------------------------------------------------------------- /eventsource-client/test-data/persistent-event-id.sse: -------------------------------------------------------------------------------- 1 | event:one 2 | id: 1 3 | data:One 4 | 5 | event:two 6 | data:Two 7 | 8 | event:three 9 | id: 3 10 | data:Three 11 | 12 | event:four 13 | data:Four 14 | 15 | -------------------------------------------------------------------------------- /eventsource-client/test-data/two-events-crlf.sse: -------------------------------------------------------------------------------- 1 | event:one 2 | data:One 3 | 4 | event:two 5 | data:Two 6 | 7 | -------------------------------------------------------------------------------- 
/eventsource-client/test-data/two-events.sse: -------------------------------------------------------------------------------- 1 | event:one 2 | data:One 3 | 4 | event:two 5 | data:Two 6 | 7 | -------------------------------------------------------------------------------- /release-please-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "release-type": "rust", 3 | "bump-minor-pre-major": true, 4 | "versioning": "default", 5 | "include-component-in-tag": false, 6 | "include-v-in-tag": false, 7 | "packages": { 8 | "eventsource-client": {} 9 | } 10 | } 11 | --------------------------------------------------------------------------------
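The sources above define the crate's public surface: `ClientBuilder` and `ReconnectOptions` for configuration, the `SSE` variants yielded by `Client::stream()`, and the `Error` variants from `error.rs`. The following is a minimal sketch (not part of the repository) of how a consumer might wire these pieces together. The URL and backoff values are illustrative, it assumes the builder exposes a `reconnect(ReconnectOptions)` setter, and the choice to keep polling after `Eof`/`UnexpectedEof` is an application-level decision, not a requirement of the library.

```
use std::time::Duration;

use eventsource_client::{Client, ClientBuilder, Error, ReconnectOptions, SSE};
use futures::TryStreamExt;

#[tokio::main]
async fn main() -> Result<(), Error> {
    // Reconnect on stream errors, backing off 2s, 4s, 8s, ... capped at 30s.
    let reconnect = ReconnectOptions::reconnect(true)
        .retry_initial(true)
        .delay(Duration::from_secs(2))
        .backoff_factor(2)
        .delay_max(Duration::from_secs(30))
        .build();

    // Assumed builder setter for attaching the reconnect options.
    let client = ClientBuilder::for_url("https://example.com/stream")?
        .reconnect(reconnect)
        .build();

    let mut stream = Box::pin(client.stream());
    loop {
        match stream.try_next().await {
            Ok(Some(SSE::Connected(details))) => {
                println!("connected, status {}", details.response().status());
            }
            Ok(Some(SSE::Event(event))) => println!("{}: {}", event.event_type, event.data),
            Ok(Some(SSE::Comment(comment))) => println!("comment: {comment}"),
            Ok(None) => break, // stream closed
            // The client yields Eof/UnexpectedEof before reconnecting on its own.
            Err(Error::Eof) | Err(Error::UnexpectedEof) => continue,
            Err(Error::UnexpectedResponse(response, body)) => {
                eprintln!("unexpected status {}", response.status());
                eprintln!("body: {:?}", body.body_bytes().await?);
                break;
            }
            Err(other) => {
                eprintln!("stream error: {other}");
                break;
            }
        }
    }
    Ok(())
}
```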