├── CODEOWNERS
├── rust-toolchain.toml
├── assets
│   ├── s2-black.png
│   └── s2-white.png
├── .gitmodules
├── clippy.toml
├── .rustfmt.toml
├── .gitignore
├── examples
│   ├── delete_stream.rs
│   ├── delete_basin.rs
│   ├── list_streams.rs
│   ├── get_latest_record.rs
│   ├── create_stream.rs
│   ├── list_all_basins.rs
│   ├── explicit_trim.rs
│   ├── consumer.rs
│   ├── issue_access_token.rs
│   ├── reconfigure_stream.rs
│   ├── reconfigure_basin.rs
│   ├── create_basin.rs
│   └── producer.rs
├── .github
│   └── workflows
│       ├── release.yml
│       ├── update_protos.yaml
│       └── ci.yml
├── Cargo.toml
├── src
│   ├── lib.rs
│   ├── service.rs
│   ├── service
│   │   ├── basin.rs
│   │   ├── account.rs
│   │   └── stream.rs
│   ├── batching.rs
│   ├── append_session.rs
│   └── client.rs
├── cliff.toml
├── README.md
├── LICENSE
└── CHANGELOG.md

/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @s2-streamstore/dev
2 |
--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
1 | [toolchain]
2 | channel = "stable"
--------------------------------------------------------------------------------
/assets/s2-black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/s2-streamstore/s2-sdk-rust/HEAD/assets/s2-black.png
--------------------------------------------------------------------------------
/assets/s2-white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/s2-streamstore/s2-sdk-rust/HEAD/assets/s2-white.png
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "proto"]
2 |     path = proto
3 |     url = https://github.com/s2-streamstore/s2-protos
4 |     branch = main
--------------------------------------------------------------------------------
/clippy.toml:
--------------------------------------------------------------------------------
1 | # TODO: we can remove this override once tonic starts internally boxing, https://github.com/hyperium/tonic/issues/2253
2 | large-error-threshold = 256
--------------------------------------------------------------------------------
/.rustfmt.toml:
--------------------------------------------------------------------------------
1 | max_width = 100
2 |
3 | group_imports = "StdExternalCrate"
4 | imports_granularity = "Crate"
5 | imports_layout = "Mixed"
6 |
7 | comment_width = 100
8 | wrap_comments = true
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Generated by Cargo
2 | # will have compiled files and executables
3 | debug/
4 | target/
5 |
6 | # These are backup files generated by rustfmt
7 | **/*.rs.bk
8 |
9 | # MSVC Windows builds of rustc generate these, which store debugging information
10 | *.pdb
11 |
12 | # IDE Specific configurations
13 | .idea/
14 | .helix/
--------------------------------------------------------------------------------
/examples/delete_stream.rs:
--------------------------------------------------------------------------------
1 | use s2::{
2 |     client::{BasinClient, ClientConfig},
3 |     types::{BasinName, DeleteStreamRequest},
4 | };
5 |
6 | #[tokio::main]
7 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
8 |     let token = std::env::var("S2_ACCESS_TOKEN")?;
9 |     let
config = ClientConfig::new(token); 10 | let basin: BasinName = "my-favorite-basin".parse()?; 11 | let basin_client = BasinClient::new(config, basin); 12 | 13 | let stream = "my-favorite-stream"; 14 | 15 | let delete_stream_request = DeleteStreamRequest::new(stream); 16 | 17 | basin_client.delete_stream(delete_stream_request).await?; 18 | 19 | Ok(()) 20 | } 21 | -------------------------------------------------------------------------------- /examples/delete_basin.rs: -------------------------------------------------------------------------------- 1 | use s2::{ 2 | client::{Client, ClientConfig}, 3 | types::{BasinName, DeleteBasinRequest}, 4 | }; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Box> { 8 | let token = std::env::var("S2_ACCESS_TOKEN")?; 9 | let config = ClientConfig::new(token); 10 | let client = Client::new(config); 11 | 12 | let basin: BasinName = "my-favorite-basin".parse()?; 13 | 14 | let delete_basin_request = DeleteBasinRequest::new(basin) 15 | // Don't error if the basin doesn't exist. 16 | .with_if_exists(true); 17 | 18 | client.delete_basin(delete_basin_request).await?; 19 | 20 | Ok(()) 21 | } 22 | -------------------------------------------------------------------------------- /examples/list_streams.rs: -------------------------------------------------------------------------------- 1 | use s2::{ 2 | client::{BasinClient, ClientConfig}, 3 | types::{BasinName, ListStreamsRequest}, 4 | }; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Box> { 8 | let token = std::env::var("S2_ACCESS_TOKEN")?; 9 | let config = ClientConfig::new(token); 10 | let basin: BasinName = "my-favorite-basin".parse()?; 11 | let basin_client = BasinClient::new(config, basin); 12 | 13 | let prefix = "my-"; 14 | let list_streams_request = ListStreamsRequest::new().with_prefix(prefix); 15 | 16 | let list_streams_response = basin_client.list_streams(list_streams_request).await?; 17 | 18 | println!("{list_streams_response:#?}"); 19 | 20 | Ok(()) 21 | } 22 | -------------------------------------------------------------------------------- /examples/get_latest_record.rs: -------------------------------------------------------------------------------- 1 | use s2::{ 2 | client::{ClientConfig, StreamClient}, 3 | types::{BasinName, ReadLimit, ReadRequest, ReadStart}, 4 | }; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Box> { 8 | let token = std::env::var("S2_ACCESS_TOKEN")?; 9 | let config = ClientConfig::new(token); 10 | let basin: BasinName = "my-favorite-basin".parse()?; 11 | let stream = "my-favorite-stream"; 12 | let stream_client = StreamClient::new(config, basin, stream); 13 | 14 | let read_limit = ReadLimit::new().with_count(1); 15 | let read_request = ReadRequest::new(ReadStart::TailOffset(1)).with_limit(read_limit); 16 | let latest_record = stream_client.read(read_request).await?; 17 | 18 | println!("{latest_record:#?}"); 19 | 20 | Ok(()) 21 | } 22 | -------------------------------------------------------------------------------- /examples/create_stream.rs: -------------------------------------------------------------------------------- 1 | use s2::{ 2 | client::{Client, ClientConfig}, 3 | types::{BasinName, CreateStreamRequest, StorageClass, StreamConfig}, 4 | }; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Box> { 8 | let token = std::env::var("S2_ACCESS_TOKEN")?; 9 | let config = ClientConfig::new(token); 10 | let client = Client::new(config); 11 | 12 | let basin: BasinName = "my-favorite-basin".parse()?; 13 | let basin_client = client.basin_client(basin); 
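    // (Effectively equivalent to `BasinClient::new(config, basin)` as used in
    // the other examples; going through `Client` is convenient when you also
    // need account-level calls.)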
14 | 15 | let stream = "my-favorite-stream"; 16 | 17 | let stream_config = StreamConfig::new().with_storage_class(StorageClass::Express); 18 | 19 | let create_stream_request = CreateStreamRequest::new(stream).with_config(stream_config); 20 | 21 | let created_stream = basin_client.create_stream(create_stream_request).await?; 22 | println!("{created_stream:#?}"); 23 | 24 | let stream_config = basin_client.get_stream_config(stream).await?; 25 | println!("{stream_config:#?}"); 26 | 27 | Ok(()) 28 | } 29 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | on: 3 | push: 4 | tags: ["[0-9]+.[0-9]+.[0-9]+*"] 5 | workflow_dispatch: 6 | jobs: 7 | create_release: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | contents: write 11 | steps: 12 | - name: checkout 13 | uses: actions/checkout@v4 14 | - name: version 15 | id: version 16 | uses: SebRollen/toml-action@v1.2.0 17 | with: 18 | file: Cargo.toml 19 | field: package.version 20 | - uses: mindsers/changelog-reader-action@v2 21 | id: changelog_reader 22 | with: 23 | version: ${{ steps.version.outputs.value }} 24 | - name: install rust 25 | uses: dtolnay/rust-toolchain@stable 26 | - name: publish to crates.io 27 | run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }} 28 | - name: create release 29 | uses: softprops/action-gh-release@v2 30 | with: 31 | name: ${{ steps.version.outputs.value }} 32 | body: ${{ steps.changelog_reader.outputs.changes }} 33 | -------------------------------------------------------------------------------- /examples/list_all_basins.rs: -------------------------------------------------------------------------------- 1 | use s2::{ 2 | client::{Client, ClientConfig}, 3 | types::ListBasinsRequest, 4 | }; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Box> { 8 | let token = std::env::var("S2_ACCESS_TOKEN")?; 9 | let config = ClientConfig::new(token); 10 | let client = Client::new(config); 11 | 12 | let mut all_basins = Vec::new(); 13 | 14 | let mut has_more = true; 15 | let mut start_after: Option = None; 16 | 17 | while has_more { 18 | let mut list_basins_request = ListBasinsRequest::new(); 19 | if let Some(start_after) = start_after.take() { 20 | list_basins_request = list_basins_request.with_start_after(start_after); 21 | } 22 | 23 | let list_basins_response = client.list_basins(list_basins_request).await?; 24 | 25 | all_basins.extend(list_basins_response.basins); 26 | 27 | start_after = all_basins.last().map(|b| b.name.clone()); 28 | has_more = list_basins_response.has_more; 29 | } 30 | 31 | println!("{all_basins:#?}"); 32 | 33 | Ok(()) 34 | } 35 | -------------------------------------------------------------------------------- /examples/explicit_trim.rs: -------------------------------------------------------------------------------- 1 | use s2::{ 2 | client::{ClientConfig, StreamClient}, 3 | types::{AppendInput, AppendRecordBatch, BasinName, CommandRecord}, 4 | }; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Box> { 8 | let token = std::env::var("S2_ACCESS_TOKEN")?; 9 | let config = ClientConfig::new(token); 10 | let basin: BasinName = "my-favorite-basin".parse()?; 11 | let stream = "my-favorite-stream"; 12 | let stream_client = StreamClient::new(config, basin, stream); 13 | 14 | let tail = stream_client.check_tail().await?; 15 | if tail.seq_num == 0 { 16 | println!("Empty stream"); 17 | return Ok(()); 18 | } 19 | 20 | let latest_seq_num 
= tail.seq_num - 1; 21 | let trim_request = CommandRecord::trim(latest_seq_num); 22 | 23 | let append_record_batch = AppendRecordBatch::try_from_iter([trim_request]) 24 | .expect("valid batch with 1 command record"); 25 | let append_input = AppendInput::new(append_record_batch); 26 | let _ = stream_client.append(append_input).await?; 27 | 28 | println!("Trim requested"); 29 | 30 | Ok(()) 31 | } 32 | -------------------------------------------------------------------------------- /examples/consumer.rs: -------------------------------------------------------------------------------- 1 | use futures::StreamExt; 2 | use s2::{ 3 | client::{ClientConfig, StreamClient}, 4 | types::{BasinName, ReadSessionRequest, ReadStart}, 5 | }; 6 | use tokio::select; 7 | 8 | #[tokio::main] 9 | async fn main() -> Result<(), Box> { 10 | let token = std::env::var("S2_ACCESS_TOKEN")?; 11 | let config = ClientConfig::new(token); 12 | let basin: BasinName = "my-favorite-basin".parse()?; 13 | let stream = "my-favorite-stream"; 14 | let stream_client = StreamClient::new(config, basin, stream); 15 | 16 | let start_seq_num = 0; 17 | let read_session_request = ReadSessionRequest::new(ReadStart::SeqNum(start_seq_num)); 18 | let mut read_stream = stream_client.read_session(read_session_request).await?; 19 | 20 | loop { 21 | select! { 22 | next_batch = read_stream.next() => { 23 | let Some(next_batch) = next_batch else { break }; 24 | let next_batch = next_batch?; 25 | println!("{next_batch:?}"); 26 | } 27 | _ = tokio::signal::ctrl_c() => break, 28 | } 29 | } 30 | 31 | Ok(()) 32 | } 33 | -------------------------------------------------------------------------------- /examples/issue_access_token.rs: -------------------------------------------------------------------------------- 1 | use s2::{ 2 | client::{Client, ClientConfig}, 3 | types::{ 4 | AccessTokenId, AccessTokenInfo, AccessTokenScope, Operation, PermittedOperationGroups, 5 | ReadWritePermissions, ResourceSet, 6 | }, 7 | }; 8 | 9 | #[tokio::main] 10 | async fn main() -> Result<(), Box> { 11 | let token = std::env::var("S2_ACCESS_TOKEN")?; 12 | let config = ClientConfig::new(token); 13 | let client = Client::new(config); 14 | 15 | let access_token_id: AccessTokenId = "my-access-token".parse()?; 16 | let access_token_info = AccessTokenInfo::new(access_token_id).with_scope( 17 | AccessTokenScope::new() 18 | .with_op_groups( 19 | PermittedOperationGroups::new() 20 | .with_account(ReadWritePermissions::new().with_read(true)), 21 | ) 22 | .with_op(Operation::CreateStream) 23 | .with_streams(ResourceSet::Prefix("my-stream-prefix".to_string())) 24 | .with_basins(ResourceSet::Exact("my-perfect-basin".to_string())), 25 | ); 26 | let token = client.issue_access_token(access_token_info).await?; 27 | 28 | println!("Access token: {token}"); 29 | 30 | Ok(()) 31 | } 32 | -------------------------------------------------------------------------------- /examples/reconfigure_stream.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use s2::{ 4 | client::{BasinClient, ClientConfig}, 5 | types::{BasinName, ReconfigureStreamRequest, RetentionPolicy, StreamConfig}, 6 | }; 7 | 8 | #[tokio::main] 9 | async fn main() -> Result<(), Box> { 10 | let token = std::env::var("S2_ACCESS_TOKEN")?; 11 | let config = ClientConfig::new(token); 12 | let basin: BasinName = "my-favorite-basin".parse()?; 13 | let basin_client = BasinClient::new(config, basin); 14 | 15 | let stream = "my-favorite-stream"; 16 | 17 | let 
stream_config_updates = StreamConfig::new().with_retention_policy(RetentionPolicy::Age(
18 |         // Change the retention policy to 1 day.
19 |         Duration::from_secs(24 * 60 * 60),
20 |     ));
21 |
22 |     let reconfigure_stream_request = ReconfigureStreamRequest::new(stream)
23 |         .with_config(stream_config_updates)
24 |         // Field mask specifies which fields to update.
25 |         .with_mask(vec!["retention_policy".to_string()]);
26 |
27 |     let updated_stream_config = basin_client
28 |         .reconfigure_stream(reconfigure_stream_request)
29 |         .await?;
30 |
31 |     println!("{updated_stream_config:#?}");
32 |
33 |     Ok(())
34 | }
35 |
--------------------------------------------------------------------------------
/examples/reconfigure_basin.rs:
--------------------------------------------------------------------------------
1 | use s2::{
2 |     client::{Client, ClientConfig},
3 |     types::{BasinConfig, BasinName, ReconfigureBasinRequest, StorageClass, StreamConfig},
4 | };
5 |
6 | #[tokio::main]
7 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
8 |     let token = std::env::var("S2_ACCESS_TOKEN")?;
9 |     let config = ClientConfig::new(token);
10 |     let client = Client::new(config);
11 |
12 |     let basin: BasinName = "my-favorite-basin".parse()?;
13 |
14 |     let default_stream_config_updates =
15 |         StreamConfig::new().with_storage_class(StorageClass::Standard);
16 |     let basin_config_updates = BasinConfig::new()
17 |         .with_default_stream_config(default_stream_config_updates)
18 |         .with_create_stream_on_append(true)
19 |         .with_create_stream_on_read(true);
20 |
21 |     let reconfigure_basin_request = ReconfigureBasinRequest::new(basin)
22 |         .with_config(basin_config_updates)
23 |         // Field mask specifies which fields to update; it must name the
24 |         // fields actually set above for them to take effect.
25 |         .with_mask(vec![
26 |             "default_stream_config.storage_class".to_string(),
27 |             "create_stream_on_append".to_string(),
28 |             "create_stream_on_read".to_string(),
29 |         ]);
30 |
31 |     let updated_basin_config = client.reconfigure_basin(reconfigure_basin_request).await?;
32 |
33 |     println!("{updated_basin_config:#?}");
34 |
35 |     Ok(())
36 | }
37 |
--------------------------------------------------------------------------------
/examples/create_basin.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 |
3 | use s2::{
4 |     client::{Client, ClientConfig},
5 |     types::{BasinConfig, BasinName, CreateBasinRequest, RetentionPolicy, StreamConfig},
6 | };
7 |
8 | #[tokio::main]
9 | async fn main() -> Result<(), Box<dyn std::error::Error>> {
10 |     let token = std::env::var("S2_ACCESS_TOKEN")?;
11 |     let config = ClientConfig::new(token);
12 |     let client = Client::new(config);
13 |
14 |     let basin: BasinName = "my-favorite-basin".parse()?;
15 |
16 |     let default_stream_config = StreamConfig::new().with_retention_policy(RetentionPolicy::Age(
17 |         // Set the default retention age to 10 days.
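        // (Records older than this age become eligible for automatic trimming.)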
18 | Duration::from_secs(10 * 24 * 60 * 60), 19 | )); 20 | 21 | let basin_config = BasinConfig::new() 22 | .with_default_stream_config(default_stream_config) 23 | .with_create_stream_on_append(false) 24 | .with_create_stream_on_read(false); 25 | 26 | let create_basin_request = CreateBasinRequest::new(basin.clone()).with_config(basin_config); 27 | 28 | let created_basin = client.create_basin(create_basin_request).await?; 29 | println!("{created_basin:#?}"); 30 | 31 | let basin_config = client.get_basin_config(basin).await?; 32 | println!("{basin_config:#?}"); 33 | 34 | Ok(()) 35 | } 36 | -------------------------------------------------------------------------------- /.github/workflows/update_protos.yaml: -------------------------------------------------------------------------------- 1 | name: Update protos 2 | 3 | permissions: 4 | contents: write 5 | pull-requests: write 6 | 7 | on: 8 | repository_dispatch: 9 | types: [s2-proto-update] 10 | 11 | jobs: 12 | generate-docs: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | 17 | - uses: actions/checkout@v4 18 | with: 19 | submodules: true 20 | 21 | - name: Update Submodules 22 | id: submodules 23 | uses: sgoudham/update-git-submodules@v2.1.1 24 | with: 25 | submodules: proto 26 | 27 | - name: install rust 28 | uses: dtolnay/rust-toolchain@stable 29 | with: 30 | toolchain: stable nightly 31 | components: rustfmt, clippy 32 | - name: install protoc 33 | uses: arduino/setup-protoc@v3 34 | - uses: Swatinem/rust-cache@v2 35 | - name: Install cargo-sort 36 | uses: baptiste0928/cargo-install@v3 37 | with: 38 | crate: cargo-sort 39 | version: "^1.0" 40 | - name: build protos 41 | run: | 42 | cargo build --features prost-build 43 | 44 | - name: Create Pull Request 45 | if: ${{ steps.submodules.outputs['proto--updated'] }} 46 | uses: peter-evans/create-pull-request@v7 47 | with: 48 | committer: s2-helper[bot] <194906454+s2-helper[bot]@users.noreply.github.com> 49 | author: s2-helper[bot] <194906454+s2-helper[bot]@users.noreply.github.com> 50 | title: chore - proto update 51 | branch: "proto/update-s2-protos-${{ steps.submodules.outputs['proto--latestShortCommitSha'] }}" 52 | body: ${{ steps.submodules.outputs.prBody }} 53 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "streamstore" 3 | description = "Rust SDK for S2" 4 | version = "0.21.0" 5 | edition = "2024" 6 | license = "Apache-2.0" 7 | keywords = ["wal", "grpc", "s2", "log", "stream"] 8 | repository = "https://github.com/s2-streamstore/s2-sdk-rust" 9 | homepage = "https://github.com/s2-streamstore/s2-sdk-rust" 10 | 11 | [lib] 12 | name = "s2" 13 | path = "src/lib.rs" 14 | 15 | [package.metadata.docs.rs] 16 | features = ["connector"] 17 | cargo-args = ["-Zunstable-options", "-Zrustdoc-scrape-examples"] 18 | 19 | [[example]] 20 | # `doc-scrape-examples` requires *any* one example to specify the option. 
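# (docs.rs builds pass `-Zrustdoc-scrape-examples` via `cargo-args` above, which
# lifts snippets from the examples/ directory into the rendered API docs.)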
21 | name = "create_basin" 22 | doc-scrape-examples = true 23 | 24 | [dependencies] 25 | async-stream = "0.3.6" 26 | backon = "1.5.2" 27 | bytes = "1.10.1" 28 | futures = "0.3.31" 29 | http = "1.3.1" 30 | hyper = "1.7.0" 31 | hyper-util = "0.1.16" 32 | prost = "0.14.1" 33 | prost-types = "0.14.1" 34 | rand = "0.9.2" 35 | regex = "1.11.2" 36 | secrecy = "0.10.3" 37 | sync_docs = "0.2.0" 38 | thiserror = "2.0.16" 39 | tokio = { version = "1.47.1", features = ["time"] } 40 | tokio-muxt = "0.6.0" 41 | tokio-stream = "0.1.17" 42 | tonic = { version = "0.14.2", features = [ 43 | "tls-aws-lc", 44 | "tls-webpki-roots", 45 | "zstd", 46 | ] } 47 | tonic-prost = "0.14.2" 48 | tonic-side-effect = "0.4.0" 49 | tower-service = "0.3.3" 50 | tracing = "0.1.41" 51 | uuid = { version = "1.18.1", features = ["v4", "fast-rng"] } 52 | 53 | [build-dependencies] 54 | tonic-build = { version = "0.14.2", optional = true } 55 | tonic-prost-build = { version = "0.14.2", optional = true } 56 | 57 | [dev-dependencies] 58 | rstest = "0.26.1" 59 | tokio = { version = "1.47.1", features = ["full", "test-util"] } 60 | tokio-stream = "0.1.17" 61 | 62 | [features] 63 | connector = [] 64 | prost-build = ["dep:tonic-build", "dep:tonic-prost-build"] 65 | 66 | [lints.clippy] 67 | unused_async = "deny" 68 | -------------------------------------------------------------------------------- /examples/producer.rs: -------------------------------------------------------------------------------- 1 | use futures::StreamExt; 2 | use s2::{ 3 | batching::{AppendRecordsBatchingOpts, AppendRecordsBatchingStream}, 4 | client::{ClientConfig, StreamClient}, 5 | types::{AppendInput, AppendRecord, AppendRecordBatch, BasinName, CommandRecord, FencingToken}, 6 | }; 7 | 8 | #[tokio::main] 9 | async fn main() -> Result<(), Box> { 10 | let token = std::env::var("S2_ACCESS_TOKEN")?; 11 | let config = ClientConfig::new(token); 12 | let basin: BasinName = "my-favorite-basin".parse()?; 13 | let stream = "my-favorite-stream"; 14 | let stream_client = StreamClient::new(config, basin, stream); 15 | 16 | let fencing_token = FencingToken::generate(32).expect("valid fencing token"); 17 | 18 | // Set the fencing token. 
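    // (The fence command installs this token on the stream; the batching opts
    // below then send the same token together with `match_seq_num`, so a
    // competing writer with a stale token or tail position gets rejected.)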
19 | let fencing_token_record: AppendRecord = CommandRecord::fence(fencing_token.clone()).into(); 20 | let fencing_token_batch = AppendRecordBatch::try_from_iter([fencing_token_record]) 21 | .expect("valid batch with 1 append record"); 22 | let fencing_token_append_input = AppendInput::new(fencing_token_batch); 23 | let set_fencing_token = stream_client.append(fencing_token_append_input).await?; 24 | 25 | let match_seq_num = set_fencing_token.tail.seq_num; 26 | 27 | // Stream of records 28 | let append_stream = futures::stream::iter([ 29 | AppendRecord::new("record_1")?, 30 | AppendRecord::new("record_2")?, 31 | ]); 32 | 33 | let append_records_batching_opts = AppendRecordsBatchingOpts::new() 34 | .with_fencing_token(Some(fencing_token)) 35 | .with_match_seq_num(Some(match_seq_num)); 36 | 37 | let append_session_request = 38 | AppendRecordsBatchingStream::new(append_stream, append_records_batching_opts); 39 | 40 | let mut append_session_stream = stream_client.append_session(append_session_request).await?; 41 | 42 | while let Some(next) = append_session_stream.next().await { 43 | let next = next?; 44 | println!("{next:#?}"); 45 | } 46 | 47 | Ok(()) 48 | } 49 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | permissions: 3 | contents: read 4 | on: 5 | pull_request: 6 | push: 7 | branches: 8 | - main 9 | env: 10 | RUST_BACKTRACE: 1 11 | CARGO_TERM_COLOR: always 12 | CLICOLOR: 1 13 | CARGO_INCREMENTAL: 0 14 | CARGO_NET_GIT_FETCH_WITH_CLI: true 15 | concurrency: 16 | group: "${{ github.workflow }}-${{ github.ref }}" 17 | cancel-in-progress: true 18 | jobs: 19 | ci: 20 | permissions: 21 | contents: none 22 | name: CI 23 | needs: [test, lint] 24 | runs-on: ubuntu-latest 25 | if: always() 26 | steps: 27 | - name: Failed 28 | run: exit 1 29 | if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') 30 | test: 31 | runs-on: ubuntu-latest 32 | steps: 33 | - name: checkout 34 | uses: actions/checkout@v4 35 | with: 36 | submodules: true 37 | - name: install rust 38 | uses: dtolnay/rust-toolchain@stable 39 | with: 40 | toolchain: stable 41 | components: rustfmt, clippy 42 | - name: install protoc 43 | uses: arduino/setup-protoc@v3 44 | with: 45 | repo-token: ${{ secrets.GITHUB_TOKEN }} 46 | - uses: Swatinem/rust-cache@v2 47 | - name: Install nextest 48 | uses: taiki-e/install-action@nextest 49 | - name: Run cargo tests 50 | run: cargo nextest run 51 | lint: 52 | runs-on: ubuntu-latest 53 | steps: 54 | - name: checkout 55 | uses: actions/checkout@v4 56 | with: 57 | submodules: true 58 | - name: install rust 59 | uses: dtolnay/rust-toolchain@stable 60 | with: 61 | toolchain: stable nightly 62 | components: rustfmt, clippy 63 | - name: install protoc 64 | uses: arduino/setup-protoc@v3 65 | with: 66 | repo-token: ${{ secrets.GITHUB_TOKEN }} 67 | - uses: Swatinem/rust-cache@v2 68 | - name: Install cargo-sort 69 | uses: baptiste0928/cargo-install@v3 70 | with: 71 | crate: cargo-sort 72 | version: "^1.0" 73 | - name: check proto build up-to-date 74 | run: | 75 | cargo build --features prost-build 76 | git diff --exit-code src/api.rs && echo "Up-to-date" || { 77 | echo "Not up-to-date" 78 | exit 1 79 | } 80 | - name: check documentation 81 | env: 82 | RUSTDOCFLAGS: -D warnings 83 | run: cargo doc --workspace --all-features --no-deps --document-private-items 84 | - name: check formatting 85 | 
run: cargo +nightly fmt --all -- --check 86 | - name: check clippy 87 | run: cargo clippy --workspace --all-features --all-targets -- -D warnings --allow deprecated 88 | - name: check Cargo.toml sorting 89 | run: cargo sort --workspace --check 90 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | Rust SDK for S2. 3 | 4 | The Rust SDK provides ergonomic wrappers and utilities to interact with the 5 | [S2 API](https://s2.dev/docs/interface/grpc). 6 | 7 | # Getting started 8 | 9 | 1. Ensure you have `tokio` added as a dependency. The SDK relies on [Tokio](https://crates.io/crates/tokio) 10 | for executing async code. 11 | 12 | ```bash 13 | cargo add tokio --features full 14 | ``` 15 | 16 | 1. Add the `streamstore` dependency to your project: 17 | 18 | ```bash 19 | cargo add streamstore 20 | ``` 21 | 22 | 1. Generate an authentication token by logging onto the web console at [s2.dev](https://s2.dev/dashboard). 23 | 24 | 1. Make a request using SDK client. 25 | 26 | ```no_run 27 | # let _ = async move { 28 | let config = s2::ClientConfig::new(""); 29 | let client = s2::Client::new(config); 30 | 31 | let basins = client.list_basins(Default::default()).await?; 32 | println!("My basins: {:?}", basins); 33 | # return Ok::<(), s2::client::ClientError>(()); }; 34 | ``` 35 | 36 | See documentation for the [`client`] module for more information on how to 37 | use the client, and what requests can be made. 38 | 39 | # Examples 40 | 41 | We have curated a bunch of examples in the 42 | [SDK repository](https://github.com/s2-streamstore/s2-sdk-rust/tree/main/examples) 43 | demonstrating how to use the SDK effectively: 44 | 45 | * [List all basins](https://github.com/s2-streamstore/s2-sdk-rust/blob/main/examples/list_all_basins.rs) 46 | * [Explicit stream trimming](https://github.com/s2-streamstore/s2-sdk-rust/blob/main/examples/explicit_trim.rs) 47 | * [Producer (with concurrency control)](https://github.com/s2-streamstore/s2-sdk-rust/blob/main/examples/producer.rs) 48 | * [Consumer](https://github.com/s2-streamstore/s2-sdk-rust/blob/main/examples/consumer.rs) 49 | * and many more... 50 | 51 | This documentation is generated using 52 | [`rustdoc-scrape-examples`](https://doc.rust-lang.org/rustdoc/scraped-examples.html), 53 | so you will be able to see snippets from examples right here in the 54 | documentation. 55 | 56 | # Feedback 57 | 58 | We use [Github Issues](https://github.com/s2-streamstore/s2-sdk-rust/issues) 59 | to track feature requests and issues with the SDK. If you wish to provide 60 | feedback, report a bug or request a feature, feel free to open a Github 61 | issue. 
62 | 63 | # Quick Links 64 | 65 | * [S2 Website](https://s2.dev) 66 | * [S2 Documentation](https://s2.dev/docs) 67 | * [CHANGELOG](https://github.com/s2-streamstore/s2-sdk-rust/blob/main/CHANGELOG.md) 68 | */ 69 | 70 | #![doc( 71 | html_favicon_url = "https://raw.githubusercontent.com/s2-streamstore/s2-sdk-rust/main/assets/s2-black.png" 72 | )] 73 | #![doc( 74 | html_logo_url = "https://raw.githubusercontent.com/s2-streamstore/s2-sdk-rust/main/assets/s2-black.png" 75 | )] 76 | #![warn(missing_docs)] 77 | 78 | #[rustfmt::skip] 79 | mod api; 80 | 81 | mod append_session; 82 | mod service; 83 | 84 | pub mod batching; 85 | pub mod client; 86 | pub mod types; 87 | 88 | pub use client::{BasinClient, Client, ClientConfig, StreamClient}; 89 | pub use service::Streaming; 90 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | # git-cliff ~ default configuration file 2 | # https://git-cliff.org/docs/configuration 3 | # 4 | # Lines starting with "#" are comments. 5 | # Configuration options are organized into tables and keys. 6 | # See documentation for more information on available options. 7 | 8 | [changelog] 9 | # template for the changelog header 10 | header = """ 11 | # Changelog\n 12 | All notable changes to this project will be documented in this file.\n 13 | """ 14 | # template for the changelog body 15 | # https://keats.github.io/tera/docs/#introduction 16 | body = """ 17 | {% if version %}\ 18 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 19 | {% else %}\ 20 | ## [unreleased] 21 | {% endif %}\ 22 | {% for group, commits in commits | group_by(attribute="group") %} 23 | ### {{ group | striptags | trim | upper_first }} 24 | {% for commit in commits %} 25 | - {% if commit.scope %}*({{ commit.scope }})* {% endif %}\ 26 | {% if commit.breaking %}[**breaking**] {% endif %}\ 27 | {{ commit.message | upper_first }}\ 28 | {% endfor %} 29 | {% endfor %}\n 30 | """ 31 | # template for the changelog footer 32 | footer = """ 33 | 34 | """ 35 | # remove the leading and trailing s 36 | trim = true 37 | # postprocessors 38 | postprocessors = [ 39 | { pattern = '', replace = "https://github.com/s2-streamstore/s2-sdk-rust" }, # replace repository URL 40 | ] 41 | # render body even when there are no releases to process 42 | # render_always = true 43 | # output file path 44 | # output = "test.md" 45 | 46 | [git] 47 | # parse the commits based on https://www.conventionalcommits.org 48 | conventional_commits = true 49 | # filter out the commits that are not conventional 50 | filter_unconventional = true 51 | # process each line of a commit as an individual commit 52 | split_commits = false 53 | # regex for preprocessing the commit messages 54 | commit_preprocessors = [ 55 | # Replace issue numbers 56 | { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, 57 | # Check spelling of the commit with https://github.com/crate-ci/typos 58 | # If the spelling is incorrect, it will be automatically fixed. 
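# (Left disabled here: enabling it would make changelog generation depend on
# the external `typos` binary being installed.)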
59 | #{ pattern = '.*', replace_command = 'typos --write-changes -' }, 60 | ] 61 | # regex for parsing and grouping commits 62 | commit_parsers = [ 63 | { message = "^feat", group = " Features" }, 64 | { message = "^fix", group = " Bug Fixes" }, 65 | { message = "^doc", group = " Documentation" }, 66 | { message = "^perf", group = " Performance" }, 67 | { message = "^refactor", group = " Refactor" }, 68 | { message = "^style", group = " Styling" }, 69 | { message = "^test", group = " Testing" }, 70 | { message = "^chore\\(release\\): prepare for", skip = true }, 71 | { message = "^chore\\(deps.*\\)", skip = true }, 72 | { message = "^chore\\(pr\\)", skip = true }, 73 | { message = "^chore\\(pull\\)", skip = true }, 74 | { message = "^chore|^ci", group = " Miscellaneous Tasks" }, 75 | { body = ".*security", group = " Security" }, 76 | { message = "^revert", group = " Revert" }, 77 | ] 78 | # filter out the commits that are not matched by commit parsers 79 | filter_commits = false 80 | # sort the tags topologically 81 | topo_order = false 82 | # sort the commits inside sections by oldest/newest order 83 | sort_commits = "oldest" 84 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | <!-- README header stripped in this dump (HTML: logo images and badge links); visible text: "Rust SDK for S2" -->
28 | 29 | The Rust SDK provides ergonomic wrappers and utilities to interact with the 30 | [S2 API](https://s2.dev/docs/interface/grpc). 31 | 32 | ## Getting started 33 | 34 | 1. Ensure you have `tokio` added as a dependency. The SDK relies on 35 | [Tokio](https://crates.io/crates/tokio) for executing async code. 36 | ```bash 37 | cargo add tokio --features full 38 | ``` 39 | 40 | 1. Add the `streamstore` dependency to your project: 41 | ```bash 42 | cargo add streamstore 43 | ``` 44 | 45 | 1. Generate an access token by logging onto the web console at 46 | [s2.dev](https://s2.dev/dashboard). 47 | 48 | 1. Make a request using SDK client. 49 | ```rust 50 | #[tokio::main] 51 | async fn main() -> Result<(), Box> { 52 | let config = s2::ClientConfig::new(""); 53 | let client = s2::Client::new(config); 54 | 55 | let basins = client.list_basins(Default::default()).await?; 56 | println!("My basins: {:?}", basins); 57 | 58 | Ok(()) 59 | } 60 | ``` 61 | 62 | ## Examples 63 | 64 | The [`examples`](./examples) directory in this repository contains a variety of 65 | example use cases demonstrating how to use the SDK effectively. 66 | 67 | Run any example using the following command: 68 | 69 | ```bash 70 | export S2_ACCESS_TOKEN="" 71 | cargo run --example 72 | ``` 73 | 74 | > [!NOTE] 75 | > You might want to update the basin name in the example before running since 76 | > basin names are globally unique and each example uses the same basin name 77 | > (`"my-favorite-basin"`). 78 | 79 | ## SDK Docs and Reference 80 | 81 | Head over to [docs.rs](https://docs.rs/streamstore/latest/s2/) for 82 | detailed documentation and crate reference. 83 | 84 | ## Feedback 85 | 86 | We use [Github Issues](https://github.com/s2-streamstore/s2-sdk-rust/issues) to 87 | track feature requests and issues with the SDK. If you wish to provide feedback, 88 | report a bug or request a feature, feel free to open a Github issue. 89 | 90 | ### Contributing 91 | 92 | Developers are welcome to submit Pull Requests on the repository. If there is 93 | no tracking issue for the bug or feature request corresponding to the PR, we 94 | encourage you to open one for discussion before submitting the PR. 95 | 96 | ## Reach out to us 97 | 98 | Join our [Discord](https://discord.gg/vTCs7kMkAf) server. We would love to hear 99 | from you. 100 | 101 | You can also email us at [hi@s2.dev](mailto:hi@s2.dev). 102 | 103 | ## License 104 | 105 | This project is licensed under the [Apache-2.0 License](./LICENSE). 
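
Regarding the note above about globally unique basin names: a quick way to avoid
collisions while trying the examples is to derive the name from a timestamp. A
minimal sketch (not part of the SDK; it assumes `BasinName` accepts lowercase
alphanumerics and hyphens, as the example names suggest):

```rust
use s2::types::BasinName;

fn unique_basin_name() -> Result<BasinName, Box<dyn std::error::Error>> {
    // Seconds since the Unix epoch: short, and unique enough for experiments.
    let suffix = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)?
        .as_secs();
    Ok(format!("my-favorite-basin-{suffix}").parse()?)
}
```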
106 | -------------------------------------------------------------------------------- /src/service.rs: -------------------------------------------------------------------------------- 1 | pub mod account; 2 | pub mod basin; 3 | pub mod stream; 4 | 5 | use std::{ 6 | pin::Pin, 7 | task::{Context, Poll}, 8 | }; 9 | 10 | use futures::StreamExt; 11 | use prost_types::method_options::IdempotencyLevel; 12 | use secrecy::{ExposeSecret, SecretString}; 13 | use tonic::metadata::{AsciiMetadataKey, AsciiMetadataValue, MetadataMap}; 14 | 15 | use crate::{client::ClientError, types}; 16 | 17 | pub async fn send_request( 18 | mut service: T, 19 | token: &SecretString, 20 | basin_header: Option, 21 | ) -> Result { 22 | let req = prepare_request(&mut service, token, basin_header)?; 23 | match service.send(req).await { 24 | Ok(resp) => Ok(service.parse_response(resp)?), 25 | Err(status) => Err(ClientError::Service(status)), 26 | } 27 | } 28 | 29 | fn prepare_request( 30 | service: &mut T, 31 | token: &SecretString, 32 | basin_header: Option, 33 | ) -> Result, types::ConvertError> { 34 | let mut req = service.prepare_request()?; 35 | add_authorization_header(req.metadata_mut(), token)?; 36 | if let Some(basin) = basin_header { 37 | req.metadata_mut() 38 | .insert(AsciiMetadataKey::from_static("s2-basin"), basin); 39 | } 40 | Ok(req) 41 | } 42 | 43 | fn add_authorization_header( 44 | meta: &mut MetadataMap, 45 | token: &SecretString, 46 | ) -> Result<(), types::ConvertError> { 47 | let mut val: AsciiMetadataValue = format!("Bearer {}", token.expose_secret()) 48 | .try_into() 49 | .map_err(|_| "failed to parse token as metadata value")?; 50 | val.set_sensitive(true); 51 | meta.insert("authorization", val); 52 | Ok(()) 53 | } 54 | 55 | pub(crate) fn add_s2_request_token_header( 56 | meta: &mut MetadataMap, 57 | s2_request_token: &str, 58 | ) -> Result<(), types::ConvertError> { 59 | let s2_request_token: AsciiMetadataValue = s2_request_token 60 | .try_into() 61 | .map_err(|_| "failed to parse token as metadata value")?; 62 | 63 | meta.insert("s2-request-token", s2_request_token); 64 | 65 | Ok(()) 66 | } 67 | 68 | pub(crate) fn gen_s2_request_token() -> String { 69 | uuid::Uuid::new_v4().simple().to_string() 70 | } 71 | 72 | pub trait ServiceRequest: std::fmt::Debug { 73 | /// Request parameters generated by prost. 74 | type ApiRequest; 75 | /// Response to be returned by the RPC. 76 | type Response; 77 | /// Response generated by prost to be returned. 78 | type ApiResponse; 79 | 80 | /// Idempotency level for the underlying service. 81 | const IDEMPOTENCY_LEVEL: IdempotencyLevel; 82 | 83 | /// Take the request parameters and generate the corresponding tonic request. 84 | fn prepare_request(&mut self) -> Result, types::ConvertError>; 85 | 86 | /// Actually send the tonic request to receive a raw response and the parsed error. 87 | async fn send( 88 | &mut self, 89 | req: tonic::Request, 90 | ) -> Result, tonic::Status>; 91 | 92 | /// Return true if the request should be retried based on the error returned. 93 | fn should_retry(&self, err: &ClientError) -> bool { 94 | if Self::IDEMPOTENCY_LEVEL == IdempotencyLevel::IdempotencyUnknown { 95 | return false; 96 | }; 97 | 98 | // The request is definitely idempotent. 
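        // (Reaching this point implies the level is `Idempotent` or
        // `NoSideEffects`, so the transient status codes matched below are
        // safe to retry.)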
99 | if let ClientError::Service(status) = err { 100 | matches!( 101 | status.code(), 102 | tonic::Code::Unavailable 103 | | tonic::Code::DeadlineExceeded 104 | | tonic::Code::Cancelled 105 | | tonic::Code::Unknown 106 | | tonic::Code::ResourceExhausted 107 | ) 108 | } else { 109 | false 110 | } 111 | } 112 | 113 | /// Take the tonic response and generate the response to be returned. 114 | fn parse_response( 115 | &self, 116 | resp: tonic::Response, 117 | ) -> Result; 118 | } 119 | 120 | pub trait StreamingRequest: Unpin { 121 | type RequestItem; 122 | type ApiRequestItem; 123 | 124 | fn prepare_request_item(&self, req: Self::RequestItem) -> Self::ApiRequestItem; 125 | } 126 | 127 | pub struct ServiceStreamingRequest 128 | where 129 | R: StreamingRequest, 130 | S: futures::Stream + Unpin, 131 | { 132 | req: R, 133 | stream: S, 134 | } 135 | 136 | impl ServiceStreamingRequest 137 | where 138 | R: StreamingRequest, 139 | S: futures::Stream + Unpin, 140 | { 141 | pub fn new(req: R, stream: S) -> Self { 142 | Self { req, stream } 143 | } 144 | } 145 | 146 | impl futures::Stream for ServiceStreamingRequest 147 | where 148 | R: StreamingRequest, 149 | S: futures::Stream + Unpin, 150 | { 151 | type Item = R::ApiRequestItem; 152 | 153 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 154 | match self.stream.poll_next_unpin(cx) { 155 | Poll::Pending => Poll::Pending, 156 | Poll::Ready(None) => Poll::Ready(None), 157 | Poll::Ready(Some(req)) => Poll::Ready(Some(self.req.prepare_request_item(req))), 158 | } 159 | } 160 | } 161 | 162 | pub trait StreamingResponse: Unpin { 163 | /// Response message item to be returned by the RPC stream. 164 | type ResponseItem; 165 | /// Response message item generated by prost in the stream. 166 | type ApiResponseItem; 167 | 168 | /// Take the tonic response message from stream item and generate stream item. 169 | fn parse_response_item( 170 | &self, 171 | resp: Self::ApiResponseItem, 172 | ) -> Result; 173 | } 174 | 175 | pub struct ServiceStreamingResponse { 176 | req: S, 177 | stream: tonic::Streaming, 178 | } 179 | 180 | impl ServiceStreamingResponse { 181 | pub fn new(req: S, stream: tonic::Streaming) -> Self { 182 | Self { req, stream } 183 | } 184 | } 185 | 186 | impl futures::Stream for ServiceStreamingResponse { 187 | type Item = Result; 188 | 189 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 190 | match self.stream.poll_next_unpin(cx) { 191 | Poll::Pending => Poll::Pending, 192 | Poll::Ready(None) => Poll::Ready(None), 193 | Poll::Ready(Some(item)) => { 194 | let item = match item { 195 | Ok(resp) => self.req.parse_response_item(resp), 196 | Err(status) => Err(ClientError::Service(status)), 197 | }; 198 | Poll::Ready(Some(item)) 199 | } 200 | } 201 | } 202 | } 203 | 204 | /// Generic type for streaming response. 
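/// (Each item is a `Result`, so transport errors surface mid-stream — see the
/// `next_batch?` handling in examples/consumer.rs.)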
205 | pub type Streaming = Pin>>>; 206 | -------------------------------------------------------------------------------- /src/service/basin.rs: -------------------------------------------------------------------------------- 1 | use prost_types::method_options::IdempotencyLevel; 2 | use tonic::{IntoRequest, transport::Channel}; 3 | 4 | use super::{ServiceRequest, add_s2_request_token_header, gen_s2_request_token}; 5 | use crate::{ 6 | api::{self, basin_service_client::BasinServiceClient}, 7 | types, 8 | }; 9 | 10 | #[derive(Debug, Clone)] 11 | pub struct ListStreamsServiceRequest { 12 | client: BasinServiceClient, 13 | req: types::ListStreamsRequest, 14 | } 15 | 16 | impl ListStreamsServiceRequest { 17 | pub fn new(client: BasinServiceClient, req: types::ListStreamsRequest) -> Self { 18 | Self { client, req } 19 | } 20 | } 21 | 22 | impl ServiceRequest for ListStreamsServiceRequest { 23 | type ApiRequest = api::ListStreamsRequest; 24 | type Response = types::ListStreamsResponse; 25 | type ApiResponse = api::ListStreamsResponse; 26 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::NoSideEffects; 27 | 28 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 29 | let req: api::ListStreamsRequest = self.req.clone().try_into()?; 30 | Ok(req.into_request()) 31 | } 32 | 33 | async fn send( 34 | &mut self, 35 | req: tonic::Request, 36 | ) -> Result, tonic::Status> { 37 | self.client.list_streams(req).await 38 | } 39 | 40 | fn parse_response( 41 | &self, 42 | resp: tonic::Response, 43 | ) -> Result { 44 | Ok(resp.into_inner().into()) 45 | } 46 | } 47 | 48 | #[derive(Debug, Clone)] 49 | pub struct GetStreamConfigServiceRequest { 50 | client: BasinServiceClient, 51 | stream: String, 52 | } 53 | 54 | impl GetStreamConfigServiceRequest { 55 | pub fn new(client: BasinServiceClient, stream: impl Into) -> Self { 56 | Self { 57 | client, 58 | stream: stream.into(), 59 | } 60 | } 61 | } 62 | 63 | impl ServiceRequest for GetStreamConfigServiceRequest { 64 | type ApiRequest = api::GetStreamConfigRequest; 65 | type Response = types::StreamConfig; 66 | type ApiResponse = api::GetStreamConfigResponse; 67 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::NoSideEffects; 68 | 69 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 70 | let req = api::GetStreamConfigRequest { 71 | stream: self.stream.clone(), 72 | }; 73 | Ok(req.into_request()) 74 | } 75 | 76 | async fn send( 77 | &mut self, 78 | req: tonic::Request, 79 | ) -> Result, tonic::Status> { 80 | self.client.get_stream_config(req).await 81 | } 82 | 83 | fn parse_response( 84 | &self, 85 | resp: tonic::Response, 86 | ) -> Result { 87 | resp.into_inner().try_into() 88 | } 89 | } 90 | 91 | #[derive(Debug, Clone)] 92 | pub struct CreateStreamServiceRequest { 93 | client: BasinServiceClient, 94 | req: types::CreateStreamRequest, 95 | s2_request_token: String, 96 | } 97 | 98 | impl CreateStreamServiceRequest { 99 | pub fn new(client: BasinServiceClient, req: types::CreateStreamRequest) -> Self { 100 | Self { 101 | client, 102 | req, 103 | s2_request_token: gen_s2_request_token(), 104 | } 105 | } 106 | } 107 | 108 | impl ServiceRequest for CreateStreamServiceRequest { 109 | type ApiRequest = api::CreateStreamRequest; 110 | type Response = types::StreamInfo; 111 | type ApiResponse = api::CreateStreamResponse; 112 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::Idempotent; 113 | 114 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 115 | let req: api::CreateStreamRequest = 
self.req.clone().into(); 116 | let mut tonic_req = req.into_request(); 117 | add_s2_request_token_header(tonic_req.metadata_mut(), &self.s2_request_token)?; 118 | Ok(tonic_req) 119 | } 120 | 121 | async fn send( 122 | &mut self, 123 | req: tonic::Request, 124 | ) -> Result, tonic::Status> { 125 | self.client.create_stream(req).await 126 | } 127 | 128 | fn parse_response( 129 | &self, 130 | resp: tonic::Response, 131 | ) -> Result { 132 | resp.into_inner().try_into() 133 | } 134 | } 135 | 136 | #[derive(Debug, Clone)] 137 | pub struct DeleteStreamServiceRequest { 138 | client: BasinServiceClient, 139 | req: types::DeleteStreamRequest, 140 | } 141 | 142 | impl DeleteStreamServiceRequest { 143 | pub fn new(client: BasinServiceClient, req: types::DeleteStreamRequest) -> Self { 144 | Self { client, req } 145 | } 146 | } 147 | 148 | impl ServiceRequest for DeleteStreamServiceRequest { 149 | type ApiRequest = api::DeleteStreamRequest; 150 | type Response = (); 151 | type ApiResponse = api::DeleteStreamResponse; 152 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::Idempotent; 153 | 154 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 155 | let req: api::DeleteStreamRequest = self.req.clone().into(); 156 | Ok(req.into_request()) 157 | } 158 | 159 | async fn send( 160 | &mut self, 161 | req: tonic::Request, 162 | ) -> Result, tonic::Status> { 163 | match self.client.delete_stream(req).await { 164 | Err(status) if self.req.if_exists && status.code() == tonic::Code::NotFound => { 165 | Ok(tonic::Response::new(api::DeleteStreamResponse {})) 166 | } 167 | other => other, 168 | } 169 | } 170 | 171 | fn parse_response( 172 | &self, 173 | _resp: tonic::Response, 174 | ) -> Result { 175 | Ok(()) 176 | } 177 | } 178 | 179 | #[derive(Debug, Clone)] 180 | pub struct ReconfigureStreamServiceRequest { 181 | client: BasinServiceClient, 182 | req: types::ReconfigureStreamRequest, 183 | } 184 | 185 | impl ReconfigureStreamServiceRequest { 186 | pub fn new(client: BasinServiceClient, req: types::ReconfigureStreamRequest) -> Self { 187 | Self { client, req } 188 | } 189 | } 190 | 191 | impl ServiceRequest for ReconfigureStreamServiceRequest { 192 | type ApiRequest = api::ReconfigureStreamRequest; 193 | type Response = types::StreamConfig; 194 | type ApiResponse = api::ReconfigureStreamResponse; 195 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::IdempotencyUnknown; 196 | 197 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 198 | let req: api::ReconfigureStreamRequest = self.req.clone().into(); 199 | Ok(req.into_request()) 200 | } 201 | 202 | async fn send( 203 | &mut self, 204 | req: tonic::Request, 205 | ) -> Result, tonic::Status> { 206 | self.client.reconfigure_stream(req).await 207 | } 208 | 209 | fn parse_response( 210 | &self, 211 | resp: tonic::Response, 212 | ) -> Result { 213 | resp.into_inner().try_into() 214 | } 215 | } 216 | -------------------------------------------------------------------------------- /src/service/account.rs: -------------------------------------------------------------------------------- 1 | use prost_types::method_options::IdempotencyLevel; 2 | use tonic::{IntoRequest, transport::Channel}; 3 | 4 | use super::{ServiceRequest, add_s2_request_token_header, gen_s2_request_token}; 5 | use crate::{ 6 | api::{self, account_service_client::AccountServiceClient}, 7 | types, 8 | }; 9 | 10 | #[derive(Debug, Clone)] 11 | pub struct CreateBasinServiceRequest { 12 | client: AccountServiceClient, 13 | req: types::CreateBasinRequest, 
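    // (Generated once in `new` and re-sent on every retry via the
    // `s2-request-token` header, so the server sees the same token across
    // retries of this idempotent request.)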
14 | s2_request_token: String, 15 | } 16 | 17 | impl CreateBasinServiceRequest { 18 | pub fn new(client: AccountServiceClient, req: types::CreateBasinRequest) -> Self { 19 | Self { 20 | client, 21 | req, 22 | s2_request_token: gen_s2_request_token(), 23 | } 24 | } 25 | } 26 | 27 | impl ServiceRequest for CreateBasinServiceRequest { 28 | type ApiRequest = api::CreateBasinRequest; 29 | type Response = types::BasinInfo; 30 | type ApiResponse = api::CreateBasinResponse; 31 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::Idempotent; 32 | 33 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 34 | let req: api::CreateBasinRequest = self.req.clone().into(); 35 | let mut tonic_req = req.into_request(); 36 | add_s2_request_token_header(tonic_req.metadata_mut(), &self.s2_request_token)?; 37 | Ok(tonic_req) 38 | } 39 | 40 | async fn send( 41 | &mut self, 42 | req: tonic::Request, 43 | ) -> Result, tonic::Status> { 44 | self.client.create_basin(req).await 45 | } 46 | 47 | fn parse_response( 48 | &self, 49 | resp: tonic::Response, 50 | ) -> Result { 51 | resp.into_inner().try_into() 52 | } 53 | } 54 | 55 | #[derive(Debug, Clone)] 56 | pub struct ListBasinsServiceRequest { 57 | client: AccountServiceClient, 58 | req: types::ListBasinsRequest, 59 | } 60 | 61 | impl ListBasinsServiceRequest { 62 | pub fn new(client: AccountServiceClient, req: types::ListBasinsRequest) -> Self { 63 | Self { client, req } 64 | } 65 | } 66 | 67 | impl ServiceRequest for ListBasinsServiceRequest { 68 | type ApiRequest = api::ListBasinsRequest; 69 | type Response = types::ListBasinsResponse; 70 | type ApiResponse = api::ListBasinsResponse; 71 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::NoSideEffects; 72 | 73 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 74 | let req: api::ListBasinsRequest = self.req.clone().try_into()?; 75 | Ok(req.into_request()) 76 | } 77 | 78 | async fn send( 79 | &mut self, 80 | req: tonic::Request, 81 | ) -> Result, tonic::Status> { 82 | self.client.list_basins(req).await 83 | } 84 | 85 | fn parse_response( 86 | &self, 87 | resp: tonic::Response, 88 | ) -> Result { 89 | resp.into_inner().try_into() 90 | } 91 | } 92 | 93 | #[derive(Debug, Clone)] 94 | pub struct DeleteBasinServiceRequest { 95 | client: AccountServiceClient, 96 | req: types::DeleteBasinRequest, 97 | } 98 | 99 | impl DeleteBasinServiceRequest { 100 | pub fn new(client: AccountServiceClient, req: types::DeleteBasinRequest) -> Self { 101 | Self { client, req } 102 | } 103 | } 104 | 105 | impl ServiceRequest for DeleteBasinServiceRequest { 106 | type ApiRequest = api::DeleteBasinRequest; 107 | type Response = (); 108 | type ApiResponse = api::DeleteBasinResponse; 109 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::Idempotent; 110 | 111 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 112 | let req: api::DeleteBasinRequest = self.req.clone().into(); 113 | Ok(req.into_request()) 114 | } 115 | 116 | async fn send( 117 | &mut self, 118 | req: tonic::Request, 119 | ) -> Result, tonic::Status> { 120 | match self.client.delete_basin(req).await { 121 | Err(status) if self.req.if_exists && status.code() == tonic::Code::NotFound => { 122 | Ok(tonic::Response::new(api::DeleteBasinResponse {})) 123 | } 124 | other => other, 125 | } 126 | } 127 | 128 | fn parse_response( 129 | &self, 130 | _resp: tonic::Response, 131 | ) -> Result { 132 | Ok(()) 133 | } 134 | } 135 | 136 | #[derive(Debug, Clone)] 137 | pub struct GetBasinConfigServiceRequest { 138 | 
client: AccountServiceClient, 139 | basin: types::BasinName, 140 | } 141 | 142 | impl GetBasinConfigServiceRequest { 143 | pub fn new(client: AccountServiceClient, basin: types::BasinName) -> Self { 144 | Self { client, basin } 145 | } 146 | } 147 | 148 | impl ServiceRequest for GetBasinConfigServiceRequest { 149 | type ApiRequest = api::GetBasinConfigRequest; 150 | type Response = types::BasinConfig; 151 | type ApiResponse = api::GetBasinConfigResponse; 152 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::NoSideEffects; 153 | 154 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 155 | let req = api::GetBasinConfigRequest { 156 | basin: self.basin.to_string(), 157 | }; 158 | Ok(req.into_request()) 159 | } 160 | 161 | async fn send( 162 | &mut self, 163 | req: tonic::Request, 164 | ) -> Result, tonic::Status> { 165 | self.client.get_basin_config(req).await 166 | } 167 | 168 | fn parse_response( 169 | &self, 170 | resp: tonic::Response, 171 | ) -> Result { 172 | resp.into_inner().try_into() 173 | } 174 | } 175 | 176 | #[derive(Debug, Clone)] 177 | pub struct ReconfigureBasinServiceRequest { 178 | client: AccountServiceClient, 179 | req: types::ReconfigureBasinRequest, 180 | } 181 | 182 | impl ReconfigureBasinServiceRequest { 183 | pub fn new(client: AccountServiceClient, req: types::ReconfigureBasinRequest) -> Self { 184 | Self { client, req } 185 | } 186 | } 187 | 188 | impl ServiceRequest for ReconfigureBasinServiceRequest { 189 | type ApiRequest = api::ReconfigureBasinRequest; 190 | type Response = types::BasinConfig; 191 | type ApiResponse = api::ReconfigureBasinResponse; 192 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::Idempotent; 193 | 194 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 195 | let req: api::ReconfigureBasinRequest = self.req.clone().into(); 196 | Ok(req.into_request()) 197 | } 198 | 199 | async fn send( 200 | &mut self, 201 | req: tonic::Request, 202 | ) -> Result, tonic::Status> { 203 | self.client.reconfigure_basin(req).await 204 | } 205 | 206 | fn parse_response( 207 | &self, 208 | resp: tonic::Response, 209 | ) -> Result { 210 | resp.into_inner().try_into() 211 | } 212 | } 213 | 214 | #[derive(Debug, Clone)] 215 | pub struct IssueAccessTokenServiceRequest { 216 | client: AccountServiceClient, 217 | info: types::AccessTokenInfo, 218 | } 219 | 220 | impl IssueAccessTokenServiceRequest { 221 | pub fn new(client: AccountServiceClient, info: types::AccessTokenInfo) -> Self { 222 | Self { client, info } 223 | } 224 | } 225 | 226 | impl ServiceRequest for IssueAccessTokenServiceRequest { 227 | type ApiRequest = api::IssueAccessTokenRequest; 228 | type Response = String; 229 | type ApiResponse = api::IssueAccessTokenResponse; 230 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::IdempotencyUnknown; 231 | 232 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 233 | let req: api::IssueAccessTokenRequest = self.info.clone().into(); 234 | Ok(req.into_request()) 235 | } 236 | 237 | async fn send( 238 | &mut self, 239 | req: tonic::Request, 240 | ) -> Result, tonic::Status> { 241 | self.client.issue_access_token(req).await 242 | } 243 | 244 | fn parse_response( 245 | &self, 246 | resp: tonic::Response, 247 | ) -> Result { 248 | Ok(resp.into_inner().into()) 249 | } 250 | } 251 | 252 | #[derive(Debug, Clone)] 253 | pub struct RevokeAccessTokenServiceRequest { 254 | client: AccountServiceClient, 255 | id: types::AccessTokenId, 256 | } 257 | impl RevokeAccessTokenServiceRequest { 258 | 
pub fn new(client: AccountServiceClient, id: types::AccessTokenId) -> Self { 259 | Self { client, id } 260 | } 261 | } 262 | 263 | impl ServiceRequest for RevokeAccessTokenServiceRequest { 264 | type ApiRequest = api::RevokeAccessTokenRequest; 265 | type Response = types::AccessTokenInfo; 266 | type ApiResponse = api::RevokeAccessTokenResponse; 267 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::Idempotent; 268 | 269 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 270 | let req: api::RevokeAccessTokenRequest = self.id.clone().into(); 271 | Ok(req.into_request()) 272 | } 273 | 274 | async fn send( 275 | &mut self, 276 | req: tonic::Request, 277 | ) -> Result, tonic::Status> { 278 | self.client.revoke_access_token(req).await 279 | } 280 | 281 | fn parse_response( 282 | &self, 283 | resp: tonic::Response, 284 | ) -> Result { 285 | resp.into_inner().try_into() 286 | } 287 | } 288 | 289 | #[derive(Debug, Clone)] 290 | pub struct ListAccessTokensServiceRequest { 291 | client: AccountServiceClient, 292 | req: types::ListAccessTokensRequest, 293 | } 294 | impl ListAccessTokensServiceRequest { 295 | pub fn new(client: AccountServiceClient, req: types::ListAccessTokensRequest) -> Self { 296 | Self { client, req } 297 | } 298 | } 299 | 300 | impl ServiceRequest for ListAccessTokensServiceRequest { 301 | type ApiRequest = api::ListAccessTokensRequest; 302 | type Response = types::ListAccessTokensResponse; 303 | type ApiResponse = api::ListAccessTokensResponse; 304 | const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::NoSideEffects; 305 | 306 | fn prepare_request(&mut self) -> Result, types::ConvertError> { 307 | let req: api::ListAccessTokensRequest = self.req.clone().try_into()?; 308 | Ok(req.into_request()) 309 | } 310 | 311 | async fn send( 312 | &mut self, 313 | req: tonic::Request, 314 | ) -> Result, tonic::Status> { 315 | self.client.list_access_tokens(req).await 316 | } 317 | 318 | fn parse_response( 319 | &self, 320 | resp: tonic::Response, 321 | ) -> Result { 322 | resp.into_inner().try_into() 323 | } 324 | } 325 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/src/service/stream.rs:
--------------------------------------------------------------------------------
1 | use prost_types::method_options::IdempotencyLevel;
2 | use tonic::{IntoRequest, codec::CompressionEncoding, transport::Channel};
3 | use tonic_side_effect::{FrameSignal, RequestFrameMonitor};
4 | 
5 | use super::{
6 |     ClientError, ServiceRequest, ServiceStreamingRequest, ServiceStreamingResponse,
7 |     StreamingRequest, StreamingResponse,
8 | };
9 | use crate::{
10 |     api::{self, stream_service_client::StreamServiceClient},
11 |     client::AppendRetryPolicy,
12 |     types::{self, StreamPosition},
13 | };
14 | 
15 | #[derive(Debug, Clone)]
16 | pub struct CheckTailServiceRequest {
17 |     client: StreamServiceClient<Channel>,
18 |     stream: String,
19 | }
20 | 
21 | impl CheckTailServiceRequest {
22 |     pub fn new(client: StreamServiceClient<Channel>, stream: impl Into<String>) -> Self {
23 |         Self {
24 |             client,
25 |             stream: stream.into(),
26 |         }
27 |     }
28 | }
29 | 
30 | impl ServiceRequest for CheckTailServiceRequest {
31 |     type ApiRequest = api::CheckTailRequest;
32 |     type Response = StreamPosition;
33 |     type ApiResponse = api::CheckTailResponse;
34 |     const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::NoSideEffects;
35 | 
36 |     fn prepare_request(&mut self) -> Result<tonic::Request<Self::ApiRequest>, types::ConvertError> {
37 |         let req = api::CheckTailRequest {
38 |             stream: self.stream.clone(),
39 |         };
40 |         Ok(req.into_request())
41 |     }
42 | 
43 |     async fn send(
44 |         &mut self,
45 |         req: tonic::Request<Self::ApiRequest>,
46 |     ) -> Result<tonic::Response<Self::ApiResponse>, tonic::Status> {
47 |         self.client.check_tail(req).await
48 |     }
49 | 
50 |     fn parse_response(
51 |         &self,
52 |         resp: tonic::Response<Self::ApiResponse>,
53 |     ) -> Result<Self::Response, types::ConvertError> {
54 |         Ok(resp.into_inner().into())
55 |     }
56 | }
57 | 
58 | #[derive(Debug, Clone)]
59 | pub struct ReadServiceRequest {
60 |     client: StreamServiceClient<Channel>,
61 |     stream: String,
62 |     req: types::ReadRequest,
63 | }
64 | 
65 | impl ReadServiceRequest {
66 |     pub fn new(
67 |         mut client: StreamServiceClient<Channel>,
68 |         stream: impl Into<String>,
69 |         req: types::ReadRequest,
70 |         compression: bool,
71 |     ) -> Self {
72 |         if compression {
73 |             client = client.accept_compressed(CompressionEncoding::Zstd);
74 |         }
75 |         Self {
76 |             client,
77 |             stream: stream.into(),
78 |             req,
79 |         }
80 |     }
81 | }
82 | 
83 | impl ServiceRequest for ReadServiceRequest {
84 |     type ApiRequest = api::ReadRequest;
85 |     type Response = types::ReadOutput;
86 |     type ApiResponse = api::ReadResponse;
87 |     const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::NoSideEffects;
88 | 
89 |     fn prepare_request(&mut self) -> Result<tonic::Request<Self::ApiRequest>, types::ConvertError> {
90 |         let req = self.req.clone().try_into_api_type(self.stream.clone())?;
91 |         Ok(req.into_request())
92 |     }
93 | 
94 |     async fn send(
95 |         &mut self,
96 |         req: tonic::Request<Self::ApiRequest>,
97 |     ) -> Result<tonic::Response<Self::ApiResponse>, tonic::Status> {
98 |         self.client.read(req).await
99 |     }
100 | 
101 |     fn parse_response(
102 |         &self,
103 |         resp: tonic::Response<Self::ApiResponse>,
104 |     ) -> Result<Self::Response, types::ConvertError> {
105 |         resp.into_inner().try_into()
106 |     }
107 | }
108 | 
109 | #[derive(Debug, Clone)]
110 | pub struct ReadSessionServiceRequest {
111 |     client: StreamServiceClient<Channel>,
112 |     stream: String,
113 |     req: types::ReadSessionRequest,
114 | }
115 | 
116 | impl ReadSessionServiceRequest {
117 |     pub fn new(
118 |         mut client: StreamServiceClient<Channel>,
119 |         stream: impl Into<String>,
120 |         req: types::ReadSessionRequest,
121 |         compression: bool,
122 |     ) -> Self {
123 |         if compression {
124 |             client = client.accept_compressed(CompressionEncoding::Zstd);
125 |         }
126 |         Self {
127 |             client,
128 |             stream: stream.into(),
129 |             req,
130 |         }
131 |     }
132 | 
133 |     pub(crate) fn req_mut(&mut self) -> &mut types::ReadSessionRequest {
134 |         &mut self.req
135 |     }
136 | }
137 | 
138 | impl ServiceRequest for ReadSessionServiceRequest {
139 |     type ApiRequest = api::ReadSessionRequest;
140 |     type Response = ServiceStreamingResponse<ReadSessionStreamingResponse>;
141 |     type ApiResponse = tonic::Streaming<api::ReadSessionResponse>;
142 |     const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::NoSideEffects;
143 | 
144 |     fn prepare_request(&mut self) -> Result<tonic::Request<Self::ApiRequest>, types::ConvertError> {
145 |         let req = self.req.clone().into_api_type(self.stream.clone());
146 |         Ok(req.into_request())
147 |     }
148 | 
149 |     async fn send(
150 |         &mut self,
151 |         req: tonic::Request<Self::ApiRequest>,
152 |     ) -> Result<tonic::Response<Self::ApiResponse>, tonic::Status> {
153 |         self.client.read_session(req).await
154 |     }
155 | 
156 |     fn parse_response(
157 |         &self,
158 |         resp: tonic::Response<Self::ApiResponse>,
159 |     ) -> Result<Self::Response, types::ConvertError> {
160 |         Ok(ServiceStreamingResponse::new(
161 |             ReadSessionStreamingResponse,
162 |             resp.into_inner(),
163 |         ))
164 |     }
165 | }
166 | 
167 | pub struct ReadSessionStreamingResponse;
168 | 
169 | impl StreamingResponse for ReadSessionStreamingResponse {
170 |     type ResponseItem = types::ReadOutput;
171 |     type ApiResponseItem = api::ReadSessionResponse;
172 | 
173 |     fn parse_response_item(
174 |         &self,
175 |         resp: Self::ApiResponseItem,
176 |     ) -> Result<Self::ResponseItem, ClientError> {
177 |         resp.try_into().map_err(Into::into)
178 |     }
179 | }
180 | 
181 | #[derive(Debug, Clone)]
182 | pub struct AppendServiceRequest {
183 |     client: StreamServiceClient<RequestFrameMonitor>,
184 |     append_retry_policy: AppendRetryPolicy,
185 |     frame_signal: FrameSignal,
186 |     stream: String,
187 |     req: types::AppendInput,
188 | }
189 | 
190 | impl AppendServiceRequest {
191 |     pub fn new(
192 |         mut client: StreamServiceClient<RequestFrameMonitor>,
193 |         append_retry_policy: AppendRetryPolicy,
194 |         frame_signal: FrameSignal,
195 |         stream: impl Into<String>,
196 |         req: types::AppendInput,
197 |         compression: bool,
198 |     ) -> Self {
199 |         if compression {
200 |             client = client.send_compressed(CompressionEncoding::Zstd);
201 |         }
202 |         Self {
203 |             client,
204 |             append_retry_policy,
205 |             frame_signal,
206 |             stream: stream.into(),
207 |             req,
208 |         }
209 |     }
210 | }
211 | 
212 | impl ServiceRequest for AppendServiceRequest {
213 |     type ApiRequest = api::AppendRequest;
214 |     type Response = types::AppendAck;
215 |     type ApiResponse = api::AppendResponse;
216 |     const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::IdempotencyUnknown;
217 | 
218 |     fn prepare_request(&mut self) -> Result<tonic::Request<Self::ApiRequest>, types::ConvertError> {
219 |         Ok(api::AppendRequest {
220 |             input: Some(self.req.clone().into_api_type(self.stream.clone())),
221 |         }
222 |         .into_request())
223 |     }
224 | 
225 |     async fn send(
226 |         &mut self,
227 |         req: tonic::Request<Self::ApiRequest>,
228 |     ) -> Result<tonic::Response<Self::ApiResponse>, tonic::Status> {
229 |         self.client.append(req).await
230 |     }
231 | 
232 |     fn parse_response(
233 |         &self,
234 |         resp: tonic::Response<Self::ApiResponse>,
235 |     ) -> Result<Self::Response, types::ConvertError> {
236 |         resp.into_inner().try_into()
237 |     }
238 | 
239 |     fn should_retry(&self, err: &ClientError) -> bool {
240 |         if let ClientError::Service(status) = err {
241 |             let retryable_error = matches!(
242 |                 status.code(),
243 |                 tonic::Code::Unavailable
244 |                     | tonic::Code::DeadlineExceeded
245 |                     | tonic::Code::Cancelled
246 |                     | tonic::Code::Unknown
247 |                     | tonic::Code::ResourceExhausted
248 |             );
249 |             let policy_compliant = match self.append_retry_policy {
250 |                 AppendRetryPolicy::All => true,
251 |                 AppendRetryPolicy::NoSideEffects => !self.frame_signal.is_signalled(),
252 |             };
253 |             retryable_error && policy_compliant
254 |         } else {
255 |             false
256 |         }
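        // Policy interplay: `All` retries any of the status codes matched
        // above, while `NoSideEffects` additionally requires that no request
        // frame was observed on the wire (`frame_signal` not signalled),
        // since a frame that reached the server may already have been applied.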
257 |     }
258 | }
259 | 
260 | #[derive(Debug, Clone)]
261 | pub struct AppendSessionServiceRequest<S>
262 | where
263 |     S: Send + futures::Stream<Item = types::AppendInput> + Unpin,
264 | {
265 |     client: StreamServiceClient<RequestFrameMonitor>,
266 |     stream: String,
267 |     req: Option<S>,
268 | }
269 | 
270 | impl<S> AppendSessionServiceRequest<S>
271 | where
272 |     S: Send + futures::Stream<Item = types::AppendInput> + Unpin,
273 | {
274 |     pub fn new(
275 |         mut client: StreamServiceClient<RequestFrameMonitor>,
276 |         stream: impl Into<String>,
277 |         req: S,
278 |         compression: bool,
279 |     ) -> Self {
280 |         if compression {
281 |             client = client.send_compressed(CompressionEncoding::Zstd);
282 |         }
283 |         Self {
284 |             client,
285 |             stream: stream.into(),
286 |             req: Some(req),
287 |         }
288 |     }
289 | }
290 | 
291 | impl<S> ServiceRequest for AppendSessionServiceRequest<S>
292 | where
293 |     S: 'static + Send + futures::Stream<Item = types::AppendInput> + Unpin,
294 | {
295 |     type ApiRequest = ServiceStreamingRequest<AppendSessionStreamingRequest, S>;
296 |     type Response = ServiceStreamingResponse<AppendSessionStreamingResponse>;
297 |     type ApiResponse = tonic::Streaming<api::AppendSessionResponse>;
298 | 
299 |     const IDEMPOTENCY_LEVEL: IdempotencyLevel = IdempotencyLevel::IdempotencyUnknown;
300 | 
301 |     fn prepare_request(&mut self) -> Result<tonic::Request<Self::ApiRequest>, types::ConvertError> {
302 |         let req = ServiceStreamingRequest::new(
303 |             AppendSessionStreamingRequest::new(&self.stream),
304 |             self.req.take().ok_or("missing streaming append request")?,
305 |         );
306 |         Ok(req.into_request())
307 |     }
308 | 
309 |     async fn send(
310 |         &mut self,
311 |         req: tonic::Request<Self::ApiRequest>,
312 |     ) -> Result<tonic::Response<Self::ApiResponse>, tonic::Status> {
313 |         self.client.append_session(req).await
314 |     }
315 | 
316 |     fn parse_response(
317 |         &self,
318 |         resp: tonic::Response<Self::ApiResponse>,
319 |     ) -> Result<Self::Response, types::ConvertError> {
320 |         Ok(ServiceStreamingResponse::new(
321 |             AppendSessionStreamingResponse,
322 |             resp.into_inner(),
323 |         ))
324 |     }
325 | }
326 | 
327 | pub struct AppendSessionStreamingRequest {
328 |     stream: String,
329 | }
330 | 
331 | impl AppendSessionStreamingRequest {
332 |     fn new(stream: impl Into<String>) -> Self {
333 |         Self {
334 |             stream: stream.into(),
335 |         }
336 |     }
337 | }
338 | 
339 | impl StreamingRequest for AppendSessionStreamingRequest {
340 |     type RequestItem = types::AppendInput;
341 |     type ApiRequestItem = api::AppendSessionRequest;
342 | 
343 |     fn prepare_request_item(&self, req: Self::RequestItem) -> Self::ApiRequestItem {
344 |         api::AppendSessionRequest {
345 |             input: Some(req.into_api_type(&self.stream)),
346 |         }
347 |     }
348 | }
349 | 
350 | pub struct AppendSessionStreamingResponse;
351 | 
352 | impl StreamingResponse for AppendSessionStreamingResponse {
353 |     type ResponseItem = types::AppendAck;
354 |     type ApiResponseItem = api::AppendSessionResponse;
355 | 
356 |     fn parse_response_item(
357 |         &self,
358 |         resp: Self::ApiResponseItem,
359 |     ) -> Result<Self::ResponseItem, ClientError> {
360 |         resp.try_into().map_err(Into::into)
361 |     }
362 | }
363 | 
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 | 
3 | All notable changes to this project will be documented in this file.
4 | 5 | ## [0.21.0] - 2025-09-11 6 | 7 | ### Miscellaneous Tasks 8 | 9 | - Upgrade to tonic & prost 0.14 ([#212](https://github.com/s2-streamstore/s2-sdk-rust/issues/212)) 10 | 11 | ## [0.20.0] - 2025-09-03 12 | 13 | ### Features 14 | 15 | - Support `Infinite` retention policy ([#209](https://github.com/s2-streamstore/s2-sdk-rust/issues/209)) 16 | 17 | ## [0.19.2] - 2025-08-13 18 | 19 | ### Bug Fixes 20 | 21 | - Append session assertion failure ([#206](https://github.com/s2-streamstore/s2-sdk-rust/issues/206)) 22 | 23 | ## [0.19.1] - 2025-07-28 24 | 25 | ### Miscellaneous Tasks 26 | 27 | - Renaming DeleteOnEmpty -> DeleteOnEmptyConfig ([#202](https://github.com/s2-streamstore/s2-sdk-rust/issues/202)) 28 | 29 | ## [0.19.0] - 2025-07-24 30 | 31 | ### Features 32 | 33 | - Delete-on-empty ([#199](https://github.com/s2-streamstore/s2-sdk-rust/issues/199)) 34 | 35 | ### Miscellaneous Tasks 36 | 37 | - Fixes to delete-on-empty impl ([#200](https://github.com/s2-streamstore/s2-sdk-rust/issues/200)) 38 | 39 | ### Release 40 | 41 | - 0.18.0 ([#198](https://github.com/s2-streamstore/s2-sdk-rust/issues/198)) 42 | 43 | ## [0.18.0] - 2025-07-22 44 | 45 | ### Features 46 | 47 | - Clamp ([#197](https://github.com/s2-streamstore/s2-sdk-rust/issues/197)) 48 | 49 | ## [0.17.0] - 2025-06-06 50 | 51 | ### Features 52 | 53 | - `until` timestamp in reads + new metrics op types ([#193](https://github.com/s2-streamstore/s2-sdk-rust/issues/193)) 54 | 55 | ### Miscellaneous Tasks 56 | 57 | - Derive `PartialEq`, `Eq`, `Hash` on stringy newtypes ([#191](https://github.com/s2-streamstore/s2-sdk-rust/issues/191)) 58 | 59 | ## [0.16.2] - 2025-05-25 60 | 61 | ### Features 62 | 63 | - Impl `FromStr` / `Display` for `FencingToken` ([#189](https://github.com/s2-streamstore/s2-sdk-rust/issues/189)) 64 | 65 | ## [0.16.1] - 2025-05-25 66 | 67 | ## [0.16.0] - 2025-05-25 68 | 69 | ### Features 70 | 71 | - [**breaking**] Fencing token is now a string ([#185](https://github.com/s2-streamstore/s2-sdk-rust/issues/185)) 72 | 73 | ## [0.15.0] - 2025-05-10 74 | 75 | ### Features 76 | 77 | - Retry on `ResourceExhausted` and use `retry-after` if present ([#177](https://github.com/s2-streamstore/s2-sdk-rust/issues/177)) 78 | 79 | ## [0.14.0] - 2025-05-08 80 | 81 | ### Features 82 | 83 | - Timestamping config and refactoring of type conversion ([#179](https://github.com/s2-streamstore/s2-sdk-rust/issues/179)) 84 | 85 | ### Miscellaneous Tasks 86 | 87 | - Dependency upgrades ([#180](https://github.com/s2-streamstore/s2-sdk-rust/issues/180)) 88 | 89 | ## [0.13.0] - 2025-04-30 90 | 91 | ### Features 92 | 93 | - [**breaking**] Updated read APIs ([#174](https://github.com/s2-streamstore/s2-sdk-rust/issues/174)) 94 | 95 | ## [0.12.0] - 2025-04-18 96 | 97 | ### Features 98 | 99 | - [**breaking**] Remove `ReadOutput::FirstSeqNum` ([#169](https://github.com/s2-streamstore/s2-sdk-rust/issues/169)) 100 | 101 | ### Bug Fixes 102 | 103 | - Access token example ([#168](https://github.com/s2-streamstore/s2-sdk-rust/issues/168)) 104 | 105 | ### Miscellaneous Tasks 106 | 107 | - Add missing builders ([#170](https://github.com/s2-streamstore/s2-sdk-rust/issues/170)) 108 | - Dependency upgrades ([#171](https://github.com/s2-streamstore/s2-sdk-rust/issues/171)) 109 | 110 | ## [0.11.0] - 2025-04-15 111 | 112 | ### Features 113 | 114 | - Access token methods [S2-758] ([#163](https://github.com/s2-streamstore/s2-sdk-rust/issues/163)) 115 | 116 | ## [0.10.0] - 2025-03-19 117 | 118 | ### Features 119 | 120 | - [**breaking**] Timestamped records 
([#157](https://github.com/s2-streamstore/s2-sdk-rust/issues/157)) 121 | 122 | ## [0.9.0] - 2025-03-12 123 | 124 | ### Features 125 | 126 | - Configurable option for compression ([#151](https://github.com/s2-streamstore/s2-sdk-rust/issues/151)) 127 | 128 | ### Miscellaneous Tasks 129 | 130 | - Upgrade proto ([#153](https://github.com/s2-streamstore/s2-sdk-rust/issues/153)) 131 | - Proto update ([#154](https://github.com/s2-streamstore/s2-sdk-rust/issues/154)) 132 | 133 | ## [0.8.2] - 2025-02-07 134 | 135 | ### Bug Fixes 136 | 137 | - Retry CANCELLED gRPC status code ([#149](https://github.com/s2-streamstore/s2-sdk-rust/issues/149)) 138 | 139 | ## [0.8.1] - 2025-02-05 140 | 141 | ### Features 142 | 143 | - Enable compression ([#147](https://github.com/s2-streamstore/s2-sdk-rust/issues/147)) 144 | 145 | ## [0.8.0] - 2025-01-21 146 | 147 | ### Bug Fixes 148 | 149 | - Respect limits with read session resumption ([#139](https://github.com/s2-streamstore/s2-sdk-rust/issues/139)) 150 | 151 | ### Miscellaneous Tasks 152 | 153 | - Make `with_limit()` take option ([#145](https://github.com/s2-streamstore/s2-sdk-rust/issues/145)) 154 | 155 | ## [0.7.0] - 2025-01-16 156 | 157 | ### Miscellaneous Tasks 158 | 159 | - Update proto ([#135](https://github.com/s2-streamstore/s2-sdk-rust/issues/135)) 160 | 161 | ## [0.6.0] - 2025-01-13 162 | 163 | ### Documentation 164 | 165 | - Update README link for docs.rs ([#128](https://github.com/s2-streamstore/s2-sdk-rust/issues/128)) 166 | 167 | ### Miscellaneous Tasks 168 | 169 | - Update proto ([#129](https://github.com/s2-streamstore/s2-sdk-rust/issues/129)) 170 | - Default impl for AppendInput ([#130](https://github.com/s2-streamstore/s2-sdk-rust/issues/130)) 171 | - Update protos ([#133](https://github.com/s2-streamstore/s2-sdk-rust/issues/133)) 172 | 173 | ## [0.5.1] - 2024-12-20 174 | 175 | ### Documentation 176 | 177 | - Update S2 doc links ([#126](https://github.com/s2-streamstore/s2-sdk-rust/issues/126)) 178 | 179 | ## [0.5.0] - 2024-12-19 180 | 181 | ### Documentation 182 | 183 | - `batching` module Rust docs ([#119](https://github.com/s2-streamstore/s2-sdk-rust/issues/119)) 184 | - Update basin and stream names ([#122](https://github.com/s2-streamstore/s2-sdk-rust/issues/122)) 185 | - Update README API link ([#123](https://github.com/s2-streamstore/s2-sdk-rust/issues/123)) 186 | - `s2::client` ([#121](https://github.com/s2-streamstore/s2-sdk-rust/issues/121)) 187 | - Crate level documentation ([#124](https://github.com/s2-streamstore/s2-sdk-rust/issues/124)) 188 | 189 | ### Miscellaneous Tasks 190 | 191 | - Rename `[lib]` to `s2` ([#120](https://github.com/s2-streamstore/s2-sdk-rust/issues/120)) 192 | - *(release)* 0.5.0 193 | 194 | ## [0.4.1] - 2024-12-17 195 | 196 | ### Bug Fixes 197 | 198 | - Deadlock potential due to `await`s in append_session's `select!` loop ([#115](https://github.com/s2-streamstore/s2-sdk-rust/issues/115)) 199 | 200 | ## [0.4.0] - 2024-12-16 201 | 202 | ### Features 203 | 204 | - Add `FencingToken::generate` method ([#110](https://github.com/s2-streamstore/s2-sdk-rust/issues/110)) 205 | - Return `StreamInfo` from `BasinClient::create_stream` ([#114](https://github.com/s2-streamstore/s2-sdk-rust/issues/114)) 206 | 207 | ### Miscellaneous Tasks 208 | 209 | - Remove `GH_TOKEN` use to clone submodule in CI ([#109](https://github.com/s2-streamstore/s2-sdk-rust/issues/109)) 210 | - Proto up-to-date check ([#112](https://github.com/s2-streamstore/s2-sdk-rust/issues/112)) 211 | - Upgrade proto 
([#111](https://github.com/s2-streamstore/s2-sdk-rust/issues/111)) 212 | - Add examples for API ([#113](https://github.com/s2-streamstore/s2-sdk-rust/issues/113)) 213 | - Add `README.md` ([#116](https://github.com/s2-streamstore/s2-sdk-rust/issues/116)) 214 | 215 | ## [0.3.1] - 2024-12-12 216 | 217 | ### Miscellaneous Tasks 218 | 219 | - Switch on `missing_docs` ([#106](https://github.com/s2-streamstore/s2-sdk-rust/issues/106)) 220 | 221 | ## [0.3.0] - 2024-12-11 222 | 223 | ### Features 224 | 225 | - Return reconfigured stream ([#95](https://github.com/s2-streamstore/s2-sdk-rust/issues/95)) 226 | - Implement `SequencedRecord::as_command_record` ([#96](https://github.com/s2-streamstore/s2-sdk-rust/issues/96)) 227 | - Make protoc requirement optional ([#103](https://github.com/s2-streamstore/s2-sdk-rust/issues/103)) 228 | 229 | ### Bug Fixes 230 | 231 | - Tonic-side-effect version ([#102](https://github.com/s2-streamstore/s2-sdk-rust/issues/102)) 232 | 233 | ### Miscellaneous Tasks 234 | 235 | - Update proto and associated types for non-optional `start_seq_num` ([#97](https://github.com/s2-streamstore/s2-sdk-rust/issues/97)) 236 | - `CommandRecord::Fence` requires `FencingToken` even if empty ([#98](https://github.com/s2-streamstore/s2-sdk-rust/issues/98)) 237 | - Rm serde ([#99](https://github.com/s2-streamstore/s2-sdk-rust/issues/99)) 238 | - Lower `max_append_inflight_bytes` default 239 | - Move sync_docs to separate repository ([#101](https://github.com/s2-streamstore/s2-sdk-rust/issues/101)) 240 | 241 | ## [0.2.0] - 2024-12-04 242 | 243 | ### Features 244 | 245 | - Redo endpoint logic ([#39](https://github.com/s2-streamstore/s2-sdk-rust/issues/39)) ([#40](https://github.com/s2-streamstore/s2-sdk-rust/issues/40)) 246 | - Metered_size impl ([#51](https://github.com/s2-streamstore/s2-sdk-rust/issues/51)) 247 | - Allow custom tonic connectors & expose more errors ([#54](https://github.com/s2-streamstore/s2-sdk-rust/issues/54)) 248 | - Implement lingering for append record stream ([#55](https://github.com/s2-streamstore/s2-sdk-rust/issues/55)) 249 | - Read session resumption ([#64](https://github.com/s2-streamstore/s2-sdk-rust/issues/64)) 250 | - Pre-validate append record batch ([#72](https://github.com/s2-streamstore/s2-sdk-rust/issues/72)) 251 | - Retryable `append_session` + side-effect logic 252 | - Validate fencing token length ([#87](https://github.com/s2-streamstore/s2-sdk-rust/issues/87)) 253 | - S2_request_token header (exercise idempotence) ([#86](https://github.com/s2-streamstore/s2-sdk-rust/issues/86)) 254 | 255 | ### Bug Fixes 256 | 257 | - Only connect lazily (remove option to connect eagerly) ([#49](https://github.com/s2-streamstore/s2-sdk-rust/issues/49)) 258 | - Update `HostEndpoints::from_env` with new spec ([#58](https://github.com/s2-streamstore/s2-sdk-rust/issues/58)) 259 | - Add Send bound to Streaming wrapper ([#63](https://github.com/s2-streamstore/s2-sdk-rust/issues/63)) 260 | - Validate append input when converting from sdk type to api ([#65](https://github.com/s2-streamstore/s2-sdk-rust/issues/65)) 261 | - Limit retries when read resumes but stream keeps erroring ([#66](https://github.com/s2-streamstore/s2-sdk-rust/issues/66)) 262 | - Retry on deadline exceeded ([#67](https://github.com/s2-streamstore/s2-sdk-rust/issues/67)) 263 | - Remove `ConnectionError` in favour of pre-processing ([#68](https://github.com/s2-streamstore/s2-sdk-rust/issues/68)) 264 | - Rename 'max_retries' to 'max_attempts' 265 | - Validate `types::AppendRecord` for metered size 
([#79](https://github.com/s2-streamstore/s2-sdk-rust/issues/79)) 266 | - Adapt to recent gRPC interface updates ([#84](https://github.com/s2-streamstore/s2-sdk-rust/issues/84)) 267 | - Use `if_exists` for delete basin/stream ([#85](https://github.com/s2-streamstore/s2-sdk-rust/issues/85)) 268 | - `append_session` inner loop while condition ([#91](https://github.com/s2-streamstore/s2-sdk-rust/issues/91)) 269 | 270 | ### Documentation 271 | 272 | - ConnectError 273 | 274 | ### Testing 275 | 276 | - `fencing_token` and `match_seq_num` for `AppendRecordsBatchStream` ([#77](https://github.com/s2-streamstore/s2-sdk-rust/issues/77)) 277 | 278 | ### Miscellaneous Tasks 279 | 280 | - Rename `ClientError` to `ConnectError` ([#47](https://github.com/s2-streamstore/s2-sdk-rust/issues/47)) 281 | - Make `ReadLimit` fields pub 282 | - Add clippy to CI ([#50](https://github.com/s2-streamstore/s2-sdk-rust/issues/50)) 283 | - Expose Aborted as an error variant ([#52](https://github.com/s2-streamstore/s2-sdk-rust/issues/52)) 284 | - Expose tonic Internal error message ([#53](https://github.com/s2-streamstore/s2-sdk-rust/issues/53)) 285 | - Refactor errors to return tonic::Status ([#57](https://github.com/s2-streamstore/s2-sdk-rust/issues/57)) 286 | - Conversion from `HostCloud` for `HostEndpoints` ([#59](https://github.com/s2-streamstore/s2-sdk-rust/issues/59)) 287 | - Rm unneeded async ([#62](https://github.com/s2-streamstore/s2-sdk-rust/issues/62)) 288 | - Create LICENSE 289 | - Update Cargo.toml with license 290 | - Update license for sync_docs 291 | - Add expect messages instead of unwraps ([#69](https://github.com/s2-streamstore/s2-sdk-rust/issues/69)) 292 | - Make `ClientConfig` fields private + revise docs ([#73](https://github.com/s2-streamstore/s2-sdk-rust/issues/73)) 293 | - Whoops, max_attempts -> with_max_attempts 294 | - Endpoints re-rejig ([#70](https://github.com/s2-streamstore/s2-sdk-rust/issues/70)) 295 | - Add back `S2Endpoints::from_env()` ([#74](https://github.com/s2-streamstore/s2-sdk-rust/issues/74)) 296 | - Example from_env 297 | - Assertions instead of errors for batch capacity & size ([#75](https://github.com/s2-streamstore/s2-sdk-rust/issues/75)) 298 | - Simplify s2_request_token creation 299 | - Remove `bytesize` dependency ([#89](https://github.com/s2-streamstore/s2-sdk-rust/issues/89)) 300 | - Update proto ([#93](https://github.com/s2-streamstore/s2-sdk-rust/issues/93)) 301 | 302 | ## [0.1.0] - 2024-11-06 303 | 304 | ### Features 305 | 306 | - Implement `BasinService/{ListStreams, GetBasinConfig}` ([#3](https://github.com/s2-streamstore/s2-sdk-rust/issues/3)) 307 | - Implement `BasinService/{CreateStream, GetStreamConfig}` ([#8](https://github.com/s2-streamstore/s2-sdk-rust/issues/8)) 308 | - Implement `AccountService/{ListBasins, DeleteBasin}` ([#10](https://github.com/s2-streamstore/s2-sdk-rust/issues/10)) 309 | - Implement `BasinService` ([#12](https://github.com/s2-streamstore/s2-sdk-rust/issues/12)) 310 | - Add request timeout ([#14](https://github.com/s2-streamstore/s2-sdk-rust/issues/14)) 311 | - Display impl for types::BasinState ([#18](https://github.com/s2-streamstore/s2-sdk-rust/issues/18)) 312 | - Implement `StreamService` (complete) ([#16](https://github.com/s2-streamstore/s2-sdk-rust/issues/16)) 313 | - Implement `AppendRecordStream` with batching support ([#24](https://github.com/s2-streamstore/s2-sdk-rust/issues/24)) 314 | - Enable tls config and make connection uri depend on env ([#25](https://github.com/s2-streamstore/s2-sdk-rust/issues/25)) 315 | - Doc reuse 
([#32](https://github.com/s2-streamstore/s2-sdk-rust/issues/32)) 316 | - Support for overriding user-agent ([#33](https://github.com/s2-streamstore/s2-sdk-rust/issues/33)) 317 | - Sync rpc - sdk wrapper docs ([#34](https://github.com/s2-streamstore/s2-sdk-rust/issues/34)) 318 | 319 | ### Bug Fixes 320 | 321 | - Use usize for ListBasins ([#15](https://github.com/s2-streamstore/s2-sdk-rust/issues/15)) 322 | - Make all errors public ([#20](https://github.com/s2-streamstore/s2-sdk-rust/issues/20)) 323 | 324 | ### Miscellaneous Tasks 325 | 326 | - Update proto submodule ([#7](https://github.com/s2-streamstore/s2-sdk-rust/issues/7)) 327 | - Replace url with http::uri::Uri ([#6](https://github.com/s2-streamstore/s2-sdk-rust/issues/6)) 328 | - Update `HAS_NO_SIDE_EFFECTS` to `IDEMPOTENCY_LEVEL` ([#11](https://github.com/s2-streamstore/s2-sdk-rust/issues/11)) 329 | - FromStr impl to convert str to StorageClass enum ([#13](https://github.com/s2-streamstore/s2-sdk-rust/issues/13)) 330 | - Move `get_basin_config`, `reconfigure_basin` to `AccountService` ([#17](https://github.com/s2-streamstore/s2-sdk-rust/issues/17)) 331 | - Remove usage of deprecated `tonic_build` method ([#21](https://github.com/s2-streamstore/s2-sdk-rust/issues/21)) 332 | - Add+feature-gate serde Serialize/Deserialize derives ([#22](https://github.com/s2-streamstore/s2-sdk-rust/issues/22)) 333 | - Deps 334 | - Updated repo for proto submodule ([#38](https://github.com/s2-streamstore/s2-sdk-rust/issues/38)) 335 | - Http2 adaptive window [S2-412] ([#41](https://github.com/s2-streamstore/s2-sdk-rust/issues/41)) 336 | - Add CI action ([#44](https://github.com/s2-streamstore/s2-sdk-rust/issues/44)) 337 | - Add release action ([#45](https://github.com/s2-streamstore/s2-sdk-rust/issues/45)) 338 | 339 | 340 | -------------------------------------------------------------------------------- /src/batching.rs: -------------------------------------------------------------------------------- 1 | //! Append records batching stream. 2 | //! 3 | //! [`StreamClient::append_session`] accepts a stream of [`AppendInput`]s which 4 | //! requires a user to batch records into [`AppendRecordBatch`]es. This module 5 | //! provides a way to smartly batch [`AppendRecord`]s based on size limits and 6 | //! linger duration. 7 | //! 8 | //! The stream enforces the provided fencing token (if any) and auto-increments 9 | //! matching sequence number for concurrency control. 10 | //! 11 | //! # Example usage 12 | //! 13 | //! ```no_run 14 | //! # use s2::client::*; 15 | //! # use s2::types::*; 16 | //! # use s2::batching::*; 17 | //! # use std::time::Duration; 18 | //! # let config = ClientConfig::new("token"); 19 | //! # let basin: BasinName = "my-favorite-basin".parse().unwrap(); 20 | //! # let stream_client = StreamClient::new(config, basin, "stream"); 21 | //! # let fencing_token = FencingToken::generate(16).unwrap(); 22 | //! let append_records_stream = futures::stream::iter([ 23 | //! AppendRecord::new("hello").unwrap(), 24 | //! AppendRecord::new("bye").unwrap(), 25 | //! // ... 26 | //! ]); 27 | //! 28 | //! let batching_opts = AppendRecordsBatchingOpts::new() 29 | //! .with_max_batch_records(100) 30 | //! .with_linger(Duration::from_millis(100)) 31 | //! .with_fencing_token(Some(fencing_token)) 32 | //! .with_match_seq_num(Some(10)); 33 | //! 34 | //! let batching_stream = AppendRecordsBatchingStream::new( 35 | //! append_records_stream, 36 | //! batching_opts, 37 | //! ); 38 | //! 39 | //! # let _ = async move { 40 | //! 
let ack_stream = stream_client.append_session(batching_stream).await?;
41 | //! # return Ok::<(), ClientError>(()); };
42 | //! ```
43 | //!
44 | //! [`StreamClient::append_session`]: crate::client::StreamClient::append_session
45 | //! [`AppendInput`]: crate::types::AppendInput
46 | //! [`AppendRecordBatch`]: crate::types::AppendRecordBatch
47 | //! [`AppendRecord`]: crate::types::AppendRecord
48 | 
49 | use std::{
50 |     pin::Pin,
51 |     task::{Context, Poll},
52 |     time::Duration,
53 | };
54 | 
55 | use futures::{Stream, StreamExt};
56 | 
57 | use crate::types;
58 | 
59 | /// Options to configure batching scheme for [`AppendRecordsBatchingStream`].
60 | #[derive(Debug, Clone)]
61 | pub struct AppendRecordsBatchingOpts {
62 |     max_batch_records: usize,
63 |     #[cfg(test)]
64 |     max_batch_bytes: u64,
65 |     match_seq_num: Option<u64>,
66 |     fencing_token: Option<types::FencingToken>,
67 |     linger_duration: Duration,
68 | }
69 | 
70 | impl Default for AppendRecordsBatchingOpts {
71 |     fn default() -> Self {
72 |         Self {
73 |             max_batch_records: 1000,
74 |             #[cfg(test)]
75 |             max_batch_bytes: types::AppendRecordBatch::MAX_BYTES,
76 |             match_seq_num: None,
77 |             fencing_token: None,
78 |             linger_duration: Duration::from_millis(5),
79 |         }
80 |     }
81 | }
82 | 
83 | impl AppendRecordsBatchingOpts {
84 |     /// Construct an options struct with defaults.
85 |     pub fn new() -> Self {
86 |         Self::default()
87 |     }
88 | 
89 |     /// Maximum number of records in a batch.
90 |     pub fn with_max_batch_records(self, max_batch_records: usize) -> Self {
91 |         assert!(
92 |             max_batch_records > 0 && max_batch_records <= types::AppendRecordBatch::MAX_CAPACITY,
93 |             "Batch capacity must be between 1 and 1000"
94 |         );
95 |         Self {
96 |             max_batch_records,
97 |             ..self
98 |         }
99 |     }
100 | 
101 |     /// Maximum size of a batch in bytes.
102 |     #[cfg(test)]
103 |     pub fn with_max_batch_bytes(self, max_batch_bytes: u64) -> Self {
104 |         assert!(
105 |             max_batch_bytes > 0 && max_batch_bytes <= types::AppendRecordBatch::MAX_BYTES,
106 |             "Batch capacity must be between 1 byte and 1 MiB"
107 |         );
108 |         Self {
109 |             max_batch_bytes,
110 |             ..self
111 |         }
112 |     }
113 | 
114 |     /// Enforce that the sequence number issued to the first record matches.
115 |     ///
116 |     /// This is incremented automatically for each batch.
117 |     pub fn with_match_seq_num(self, match_seq_num: Option<u64>) -> Self {
118 |         Self {
119 |             match_seq_num,
120 |             ..self
121 |         }
122 |     }
123 | 
124 |     /// Enforce a fencing token.
125 |     pub fn with_fencing_token(self, fencing_token: Option<types::FencingToken>) -> Self {
126 |         Self {
127 |             fencing_token,
128 |             ..self
129 |         }
130 |     }
131 | 
132 |     /// Linger duration for records before flushing.
133 |     ///
134 |     /// A linger duration of 5ms is set by default. Set to `Duration::ZERO`
135 |     /// to disable.
136 |     pub fn with_linger(self, linger_duration: impl Into<Duration>) -> Self {
137 |         Self {
138 |             linger_duration: linger_duration.into(),
139 |             ..self
140 |         }
141 |     }
142 | }
143 | 
144 | /// Wrap a stream of [`AppendRecord`]s as a stream of [`AppendInput`]s by
145 | /// smartly batching records together based on batching options provided by
146 | /// [`AppendRecordsBatchingOpts`].
147 | ///
148 | /// See the module level documentation for detailed usage.
149 | ///
150 | /// [`AppendRecord`]: crate::types::AppendRecord
151 | /// [`AppendInput`]: crate::types::AppendInput
152 | pub struct AppendRecordsBatchingStream(Pin<Box<dyn Stream<Item = types::AppendInput> + Send>>);
153 | 
154 | impl AppendRecordsBatchingStream {
155 |     /// Create a new batching stream.
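    ///
    /// A minimal sketch (mirroring the module-level example; the stream
    /// contents and options here are placeholders):
    ///
    /// ```no_run
    /// # use s2::batching::{AppendRecordsBatchingOpts, AppendRecordsBatchingStream};
    /// # use s2::types::AppendRecord;
    /// let records = futures::stream::iter([AppendRecord::new("hello").unwrap()]);
    /// let batching = AppendRecordsBatchingStream::new(records, AppendRecordsBatchingOpts::new());
    /// ```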
156 |     pub fn new<R, S>(stream: S, opts: AppendRecordsBatchingOpts) -> Self
157 |     where
158 |         R: 'static + Into<types::AppendRecord>,
159 |         S: 'static + Send + Stream<Item = R> + Unpin,
160 |     {
161 |         Self(Box::pin(append_records_batching_stream(stream, opts)))
162 |     }
163 | }
164 | 
165 | impl Stream for AppendRecordsBatchingStream {
166 |     type Item = types::AppendInput;
167 | 
168 |     fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
169 |         self.0.poll_next_unpin(cx)
170 |     }
171 | }
172 | 
173 | fn append_records_batching_stream<R, S>(
174 |     mut stream: S,
175 |     opts: AppendRecordsBatchingOpts,
176 | ) -> impl Stream<Item = types::AppendInput> + Send
177 | where
178 |     R: Into<types::AppendRecord>,
179 |     S: 'static + Send + Stream<Item = R> + Unpin,
180 | {
181 |     async_stream::stream! {
182 |         let mut terminated = false;
183 |         let mut batch_builder = BatchBuilder::new(&opts);
184 | 
185 |         let batch_deadline = tokio::time::sleep(Duration::ZERO);
186 |         tokio::pin!(batch_deadline);
187 | 
188 |         while !terminated {
189 |             while !batch_builder.is_full() {
190 |                 if batch_builder.len() == 1 {
191 |                     // Start the timer when the first record is added.
192 |                     batch_deadline
193 |                         .as_mut()
194 |                         .reset(tokio::time::Instant::now() + opts.linger_duration);
195 |                 }
196 | 
197 |                 tokio::select! {
198 |                     biased;
199 |                     next = stream.next() => {
200 |                         if let Some(record) = next {
201 |                             batch_builder.push(record);
202 |                         } else {
203 |                             terminated = true;
204 |                             break;
205 |                         }
206 |                     },
207 |                     _ = &mut batch_deadline, if !batch_builder.is_empty() => {
208 |                         break;
209 |                     }
210 |                 };
211 |             }
212 | 
213 |             if let Some(input) = batch_builder.flush() {
214 |                 yield input;
215 |             }
216 |         }
217 |     }
218 | }
219 | 
220 | struct BatchBuilder<'a> {
221 |     opts: &'a AppendRecordsBatchingOpts,
222 |     peeked_record: Option<types::AppendRecord>,
223 |     next_match_seq_num: Option<u64>,
224 |     batch: types::AppendRecordBatch,
225 | }
226 | 
227 | impl<'a> BatchBuilder<'a> {
228 |     pub fn new<'b: 'a>(opts: &'b AppendRecordsBatchingOpts) -> Self {
229 |         Self {
230 |             peeked_record: None,
231 |             next_match_seq_num: opts.match_seq_num,
232 |             batch: Self::new_batch(opts),
233 |             opts,
234 |         }
235 |     }
236 | 
237 |     #[cfg(not(test))]
238 |     fn new_batch(opts: &AppendRecordsBatchingOpts) -> types::AppendRecordBatch {
239 |         types::AppendRecordBatch::with_max_capacity(opts.max_batch_records)
240 |     }
241 | 
242 |     #[cfg(test)]
243 |     fn new_batch(opts: &AppendRecordsBatchingOpts) -> types::AppendRecordBatch {
244 |         types::AppendRecordBatch::with_max_capacity_and_bytes(
245 |             opts.max_batch_records,
246 |             opts.max_batch_bytes,
247 |         )
248 |     }
249 | 
250 |     pub fn push(&mut self, record: impl Into<types::AppendRecord>) {
251 |         if let Err(record) = self.batch.push(record) {
252 |             let ret = self.peeked_record.replace(record);
253 |             assert_eq!(ret, None);
254 |         }
255 |     }
256 | 
257 |     pub fn is_empty(&self) -> bool {
258 |         self.batch.is_empty()
259 |     }
260 | 
261 |     pub fn len(&self) -> usize {
262 |         self.batch.len()
263 |     }
264 | 
265 |     pub fn is_full(&self) -> bool {
266 |         self.batch.is_full() || self.peeked_record.is_some()
267 |     }
268 | 
269 |     pub fn flush(&mut self) -> Option<types::AppendInput> {
270 |         let ret = if self.batch.is_empty() {
271 |             None
272 |         } else {
273 |             let match_seq_num = self.next_match_seq_num;
274 |             if let Some(next_match_seq_num) = self.next_match_seq_num.as_mut() {
275 |                 *next_match_seq_num += self.batch.len() as u64;
276 |             }
277 | 
278 |             let records = std::mem::replace(&mut self.batch, Self::new_batch(self.opts));
279 |             Some(types::AppendInput {
280 |                 records,
281 |                 match_seq_num,
282 |                 fencing_token: self.opts.fencing_token.clone(),
283 |             })
284 |         };
285 | 
286 |         if let Some(record) = self.peeked_record.take() {
287 |             self.push(record);
288 |         }
289 | 
290 |         // If the peeked record could not be moved into the batch, it doesn't
291 |         // fit size limits. This shouldn't happen though, since each append
292 |         // record is validated for size before creating it.
293 |         assert_eq!(self.peeked_record, None);
294 | 
295 |         ret
296 |     }
297 | }
298 | 
299 | #[cfg(test)]
300 | mod tests {
301 |     use std::time::Duration;
302 | 
303 |     use bytes::Bytes;
304 |     use futures::StreamExt as _;
305 |     use rstest::rstest;
306 |     use tokio::sync::mpsc;
307 |     use tokio_stream::wrappers::UnboundedReceiverStream;
308 | 
309 |     use super::{AppendRecordsBatchingOpts, AppendRecordsBatchingStream};
310 |     use crate::types::{self, AppendInput, AppendRecordBatch};
311 | 
312 |     #[rstest]
313 |     #[case(Some(2), None)]
314 |     #[case(None, Some(30))]
315 |     #[case(Some(2), Some(100))]
316 |     #[case(Some(10), Some(30))]
317 |     #[tokio::test]
318 |     async fn test_append_record_batching_mechanics(
319 |         #[case] max_batch_records: Option<usize>,
320 |         #[case] max_batch_bytes: Option<u64>,
321 |     ) {
322 |         let stream_iter = (0..100)
323 |             .map(|i| {
324 |                 let body = format!("r_{i}");
325 |                 if let Some(max_batch_size) = max_batch_bytes {
326 |                     types::AppendRecord::with_max_bytes(max_batch_size, body)
327 |                 } else {
328 |                     types::AppendRecord::new(body)
329 |                 }
330 |                 .unwrap()
331 |             })
332 |             .collect::<Vec<_>>();
333 |         let stream = futures::stream::iter(stream_iter);
334 | 
335 |         let mut opts = AppendRecordsBatchingOpts::new().with_linger(Duration::ZERO);
336 |         if let Some(max_batch_records) = max_batch_records {
337 |             opts = opts.with_max_batch_records(max_batch_records);
338 |         }
339 |         if let Some(max_batch_size) = max_batch_bytes {
340 |             opts = opts.with_max_batch_bytes(max_batch_size);
341 |         }
342 | 
343 |         let batch_stream = AppendRecordsBatchingStream::new(stream, opts);
344 | 
345 |         let batches = batch_stream
346 |             .map(|batch| batch.records)
347 |             .collect::<Vec<_>>()
348 |             .await;
349 | 
350 |         let mut i = 0;
351 |         for batch in batches {
352 |             assert_eq!(batch.len(), 2);
353 |             for record in batch {
354 |                 assert_eq!(record.into_parts().body, format!("r_{i}").into_bytes());
355 |                 i += 1;
356 |             }
357 |         }
358 |     }
359 | 
360 |     #[tokio::test(start_paused = true)]
361 |     async fn test_append_record_batching_linger() {
362 |         let (stream_tx, stream_rx) = mpsc::unbounded_channel::<types::AppendRecord>();
363 |         let mut i = 0;
364 | 
365 |         let size_limit = 40;
366 | 
367 |         let collect_batches_handle = tokio::spawn(async move {
368 |             let batch_stream = AppendRecordsBatchingStream::new(
369 |                 UnboundedReceiverStream::new(stream_rx),
370 |                 AppendRecordsBatchingOpts::new()
371 |                     .with_linger(Duration::from_secs(2))
372 |                     .with_max_batch_records(3)
373 |                     .with_max_batch_bytes(size_limit),
374 |             );
375 | 
376 |             batch_stream
377 |                 .map(|batch| {
378 |                     batch
379 |                         .records
380 |                         .into_iter()
381 |                         .map(|rec| rec.into_parts().body)
382 |                         .collect::<Vec<_>>()
383 |                 })
384 |                 .collect::<Vec<_>>()
385 |                 .await
386 |         });
387 | 
388 |         let mut send_next = |padding: Option<&str>| {
389 |             let mut record =
390 |                 types::AppendRecord::with_max_bytes(size_limit, format!("r_{i}")).unwrap();
391 |             if let Some(padding) = padding {
392 |                 // The padding exists just to increase the size of record in
393 |                 // order to test the size limits.
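                // With `size_limit` = 40, a padded record is expected to land
                // in a batch of its own (see `expected_batches` below).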
394 |                 record = record
395 |                     .with_headers(vec![types::Header::new("padding", padding.to_owned())])
396 |                     .unwrap();
397 |             }
398 |             stream_tx.send(record).unwrap();
399 |             i += 1;
400 |         };
401 | 
402 |         async fn sleep_secs(secs: u64) {
403 |             let dur = Duration::from_secs(secs) + Duration::from_millis(10);
404 |             tokio::time::sleep(dur).await;
405 |         }
406 | 
407 |         send_next(None);
408 |         send_next(None);
409 | 
410 |         sleep_secs(2).await;
411 | 
412 |         send_next(None);
413 | 
414 |         // Waiting for a short time before sending next record.
415 |         sleep_secs(1).await;
416 | 
417 |         send_next(None);
418 | 
419 |         sleep_secs(1).await;
420 | 
421 |         // Checking batch count limits here. The first 3 records should be
422 |         // flushed immediately.
423 |         send_next(None);
424 |         send_next(None);
425 |         send_next(None);
426 |         send_next(None);
427 | 
428 |         // Waiting for a long time before sending any records.
429 |         sleep_secs(200).await;
430 | 
431 |         // Checking size limits here. The first record should be flushed
432 |         // immediately.
433 |         send_next(Some("large string"));
434 |         send_next(None);
435 | 
436 |         std::mem::drop(stream_tx); // Should close the stream
437 | 
438 |         let batches = collect_batches_handle.await.unwrap();
439 | 
440 |         let expected_batches: Vec<Vec<Bytes>> = vec![
441 |             vec!["r_0".into(), "r_1".into()],
442 |             vec!["r_2".into(), "r_3".into()],
443 |             vec!["r_4".into(), "r_5".into(), "r_6".into()],
444 |             vec!["r_7".into()],
445 |             vec!["r_8".into()],
446 |             vec!["r_9".into()],
447 |         ];
448 | 
449 |         assert_eq!(batches, expected_batches);
450 |     }
451 | 
452 |     #[tokio::test]
453 |     #[should_panic]
454 |     async fn test_append_record_batching_panic_size_limits() {
455 |         let size_limit = 1;
456 |         let record =
457 |             types::AppendRecord::with_max_bytes(size_limit, "too long to fit into size limits")
458 |                 .unwrap();
459 | 
460 |         let mut batch_stream = AppendRecordsBatchingStream::new(
461 |             futures::stream::iter([record]),
462 |             AppendRecordsBatchingOpts::new().with_max_batch_bytes(size_limit),
463 |         );
464 | 
465 |         let _ = batch_stream.next().await;
466 |     }
467 | 
468 |     #[tokio::test]
469 |     async fn test_append_record_batching_append_input_opts() {
470 |         let test_record = types::AppendRecord::new("a").unwrap();
471 | 
472 |         let total_records = 12;
473 |         let test_records = (0..total_records)
474 |             .map(|_| test_record.clone())
475 |             .collect::<Vec<_>>();
476 | 
477 |         let expected_fencing_token: types::FencingToken = "hello".parse().unwrap();
478 |         let mut expected_match_seq_num = 10;
479 | 
480 |         let num_batch_records = 3;
481 | 
482 |         let batch_stream = AppendRecordsBatchingStream::new(
483 |             futures::stream::iter(test_records),
484 |             AppendRecordsBatchingOpts::new()
485 |                 .with_max_batch_records(num_batch_records)
486 |                 .with_fencing_token(Some(expected_fencing_token.clone()))
487 |                 .with_match_seq_num(Some(expected_match_seq_num)),
488 |         );
489 | 
490 |         let batches = batch_stream.collect::<Vec<_>>().await;
491 | 
492 |         assert_eq!(batches.len(), total_records / num_batch_records);
493 | 
494 |         let expected_batch =
495 |             AppendRecordBatch::try_from_iter((0..num_batch_records).map(|_| test_record.clone()))
496 |                 .unwrap();
497 | 
498 |         for input in batches {
499 |             let AppendInput {
500 |                 records,
501 |                 match_seq_num,
502 |                 fencing_token,
503 |             } = input;
504 |             assert_eq!(records, expected_batch);
505 |             assert_eq!(fencing_token.as_ref(), Some(&expected_fencing_token));
506 |             assert_eq!(match_seq_num, Some(expected_match_seq_num));
507 |             expected_match_seq_num += num_batch_records as u64;
508 |         }
509 |     }
510 | }
511 | 
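// A worked illustration of `flush`'s `match_seq_num` bookkeeping (hypothetical
// values): starting from `with_match_seq_num(Some(10))` with three-record
// batches, successive `AppendInput`s carry `match_seq_num` 10, 13, 16, ...,
// since each flush advances the expectation by the flushed batch's length.
// This is exactly what `test_append_record_batching_append_input_opts` above
// asserts.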
--------------------------------------------------------------------------------
/src/append_session.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 |     collections::VecDeque,
3 |     ops::{DerefMut, Range},
4 |     sync::Arc,
5 |     time::Duration,
6 | };
7 | 
8 | use futures::StreamExt;
9 | use tokio::{
10 |     sync::{Mutex, mpsc, mpsc::Permit},
11 |     time::Instant,
12 | };
13 | use tokio_muxt::{CoalesceMode, MuxTimer};
14 | use tokio_stream::wrappers::ReceiverStream;
15 | use tonic::Status;
16 | use tonic_side_effect::FrameSignal;
17 | use tracing::{debug, warn};
18 | 
19 | use crate::{
20 |     client::{AppendRetryPolicy, ClientError, StreamClient},
21 |     service::{
22 |         ServiceStreamingResponse,
23 |         stream::{AppendSessionServiceRequest, AppendSessionStreamingResponse},
24 |     },
25 |     types::{self, MeteredBytes, RETRY_AFTER_MS_METADATA_KEY},
26 | };
27 | 
28 | async fn connect(
29 |     stream_client: &StreamClient,
30 |     frame_signal: FrameSignal,
31 |     compression: bool,
32 | ) -> Result<
33 |     (
34 |         mpsc::Sender<types::AppendInput>,
35 |         ServiceStreamingResponse<AppendSessionStreamingResponse>,
36 |     ),
37 |     ClientError,
38 | > {
39 |     frame_signal.reset();
40 |     let (input_tx, input_rx) = mpsc::channel(10);
41 |     let service_req = AppendSessionServiceRequest::new(
42 |         stream_client
43 |             .inner
44 |             .frame_monitoring_stream_service_client(frame_signal.clone()),
45 |         &stream_client.stream,
46 |         ReceiverStream::new(input_rx),
47 |         compression,
48 |     );
49 | 
50 |     Ok((input_tx, stream_client.inner.send(service_req).await?))
51 | }
52 | 
53 | struct InflightBatch {
54 |     start: Instant,
55 |     metered_bytes: u64,
56 |     inner: types::AppendInput,
57 | }
58 | 
59 | #[derive(Debug, Clone, Copy, PartialEq, Eq)]
60 | enum TimerEvent {
61 |     MetricUpdate,
62 |     BatchDeadline,
63 | }
64 | 
65 | const N_TIMER_VARIANTS: usize = 2;
66 | const MAX_BATCH_SIZE: u64 = 1024 * 1024;
67 | 
68 | impl From<TimerEvent> for usize {
69 |     fn from(event: TimerEvent) -> Self {
70 |         match event {
71 |             TimerEvent::MetricUpdate => 0,
72 |             TimerEvent::BatchDeadline => 1,
73 |         }
74 |     }
75 | }
76 | 
77 | impl From<usize> for TimerEvent {
78 |     fn from(value: usize) -> Self {
79 |         match value {
80 |             0 => TimerEvent::MetricUpdate,
81 |             1 => TimerEvent::BatchDeadline,
82 |             _ => panic!("invalid ordinal"),
83 |         }
84 |     }
85 | }
86 | 
87 | struct AppendState<S>
88 | where
89 |     S: 'static + Send + Unpin + futures::Stream<Item = types::AppendInput>,
90 | {
91 |     /// Append batches which are "inflight" currently, and have not yet received acknowledgement.
92 |     inflight: VecDeque<InflightBatch>,
93 | 
94 |     /// Size of `inflight` queue in `metered_bytes`.
95 |     inflight_size: u64,
96 | 
97 |     /// Stream of `AppendInput` from client.
98 |     request_stream: S,
99 | 
100 |     /// Number of records received from client, over the course of this append session.
101 |     total_records: usize,
102 | 
103 |     /// Number of acknowledged records from S2, over the course of this append session.
104 |     total_records_acknowledged: usize,
105 | 
106 |     /// Used to temporarily store the most recent `AppendInput` from the client stream.
107 |     stashed_request: Option<types::AppendInput>,
108 | }
109 | 
110 | /// Handle S2 acknowledgment by forwarding it to the client,
111 | /// and updating the inflight data.
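/// For example (hypothetical numbers): if the oldest inflight batch holds
/// three records and S2 acknowledges `start.seq_num = 7`, `end.seq_num = 10`,
/// that batch is popped, `inflight_size` shrinks by the batch's metered bytes,
/// the ack is forwarded through `permit`, and `7..10` is returned.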
112 | fn ack_and_pop(
113 |     s2_ack: types::AppendAck,
114 |     inflight: &mut VecDeque<InflightBatch>,
115 |     inflight_size: &mut u64,
116 |     permit: Permit<'_, Result<types::AppendAck, ClientError>>,
117 | ) -> Range<u64> {
118 |     let corresponding_batch = inflight.pop_front().expect("inflight should not be empty");
119 | 
120 |     assert_eq!(
121 |         (s2_ack.end.seq_num - s2_ack.start.seq_num) as usize,
122 |         corresponding_batch.inner.records.len(),
123 |         "number of acknowledged records from S2 should equal amount from first inflight batch"
124 |     );
125 | 
126 |     *inflight_size -= corresponding_batch.metered_bytes;
127 |     let ack_range = s2_ack.start.seq_num..s2_ack.end.seq_num;
128 | 
129 |     permit.send(Ok(s2_ack));
130 | 
131 |     ack_range
132 | }
133 | 
134 | async fn resend(
135 |     request_timeout: Duration,
136 |     inflight: &mut VecDeque<InflightBatch>,
137 |     inflight_size: &mut u64,
138 |     s2_input_tx: mpsc::Sender<types::AppendInput>,
139 |     s2_ack_stream: &mut ServiceStreamingResponse<AppendSessionStreamingResponse>,
140 |     total_records_acknowledged: &mut usize,
141 |     output_tx: mpsc::Sender<Result<types::AppendAck, ClientError>>,
142 | ) -> Result<(), ClientError> {
143 |     debug!(
144 |         inflight_len = inflight.len(),
145 |         inflight_bytes = inflight_size,
146 |         "resending"
147 |     );
148 | 
149 |     let mut resend_index = 0;
150 |     let mut resend_tx_finished = false;
151 |     let mut stashed_ack = None;
152 | 
153 |     let timer = MuxTimer::<N_TIMER_VARIANTS>::default();
154 |     tokio::pin!(timer);
155 | 
156 |     while !inflight.is_empty() {
157 |         tokio::select! {
158 |             (event_ord, _deadline) = &mut timer, if timer.is_armed() => {
159 |                 match TimerEvent::from(event_ord) {
160 |                     TimerEvent::BatchDeadline => Err(ClientError::Service(Status::cancelled("client: hit deadline (`request_timeout`) waiting for an append acknowledgement")))?,
161 |                     _ => unreachable!("only batch deadline timer in resend mode")
162 |                 }
163 |             }
164 | 
165 |             s2_permit = s2_input_tx.reserve(), if !resend_tx_finished => {
166 |                 let s2_permit = s2_permit.map_err(|_| ClientError::Service(Status::unavailable("client: s2 server disconnected")))?;
167 |                 match inflight.get(resend_index) {
168 |                     Some(batch) => {
169 |                         timer.as_mut().fire_at(TimerEvent::BatchDeadline, batch.start + request_timeout, CoalesceMode::Earliest);
170 |                         s2_permit.send(batch.inner.clone());
171 |                         resend_index += 1;
172 |                     },
173 |                     None => resend_tx_finished = true
174 |                 }
175 |             },
176 | 
177 |             next_ack = s2_ack_stream.next(), if stashed_ack.is_none() => {
178 |                 stashed_ack = Some(next_ack.ok_or(ClientError::Service(Status::internal("client: response stream closed early")))?);
179 |             }
180 | 
181 |             client_permit = output_tx.reserve(), if stashed_ack.is_some() => {
182 |                 let ack_range = ack_and_pop(
183 |                     stashed_ack.take().expect("stashed ack")?,
184 |                     inflight,
185 |                     inflight_size,
186 |                     client_permit.map_err(|_| ClientError::Service(Status::cancelled("client: disconnected")))?
187 |                 );
188 | 
189 |                 *total_records_acknowledged += (ack_range.end - ack_range.start) as usize;
190 |                 resend_index -= 1;
191 | 
192 |                 // Adjust next timer.
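                // (The deadline is re-armed for the new head of the queue, or
                // cancelled when nothing is inflight. E.g. with a 10s
                // `request_timeout` and batches sent at t=0s and t=2s, the
                // armed deadline moves from t=10s to t=12s once the first
                // ack arrives.)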
                match inflight.front() {
                    Some(batch) => timer.as_mut().fire_at(
                        TimerEvent::BatchDeadline,
                        batch.start + request_timeout,
                        CoalesceMode::Latest
                    ),
                    None => timer.as_mut().cancel(TimerEvent::BatchDeadline),
                };
            }
        }
    }

    assert!(stashed_ack.is_none());
    assert_eq!(resend_index, 0);

    debug!("finished resending");

    Ok(())
}

async fn session_inner<S>(
    state: Arc<Mutex<AppendState<S>>>,
    frame_signal: FrameSignal,
    stream_client: StreamClient,
    output_tx: mpsc::Sender<Result<types::AppendAck, ClientError>>,
    compression: bool,
) -> Result<(), ClientError>
where
    S: 'static + Send + Unpin + futures::Stream<Item = types::AppendInput>,
{
    let mut lock = state.lock().await;
    let AppendState {
        inflight,
        inflight_size,
        request_stream,
        total_records,
        total_records_acknowledged,
        stashed_request,
    } = lock.deref_mut();

    assert!(*inflight_size <= stream_client.inner.config.max_append_inflight_bytes);

    let (s2_input_tx, mut s2_ack_stream) =
        connect(&stream_client, frame_signal.clone(), compression).await?;
    let batch_ack_deadline = stream_client.inner.config.request_timeout;

    if !inflight.is_empty() {
        resend(
            batch_ack_deadline,
            inflight,
            inflight_size,
            s2_input_tx.clone(),
            &mut s2_ack_stream,
            total_records_acknowledged,
            output_tx.clone(),
        )
        .await?;

        frame_signal.reset();

        assert!(inflight.is_empty());
        assert_eq!(*inflight_size, 0);
        assert_eq!(total_records, total_records_acknowledged);
    }

    let timer = MuxTimer::<N_TIMER_VARIANTS>::default();
    tokio::pin!(timer);

    let mut client_input_terminated = false;
    let mut stashed_ack = None;

    while !(client_input_terminated
        && inflight.is_empty()
        && stashed_ack.is_none()
        && stashed_request.is_none())
    {
        tokio::select! {
            (event_ord, _deadline) = &mut timer, if timer.is_armed() => {
                match TimerEvent::from(event_ord) {
                    TimerEvent::MetricUpdate => todo!(),
                    TimerEvent::BatchDeadline => Err(ClientError::Service(Status::cancelled("client: hit deadline (`request_timeout`) waiting for an append acknowledgement")))?
                }
            }

            next_request = request_stream.next(), if
                stashed_request.is_none() &&
                !client_input_terminated &&
                *inflight_size + MAX_BATCH_SIZE <= stream_client.inner.config.max_append_inflight_bytes
            => {
                match next_request {
                    Some(append_input) => *stashed_request = Some(append_input),
                    None => client_input_terminated = true
                }
            }

            s2_permit = s2_input_tx.reserve(), if stashed_request.is_some() => {
                let s2_permit = s2_permit.map_err(|_| ClientError::Service(Status::unavailable("client: s2 server disconnected")))?;
                let append_input = stashed_request.take().expect("stashed request");
                let metered_bytes = append_input.metered_bytes();
                let start = Instant::now();

                *inflight_size += metered_bytes;
                *total_records += append_input.records.len();
                inflight.push_back(InflightBatch {
                    start,
                    metered_bytes,
                    inner: append_input.clone()
                });

                s2_permit.send(append_input);

                timer.as_mut().fire_at(TimerEvent::BatchDeadline, start + batch_ack_deadline, CoalesceMode::Earliest);
            }

            next_ack = s2_ack_stream.next(), if stashed_ack.is_none() => {
                stashed_ack = Some(next_ack.ok_or(ClientError::Service(Status::internal("client: response stream closed early")))?);
            }

            client_permit = output_tx.reserve(), if stashed_ack.is_some() => {
                let ack_range = ack_and_pop(
                    stashed_ack.take().expect("stashed ack")?,
                    inflight,
                    inflight_size,
                    client_permit.map_err(|_| ClientError::Service(Status::cancelled("client: disconnected")))?
                );

                *total_records_acknowledged += (ack_range.end - ack_range.start) as usize;

                // Safe to reset the frame signal whenever we reach a sync point between
                // records received and acknowledged.
                if inflight.is_empty() {
                    assert_eq!(total_records, total_records_acknowledged);
                    frame_signal.reset()
                }

                // Adjust next timer.
                match inflight.front() {
                    Some(batch) => timer.as_mut().fire_at(
                        TimerEvent::BatchDeadline,
                        batch.start + batch_ack_deadline,
                        CoalesceMode::Latest
                    ),
                    None => timer.as_mut().cancel(TimerEvent::BatchDeadline),
                };
            }
        }
    }

    assert!(stashed_ack.is_none());
    assert!(stashed_request.is_none());
    assert!(client_input_terminated);

    assert_eq!(total_records, total_records_acknowledged);
    assert_eq!(inflight.len(), 0);
    assert_eq!(*inflight_size, 0);

    Ok(())
}

pub(crate) async fn manage_session<S>(
    stream_client: StreamClient,
    input: S,
    output_tx: mpsc::Sender<Result<types::AppendAck, ClientError>>,
    compression: bool,
) where
    S: 'static + Send + Unpin + futures::Stream<Item = types::AppendInput>,
{
    let state = Arc::new(Mutex::new(AppendState {
        inflight: Default::default(),
        inflight_size: Default::default(),
        request_stream: input,
        total_records: 0,
        total_records_acknowledged: 0,
        stashed_request: None,
    }));

    let frame_signal = FrameSignal::new();
    let mut attempts = 1;
    let mut acks_out: usize = 0;
    loop {
        match session_inner(
            state.clone(),
            frame_signal.clone(),
            stream_client.clone(),
            output_tx.clone(),
            compression,
        )
        .await
        {
            Ok(()) => return,
            Err(e) => {
                let new_acks_out = state.lock().await.total_records_acknowledged;
                if acks_out < new_acks_out {
                    // Progress was made during the last attempt, so reset the retry counter.
                    acks_out = new_acks_out;
                    attempts = 1;
                }

                let now = Instant::now();
                let remaining_attempts = attempts < stream_client.inner.config.max_attempts;
                let (enough_time, retryable_error, retry_backoff_duration) = {
                    let mut retry_backoff_duration =
                        stream_client.inner.config.retry_backoff_duration;
                    let retryable_error = match &e {
                        ClientError::Service(status) => {
                            if let Some(value) = status.metadata().get(RETRY_AFTER_MS_METADATA_KEY)
                            {
                                if let Some(retry_after_ms) = value
                                    .to_str()
                                    .ok()
                                    .map(|v| v.parse())
                                    .transpose()
                                    .ok()
                                    .flatten()
                                {
                                    retry_backoff_duration = Duration::from_millis(retry_after_ms);
                                } else {
                                    warn!(
                                        "Failed to convert {RETRY_AFTER_MS_METADATA_KEY} metadata to u64. \
                                         Falling back to default backoff duration: {:?}",
                                        retry_backoff_duration
                                    );
                                }
                            }
                            matches!(
                                status.code(),
                                tonic::Code::Unavailable
                                    | tonic::Code::DeadlineExceeded
                                    | tonic::Code::Unknown
                                    | tonic::Code::ResourceExhausted
                            )
                        }
                        ClientError::Conversion(_) => false,
                    };
                    let enough_time = state
                        .lock()
                        .await
                        .inflight
                        .front()
                        .map(|state| {
                            let next_deadline =
                                state.start + stream_client.inner.config.request_timeout;
                            now + retry_backoff_duration < next_deadline
                        })
                        .unwrap_or(true);
                    (enough_time, retryable_error, retry_backoff_duration)
                };
                let policy_compliant = {
                    match stream_client.inner.config.append_retry_policy {
                        AppendRetryPolicy::All => true,
                        AppendRetryPolicy::NoSideEffects => {
                            // If no request frame has been produced, we conclude that the failing
                            // append never left this host, so it is safe to retry.
                            !frame_signal.is_signalled()
                        }
                    }
                };

                if remaining_attempts && enough_time && retryable_error && policy_compliant {
                    tokio::time::sleep(retry_backoff_duration).await;
                    attempts += 1;
                    debug!(attempts, ?e, "retrying");
                } else {
                    debug!(
                        ?e,
                        remaining_attempts,
                        enough_time,
                        retryable_error,
                        policy_compliant,
                        "not retrying"
                    );
                    _ = output_tx.send(Err(e)).await;
                    return;
                }
            }
        }
    }
}
--------------------------------------------------------------------------------
/src/client.rs:
--------------------------------------------------------------------------------
//! SDK client implementation.
//!
//! The module defines three clients:
//!
//! |                  | Operations               | API Service      |
//! |------------------|--------------------------|------------------|
//! | [`Client`]       | Account level operations | [AccountService] |
//! | [`BasinClient`]  | Basin level operations   | [BasinService]   |
//! | [`StreamClient`] | Stream level operations  | [StreamService]  |
//!
//! To interact with any client, you need an authentication token, which can be
//! generated by the web console at [s2.dev]. The token is passed to the client
//! via a [`ClientConfig`].
//!
//! Along with the authentication token, a [`ClientConfig`] defines other
//! request parameters such as timeouts, S2 endpoints, etc.
//!
//! A client can be created using the corresponding `new()` method. To avoid
//! creating multiple connections to each service, [`Client::basin_client`] can be
//! used to create a [`BasinClient`], and [`BasinClient::stream_client`] can be
//! used to create a [`StreamClient`].
//!
//! **Note:** Even though client creation is cheap, prefer storing and reusing a
//! [`BasinClient`] over calling [`Client::basin_client`] repeatedly: the account
//! endpoint may differ from the basin endpoint, in which case each call sets up
//! a new connection. See [`S2Endpoints`].
//!
//! [AccountService]: https://s2.dev/docs/interface/grpc#accountservice
//! [BasinService]: https://s2.dev/docs/interface/grpc#basinservice
//! [StreamService]: https://s2.dev/docs/interface/grpc#streamservice
//! [s2.dev]: https://s2.dev/dashboard
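//!
//! # Example
//!
//! A minimal sketch of wiring the three clients together; it assumes a valid
//! token in the `S2_ACCESS_TOKEN` environment variable and uses placeholder
//! basin/stream names:
//!
//! ```no_run
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! use s2::{
//!     client::{Client, ClientConfig},
//!     types::BasinName,
//! };
//!
//! let token = std::env::var("S2_ACCESS_TOKEN")?;
//! let client = Client::new(ClientConfig::new(token));
//!
//! // Store and reuse the basin client rather than recreating it per request.
//! let basin: BasinName = "my-basin".parse()?;
//! let basin_client = client.basin_client(basin);
//! let stream_client = basin_client.stream_client("my-stream");
//! # Ok(())
//! # }
//! ```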

use std::{env::VarError, fmt::Display, str::FromStr, time::Duration};

use backon::{BackoffBuilder, ConstantBuilder, Retryable};
use futures::StreamExt;
use http::{HeaderValue, uri::Authority};
use hyper_util::client::legacy::connect::HttpConnector;
use secrecy::SecretString;
use sync_docs::sync_docs;
use tokio::{sync::mpsc, time::sleep};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{
    metadata::AsciiMetadataValue,
    transport::{Channel, ClientTlsConfig, Endpoint},
};
use tonic_side_effect::{FrameSignal, RequestFrameMonitor};
use tracing::warn;

use crate::{
    api::{
        account_service_client::AccountServiceClient, basin_service_client::BasinServiceClient,
        stream_service_client::StreamServiceClient,
    },
    append_session,
    service::{
        ServiceRequest, ServiceStreamingResponse, Streaming,
        account::{
            CreateBasinServiceRequest, DeleteBasinServiceRequest, GetBasinConfigServiceRequest,
            IssueAccessTokenServiceRequest, ListAccessTokensServiceRequest,
            ListBasinsServiceRequest, ReconfigureBasinServiceRequest,
            RevokeAccessTokenServiceRequest,
        },
        basin::{
            CreateStreamServiceRequest, DeleteStreamServiceRequest, GetStreamConfigServiceRequest,
            ListStreamsServiceRequest, ReconfigureStreamServiceRequest,
        },
        send_request,
        stream::{
            AppendServiceRequest, CheckTailServiceRequest, ReadServiceRequest,
            ReadSessionServiceRequest, ReadSessionStreamingResponse,
        },
    },
    types::{
        self, MIB_BYTES, MeteredBytes, RETRY_AFTER_MS_METADATA_KEY, ReadStart, StreamPosition,
    },
};

const DEFAULT_CONNECTOR: Option<HttpConnector> = None;

/// S2 cloud environment to connect with.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum S2Cloud {
    /// S2 running on AWS.
    Aws,
}

impl S2Cloud {
    const AWS: &'static str = "aws";

    fn as_str(&self) -> &'static str {
        match self {
            Self::Aws => Self::AWS,
        }
    }
}

impl Display for S2Cloud {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for S2Cloud {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.eq_ignore_ascii_case(Self::AWS) {
            Ok(Self::Aws)
        } else {
            Err(s.to_owned())
        }
    }
}

/// Endpoint for connecting to an S2 basin.
#[derive(Debug, Clone)]
pub enum BasinEndpoint {
    /// Parent zone for basins.
    /// DNS is used to route to the correct cell for the basin.
    ParentZone(Authority),
    /// Direct cell endpoint.
    /// The `S2-Basin` header is included in requests to specify the basin,
    /// which is expected to be hosted by this cell.
    Direct(Authority),
}

/// Endpoints for the S2 environment.
///
/// You can find the S2 endpoints in our [documentation].
///
/// [documentation]: https://s2.dev/docs/interface/endpoints
#[derive(Debug, Clone)]
pub struct S2Endpoints {
    /// Used by `AccountService` requests.
    pub account: Authority,
    /// Used by `BasinService` and `StreamService` requests.
    pub basin: BasinEndpoint,
}
/// Retry policy for append requests.
#[derive(Debug, Clone)]
pub enum AppendRetryPolicy {
    /// Retry all eligible failures encountered during an append.
    ///
    /// This could result in append batches being duplicated on the stream.
    All,

    /// Retry only failures with no side effects.
    ///
    /// Will not attempt to retry failures where it cannot be concluded whether
    /// an append may become durable, in order to prevent duplicates.
    NoSideEffects,
}

impl S2Endpoints {
    /// Get S2 endpoints for the specified cloud.
    pub fn for_cloud(cloud: S2Cloud) -> Self {
        Self {
            account: format!("{cloud}.s2.dev")
                .try_into()
                .expect("valid authority"),
            basin: BasinEndpoint::ParentZone(
                format!("b.{cloud}.s2.dev")
                    .try_into()
                    .expect("valid authority"),
            ),
        }
    }

    /// Get S2 endpoints for the specified cell.
    pub fn for_cell(
        cloud: S2Cloud,
        cell_id: impl Into<String>,
    ) -> Result<Self, http::uri::InvalidUri> {
        let cell_endpoint: Authority = format!("{}.o.{cloud}.s2.dev", cell_id.into()).try_into()?;
        Ok(Self {
            account: cell_endpoint.clone(),
            basin: BasinEndpoint::Direct(cell_endpoint),
        })
    }

    /// Get S2 endpoints from environment variables.
    ///
    /// The following environment variables are used:
    /// - `S2_CLOUD`: Valid S2 cloud name. Defaults to AWS.
    /// - `S2_ACCOUNT_ENDPOINT`: Overrides the account endpoint.
    /// - `S2_BASIN_ENDPOINT`: Overrides the basin endpoint. The prefix `"{basin}."` indicates the
    ///   basin endpoint is `ParentZone`, else `Direct`.
    pub fn from_env() -> Result<Self, String> {
        let cloud: S2Cloud = std::env::var("S2_CLOUD")
            .ok()
            .as_deref()
            .unwrap_or(S2Cloud::AWS)
            .parse()
            .map_err(|cloud| format!("Invalid S2_CLOUD: {cloud}"))?;

        let mut endpoints = Self::for_cloud(cloud);

        match std::env::var("S2_ACCOUNT_ENDPOINT") {
            Ok(spec) => {
                endpoints.account = spec
                    .as_str()
                    .try_into()
                    .map_err(|_| format!("Invalid S2_ACCOUNT_ENDPOINT: {spec}"))?;
            }
            Err(VarError::NotPresent) => {}
            Err(VarError::NotUnicode(_)) => {
                return Err("Invalid S2_ACCOUNT_ENDPOINT: not Unicode".to_owned());
            }
        }

        match std::env::var("S2_BASIN_ENDPOINT") {
            Ok(spec) => {
                endpoints.basin = if let Some(parent_zone) = spec.strip_prefix("{basin}.") {
                    BasinEndpoint::ParentZone(
                        parent_zone
                            .try_into()
                            .map_err(|e| format!("Invalid S2_BASIN_ENDPOINT ({e}): {spec}"))?,
                    )
                } else {
                    BasinEndpoint::Direct(
                        spec.as_str()
                            .try_into()
                            .map_err(|e| format!("Invalid S2_BASIN_ENDPOINT ({e}): {spec}"))?,
                    )
                }
            }
            Err(VarError::NotPresent) => {}
            Err(VarError::NotUnicode(_)) => {
                return Err("Invalid S2_BASIN_ENDPOINT: not Unicode".to_owned());
            }
        }

        Ok(endpoints)
    }
}
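// Example (sketch): connecting directly to a single cell rather than the
// default parent-zone endpoints; the cell id and token here are placeholders.
//
//     let endpoints = S2Endpoints::for_cell(S2Cloud::Aws, "my-cell-id")?;
//     let config = ClientConfig::new(token).with_endpoints(endpoints);
//
// Equivalently, `S2Endpoints::from_env()` derives the same configuration from
// the `S2_CLOUD`, `S2_ACCOUNT_ENDPOINT`, and `S2_BASIN_ENDPOINT` variables.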
/// Client configuration.
#[derive(Debug, Clone)]
pub struct ClientConfig {
    pub(crate) token: SecretString,
    pub(crate) endpoints: S2Endpoints,
    pub(crate) connection_timeout: Duration,
    pub(crate) request_timeout: Duration,
    pub(crate) user_agent: HeaderValue,
    pub(crate) append_retry_policy: AppendRetryPolicy,
    #[cfg(feature = "connector")]
    pub(crate) uri_scheme: http::uri::Scheme,
    pub(crate) retry_backoff_duration: Duration,
    pub(crate) max_attempts: usize,
    pub(crate) max_append_inflight_bytes: u64,
    pub(crate) compression: bool,
}

impl ClientConfig {
    /// Initialize a default client configuration with the specified authentication token.
    pub fn new(token: impl Into<String>) -> Self {
        Self {
            token: token.into().into(),
            endpoints: S2Endpoints::for_cloud(S2Cloud::Aws),
            connection_timeout: Duration::from_secs(3),
            request_timeout: Duration::from_secs(5),
            user_agent: "s2-sdk-rust".parse().expect("valid user agent"),
            append_retry_policy: AppendRetryPolicy::All,
            #[cfg(feature = "connector")]
            uri_scheme: http::uri::Scheme::HTTPS,
            retry_backoff_duration: Duration::from_millis(100),
            max_attempts: 3,
            max_append_inflight_bytes: 100 * MIB_BYTES,
            compression: false,
        }
    }

    /// S2 endpoints to connect to.
    pub fn with_endpoints(self, host_endpoints: impl Into<S2Endpoints>) -> Self {
        Self {
            endpoints: host_endpoints.into(),
            ..self
        }
    }

    /// Timeout for connecting and transparently reconnecting. Defaults to 3s.
    pub fn with_connection_timeout(self, connection_timeout: impl Into<Duration>) -> Self {
        Self {
            connection_timeout: connection_timeout.into(),
            ..self
        }
    }

    /// Timeout for a particular request. Defaults to 5s.
    pub fn with_request_timeout(self, request_timeout: impl Into<Duration>) -> Self {
        Self {
            request_timeout: request_timeout.into(),
            ..self
        }
    }

    /// User agent. Defaults to `s2-sdk-rust`. Feel free to say hi.
    pub fn with_user_agent(self, user_agent: HeaderValue) -> Self {
        Self { user_agent, ..self }
    }

    /// Retry policy for appends.
    /// Only relevant if `max_attempts > 1`.
    ///
    /// Defaults to retrying all failures, meaning duplicates on a stream are possible.
    pub fn with_append_retry_policy(
        self,
        append_retry_policy: impl Into<AppendRetryPolicy>,
    ) -> Self {
        Self {
            append_retry_policy: append_retry_policy.into(),
            ..self
        }
    }

    /// Maximum total size of currently inflight (pending acknowledgment) append
    /// batches, per append session, as measured by the `MeteredBytes` formula.
    ///
    /// Must be at least 1 MiB. Defaults to 100 MiB.
    pub fn with_max_append_inflight_bytes(self, max_append_inflight_bytes: u64) -> Self {
        assert!(
            max_append_inflight_bytes >= MIB_BYTES,
            "max_append_inflight_bytes must be at least 1MiB"
        );
        Self {
            max_append_inflight_bytes,
            ..self
        }
    }

    /// URI scheme to use when connecting with a custom connector. Defaults to `https`.
    #[cfg(feature = "connector")]
    pub fn with_uri_scheme(self, uri_scheme: impl Into<http::uri::Scheme>) -> Self {
        Self {
            uri_scheme: uri_scheme.into(),
            ..self
        }
    }
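    // Example (sketch): the builder methods in this impl all consume and
    // return `Self`, so a fully customized configuration chains freely:
    //
    //     let config = ClientConfig::new(token)
    //         .with_request_timeout(Duration::from_secs(10))
    //         .with_max_attempts(5)
    //         .with_append_retry_policy(AppendRetryPolicy::NoSideEffects)
    //         .with_compression(true);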
    /// Backoff duration when retrying.
    /// Defaults to 100ms.
    /// Jitter is always applied.
    pub fn with_retry_backoff_duration(self, retry_backoff_duration: impl Into<Duration>) -> Self {
        Self {
            retry_backoff_duration: retry_backoff_duration.into(),
            ..self
        }
    }

    /// Maximum number of attempts per request.
    /// Setting it to 1 disables retrying.
    /// The default is to make 3 attempts.
    pub fn with_max_attempts(self, max_attempts: usize) -> Self {
        assert!(max_attempts > 0, "max attempts must be greater than 0");
        Self {
            max_attempts,
            ..self
        }
    }

    /// Configure compression for requests and responses.
    /// Disabled by default.
    pub fn with_compression(self, compression: bool) -> Self {
        Self {
            compression,
            ..self
        }
    }
}

/// Error from client operations.
#[derive(Debug, Clone, thiserror::Error)]
pub enum ClientError {
    /// SDK type conversion errors.
    ///
    /// Indicates an incompatibility between the SDK version and service.
    #[error(transparent)]
    Conversion(#[from] types::ConvertError),
    /// Error status from service.
    #[error(transparent)]
    Service(#[from] tonic::Status),
}

/// Client for account-level operations.
#[derive(Debug, Clone)]
pub struct Client {
    inner: ClientInner,
}

impl Client {
    /// Create a new SDK client.
    pub fn new(config: ClientConfig) -> Self {
        Self {
            inner: ClientInner::new(ClientKind::Account, config, DEFAULT_CONNECTOR),
        }
    }

    /// Create a new SDK client using a custom connector.
    #[cfg(feature = "connector")]
    pub fn new_with_connector<C>(config: ClientConfig, connector: C) -> Self
    where
        C: tower_service::Service<http::Uri> + Send + 'static,
        C::Response: hyper::rt::Read + hyper::rt::Write + Send + Unpin,
        C::Future: Send,
        C::Error: std::error::Error + Send + Sync + 'static,
    {
        Self {
            inner: ClientInner::new(ClientKind::Account, config, Some(connector)),
        }
    }
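    // Example (sketch): an account-level call. `ListBasinsRequest::new()` is
    // assumed here to mirror the builder-style constructors of the other
    // request types in `types`:
    //
    //     let client = Client::new(ClientConfig::new(token));
    //     let basins = client.list_basins(types::ListBasinsRequest::new()).await?;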
    /// Create a basin client for the given basin name.
    pub fn basin_client(&self, basin: types::BasinName) -> BasinClient {
        BasinClient {
            inner: self.inner.for_basin(basin),
        }
    }

    #[sync_docs]
    pub async fn list_basins(
        &self,
        req: types::ListBasinsRequest,
    ) -> Result<types::ListBasinsResponse, ClientError> {
        self.inner
            .send_retryable(ListBasinsServiceRequest::new(
                self.inner.account_service_client(),
                req,
            ))
            .await
    }

    #[sync_docs]
    pub async fn create_basin(
        &self,
        req: types::CreateBasinRequest,
    ) -> Result<types::BasinInfo, ClientError> {
        self.inner
            .send_retryable(CreateBasinServiceRequest::new(
                self.inner.account_service_client(),
                req,
            ))
            .await
    }

    #[sync_docs]
    pub async fn delete_basin(&self, req: types::DeleteBasinRequest) -> Result<(), ClientError> {
        self.inner
            .send_retryable(DeleteBasinServiceRequest::new(
                self.inner.account_service_client(),
                req,
            ))
            .await
    }

    #[sync_docs]
    pub async fn get_basin_config(
        &self,
        basin: types::BasinName,
    ) -> Result<types::BasinConfig, ClientError> {
        self.inner
            .send_retryable(GetBasinConfigServiceRequest::new(
                self.inner.account_service_client(),
                basin,
            ))
            .await
    }

    #[sync_docs]
    pub async fn reconfigure_basin(
        &self,
        req: types::ReconfigureBasinRequest,
    ) -> Result<types::BasinConfig, ClientError> {
        self.inner
            .send_retryable(ReconfigureBasinServiceRequest::new(
                self.inner.account_service_client(),
                req,
            ))
            .await
    }

    #[sync_docs]
    pub async fn issue_access_token(
        &self,
        info: types::AccessTokenInfo,
    ) -> Result<String, ClientError> {
        self.inner
            .send_retryable(IssueAccessTokenServiceRequest::new(
                self.inner.account_service_client(),
                info,
            ))
            .await
    }

    #[sync_docs]
    pub async fn revoke_access_token(
        &self,
        id: types::AccessTokenId,
    ) -> Result<types::AccessTokenInfo, ClientError> {
        self.inner
            .send_retryable(RevokeAccessTokenServiceRequest::new(
                self.inner.account_service_client(),
                id,
            ))
            .await
    }

    #[sync_docs]
    pub async fn list_access_tokens(
        &self,
        req: types::ListAccessTokensRequest,
    ) -> Result<types::ListAccessTokensResponse, ClientError> {
        self.inner
            .send_retryable(ListAccessTokensServiceRequest::new(
                self.inner.account_service_client(),
                req,
            ))
            .await
    }
}

/// Client for basin-level operations.
#[derive(Debug, Clone)]
pub struct BasinClient {
    inner: ClientInner,
}

impl BasinClient {
    /// Create a new basin client.
    pub fn new(config: ClientConfig, basin: types::BasinName) -> Self {
        Self {
            inner: ClientInner::new(ClientKind::Basin(basin), config, DEFAULT_CONNECTOR),
        }
    }

    /// Create a new basin client using a custom connector.
    #[cfg(feature = "connector")]
    pub fn new_with_connector<C>(
        config: ClientConfig,
        basin: types::BasinName,
        connector: C,
    ) -> Self
    where
        C: tower_service::Service<http::Uri> + Send + 'static,
        C::Response: hyper::rt::Read + hyper::rt::Write + Send + Unpin,
        C::Future: Send,
        C::Error: std::error::Error + Send + Sync + 'static,
    {
        Self {
            inner: ClientInner::new(ClientKind::Basin(basin), config, Some(connector)),
        }
    }
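    // Example (sketch): creating a stream in this basin. The
    // `CreateStreamRequest::new("my-stream")` constructor is assumed to follow
    // the same shape as `DeleteStreamRequest::new` from the crate examples:
    //
    //     let basin_client = BasinClient::new(config, basin);
    //     let info = basin_client
    //         .create_stream(types::CreateStreamRequest::new("my-stream"))
    //         .await?;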
    /// Create a new client for stream-level operations.
    pub fn stream_client(&self, stream: impl Into<String>) -> StreamClient {
        StreamClient {
            inner: self.inner.clone(),
            stream: stream.into(),
        }
    }

    #[sync_docs]
    pub async fn create_stream(
        &self,
        req: types::CreateStreamRequest,
    ) -> Result<types::StreamInfo, ClientError> {
        self.inner
            .send_retryable(CreateStreamServiceRequest::new(
                self.inner.basin_service_client(),
                req,
            ))
            .await
    }

    #[sync_docs]
    pub async fn list_streams(
        &self,
        req: types::ListStreamsRequest,
    ) -> Result<types::ListStreamsResponse, ClientError> {
        self.inner
            .send_retryable(ListStreamsServiceRequest::new(
                self.inner.basin_service_client(),
                req,
            ))
            .await
    }

    #[sync_docs]
    pub async fn get_stream_config(
        &self,
        stream: impl Into<String>,
    ) -> Result<types::StreamConfig, ClientError> {
        self.inner
            .send_retryable(GetStreamConfigServiceRequest::new(
                self.inner.basin_service_client(),
                stream,
            ))
            .await
    }

    #[sync_docs]
    pub async fn reconfigure_stream(
        &self,
        req: types::ReconfigureStreamRequest,
    ) -> Result<types::StreamConfig, ClientError> {
        self.inner
            .send(ReconfigureStreamServiceRequest::new(
                self.inner.basin_service_client(),
                req,
            ))
            .await
    }

    #[sync_docs]
    pub async fn delete_stream(&self, req: types::DeleteStreamRequest) -> Result<(), ClientError> {
        self.inner
            .send_retryable(DeleteStreamServiceRequest::new(
                self.inner.basin_service_client(),
                req,
            ))
            .await
    }
}

/// Client for stream-level operations.
#[derive(Debug, Clone)]
pub struct StreamClient {
    pub(crate) inner: ClientInner,
    pub(crate) stream: String,
}

impl StreamClient {
    /// Create a new stream client.
    pub fn new(config: ClientConfig, basin: types::BasinName, stream: impl Into<String>) -> Self {
        BasinClient::new(config, basin).stream_client(stream)
    }
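    // Example (sketch): checking the tail of the stream, i.e. the position
    // that the next appended record would occupy:
    //
    //     let stream_client = StreamClient::new(config, basin, "my-stream");
    //     let tail = stream_client.check_tail().await?;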
    /// Create a new stream client using a custom connector.
    #[cfg(feature = "connector")]
    pub fn new_with_connector<C>(
        config: ClientConfig,
        basin: types::BasinName,
        stream: impl Into<String>,
        connector: C,
    ) -> Self
    where
        C: tower_service::Service<http::Uri> + Send + 'static,
        C::Response: hyper::rt::Read + hyper::rt::Write + Send + Unpin,
        C::Future: Send,
        C::Error: std::error::Error + Send + Sync + 'static,
    {
        BasinClient::new_with_connector(config, basin, connector).stream_client(stream)
    }

    #[sync_docs]
    pub async fn check_tail(&self) -> Result<StreamPosition, ClientError> {
        self.inner
            .send_retryable(CheckTailServiceRequest::new(
                self.inner.stream_service_client(),
                &self.stream,
            ))
            .await
    }

    #[sync_docs]
    pub async fn read(&self, req: types::ReadRequest) -> Result<types::ReadOutput, ClientError> {
        self.inner
            .send_retryable(ReadServiceRequest::new(
                self.inner.stream_service_client(),
                &self.stream,
                req,
                self.inner.config.compression,
            ))
            .await
    }

    #[sync_docs]
    pub async fn read_session(
        &self,
        req: types::ReadSessionRequest,
    ) -> Result<Streaming<types::ReadOutput>, ClientError> {
        let request = ReadSessionServiceRequest::new(
            self.inner.stream_service_client(),
            &self.stream,
            req,
            self.inner.config.compression,
        );
        self.inner
            .send_retryable(request.clone())
            .await
            .map(|responses| {
                Box::pin(read_resumption_stream(
                    request,
                    responses,
                    self.inner.clone(),
                )) as _
            })
    }

    #[sync_docs]
    pub async fn append(&self, req: types::AppendInput) -> Result<types::AppendAck, ClientError> {
        let frame_signal = FrameSignal::new();
        self.inner
            .send_retryable(AppendServiceRequest::new(
                self.inner
                    .frame_monitoring_stream_service_client(frame_signal.clone()),
                self.inner.config.append_retry_policy.clone(),
                frame_signal,
                &self.stream,
                req,
                self.inner.config.compression,
            ))
            .await
    }

    #[sync_docs]
    #[allow(clippy::unused_async)]
    pub async fn append_session<S>(
        &self,
        req: S,
    ) -> Result<Streaming<types::AppendAck>, ClientError>
    where
        S: 'static + Send + Unpin + futures::Stream<Item = types::AppendInput>,
    {
        let (response_tx, response_rx) = mpsc::channel(10);
        _ = tokio::spawn(append_session::manage_session(
            self.clone(),
            req,
            response_tx,
            self.inner.config.compression,
        ));

        Ok(Box::pin(ReceiverStream::new(response_rx)))
    }
}

#[derive(Debug, Clone)]
enum ClientKind {
    Account,
    Basin(types::BasinName),
}

impl ClientKind {
    fn to_authority(&self, endpoints: &S2Endpoints) -> Authority {
        match self {
            ClientKind::Account => endpoints.account.clone(),
            ClientKind::Basin(basin) => match &endpoints.basin {
                BasinEndpoint::ParentZone(zone) => format!("{basin}.{zone}")
                    .try_into()
                    .expect("valid authority as basin pre-validated"),
                BasinEndpoint::Direct(endpoint) => endpoint.clone(),
            },
        }
    }
}

#[derive(Debug, Clone)]
pub(crate) struct ClientInner {
    kind: ClientKind,
    channel: Channel,
    pub(crate) config: ClientConfig,
}
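// Example (sketch) of how `ClientKind::to_authority` resolves endpoints: with
// the default AWS endpoints, a basin named `my-basin` under the parent zone
// `b.aws.s2.dev` resolves to the authority `my-basin.b.aws.s2.dev`, whereas a
// `Direct` cell endpoint is used verbatim and the basin name travels in the
// `S2-Basin` request header instead (see `ClientInner::send`).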
impl ClientInner {
    fn new<C>(kind: ClientKind, config: ClientConfig, connector: Option<C>) -> Self
    where
        C: tower_service::Service<http::Uri> + Send + 'static,
        C::Response: hyper::rt::Read + hyper::rt::Write + Send + Unpin,
        C::Future: Send,
        C::Error: std::error::Error + Send + Sync + 'static,
    {
        let authority = kind.to_authority(&config.endpoints);

        #[cfg(not(feature = "connector"))]
        let scheme = "https";
        #[cfg(feature = "connector")]
        let scheme = config.uri_scheme.as_str();

        let endpoint = format!("{scheme}://{authority}")
            .parse::<Endpoint>()
            .expect("previously validated endpoint scheme and authority")
            .user_agent(config.user_agent.clone())
            .expect("converting HeaderValue into HeaderValue")
            .http2_adaptive_window(true)
            .tls_config(
                ClientTlsConfig::default()
                    .with_webpki_roots()
                    .assume_http2(true),
            )
            .expect("valid TLS config")
            .connect_timeout(config.connection_timeout)
            .timeout(config.request_timeout);

        let channel = if let Some(connector) = connector {
            assert!(
                matches!(&config.endpoints.basin, BasinEndpoint::Direct(a) if a == &config.endpoints.account),
                "Connector only supported when connecting directly to a cell for account as well as basins"
            );
            endpoint.connect_with_connector_lazy(connector)
        } else {
            endpoint.connect_lazy()
        };

        Self {
            kind,
            channel,
            config,
        }
    }

    fn for_basin(&self, basin: types::BasinName) -> ClientInner {
        let current_authority = self.kind.to_authority(&self.config.endpoints);
        let new_kind = ClientKind::Basin(basin);
        let new_authority = new_kind.to_authority(&self.config.endpoints);
        if current_authority == new_authority {
            Self {
                kind: new_kind,
                ..self.clone()
            }
        } else {
            Self::new(new_kind, self.config.clone(), DEFAULT_CONNECTOR)
        }
    }

    pub(crate) async fn send<T: ServiceRequest>(
        &self,
        service_req: T,
    ) -> Result<T::Response, ClientError> {
        let basin_header = match (&self.kind, &self.config.endpoints.basin) {
            (ClientKind::Basin(basin), BasinEndpoint::Direct(_)) => {
                Some(AsciiMetadataValue::from_str(basin).expect("valid"))
            }
            _ => None,
        };
        send_request(service_req, &self.config.token, basin_header).await
    }

    async fn send_retryable_with_backoff<T: ServiceRequest + Clone>(
        &self,
        service_req: T,
        backoff_builder: impl BackoffBuilder,
    ) -> Result<T::Response, ClientError> {
        let retry_fn = || async { self.send(service_req.clone()).await };

        retry_fn
            .retry(backoff_builder)
            .when(|e| service_req.should_retry(e))
            .adjust(|e, backoff_duration| match e {
                ClientError::Service(s) => {
                    if let Some(value) = s.metadata().get(RETRY_AFTER_MS_METADATA_KEY) {
                        if let Some(retry_after_ms) = value
                            .to_str()
                            .ok()
                            .map(|v| v.parse())
                            .transpose()
                            .ok()
                            .flatten()
                        {
                            Some(Duration::from_millis(retry_after_ms))
                        } else {
                            warn!(
                                "Failed to convert {RETRY_AFTER_MS_METADATA_KEY} metadata to u64. \
                                 Falling back to default backoff duration: {:?}",
                                backoff_duration
                            );
                            backoff_duration
                        }
                    } else {
                        backoff_duration
                    }
                }
                _ => backoff_duration,
            })
            .await
    }

    pub(crate) async fn send_retryable<T: ServiceRequest + Clone>(
        &self,
        service_req: T,
    ) -> Result<T::Response, ClientError> {
        self.send_retryable_with_backoff(service_req, self.backoff_builder())
            .await
    }

    pub(crate) fn backoff_builder(&self) -> impl BackoffBuilder + use<> {
        ConstantBuilder::default()
            .with_delay(self.config.retry_backoff_duration)
            .with_max_times(self.config.max_attempts)
            .with_jitter()
    }

    fn account_service_client(&self) -> AccountServiceClient<Channel> {
        AccountServiceClient::new(self.channel.clone())
    }

    fn basin_service_client(&self) -> BasinServiceClient<Channel> {
        BasinServiceClient::new(self.channel.clone())
    }

    pub(crate) fn stream_service_client(&self) -> StreamServiceClient<Channel> {
        StreamServiceClient::new(self.channel.clone())
    }

    pub(crate) fn frame_monitoring_stream_service_client(
        &self,
        frame_signal: FrameSignal,
    ) -> StreamServiceClient<RequestFrameMonitor> {
        StreamServiceClient::new(RequestFrameMonitor::new(self.channel.clone(), frame_signal))
    }
}

fn read_resumption_stream(
    mut request: ReadSessionServiceRequest,
    mut responses: ServiceStreamingResponse<ReadSessionStreamingResponse>,
    client: ClientInner,
) -> impl Send + futures::Stream<Item = Result<types::ReadOutput, ClientError>> {
    let mut backoff = None;
    async_stream::stream! {
        while let Some(item) = responses.next().await {
            match item {
                Err(e) if request.should_retry(&e) => {
                    // Build a backoff schedule lazily, on the first retryable error.
                    if backoff.is_none() {
                        backoff = Some(client.backoff_builder().build());
                    }
                    if let Some(duration) = backoff.as_mut().and_then(|b| b.next()) {
                        sleep(duration).await;
                        if let Ok(new_responses) = client.send_retryable(request.clone()).await {
                            responses = new_responses;
                        } else {
                            yield Err(e);
                        }
                    } else {
                        yield Err(e);
                    }
                }
                item => {
                    if item.is_ok() {
                        backoff = None;
                    }
                    if let Ok(types::ReadOutput::Batch(types::SequencedRecordBatch { records })) = &item {
                        // Advance the resumption cursor past the records already delivered,
                        // and shrink the remaining limits accordingly, so a reconnect picks
                        // up exactly where this session left off.
                        let req = request.req_mut();
                        if let Some(record) = records.last() {
                            req.start = ReadStart::SeqNum(record.seq_num + 1);
                        }
                        if let Some(count) = req.limit.count.as_mut() {
                            *count = count.saturating_sub(records.len() as u64);
                        }
                        if let Some(bytes) = req.limit.bytes.as_mut() {
                            *bytes = bytes.saturating_sub(records.metered_bytes());
                        }
                    }
                    yield item;
                }
            }
        }
    }
}
--------------------------------------------------------------------------------