├── client ├── src │ ├── crypto │ │ ├── mod.rs │ │ └── cert.rs │ ├── utils │ │ ├── mod.rs │ │ └── net.rs │ ├── logging │ │ ├── mod.rs │ │ ├── connection.rs │ │ └── keep_alive.rs │ ├── streams │ │ ├── pubsub │ │ │ ├── mod.rs │ │ │ └── states.rs │ │ ├── request_reply │ │ │ ├── mod.rs │ │ │ └── states.rs │ │ ├── aliases.rs │ │ ├── mod.rs │ │ └── builder.rs │ ├── traits │ │ ├── mod.rs │ │ ├── keep_alive.rs │ │ ├── stream.rs │ │ └── try_into_u64.rs │ ├── lib.rs │ ├── prelude.rs │ ├── constants.rs │ ├── keep_alive │ │ ├── mod.rs │ │ ├── helpers.rs │ │ ├── connection_status.rs │ │ └── reqrep.rs │ ├── client │ │ ├── cloud │ │ │ ├── states.rs │ │ │ └── mod.rs │ │ ├── custom │ │ │ └── states.rs │ │ ├── mod.rs │ │ └── builder.rs │ ├── batching │ │ ├── message_batch.rs │ │ ├── mod.rs │ │ └── batch_config.rs │ └── connection.rs ├── ca.debug.der ├── ca.prod.der ├── examples │ ├── publish_chrono.rs │ ├── decompression.rs │ ├── publish.rs │ ├── subscribe.rs │ ├── compression.rs │ ├── subscribe_bincode.rs │ ├── publish_multitasking.rs │ ├── reply.rs │ ├── publish_bincode.rs │ ├── batching_and_compression.rs │ └── request.rs ├── CHANGELOG.md ├── README.md └── Cargo.toml ├── tools ├── src │ ├── commands │ │ ├── mod.rs │ │ └── gen_certs │ │ │ ├── validity_range.rs │ │ │ ├── mod.rs │ │ │ ├── key_pair.rs │ │ │ ├── cert_gen.rs │ │ │ └── certificate_builder.rs │ ├── lib.rs │ ├── traits.rs │ ├── main.rs │ └── cli.rs ├── CHANGELOG.md └── Cargo.toml ├── benchmarks ├── src │ ├── lib.rs │ ├── main.rs │ ├── args.rs │ ├── results.rs │ └── runner.rs └── Cargo.toml ├── .gitignore ├── tests ├── tests │ └── streams │ │ ├── main.rs │ │ ├── request_reply.rs │ │ └── helpers.rs └── Cargo.toml ├── log ├── CHANGELOG.md ├── src │ ├── tasks │ │ ├── mod.rs │ │ ├── flusher.rs │ │ └── cleaner.rs │ ├── error.rs │ ├── message │ │ ├── slice.rs │ │ ├── headers.rs │ │ └── mod.rs │ ├── config │ │ ├── flush_policy.rs │ │ └── mod.rs │ ├── index │ │ ├── entry.rs │ │ └── mod.rs │ └── data │ │ └── iterator.rs ├── 
examples │ └── basic_usage.rs ├── Cargo.toml ├── benches │ ├── write_benchmark.rs │ └── read_benchmark.rs └── tests │ └── helpers.rs ├── server ├── proxy.debug.der ├── proxy.prod.der ├── src │ ├── lib.rs │ ├── topic │ │ ├── config.rs │ │ └── mod.rs │ ├── main.rs │ ├── sink │ │ ├── mod.rs │ │ ├── filter.rs │ │ └── ordered.rs │ ├── args.rs │ ├── quic.rs │ └── cloud.rs ├── selium-server.service ├── CHANGELOG.md ├── Cargo.toml └── README.md ├── standard ├── src │ ├── traits │ │ ├── mod.rs │ │ ├── codec.rs │ │ └── compression.rs │ ├── compression │ │ ├── zstd │ │ │ ├── mod.rs │ │ │ ├── decomp.rs │ │ │ └── comp.rs │ │ ├── brotli │ │ │ ├── mod.rs │ │ │ ├── decomp.rs │ │ │ └── comp.rs │ │ ├── lz4 │ │ │ ├── mod.rs │ │ │ ├── comp.rs │ │ │ └── decomp.rs │ │ └── deflate │ │ │ ├── types.rs │ │ │ ├── mod.rs │ │ │ ├── decomp.rs │ │ │ └── comp.rs │ ├── lib.rs │ └── codecs │ │ ├── bytes_codec.rs │ │ ├── string_codec.rs │ │ └── bincode_codec.rs ├── CHANGELOG.md ├── Cargo.toml └── benches │ ├── codecs.rs │ └── compression.rs ├── protocol ├── src │ ├── operation.rs │ ├── error_codes.rs │ ├── lib.rs │ ├── request_id.rs │ ├── offset.rs │ ├── traits.rs │ ├── utils.rs │ └── topic_name.rs ├── CHANGELOG.md └── Cargo.toml ├── ci └── changelog.py ├── .github ├── workflows │ ├── audit_schedule.yml │ ├── audit.yml │ ├── check_release.yml │ ├── test.yml │ └── release.yml └── dependabot.yml └── Cargo.toml /client/src/crypto/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cert; 2 | -------------------------------------------------------------------------------- /client/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod net; 2 | -------------------------------------------------------------------------------- /tools/src/commands/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod gen_certs; 2 | 
-------------------------------------------------------------------------------- /client/src/logging/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod connection; 2 | pub mod keep_alive; 3 | -------------------------------------------------------------------------------- /tools/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod cli; 2 | pub mod commands; 3 | pub mod traits; 4 | -------------------------------------------------------------------------------- /benchmarks/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod args; 2 | pub mod results; 3 | pub mod runner; 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .cargo 2 | target/ 3 | **/certs/ 4 | .DS_Store 5 | **/logs/ 6 | log/path 7 | -------------------------------------------------------------------------------- /tests/tests/streams/main.rs: -------------------------------------------------------------------------------- 1 | mod helpers; 2 | mod pub_sub; 3 | mod request_reply; 4 | -------------------------------------------------------------------------------- /client/ca.debug.der: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/seliumlabs/selium/HEAD/client/ca.debug.der -------------------------------------------------------------------------------- /client/ca.prod.der: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/seliumlabs/selium/HEAD/client/ca.prod.der -------------------------------------------------------------------------------- /log/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## v0.1.0 4 | 5 | - Initial 
release of Selium Log 6 | -------------------------------------------------------------------------------- /server/proxy.debug.der: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/seliumlabs/selium/HEAD/server/proxy.debug.der -------------------------------------------------------------------------------- /server/proxy.prod.der: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/seliumlabs/selium/HEAD/server/proxy.prod.der -------------------------------------------------------------------------------- /tools/src/traits.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | 3 | pub trait CommandRunner { 4 | fn run(self) -> Result<()>; 5 | } 6 | -------------------------------------------------------------------------------- /log/src/tasks/mod.rs: -------------------------------------------------------------------------------- 1 | mod cleaner; 2 | mod flusher; 3 | 4 | pub use cleaner::CleanerTask; 5 | pub use flusher::FlusherTask; 6 | -------------------------------------------------------------------------------- /standard/src/traits/mod.rs: -------------------------------------------------------------------------------- 1 | //! Exports interfaces to allow developers to adapt their own custom codecs, compression, etc, 2 | //! for use with Selium. 3 | 4 | pub mod codec; 5 | pub mod compression; 6 | -------------------------------------------------------------------------------- /client/src/streams/pubsub/mod.rs: -------------------------------------------------------------------------------- 1 | //! Asynchronous Pub/Sub streams. 
2 | 3 | mod publisher; 4 | mod subscriber; 5 | 6 | pub(crate) mod states; 7 | pub use publisher::Publisher; 8 | pub use subscriber::Subscriber; 9 | -------------------------------------------------------------------------------- /client/src/streams/request_reply/mod.rs: -------------------------------------------------------------------------------- 1 | //! Synchronous Request/Reply streams. 2 | 3 | mod replier; 4 | mod requestor; 5 | 6 | pub(crate) mod states; 7 | pub use replier::Replier; 8 | pub use requestor::Requestor; 9 | -------------------------------------------------------------------------------- /client/src/streams/aliases.rs: -------------------------------------------------------------------------------- 1 | use selium_std::traits::compression::{Compress, Decompress}; 2 | use std::sync::Arc; 3 | 4 | pub type Comp = Arc; 5 | pub type Decomp = Arc; 6 | -------------------------------------------------------------------------------- /protocol/src/operation.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[doc(hidden)] 4 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 5 | pub enum Operation { 6 | Map(String), 7 | Filter(String), 8 | } 9 | -------------------------------------------------------------------------------- /tools/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## v0.1.0 4 | 5 | - Initial release of Selium 6 | 7 | ## v0.2.0 8 | 9 | - Added request-reply messaging pattern 10 | - Features for Selium Cloud 11 | - Bump dependency versions 12 | -------------------------------------------------------------------------------- /client/src/traits/mod.rs: -------------------------------------------------------------------------------- 1 | //! A collection of traits used by `Selium` and end-users. 
2 | 3 | mod keep_alive; 4 | mod stream; 5 | mod try_into_u64; 6 | 7 | pub use keep_alive::*; 8 | pub use stream::*; 9 | pub use try_into_u64::*; 10 | -------------------------------------------------------------------------------- /server/src/lib.rs: -------------------------------------------------------------------------------- 1 | use std::pin::Pin; 2 | 3 | use futures::Sink; 4 | 5 | pub mod args; 6 | #[cfg(feature = "__cloud")] 7 | mod cloud; 8 | pub mod quic; 9 | pub mod server; 10 | pub mod sink; 11 | pub mod topic; 12 | 13 | type BoxSink = Pin + Send>>; 14 | -------------------------------------------------------------------------------- /protocol/src/error_codes.rs: -------------------------------------------------------------------------------- 1 | pub const UNKNOWN_ERROR: u32 = 0x0; 2 | pub const SHUTDOWN_IN_PROGRESS: u32 = 0x1; 3 | pub const SHUTDOWN: u32 = 0x2; 4 | pub const STREAM_CLOSED_PREMATURELY: u32 = 0x3; 5 | pub const INVALID_TOPIC_NAME: u32 = 0x4; 6 | pub const REPLIER_ALREADY_BOUND: u32 = 0x5; 7 | pub const CLOUD_AUTH_FAILED: u32 = 0x6; 8 | -------------------------------------------------------------------------------- /server/src/topic/config.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | pub type SharedTopicConfig = Arc; 4 | 5 | #[derive(Debug)] 6 | pub struct TopicConfig { 7 | pub polling_interval: Duration, 8 | } 9 | 10 | impl TopicConfig { 11 | pub fn new(polling_interval: Duration) -> Self { 12 | Self { polling_interval } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /standard/src/compression/zstd/mod.rs: -------------------------------------------------------------------------------- 1 | //! Support for zstd, a [lossless data compression algorithm developed by 2 | //! Facebook](https://github.com/facebook/zstd). 3 | //! 4 | //! 
Adapts the [zstd] crate, a stable implementation of the zstd compression algorithm 5 | //! built in Rust for use with `Selium`. 6 | mod comp; 7 | mod decomp; 8 | 9 | pub use comp::*; 10 | pub use decomp::*; 11 | -------------------------------------------------------------------------------- /benchmarks/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use selium_benchmarks::{args::Args, runner::BenchmarkRunner}; 4 | 5 | #[tokio::main] 6 | async fn main() -> Result<()> { 7 | let args = Args::parse(); 8 | let runner = BenchmarkRunner::init().await?; 9 | let results = runner.run(args).await?; 10 | 11 | println!("{results}"); 12 | 13 | Ok(()) 14 | } 15 | -------------------------------------------------------------------------------- /protocol/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod bistream; 2 | mod codec; 3 | mod frame; 4 | mod offset; 5 | mod operation; 6 | mod request_id; 7 | mod topic_name; 8 | 9 | pub mod error_codes; 10 | pub mod traits; 11 | pub mod utils; 12 | 13 | pub use bistream::*; 14 | pub use codec::*; 15 | pub use frame::*; 16 | pub use offset::*; 17 | pub use operation::*; 18 | pub use request_id::*; 19 | pub use topic_name::*; 20 | -------------------------------------------------------------------------------- /protocol/src/request_id.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicU32, Ordering}; 2 | 3 | #[derive(Debug)] 4 | pub struct RequestId(AtomicU32); 5 | 6 | impl Default for RequestId { 7 | fn default() -> Self { 8 | Self(AtomicU32::new(0)) 9 | } 10 | } 11 | 12 | impl RequestId { 13 | pub fn next_id(&self) -> u32 { 14 | self.0.fetch_add(1, Ordering::Relaxed) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /benchmarks/Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "selium-benchmarks" 3 | version = "0.2.0" 4 | edition = "2021" 5 | 6 | [dependencies] 7 | selium = { path = "../client", features = ["std"] } 8 | selium-server = { path = "../server" } 9 | clap = { version = "4.4", features = ["derive"] } 10 | tokio = { version = "1.34", features = ["macros"] } 11 | futures = "0.3" 12 | anyhow = "1.0" 13 | num-format = "0.4" 14 | -------------------------------------------------------------------------------- /ci/changelog.py: -------------------------------------------------------------------------------- 1 | """Read the changelog for a given version.""" 2 | 3 | import re 4 | import sys 5 | 6 | if len(sys.argv) < 3: 7 | sys.exit(1) 8 | 9 | with open(sys.argv[1] + "/CHANGELOG.md", "r") as f: 10 | m = re.search("## v" + re.escape(sys.argv[2]) + "\n+(.+?)(?:##|$)", f.read(), re.DOTALL) 11 | if m is None: 12 | sys.exit(2) 13 | else: 14 | print(m.group(1).rstrip()) 15 | -------------------------------------------------------------------------------- /standard/src/compression/brotli/mod.rs: -------------------------------------------------------------------------------- 1 | //! Support for brotli, a [lossless data compression algorithm developed by 2 | //! Google](https://github.com/google/brotli). 3 | //! 4 | //! Adapts the [brotli] crate, a popular and stable implementation of the brotli compression algorithm 5 | //! built in Rust for use with `Selium`. 
6 | 7 | mod comp; 8 | mod decomp; 9 | 10 | pub use comp::*; 11 | pub use decomp::*; 12 | -------------------------------------------------------------------------------- /client/src/logging/connection.rs: -------------------------------------------------------------------------------- 1 | pub fn get_cloud_endpoint() { 2 | tracing::info!("Retrieving Selium server endpoint from Selium Cloud."); 3 | } 4 | 5 | pub fn connect_to_address(endpoint: &str) { 6 | tracing::info!(endpoint, "Connecting to remote address."); 7 | } 8 | 9 | pub fn successful_connection(endpoint: &str) { 10 | tracing::info!(endpoint, "Successfully connected to remote address."); 11 | } 12 | -------------------------------------------------------------------------------- /standard/src/compression/lz4/mod.rs: -------------------------------------------------------------------------------- 1 | //! Support for lz4, a [lossless data compression algorithm](https://github.com/lz4/lz4) that prioritizes 2 | //! comp/decomp speed over compression ratio. 3 | //! 4 | //! Adapts the [lz4_flex] crate, a performant implementation of the lz4 compression algorithm built 5 | //! in Rust for use with `Selium`. 6 | 7 | mod comp; 8 | mod decomp; 9 | 10 | pub use comp::*; 11 | pub use decomp::*; 12 | -------------------------------------------------------------------------------- /client/src/utils/net.rs: -------------------------------------------------------------------------------- 1 | use selium_std::errors::{ParseRemoteAddressError, Result}; 2 | use std::net::{SocketAddr, ToSocketAddrs}; 3 | 4 | pub(crate) fn get_socket_addrs(addr: &str) -> Result { 5 | let addr = addr 6 | .to_socket_addrs() 7 | .map_err(ParseRemoteAddressError::InvalidAddress)? 
8 | .next() 9 | .ok_or(ParseRemoteAddressError::NoAddressResolved)?; 10 | 11 | Ok(addr) 12 | } 13 | -------------------------------------------------------------------------------- /tools/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use selium_tools::cli::{Commands, ToolsCli}; 4 | use selium_tools::commands::gen_certs::GenCertsRunner; 5 | use selium_tools::traits::CommandRunner; 6 | 7 | fn main() -> Result<()> { 8 | let cli = ToolsCli::parse(); 9 | 10 | match cli.command { 11 | Commands::GenCerts(args) => GenCertsRunner::from(args).run()?, 12 | }; 13 | 14 | Ok(()) 15 | } 16 | -------------------------------------------------------------------------------- /client/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod client; 2 | mod streams; 3 | 4 | pub mod batching; 5 | pub mod constants; 6 | pub mod keep_alive; 7 | pub mod logging; 8 | pub mod prelude; 9 | pub mod traits; 10 | 11 | pub(crate) mod connection; 12 | pub(crate) mod crypto; 13 | pub(crate) mod utils; 14 | 15 | pub use client::*; 16 | pub use streams::*; 17 | 18 | pub mod std { 19 | //! Re-exports [selium_std] modules. 20 | pub use selium_std::*; 21 | } 22 | -------------------------------------------------------------------------------- /standard/src/compression/deflate/types.rs: -------------------------------------------------------------------------------- 1 | /// Used by [DeflateComp](crate::compression::deflate::DeflateComp) and 2 | /// [DeflateDecomp](crate::compression::deflate::DeflateDecomp) to specify the preferred DEFLATE 3 | /// implementation. 
4 | #[derive(Debug)] 5 | pub enum DeflateLibrary { 6 | Gzip, 7 | Zlib, 8 | } 9 | 10 | impl Default for DeflateLibrary { 11 | fn default() -> Self { 12 | Self::Gzip 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /protocol/src/offset.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 4 | pub enum Offset { 5 | FromBeginning(u64), 6 | FromEnd(u64), 7 | } 8 | 9 | impl Default for Offset { 10 | fn default() -> Self { 11 | Self::FromEnd(0) 12 | } 13 | } 14 | 15 | impl From for Offset { 16 | fn from(value: u64) -> Self { 17 | Self::FromBeginning(value) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /standard/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## v0.1.0 4 | 5 | - Initial release of Selium 6 | 7 | ## v0.2.0 8 | 9 | - Added connection reestablishment for failed connections 10 | - Features for Selium Cloud 11 | - Bump dependency versions 12 | 13 | ## v0.2.1 14 | 15 | - Improve RequestHandlerFailure error 16 | - Added BytesCodec to transmit raw bytes 17 | 18 | ## v0.2.2 19 | 20 | - New error types 21 | 22 | ## v0.2.3 23 | 24 | - Improved generic arguments for Codec traits 25 | -------------------------------------------------------------------------------- /protocol/src/traits.rs: -------------------------------------------------------------------------------- 1 | use std::pin::Pin; 2 | 3 | use futures::{stream::BoxStream, Sink}; 4 | 5 | pub trait ShutdownStream { 6 | fn shutdown_stream(&mut self); 7 | } 8 | 9 | pub trait ShutdownSink { 10 | fn shutdown_sink(&mut self); 11 | } 12 | 13 | impl<'a, T> ShutdownStream for BoxStream<'a, T> { 14 | fn shutdown_stream(&mut self) {} 15 | } 16 | 17 | impl ShutdownSink for Pin + Send>> { 18 | fn shutdown_sink(&mut 
self) {} 19 | } 20 | -------------------------------------------------------------------------------- /client/src/prelude.rs: -------------------------------------------------------------------------------- 1 | //! Re-exports commonly used types and traits. 2 | //! 3 | //! Aside from conveniently re-exporting all traits required for client usage, the prelude may continue to expand 4 | //! as the client API evolves, so it's encouraged to import the prelude to help alleviate any migration efforts as new 5 | //! versions of the library are released. 6 | //! 7 | //! ``` 8 | //! use selium::prelude::*; 9 | //! ``` 10 | //! 11 | pub use crate::traits::{Open, Operations, Retain}; 12 | -------------------------------------------------------------------------------- /tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "selium-tests" 3 | version = "0.3.0" 4 | edition = "2021" 5 | 6 | [dev-dependencies] 7 | anyhow = "1.0" 8 | futures = "0.3" 9 | selium = { path = "../client", features = ["std"] } 10 | selium-server = { path = "../server" } 11 | serde = { version = "1.0", features = ["derive"] } 12 | tempfile = "3.10.1" 13 | tokio = { version = "1.34", features = ["macros"] } 14 | uuid = { version = "1.6", features = ["v4"] } 15 | 16 | [dependencies] 17 | clap = "4.4" 18 | -------------------------------------------------------------------------------- /.github/workflows/audit_schedule.yml: -------------------------------------------------------------------------------- 1 | name: Scheduled Audit 2 | 3 | on: 4 | schedule: 5 | - cron: "0 0 * * *" 6 | 7 | jobs: 8 | audit: 9 | name: Daily Crate Security Audit 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout code 13 | uses: actions/checkout@v3 14 | with: 15 | ref: main 16 | 17 | - name: Audit crates 18 | uses: actions-rs/audit-check@v1 19 | with: 20 | token: ${{ secrets.GITHUB_TOKEN }} 21 | 
-------------------------------------------------------------------------------- /standard/src/compression/zstd/decomp.rs: -------------------------------------------------------------------------------- 1 | use crate::traits::compression::Decompress; 2 | use anyhow::Result; 3 | use bytes::Bytes; 4 | 5 | /// Decompression half of zstd implementation. 6 | /// 7 | /// `ZstdDecomp` implements [Decompress], and can be constructed for use with a `Subscriber` stream. 8 | #[derive(Debug)] 9 | pub struct ZstdDecomp; 10 | 11 | impl Decompress for ZstdDecomp { 12 | fn decompress(&self, input: Bytes) -> Result { 13 | let output = zstd::decode_all(&input[..])?; 14 | Ok(output.into()) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /.github/workflows/audit.yml: -------------------------------------------------------------------------------- 1 | name: Audit 2 | 3 | on: 4 | push: 5 | paths: 6 | - '**/Cargo.toml' 7 | - '**/Cargo.lock' 8 | branches: 9 | - main 10 | 11 | pull_request: 12 | branches: 13 | - main 14 | 15 | jobs: 16 | audit: 17 | name: Crate Security Audit 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout code 21 | uses: actions/checkout@v3 22 | 23 | - name: Audit crates 24 | uses: actions-rs/audit-check@v1 25 | with: 26 | token: ${{ secrets.GITHUB_TOKEN }} 27 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "client", 4 | "server", 5 | "protocol", 6 | "tests", 7 | "benchmarks", 8 | "standard", 9 | "tools", 10 | "log", 11 | ] 12 | resolver = "2" 13 | 14 | [workspace.package] 15 | edition = "2021" 16 | authors = ["Selium Contributors"] 17 | license = "MPL-2.0" 18 | homepage = "https://selium.com" 19 | repository = "https://github.com/seliumlabs/selium" 20 | readme = "README.md" 21 | keywords = ["selium", "messaging", "streaming", "stream", 
"queue"] 22 | categories = ["network-programming", "webassembly"] 23 | -------------------------------------------------------------------------------- /server/selium-server.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Selium Server 3 | After=network-online.target 4 | Wants=network-online.target 5 | 6 | [Service] 7 | Type=simple 8 | ExecStart=/usr/local/bin/selium-server \ 9 | --bind-addr 0.0.0.0:7001 \ 10 | --ca /etc/selium/ca.der \ 11 | --key /etc/selium/server.key.der \ 12 | --cert /etc/selium/server.der \ 13 | -vvv 14 | Restart=always 15 | User=selium 16 | Group=selium 17 | StandardOutput=journal 18 | StandardError=journal 19 | SyslogIdentifier=SeliumServer 20 | 21 | [Install] 22 | WantedBy=multi-user.target 23 | -------------------------------------------------------------------------------- /tools/src/commands/gen_certs/validity_range.rs: -------------------------------------------------------------------------------- 1 | use time::{Duration, OffsetDateTime}; 2 | 3 | const SECONDS_IN_DAY: i64 = 86_400; 4 | 5 | pub struct ValidityRange { 6 | pub start: OffsetDateTime, 7 | pub end: OffsetDateTime, 8 | } 9 | 10 | impl ValidityRange { 11 | pub fn new(days: i64) -> Self { 12 | let offset = Duration::new(SECONDS_IN_DAY * days, 0); 13 | let start = OffsetDateTime::now_utc().checked_sub(offset).unwrap(); 14 | let end = OffsetDateTime::now_utc().checked_add(offset).unwrap(); 15 | 16 | Self { start, end } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /tools/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "selium-tools" 3 | version = "0.2.0" 4 | description = "A CLI application containing tools for Selium developers" 5 | include = ["src/**/*"] 6 | edition.workspace = true 7 | authors.workspace = true 8 | license.workspace = true 9 | homepage.workspace = true 10 | 
repository.workspace = true 11 | readme.workspace = true 12 | keywords.workspace = true 13 | categories.workspace = true 14 | 15 | [dependencies] 16 | anyhow = "1.0" 17 | clap = { version = "4.4", features = ["derive"] } 18 | colored = "2.0" 19 | rcgen = "0.11" 20 | rustls = "0.21" 21 | time = "0.3" 22 | -------------------------------------------------------------------------------- /standard/src/compression/deflate/mod.rs: -------------------------------------------------------------------------------- 1 | //! Support for DEFLATE, a [lossless data compression algorithm](https://datatracker.ietf.org/doc/html/rfc1951). 2 | //! 3 | //! Adapts the [flate2] crate, the most widely used implementation of the DEFLATE compression algorithm 4 | //! built in Rust for use with `Selium`. 5 | //! 6 | //! The `Selium Standard` support for DEFLATE includes two widely used implementations, 7 | //! [gzip](https://gzip.org) and [zlib](https://github.com/madler/zlib). 8 | 9 | mod comp; 10 | mod decomp; 11 | mod types; 12 | 13 | pub use comp::*; 14 | pub use decomp::*; 15 | pub use types::*; 16 | -------------------------------------------------------------------------------- /protocol/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## v0.1.0 4 | 5 | - Initial release of Selium 6 | 7 | ## v0.2.0 8 | 9 | - Added request-reply messaging pattern 10 | - Added connection reestablishment for failed connections 11 | - Added graceful shutdown protocol 12 | - Features for Selium Cloud 13 | - Bump dependency versions 14 | 15 | ## v0.2.1 16 | 17 | - Bug fix for Selium Cloud 18 | 19 | ## v0.3.0 20 | 21 | - Improve error handling and reporting between clients and servers 22 | 23 | ## v0.4.0 24 | 25 | - Updated Frame::BatchMessage payload to include batch length. 
26 | - Added Offset type to enable interoperability with Selium Log 27 | -------------------------------------------------------------------------------- /standard/src/compression/lz4/comp.rs: -------------------------------------------------------------------------------- 1 | use crate::traits::compression::Compress; 2 | use anyhow::Result; 3 | use bytes::Bytes; 4 | use lz4_flex::frame::FrameEncoder; 5 | use std::io::Write; 6 | 7 | /// Compression half of lz4 implementation. 8 | /// 9 | /// `Lz4Comp` implements [Compress], and can be constructed for use with a `Publisher` stream. 10 | #[derive(Debug)] 11 | pub struct Lz4Comp; 12 | 13 | impl Compress for Lz4Comp { 14 | fn compress(&self, input: Bytes) -> Result { 15 | let mut encoder = FrameEncoder::new(vec![]); 16 | encoder.write_all(&input)?; 17 | 18 | Ok(encoder.finish()?.into()) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /standard/src/traits/codec.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use bytes::{Bytes, BytesMut}; 3 | 4 | /// Provides an `encode` method for implementors to build their own encoder types. 5 | /// 6 | /// See [codecs](crate::codecs) for more information. 7 | pub trait MessageEncoder { 8 | type Item: Clone; 9 | 10 | fn encode(&self, item: Self::Item) -> Result; 11 | } 12 | 13 | /// Provides a `decode` method for implementors to build their own decoder types. 14 | /// 15 | /// See [codecs](crate::codecs) for more information. 
16 | pub trait MessageDecoder { 17 | type Item; 18 | 19 | fn decode(&self, buffer: &mut BytesMut) -> Result; 20 | } 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | groups: 13 | minor: 14 | update-types: 15 | - "minor" 16 | - "patch" 17 | -------------------------------------------------------------------------------- /standard/src/compression/lz4/decomp.rs: -------------------------------------------------------------------------------- 1 | use crate::traits::compression::Decompress; 2 | use anyhow::Result; 3 | use bytes::Bytes; 4 | use lz4_flex::frame::FrameDecoder; 5 | use std::io::Read; 6 | 7 | /// Decompression half of lz4 implementation. 8 | /// 9 | /// `Lz4Decomp` implements [Decompress], and can be constructed for use with a `Subscriber` stream. 
10 | #[derive(Debug)] 11 | pub struct Lz4Decomp; 12 | 13 | impl Decompress for Lz4Decomp { 14 | fn decompress(&self, input: Bytes) -> Result { 15 | let mut buf = Vec::new(); 16 | let mut decoder = FrameDecoder::new(&input[..]); 17 | decoder.read_to_end(&mut buf)?; 18 | 19 | Ok(buf.into()) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /server/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use env_logger::Builder; 4 | use log::error; 5 | use selium_server::{args::UserArgs, server::Server}; 6 | 7 | #[tokio::main] 8 | async fn main() -> Result<()> { 9 | let args = UserArgs::parse(); 10 | 11 | let mut logger = Builder::new(); 12 | logger 13 | .filter_module( 14 | &env!("CARGO_PKG_NAME").replace('-', "_"), 15 | args.verbose.log_level_filter(), 16 | ) 17 | .init(); 18 | 19 | let server = Server::try_from(args)?; 20 | 21 | if let Err(e) = server.listen().await { 22 | error!("Error occurred while accepting connections: {:?}", e); 23 | } 24 | 25 | Ok(()) 26 | } 27 | -------------------------------------------------------------------------------- /client/src/constants.rs: -------------------------------------------------------------------------------- 1 | //! Commonly used constants. 2 | 3 | /// The default `keep_alive` interval for a client connection. 4 | pub const KEEP_ALIVE_DEFAULT: u64 = 5_000; 5 | /// The default `retention_policy` setting for messages. 
6 | pub const RETENTION_POLICY_DEFAULT: u64 = 1000 * 60 * 60 * 24; 7 | 8 | #[cfg(debug_assertions)] 9 | pub(crate) const CLOUD_CA: &[u8; 390] = include_bytes!("../ca.debug.der"); 10 | #[cfg(debug_assertions)] 11 | pub(crate) const SELIUM_CLOUD_REMOTE_URL: &str = "127.0.0.1:7002"; 12 | 13 | #[cfg(not(debug_assertions))] 14 | pub(crate) const CLOUD_CA: &[u8; 391] = include_bytes!("../ca.prod.der"); 15 | #[cfg(not(debug_assertions))] 16 | pub(crate) const SELIUM_CLOUD_REMOTE_URL: &str = "selium.io:7001"; 17 | -------------------------------------------------------------------------------- /log/examples/basic_usage.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use selium_log::{config::LogConfig, message::Message, MessageLog}; 3 | use std::sync::Arc; 4 | 5 | const MESSAGE_VERSION: u32 = 1; 6 | 7 | #[tokio::main] 8 | async fn main() -> Result<()> { 9 | let config = LogConfig::from_path("path/to/segments/dir"); 10 | let log = MessageLog::open(Arc::new(config)).await?; 11 | let message = Message::single(b"Hello, world!", MESSAGE_VERSION); 12 | 13 | log.write(message).await?; 14 | log.flush().await?; 15 | let slice = log.read_slice(0, None).await?; 16 | 17 | if let Some(mut iter) = slice.messages() { 18 | let next = iter.next().await?; 19 | println!("{next:?}") 20 | } 21 | 22 | Ok(()) 23 | } 24 | -------------------------------------------------------------------------------- /benchmarks/src/args.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | 3 | #[derive(Debug, Parser)] 4 | pub struct Args { 5 | /// The number of messages to exchange 6 | #[arg(long, default_value_t = 1_000_000)] 7 | pub num_of_messages: u64, 8 | 9 | /// The number of streams to use with multiplexing 10 | #[arg(long, default_value_t = 10)] 11 | pub num_of_streams: u64, 12 | 13 | /// Size (in bytes) of the message payload 14 | #[arg(long, default_value_t = 32)] 15 | pub 
message_size: u64, 16 | 17 | /// Enable message batching 18 | #[arg(long, default_value_t = false)] 19 | pub enable_batching: bool, 20 | 21 | /// Enable compression 22 | #[arg(long, default_value_t = false)] 23 | pub enable_compression: bool, 24 | } 25 | -------------------------------------------------------------------------------- /standard/src/compression/brotli/decomp.rs: -------------------------------------------------------------------------------- 1 | use crate::traits::compression::Decompress; 2 | use anyhow::Result; 3 | use brotli::Decompressor; 4 | use bytes::Bytes; 5 | use std::io::Read; 6 | 7 | const BUFFER_SIZE: usize = 4096; 8 | 9 | /// Decompression half of Brotli implementation. 10 | /// 11 | /// `BrotliDecomp` implements [Decompress], and can be constructed for use with a `Subscriber` 12 | /// stream. 13 | #[derive(Debug)] 14 | pub struct BrotliDecomp; 15 | 16 | impl Decompress for BrotliDecomp { 17 | fn decompress(&self, input: Bytes) -> Result { 18 | let mut buf = Vec::new(); 19 | let mut decoder = Decompressor::new(&input[..], BUFFER_SIZE); 20 | decoder.read_to_end(&mut buf)?; 21 | 22 | Ok(buf.into()) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /.github/workflows/check_release.yml: -------------------------------------------------------------------------------- 1 | name: Check Release PR 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - release 7 | types: 8 | - opened 9 | - reopened 10 | - synchronize 11 | 12 | jobs: 13 | changelog: 14 | name: Check Changelog Entry 15 | strategy: 16 | matrix: 17 | name: [client, server] 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout code 21 | uses: actions/checkout@v3 22 | 23 | - name: Get crate version 24 | id: version 25 | run: echo value=$(sed -ne 's/^version = "\(.*\)"/\1/p' < ${{ matrix.name }}/Cargo.toml) >> $GITHUB_OUTPUT 26 | 27 | - name: Check changelog 28 | run: python ci/changelog.py ${{ matrix.name }} ${{ 
steps.version.outputs.value }} 29 | -------------------------------------------------------------------------------- /protocol/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "selium-protocol" 3 | version = "0.4.0" 4 | description = """ 5 | selium-protocol is a dependent crate of Selium. Do not use directly. 6 | """ 7 | include = ["src/**/*"] 8 | edition.workspace = true 9 | authors.workspace = true 10 | license.workspace = true 11 | homepage.workspace = true 12 | repository.workspace = true 13 | readme.workspace = true 14 | 15 | [features] 16 | __notopiccheck = [] 17 | 18 | [dependencies] 19 | anyhow = "1.0" 20 | bincode = "1.3" 21 | bytes = { version = "1.5", features = ["serde"] } 22 | futures = "0.3" 23 | quinn = "0.10" 24 | serde = { version = "1.0", features = ["derive"] } 25 | selium-std = { version = "0.2", path = "../standard" } 26 | tokio-util = { version = "0.7", features = ["codec"] } 27 | regex = "1.10" 28 | lazy-regex = "3.1" 29 | -------------------------------------------------------------------------------- /tools/src/cli.rs: -------------------------------------------------------------------------------- 1 | use clap::{Args, Parser, Subcommand}; 2 | use std::path::PathBuf; 3 | 4 | #[derive(Parser)] 5 | #[command(about)] 6 | pub struct ToolsCli { 7 | #[command(subcommand)] 8 | pub command: Commands, 9 | } 10 | 11 | #[derive(Subcommand)] 12 | pub enum Commands { 13 | /// Generate self-signed CA and keypairs for use with testing and development. 
14 | GenCerts(GenCertsArgs), 15 | } 16 | 17 | #[derive(Args)] 18 | pub struct GenCertsArgs { 19 | /// Output path for server certs 20 | #[clap(short = 's', default_value = "certs/server/")] 21 | pub server_out_path: PathBuf, 22 | 23 | /// Output path for client certs 24 | #[clap(short = 'c', default_value = "certs/client/")] 25 | pub client_out_path: PathBuf, 26 | 27 | /// No expiry 28 | #[clap(long = "no-expiry")] 29 | pub no_expiry: bool, 30 | } 31 | -------------------------------------------------------------------------------- /client/src/keep_alive/mod.rs: -------------------------------------------------------------------------------- 1 | //! Data types and stream/sink wrappers to allow streams to recover from transient errors. 2 | //! 3 | //! By default, streams are wrapped in a `KeepAlive` type that will allow the stream to recover 4 | //! from transient errors such as connection timeouts, scheduled server shutdowns, etc. 5 | //! 6 | //! In most cases, there is no input required from the user, as streams already enable this feature 7 | //! with a default, reasonable connection retry strategy. However, if you wish to specify your own retry 8 | //! strategy, you can do so by constructing a [BackoffStrategy] instance and providing it to the `Selium` 9 | //! stream builder. 
10 | 11 | mod backoff_strategy; 12 | mod connection_status; 13 | mod helpers; 14 | 15 | pub mod pubsub; 16 | pub mod reqrep; 17 | 18 | pub use backoff_strategy::*; 19 | pub(crate) use connection_status::*; 20 | -------------------------------------------------------------------------------- /server/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## v0.1.0 4 | 5 | - Initial release of Selium 6 | 7 | ## v0.2.0 8 | 9 | - Added support for message batching 10 | - Message payload size limit (1MB) is now enforced on wire protocol 11 | - Implemented QUIC Mutual TLS 12 | 13 | ## v0.3.0 14 | 15 | - Added request-reply messaging pattern 16 | - Implement protocol graceful shutdown 17 | - Decoupled binary into library components for testing 18 | - Features for Selium Cloud 19 | - Bump dependency versions 20 | 21 | ## v0.3.1 22 | 23 | - Remove openssl dependency 24 | - Replace faulty Cloud certs 25 | - Bug fixes for Selium Cloud 26 | 27 | ## v0.4.0 28 | 29 | - Improve error reporting to client 30 | - Fix race condition when replier stream rejoins topic, which could result in the replier being erroneously rejected 31 | 32 | ## v0.5.0 33 | 34 | - Integrated Selium Log into pubsub broker 35 | -------------------------------------------------------------------------------- /client/examples/publish_chrono.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::SinkExt; 3 | use selium::prelude::*; 4 | use selium::std::codecs::StringCodec; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<()> { 8 | let connection = selium::custom() 9 | .keep_alive(chrono::Duration::seconds(5))? 10 | .endpoint("127.0.0.1:7001") 11 | .with_certificate_authority("../certs/client/ca.der")? 12 | .with_cert_and_key( 13 | "../certs/client/localhost.der", 14 | "../certs/client/localhost.key.der", 15 | )? 
16 | .connect() 17 | .await?; 18 | 19 | let mut publisher = connection 20 | .publisher("/acmeco/stocks") 21 | .with_encoder(StringCodec) 22 | .open() 23 | .await?; 24 | 25 | publisher.send("Hello, world!".to_owned()).await?; 26 | publisher.finish().await?; 27 | 28 | Ok(()) 29 | } 30 | -------------------------------------------------------------------------------- /client/src/traits/keep_alive.rs: -------------------------------------------------------------------------------- 1 | use crate::connection::SharedConnection; 2 | use crate::keep_alive::AttemptFut; 3 | use selium_protocol::BiStream; 4 | 5 | /// Provides methods to adapt a stream into a `KeepAlive` compatible stream. 6 | pub trait KeepAliveStream { 7 | type Headers: Sized + Clone + Unpin + Send + 'static; 8 | 9 | /// Callback that is invoked to attempt to reconnect to the `Selium` server. 10 | fn reestablish_connection(connection: SharedConnection, headers: Self::Headers) -> AttemptFut; 11 | 12 | /// Callback that is invoked upon successful reconnection. 13 | fn on_reconnect(&mut self, stream: BiStream); 14 | 15 | /// Retrieves the shared selium client connection. 16 | fn get_connection(&self) -> SharedConnection; 17 | 18 | /// Retrieves the headers used to register the stream with the `Selium` server. 
19 | fn get_headers(&self) -> Self::Headers; 20 | } 21 | -------------------------------------------------------------------------------- /client/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## v0.1.0 4 | 5 | - Initial release of Selium 6 | 7 | ## v0.2.0 8 | 9 | - Added opt-in message batching functionality 10 | - Added opt-in compression for Publisher and Subscriber streams 11 | - Message payload size limit (1MB) is now enforced on wire protocol 12 | - Implemented QUIC Mutual TLS 13 | 14 | ## v0.3.0 15 | 16 | - Added request-reply messaging pattern 17 | - Added connection reestablishment for failed connections 18 | - Features for Selium Cloud 19 | - Bump dependency versions 20 | 21 | ## v0.3.1 22 | 23 | - Add closure so client can gracefully handle replier errors 24 | - Add CA certificate 25 | - Bug fix for Selium Cloud 26 | 27 | ## v0.4.0 28 | 29 | - Add `tracing` lib to improve visibility 30 | - Improve and customise keepalive semantics for each stream type 31 | 32 | ## v0.5.0 33 | 34 | - Implemented Message Retention and Replay functionality to pubsub clients 35 | -------------------------------------------------------------------------------- /protocol/src/utils.rs: -------------------------------------------------------------------------------- 1 | use bytes::{Buf, BufMut, Bytes, BytesMut}; 2 | 3 | pub fn encode_message_batch(batch: Vec) -> Bytes { 4 | let mut bytes = BytesMut::new(); 5 | bytes.put_u64(batch.len() as u64); 6 | 7 | batch.iter().for_each(|m| { 8 | // Put a u64 into dst representing the length of the message 9 | bytes.put_u64(m.len() as u64); 10 | // Put the message bytes into dst 11 | bytes.extend_from_slice(m) 12 | }); 13 | 14 | bytes.into() 15 | } 16 | 17 | pub fn decode_message_batch(mut bytes: Bytes) -> Vec { 18 | let num_of_messages = bytes.get_u64(); 19 | let mut messages = Vec::with_capacity(num_of_messages as usize); 20 | 21 | for _ in 0..num_of_messages { 22 | 
let message_len = bytes.get_u64(); 23 | let message_bytes = bytes.split_to(message_len as usize); 24 | messages.push(message_bytes); 25 | } 26 | 27 | messages 28 | } 29 | -------------------------------------------------------------------------------- /tools/src/commands/gen_certs/mod.rs: -------------------------------------------------------------------------------- 1 | mod cert_gen; 2 | mod certificate_builder; 3 | mod key_pair; 4 | mod validity_range; 5 | 6 | use crate::cli::GenCertsArgs; 7 | use crate::commands::gen_certs::cert_gen::CertGen; 8 | use crate::traits::CommandRunner; 9 | use anyhow::Result; 10 | use colored::*; 11 | 12 | pub struct GenCertsRunner { 13 | args: GenCertsArgs, 14 | } 15 | 16 | impl From for GenCertsRunner { 17 | fn from(args: GenCertsArgs) -> Self { 18 | Self { args } 19 | } 20 | } 21 | 22 | impl CommandRunner for GenCertsRunner { 23 | fn run(self) -> Result<()> { 24 | eprintln!( 25 | "{}", 26 | "Warning! Using a self-signed certificate does not protect from person-in-the-middle attacks.".yellow() 27 | ); 28 | 29 | let cert_gen = CertGen::generate(self.args.no_expiry)?; 30 | cert_gen.output(&self.args.client_out_path, &self.args.server_out_path)?; 31 | 32 | Ok(()) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /client/src/logging/keep_alive.rs: -------------------------------------------------------------------------------- 1 | use selium_std::errors::SeliumError; 2 | 3 | pub fn reconnect_attempt(attempt_num: u32, max_attempts: u32) { 4 | tracing::info!( 5 | attempt_num, 6 | max_attempts, 7 | "Attempting to reconnect to server..." 8 | ); 9 | } 10 | 11 | pub fn too_many_retries() { 12 | tracing::error!("Too many connection retries. 
Aborting reconnection."); 13 | } 14 | 15 | pub fn successful_reconnection() { 16 | tracing::info!("Successfully reconnected to server."); 17 | } 18 | 19 | pub fn reconnect_error(err: &SeliumError) { 20 | tracing::error!(error = err.to_string(), "Failed to reconnect to server."); 21 | } 22 | 23 | pub fn unrecoverable_error(err: &SeliumError) { 24 | tracing::error!( 25 | error = err.to_string(), 26 | "Encountered unrecoverable error while reconnecting to server." 27 | ); 28 | } 29 | 30 | pub fn connection_lost() { 31 | tracing::error!("Client lost connection to the server."); 32 | } 33 | -------------------------------------------------------------------------------- /client/examples/decompression.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::StreamExt; 3 | use selium::prelude::*; 4 | use selium::std::codecs::StringCodec; 5 | use selium::std::compression::deflate::DeflateDecomp; 6 | 7 | #[tokio::main] 8 | async fn main() -> Result<()> { 9 | let connection = selium::custom() 10 | .keep_alive(5_000)? 11 | .endpoint("127.0.0.1:7001") 12 | .with_certificate_authority("../certs/client/ca.der")? 13 | .with_cert_and_key( 14 | "../certs/client/localhost.der", 15 | "../certs/client/localhost.key.der", 16 | )? 
17 | .connect() 18 | .await?; 19 | 20 | let mut subscriber = connection 21 | .subscriber("/acmeco/stocks") 22 | .with_decoder(StringCodec) 23 | .with_decompression(DeflateDecomp::gzip()) 24 | .open() 25 | .await?; 26 | 27 | while let Some(Ok(message)) = subscriber.next().await { 28 | println!("NEW MESSAGE: \"{message}\""); 29 | } 30 | 31 | Ok(()) 32 | } 33 | -------------------------------------------------------------------------------- /client/examples/publish.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::SinkExt; 3 | use selium::prelude::*; 4 | use selium::std::codecs::StringCodec; 5 | 6 | const NUM_OF_MESSAGES: usize = 50_000; 7 | 8 | #[tokio::main] 9 | async fn main() -> Result<()> { 10 | let connection = selium::custom() 11 | .keep_alive(5_000)? 12 | .endpoint("127.0.0.1:7001") 13 | .with_certificate_authority("../certs/client/ca.der")? 14 | .with_cert_and_key( 15 | "../certs/client/localhost.der", 16 | "../certs/client/localhost.key.der", 17 | )? 18 | .connect() 19 | .await?; 20 | 21 | let mut publisher = connection 22 | .publisher("/acmeco/stocks") 23 | .with_encoder(StringCodec) 24 | .open() 25 | .await?; 26 | 27 | for i in 0..NUM_OF_MESSAGES { 28 | let message = format!("Hello, world - {i}!"); 29 | publisher.send(message).await.unwrap(); 30 | } 31 | 32 | publisher.finish().await?; 33 | 34 | Ok(()) 35 | } 36 | -------------------------------------------------------------------------------- /client/examples/subscribe.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::StreamExt; 3 | use selium::prelude::*; 4 | use selium::std::codecs::StringCodec; 5 | use selium_protocol::Offset; 6 | 7 | #[tokio::main] 8 | async fn main() -> Result<()> { 9 | tracing_subscriber::fmt().init(); 10 | 11 | let connection = selium::custom() 12 | .keep_alive(5_000)? 
13 | .endpoint("127.0.0.1:7001") 14 | .with_certificate_authority("../certs/client/ca.der")? 15 | .with_cert_and_key( 16 | "../certs/client/localhost.der", 17 | "../certs/client/localhost.key.der", 18 | )? 19 | .connect() 20 | .await?; 21 | 22 | let mut subscriber = connection 23 | .subscriber("/acmeco/stocks") 24 | .with_decoder(StringCodec) 25 | .seek(Offset::FromEnd(5)) 26 | .open() 27 | .await?; 28 | 29 | while let Some(Ok(message)) = subscriber.next().await { 30 | println!("NEW MESSAGE: \"{message}\""); 31 | } 32 | 33 | Ok(()) 34 | } 35 | -------------------------------------------------------------------------------- /log/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "selium-log" 3 | version = "0.1.0" 4 | description = """ 5 | selium-log is an open-source implementation of a log-based message queue. 6 | """ 7 | edition.workspace = true 8 | authors.workspace = true 9 | license.workspace = true 10 | homepage.workspace = true 11 | repository.workspace = true 12 | readme.workspace = true 13 | keywords.workspace = true 14 | categories.workspace = true 15 | 16 | [dependencies] 17 | bytes = "1.5" 18 | chrono = "0.4" 19 | crc32c = "0.6" 20 | futures = "0.3" 21 | memmap2 = "0.9" 22 | tokio = { version = "1.36", features = [ 23 | "fs", 24 | "io-util", 25 | "time", 26 | "macros", 27 | "rt-multi-thread", 28 | "rt", 29 | ] } 30 | tokio-util = "0.7" 31 | thiserror = "1.0" 32 | 33 | [dev-dependencies] 34 | tempfile = "3.10" 35 | criterion = { version = "0.3", features = ["async_tokio"] } 36 | fake = "2.9" 37 | anyhow = "1.0" 38 | 39 | [[bench]] 40 | name = "read_benchmark" 41 | harness = false 42 | 43 | [[bench]] 44 | name = "write_benchmark" 45 | harness = false 46 | -------------------------------------------------------------------------------- /client/examples/compression.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use 
futures::SinkExt; 3 | use selium::prelude::*; 4 | use selium::std::codecs::StringCodec; 5 | use selium::std::compression::deflate::DeflateComp; 6 | use selium::std::traits::compression::CompressionLevel; 7 | 8 | #[tokio::main] 9 | async fn main() -> Result<()> { 10 | let connection = selium::custom() 11 | .keep_alive(5_000)? 12 | .endpoint("127.0.0.1:7001") 13 | .with_certificate_authority("../certs/client/ca.der")? 14 | .with_cert_and_key( 15 | "../certs/client/localhost.der", 16 | "../certs/client/localhost.key.der", 17 | )? 18 | .connect() 19 | .await?; 20 | 21 | let mut publisher = connection 22 | .publisher("/acmeco/stocks") 23 | .with_encoder(StringCodec) 24 | .with_compression(DeflateComp::gzip().fastest()) 25 | .open() 26 | .await?; 27 | 28 | publisher.send("Hello, world!".to_owned()).await?; 29 | publisher.finish().await?; 30 | 31 | Ok(()) 32 | } 33 | -------------------------------------------------------------------------------- /client/examples/subscribe_bincode.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::StreamExt; 3 | use selium::prelude::*; 4 | use selium::std::codecs::BincodeCodec; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | #[derive(Debug, Serialize, Deserialize)] 8 | struct StockEvent { 9 | ticker: String, 10 | change: f64, 11 | } 12 | 13 | #[tokio::main] 14 | async fn main() -> Result<()> { 15 | let connection = selium::custom() 16 | .keep_alive(5_000)? 17 | .endpoint("127.0.0.1:7001") 18 | .with_certificate_authority("../certs/client/ca.der")? 19 | .with_cert_and_key( 20 | "../certs/client/localhost.der", 21 | "../certs/client/localhost.key.der", 22 | )? 
23 | .connect() 24 | .await?; 25 | 26 | let mut subscriber = connection 27 | .subscriber("/acmeco/stocks") 28 | .with_decoder(BincodeCodec::::default()) 29 | .open() 30 | .await?; 31 | 32 | while let Some(Ok(event)) = subscriber.next().await { 33 | println!("NEW STOCK EVENT: {event:#?}"); 34 | } 35 | 36 | Ok(()) 37 | } 38 | -------------------------------------------------------------------------------- /client/src/keep_alive/helpers.rs: -------------------------------------------------------------------------------- /* Predicates used by the keep-alive layer to decide whether a failed stream/sink operation is a transient fault worth a reconnection attempt. */ 1 | use selium_protocol::error_codes::REPLIER_ALREADY_BOUND; 2 | use selium_std::errors::{QuicError, Result, SeliumError}; 3 | use std::{io, task::Poll}; 4 | 5 | /* True when the IO error means the transport dropped out from under us: peer reset the connection, or the socket is no longer connected. */ pub fn is_disconnect_error(err: &io::Error) -> bool { 6 | matches!( 7 | err.kind(), 8 | io::ErrorKind::ConnectionReset | io::ErrorKind::NotConnected 9 | ) 10 | } 11 | 12 | /* True when the server error code is REPLIER_ALREADY_BOUND, i.e. another replier currently holds the topic; treated as retryable below. */ pub fn is_bind_error(code: u32) -> bool { 13 | code == REPLIER_ALREADY_BOUND 14 | } 15 | 16 | /* Classifies a SeliumError as recoverable: disconnect-style IO errors, any QUIC connection error, or an OpenStream rejection caused by an already-bound replier. Anything else is considered unrecoverable and aborts reconnection. */ pub fn is_recoverable_error(err: &SeliumError) -> bool { 17 | match err { 18 | SeliumError::IoError(err) => is_disconnect_error(err), 19 | SeliumError::Quic(QuicError::ConnectionError(_)) => true, 20 | SeliumError::OpenStream(code, _) => is_bind_error(*code), 21 | _ => false, 22 | } 23 | } 24 | 25 | /* True when a stream poll result failed with a recoverable error. NOTE(review): the generic parameter of `Result` appears stripped by extraction — confirm against the repository. */ pub fn is_stream_disconnected(result: &Result) -> bool { 26 | matches!(result, Err(err) if is_recoverable_error(err)) 27 | } 28 | 29 | /* Sink counterpart of `is_stream_disconnected`: only Poll::Ready(Err(recoverable)) counts; Pending and Ready(Ok) do not. NOTE(review): generics stripped here too. */ pub fn is_sink_disconnected(result: &Poll>) -> bool { 30 | matches!(result, Poll::Ready(Err(err)) if is_recoverable_error(err)) 31 | } 32 | -------------------------------------------------------------------------------- /tools/src/commands/gen_certs/key_pair.rs: -------------------------------------------------------------------------------- 1 | use super::certificate_builder::CertificateBuilder; 2 | use anyhow::Result; 3 | use rcgen::Certificate; 4 | 5 | const COMMON_NAME: &str = "selium.io"; 6 | 7 | pub struct KeyPair(pub Vec, pub Vec); 8 | 9 | impl KeyPair { 10 | pub fn client(ca: &Certificate,
no_expiry: bool) -> Result { 11 | let cert_builder = CertificateBuilder::client(); 12 | Self::build(cert_builder, ca, no_expiry) 13 | } 14 | 15 | pub fn server(ca: &Certificate, no_expiry: bool) -> Result { 16 | let cert_builder = CertificateBuilder::server(); 17 | Self::build(cert_builder, ca, no_expiry) 18 | } 19 | 20 | fn build(builder: CertificateBuilder, ca: &Certificate, no_expiry: bool) -> Result { 21 | let mut builder = builder.common_name(COMMON_NAME); 22 | 23 | if !no_expiry { 24 | builder = builder.valid_for_days(5); 25 | } 26 | 27 | let cert = builder.build()?; 28 | let signed_cert = cert.serialize_der_with_signer(ca)?; 29 | let key = cert.serialize_private_key_der(); 30 | 31 | Ok(Self(signed_cert, key)) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /client/examples/publish_multitasking.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::SinkExt; 3 | use selium::prelude::*; 4 | use selium::std::codecs::StringCodec; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<()> { 8 | let connection = selium::custom() 9 | .keep_alive(5_000)? 10 | .endpoint("127.0.0.1:7001") 11 | .with_certificate_authority("../certs/client/ca.der")? 12 | .with_cert_and_key( 13 | "../certs/client/localhost.der", 14 | "../certs/client/localhost.key.der", 15 | )? 
16 | .connect() 17 | .await?; 18 | 19 | let mut publisher = connection 20 | .publisher("/acmeco/stocks") 21 | .with_encoder(StringCodec) 22 | .open() 23 | .await?; 24 | 25 | tokio::spawn({ 26 | let mut publisher = publisher.duplicate().await.unwrap(); 27 | async move { 28 | publisher 29 | .send("Hello from spawned task!".to_owned()) 30 | .await 31 | .unwrap(); 32 | publisher.finish().await.unwrap(); 33 | } 34 | }); 35 | 36 | publisher.send("Hello from main!".to_owned()).await?; 37 | publisher.finish().await?; 38 | 39 | Ok(()) 40 | } 41 | -------------------------------------------------------------------------------- /client/src/streams/mod.rs: -------------------------------------------------------------------------------- 1 | mod aliases; 2 | mod builder; 3 | 4 | pub mod pubsub; 5 | pub mod request_reply; 6 | pub use builder::*; 7 | use futures::StreamExt; 8 | use selium_protocol::{ 9 | error_codes::{STREAM_CLOSED_PREMATURELY, UNKNOWN_ERROR}, 10 | BiStream, Frame, 11 | }; 12 | use selium_std::errors::{Result, SeliumError}; 13 | 14 | // Handle response from Selium server on opening a stream /* Awaits exactly one frame on the freshly opened bi-directional stream: Frame::Ok means the server accepted the stream; every other outcome becomes an error. */ 15 | async fn handle_reply(stream: &mut BiStream) -> Result<()> { 16 | match stream.next().await { 17 | /* Server accepted the stream registration. */ Some(Ok(Frame::Ok)) => Ok(()), 18 | /* Server rejected it: surface its error code plus the UTF-8 message, falling back to a placeholder message when the payload is not valid UTF-8. */ Some(Ok(Frame::Error(payload))) => match String::from_utf8(payload.message.to_vec()) { 19 | Ok(s) => Err(SeliumError::OpenStream(payload.code, s)), 20 | Err(_) => Err(SeliumError::OpenStream( 21 | payload.code, 22 | "Invalid UTF-8 error".into(), 23 | )), 24 | }, 25 | /* Any other frame kind at this point is a protocol violation. */ Some(Ok(_)) => Err(SeliumError::OpenStream( 26 | UNKNOWN_ERROR, 27 | "Invalid frame returned from server".into(), 28 | )), 29 | /* Transport-level failure while reading the reply is propagated as-is. */ Some(Err(e)) => Err(e), 30 | /* Stream ended before the server answered. */ None => Err(SeliumError::OpenStream( 31 | STREAM_CLOSED_PREMATURELY, 32 | "Stream closed prematurely".into(), 33 | )), 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /client/src/traits/stream.rs:
-------------------------------------------------------------------------------- 1 | use super::TryIntoU64; 2 | use async_trait::async_trait; 3 | use selium_std::errors::Result; 4 | 5 | /// Provides an `open` method for [StreamBuilder](crate::StreamBuilder) implementations to 6 | /// construct and spawn a new stream. 7 | #[async_trait] 8 | pub trait Open { 9 | type Output; 10 | 11 | /// Constructs headers to register a stream with the `Selium` server, and constructs the output 12 | /// type corresponding to the type of stream. 13 | /// 14 | /// For example, when constructing a [Publisher](crate::streams::pubsub::Publisher) stream via a 15 | /// [StreamBuilder](crate::StreamBuilder), the open method will construct publisher headers, 16 | /// and then spawn a new [Publisher](crate::streams::pubsub::Publisher) stream. 17 | /// 18 | /// # Errors 19 | /// 20 | /// Returns [Err] if a failure occurs while spawning the stream. 21 | async fn open(self) -> Result; 22 | } 23 | 24 | #[doc(hidden)] 25 | pub trait Retain { 26 | fn retain(self, policy: T) -> Result 27 | where 28 | Self: Sized; 29 | } 30 | 31 | #[doc(hidden)] 32 | pub trait Operations { 33 | fn map(self, module_path: &str) -> Self; 34 | fn filter(self, module_path: &str) -> Self; 35 | } 36 | -------------------------------------------------------------------------------- /client/src/client/cloud/states.rs: -------------------------------------------------------------------------------- 1 | use crate::constants::CLOUD_CA; 2 | use crate::crypto::cert::load_root_store; 3 | use crate::ClientCommon; 4 | use rustls::{Certificate, PrivateKey, RootCertStore}; 5 | 6 | #[doc(hidden)] 7 | pub struct CloudWantsCertAndKey { 8 | pub(crate) common: ClientCommon, 9 | pub(crate) root_store: RootCertStore, 10 | } 11 | 12 | impl Default for CloudWantsCertAndKey { 13 | fn default() -> Self { 14 | let certs = vec![Certificate(CLOUD_CA.to_vec())]; 15 | // Safe to unwrap, as the cloud CA is baked into the library. 
16 | let root_store = load_root_store(&certs).unwrap(); 17 | 18 | Self { 19 | common: ClientCommon::default(), 20 | root_store, 21 | } 22 | } 23 | } 24 | 25 | #[doc(hidden)] 26 | pub struct CloudWantsConnect { 27 | pub(crate) common: ClientCommon, 28 | pub(crate) root_store: RootCertStore, 29 | pub(crate) certs: Vec, 30 | pub(crate) key: PrivateKey, 31 | } 32 | 33 | impl CloudWantsConnect { 34 | pub fn new(prev: CloudWantsCertAndKey, certs: &[Certificate], key: PrivateKey) -> Self { 35 | Self { 36 | common: prev.common, 37 | root_store: prev.root_store, 38 | certs: certs.to_owned(), 39 | key, 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /log/src/error.rs: -------------------------------------------------------------------------------- 1 | //! Type aliases and Enums relating to Selium Log errors. 2 | 3 | use thiserror::Error; 4 | 5 | /// Result type alias for [LogError]. 6 | pub type Result = std::result::Result; 7 | 8 | /// An enumeration of Selium Log errors. 9 | #[derive(Error, Debug)] 10 | pub enum LogError { 11 | /// Returned when a [std::io::Error] error occurs while creating a logs directory. 12 | #[error("Failed to create log directory.")] 13 | CreateLogsDirectory(#[source] std::io::Error), 14 | 15 | /// Returned when a [std::io::Error] error occurs while provisioning a [SegmentList](crate::segment::SegmentList). 16 | #[error("Failed to load segment offsets from log directory.")] 17 | LoadSegments(#[source] std::io::Error), 18 | 19 | /// Returned when attempting to write to an empty [SegmentList](crate::segment::SegmentList). 20 | #[error("Cannot find a hot segment to write to.")] 21 | SegmentListEmpty, 22 | 23 | /// Returned when the [Index](crate::index::Index) file fails to map to the memory map buffer. 24 | #[error("Failed to map segment index file to memory.")] 25 | MemoryMapIndex(#[source] std::io::Error), 26 | 27 | /// Any generic [std::io::Error] errors that aren't classified. 
28 | #[error(transparent)] 29 | IoError(#[from] std::io::Error), 30 | } 31 | -------------------------------------------------------------------------------- /standard/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "selium-std" 3 | version = "0.2.3" 4 | description = """ 5 | selium-std is a dependent crate of Selium. Do not use directly. 6 | """ 7 | include = ["src/**/*", "benches/**/*"] 8 | edition.workspace = true 9 | authors.workspace = true 10 | license.workspace = true 11 | homepage.workspace = true 12 | repository.workspace = true 13 | readme.workspace = true 14 | 15 | [dependencies] 16 | anyhow = "1.0" 17 | bincode = "1.3" 18 | brotli = { version = "3.4", optional = true } 19 | bytes = "1.4" 20 | flate2 = { version = "1.0", features = ["zlib"], optional = true } 21 | futures = "0.3" 22 | lz4_flex = { version = "0.11", optional = true } 23 | quinn = "0.10" 24 | selium-log = { version = "0.1", path = "../log" } 25 | serde = { version = "1.0", optional = true } 26 | thiserror = "1.0" 27 | zstd = { version = "0.13", optional = true } 28 | 29 | [dev-dependencies] 30 | criterion = "0.5" 31 | fake = "2.9" 32 | serde = { version = "1.0", features = ["derive"] } 33 | 34 | [features] 35 | compression = ["dep:brotli", "dep:flate2", "dep:lz4_flex", "dep:zstd"] 36 | codec = ["dep:serde"] 37 | 38 | [[bench]] 39 | name = "codecs" 40 | harness = false 41 | required-features = ["codec", "traits"] 42 | 43 | [[bench]] 44 | name = "compression" 45 | harness = false 46 | required-features = ["compression", "traits"] 47 | -------------------------------------------------------------------------------- /client/src/keep_alive/connection_status.rs: -------------------------------------------------------------------------------- 1 | use super::{BackoffStrategy, NextAttempt}; 2 | use futures::Future; 3 | use selium_protocol::BiStream; 4 | use selium_std::errors::Result; 5 | use std::pin::Pin; 6 | 7 | pub type 
AttemptsIterator = Box + Send>; 8 | pub type AttemptFut = Pin> + Send>>; 9 | 10 | pub enum ConnectionStatus { 11 | Connected, 12 | Disconnected(ReconnectState), 13 | Exhausted, 14 | } 15 | 16 | impl Default for ConnectionStatus { 17 | fn default() -> Self { 18 | Self::Connected 19 | } 20 | } 21 | 22 | impl ConnectionStatus { 23 | pub fn disconnected(backoff_strategy: BackoffStrategy) -> Self { 24 | let reconnect_state = ReconnectState::from(backoff_strategy); 25 | ConnectionStatus::Disconnected(reconnect_state) 26 | } 27 | } 28 | 29 | pub struct ReconnectState { 30 | pub attempts: AttemptsIterator, 31 | pub current_attempt: AttemptFut, 32 | } 33 | 34 | impl From for ReconnectState { 35 | fn from(strategy: BackoffStrategy) -> Self { 36 | let attempts = Box::new(strategy.into_iter()); 37 | let current_attempt = Box::pin(async { unreachable!() }); 38 | Self { 39 | attempts, 40 | current_attempt, 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /log/src/message/slice.rs: -------------------------------------------------------------------------------- 1 | use crate::data::LogIterator; 2 | 3 | /// A slice of a log segment, retrieved from reading from the log. 4 | /// The MessageSlice struct contains an `end_offset`, indicating the next offset 5 | /// to read from the log, and may or may not contain an iterator over the log segment. 6 | #[derive(Debug, Default)] 7 | pub struct MessageSlice { 8 | messages: Option, 9 | end_offset: u64, 10 | } 11 | 12 | impl MessageSlice { 13 | /// Constructs a MessageSlice instance. 14 | pub fn new(messages: LogIterator, end_offset: u64) -> Self { 15 | Self { 16 | messages: Some(messages), 17 | end_offset, 18 | } 19 | } 20 | 21 | /// Constructs an empty MessageSlice, with the messages iterator set to [Option::None]. 22 | pub fn empty(end_offset: u64) -> Self { 23 | Self { 24 | messages: None, 25 | end_offset, 26 | } 27 | } 28 | 29 | /// An iterator over the log segment. 
30 | pub fn messages(self) -> Option { 31 | self.messages 32 | } 33 | 34 | /// The last segment in this slice. 35 | /// 36 | /// Used to determine the next offset to request from the log after processing all messages 37 | /// in this slice. 38 | pub fn end_offset(&self) -> u64 { 39 | self.end_offset 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /standard/src/traits/compression.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use bytes::Bytes; 3 | 4 | /// Interface to adapt compression implementations for use with Selium. 5 | pub trait Compress { 6 | /// Fallibly compress the `input` bytes, and output as bytes. 7 | fn compress(&self, input: Bytes) -> Result; 8 | } 9 | 10 | /// Interface to adapt compression implementations for use with Selium. 11 | pub trait Decompress { 12 | /// Fallibly decompress the `input` bytes, and output as bytes. 13 | fn decompress(&self, input: Bytes) -> Result; 14 | } 15 | 16 | /// Interface for applicable compression algorithms and implementations that allow users to 17 | /// specify a compression level. 18 | pub trait CompressionLevel { 19 | /// Sets the compression level to the highest possible level for the algorithm/implementation. 20 | fn highest_ratio(self) -> Self; 21 | /// Sets the compression level to a balance between speed and size. 22 | /// 23 | /// Typically set to the default compression level for the specific algorithm/implementation. 24 | fn balanced(self) -> Self; 25 | /// Sets the compression level to the fastest possible speed supported by the 26 | /// algorithm/implementation. 27 | fn fastest(self) -> Self; 28 | /// Allows a user to set the compression level to a specific level. 
29 | fn level(self, level: u32) -> Self; 30 | } 31 | -------------------------------------------------------------------------------- /server/src/topic/mod.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::{channel::mpsc, SinkExt}; 3 | 4 | pub mod config; 5 | pub mod pubsub; 6 | pub mod reqrep; 7 | 8 | pub enum Socket { 9 | Pubsub(pubsub::Socket), 10 | Reqrep(reqrep::Socket), 11 | } 12 | 13 | impl Socket { 14 | fn unwrap_pubsub(self) -> pubsub::Socket { 15 | match self { 16 | Self::Pubsub(s) => s, 17 | _ => panic!("Attempted to unwrap non-pubsub socket"), 18 | } 19 | } 20 | 21 | fn unwrap_reqrep(self) -> reqrep::Socket { 22 | match self { 23 | Self::Reqrep(s) => s, 24 | _ => panic!("Attempted to unwrap non-reqrep socket"), 25 | } 26 | } 27 | } 28 | 29 | pub enum Sender { 30 | Pubsub(mpsc::Sender), 31 | ReqRep(mpsc::Sender), 32 | } 33 | 34 | impl Sender { 35 | pub async fn send(&mut self, sock: Socket) -> Result<()> { 36 | match self { 37 | Self::Pubsub(ref mut s) => s.send(sock.unwrap_pubsub()).await?, 38 | Self::ReqRep(ref mut s) => s.send(sock.unwrap_reqrep()).await?, 39 | } 40 | 41 | Ok(()) 42 | } 43 | 44 | pub fn close_channel(&mut self) { 45 | match self { 46 | Self::Pubsub(ref mut s) => s.close_channel(), 47 | Self::ReqRep(ref mut s) => s.close_channel(), 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /server/src/sink/mod.rs: -------------------------------------------------------------------------------- 1 | use futures::{sink::With, Future, Sink, SinkExt as _}; 2 | 3 | mod fanout_many; 4 | pub use fanout_many::*; 5 | 6 | mod router; 7 | pub use router::*; 8 | 9 | // @TODO - awaiting selium#22 10 | // mod filter; 11 | // pub use filter::Filter; 12 | 13 | // @TODO - awaiting selium#69 14 | // mod ordered; 15 | // pub use ordered::Ordered; 16 | 17 | impl SinkExt for T where T: Sink {} 18 | 19 | pub trait SinkExt: Sink { 20 | 
// This is a wrapper around `with` for conceptual symmetry with `StreamExt::map` 21 | fn map(self, f: F) -> With 22 | where 23 | F: FnMut(U) -> Fut, 24 | Fut: Future>, 25 | E: From, 26 | Self: Sized, 27 | { 28 | self.with(f) 29 | } 30 | 31 | // @TODO - awaiting selium#22 32 | // fn filter(self, f: F) -> Filter 33 | // where 34 | // F: FnMut(&Item) -> Fut, 35 | // Fut: Future, 36 | // Self: Sized, 37 | // { 38 | // Filter::new(self, f) 39 | // } 40 | 41 | // @TODO - awaiting selium#69 42 | // fn ordered(self, last_sent: usize) -> Ordered 43 | // where 44 | // Self: Sized, 45 | // { 46 | // Ordered::new(self, last_sent) 47 | // } 48 | } 49 | -------------------------------------------------------------------------------- /server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "selium-server" 3 | version = "0.5.0" 4 | description = """ 5 | The server-side binary for Selium, an extremely developer friendly, composable 6 | messaging platform with zero build time configuration. 
7 | """ 8 | include = ["src/**/*", "proxy.debug.der", "proxy.prod.der"] 9 | edition.workspace = true 10 | authors.workspace = true 11 | license.workspace = true 12 | homepage.workspace = true 13 | repository.workspace = true 14 | readme.workspace = true 15 | keywords.workspace = true 16 | categories.workspace = true 17 | 18 | [features] 19 | __cloud = [] 20 | 21 | [dependencies] 22 | anyhow = "1.0" 23 | bytes = "1.5" 24 | clap = { version = "4.4", features = ["derive"] } 25 | clap-verbosity-flag = "2.1" 26 | env_logger = "0.10" 27 | futures = "0.3" 28 | hmac-sha512 = "1.1" 29 | log = "0.4" 30 | pin-project-lite = "0.2" 31 | quinn = "0.10" 32 | rcgen = "0.11" 33 | rustls = "0.21" 34 | rustls-pemfile = "1.0" 35 | selium-protocol = { version = "0.4", path = "../protocol" } 36 | selium-log = { version = "0.1", path = "../log" } 37 | selium-std = { version = "0.2", path = "../standard", features = ["codec"] } 38 | serde = { version = "1.0", features = ["derive"] } 39 | tokio = { version = "1.34", features = [ 40 | "macros", 41 | "net", 42 | "rt-multi-thread", 43 | "signal", 44 | "sync", 45 | ] } 46 | tokio-stream = "0.1" 47 | tokio-util = { version = "0.7", features = ["codec"] } 48 | -------------------------------------------------------------------------------- /client/src/batching/message_batch.rs: -------------------------------------------------------------------------------- 1 | use super::BatchConfig; 2 | use bytes::Bytes; 3 | use std::time::Instant; 4 | 5 | pub(crate) struct MessageBatch { 6 | batch: Vec, 7 | config: BatchConfig, 8 | last_run: Instant, 9 | } 10 | 11 | impl MessageBatch { 12 | pub fn push(&mut self, value: Bytes) { 13 | self.batch.push(value); 14 | } 15 | 16 | pub fn drain(&mut self) -> Vec { 17 | let batch = self.batch.drain(..); 18 | batch.collect() 19 | } 20 | 21 | pub fn is_empty(&self) -> bool { 22 | self.batch.is_empty() 23 | } 24 | 25 | pub fn update_last_run(&mut self, instant: Instant) { 26 | self.last_run = instant; 27 | } 28 | 29 | 
pub fn exceeded_interval(&self, now: Instant) -> bool { 30 | now >= self.last_run + self.config.interval 31 | } 32 | 33 | pub fn exceeded_batch_size(&self) -> bool { 34 | self.batch.len() >= self.config.batch_size as usize 35 | } 36 | 37 | pub fn is_ready(&self, now: Instant) -> bool { 38 | self.exceeded_interval(now) || self.exceeded_batch_size() 39 | } 40 | } 41 | 42 | impl From for MessageBatch { 43 | fn from(config: BatchConfig) -> Self { 44 | let batch = Vec::with_capacity(config.batch_size as usize); 45 | let last_run = Instant::now(); 46 | 47 | Self { 48 | batch, 49 | config, 50 | last_run, 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /standard/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A library containing standard offerings for Selium features, such as client codecs, 2 | //! compression, and more. 3 | //! 4 | //! Selium Standard contains a rich selection of premade client codecs, compression 5 | //! implementations, etc, which have been created by the `Selium Labs` team to make the 6 | //! development experience as effortless as possible while using Selium. 7 | //! 8 | //! # Feature flags 9 | //! 10 | //! Do you only require a subset of features offered by Selium Standard? No worries! All offerings are 11 | //! intended to be entirely optional, and can be included via the respective feature flag. This 12 | //! in-turn will ensure that your binary is kept as slim as possible. 13 | //! 14 | //! For example, if a developer would like to use one of the many compression implementations offered by the 15 | //! library, but uses their own proprietary codecs in-house, they can simply compile `selium-std` with the 16 | //! `compression` feature flag, and then adapt their own proprietary codec to Selium's client codec 17 | //! interface by implementing the respective MessageEncoder and MessageDecoder traits located in the 18 | //! 
`traits/codec.rs` module. 19 | //! 20 | //! - `compression`: Enables all compression implementations. 21 | //! - `codec`: Enables all client codec implementations. 22 | //! - `traits`: Enables all traits. Enabled by default. 23 | //! - `errors`: Enables all errors. Enabled by default. 24 | 25 | #[cfg(feature = "codec")] 26 | pub mod codecs; 27 | #[cfg(feature = "compression")] 28 | pub mod compression; 29 | 30 | pub mod errors; 31 | pub mod traits; 32 | -------------------------------------------------------------------------------- /client/examples/reply.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use selium::std::codecs::BincodeCodec; 3 | use selium::{keep_alive::BackoffStrategy, prelude::*}; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | #[derive(Debug, Serialize, Deserialize, Clone)] 7 | enum Request { 8 | HelloWorld(Option), 9 | } 10 | 11 | #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] 12 | enum Response { 13 | HelloWorld(String), 14 | } 15 | 16 | #[tokio::main] 17 | async fn main() -> Result<()> { 18 | let connection = selium::custom() 19 | .keep_alive(5_000)? 20 | .backoff_strategy(BackoffStrategy::constant().with_max_attempts(1)) 21 | .endpoint("127.0.0.1:7001") 22 | .with_certificate_authority("../certs/client/ca.der")? 23 | .with_cert_and_key( 24 | "../certs/client/localhost.der", 25 | "../certs/client/localhost.key.der", 26 | )? 
27 | .connect() 28 | .await?; 29 | 30 | let mut replier = connection 31 | .replier("/some/endpoint") 32 | .with_request_decoder(BincodeCodec::default()) 33 | .with_reply_encoder(BincodeCodec::default()) 34 | .with_handler(|req| async move { handler(req).await }) 35 | .open() 36 | .await?; 37 | 38 | replier.listen().await?; 39 | 40 | Ok(()) 41 | } 42 | 43 | async fn handler(req: Request) -> Result { 44 | match req { 45 | Request::HelloWorld(mut name) => { 46 | let name = name.take().unwrap_or_else(|| "World".to_owned()); 47 | let response = format!("Hello, {name}!"); 48 | Ok(Response::HelloWorld(response)) 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /log/benches/write_benchmark.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use criterion::{criterion_group, criterion_main, Criterion}; 3 | use selium_log::{ 4 | config::{FlushPolicy, LogConfig}, 5 | message::Message, 6 | MessageLog, 7 | }; 8 | use std::{sync::Arc, time::Duration}; 9 | use tempfile::tempdir; 10 | 11 | const ONE_DAY: u64 = 86_400; 12 | const NUM_OF_MESSAGES: u64 = 1_000_000; 13 | const MAX_ENTRIES_PER_SEGMENT: u32 = 50_000; 14 | 15 | fn get_log_config() -> LogConfig { 16 | let tempdir = tempdir().unwrap(); 17 | 18 | LogConfig::from_path(tempdir.path()) 19 | .max_index_entries(MAX_ENTRIES_PER_SEGMENT) 20 | .retention_period(Duration::from_secs(ONE_DAY)) 21 | .cleaner_interval(Duration::from_secs(ONE_DAY)) 22 | .flush_policy(FlushPolicy::default().number_of_writes(100)) 23 | } 24 | 25 | async fn log_task() { 26 | let config = get_log_config(); 27 | let log = MessageLog::open(Arc::new(config)).await.unwrap(); 28 | 29 | for _ in 0..NUM_OF_MESSAGES { 30 | let batch = Bytes::copy_from_slice(&[1; 32]); 31 | let message = Message::single(&batch, 1); 32 | log.write(message).await.unwrap(); 33 | } 34 | 35 | log.flush().await.unwrap(); 36 | } 37 | 38 | pub fn benchmark(c: &mut Criterion) { 
39 | c.bench_function("write 1_000_000 records", |b| { 40 | let runtime = tokio::runtime::Runtime::new().expect("Failed to construct executor"); 41 | b.to_async(runtime).iter(log_task); 42 | }); 43 | } 44 | 45 | criterion_group! { 46 | name = benches; 47 | config = Criterion::default().sample_size(10); 48 | targets = benchmark 49 | } 50 | criterion_main!(benches); 51 | -------------------------------------------------------------------------------- /client/examples/publish_bincode.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::SinkExt; 3 | use selium::prelude::*; 4 | use selium::std::codecs::BincodeCodec; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | #[derive(Debug, Serialize, Deserialize, Clone)] 8 | struct StockEvent { 9 | ticker: String, 10 | change: f64, 11 | } 12 | 13 | impl StockEvent { 14 | pub fn new(ticker: &str, change: f64) -> Self { 15 | Self { 16 | ticker: ticker.to_owned(), 17 | change, 18 | } 19 | } 20 | } 21 | 22 | #[tokio::main] 23 | async fn main() -> Result<()> { 24 | let connection = selium::custom() 25 | .keep_alive(5_000)? 26 | .endpoint("127.0.0.1:7001") 27 | .with_certificate_authority("../certs/client/ca.der")? 28 | .with_cert_and_key( 29 | "../certs/client/localhost.der", 30 | "../certs/client/localhost.key.der", 31 | )? 
32 | .connect() 33 | .await?; 34 | 35 | let mut publisher = connection 36 | .publisher("/acmeco/stocks") 37 | .with_encoder(BincodeCodec::default()) 38 | .open() 39 | .await?; 40 | 41 | tokio::spawn({ 42 | let mut publisher = publisher.duplicate().await.unwrap(); 43 | async move { 44 | publisher 45 | .send(StockEvent::new("MSFT", 12.75)) 46 | .await 47 | .unwrap(); 48 | 49 | publisher.finish().await.unwrap(); 50 | } 51 | }); 52 | 53 | publisher.send(StockEvent::new("APPL", 3.5)).await?; 54 | publisher.send(StockEvent::new("INTC", -9.0)).await?; 55 | publisher.finish().await?; 56 | 57 | Ok(()) 58 | } 59 | -------------------------------------------------------------------------------- /standard/src/codecs/bytes_codec.rs: -------------------------------------------------------------------------------- 1 | use crate::traits::codec::{MessageDecoder, MessageEncoder}; 2 | use anyhow::Result; 3 | use bytes::{Bytes, BytesMut}; 4 | 5 | /// A basic codec for sending raw bytes. 6 | #[derive(Default, Clone)] 7 | pub struct BytesCodec; 8 | 9 | /// Encodes a [Vec] slice into [Bytes](bytes::Bytes). 10 | /// 11 | /// # Errors 12 | /// 13 | /// Guaranteed not to error. 14 | impl MessageEncoder for BytesCodec { 15 | type Item = Vec; 16 | 17 | fn encode(&self, item: Vec) -> Result { 18 | Ok(item.into()) 19 | } 20 | } 21 | 22 | /// Decodes a [BytesMut](bytes::BytesMut) payload into an owned [Vec] 23 | /// 24 | /// # Errors 25 | /// 26 | /// Guaranteed not to error. 
27 | impl MessageDecoder for BytesCodec { 28 | type Item = Vec; 29 | 30 | fn decode(&self, buffer: &mut BytesMut) -> Result { 31 | Ok(buffer.to_vec()) 32 | } 33 | } 34 | 35 | #[cfg(test)] 36 | mod tests { 37 | use super::*; 38 | 39 | #[test] 40 | fn encodes_into_string_bytes() { 41 | let input = b"decoded string".to_vec(); 42 | let expected = BytesMut::from(input.as_slice()); 43 | 44 | let codec = BytesCodec; 45 | let encoded = codec.encode(input.to_owned()).unwrap(); 46 | 47 | assert_eq!(expected, encoded); 48 | } 49 | 50 | #[test] 51 | fn decodes_string_bytes() { 52 | let expected = b"encoded string".to_vec(); 53 | let mut buffer = BytesMut::from(expected.as_slice()); 54 | 55 | let decoder = BytesCodec; 56 | let decoded = decoder.decode(&mut buffer).unwrap(); 57 | 58 | assert_eq!(decoded, expected); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /client/src/traits/try_into_u64.rs: -------------------------------------------------------------------------------- 1 | use selium_std::errors::{Result, SeliumError}; 2 | 3 | /// Provides a `try_into_u64` method to allow implementors to fallibly convert a suitable type to a 4 | /// [u64] 5 | pub trait TryIntoU64 { 6 | /// Consumes the input and fallibly converts it to a [u64]. 7 | fn try_into_u64(self) -> Result; 8 | } 9 | 10 | impl TryIntoU64 for u64 { 11 | fn try_into_u64(self) -> Result { 12 | Ok(self) 13 | } 14 | } 15 | 16 | impl TryIntoU64 for std::time::Duration { 17 | /// Converts a std [Duration](std::time::Duration) value into a [u64] by representing the 18 | /// duration in milliseconds. 19 | /// 20 | /// # Errors 21 | /// 22 | /// Because the [as_millis](std::time::Duration::as_millis) method 23 | /// returns a [u128], this conversion may fail due to potential data loss in the demotion of 24 | /// the integer. 
25 | fn try_into_u64(self) -> Result { 26 | self.as_millis() 27 | .try_into() 28 | .map_err(|_| SeliumError::ParseDurationMillis) 29 | } 30 | } 31 | 32 | #[cfg(feature = "chrono")] 33 | impl TryIntoU64 for chrono::Duration { 34 | /// Converts a [chrono::Duration] value into a [u64] by representing the duration in 35 | /// milliseconds. 36 | /// 37 | /// # Errors 38 | /// 39 | /// Because the [num_milliseconds](chrono::Duration::num_milliseconds) method 40 | /// returns an [i64], this conversion may fail due to negative values. 41 | fn try_into_u64(self) -> Result { 42 | self.num_milliseconds() 43 | .try_into() 44 | .map_err(|_| SeliumError::ParseDurationMillis) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /standard/src/codecs/string_codec.rs: -------------------------------------------------------------------------------- 1 | use crate::traits::codec::{MessageDecoder, MessageEncoder}; 2 | use anyhow::Result; 3 | use bytes::{Bytes, BytesMut}; 4 | 5 | /// A basic codec for encoding/decoding UTF-8 [String] message payloads. 6 | #[derive(Default, Clone)] 7 | pub struct StringCodec; 8 | 9 | /// Encodes a [&str] slice into [Bytes](bytes::Bytes). 10 | impl MessageEncoder for StringCodec { 11 | type Item = String; 12 | 13 | fn encode(&self, item: String) -> Result { 14 | Ok(item.into()) 15 | } 16 | } 17 | 18 | /// Decodes a [BytesMut](bytes::BytesMut) payload into an owned [String] 19 | /// 20 | /// # Errors 21 | /// 22 | /// Returns [Err] if a valid UTF-8 [String] cannot be constructed from the 23 | /// [BytesMut](bytes::BytesMut) slice. 24 | impl MessageDecoder for StringCodec { 25 | type Item = String; 26 | 27 | fn decode(&self, buffer: &mut BytesMut) -> Result { 28 | Ok(String::from_utf8(buffer[..].into())?) 
29 | } 30 | } 31 | 32 | #[cfg(test)] 33 | mod tests { 34 | use super::*; 35 | 36 | #[test] 37 | fn encodes_into_string_bytes() { 38 | let input = "decoded string"; 39 | let expected = BytesMut::from(input); 40 | 41 | let codec = StringCodec; 42 | let encoded = codec.encode(input.to_owned()).unwrap(); 43 | 44 | assert_eq!(expected, encoded); 45 | } 46 | 47 | #[test] 48 | fn decodes_string_bytes() { 49 | let expected = "encoded string"; 50 | let mut buffer = BytesMut::from(expected); 51 | 52 | let decoder = StringCodec; 53 | let decoded = decoder.decode(&mut buffer).unwrap(); 54 | 55 | assert_eq!(decoded, expected); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /standard/benches/codecs.rs: -------------------------------------------------------------------------------- 1 | use bytes::BytesMut; 2 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 3 | use selium_std::{ 4 | codecs::{BincodeCodec, StringCodec}, 5 | traits::codec::{MessageDecoder, MessageEncoder}, 6 | }; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | #[derive(Debug, Serialize, Deserialize)] 10 | struct StockEvent { 11 | ticker: String, 12 | change: f64, 13 | } 14 | 15 | impl StockEvent { 16 | pub fn new(ticker: &str, change: f64) -> Self { 17 | Self { 18 | ticker: ticker.to_owned(), 19 | change, 20 | } 21 | } 22 | } 23 | 24 | pub fn bincode_codec_benchmarks(c: &mut Criterion) { 25 | c.bench_function("bincode codec", |b| { 26 | b.iter(|| { 27 | let message = StockEvent::new("APPL", 25.5); 28 | let codec = BincodeCodec::default(); 29 | let encoded = codec.encode(black_box(message)).unwrap(); 30 | let mut encoded = BytesMut::from(&encoded[..]); 31 | let _ = codec.decode(black_box(&mut encoded)).unwrap(); 32 | }) 33 | }); 34 | } 35 | 36 | pub fn string_codec_benchmarks(c: &mut Criterion) { 37 | c.bench_function("string codec", |b| { 38 | b.iter(|| { 39 | let message = "Hello, world!".to_owned(); 40 | let codec = 
StringCodec::default(); 41 | let encoded = codec.encode(black_box(message)).unwrap(); 42 | let mut encoded = BytesMut::from(&encoded[..]); 43 | let _ = codec.decode(black_box(&mut encoded)).unwrap(); 44 | }) 45 | }); 46 | } 47 | 48 | criterion_group!(benches, bincode_codec_benchmarks, string_codec_benchmarks,); 49 | criterion_main!(benches); 50 | -------------------------------------------------------------------------------- /standard/src/compression/zstd/comp.rs: -------------------------------------------------------------------------------- 1 | use crate::traits::compression::{Compress, CompressionLevel}; 2 | use anyhow::Result; 3 | use bytes::Bytes; 4 | 5 | /// Highest compression level available for zstd. 6 | pub const HIGHEST_COMPRESSION: i32 = 9; 7 | 8 | /// Recommended compression level for zstd. 9 | pub const RECOMMENDED_COMPRESSION: i32 = zstd::DEFAULT_COMPRESSION_LEVEL; 10 | 11 | /// Fastest compression level available for zstd. 12 | pub const FASTEST_COMPRESSION: i32 = 1; 13 | 14 | /// Compression half of zstd implementation. 15 | /// 16 | /// `ZstdComp` implements [Compress], and can be constructed for use with a `Publisher` stream. 
17 | #[derive(Debug)] 18 | pub struct ZstdComp { 19 | level: i32, 20 | } 21 | 22 | impl Default for ZstdComp { 23 | fn default() -> Self { 24 | ZstdComp::new() 25 | } 26 | } 27 | 28 | impl ZstdComp { 29 | pub fn new() -> Self { 30 | ZstdComp { 31 | level: RECOMMENDED_COMPRESSION, 32 | } 33 | } 34 | } 35 | 36 | impl CompressionLevel for ZstdComp { 37 | fn highest_ratio(mut self) -> Self { 38 | self.level = HIGHEST_COMPRESSION; 39 | self 40 | } 41 | 42 | fn balanced(mut self) -> Self { 43 | self.level = RECOMMENDED_COMPRESSION; 44 | self 45 | } 46 | 47 | fn fastest(mut self) -> Self { 48 | self.level = FASTEST_COMPRESSION; 49 | self 50 | } 51 | 52 | fn level(mut self, level: u32) -> Self { 53 | self.level = level.try_into().unwrap(); 54 | self 55 | } 56 | } 57 | 58 | impl Compress for ZstdComp { 59 | fn compress(&self, input: Bytes) -> Result { 60 | let output = zstd::encode_all(&input[..], self.level)?; 61 | Ok(output.into()) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /client/src/client/custom/states.rs: -------------------------------------------------------------------------------- 1 | use crate::ClientCommon; 2 | use rustls::{Certificate, PrivateKey, RootCertStore}; 3 | 4 | #[doc(hidden)] 5 | #[derive(Debug, Default)] 6 | pub struct CustomWantsEndpoint { 7 | pub(crate) common: ClientCommon, 8 | } 9 | 10 | #[doc(hidden)] 11 | #[derive(Debug)] 12 | pub struct CustomWantsRootCert { 13 | pub(crate) common: ClientCommon, 14 | pub(crate) endpoint: String, 15 | } 16 | 17 | impl CustomWantsRootCert { 18 | pub fn new(prev: CustomWantsEndpoint, endpoint: &str) -> Self { 19 | Self { 20 | common: prev.common, 21 | endpoint: endpoint.to_owned(), 22 | } 23 | } 24 | } 25 | 26 | #[doc(hidden)] 27 | #[derive(Debug)] 28 | pub struct CustomWantsCertAndKey { 29 | pub(crate) common: ClientCommon, 30 | pub(crate) endpoint: String, 31 | pub(crate) root_store: RootCertStore, 32 | } 33 | 34 | impl CustomWantsCertAndKey { 35 | pub 
fn new(prev: CustomWantsRootCert, root_store: RootCertStore) -> Self { 36 | Self { 37 | common: prev.common, 38 | endpoint: prev.endpoint, 39 | root_store, 40 | } 41 | } 42 | } 43 | 44 | #[doc(hidden)] 45 | #[derive(Debug)] 46 | pub struct CustomWantsConnect { 47 | pub(crate) common: ClientCommon, 48 | pub(crate) endpoint: String, 49 | pub(crate) root_store: RootCertStore, 50 | pub(crate) certs: Vec, 51 | pub(crate) key: PrivateKey, 52 | } 53 | 54 | impl CustomWantsConnect { 55 | pub fn new(prev: CustomWantsCertAndKey, certs: &[Certificate], key: PrivateKey) -> Self { 56 | Self { 57 | common: prev.common, 58 | endpoint: prev.endpoint, 59 | root_store: prev.root_store, 60 | certs: certs.to_owned(), 61 | key, 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /client/examples/batching_and_compression.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use futures::{SinkExt, StreamExt}; 3 | use selium::batching::BatchConfig; 4 | use selium::prelude::*; 5 | use selium::std::codecs::StringCodec; 6 | use selium::std::compression::deflate::DeflateComp; 7 | use selium::std::compression::deflate::DeflateDecomp; 8 | use selium::std::traits::compression::CompressionLevel; 9 | 10 | #[tokio::main] 11 | async fn main() -> Result<()> { 12 | let connection = selium::custom() 13 | .keep_alive(5_000)? 14 | .endpoint("127.0.0.1:7001") 15 | .with_certificate_authority("../certs/client/ca.crt")? 16 | .with_cert_and_key( 17 | "../certs/client/localhost.der", 18 | "../certs/client/localhost.key.der", 19 | )? 
20 | .connect() 21 | .await?; 22 | 23 | let mut publisher = connection 24 | .publisher("/acmeco/stocks") 25 | .with_encoder(StringCodec) 26 | .with_compression(DeflateComp::gzip().fastest()) 27 | .with_batching(BatchConfig::high_throughput()) 28 | .open() 29 | .await?; 30 | 31 | let mut subscriber = connection 32 | .subscriber("/acmeco/stocks") 33 | .with_decoder(StringCodec) 34 | .with_decompression(DeflateDecomp::gzip()) 35 | .open() 36 | .await?; 37 | 38 | let subscribe_task = tokio::spawn(async move { 39 | while let Some(Ok(msg)) = subscriber.next().await { 40 | println!("Received message: {msg}"); 41 | } 42 | }); 43 | 44 | let publish_task = tokio::spawn(async move { 45 | for _ in 0..1000 { 46 | publisher.send("Hello, world!".to_owned()).await.unwrap(); 47 | } 48 | 49 | publisher.finish().await.unwrap(); 50 | }); 51 | 52 | let _ = tokio::join!(subscribe_task, publish_task); 53 | 54 | Ok(()) 55 | } 56 | -------------------------------------------------------------------------------- /client/src/streams/pubsub/states.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | batching::BatchConfig, 3 | streams::aliases::{Comp, Decomp}, 4 | PubSubCommon, 5 | }; 6 | use selium_protocol::Offset; 7 | 8 | #[doc(hidden)] 9 | pub struct SubscriberWantsDecoder { 10 | pub(crate) common: PubSubCommon, 11 | } 12 | 13 | impl SubscriberWantsDecoder { 14 | pub fn new(topic: &str) -> Self { 15 | Self { 16 | common: PubSubCommon::new(topic), 17 | } 18 | } 19 | } 20 | 21 | #[doc(hidden)] 22 | pub struct SubscriberWantsOpen { 23 | pub(crate) common: PubSubCommon, 24 | pub(crate) decoder: D, 25 | pub(crate) decompression: Option, 26 | pub(crate) offset: Offset, 27 | } 28 | 29 | impl SubscriberWantsOpen { 30 | pub fn new(prev: SubscriberWantsDecoder, decoder: D) -> Self { 31 | Self { 32 | common: prev.common, 33 | decoder, 34 | decompression: None, 35 | offset: Offset::default(), 36 | } 37 | } 38 | } 39 | 40 | #[doc(hidden)] 41 | 
#[derive(Debug)] 42 | pub struct PublisherWantsEncoder { 43 | pub(crate) common: PubSubCommon, 44 | } 45 | 46 | impl PublisherWantsEncoder { 47 | pub fn new(topic: &str) -> Self { 48 | Self { 49 | common: PubSubCommon::new(topic), 50 | } 51 | } 52 | } 53 | 54 | #[doc(hidden)] 55 | pub struct PublisherWantsOpen { 56 | pub(crate) common: PubSubCommon, 57 | pub(crate) encoder: E, 58 | pub(crate) compression: Option, 59 | pub(crate) batch_config: Option, 60 | } 61 | 62 | impl PublisherWantsOpen { 63 | pub fn new(prev: PublisherWantsEncoder, encoder: E) -> Self { 64 | Self { 65 | common: prev.common, 66 | encoder, 67 | compression: None, 68 | batch_config: None, 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /log/src/config/flush_policy.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | /// Defines the flushing policy for a message log. 4 | /// Flushing is triggered by a defined interval, and optionally, a write threshold. 5 | #[derive(Debug, Clone)] 6 | pub struct FlushPolicy { 7 | /// An optional write-count threshold. When the threshold is exceeded, a flush will be triggered. 8 | pub(crate) number_of_writes: Option, 9 | /// The flushing interval for the log. Triggers a flush when the interval elapses. 10 | pub(crate) interval: Duration, 11 | } 12 | 13 | impl Default for FlushPolicy { 14 | /// By default, no write-count threshold is defined, and is left as a user-provided optimization 15 | /// depending on the throughput and durability requirements of the log. 16 | fn default() -> Self { 17 | Self { 18 | number_of_writes: None, 19 | interval: Duration::from_secs(3), 20 | } 21 | } 22 | } 23 | 24 | impl FlushPolicy { 25 | /// Creates a new FlushPolicy with suitable defaults. 
26 | pub fn new() -> Self { 27 | Self::default() 28 | } 29 | 30 | /// Opts-in to flushing based on a write-count threshold, and specifies that the flush should be 31 | /// triggered on every write. 32 | pub fn every_write(mut self) -> Self { 33 | self.number_of_writes = Some(1); 34 | self 35 | } 36 | 37 | /// Opts-in to flushing based on a write-count threshhold, and specifies that the flush should be 38 | /// triggered after the provided number of writes. 39 | pub fn number_of_writes(mut self, num: u64) -> Self { 40 | self.number_of_writes = Some(num); 41 | self 42 | } 43 | 44 | /// Overrides the default flushing interval. 45 | pub fn interval(mut self, interval: Duration) -> Self { 46 | self.interval = interval; 47 | self 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /client/examples/request.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use fake::faker::name::en::Name; 3 | use fake::Fake; 4 | use futures::future::try_join_all; 5 | use selium::prelude::*; 6 | use selium::std::codecs::BincodeCodec; 7 | use serde::{Deserialize, Serialize}; 8 | use std::time::Duration; 9 | 10 | const NUM_OF_REQUESTS: usize = 10; 11 | 12 | #[derive(Debug, Serialize, Deserialize, Clone)] 13 | enum Request { 14 | HelloWorld(Option), 15 | } 16 | 17 | #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] 18 | enum Response { 19 | HelloWorld(String), 20 | } 21 | 22 | #[tokio::main] 23 | async fn main() -> Result<()> { 24 | let connection = selium::custom() 25 | .keep_alive(5_000)? 26 | .endpoint("127.0.0.1:7001") 27 | .with_certificate_authority("../certs/client/ca.der")? 28 | .with_cert_and_key( 29 | "../certs/client/localhost.der", 30 | "../certs/client/localhost.key.der", 31 | )? 
32 | .connect() 33 | .await?; 34 | 35 | let requestor = connection 36 | .requestor("/some/endpoint") 37 | .with_request_encoder(BincodeCodec::default()) 38 | .with_reply_decoder(BincodeCodec::::default()) 39 | .with_request_timeout(Duration::from_secs(3))? 40 | .open() 41 | .await?; 42 | 43 | let mut tasks = vec![]; 44 | 45 | for _ in 0..NUM_OF_REQUESTS { 46 | let task = tokio::spawn({ 47 | let name: String = Name().fake(); 48 | let request: Request = Request::HelloWorld(Some(name.clone())); 49 | let expected = format!("Hello, {name}!"); 50 | let mut requestor = requestor.clone(); 51 | 52 | async move { 53 | let res = requestor.request(request).await.unwrap(); 54 | assert_eq!(res, Response::HelloWorld(expected)); 55 | } 56 | }); 57 | 58 | tasks.push(task); 59 | } 60 | 61 | try_join_all(tasks).await?; 62 | 63 | Ok(()) 64 | } 65 | -------------------------------------------------------------------------------- /standard/src/compression/deflate/decomp.rs: -------------------------------------------------------------------------------- 1 | use super::types::DeflateLibrary; 2 | use crate::traits::compression::Decompress; 3 | use anyhow::Result; 4 | use bytes::Bytes; 5 | use flate2::read::{GzDecoder, ZlibDecoder}; 6 | use std::io::Read; 7 | 8 | /// Decompression half of DEFLATE implementation. 9 | /// 10 | /// `DeflateDecomp` implements [Decompress], and can be constructed for use with a `Subscriber` stream. 11 | #[derive(Default)] 12 | pub struct DeflateDecomp { 13 | library: DeflateLibrary, 14 | } 15 | 16 | impl DeflateDecomp { 17 | /// Constructs a new `DeflateDecomp` instance, using the provided [DeflateLibrary] variant. This 18 | /// constructor is used directly by the `gzip` and `zlib` associated functions, so it is recommended to use 19 | /// either of those to construct an instance. 
20 | pub fn new(library: DeflateLibrary) -> Self { 21 | Self { library } 22 | } 23 | 24 | /// Constructs a new `DeflateDecomp` instance, using [gzip](https://gzip.org) as the preferred 25 | /// implementation of the DEFLATE algorithm. 26 | pub fn gzip() -> Self { 27 | Self::new(DeflateLibrary::Gzip) 28 | } 29 | 30 | /// Constructs a new `DeflateDecomp` instance, using [zlib](https://github.com/madler/zlib) as the preferred 31 | /// implementation of the DEFLATE algorithm. 32 | pub fn zlib() -> DeflateDecomp { 33 | Self::new(DeflateLibrary::Zlib) 34 | } 35 | } 36 | 37 | impl Decompress for DeflateDecomp { 38 | fn decompress(&self, input: Bytes) -> Result { 39 | let mut output = Vec::new(); 40 | 41 | match self.library { 42 | DeflateLibrary::Gzip => { 43 | let mut decoder = GzDecoder::new(&input[..]); 44 | decoder.read_to_end(&mut output)?; 45 | } 46 | DeflateLibrary::Zlib => { 47 | let mut decoder = ZlibDecoder::new(&input[..]); 48 | decoder.read_to_end(&mut output)?; 49 | } 50 | }; 51 | 52 | Ok(output.into()) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /log/src/index/entry.rs: -------------------------------------------------------------------------------- 1 | use bytes::{Buf, BufMut, Bytes, BytesMut}; 2 | 3 | pub const SIZE_OF_INDEX_ENTRY: usize = 4 | std::mem::size_of::() + std::mem::size_of::() + std::mem::size_of::(); 5 | 6 | /// Represents an entry in a corresponding index file. 7 | #[derive(Debug)] 8 | pub struct IndexEntry { 9 | relative_offset: u32, 10 | timestamp: u64, 11 | physical_position: u64, 12 | } 13 | 14 | impl IndexEntry { 15 | /// Constructs a new IndexEntry instance. 16 | pub fn new(relative_offset: u32, timestamp: u64, physical_position: u64) -> Self { 17 | Self { 18 | relative_offset, 19 | timestamp, 20 | physical_position, 21 | } 22 | } 23 | 24 | /// Constructs an IndexEntry instance from the provided slice of bytes. 
25 | /// 26 | /// # Panics 27 | /// This method will panic if the provided slice is not large enough to 28 | /// fit the size of an index entry. 29 | pub fn from_slice(slice: &[u8]) -> Self { 30 | let mut slice = slice; 31 | let relative_offset = slice.get_u32(); 32 | let timestamp = slice.get_u64(); 33 | let physical_position = slice.get_u64(); 34 | Self::new(relative_offset, timestamp, physical_position) 35 | } 36 | 37 | /// Takes ownership of the current IndexEntry, and converts it to an instance of [Bytes]. 38 | pub fn into_slice(self) -> Bytes { 39 | let mut bytes = BytesMut::with_capacity(SIZE_OF_INDEX_ENTRY); 40 | bytes.put_u32(self.relative_offset); 41 | bytes.put_u64(self.timestamp); 42 | bytes.put_u64(self.physical_position); 43 | bytes.into() 44 | } 45 | 46 | /// The relative offset of the entry. 47 | /// The offset is relative to the base offset of the segment, increasing sequentially from 1. 48 | pub fn relative_offset(&self) -> u32 { 49 | self.relative_offset 50 | } 51 | 52 | /// The physical position of the corresponding record in the log file. 
53 | pub fn physical_position(&self) -> u64 { 54 | self.physical_position 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /tools/src/commands/gen_certs/cert_gen.rs: -------------------------------------------------------------------------------- 1 | use super::{certificate_builder::CertificateBuilder, key_pair::KeyPair}; 2 | use anyhow::Result; 3 | use colored::*; 4 | use rcgen::Certificate; 5 | use std::fs::{self, File}; 6 | use std::io::Write; 7 | use std::path::Path; 8 | 9 | fn generate_ca_cert(no_expiry: bool) -> Result { 10 | let mut builder = CertificateBuilder::ca() 11 | .country_name("AU") 12 | .organization_name("Selium"); 13 | 14 | if !no_expiry { 15 | builder = builder.valid_for_days(5); 16 | } 17 | 18 | let cert = builder.build()?; 19 | 20 | Ok(cert) 21 | } 22 | 23 | fn write_file(filename: &Path, contents: &[u8]) -> Result<()> { 24 | File::create(filename)?.write_all(contents)?; 25 | 26 | println!( 27 | "{}", 28 | format!("Successfully created {}", filename.display()).green() 29 | ); 30 | 31 | Ok(()) 32 | } 33 | 34 | pub struct CertGen { 35 | pub ca: Vec, 36 | pub client: KeyPair, 37 | pub server: KeyPair, 38 | } 39 | 40 | impl CertGen { 41 | pub fn generate(no_expiry: bool) -> Result { 42 | println!("Generating certificates..."); 43 | 44 | let ca = generate_ca_cert(no_expiry)?; 45 | let client = KeyPair::client(&ca, no_expiry)?; 46 | let server = KeyPair::server(&ca, no_expiry)?; 47 | let ca = ca.serialize_der()?; 48 | 49 | Ok(Self { ca, client, server }) 50 | } 51 | 52 | pub fn output(&self, client_out_path: &Path, server_out_path: &Path) -> Result<()> { 53 | println!("Writing certs to filesystem..."); 54 | 55 | self.write_to_filesystem(client_out_path, &self.client)?; 56 | self.write_to_filesystem(server_out_path, &self.server)?; 57 | 58 | Ok(()) 59 | } 60 | 61 | fn write_to_filesystem(&self, path: &Path, keypair: &KeyPair) -> Result<()> { 62 | fs::create_dir_all(path)?; 63 | 64 | 
write_file(&path.join("ca.der"), &self.ca)?; 65 | write_file(&path.join("localhost.der"), &keypair.0)?; 66 | write_file(&path.join("localhost.key.der"), &keypair.1)?; 67 | 68 | Ok(()) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /tests/tests/streams/request_reply.rs: -------------------------------------------------------------------------------- 1 | use crate::helpers::{Request, Response, TestClient}; 2 | use anyhow::Result; 3 | use futures::future::try_join_all; 4 | use selium::std::errors::SeliumError; 5 | use std::time::Duration; 6 | use uuid::Uuid; 7 | 8 | #[tokio::test] 9 | async fn request_reply_successful() -> Result<()> { 10 | let client = TestClient::start().await?; 11 | client.start_replier(None); 12 | 13 | let mut requestor = client.requestor(None).await?; 14 | let reply = requestor.request(Request::Ping).await?; 15 | 16 | assert_eq!(reply, Response::Pong); 17 | 18 | Ok(()) 19 | } 20 | 21 | #[tokio::test] 22 | async fn request_fails_if_exceeds_timeout() -> Result<()> { 23 | let client = TestClient::start().await?; 24 | client.start_replier(Some(Duration::from_secs(3))); 25 | 26 | let mut requestor = client.requestor(Some(Duration::from_secs(2))).await?; 27 | let reply = requestor.request(Request::Ping).await; 28 | 29 | assert!(matches!(reply, Err(SeliumError::RequestTimeout))); 30 | 31 | Ok(()) 32 | } 33 | 34 | #[tokio::test] 35 | async fn concurrent_requests_are_routed_successfully() -> Result<()> { 36 | let client = TestClient::start().await?; 37 | client.start_replier(None); 38 | 39 | let requestor = client.requestor(None).await?; 40 | let mut tasks = vec![]; 41 | 42 | for _ in 0..10_000 { 43 | tasks.push(tokio::spawn({ 44 | let mut requestor = requestor.clone(); 45 | let uuid = Uuid::new_v4().to_string(); 46 | 47 | async move { 48 | let reply = requestor 49 | .request(Request::Echo(uuid.clone())) 50 | .await 51 | .unwrap(); 52 | assert_eq!(reply, Response::Echo(uuid)); 53 | } 54 | })); 55 | } 56 
| 57 | try_join_all(tasks).await?; 58 | 59 | Ok(()) 60 | } 61 | 62 | #[tokio::test] 63 | async fn fails_to_bind_multiple_repliers_to_topic() -> Result<()> { 64 | let client = TestClient::start().await?; 65 | 66 | client.start_replier(None); 67 | assert!(client.start_replier(None).await.unwrap().is_err()); 68 | 69 | Ok(()) 70 | } 71 | -------------------------------------------------------------------------------- /log/src/tasks/flusher.rs: -------------------------------------------------------------------------------- 1 | use crate::{config::SharedLogConfig, error::Result, segment::SharedSegmentList}; 2 | use std::sync::Arc; 3 | use tokio::sync::mpsc::{self, Receiver, Sender}; 4 | use tokio_util::sync::CancellationToken; 5 | 6 | /// Task container for the asynchronous flusher task. 7 | /// 8 | /// The FlusherTask container spawns an asynchronous background task that polls the flushing interval 9 | /// of the log's FlushPolicy and triggers a flush once elapsed. 10 | #[derive(Debug)] 11 | pub struct FlusherTask { 12 | segments: SharedSegmentList, 13 | config: SharedLogConfig, 14 | cancellation_token: CancellationToken, 15 | } 16 | 17 | impl FlusherTask { 18 | /// Starts the background task and returns a reference to the task container. 19 | pub fn start(config: SharedLogConfig, segments: SharedSegmentList) -> (Arc, Sender<()>) { 20 | let cancellation_token = CancellationToken::new(); 21 | let (tx, rx) = mpsc::channel(1); 22 | 23 | let task = Arc::new(Self { 24 | segments, 25 | config, 26 | cancellation_token, 27 | }); 28 | 29 | tokio::spawn({ 30 | let task = task.clone(); 31 | async move { 32 | task.run(rx).await.unwrap(); 33 | } 34 | }); 35 | 36 | (task, tx) 37 | } 38 | 39 | async fn run(&self, mut rx: Receiver<()>) -> Result<()> { 40 | loop { 41 | tokio::select! 
{ 42 | _ = tokio::time::sleep(self.config.flush_policy.interval) => { 43 | self.segments.write().await.flush().await?; 44 | }, 45 | _ = rx.recv() => { 46 | continue; 47 | } 48 | _ = self.cancellation_token.cancelled() => { 49 | break Ok(()); 50 | } 51 | } 52 | } 53 | } 54 | } 55 | 56 | impl Drop for FlusherTask { 57 | /// When the task container is dropped, a cancel signal will be dispatched in order to gracefully 58 | /// terminate the background task. 59 | fn drop(&mut self) { 60 | self.cancellation_token.cancel(); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | pull_request: 9 | branches: 10 | - main 11 | - release 12 | 13 | jobs: 14 | check: 15 | name: Cargo Check 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout sources 19 | uses: actions/checkout@v3 20 | 21 | - name: Install stable toolchain 22 | uses: actions-rs/toolchain@v1 23 | with: 24 | profile: minimal 25 | toolchain: stable 26 | override: true 27 | 28 | - name: Run cargo check 29 | uses: actions-rs/cargo@v1 30 | with: 31 | command: check 32 | 33 | test: 34 | name: Cargo Test 35 | runs-on: ubuntu-latest 36 | steps: 37 | - name: Checkout sources 38 | uses: actions/checkout@v3 39 | 40 | - name: Install stable toolchain 41 | uses: actions-rs/toolchain@v1 42 | with: 43 | profile: minimal 44 | toolchain: stable 45 | override: true 46 | 47 | - name: Generate self-signed certs 48 | uses: actions-rs/cargo@v1 49 | with: 50 | command: run 51 | args: --bin selium-tools gen-certs 52 | 53 | - name: Run cargo test 54 | uses: actions-rs/cargo@v1 55 | with: 56 | command: test 57 | 58 | - name: Run cargo test --ignored 59 | uses: actions-rs/cargo@v1 60 | with: 61 | command: test 62 | args: -- --ignored 63 | 64 | lints: 65 | name: Linters 66 | runs-on: ubuntu-latest 67 | steps: 68 
| - name: Checkout sources 69 | uses: actions/checkout@v3 70 | 71 | - name: Install stable toolchain 72 | uses: actions-rs/toolchain@v1 73 | with: 74 | profile: minimal 75 | toolchain: stable 76 | override: true 77 | components: rustfmt, clippy 78 | 79 | - name: Run cargo fmt 80 | uses: actions-rs/cargo@v1 81 | with: 82 | command: fmt 83 | args: --all -- --check 84 | 85 | - name: Run cargo clippy 86 | uses: actions-rs/cargo@v1 87 | with: 88 | command: clippy 89 | args: -- -D warnings 90 | -------------------------------------------------------------------------------- /log/src/message/headers.rs: -------------------------------------------------------------------------------- 1 | use super::{CRC_SIZE, HEADERS_SIZE}; 2 | use bytes::{Buf, BufMut}; 3 | use chrono::Utc; 4 | 5 | /// Headers corresponding to a [Message](crate::message::Message), containing information about the message records batch. 6 | #[derive(Debug, Clone, PartialEq)] 7 | pub struct Headers { 8 | length: u64, 9 | version: u32, 10 | batch_size: u32, 11 | timestamp: u64, 12 | } 13 | 14 | impl Headers { 15 | /// Constructs a new headers instance. 16 | pub fn new(batch_len: usize, batch_size: u32, version: u32) -> Self { 17 | let length = (batch_len + HEADERS_SIZE + CRC_SIZE) as u64; 18 | let timestamp = Utc::now().timestamp() as u64; 19 | 20 | Self { 21 | length, 22 | version, 23 | batch_size, 24 | timestamp, 25 | } 26 | } 27 | 28 | /// Decodes a Headers instance from the provided bytes source. 29 | /// 30 | /// # Panics 31 | /// Will panic if the the bytes source is not large enough. 32 | pub fn decode(mut src: &[u8]) -> Self { 33 | let length = src.get_u64(); 34 | let version = src.get_u32(); 35 | let batch_size = src.get_u32(); 36 | let timestamp = src.get_u64(); 37 | 38 | Self { 39 | length, 40 | version, 41 | batch_size, 42 | timestamp, 43 | } 44 | } 45 | 46 | /// Encodes this Headers instance into the provided buffer. 
47 | pub fn encode(&self, buffer: &mut T) { 48 | buffer.put_u64(self.length); 49 | buffer.put_u32(self.version); 50 | buffer.put_u32(self.batch_size); 51 | buffer.put_u64(self.timestamp); 52 | } 53 | 54 | /// The byte length of the encoded batch. 55 | pub fn length(&self) -> u64 { 56 | self.length 57 | } 58 | 59 | /// The message frame version. 60 | pub fn version(&self) -> u32 { 61 | self.version 62 | } 63 | 64 | /// The amount of records in the batch. 65 | pub fn batch_size(&self) -> u32 { 66 | self.batch_size 67 | } 68 | 69 | /// A UNIX timestamp representing the time the message was appended to the log. 70 | pub fn timestamp(&self) -> u64 { 71 | self.timestamp 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /log/src/tasks/cleaner.rs: -------------------------------------------------------------------------------- 1 | use crate::{config::SharedLogConfig, error::Result, segment::SharedSegmentList}; 2 | use std::sync::Arc; 3 | use tokio_util::sync::CancellationToken; 4 | 5 | /// Task container for the asynchronous cleaner task. 6 | /// 7 | /// The CleanerTask container spawns an asynchronous task that polls for stale/expired segments 8 | /// in order to trigger their cleaning. 9 | #[derive(Debug)] 10 | pub struct CleanerTask { 11 | segments: SharedSegmentList, 12 | config: SharedLogConfig, 13 | cancellation_token: CancellationToken, 14 | } 15 | 16 | impl CleanerTask { 17 | /// Starts the background task and returns a reference to the task container. 18 | pub fn start(config: SharedLogConfig, segments: SharedSegmentList) -> Arc { 19 | let cancellation_token = CancellationToken::new(); 20 | 21 | let cleaner = Arc::new(Self { 22 | segments, 23 | config, 24 | cancellation_token, 25 | }); 26 | 27 | tokio::spawn({ 28 | let cleaner = cleaner.clone(); 29 | async move { 30 | cleaner.run().await.unwrap(); 31 | } 32 | }); 33 | 34 | cleaner 35 | } 36 | 37 | async fn run(&self) -> Result<()> { 38 | loop { 39 | tokio::select! 
{ 40 | _ = tokio::time::sleep(self.config.cleaner_interval) => { 41 | self.remove_stale_segments().await?; 42 | }, 43 | _ = self.cancellation_token.cancelled() => { 44 | break Ok(()); 45 | } 46 | } 47 | } 48 | } 49 | 50 | async fn remove_stale_segments(&self) -> Result<()> { 51 | let mut segments = self.segments.write().await; 52 | 53 | let stale_segments = segments 54 | .find_stale_segments(self.config.retention_period) 55 | .await?; 56 | 57 | segments.remove_segments(stale_segments.as_slice()).await?; 58 | 59 | Ok(()) 60 | } 61 | } 62 | 63 | impl Drop for CleanerTask { 64 | /// When the task container is dropped, a cancel signal will be dispatched in order to gracefully 65 | /// terminate the background task. 66 | fn drop(&mut self) { 67 | self.cancellation_token.cancel(); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /log/src/data/iterator.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | error::Result, 3 | message::{Headers, Message, CRC_SIZE, HEADERS_SIZE}, 4 | }; 5 | use tokio::{ 6 | fs::File, 7 | io::{AsyncReadExt, BufReader}, 8 | }; 9 | 10 | /// An iterator over a [Data](crate::data::Data) file. 11 | /// 12 | /// The LogIterator acts as an active reader over the log, pulling messages from the log 13 | /// and decoding them, while maintaining a cursor to ensure that superflous reads are not performed 14 | /// when a read limit is provided via `end_position`. 15 | #[derive(Debug)] 16 | pub struct LogIterator { 17 | reader: BufReader, 18 | cursor: u64, 19 | end_position: u64, 20 | } 21 | 22 | impl LogIterator { 23 | /// Constructs a new LogIterator instance. 24 | pub fn new(reader: BufReader, cursor: u64, end_position: u64) -> Self { 25 | Self { 26 | reader, 27 | cursor, 28 | end_position, 29 | } 30 | } 31 | 32 | /// Attempts to decode and retrieve the next message from the `reader`. 
33 | /// Returns [Option::None] if there are no more messages to decode. 34 | /// 35 | /// # Errors 36 | /// Returns std::io::ErrorKind::UnexpectedEof if an unexpected end-of-file 37 | /// is encountered due to a partially committed or corrupted message. 38 | pub async fn next(&mut self) -> Result> { 39 | if self.cursor >= self.end_position { 40 | return Ok(None); 41 | } 42 | 43 | let mut headers = vec![0; HEADERS_SIZE]; 44 | self.reader.read_exact(&mut headers).await?; 45 | 46 | let headers = Headers::decode(&headers); 47 | let remainder_len = headers.length() as usize - HEADERS_SIZE; 48 | let combined_len = HEADERS_SIZE + remainder_len; 49 | let records_len = remainder_len - CRC_SIZE; 50 | 51 | let mut remainder = vec![0; remainder_len]; 52 | self.reader.read_exact(&mut remainder).await?; 53 | 54 | let records = &remainder[..records_len]; 55 | let mut crc = [0; CRC_SIZE]; 56 | crc.copy_from_slice(&remainder[records_len..]); 57 | let crc = u32::from_be_bytes(crc); 58 | let message = Message::new(headers, records, crc); 59 | 60 | self.cursor += combined_len as u64; 61 | 62 | Ok(Some(message)) 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /client/README.md: -------------------------------------------------------------------------------- 1 | # Selium Client 2 | 3 | This is the client library for the Selium platform. Clients of the Selium server should 4 | implement this library to send data to and/or receive data from the server. 5 | 6 | ## Running the Examples 7 | 8 | Before running the examples, you should generate a set of self-signed certificates to use for authenticating the client and server via mTLS. 
9 | 10 | You can do so via the `selium-tools` binary included in the workspace: 11 | 12 | ```bash 13 | $ cargo run --bin selium-tools gen-certs 14 | ``` 15 | 16 | ## Getting Started 17 | 18 | Once you've started a Selium server ([see the server's `README.md`](../server/README.md) 19 | for details), use the client library to start sending and receiving messages. 20 | 21 | Here's a minimal example: 22 | 23 | ```rust 24 | use futures::SinkExt; 25 | use selium::{prelude::*, std::codecs::StringCodec}; 26 | 27 | #[tokio::main] 28 | async fn main() -> Result<(), Box> { 29 | let connection = selium::client() 30 | .with_certificate_authority("certs/client/ca.der")? 31 | .with_cert_and_key( 32 | "certs/client/localhost.der", 33 | "certs/client/localhost.key.der", 34 | )? 35 | .connect("127.0.0.1:7001") 36 | .await?; 37 | 38 | let mut publisher = connection 39 | .publisher("/some/topic") 40 | .with_encoder(StringCodec) 41 | .open() 42 | .await?; 43 | 44 | publisher.send("Hello, world!".into()).await?; 45 | publisher.finish().await?; 46 | 47 | Ok(()) 48 | } 49 | ``` 50 | 51 | ### Familiar Features 52 | 53 | Selium Client uses Rust's `Futures` and `Tokio` libraries under the hood for doing 54 | asynchronous I/O. If you are already familiar with these libs, then you already know how 55 | to drive Selium! 56 | 57 | Take our example above: 58 | 59 | ```rust 60 | let mut publisher = connection 61 | .publisher("/some/topic") 62 | ...; 63 | ``` 64 | 65 | Here, `publisher` is actually just a `futures::Sink`. This makes it instantly compatible 66 | with your existing streams/sinks: 67 | 68 | ```rust 69 | let my_stream = ...; // whatever floats your boat! 70 | 71 | my_stream.forward(publisher); 72 | ``` 73 | 74 | Now you're publishing messages! 75 | 76 | There's lots more Selium can do, like sending things other than strings. Be sure to 77 | [checkout the user guide](https://guide.selium.com) for more details. 
78 | -------------------------------------------------------------------------------- /client/src/batching/mod.rs: -------------------------------------------------------------------------------- 1 | //! Data structures and utilities to enable message batching on streams. 2 | //! 3 | //! Message batching is an optimization that batches several messages into a single frame, to reduce network and 4 | //! compression calls for particularly chatty [Publisher](crate::streams::pubsub::Publisher) streams. 5 | //! 6 | //! # Publisher 7 | //! 8 | //! In `Selium`, message batching uses an algorithm that collects messages sent to the 9 | //! [Publisher](crate::streams::pubsub::Publisher) stream's [Sink](futures::Sink) implementation by any means, and pushes 10 | //! them to a batching queue. 11 | //! 12 | //! Batching is an opt-in functionality for [Publisher](crate::streams::pubsub::Publisher) streams. If you wish to enable 13 | //! batching for your [Publisher](crate::streams::pubsub::Publisher) stream, you can do so via the `with_batching` method 14 | //! on the Publisher [ClientBuilder](crate::ClientBuilder). 15 | //! 16 | //! ## Batching Algorithm 17 | //! 18 | //! The batching algorithm is tuned by providing a [BatchConfig] instance, which specifies the 19 | //! `batching interval` and `maximum batch size`. The queue will continue to collect new messages until 20 | //! either the batch size has been exceeded, or the batching interval has expired. Batches will then be 21 | //! encoded [into a message frame](selium_protocol::Frame::BatchMessage) recognized by the wire protocol, 22 | //! before applying compression (if specified) and sending it over the wire. 23 | //! 24 | //! If a batch is incomplete prior to closing a [Publisher](crate::streams::pubsub::Publisher) stream, calling 25 | //! [finish](crate::streams::pubsub::Publisher::finish) on the stream will automatically flush the pending message 26 | //! batch to ensure that it is delivered to subscribers. 27 | 28 | //! 
# Subscriber 29 | //! 30 | //! No stream configuration is required to enable message batching for a 31 | //! [Subscriber](crate::streams::pubsub::Subscriber) stream. As message batches are received over the wire in a 32 | //! [Frame::BatchMessage](selium_protocol::Frame::BatchMessage) frame, the 33 | //! [Stream](futures::Stream) implementation for the [Subscriber](crate::streams::pubsub::Subscriber) stream will 34 | //! decompress the batch payload (if specified), and then unpack the batch and deliver each message 35 | //! individually. 36 | 37 | mod batch_config; 38 | mod message_batch; 39 | 40 | pub use batch_config::*; 41 | pub(crate) use message_batch::*; 42 | -------------------------------------------------------------------------------- /benchmarks/src/results.rs: -------------------------------------------------------------------------------- 1 | use crate::args::Args; 2 | use num_format::{Locale, ToFormattedString}; 3 | use std::{fmt::Display, time::Duration}; 4 | 5 | #[derive(Debug)] 6 | pub struct BenchmarkResults { 7 | duration: Duration, 8 | args: Args, 9 | total_mb_transferred: f64, 10 | avg_throughput: f64, 11 | avg_latency: f64, 12 | } 13 | 14 | impl BenchmarkResults { 15 | pub fn calculate(duration: Duration, args: Args) -> Self { 16 | let total_bytes_transferred = args.num_of_messages * args.message_size; 17 | let total_mb_transferred = total_bytes_transferred as f64 / 1024.0 / 1024.0; 18 | let avg_throughput = total_mb_transferred / duration.as_secs_f64(); 19 | let avg_latency = duration.as_nanos() as f64 / args.num_of_messages as f64; 20 | 21 | Self { 22 | duration, 23 | args, 24 | total_mb_transferred, 25 | avg_throughput, 26 | avg_latency, 27 | } 28 | } 29 | } 30 | 31 | impl Display for BenchmarkResults { 32 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 33 | let duration = format!("{:.4} Secs", self.duration.as_secs_f64()); 34 | let total_transferred = format!("{:.2} MB", self.total_mb_transferred); 35 | let 
avg_throughput = format!("{:.2} MB/s", self.avg_throughput); 36 | let avg_latency = format!("{:.2} ns", self.avg_latency); 37 | 38 | let summary = format!( 39 | " 40 | Benchmark Results 41 | --------------------- 42 | Number of Messages: {} 43 | Number of Streams: {} 44 | Message Size (Bytes): {} 45 | Batching Enabled: {} 46 | Compression Enabled: {}", 47 | self.args.num_of_messages.to_formatted_string(&Locale::en), 48 | self.args.num_of_streams.to_formatted_string(&Locale::en), 49 | self.args.message_size.to_formatted_string(&Locale::en), 50 | self.args.enable_batching, 51 | self.args.enable_compression, 52 | ); 53 | 54 | let header = format!( 55 | "| {: <20} | {: <20} | {: <20} | {: <20} |", 56 | "Duration", "Total Transferred", "Avg. Throughput", "Avg. Latency" 57 | ); 58 | 59 | let body = format!( 60 | "| {: <20} | {: <20} | {: <20} | {: <20} |", 61 | duration, total_transferred, avg_throughput, avg_latency 62 | ); 63 | 64 | write!(f, "{summary}\n\n{header}\n{body}\n") 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /log/src/message/mod.rs: -------------------------------------------------------------------------------- 1 | //! Message envelope/frame for a set of records. 2 | 3 | mod headers; 4 | mod slice; 5 | 6 | use bytes::{BufMut, Bytes}; 7 | use crc32c::crc32c; 8 | pub use headers::Headers; 9 | pub use slice::MessageSlice; 10 | use std::mem::size_of; 11 | 12 | /// The byte length of the [Headers] length marker 13 | pub const LEN_MARKER_SIZE: usize = size_of::(); 14 | 15 | /// The byte length of the CRC. 16 | pub const CRC_SIZE: usize = size_of::(); 17 | 18 | /// The combined byte length of the message headers. 19 | pub const HEADERS_SIZE: usize = 20 | size_of::() + size_of::() + size_of::() + size_of::(); 21 | 22 | /// The Message frame contains information required to parse the message, a calculated CRC used to 23 | /// verify message integrity, and the encoded records. 
24 | #[derive(Debug, Clone, PartialEq)] 25 | pub struct Message { 26 | headers: Headers, 27 | records: Bytes, 28 | // TODO: implement CRC check after replication is implemented. 29 | _crc: u32, 30 | } 31 | 32 | impl Message { 33 | /// Constructs a Message instance with the provided [Headers], records 34 | /// batch and CRC. 35 | pub fn new(headers: Headers, records: &[u8], crc: u32) -> Self { 36 | let records = Bytes::copy_from_slice(records); 37 | 38 | Self { 39 | headers, 40 | records, 41 | _crc: crc, 42 | } 43 | } 44 | 45 | /// Shorthand method for constructing a Message instance with a batch 46 | /// size of 1. 47 | pub fn single(records: &[u8], version: u32) -> Self { 48 | let headers = Headers::new(records.len(), 1, version); 49 | Self::new(headers, records, 0) 50 | } 51 | 52 | /// Shorthand method for constructing a Message instance with a provided 53 | /// batch size. 54 | pub fn batch(records: &[u8], batch_size: u32, version: u32) -> Self { 55 | let headers = Headers::new(records.len(), batch_size, version); 56 | Self::new(headers, records, 0) 57 | } 58 | 59 | /// Encodes this Message instance into the provided buffer. 60 | pub fn encode(&self, buffer: &mut Vec) { 61 | self.headers.encode(buffer); 62 | buffer.put_slice(&self.records); 63 | let crc = crc32c(buffer); 64 | buffer.put_u32(crc); 65 | } 66 | 67 | /// The message headers containing information about the records batch. 68 | pub fn headers(&self) -> &Headers { 69 | &self.headers 70 | } 71 | 72 | /// The encoded records batch. 
73 | pub fn records(&self) -> &[u8] { 74 | &self.records 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /tools/src/commands/gen_certs/certificate_builder.rs: -------------------------------------------------------------------------------- 1 | use super::validity_range::ValidityRange; 2 | use anyhow::Result; 3 | use rcgen::{ 4 | BasicConstraints, Certificate, CertificateParams, DnType, DnValue, ExtendedKeyUsagePurpose, 5 | IsCa, KeyUsagePurpose, SanType, 6 | }; 7 | 8 | pub struct CertificateBuilder { 9 | params: CertificateParams, 10 | } 11 | 12 | impl CertificateBuilder { 13 | pub fn ca() -> Self { 14 | let mut params = CertificateParams::new(vec![]); 15 | 16 | params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained); 17 | params.key_usages.push(KeyUsagePurpose::DigitalSignature); 18 | params.key_usages.push(KeyUsagePurpose::KeyCertSign); 19 | params.key_usages.push(KeyUsagePurpose::CrlSign); 20 | 21 | Self { params } 22 | } 23 | 24 | pub fn server() -> Self { 25 | CertificateBuilder::entity(ExtendedKeyUsagePurpose::ServerAuth) 26 | } 27 | 28 | pub fn client() -> Self { 29 | CertificateBuilder::entity(ExtendedKeyUsagePurpose::ClientAuth) 30 | } 31 | 32 | pub fn country_name(mut self, name: &str) -> Self { 33 | self.params.distinguished_name.push( 34 | DnType::CountryName, 35 | DnValue::PrintableString(name.to_owned()), 36 | ); 37 | self 38 | } 39 | 40 | pub fn organization_name(mut self, name: &str) -> Self { 41 | self.params 42 | .distinguished_name 43 | .push(DnType::OrganizationName, name); 44 | self 45 | } 46 | 47 | pub fn common_name(mut self, name: &str) -> Self { 48 | self.params 49 | .distinguished_name 50 | .push(DnType::CommonName, name); 51 | self 52 | } 53 | 54 | pub fn valid_for_days(mut self, days: i64) -> Self { 55 | let range = ValidityRange::new(days); 56 | self.params.not_before = range.start; 57 | self.params.not_after = range.end; 58 | self 59 | } 60 | 61 | pub fn build(self) -> Result { 62 | 
let cert = Certificate::from_params(self.params)?; 63 | Ok(cert) 64 | } 65 | 66 | fn entity(purpose: ExtendedKeyUsagePurpose) -> Self { 67 | let mut params = CertificateParams::new(vec![]); 68 | 69 | params 70 | .subject_alt_names 71 | .push(SanType::DnsName("localhost".to_owned())); 72 | 73 | params.key_usages.push(KeyUsagePurpose::DigitalSignature); 74 | params.use_authority_key_identifier_extension = true; 75 | params.extended_key_usages.push(purpose); 76 | 77 | Self { params } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /log/benches/read_benchmark.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 3 | use selium_log::{ 4 | config::{FlushPolicy, LogConfig}, 5 | message::Message, 6 | MessageLog, 7 | }; 8 | use std::{path::Path, sync::Arc, time::Duration}; 9 | use tempfile::tempdir; 10 | 11 | const ONE_DAY: u64 = 86_400; 12 | const NUM_OF_MESSAGES: u64 = 1_000_000; 13 | const MAX_ENTRIES_PER_SEGMENT: u32 = 50_000; 14 | 15 | fn get_log_config(path: impl AsRef) -> LogConfig { 16 | LogConfig::from_path(path) 17 | .max_index_entries(MAX_ENTRIES_PER_SEGMENT) 18 | .retention_period(Duration::from_secs(ONE_DAY)) 19 | .cleaner_interval(Duration::from_secs(ONE_DAY)) 20 | .flush_policy(FlushPolicy::default().number_of_writes(100)) 21 | } 22 | 23 | async fn write_records(path: impl AsRef) { 24 | let config = get_log_config(path); 25 | let log = MessageLog::open(Arc::new(config)).await.unwrap(); 26 | 27 | for i in 0..NUM_OF_MESSAGES { 28 | let message = format!("Hello, world! 
{i}"); 29 | let batch = Bytes::from(message); 30 | let message = Message::single(&batch, 1); 31 | log.write(message).await.unwrap(); 32 | } 33 | 34 | log.flush().await.unwrap(); 35 | } 36 | 37 | async fn read_records(path: impl AsRef) { 38 | let config = get_log_config(path); 39 | let log = MessageLog::open(Arc::new(config)).await.unwrap(); 40 | let mut offset = 0; 41 | 42 | loop { 43 | let slice = log.read_slice(offset, None).await.unwrap(); 44 | offset = slice.end_offset(); 45 | 46 | match slice.messages().as_mut() { 47 | Some(ref mut iterator) => { 48 | while let Some(next) = iterator.next().await.unwrap() { 49 | black_box(next); 50 | } 51 | } 52 | None => break, 53 | } 54 | } 55 | } 56 | 57 | pub fn benchmark(c: &mut Criterion) { 58 | let tempdir = tempdir().unwrap(); 59 | let path = tempdir.path(); 60 | let runtime = tokio::runtime::Runtime::new().expect("Failed to construct executor"); 61 | 62 | runtime.block_on(async { 63 | write_records(path).await; 64 | }); 65 | 66 | c.bench_function("read 1_000_000 records", |b| { 67 | let runtime = tokio::runtime::Runtime::new().expect("Failed to construct executor"); 68 | b.to_async(runtime).iter(move || read_records(path)); 69 | }); 70 | } 71 | 72 | criterion_group! 
{ 73 | name = benches; 74 | config = Criterion::default().sample_size(10); 75 | targets = benchmark 76 | } 77 | criterion_main!(benches); 78 | -------------------------------------------------------------------------------- /server/src/args.rs: -------------------------------------------------------------------------------- 1 | use clap::{Args, Parser}; 2 | use clap_verbosity_flag::Verbosity; 3 | use std::{net::SocketAddr, path::PathBuf}; 4 | 5 | #[derive(Parser, Debug)] 6 | #[command(version, about)] 7 | pub struct UserArgs { 8 | /// Address to bind this server to 9 | #[clap(short = 'a', long = "bind-addr", default_value = "127.0.0.1:7001")] 10 | pub bind_addr: SocketAddr, 11 | 12 | #[clap(flatten)] 13 | pub cert: CertGroup, 14 | 15 | #[clap(flatten)] 16 | pub log: LogArgs, 17 | 18 | /// Enable stateless retries 19 | #[clap(long = "stateless-retry")] 20 | pub stateless_retry: bool, 21 | 22 | /// File to log TLS keys to for debugging 23 | #[clap(long = "keylog")] 24 | pub keylog: bool, 25 | 26 | /// Maximum time in ms a client can idle waiting for data - default to 15 seconds 27 | #[clap(long = "max-idle-timeout", default_value_t = 15000, value_parser = clap::value_parser!(u32))] 28 | pub max_idle_timeout: u32, 29 | 30 | /// Can be called multiple times to increase output 31 | #[clap(flatten)] 32 | pub verbose: Verbosity, 33 | } 34 | 35 | #[derive(Args, Debug)] 36 | pub struct CertGroup { 37 | /// CA certificate 38 | #[clap(long, default_value = "certs/server/ca.der")] 39 | pub ca: PathBuf, 40 | /// TLS private key 41 | #[clap( 42 | short = 'k', 43 | long = "key", 44 | default_value = "certs/server/localhost.key.der" 45 | )] 46 | pub key: PathBuf, 47 | /// TLS certificate 48 | #[clap( 49 | short = 'c', 50 | long = "cert", 51 | default_value = "certs/server/localhost.der" 52 | )] 53 | pub cert: PathBuf, 54 | } 55 | 56 | #[derive(Args, Debug)] 57 | pub struct LogArgs { 58 | /// Path to directory to store log segments. 
59 | #[clap(long, default_value = "logs/")] 60 | pub log_segments_directory: PathBuf, 61 | 62 | /// Interval in milliseconds to poll log cleaner task - default to 5 minutes. 63 | #[clap(long, default_value_t = 300_000)] 64 | pub log_cleaner_interval: u64, 65 | 66 | /// Maximum number of entries per log segment. 67 | #[clap(long, default_value_t = 100_000)] 68 | pub log_maximum_entries: u32, 69 | 70 | /// Number of writes before flushing log to filesystem. 71 | #[clap(long)] 72 | pub flush_policy_num_writes: Option, 73 | 74 | /// Interval in millis to asynchronously flush log to filesystem. 75 | #[clap(long, default_value_t = 3000)] 76 | pub flush_policy_interval: u64, 77 | 78 | /// Subscriber polling interval in milliseconds. 79 | #[clap(long, default_value_t = 25)] 80 | pub subscriber_polling_interval: u64, 81 | } 82 | -------------------------------------------------------------------------------- /log/tests/helpers.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use fake::Fake; 3 | use selium_log::{ 4 | config::{LogConfig, SharedLogConfig}, 5 | message::Message, 6 | MessageLog, 7 | }; 8 | use std::sync::Arc; 9 | use tokio::fs; 10 | 11 | fn generate_dummy_message() -> String { 12 | (16..32).fake::() 13 | } 14 | 15 | pub fn generate_dummy_messages(count: usize) -> Vec { 16 | (0..count) 17 | .map(|_| generate_dummy_message()) 18 | .collect::>() 19 | } 20 | 21 | pub struct TestWrapper { 22 | log: MessageLog, 23 | config: SharedLogConfig, 24 | } 25 | 26 | impl TestWrapper { 27 | pub async fn build(config: LogConfig) -> Self { 28 | let config = Arc::new(config); 29 | let log = MessageLog::open(config.clone()).await.unwrap(); 30 | 31 | Self { log, config } 32 | } 33 | 34 | pub async fn number_of_segments(&self) -> u64 { 35 | let mut segments_count = 0; 36 | let mut dir = fs::read_dir(&self.config.segments_path).await.unwrap(); 37 | 38 | while let Some(entry) = dir.next_entry().await.unwrap() { 39 | let path = 
entry.path(); 40 | 41 | if path.is_file() && path.extension() == Some("index".as_ref()) { 42 | segments_count += 1; 43 | } 44 | } 45 | 46 | segments_count 47 | } 48 | 49 | pub async fn write_records(&mut self, records: &[String]) { 50 | for record in records { 51 | self.write(record).await; 52 | } 53 | } 54 | 55 | pub async fn write_dummy_records(&mut self, count: usize) { 56 | for _ in 0..count { 57 | let message = generate_dummy_message(); 58 | self.write(&message).await; 59 | } 60 | } 61 | 62 | pub async fn read_records(&mut self, offset: u64, limit: Option) -> Vec { 63 | let mut slice = self 64 | .log 65 | .read_slice(offset, limit) 66 | .await 67 | .unwrap() 68 | .messages() 69 | .unwrap(); 70 | 71 | let mut messages = vec![]; 72 | 73 | while let Ok(Some(message)) = slice.next().await { 74 | let message = String::from_utf8(message.records().to_vec()).unwrap(); 75 | messages.push(message); 76 | } 77 | 78 | messages 79 | } 80 | 81 | pub async fn flush(&mut self) { 82 | self.log.flush().await.unwrap(); 83 | } 84 | 85 | async fn write(&mut self, message: &str) { 86 | let batch = Bytes::from(message.to_owned()); 87 | let message = Message::single(&batch, 1); 88 | self.log.write(message).await.unwrap(); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /standard/src/compression/deflate/comp.rs: -------------------------------------------------------------------------------- 1 | use super::types::DeflateLibrary; 2 | use crate::traits::compression::{Compress, CompressionLevel}; 3 | use anyhow::Result; 4 | use bytes::Bytes; 5 | use flate2::write::{GzEncoder, ZlibEncoder}; 6 | use flate2::Compression; 7 | use std::io::Write; 8 | 9 | /// Compression half of DEFLATE implementation. 10 | /// 11 | /// `DeflateComp` implements [Compress], and can be constructed for use with a `Publisher` stream. 
12 | #[derive(Default)] 13 | pub struct DeflateComp { 14 | library: DeflateLibrary, 15 | level: Compression, 16 | } 17 | 18 | impl DeflateComp { 19 | /// Constructs a new `DeflateComp` instance, using the provided [DeflateLibrary] variant. This 20 | /// constructor is used directly by the `gzip` and `zlib` associated functions, so it is recommended to use 21 | /// either of those to construct an instance. 22 | pub fn new(library: DeflateLibrary) -> Self { 23 | Self { 24 | library, 25 | level: Compression::default(), 26 | } 27 | } 28 | 29 | /// Constructs a new `DeflateComp` instance, using `gzip`as the preferred implementation of the DEFLATE 30 | /// algorithm. 31 | pub fn gzip() -> Self { 32 | DeflateComp::new(DeflateLibrary::Gzip) 33 | } 34 | 35 | /// Constructs a new `DeflateComp` instance, using `zlib` as the preferred implementation of the DEFLATE 36 | /// algorithm. 37 | pub fn zlib() -> Self { 38 | DeflateComp::new(DeflateLibrary::Zlib) 39 | } 40 | } 41 | 42 | impl CompressionLevel for DeflateComp { 43 | fn highest_ratio(mut self) -> Self { 44 | self.level = Compression::best(); 45 | self 46 | } 47 | 48 | fn balanced(mut self) -> Self { 49 | self.level = Compression::default(); 50 | self 51 | } 52 | 53 | fn fastest(mut self) -> Self { 54 | self.level = Compression::fast(); 55 | self 56 | } 57 | 58 | fn level(mut self, level: u32) -> Self { 59 | self.level = Compression::new(level); 60 | self 61 | } 62 | } 63 | 64 | impl Compress for DeflateComp { 65 | fn compress(&self, input: Bytes) -> Result { 66 | let bytes = match self.library { 67 | DeflateLibrary::Gzip => { 68 | let mut encoder = GzEncoder::new(vec![], self.level); 69 | encoder.write_all(&input)?; 70 | encoder.finish()? 71 | } 72 | DeflateLibrary::Zlib => { 73 | let mut encoder = ZlibEncoder::new(vec![], self.level); 74 | encoder.write_all(&input)?; 75 | encoder.finish()? 
76 | } 77 | }; 78 | 79 | Ok(bytes.into()) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /standard/src/codecs/bincode_codec.rs: -------------------------------------------------------------------------------- 1 | use std::marker::PhantomData; 2 | 3 | use crate::traits::codec::{MessageDecoder, MessageEncoder}; 4 | use anyhow::Result; 5 | use bytes::{Buf, Bytes, BytesMut}; 6 | use serde::{de::DeserializeOwned, Serialize}; 7 | 8 | /// A basic codec that uses [bincode] to serialize and deserialize 9 | /// binary message payloads. 10 | #[derive(Debug, Clone)] 11 | pub struct BincodeCodec { 12 | _marker: PhantomData, 13 | } 14 | 15 | impl Default for BincodeCodec { 16 | fn default() -> Self { 17 | Self { 18 | _marker: PhantomData, 19 | } 20 | } 21 | } 22 | 23 | /// Encodes any `Item` implementing [Serialize](serde::Serialize) into a binary format via 24 | /// [bincode]. 25 | /// 26 | /// # Errors 27 | /// 28 | /// Returns [Err] if `item` fails to serialize. 29 | impl MessageEncoder for BincodeCodec { 30 | type Item = Item; 31 | 32 | fn encode(&self, item: Self::Item) -> Result { 33 | Ok(bincode::serialize(&item)?.into()) 34 | } 35 | } 36 | 37 | /// Decodes a [BytesMut](bytes::BytesMut) payload into any `Item` implementing 38 | /// [DeserializeOwned](serde::de::DeserializeOwned). 39 | /// 40 | /// # Errors 41 | /// 42 | /// Returns [Err] if the [BytesMut](bytes::BytesMut) payload fails to deserialize into `Item`. 43 | impl MessageDecoder for BincodeCodec { 44 | type Item = Item; 45 | 46 | fn decode(&self, buffer: &mut BytesMut) -> Result { 47 | Ok(bincode::deserialize_from(buffer.reader())?) 
48 | } 49 | } 50 | 51 | #[cfg(test)] 52 | mod tests { 53 | use super::*; 54 | use serde::{Deserialize, Serialize}; 55 | 56 | #[derive(Debug, PartialEq, Serialize, Deserialize)] 57 | struct Dummy { 58 | foo: String, 59 | bar: u64, 60 | } 61 | 62 | #[test] 63 | fn encodes_to_bincode_bytes() { 64 | let input = Dummy { 65 | foo: "foo".to_owned(), 66 | bar: 42, 67 | }; 68 | 69 | let codec = BincodeCodec::default(); 70 | let bytes = codec.encode(&input).unwrap(); 71 | let expected = Bytes::from("\x03\0\0\0\0\0\0\0foo*\0\0\0\0\0\0\0"); 72 | 73 | assert_eq!(expected, bytes); 74 | } 75 | 76 | #[test] 77 | fn decodes_bincode_bytes() { 78 | let mut buffer = BytesMut::from("\x03\0\0\0\0\0\0\0foo*\0\0\0\0\0\0\0"); 79 | let decoder = BincodeCodec::::default(); 80 | 81 | let expected = Dummy { 82 | foo: "foo".to_owned(), 83 | bar: 42, 84 | }; 85 | 86 | let decoded = decoder.decode(&mut buffer).unwrap(); 87 | 88 | assert_eq!(decoded, expected); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /client/src/streams/builder.rs: -------------------------------------------------------------------------------- 1 | use crate::{constants::RETENTION_POLICY_DEFAULT, traits::TryIntoU64, Client}; 2 | use selium_protocol::{Offset, Operation}; 3 | use selium_std::errors::Result; 4 | 5 | /// A convenient builder struct used to build a `Selium` stream, such as a 6 | /// [Pub/Sub](crate::streams::pubsub) or [Request/Reply](crate::streams::request_reply) stream. 7 | /// 8 | /// Similar to the [ClientBuilder](crate::ClientBuilder) struct, the [StreamBuilder] struct uses a 9 | /// type-level Finite State Machine to assure that any stream instance cannot be constructed with an 10 | /// invalid state. 
Using a [Publisher](crate::streams::pubsub::Publisher) stream as an example, the `open` 11 | /// method will not be in-scope unless the [StreamBuilder](crate::StreamBuilder) is in a pre-open state, which 12 | /// requires the stream to be configured, and a decoder to have been specified. 13 | /// 14 | /// **NOTE:** The [StreamBuilder] type is not intended to be used directly, but rather, is 15 | /// constructed via any of the methods on a [Client](crate::Client) instance. For example, the 16 | /// [subscriber](crate::Client::subscriber), [publisher](crate::Client::publisher), 17 | /// [requestor](crate::Client::requestor) and [replier](crate::Client::replier) methods will 18 | /// construct each respective StreamBuilder. 19 | pub struct StreamBuilder { 20 | pub(crate) state: T, 21 | pub(crate) client: Client, 22 | } 23 | 24 | impl StreamBuilder { 25 | pub fn new(client: Client, state: T) -> Self { 26 | Self { state, client } 27 | } 28 | } 29 | 30 | #[doc(hidden)] 31 | #[derive(Debug)] 32 | pub struct PubSubCommon { 33 | pub(crate) topic: String, 34 | pub(crate) retention_policy: u64, 35 | pub(crate) offset: Offset, 36 | pub(crate) operations: Vec, 37 | } 38 | 39 | impl PubSubCommon { 40 | pub fn new(topic: &str) -> Self { 41 | Self { 42 | topic: topic.to_owned(), 43 | retention_policy: RETENTION_POLICY_DEFAULT, 44 | offset: Offset::default(), 45 | operations: Vec::new(), 46 | } 47 | } 48 | 49 | #[doc(hidden)] 50 | pub fn map(&mut self, module_path: &str) { 51 | self.operations.push(Operation::Map(module_path.into())); 52 | } 53 | 54 | #[doc(hidden)] 55 | pub fn filter(&mut self, module_path: &str) { 56 | self.operations.push(Operation::Filter(module_path.into())); 57 | } 58 | 59 | #[doc(hidden)] 60 | pub fn retain(&mut self, policy: T) -> Result<()> { 61 | self.retention_policy = policy.try_into_u64()?; 62 | Ok(()) 63 | } 64 | 65 | #[doc(hidden)] 66 | pub fn seek(&mut self, offset: Offset) { 67 | self.offset = offset; 68 | } 69 | } 70 | 
-------------------------------------------------------------------------------- /client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "selium" 3 | version = "0.5.0" 4 | description = """ 5 | An extremely developer friendly, composable messaging platform with zero build 6 | time configuration. 7 | """ 8 | include = ["src/**/*", "ca.debug.der", "ca.prod.der"] 9 | edition.workspace = true 10 | authors.workspace = true 11 | license.workspace = true 12 | homepage.workspace = true 13 | repository.workspace = true 14 | readme.workspace = true 15 | keywords.workspace = true 16 | categories.workspace = true 17 | 18 | [dependencies] 19 | async-trait = "0.1" 20 | bytes = "1.5" 21 | chrono = { version = "0.4", optional = true, default-features = false, features = [ 22 | "clock", 23 | ] } 24 | futures = "0.3" 25 | quinn = "0.10" 26 | rustls = "0.21" 27 | rustls-pemfile = "1.0" 28 | selium-protocol = { version = "0.4", path = "../protocol" } 29 | selium-std = { version = "0.2", path = "../standard" } 30 | tokio = { version = "1.34", features = ["full"] } 31 | tracing = "0.1" 32 | 33 | [dev-dependencies] 34 | anyhow = "1.0" 35 | fake = "2.9" 36 | rand = "0.8" 37 | serde = "1.0" 38 | tracing-subscriber = "0.3" 39 | 40 | [features] 41 | chrono = ["dep:chrono"] 42 | std-compression = ["selium-std/compression"] 43 | std-codec = ["selium-std/codec"] 44 | std = ["std-compression", "std-codec"] 45 | __notopiccheck = ["selium-protocol/__notopiccheck"] 46 | 47 | [[example]] 48 | name = "publish" 49 | path = "examples/publish.rs" 50 | required-features = ["std-codec"] 51 | 52 | [[example]] 53 | name = "subscribe" 54 | path = "examples/subscribe.rs" 55 | required-features = ["std-codec"] 56 | 57 | [[example]] 58 | name = "publish_multitasking" 59 | path = "examples/publish_multitasking.rs" 60 | required-features = ["std-codec"] 61 | 62 | [[example]] 63 | name = "publish_bincode" 64 | path = 
"examples/publish_bincode.rs" 65 | required-features = ["std-codec"] 66 | 67 | [[example]] 68 | name = "subscribe_bincode" 69 | path = "examples/subscribe_bincode.rs" 70 | required-features = ["std-codec"] 71 | 72 | [[example]] 73 | name = "publish_chrono" 74 | path = "examples/publish_chrono.rs" 75 | required-features = ["std-codec", "chrono"] 76 | 77 | [[example]] 78 | name = "compression" 79 | path = "examples/compression.rs" 80 | required-features = ["std-codec", "std-compression"] 81 | 82 | [[example]] 83 | name = "decompression" 84 | path = "examples/decompression.rs" 85 | required-features = ["std-codec", "std-compression"] 86 | 87 | [[example]] 88 | name = "batching_and_compression" 89 | path = "examples/batching_and_compression.rs" 90 | required-features = ["std-compression"] 91 | 92 | [[example]] 93 | name = "request" 94 | path = "examples/request.rs" 95 | required-features = ["std-codec"] 96 | 97 | [[example]] 98 | name = "reply" 99 | path = "examples/reply.rs" 100 | required-features = ["std-codec"] 101 | -------------------------------------------------------------------------------- /server/src/sink/filter.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | pin::Pin, 3 | task::{Context, Poll}, 4 | }; 5 | 6 | use futures::{ready, Future, Sink}; 7 | use pin_project_lite::pin_project; 8 | 9 | pin_project! 
{ 10 | #[must_use = "sinks do nothing unless polled"] 11 | pub struct Filter 12 | where Si: Sink, 13 | { 14 | #[pin] 15 | sink: Si, 16 | f: F, 17 | #[pin] 18 | pending_fut: Option, 19 | pending_item: Option, 20 | } 21 | } 22 | 23 | impl Filter 24 | where 25 | Si: Sink, 26 | F: FnMut(&Item) -> Fut, 27 | Fut: Future, 28 | { 29 | pub(super) fn new(sink: Si, f: F) -> Self { 30 | Self { 31 | sink, 32 | f, 33 | pending_fut: None, 34 | pending_item: None, 35 | } 36 | } 37 | } 38 | 39 | impl Filter 40 | where 41 | Si: Sink, 42 | F: FnMut(&Item) -> Fut, 43 | Fut: Future, 44 | { 45 | // Completes the processing of previous item if any 46 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 47 | let mut this = self.project(); 48 | 49 | if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() { 50 | let res = ready!(fut.poll(cx)); 51 | this.pending_fut.set(None); 52 | if res { 53 | this.sink.start_send(this.pending_item.take().unwrap())?; 54 | } 55 | *this.pending_item = None; 56 | } 57 | 58 | Poll::Ready(Ok(())) 59 | } 60 | } 61 | 62 | impl Sink for Filter 63 | where 64 | Si: Sink, 65 | F: FnMut(&Item) -> Fut, 66 | Fut: Future, 67 | { 68 | type Error = Si::Error; 69 | 70 | fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 71 | ready!(self.as_mut().poll(cx))?; 72 | ready!(self.project().sink.poll_ready(cx)?); 73 | Poll::Ready(Ok(())) 74 | } 75 | 76 | fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { 77 | let mut this = self.project(); 78 | 79 | this.pending_fut.set(Some((this.f)(&item))); 80 | *this.pending_item = Some(item); 81 | 82 | Ok(()) 83 | } 84 | 85 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 86 | ready!(self.as_mut().poll(cx))?; 87 | ready!(self.project().sink.poll_flush(cx)?); 88 | Poll::Ready(Ok(())) 89 | } 90 | 91 | fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 92 | ready!(self.as_mut().poll(cx))?; 93 | 
ready!(self.project().sink.poll_close(cx)?); 94 | Poll::Ready(Ok(())) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /client/src/crypto/cert.rs: -------------------------------------------------------------------------------- 1 | use rustls::{Certificate, PrivateKey, RootCertStore}; 2 | use rustls_pemfile::{certs, pkcs8_private_keys, rsa_private_keys}; 3 | use selium_std::errors::{CryptoError, Result}; 4 | use std::{fs, path::Path}; 5 | 6 | pub type KeyPair = (Vec, PrivateKey); 7 | 8 | fn load_key>(path: T) -> Result { 9 | let path = path.as_ref(); 10 | let key = fs::read(path).map_err(CryptoError::OpenKeyFileError)?; 11 | let key = if path.extension().map_or(false, |x| x == "der") { 12 | PrivateKey(key) 13 | } else { 14 | let pkcs8 = 15 | pkcs8_private_keys(&mut &*key).map_err(CryptoError::MalformedPKCS8PrivateKey)?; 16 | 17 | match pkcs8.into_iter().next() { 18 | Some(x) => PrivateKey(x), 19 | None => { 20 | let rsa = 21 | rsa_private_keys(&mut &*key).map_err(CryptoError::MalformedPKCS1PrivateKey)?; 22 | 23 | rsa.into_iter() 24 | .next() 25 | .map(PrivateKey) 26 | .ok_or(CryptoError::NoPrivateKeysFound)? 27 | } 28 | } 29 | }; 30 | 31 | Ok(key) 32 | } 33 | 34 | pub fn load_certs>(path: T) -> Result> { 35 | let path = path.as_ref(); 36 | let cert_chain = fs::read(path).map_err(CryptoError::OpenCertFileError)?; 37 | let cert_chain = if path.extension().map_or(false, |x| x == "der") { 38 | vec![Certificate(cert_chain)] 39 | } else { 40 | certs(&mut &*cert_chain) 41 | .map_err(CryptoError::InvalidPemCertificate)? 42 | .into_iter() 43 | .map(Certificate) 44 | .collect() 45 | }; 46 | 47 | Ok(cert_chain) 48 | } 49 | 50 | /// Creates and returns a RootCertStore via certificates parsed from the provided 51 | /// filepath pointing to a Certificate Authority file. 52 | /// 53 | /// This function will fail if no certificates can be successfully parsed from 54 | /// the CA input file. 
55 | /// 56 | /// # Arguments 57 | /// 58 | /// * `certs` - The parsed CA certificates to add to the root store. 59 | /// 60 | pub(crate) fn load_root_store(certs: &[Certificate]) -> Result { 61 | let mut store = RootCertStore::empty(); 62 | store.add_parsable_certificates(certs); 63 | 64 | if store.is_empty() { 65 | return Err(CryptoError::InvalidRootCert.into()); 66 | } 67 | 68 | Ok(store) 69 | } 70 | 71 | /// Extracts a public/private key pair from the provided filepaths. 72 | /// 73 | /// This function will fail if no valid certificates or private key can be 74 | /// successfully parsed from the input files. 75 | /// 76 | /// # Arguments 77 | /// 78 | /// * `cert_file` - The filepath to the certificate file. 79 | /// * `key_file` - The filepath to the private key file. 80 | /// 81 | pub fn load_keypair>(cert_file: T, key_file: T) -> Result { 82 | let certs = load_certs(cert_file)?; 83 | let private_key = load_key(key_file)?; 84 | Ok((certs, private_key)) 85 | } 86 | -------------------------------------------------------------------------------- /log/src/config/mod.rs: -------------------------------------------------------------------------------- 1 | //! Configuration settings for an individual message log. 2 | 3 | mod flush_policy; 4 | 5 | pub use flush_policy::FlushPolicy; 6 | use std::{ 7 | path::{Path, PathBuf}, 8 | sync::Arc, 9 | time::Duration, 10 | }; 11 | 12 | /// The default maximum entries for log segment. 13 | pub const MAX_INDEX_ENTRIES_DEFAULT: u32 = 100_000; 14 | 15 | /// The default log retention period. 16 | pub const RETENTION_PERIOD_DEFAULT: Duration = Duration::from_secs(60 * 60 * 24 * 7); 17 | 18 | /// The default Cleaner task interval. 
19 | pub const CLEANER_INTERVAL_DEFAULT: Duration = Duration::from_secs(60 * 5); 20 | 21 | pub type SharedLogConfig = Arc; 22 | 23 | /// The LogConfig struct groups preferences for the log's behaviour, such as settings related to 24 | /// message retention, frequency of flushes, etc, and is shared across each component of the log. 25 | #[derive(Debug, Clone)] 26 | pub struct LogConfig { 27 | /// Indicates the maximum amount of entries a segment index will retain before a new segment 28 | /// is created. 29 | pub max_index_entries: u32, 30 | /// The path to the directory containing the segment index/data files. 31 | pub segments_path: PathBuf, 32 | /// The retention period for each individual segment. Determines when a segment is stale/expired, 33 | /// and can be cleaned up by the cleaner task. 34 | pub retention_period: Duration, 35 | /// The desired interval to poll the cleaner task to discover stale/expired segments. 36 | pub cleaner_interval: Duration, 37 | /// The desired flush policy for the log. The flush policy dictates the frequency of flushing based 38 | /// on the number of writes, and/or a defined interval. 39 | pub flush_policy: FlushPolicy, 40 | } 41 | 42 | impl LogConfig { 43 | /// Creates a new LogConfig builder with the provided segments path. All remaining fields are assigned 44 | /// reasonable defaults until overrided. 45 | pub fn from_path(path: impl AsRef) -> Self { 46 | Self { 47 | max_index_entries: MAX_INDEX_ENTRIES_DEFAULT, 48 | segments_path: path.as_ref().to_owned(), 49 | retention_period: RETENTION_PERIOD_DEFAULT, 50 | cleaner_interval: CLEANER_INTERVAL_DEFAULT, 51 | flush_policy: FlushPolicy::default(), 52 | } 53 | } 54 | 55 | /// Overrides the default `max_index_entries` field. 56 | pub fn max_index_entries(mut self, max_entries: u32) -> Self { 57 | self.max_index_entries = max_entries; 58 | self 59 | } 60 | 61 | /// Overrides the default `retention_period` field. 
62 | pub fn retention_period(mut self, period: Duration) -> Self { 63 | self.retention_period = period; 64 | self 65 | } 66 | 67 | /// Overrides the default `cleaner_interval` field. 68 | pub fn cleaner_interval(mut self, interval: Duration) -> Self { 69 | self.cleaner_interval = interval; 70 | self 71 | } 72 | 73 | /// Overrides the default `flush_policy` field. 74 | pub fn flush_policy(mut self, policy: FlushPolicy) -> Self { 75 | self.flush_policy = policy; 76 | self 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /standard/src/compression/brotli/comp.rs: -------------------------------------------------------------------------------- 1 | use crate::traits::compression::{Compress, CompressionLevel}; 2 | use anyhow::Result; 3 | use brotli::enc::backward_references::BrotliEncoderMode; 4 | use brotli::enc::writer::CompressorWriter; 5 | use brotli::enc::BrotliEncoderParams; 6 | use bytes::Bytes; 7 | use std::io::Write; 8 | 9 | /// Highest compression level available for Brotli. 10 | pub const HIGHEST_COMPRESSION: i32 = 11; 11 | 12 | /// Recommended compression level for Brotli. 13 | pub const RECOMMENDED_COMPRESSION: i32 = 6; 14 | 15 | /// Fastest compression level available for Brotli. 16 | pub const FASTEST_COMPRESSION: i32 = 1; 17 | 18 | const BUFFER_SIZE: usize = 4096; 19 | 20 | /// Compression half of Brotli implementation. 21 | /// 22 | /// `BrotliComp` implements [Compress], and can be constructed for use with a `Publisher` 23 | /// stream. 24 | #[derive(Debug, Default)] 25 | pub struct BrotliComp { 26 | params: BrotliEncoderParams, 27 | } 28 | 29 | impl BrotliComp { 30 | /// Constructs a new `BrotliComp` instance, using the provided mode to use when encoding 31 | /// a sequence with Brotli. See 32 | /// [BrotliEncoderMode](brotli::enc::backward_references::BrotliEncoderMode) for more 33 | /// information. 
34 | pub fn new(mode: BrotliEncoderMode) -> Self { 35 | let params = BrotliEncoderParams { 36 | mode, 37 | ..Default::default() 38 | }; 39 | 40 | Self { params } 41 | } 42 | 43 | /// Constructs a new instance with the 44 | /// [BROTLI_MODE_GENERIC](brotli::enc::backward_references::BrotliEncoderMode::BROTLI_MODE_GENERIC) 45 | /// encoding mode. 46 | pub fn generic() -> Self { 47 | BrotliComp::new(BrotliEncoderMode::BROTLI_MODE_GENERIC) 48 | } 49 | 50 | /// Constructs a new instance with the 51 | /// [BROTLI_MODE_TEXT](brotli::enc::backward_references::BrotliEncoderMode::BROTLI_MODE_TEXT) 52 | /// encoding mode. 53 | pub fn text() -> Self { 54 | BrotliComp::new(BrotliEncoderMode::BROTLI_MODE_TEXT) 55 | } 56 | 57 | /// Constructs a new instance with the 58 | /// [BROTLI_MODE_FONT](brotli::enc::backward_references::BrotliEncoderMode::BROTLI_MODE_FONT) 59 | /// encoding mode. 60 | pub fn font() -> Self { 61 | BrotliComp::new(BrotliEncoderMode::BROTLI_MODE_FONT) 62 | } 63 | } 64 | 65 | impl CompressionLevel for BrotliComp { 66 | fn highest_ratio(mut self) -> Self { 67 | self.params.quality = HIGHEST_COMPRESSION; 68 | self 69 | } 70 | 71 | fn balanced(mut self) -> Self { 72 | self.params.quality = RECOMMENDED_COMPRESSION; 73 | self 74 | } 75 | 76 | fn fastest(mut self) -> Self { 77 | self.params.quality = FASTEST_COMPRESSION; 78 | self 79 | } 80 | 81 | fn level(mut self, level: u32) -> Self { 82 | self.params.quality = level.try_into().unwrap(); 83 | self 84 | } 85 | } 86 | 87 | impl Compress for BrotliComp { 88 | fn compress(&self, input: Bytes) -> Result { 89 | let mut encoder = CompressorWriter::with_params(vec![], BUFFER_SIZE, &self.params); 90 | encoder.write_all(&input)?; 91 | encoder.flush()?; 92 | 93 | Ok(encoder.into_inner().into()) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /server/README.md: -------------------------------------------------------------------------------- 1 | # Selium Server 2 | 3 | This 
is the server-side binary for the Selium platform. It takes a very minimal set of 4 | configuration items listed under "Usage" below, however it is recommended that you use 5 | default parameters unless you have a specific reason not to. 6 | 7 | ## Getting Started 8 | 9 | The Selium server binary can be downloaded from 10 | [GitHub](https://github.com/orgs/seliumlabs/packages?repo_name=selium), or compiled from 11 | [crates.io](https://crates.io/crates/selium-server). Once you have installed Selium 12 | Server, its usage is very straightforward. 13 | 14 | #### Start a test server 15 | 16 | To start a server as quickly as possible **_for testing_**, you can generate a set of self-signed 17 | certificates with the `selium-tools` CLI. 18 | 19 | >Note that using this option in production will leave clients exposed to 20 | person-in-the-middle attacks! 21 | 22 | ```bash 23 | $ cargo install selium-tools 24 | $ cargo run --bin selium-tools gen-certs 25 | ``` 26 | 27 | Then you can start a selium-server! 28 | 29 | ```bash 30 | $ selium-server --bind-addr=127.0.0.1:7001 31 | ``` 32 | 33 | Running this command will create a new server instance that listens on the loopback 34 | interface, port 7001. 35 | 36 | #### Start a production-ready server 37 | 38 | For production use, you will likely want to generate a signed certificate keypair. Using 39 | your own certificate authority for this process is perfectly acceptable, provided that 40 | you are able to distribute this certificate to all potential clients in advance. For 41 | instances where this is not possible, a third party CA provider like 42 | [LetsEncrypt](https://letsencrypt.org) can help.
43 | 44 | Here is a minimal example to start a production-ready instance of `selium-server`: 45 | 46 | ```bash 47 | $ selium-server --bind-addr=0.0.0.0:7001 --ca /path/to/ca.der --key /path/to/key.der --cert /path/to/cert.der 48 | ``` 49 | 50 | Running this command will create a new server instance that listens on the public 51 | interface, port 7001. 52 | 53 | #### Debugging 54 | 55 | `selium-server` can output comprehensive logs to help you debug issues with your Selium 56 | instance. To enable, use the `-v` (verbose) flag: 57 | 58 | ```bash 59 | $ selium-server ... -v # Display warnings 60 | $ selium-server ... -vv # Display info 61 | $ selium-server ... -vvv # Display debug 62 | $ selium-server ... -vvvv # Display trace 63 | ``` 64 | 65 | ## Usage 66 | 67 | ``` 68 | Usage: selium-server [OPTIONS] --bind-addr <--key |--cert |--self-signed> 69 | 70 | Options: 71 | -a, --bind-addr 72 | Address to bind this server to 73 | --ca 74 | TLS CA certificate 75 | -k, --key 76 | TLS private key 77 | -c, --cert 78 | TLS certificate 79 | --stateless-retry 80 | Enable stateless retries 81 | --keylog 82 | File to log TLS keys to for debugging 83 | --max-idle-timeout 84 | Maximum time in ms a client can idle waiting for data - default to 15 seconds [default: 15000] 85 | -v, --verbose... 86 | More output per occurrence 87 | -q, --quiet... 
88 | Less output per occurrence 89 | -h, --help 90 | Print help 91 | -V, --version 92 | Print version 93 | ``` 94 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Create Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - release 7 | 8 | jobs: 9 | release: 10 | name: Create GitHub Release 11 | strategy: 12 | max-parallel: 1 13 | matrix: 14 | name: [standard, protocol, client, server, tools, log] 15 | include: 16 | - name: standard 17 | display: Standard 18 | repo: selium-std 19 | - name: protocol 20 | display: protocol 21 | repo: selium-protocol 22 | - name: client 23 | display: Client 24 | repo: selium 25 | - name: server 26 | display: Server 27 | repo: selium-server 28 | - name: tools 29 | display: Tools 30 | repo: selium-tools 31 | - name: log 32 | display: Log 33 | repo: selium-log 34 | 35 | runs-on: ubuntu-latest 36 | steps: 37 | - name: Checkout code 38 | uses: actions/checkout@v3 39 | with: 40 | fetch-depth: 0 41 | 42 | - name: Get latest tag 43 | id: latesttag 44 | uses: "WyriHaximus/github-action-get-previous-tag@v1" 45 | with: 46 | fallback: ${{ matrix.name }}-0.0.0 47 | prefix: ${{ matrix.name }}- 48 | 49 | - name: Get crate version 50 | id: version 51 | run: echo value=$(sed -ne 's/^version = "\(.*\)"/\1/p' < ${{ matrix.name }}/Cargo.toml) >> $GITHUB_OUTPUT 52 | 53 | - name: Get CHANGELOG entry 54 | id: changelog 55 | run: | 56 | body=$(python ci/changelog.py ${{ matrix.name }} ${{ steps.version.outputs.value }}) 57 | echo body=${body//$'\n'/'%0A'} >> $GITHUB_OUTPUT 58 | if: ${{ steps.latesttag.outputs.tag != format('{0}-{1}', matrix.name, steps.version.outputs.value) }} 59 | 60 | - name: Tag branch 61 | uses: rickstaa/action-create-tag@v1 62 | with: 63 | tag: ${{ format('{0}-{1}', matrix.name, steps.version.outputs.value) }} 64 | message: ${{ format('{0}-{1}', matrix.name, 
steps.version.outputs.value) }} 65 | if: ${{ steps.latesttag.outputs.tag != format('{0}-{1}', matrix.name, steps.version.outputs.value) }} 66 | 67 | - name: Create release 68 | uses: softprops/action-gh-release@v1 69 | env: 70 | GITHUB_TOKEN: ${{ secrets.RELEASE_PAT }} 71 | with: 72 | tag_name: ${{ format('{0}-{1}', matrix.name, steps.version.outputs.value) }} 73 | name: ${{ matrix.display }} Release ${{ steps.version.outputs.value }} 74 | body: ${{ steps.changelog.outputs.body }} 75 | draft: false 76 | prerelease: false 77 | if: ${{ steps.latesttag.outputs.tag != format('{0}-{1}', matrix.name, steps.version.outputs.value) }} 78 | 79 | - name: Login to Crates.io 80 | uses: actions-rs/cargo@v1 81 | with: 82 | command: login 83 | args: ${{ secrets.CRATES_TOKEN }} 84 | if: ${{ steps.latesttag.outputs.tag != format('{0}-{1}', matrix.name, steps.version.outputs.value) }} 85 | 86 | - name: Cargo publish 87 | uses: actions-rs/cargo@v1 88 | with: 89 | command: publish 90 | args: -p ${{ matrix.repo }} 91 | if: ${{ steps.latesttag.outputs.tag != format('{0}-{1}', matrix.name, steps.version.outputs.value) }} 92 | -------------------------------------------------------------------------------- /client/src/connection.rs: -------------------------------------------------------------------------------- 1 | use crate::utils::net::get_socket_addrs; 2 | use quinn::{ClientConfig, Connection, Endpoint, TransportConfig}; 3 | use rustls::{Certificate, PrivateKey, RootCertStore}; 4 | use selium_std::errors::{ParseEndpointAddressError, QuicError, Result}; 5 | use std::sync::Arc; 6 | use std::{net::SocketAddr, time::Duration}; 7 | use tokio::sync::Mutex; 8 | 9 | const ALPN_QUIC_HTTP: &[&[u8]] = &[b"hq-29"]; 10 | const ENDPOINT_ADDRESS: &str = "[::]:0"; 11 | 12 | pub type SharedConnection = Arc>; 13 | 14 | #[derive(Debug, Clone)] 15 | pub struct ConnectionOptions { 16 | certs: Vec, 17 | key: PrivateKey, 18 | root_store: RootCertStore, 19 | keep_alive: u64, 20 | } 21 | 22 | impl 
ConnectionOptions { 23 | pub fn new( 24 | certs: &[Certificate], 25 | key: PrivateKey, 26 | root_store: RootCertStore, 27 | keep_alive: u64, 28 | ) -> Self { 29 | Self { 30 | certs: certs.to_vec(), 31 | key, 32 | root_store, 33 | keep_alive, 34 | } 35 | } 36 | } 37 | 38 | #[derive(Debug, Clone)] 39 | pub struct ClientConnection { 40 | addr: SocketAddr, 41 | connection: Connection, 42 | client_config: ClientConfig, 43 | } 44 | 45 | impl ClientConnection { 46 | pub async fn connect(addr: &str, options: ConnectionOptions) -> Result { 47 | let client_config = configure_client(options); 48 | let addr = get_socket_addrs(addr)?; 49 | let connection = connect_to_endpoint(addr, client_config.clone()).await?; 50 | 51 | Ok(Self { 52 | addr, 53 | connection, 54 | client_config, 55 | }) 56 | } 57 | 58 | pub fn conn(&self) -> &Connection { 59 | &self.connection 60 | } 61 | 62 | pub async fn reconnect(&mut self) -> Result<()> { 63 | if self.connection.close_reason().is_some() { 64 | let connection = connect_to_endpoint(self.addr, self.client_config.clone()).await?; 65 | self.connection = connection; 66 | } 67 | 68 | Ok(()) 69 | } 70 | } 71 | 72 | fn configure_client(options: ConnectionOptions) -> ClientConfig { 73 | let mut crypto = rustls::ClientConfig::builder() 74 | .with_safe_defaults() 75 | .with_root_certificates(options.root_store) 76 | .with_client_auth_cert(options.certs, options.key) 77 | .unwrap(); 78 | 79 | crypto.alpn_protocols = ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); 80 | 81 | let mut config = ClientConfig::new(Arc::new(crypto)); 82 | let mut transport_config = TransportConfig::default(); 83 | let keep_alive = Duration::from_millis(options.keep_alive); 84 | 85 | transport_config.keep_alive_interval(Some(keep_alive)); 86 | config.transport_config(Arc::new(transport_config)); 87 | 88 | config 89 | } 90 | 91 | async fn connect_to_endpoint(addr: SocketAddr, config: ClientConfig) -> Result { 92 | let endpoint_addr = ENDPOINT_ADDRESS 93 | .parse::() 94 | 
.map_err(ParseEndpointAddressError::InvalidAddress)?; 95 | 96 | let mut endpoint = Endpoint::client(endpoint_addr)?; 97 | endpoint.set_default_client_config(config); 98 | let connection = endpoint 99 | .connect(addr, "localhost") 100 | .map_err(QuicError::ConnectError)? 101 | .await 102 | .map_err(QuicError::ConnectionError)?; 103 | 104 | Ok(connection) 105 | } 106 | -------------------------------------------------------------------------------- /client/src/batching/batch_config.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | /// Configuration type used for tuning the [message batching](crate::batching) algorithm parameters. 4 | /// 5 | /// The `BatchConfig` type includes high-level associated functions for constructing preset batching 6 | /// configurations, which suit most use cases. 7 | /// 8 | /// ``` 9 | /// use selium::batching::BatchConfig; 10 | /// use std::time::Duration; 11 | /// 12 | /// let high_throughput = BatchConfig::high_throughput(); 13 | /// let balanced = BatchConfig::balanced(); 14 | /// let minimal = BatchConfig::minimal_payload(); 15 | /// ``` 16 | /// 17 | /// However, if the presets are not to your liking, you can use the standard `new` constructor to 18 | /// specify your own `batch_size` and `interval` properties. 19 | /// 20 | /// ``` 21 | /// use selium::batching::BatchConfig; 22 | /// use std::time::Duration; 23 | /// 24 | /// let custom = BatchConfig::new(1_000, Duration::from_millis(10)); 25 | /// ``` 26 | /// 27 | /// It's also possible to tweak an existing preset via the `BatchConfig::batch_size` and 28 | /// `BatchConfig::interval` methods. 
29 | /// 30 | /// ``` 31 | /// use selium::batching::BatchConfig; 32 | /// use std::time::Duration; 33 | /// 34 | /// let tweaked_high_throughput = BatchConfig::high_throughput() 35 | ///     .interval(Duration::from_millis(10)); 36 | /// ``` 37 | #[derive(Debug, Clone)] 38 | pub struct BatchConfig { 39 | pub(crate) batch_size: u32, 40 | pub(crate) interval: Duration, 41 | } 42 | 43 | impl Default for BatchConfig { 44 | fn default() -> Self { 45 | Self::balanced() 46 | } 47 | } 48 | 49 | impl BatchConfig { 50 | /// Constructs a new `BatchConfig` instance with the provided `batch_size` and `interval` 51 | /// arguments. 52 | pub fn new(batch_size: u32, interval: Duration) -> Self { 53 | Self { 54 | batch_size, 55 | interval, 56 | } 57 | } 58 | 59 | /// Constructs a new `BatchConfig` instance with a high throughput preset. 60 | /// 61 | /// Typically used when a [Publisher](crate::streams::pubsub::Publisher) sends small messages at a high rate. 62 | pub fn high_throughput() -> Self { 63 | Self::new(250, Duration::from_millis(100)) 64 | } 65 | 66 | /// Constructs a new `BatchConfig` instance with a balanced preset. 67 | /// 68 | /// Typically used to achieve a balance between throughput and payload size. 69 | /// [Publisher](crate::streams::pubsub::Publisher) streams that send mid-sized messages at a high rate 70 | /// will get the most benefit out of this preset. 71 | pub fn balanced() -> Self { 72 | Self::new(100, Duration::from_millis(100)) 73 | } 74 | 75 | /// Constructs a new `BatchConfig` instance with a preset that favours minimal payloads. 76 | /// 77 | /// Typically used when a [Publisher](crate::streams::pubsub::Publisher) sends large messages, and wants to 78 | /// remain mindful of the wire protocol message size limits while still optimizing network 79 | /// calls and compression. 80 | pub fn minimal_payload() -> Self { 81 | Self::new(10, Duration::from_millis(100)) 82 | } 83 | 84 | /// Updates the `batch_size` of a `BatchConfig` instance.
Helpful for tweaking existing presets. 85 | pub fn batch_size(mut self, size: u32) -> Self { 86 | self.batch_size = size; 87 | self 88 | } 89 | 90 | /// Updates the `interval` of a `BatchConfig` instance. Helpful for tweaking existing presets. 91 | pub fn interval(mut self, interval: Duration) -> Self { 92 | self.interval = interval; 93 | self 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /client/src/client/mod.rs: -------------------------------------------------------------------------------- 1 | mod builder; 2 | mod cloud; 3 | mod custom; 4 | 5 | use crate::connection::SharedConnection; 6 | use crate::keep_alive::BackoffStrategy; 7 | use crate::pubsub::states::{PublisherWantsEncoder, SubscriberWantsDecoder}; 8 | use crate::request_reply::states::{ReplierWantsRequestDecoder, RequestorWantsRequestEncoder}; 9 | use crate::StreamBuilder; 10 | 11 | pub use builder::*; 12 | pub use cloud::*; 13 | pub use custom::*; 14 | 15 | /// Constructs a Custom [ClientBuilder] in its initial state to prepare to connect to a self-hosted 16 | /// `Selium` server. 17 | /// 18 | /// Prefer invoking this function over explicitly constructing a [ClientBuilder]. 19 | pub fn custom() -> ClientBuilder { 20 | ClientBuilder { 21 | state: CustomWantsEndpoint::default(), 22 | } 23 | } 24 | 25 | /// Constructs a Cloud [ClientBuilder] in its initial state to prepare to connect to `Selium 26 | /// Cloud`. 27 | /// 28 | /// **NOTE:** Selium Cloud is a managed service for Selium, eliminating the need to run and maintain your 29 | /// own Selium Server. If you have registered for an account already, use this endpoint to connect 30 | /// to the Cloud. Otherwise you can create a free account at [selium.com](https://selium.com). 31 | /// 32 | /// Prefer invoking this function over explicitly constructing a [ClientBuilder]. 
33 | pub fn cloud() -> ClientBuilder { 34 | ClientBuilder { 35 | state: CloudWantsCertAndKey::default(), 36 | } 37 | } 38 | 39 | /// A client containing an authenticated connection to either `Selium Cloud` or a self-hosted `Selium` 40 | /// server. 41 | /// 42 | /// The [Client] struct is the entry point to opening various `Selium` streams, such as the 43 | /// [Pub/Sub](crate::streams::pubsub) streams and [Request/Reply](crate::streams::request_reply) streams. 44 | /// 45 | /// Multiple streams can be opened from a single connected [Client] without extinguishing the underlying 46 | /// connection, through the use of [QUIC](https://quicwg.org) multiplexing. 47 | /// 48 | /// **NOTE:** The [Client] struct should never be used directly, and is intended to be constructed by a 49 | /// [ClientBuilder], following a successfully established connection to the `Selium` server. 50 | #[derive(Clone)] 51 | pub struct Client { 52 | pub(crate) connection: SharedConnection, 53 | pub(crate) backoff_strategy: BackoffStrategy, 54 | } 55 | 56 | impl Client { 57 | /// Returns a new [StreamBuilder](crate::StreamBuilder) instance, with an initial `Subscriber` 58 | /// state. 59 | pub fn subscriber(&self, topic: &str) -> StreamBuilder { 60 | StreamBuilder::new(self.clone(), SubscriberWantsDecoder::new(topic)) 61 | } 62 | 63 | /// Returns a new [StreamBuilder](crate::StreamBuilder) instance, with an initial `Publisher` 64 | /// state. 65 | pub fn publisher(&self, topic: &str) -> StreamBuilder { 66 | StreamBuilder::new(self.clone(), PublisherWantsEncoder::new(topic)) 67 | } 68 | 69 | /// Returns a new [StreamBuilder](crate::StreamBuilder) instance, with an initial `Replier` 70 | /// state. 71 | pub fn replier(&self, endpoint: &str) -> StreamBuilder { 72 | StreamBuilder::new(self.clone(), ReplierWantsRequestDecoder::new(endpoint)) 73 | } 74 | 75 | /// Returns a new [StreamBuilder](crate::StreamBuilder) instance, with an initial `Requestor` 76 | /// state. 
77 | pub fn requestor(&self, endpoint: &str) -> StreamBuilder { 78 | StreamBuilder::new(self.clone(), RequestorWantsRequestEncoder::new(endpoint)) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /benchmarks/src/runner.rs: -------------------------------------------------------------------------------- 1 | use crate::{args::Args, results::BenchmarkResults}; 2 | use anyhow::Result; 3 | use clap::Parser; 4 | use futures::SinkExt; 5 | use futures::{future::join_all, StreamExt}; 6 | use selium::batching::BatchConfig; 7 | use selium::std::codecs::StringCodec; 8 | use selium::std::compression::lz4::{Lz4Comp, Lz4Decomp}; 9 | use selium::{prelude::*, Client}; 10 | use selium_server::args::UserArgs; 11 | use selium_server::server::Server; 12 | use std::time::Instant; 13 | 14 | const SERVER_ADDR: &str = "127.0.0.1:7001"; 15 | 16 | fn start_server() -> Result<()> { 17 | let args = UserArgs::parse_from([ 18 | "", 19 | "--cert", 20 | "certs/server/localhost.der", 21 | "--key", 22 | "certs/server/localhost.key.der", 23 | "--ca", 24 | "certs/server/ca.der", 25 | "-vvvv", 26 | ]); 27 | 28 | let server = Server::try_from(args)?; 29 | 30 | tokio::spawn(async move { 31 | server.listen().await.expect("Failed to spawn server"); 32 | }); 33 | 34 | Ok(()) 35 | } 36 | 37 | fn generate_message(message_size: usize) -> String { 38 | (0..message_size) 39 | .map(|i| (i % 25 + 97) as u8 as char) 40 | .collect() 41 | } 42 | 43 | pub struct BenchmarkRunner { 44 | connection: Client, 45 | } 46 | 47 | impl BenchmarkRunner { 48 | pub async fn init() -> Result { 49 | start_server()?; 50 | 51 | let connection = selium::custom() 52 | .endpoint(SERVER_ADDR) 53 | .with_certificate_authority("certs/client/ca.der")? 54 | .with_cert_and_key( 55 | "certs/client/localhost.der", 56 | "certs/client/localhost.key.der", 57 | )? 
58 | .connect() 59 | .await?; 60 | 61 | Ok(Self { connection }) 62 | } 63 | 64 | pub async fn run(&self, args: Args) -> Result { 65 | let mut tasks = Vec::with_capacity(args.num_of_streams as usize); 66 | let message = generate_message(args.message_size as usize); 67 | let start = Instant::now(); 68 | 69 | let mut subscriber = self 70 | .connection 71 | .subscriber("/acmeco/stocks") 72 | .with_decoder(StringCodec); 73 | 74 | if args.enable_compression { 75 | subscriber = subscriber.with_decompression(Lz4Decomp); 76 | } 77 | 78 | let mut subscriber = subscriber.open().await?; 79 | 80 | for _ in 0..args.num_of_streams { 81 | let mut publisher = self 82 | .connection 83 | .publisher("/acmeco/stocks") 84 | .with_encoder(StringCodec); 85 | 86 | if args.enable_batching { 87 | publisher = publisher.with_batching(BatchConfig::high_throughput()); 88 | } 89 | 90 | if args.enable_compression { 91 | publisher = publisher.with_compression(Lz4Comp); 92 | } 93 | 94 | let mut publisher = publisher.open().await?; 95 | let message = message.clone(); 96 | 97 | let handle = tokio::spawn(async move { 98 | for _ in 0..args.num_of_messages / args.num_of_streams { 99 | publisher.send(message.to_owned()).await.unwrap(); 100 | } 101 | 102 | publisher.finish().await.unwrap(); 103 | }); 104 | 105 | tasks.push(handle); 106 | } 107 | 108 | let handle = tokio::spawn(async move { 109 | for _ in 0..args.num_of_messages { 110 | let _ = subscriber.next().await; 111 | } 112 | }); 113 | 114 | tasks.push(handle); 115 | join_all(tasks).await; 116 | let elapsed = start.elapsed(); 117 | 118 | Ok(BenchmarkResults::calculate(elapsed, args)) 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /client/src/streams/request_reply/states.rs: -------------------------------------------------------------------------------- 1 | use crate::streams::aliases::{Comp, Decomp}; 2 | use std::{pin::Pin, time::Duration}; 3 | 4 | pub const DEFAULT_REQUEST_TIMEOUT: Duration = 
Duration::from_secs(10); 5 | 6 | #[doc(hidden)] 7 | pub struct RequestorWantsRequestEncoder { 8 | pub(crate) endpoint: String, 9 | } 10 | 11 | impl RequestorWantsRequestEncoder { 12 | pub fn new(endpoint: &str) -> Self { 13 | Self { 14 | endpoint: endpoint.to_owned(), 15 | } 16 | } 17 | } 18 | 19 | pub struct RequestorWantsReplyDecoder { 20 | pub(crate) endpoint: String, 21 | pub(crate) encoder: E, 22 | pub(crate) compression: Option, 23 | } 24 | 25 | impl RequestorWantsReplyDecoder { 26 | pub fn new(prev: RequestorWantsRequestEncoder, encoder: E) -> Self { 27 | Self { 28 | endpoint: prev.endpoint, 29 | encoder, 30 | compression: None, 31 | } 32 | } 33 | } 34 | 35 | pub struct RequestorWantsOpen { 36 | pub(crate) endpoint: String, 37 | pub(crate) encoder: E, 38 | pub(crate) compression: Option, 39 | pub(crate) decoder: D, 40 | pub(crate) decompression: Option, 41 | pub(crate) request_timeout: Duration, 42 | } 43 | 44 | impl RequestorWantsOpen { 45 | pub fn new(prev: RequestorWantsReplyDecoder, decoder: D) -> Self { 46 | Self { 47 | endpoint: prev.endpoint, 48 | encoder: prev.encoder, 49 | compression: prev.compression, 50 | decoder, 51 | decompression: None, 52 | request_timeout: DEFAULT_REQUEST_TIMEOUT, 53 | } 54 | } 55 | } 56 | 57 | #[doc(hidden)] 58 | pub struct ReplierWantsRequestDecoder { 59 | pub(crate) endpoint: String, 60 | } 61 | 62 | impl ReplierWantsRequestDecoder { 63 | pub fn new(endpoint: &str) -> Self { 64 | Self { 65 | endpoint: endpoint.to_owned(), 66 | } 67 | } 68 | } 69 | 70 | #[doc(hidden)] 71 | pub struct ReplierWantsReplyEncoder { 72 | pub(crate) endpoint: String, 73 | pub(crate) decoder: D, 74 | pub(crate) decompression: Option, 75 | } 76 | 77 | impl ReplierWantsReplyEncoder { 78 | pub fn new(prev: ReplierWantsRequestDecoder, decoder: D) -> Self { 79 | Self { 80 | endpoint: prev.endpoint, 81 | decoder, 82 | decompression: None, 83 | } 84 | } 85 | } 86 | 87 | #[doc(hidden)] 88 | pub struct ReplierWantsHandler { 89 | pub(crate) endpoint: 
String, 90 | pub(crate) decoder: D, 91 | pub(crate) decompression: Option, 92 | pub(crate) encoder: E, 93 | pub(crate) compression: Option, 94 | } 95 | 96 | impl ReplierWantsHandler { 97 | pub fn new(prev: ReplierWantsReplyEncoder, encoder: E) -> Self { 98 | Self { 99 | endpoint: prev.endpoint, 100 | decoder: prev.decoder, 101 | decompression: prev.decompression, 102 | encoder, 103 | compression: None, 104 | } 105 | } 106 | } 107 | 108 | #[doc(hidden)] 109 | pub struct ReplierWantsOpen { 110 | pub(crate) endpoint: String, 111 | pub(crate) decoder: D, 112 | pub(crate) decompression: Option, 113 | pub(crate) encoder: E, 114 | pub(crate) compression: Option, 115 | pub(crate) handler: Pin>, 116 | } 117 | 118 | impl ReplierWantsOpen { 119 | pub fn new(prev: ReplierWantsHandler, handler: F) -> Self { 120 | Self { 121 | endpoint: prev.endpoint, 122 | decoder: prev.decoder, 123 | decompression: prev.decompression, 124 | encoder: prev.encoder, 125 | compression: prev.compression, 126 | handler: Box::pin(handler), 127 | } 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /standard/benches/compression.rs: -------------------------------------------------------------------------------- 1 | use bytes::Bytes; 2 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 3 | use selium_std::compression::{brotli, deflate, lz4, zstd}; 4 | use selium_std::traits::compression::{Compress, CompressionLevel, Decompress}; 5 | 6 | pub fn deflate_benchmarks(c: &mut Criterion) { 7 | c.bench_function("DEFLATE | gzip | fastest", |b| { 8 | b.iter(|| { 9 | let payload = black_box(Bytes::from(black_box("hello, world!"))); 10 | 11 | let compressed = deflate::DeflateComp::gzip() 12 | .fastest() 13 | .compress(payload) 14 | .unwrap(); 15 | 16 | deflate::DeflateDecomp::gzip() 17 | .decompress(black_box(compressed)) 18 | .unwrap(); 19 | }) 20 | }); 21 | 22 | c.bench_function("DEFLATE | zlib | fastest", |b| { 23 | b.iter(|| { 24 | let 
payload = black_box(Bytes::from(black_box("hello, world!"))); 25 | 26 | let compressed = deflate::DeflateComp::zlib() 27 | .fastest() 28 | .compress(payload) 29 | .unwrap(); 30 | 31 | deflate::DeflateDecomp::zlib() 32 | .decompress(black_box(compressed)) 33 | .unwrap(); 34 | }) 35 | }); 36 | } 37 | 38 | pub fn zstd_benchmarks(c: &mut Criterion) { 39 | c.bench_function("zstd | fastest", |b| { 40 | b.iter(|| { 41 | let payload = black_box(Bytes::from(black_box("hello, world!"))); 42 | let compressed = zstd::ZstdComp::new().fastest().compress(payload).unwrap(); 43 | 44 | zstd::ZstdDecomp.decompress(black_box(compressed)).unwrap(); 45 | }) 46 | }); 47 | } 48 | 49 | pub fn brotli_benchmarks(c: &mut Criterion) { 50 | c.bench_function("brotli | text | fastest", |b| { 51 | b.iter(|| { 52 | let payload = black_box(Bytes::from(black_box("hello, world!"))); 53 | 54 | let compressed = brotli::BrotliComp::text() 55 | .fastest() 56 | .compress(payload) 57 | .unwrap(); 58 | 59 | brotli::BrotliDecomp 60 | .decompress(black_box(compressed)) 61 | .unwrap(); 62 | }) 63 | }); 64 | 65 | c.bench_function("brotli | generic | fastest", |b| { 66 | b.iter(|| { 67 | let payload = black_box(Bytes::from(black_box("hello, world!"))); 68 | 69 | let compressed = brotli::BrotliComp::generic() 70 | .fastest() 71 | .compress(payload) 72 | .unwrap(); 73 | 74 | brotli::BrotliDecomp 75 | .decompress(black_box(compressed)) 76 | .unwrap(); 77 | }) 78 | }); 79 | 80 | c.bench_function("brotli | font | fastest", |b| { 81 | b.iter(|| { 82 | let payload = black_box(Bytes::from(black_box("hello, world!"))); 83 | 84 | let compressed = brotli::BrotliComp::font() 85 | .fastest() 86 | .compress(payload) 87 | .unwrap(); 88 | 89 | brotli::BrotliDecomp 90 | .decompress(black_box(compressed)) 91 | .unwrap(); 92 | }) 93 | }); 94 | } 95 | 96 | pub fn lz4_benchmarks(c: &mut Criterion) { 97 | c.bench_function("lz4", |b| { 98 | b.iter(|| { 99 | let payload = black_box(Bytes::from(black_box("hello, world!"))); 100 | let 
compressed = lz4::Lz4Comp.compress(payload).unwrap(); 101 | 102 | lz4::Lz4Decomp.decompress(black_box(compressed)).unwrap(); 103 | }) 104 | }); 105 | } 106 | 107 | criterion_group!( 108 | benches, 109 | deflate_benchmarks, 110 | zstd_benchmarks, 111 | brotli_benchmarks, 112 | lz4_benchmarks 113 | ); 114 | 115 | criterion_main!(benches); 116 | -------------------------------------------------------------------------------- /server/src/sink/ordered.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | }; 6 | 7 | use futures::{ready, Sink, Stream}; 8 | use log::{debug, trace}; 9 | use pin_project_lite::pin_project; 10 | 11 | pin_project! { 12 | #[project = OrderedProj] 13 | #[derive(Debug)] 14 | #[must_use = "sinks do nothing unless polled"] 15 | pub struct Ordered { 16 | #[pin] 17 | sink: Si, 18 | cache: HashMap, 19 | last_sent: usize, 20 | } 21 | } 22 | 23 | impl, Item> Ordered { 24 | pub(super) fn new(sink: Si, last_sent: usize) -> Self { 25 | Self { 26 | sink, 27 | cache: HashMap::new(), 28 | last_sent, 29 | } 30 | } 31 | 32 | fn try_send_cached(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 33 | let mut this = self.project(); 34 | ready!(this.sink.as_mut().poll_ready(cx))?; 35 | while let Some(item) = this.cache.remove(&(*this.last_sent + 1)) { 36 | trace!( 37 | "Sending ordered message from cache: {}", 38 | *this.last_sent + 1 39 | ); 40 | this.sink.as_mut().start_send(item)?; 41 | *this.last_sent += 1; 42 | if !this.cache.is_empty() { 43 | ready!(this.sink.as_mut().poll_ready(cx))?; 44 | } 45 | } 46 | Poll::Ready(Ok(())) 47 | } 48 | } 49 | 50 | impl Stream for Ordered 51 | where 52 | S: Sink + Stream, 53 | { 54 | type Item = S::Item; 55 | 56 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 57 | self.project().sink.poll_next(cx) 58 | } 59 | 60 | fn size_hint(&self) -> (usize, Option) { 61 | self.sink.size_hint() 62 | } 
63 | } 64 | 65 | impl, Item> Sink<(usize, Item)> for Ordered { 66 | type Error = Si::Error; 67 | 68 | fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { 69 | Poll::Ready(Ok(())) 70 | } 71 | 72 | fn start_send(self: Pin<&mut Self>, (seq, item): (usize, Item)) -> Result<(), Self::Error> { 73 | let OrderedProj { 74 | sink, 75 | cache, 76 | last_sent, 77 | } = self.project(); 78 | 79 | if seq == *last_sent + 1 { 80 | *last_sent = seq; 81 | trace!("Sending ordered message: {seq}"); 82 | sink.start_send(item) 83 | } else if seq < *last_sent { 84 | debug!("Sending sequence {seq} out of order (last sent={last_sent})"); 85 | trace!("Sending ordered message: {seq}"); 86 | sink.start_send(item) 87 | } else { 88 | trace!( 89 | "Caching ordered message: {seq} - waiting for {}", 90 | *last_sent + 1 91 | ); 92 | cache.insert(seq, item); 93 | Ok(()) 94 | } 95 | } 96 | 97 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 98 | ready!(self.as_mut().try_send_cached(cx))?; 99 | self.project().sink.poll_flush(cx) 100 | } 101 | 102 | fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 103 | ready!(self.as_mut().try_send_cached(cx))?; 104 | self.project().sink.poll_close(cx) 105 | } 106 | } 107 | 108 | impl OrderedExt for T where T: Sink {} 109 | 110 | pub trait OrderedExt: Sink { 111 | fn ordered(self, last_sent: usize) -> Ordered 112 | where 113 | Self: Sized, 114 | { 115 | debug!("Ordering messages starting from {last_sent}"); 116 | Ordered::new(self, last_sent) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /client/src/client/builder.rs: -------------------------------------------------------------------------------- 1 | use crate::constants::KEEP_ALIVE_DEFAULT; 2 | use crate::keep_alive::BackoffStrategy; 3 | use crate::traits::TryIntoU64; 4 | use selium_std::errors::Result; 5 | 6 | /// A convenient builder struct used to build a [Client](crate::Client) instance. 
7 | /// 8 | /// The [ClientBuilder] uses a type-level Finite State Machine to assure that a 9 | /// [Client](crate::Client) cannot be constructed with an invalid state. For example, the 10 | /// [connect](ClientBuilder::connect) method will not be in-scope unless the [ClientBuilder] is in 11 | /// a pre-connection state, which is achieved by first configuring the root store and keypair. 12 | /// 13 | /// **NOTE:** The [ClientBuilder] type is not intended to be used directly. Use the 14 | /// [cloud](crate::cloud) or [custom](crate::custom) functions to construct a [ClientBuilder] in 15 | /// its initial state. 16 | #[derive(Debug)] 17 | pub struct ClientBuilder { 18 | pub(crate) state: T, 19 | } 20 | 21 | impl ClientBuilder { 22 | pub fn new(state: T) -> Self { 23 | Self { state } 24 | } 25 | } 26 | 27 | /// Common state for all client types. 28 | #[derive(Debug)] 29 | pub struct ClientCommon { 30 | pub(crate) keep_alive: u64, 31 | pub(crate) backoff_strategy: BackoffStrategy, 32 | } 33 | 34 | impl Default for ClientCommon { 35 | fn default() -> Self { 36 | Self { 37 | keep_alive: KEEP_ALIVE_DEFAULT, 38 | backoff_strategy: BackoffStrategy::default(), 39 | } 40 | } 41 | } 42 | 43 | impl ClientCommon { 44 | /// Overrides the `keep_alive` interval for the client connection in milliseconds. 45 | /// 46 | /// Accepts any `interval` argument that can be *fallibly* converted into a [u64] via the 47 | /// [TryIntoU64](crate::traits::TryIntoU64) trait. 48 | /// 49 | /// **NOTE:** `Selium` already provides a reasonable default for the `keep_alive` interval (see 50 | /// [KEEP_ALIVE_DEFAULT]), so this setting should only be overridden if it's not suitable for 51 | /// your use-case. 52 | /// 53 | /// # Errors 54 | /// 55 | /// Returns [Err] if the provided interval fails to be convert to a [u64]. 56 | /// 57 | /// # Examples 58 | /// 59 | /// Overriding the default `keep_alive` interval as 6 seconds (represented in milliseconds). 
60 | /// 61 | /// ``` 62 | /// let client = selium::custom() 63 | /// .keep_alive(6_000).unwrap(); 64 | /// ``` 65 | /// 66 | /// You can even use any other type that can be converted to a [u64] via the 67 | /// [TryIntoU64](crate::traits::TryIntoU64) trait, such as the standard library's Duration 68 | /// type. 69 | /// 70 | /// ``` 71 | /// use std::time::Duration; 72 | /// 73 | /// let client = selium::custom() 74 | /// .keep_alive(Duration::from_secs(6)).unwrap(); 75 | /// ``` 76 | pub fn keep_alive(&mut self, interval: T) -> Result<()> { 77 | self.keep_alive = interval.try_into_u64()?; 78 | Ok(()) 79 | } 80 | 81 | /// Overrides the `backoff_strategy` used to recover a connection and streams when transient 82 | /// errors occur. 83 | /// 84 | /// See the [keep_alive](crate::keep_alive) module for more information. 85 | /// 86 | /// # Examples 87 | /// 88 | /// Overriding the default `backoff_strategy` to use a linear strategy with 5 attempts, and a 89 | /// duration step of 2 seconds. 
90 | /// 91 | /// ``` 92 | /// use selium::keep_alive::BackoffStrategy; 93 | /// use std::time::Duration; 94 | /// 95 | /// let strategy = BackoffStrategy::linear() 96 | /// .with_max_attempts(5) 97 | /// .with_step(Duration::from_secs(2)); 98 | /// 99 | /// let client = selium::custom() 100 | /// .backoff_strategy(strategy); 101 | /// ``` 102 | pub fn backoff_strategy(&mut self, strategy: BackoffStrategy) { 103 | self.backoff_strategy = strategy; 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /protocol/src/topic_name.rs: -------------------------------------------------------------------------------- 1 | use lazy_regex::{lazy_regex, Lazy}; 2 | use regex::Regex; 3 | use selium_std::errors::{Result, SeliumError}; 4 | use serde::{Deserialize, Serialize}; 5 | use std::fmt::Display; 6 | 7 | const RESERVED_NAMESPACE: &str = "selium"; 8 | // Any [a-zA-Z0-9-_] with a length between 3 and 64 chars 9 | static COMPONENT_REGEX: Lazy = lazy_regex!(r"^[\w-]{3,64}$"); 10 | static TOPIC_REGEX: Lazy = lazy_regex!(r"^\/([\w-]{3,64})\/([\w-]{3,64})$"); 11 | 12 | #[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize)] 13 | pub struct TopicName { 14 | namespace: String, 15 | topic: String, 16 | } 17 | 18 | impl TopicName { 19 | pub fn create(namespace: &str, topic: &str) -> Result { 20 | let s = Self { 21 | namespace: namespace.to_owned(), 22 | topic: topic.to_owned(), 23 | }; 24 | 25 | if s.is_valid() { 26 | Ok(s) 27 | } else { 28 | Err(SeliumError::ParseTopicNameError) 29 | } 30 | } 31 | 32 | #[doc(hidden)] 33 | pub fn _create_unchecked(namespace: &str, topic: &str) -> Self { 34 | Self { 35 | namespace: namespace.into(), 36 | topic: topic.into(), 37 | } 38 | } 39 | 40 | pub fn is_valid(&self) -> bool { 41 | !(self.namespace.starts_with(RESERVED_NAMESPACE) 42 | || !COMPONENT_REGEX.is_match(&self.namespace) 43 | || !COMPONENT_REGEX.is_match(&self.topic)) 44 | } 45 | 46 | pub fn namespace(&self) -> &str { 47 | 
&self.namespace 48 | } 49 | 50 | pub fn topic(&self) -> &str { 51 | &self.topic 52 | } 53 | } 54 | 55 | impl TryFrom<&str> for TopicName { 56 | type Error = SeliumError; 57 | 58 | fn try_from(value: &str) -> Result { 59 | if value.is_empty() { 60 | return Err(SeliumError::ParseTopicNameError); 61 | } 62 | 63 | #[cfg(not(feature = "__notopiccheck"))] /* value[1..] panics if the first char is multi-byte (byte index 1 is not a char boundary); get(1..) returns None instead */ 64 | if value.get(1..).map_or(false, |rest| rest.starts_with(RESERVED_NAMESPACE)) { 65 | return Err(SeliumError::ReservedNamespaceError); 66 | } 67 | 68 | let matches = TOPIC_REGEX 69 | .captures(value) 70 | .ok_or(SeliumError::ParseTopicNameError)?; 71 | 72 | let namespace = matches.get(1).unwrap().as_str().to_owned(); 73 | let topic = matches.get(2).unwrap().as_str().to_owned(); 74 | 75 | Ok(Self { namespace, topic }) 76 | } 77 | } 78 | 79 | impl Display for TopicName { 80 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 81 | write!(f, "/{}/{}", self.namespace, self.topic) 82 | } 83 | } 84 | 85 | #[cfg(test)] 86 | mod tests { 87 | use super::*; 88 | 89 | #[test] 90 | fn fails_to_parse_poorly_formatted_topic_names() { 91 | let topic_names = [ 92 | "", 93 | "namespace", 94 | "/namespace/", 95 | "/namespace/topic/other", 96 | "/namespace/topic!", 97 | ]; 98 | 99 | for topic_name in topic_names { 100 | let result = TopicName::try_from(topic_name); 101 | assert!(result.is_err()); 102 | } 103 | } 104 | 105 | #[cfg(not(feature = "__notopiccheck"))] 106 | #[test] 107 | fn fails_to_parse_reserved_namespace() { 108 | assert!(TopicName::try_from("/selium/topic").is_err()); 109 | } 110 | 111 | #[test] 112 | fn successfully_parses_topic_name() { 113 | let topic_names = [ 114 | "/namespace/topic", 115 | "/name_space/topic", 116 | "/namespace/to_pic", 117 | "/name_space/to_pic", 118 | ]; 119 | 120 | for topic_name in topic_names { 121 | let result = TopicName::try_from(topic_name); 122 | assert!(result.is_ok()); 123 | } 124 | } 125 | 126 | #[test] 127 | fn outputs_formatted_topic_name() { 128 | let namespace = "namespace"; 129 | let topic =
"topic"; 130 | let topic_name = TopicName::create(namespace, topic).unwrap(); 131 | let expected = format!("/{namespace}/{topic}"); 132 | 133 | assert_eq!(topic_name.to_string(), expected); 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /client/src/keep_alive/reqrep.rs: -------------------------------------------------------------------------------- 1 | use super::backoff_strategy::*; 2 | use super::helpers::is_recoverable_error; 3 | use crate::logging; 4 | use crate::request_reply::{Replier, Requestor}; 5 | use crate::traits::KeepAliveStream; 6 | use futures::Future; 7 | use selium_std::errors::QuicError; 8 | use selium_std::errors::Result; 9 | use selium_std::traits::codec::{MessageDecoder, MessageEncoder}; 10 | use std::fmt::Debug; 11 | 12 | #[doc(hidden)] 13 | pub struct KeepAlive { 14 | stream: T, 15 | backoff_strategy: BackoffStrategy, 16 | } 17 | 18 | impl KeepAlive 19 | where 20 | T: KeepAliveStream, 21 | { 22 | pub fn new(stream: T, backoff_strategy: BackoffStrategy) -> Self { 23 | Self { 24 | stream, 25 | backoff_strategy, 26 | } 27 | } 28 | 29 | async fn try_reconnect(&mut self, attempts: &mut BackoffStrategyIter) -> Result<()> { 30 | logging::keep_alive::connection_lost(); 31 | 32 | loop { 33 | let NextAttempt { 34 | duration, 35 | attempt_num, 36 | max_attempts, 37 | } = match attempts.next() { 38 | Some(next) => next, 39 | None => { 40 | logging::keep_alive::too_many_retries(); 41 | return Err(QuicError::TooManyRetries)?; 42 | } 43 | }; 44 | 45 | let connection = self.stream.get_connection(); 46 | let headers = self.stream.get_headers(); 47 | 48 | logging::keep_alive::reconnect_attempt(attempt_num, max_attempts); 49 | tokio::time::sleep(duration).await; 50 | 51 | match T::reestablish_connection(connection, headers).await { 52 | Ok(stream) => { 53 | logging::keep_alive::successful_reconnection(); 54 | self.stream.on_reconnect(stream); 55 | return Ok(()); 56 | } 57 | Err(err) if 
is_recoverable_error(&err) => { 58 | logging::keep_alive::reconnect_error(&err) 59 | } 60 | Err(err) => { 61 | logging::keep_alive::unrecoverable_error(&err); 62 | return Err(err); 63 | } 64 | } 65 | } 66 | } 67 | } 68 | 69 | impl Clone for KeepAlive> 70 | where 71 | E: MessageEncoder + Send + Unpin + Clone, 72 | D: MessageDecoder + Send + Unpin + Clone, 73 | { 74 | fn clone(&self) -> Self { 75 | Self { 76 | stream: self.stream.clone(), 77 | backoff_strategy: self.backoff_strategy.clone(), 78 | } 79 | } 80 | } 81 | 82 | impl KeepAlive> 83 | where 84 | E: MessageEncoder + Send + Unpin + Clone, 85 | D: MessageDecoder + Send + Unpin + Clone, 86 | { 87 | pub async fn request(&mut self, req: E::Item) -> Result { 88 | let mut attempts = self.backoff_strategy.clone().into_iter(); 89 | 90 | loop { 91 | match self.stream.request(req.clone()).await { 92 | Ok(res) => return Ok(res), 93 | Err(err) if is_recoverable_error(&err) => self.try_reconnect(&mut attempts).await?, 94 | Err(err) => { 95 | logging::keep_alive::unrecoverable_error(&err); 96 | return Err(err); 97 | } 98 | }; 99 | } 100 | } 101 | } 102 | 103 | impl KeepAlive> 104 | where 105 | D: MessageDecoder + Send + Unpin, 106 | E: MessageEncoder + Send + Unpin, 107 | Err: Debug, 108 | F: FnMut(D::Item) -> Fut + Send + Unpin, 109 | Fut: Future>, 110 | { 111 | pub async fn listen(&mut self) -> Result<()> { 112 | let mut attempts = self.backoff_strategy.clone().into_iter(); 113 | 114 | loop { 115 | match self.stream.listen().await { 116 | Err(err) if !is_recoverable_error(&err) => { 117 | logging::keep_alive::unrecoverable_error(&err); 118 | return Err(err); 119 | } 120 | _ => self.try_reconnect(&mut attempts).await?, 121 | }; 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /client/src/client/cloud/mod.rs: -------------------------------------------------------------------------------- 1 | mod states; 2 | pub use states::*; 3 | 4 | use 
crate::connection::{ClientConnection, ConnectionOptions}; 5 | use crate::constants::SELIUM_CLOUD_REMOTE_URL; 6 | use crate::crypto::cert::load_keypair; 7 | use crate::keep_alive::BackoffStrategy; 8 | use crate::logging; 9 | use crate::traits::TryIntoU64; 10 | use crate::{Client, ClientBuilder, ClientCommon}; 11 | use selium_std::errors::{Result, SeliumError}; 12 | use std::path::Path; 13 | use std::sync::Arc; 14 | use tokio::sync::Mutex; 15 | 16 | impl ClientBuilder { 17 | /// See [keep_alive](ClientCommon::keep_alive) in [ClientCommon]. 18 | pub fn keep_alive(mut self, interval: T) -> Result { 19 | self.state.common.keep_alive(interval)?; 20 | Ok(self) 21 | } 22 | 23 | /// See [backoff_strategy](ClientCommon::backoff_strategy) in [ClientCommon]. 24 | pub fn backoff_strategy(mut self, strategy: BackoffStrategy) -> Self { 25 | self.state.common.backoff_strategy(strategy); 26 | self 27 | } 28 | 29 | /// Attempts to load a valid keypair from the filesystem to use with authenticating the QUIC connection. 30 | /// 31 | /// Keypairs can be encoded in either a Base64 ASCII (.pem) or binary (.der) format. 32 | /// 33 | /// Following this method, the [ClientBuilder] will be in a pre-connection state, so any 34 | /// additional configuration must take place before invoking this method. 35 | /// 36 | /// # Errors 37 | /// 38 | /// Returns [Err] under the following conditions: 39 | /// 40 | /// - The provided `cert_file` argument does not refer to a file containing a 41 | /// valid certificate. 42 | /// 43 | /// - The provided `key_file` argument does not refer to a file containing a 44 | /// valid PKCS-8 private key. 
45 | pub fn with_cert_and_key>( 46 | self, 47 | cert_file: T, 48 | key_file: T, 49 | ) -> Result> { 50 | let (certs, key) = load_keypair(cert_file, key_file)?; 51 | let next_state = CloudWantsConnect::new(self.state, &certs, key); 52 | Ok(ClientBuilder { state: next_state }) 53 | } 54 | } 55 | 56 | impl ClientBuilder { 57 | /// Attempts to establish a connection with `Selium Cloud`. 58 | /// 59 | /// The [connect](ClientBuilder::connect) method will only be in scope if the 60 | /// [ClientBuilder] is in a pre-connect state, `CloudWantsConnect`. 61 | /// 62 | /// # Errors 63 | /// 64 | /// Returns [Err] under the following conditions: 65 | /// 66 | /// - If the connection cannot be established. 67 | pub async fn connect(self) -> Result { 68 | let CloudWantsConnect { 69 | common, 70 | certs, 71 | key, 72 | root_store, 73 | } = self.state; 74 | let ClientCommon { 75 | keep_alive, 76 | backoff_strategy, 77 | } = common; 78 | 79 | let options = ConnectionOptions::new(certs.as_slice(), key, root_store, keep_alive); 80 | logging::connection::get_cloud_endpoint(); 81 | let endpoint = get_cloud_endpoint(options.clone()).await?; 82 | 83 | logging::connection::connect_to_address(&endpoint); 84 | let connection = ClientConnection::connect(&endpoint, options).await?; 85 | let connection = Arc::new(Mutex::new(connection)); 86 | logging::connection::successful_connection(&endpoint); 87 | 88 | Ok(Client { 89 | connection, 90 | backoff_strategy, 91 | }) 92 | } 93 | } 94 | 95 | #[tracing::instrument] 96 | async fn get_cloud_endpoint(options: ConnectionOptions) -> Result { 97 | let connection = ClientConnection::connect(SELIUM_CLOUD_REMOTE_URL, options).await?; 98 | let (_, mut read) = connection 99 | .conn() 100 | .open_bi() 101 | .await 102 | .map_err(SeliumError::OpenCloudStreamFailed)?; 103 | let endpoint_bytes = read 104 | .read_to_end(2048) 105 | .await 106 | .map_err(|_| SeliumError::GetServerAddressFailed)?; 107 | let endpoint = 108 | 
String::from_utf8(endpoint_bytes).map_err(|_| SeliumError::GetServerAddressFailed)?; 109 | 110 | Ok(endpoint) 111 | } 112 | -------------------------------------------------------------------------------- /log/src/index/mod.rs: -------------------------------------------------------------------------------- 1 | //! Contains the [Index] and [Mmap] types. 2 | 3 | mod entry; 4 | mod mmap; 5 | 6 | use crate::{config::SharedLogConfig, error::Result}; 7 | pub use entry::IndexEntry; 8 | pub use mmap::Mmap; 9 | use std::path::Path; 10 | 11 | /// Wrapper type for an index file belonging to a segment. 12 | /// 13 | /// The index is a memory-mapped file that serves as a lookup directory of all messages in the segment. 14 | /// 15 | /// The index lists where any messages in the log can be located via byte offset. Given a relative offset, 16 | /// a fast lookup of a byte offset in the data file can be performed via a binary search. 17 | #[derive(Debug)] 18 | pub struct Index { 19 | mmap: Mmap, 20 | current_offset: u32, 21 | config: SharedLogConfig, 22 | } 23 | 24 | impl Index { 25 | /// Constructs a new Index instance. 26 | pub fn new(mmap: Mmap, current_offset: u32, config: SharedLogConfig) -> Self { 27 | Self { 28 | mmap, 29 | current_offset, 30 | config, 31 | } 32 | } 33 | 34 | /// Constructs an Index instance from an existing index file. 35 | /// 36 | /// # Errors 37 | /// - Returns Err if a failure occurs while opening the existing index file. 38 | pub async fn open(path: impl AsRef, config: SharedLogConfig) -> Result { 39 | let mmap = Mmap::load(path).await?; 40 | let next_offset = mmap.get_current_offset(); 41 | Ok(Self::new(mmap, next_offset, config)) 42 | } 43 | 44 | /// Constructs an Index instance, and creates the underlying memory-mapped index file. 45 | /// 46 | /// # Errors 47 | /// - Returns Err if a failure occurs while creating the index file. 
48 | pub async fn create(path: impl AsRef, config: SharedLogConfig) -> Result { 49 | let mmap = Mmap::create(path, config.clone()).await?; 50 | Ok(Self::new(mmap, 0, config)) 51 | } 52 | 53 | /// Appends a new entry to the memory-mapped index. 54 | /// This method is called after appending an encoded message to the segment's data file. 55 | /// 56 | /// # Params 57 | /// * `timestamp` - The UNIX timestamp corresponding to when the message was appended the data file. 58 | /// * `file_position` - The byte offset in the data file for the appended message. 59 | pub fn append(&mut self, timestamp: u64, file_position: u64) { /* off-by-one fix: with `<=` the index accepted max_index_entries + 1 entries and is_full() (which tests ==) could be skipped past, never returning true */ 60 | if self.current_offset < self.config.max_index_entries { 61 | let next_offset = self.current_offset + 1; 62 | let entry = IndexEntry::new(next_offset, timestamp, file_position); 63 | self.mmap.push(entry); 64 | self.current_offset = next_offset; 65 | } 66 | } 67 | 68 | /// Flushes the memory map to the underlying file. 69 | /// This operation can have negative performance impacts, so should be used sparingly until 70 | /// replication has been implemented. 71 | /// 72 | /// # Errors 73 | /// - Returns Err if the memory map fails to flush to the underlying file. 74 | pub fn flush(&mut self) -> Result<()> { 75 | self.mmap.flush()?; 76 | Ok(()) 77 | } 78 | 79 | /// Performs a lookup in the memory map for the specified `relative_offset`, and returns 80 | /// the decoded [IndexEntry]. 81 | /// 82 | /// Returns [Option::None] if the relative offset does not exist in the index. 83 | pub fn lookup(&self, relative_offset: u32) -> Option { 84 | if relative_offset > self.current_offset { 85 | return None; 86 | } 87 | 88 | self.mmap 89 | .find(|entry| entry.relative_offset() == relative_offset) 90 | } 91 | 92 | /// Removes the underlying index file. 93 | /// This method is typically only called when a segment is being removed by the log cleaner task. 94 | /// 95 | /// # Errors 96 | /// - Returns Err if the underlying file cannot be removed.
97 | pub async fn remove(self) -> Result<()> { 98 | self.mmap.remove().await?; 99 | Ok(()) 100 | } 101 | 102 | /// The current relative offset in the index. 103 | pub fn current_offset(&self) -> u32 { 104 | self.current_offset 105 | } 106 | 107 | /// Returns true if the index is at capacity, based on the provided `max_index_entries` option 108 | /// in the shared log configuration. 109 | pub fn is_full(&self) -> bool { 110 | self.current_offset == self.config.max_index_entries 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /tests/tests/streams/helpers.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Parser; 3 | use selium::keep_alive::reqrep::KeepAlive; 4 | use selium::keep_alive::BackoffStrategy; 5 | use selium::prelude::*; 6 | use selium::std::codecs::BincodeCodec; 7 | use selium::std::errors::SeliumError; 8 | use selium::{request_reply::Requestor, Client}; 9 | use selium_server::args::UserArgs; 10 | use selium_server::server::Server; 11 | use serde::{Deserialize, Serialize}; 12 | use std::net::SocketAddr; 13 | use std::path::Path; 14 | use std::time::Duration; 15 | use tempfile::TempDir; 16 | 17 | // Allow the operating system to assign a free port 18 | const SERVER_ADDR: &str = "127.0.0.1:0"; 19 | 20 | #[derive(Debug, Serialize, Deserialize, Clone)] 21 | pub enum Request { 22 | Ping, 23 | Echo(String), 24 | } 25 | 26 | #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] 27 | pub enum Response { 28 | Pong, 29 | Echo(String), 30 | } 31 | 32 | type Req = KeepAlive, BincodeCodec>>; 33 | 34 | pub struct TestClient { 35 | client: Client, 36 | _tempdir: TempDir, 37 | } 38 | 39 | impl TestClient { 40 | pub async fn start() -> Result { 41 | let tempdir = TempDir::new().unwrap(); 42 | let server_addr = start_server(tempdir.path())?; 43 | 44 | let client = selium::custom() 45 | .keep_alive(5_000)? 
46 | .backoff_strategy(BackoffStrategy::constant().with_max_attempts(0)) 47 | .endpoint(&server_addr.to_string()) 48 | .with_certificate_authority("../certs/client/ca.der")? 49 | .with_cert_and_key( 50 | "../certs/client/localhost.der", 51 | "../certs/client/localhost.key.der", 52 | )? 53 | .connect() 54 | .await?; 55 | 56 | Ok(Self { 57 | client, 58 | _tempdir: tempdir, 59 | }) 60 | } 61 | 62 | pub fn start_replier( 63 | &self, 64 | delay: Option, 65 | ) -> tokio::task::JoinHandle> { 66 | tokio::spawn({ 67 | let client = self.client.clone(); 68 | 69 | async move { 70 | let mut replier = client 71 | .replier("/test/endpoint") 72 | .with_request_decoder(BincodeCodec::default()) 73 | .with_reply_encoder(BincodeCodec::default()) 74 | .with_handler(|req| async move { 75 | if let Some(delay) = delay { 76 | tokio::time::sleep(delay).await; 77 | } 78 | 79 | handler(req).await 80 | }) 81 | .open() 82 | .await 83 | .unwrap(); 84 | 85 | replier.listen().await 86 | } 87 | }) 88 | } 89 | 90 | pub async fn requestor(&self, timeout: Option) -> Result { 91 | let mut builder = self 92 | .client 93 | .requestor("/test/endpoint") 94 | .with_request_encoder(BincodeCodec::default()) 95 | .with_reply_decoder(BincodeCodec::default()); 96 | 97 | if let Some(timeout) = timeout { 98 | builder = builder.with_request_timeout(timeout)?; 99 | } 100 | 101 | let requestor = builder.open().await?; 102 | 103 | Ok(requestor) 104 | } 105 | } 106 | 107 | async fn handler(req: Request) -> Result { 108 | let res = match req { 109 | Request::Ping => Response::Pong, 110 | Request::Echo(msg) => Response::Echo(msg), 111 | }; 112 | 113 | Ok(res) 114 | } 115 | 116 | pub fn start_server(logs_dir: impl AsRef) -> Result { 117 | let args = UserArgs::parse_from([ 118 | "", 119 | "--bind-addr", 120 | SERVER_ADDR, 121 | "--cert", 122 | "../certs/server/localhost.der", 123 | "--key", 124 | "../certs/server/localhost.key.der", 125 | "--ca", 126 | "../certs/server/ca.der", 127 | "--flush-policy-num-writes", 128 | "1", 
129 | "--log-segments-directory", 130 | logs_dir.as_ref().to_str().unwrap(), 131 | ]); 132 | 133 | let server = Server::try_from(args)?; 134 | let addr = server.addr()?; 135 | 136 | tokio::spawn(async move { 137 | server.listen().await.expect("Failed to spawn server"); 138 | }); 139 | 140 | Ok(addr) 141 | } 142 | -------------------------------------------------------------------------------- /server/src/quic.rs: -------------------------------------------------------------------------------- 1 | //! Much of this code was borrowed with many thanks from the Quinn project: 2 | //! `` 3 | 4 | use anyhow::{bail, Context, Result}; 5 | use quinn::{Connection, IdleTimeout, ServerConfig}; 6 | use rustls::server::AllowAnyAuthenticatedClient; 7 | use rustls::{Certificate, PrivateKey, RootCertStore}; 8 | use rustls_pemfile::{certs, pkcs8_private_keys, rsa_private_keys}; 9 | use std::{fs, path::Path, sync::Arc}; 10 | 11 | const ALPN_QUIC_HTTP: &[&[u8]] = &[b"hq-29"]; 12 | 13 | #[derive(Default)] 14 | pub struct ConfigOptions { 15 | pub keylog: bool, 16 | pub stateless_retry: bool, 17 | pub max_idle_timeout: IdleTimeout, 18 | } 19 | 20 | pub fn server_config( 21 | root_store: RootCertStore, 22 | certs: Vec, 23 | key: PrivateKey, 24 | options: ConfigOptions, 25 | ) -> Result { 26 | let client_cert_verifier = Arc::new(AllowAnyAuthenticatedClient::new(root_store)); 27 | 28 | let mut server_crypto = rustls::ServerConfig::builder() 29 | .with_safe_defaults() 30 | .with_client_cert_verifier(client_cert_verifier) 31 | .with_single_cert(certs, key)?; 32 | 33 | server_crypto.alpn_protocols = ALPN_QUIC_HTTP.iter().map(|&x| x.into()).collect(); 34 | if options.keylog { 35 | server_crypto.key_log = Arc::new(rustls::KeyLogFile::new()); 36 | } 37 | 38 | let mut server_config = ServerConfig::with_crypto(Arc::new(server_crypto)); 39 | let transport_config = Arc::get_mut(&mut server_config.transport).unwrap(); 40 | transport_config.max_concurrent_uni_streams(0_u8.into()); 41 | 
transport_config.max_idle_timeout(Some(options.max_idle_timeout)); 42 | if options.stateless_retry { 43 | server_config.use_retry(true); 44 | } 45 | 46 | Ok(server_config) 47 | } 48 | 49 | fn load_key>(path: T) -> Result { 50 | let path = path.as_ref(); 51 | let key = fs::read(path).context("failed to read private key")?; 52 | let key = if path.extension().map_or(false, |x| x == "der") { 53 | PrivateKey(key) 54 | } else { 55 | let pkcs8 = pkcs8_private_keys(&mut &*key).context("malformed PKCS #8 private key")?; 56 | match pkcs8.into_iter().next() { 57 | Some(x) => PrivateKey(x), 58 | None => { 59 | let rsa = rsa_private_keys(&mut &*key).context("malformed PKCS #1 private key")?; 60 | match rsa.into_iter().next() { 61 | Some(x) => PrivateKey(x), 62 | None => { 63 | bail!("no private keys found"); 64 | } 65 | } 66 | } 67 | } 68 | }; 69 | 70 | Ok(key) 71 | } 72 | 73 | fn load_certs>(path: T) -> Result> { 74 | let path = path.as_ref(); 75 | let cert_chain = fs::read(path).context("failed to read certificate chain")?; 76 | 77 | let cert_chain = if path.extension().map_or(false, |x| x == "der") { 78 | vec![Certificate(cert_chain)] 79 | } else { 80 | certs(&mut &*cert_chain) 81 | .context("invalid PEM-encoded certificate")? 
82 | .into_iter() 83 | .map(Certificate) 84 | .collect() 85 | }; 86 | 87 | Ok(cert_chain) 88 | } 89 | 90 | pub fn read_certs>( 91 | cert_path: T, 92 | key_path: T, 93 | ) -> Result<(Vec, PrivateKey)> { 94 | let certs = load_certs(cert_path)?; 95 | let key = load_key(key_path)?; 96 | Ok((certs, key)) 97 | } 98 | 99 | pub fn load_root_store>(ca_file: T) -> Result { 100 | let ca_file = ca_file.as_ref(); 101 | let mut store = RootCertStore::empty(); 102 | let certs = load_certs(ca_file)?; 103 | store.add_parsable_certificates(&certs); 104 | 105 | if store.is_empty() { 106 | bail!("No valid certs found in file {ca_file:?}"); 107 | } 108 | 109 | Ok(store) 110 | } 111 | 112 | pub fn get_pubkey_from_connection(connection: &Connection) -> Result> { 113 | let peer_identity = connection 114 | .peer_identity() 115 | .context("Unable to read peer identity")?; 116 | 117 | let certs = peer_identity 118 | .downcast_ref::>() 119 | .context("Unable to read cert")?; 120 | 121 | Ok(certs 122 | .first() 123 | .context("Failed to get first certificate")? 
124 | .0 125 | .clone()) 126 | } 127 | -------------------------------------------------------------------------------- /server/src/cloud.rs: -------------------------------------------------------------------------------- 1 | use std::{pin::Pin, time::Duration}; 2 | 3 | use anyhow::{anyhow, Context, Result as AnyhowResult}; 4 | use bytes::BytesMut; 5 | use futures::{ 6 | channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, 7 | future, Sink, SinkExt, Stream, StreamExt, 8 | }; 9 | use quinn::Connection; 10 | use selium_protocol::{Frame, MessagePayload, TopicName}; 11 | use selium_std::{ 12 | codecs::BincodeCodec, 13 | errors::{CodecError, Result, SeliumError}, 14 | traits::codec::{MessageDecoder, MessageEncoder}, 15 | }; 16 | use serde::{Deserialize, Serialize}; 17 | use tokio::time::timeout; 18 | 19 | use crate::{ 20 | quic::get_pubkey_from_connection, 21 | server::SharedTopics, 22 | topic::{reqrep, Socket}, 23 | }; 24 | 25 | #[cfg(debug_assertions)] 26 | const PROXY_PUBKEY: &[u8; 415] = include_bytes!("../proxy.debug.der"); 27 | #[cfg(not(debug_assertions))] 28 | const PROXY_PUBKEY: &[u8; 416] = include_bytes!("../proxy.prod.der"); 29 | 30 | #[derive(Serialize)] 31 | pub enum AdminRequest { 32 | _Pad1, 33 | _Pad2, 34 | _Pad3, 35 | _Pad4, 36 | _Pad5, 37 | _Pad6, 38 | GetNamespace(Vec), 39 | _Pad7, 40 | } 41 | 42 | #[derive(Deserialize)] 43 | pub enum AdminResponse { 44 | _Pad1, 45 | _Pad2, 46 | _Pad3, 47 | _Pad4, 48 | GetNamespaceResponse(String), 49 | ServerError(String), 50 | _Pad6, 51 | } 52 | 53 | // XXX This is horrendously inefficient! Caching is needed. 
54 | pub async fn do_cloud_auth( 55 | connection: &Connection, 56 | name: &TopicName, 57 | topics: &SharedTopics, 58 | ) -> AnyhowResult<()> { 59 | let pub_key = get_pubkey_from_connection(&connection)?; 60 | 61 | // If this is the proxy, don't do auth 62 | if pub_key == PROXY_PUBKEY { 63 | return Ok(()); 64 | } 65 | 66 | let mut ts = topics.lock().await; 67 | 68 | let proxy_namespace = TopicName::_create_unchecked("selium", "proxy"); 69 | 70 | let namespace = name.namespace(); 71 | 72 | if ts.contains_key(&proxy_namespace) { 73 | let ((si, st), (mut tx, rx)) = channel_pair(); 74 | 75 | let topic_tx = ts.get_mut(&proxy_namespace).unwrap(); 76 | topic_tx 77 | .send(Socket::Reqrep(reqrep::Socket::Client(( 78 | Box::pin(si.sink_map_err(|_| SeliumError::RequestFailed)), 79 | Box::pin(st), 80 | )))) 81 | .await 82 | .context("Failed to add Requestor to proxy topic")?; 83 | 84 | tx.send(AdminRequest::GetNamespace(pub_key)).await?; 85 | let result = timeout(Duration::from_secs(5), rx.into_future()).await; 86 | match result { 87 | Ok((Some(Ok(AdminResponse::GetNamespaceResponse(ns))), _)) if ns == namespace => Ok(()), 88 | Ok((Some(Ok(AdminResponse::GetNamespaceResponse(_))), _)) 89 | | Ok((Some(Ok(AdminResponse::ServerError(_))), _)) => Err(anyhow!("Access denied")), 90 | Ok((Some(Err(e)), _)) => Err(e.into()), 91 | _ => Err(anyhow!("No response from proxy")), 92 | } 93 | } else { 94 | Err(anyhow!("Waiting for proxy to connect - please retry")) 95 | } 96 | } 97 | 98 | fn channel_pair() -> ( 99 | (UnboundedSender, UnboundedReceiver>), 100 | ( 101 | Pin + Send>>, 102 | Pin> + Send>>, 103 | ), 104 | ) { 105 | let (si, rx) = unbounded(); 106 | let (tx, st) = unbounded(); 107 | 108 | let bincode = BincodeCodec::default(); 109 | let tx = tx 110 | .sink_map_err(|_| SeliumError::RequestFailed) 111 | .with(move |item| match bincode.encode(item) { 112 | Ok(msg) => future::ok(Ok(Frame::Message(MessagePayload { 113 | headers: None, 114 | message: msg, 115 | }))), 116 | Err(e) => 
future::err(SeliumError::Codec(CodecError::EncodeFailure(e))), 117 | }); 118 | 119 | let bincode = BincodeCodec::default(); 120 | let rx = rx.map(move |frame| match frame { 121 | Frame::Message(payload) => { 122 | let mut bytes = BytesMut::new(); 123 | bytes.extend(payload.message); 124 | match bincode.decode(&mut bytes) { 125 | Ok(item) => Ok(item), 126 | Err(e) => Err(SeliumError::Codec(CodecError::DecodeFailure(e))), 127 | } 128 | } 129 | _ => Err(SeliumError::RequestFailed), 130 | }); 131 | 132 | ((si, st), (Box::pin(tx), Box::pin(rx))) 133 | } 134 | --------------------------------------------------------------------------------