├── .github └── workflows │ ├── docs.yml │ └── rust.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── core ├── Cargo.toml └── src │ ├── async_runtime │ ├── executor.rs │ ├── io.rs │ ├── lock.rs │ ├── mod.rs │ ├── net.rs │ ├── spawn.rs │ ├── task.rs │ └── timer.rs │ ├── async_util │ ├── backoff.rs │ ├── condvar.rs │ ├── condwait.rs │ ├── mod.rs │ ├── select.rs │ ├── sleep.rs │ ├── task_group.rs │ └── timeout.rs │ ├── crypto │ ├── key_pair.rs │ └── mod.rs │ ├── error.rs │ ├── event.rs │ ├── lib.rs │ ├── pubsub.rs │ └── util │ ├── decode.rs │ ├── encode.rs │ ├── mod.rs │ └── path.rs ├── jsonrpc ├── Cargo.toml ├── README.md ├── examples │ ├── client.py │ ├── client.rs │ ├── client_custom_codec.rs │ ├── client_derive.rs │ ├── pubsub_client.rs │ ├── pubsub_server.rs │ ├── server.rs │ ├── server_custom_codec.rs │ ├── server_derive.rs │ └── tokio_server │ │ ├── .gitignore │ │ ├── Cargo.toml │ │ └── src │ │ └── main.rs ├── impl │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── src │ ├── client │ │ ├── builder.rs │ │ ├── message_dispatcher.rs │ │ ├── mod.rs │ │ └── subscriptions.rs │ ├── codec.rs │ ├── error.rs │ ├── lib.rs │ ├── message.rs │ ├── net.rs │ └── server │ │ ├── builder.rs │ │ ├── channel.rs │ │ ├── mod.rs │ │ ├── pubsub_service.rs │ │ ├── response_queue.rs │ │ └── service.rs └── tests │ ├── rpc_impl.rs │ └── rpc_pubsub_impl.rs ├── net ├── Cargo.toml ├── async_rustls │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── examples │ ├── tcp_codec.rs │ └── tcp_codec_tokio │ │ ├── .gitignore │ │ ├── Cargo.toml │ │ └── src │ │ └── main.rs └── src │ ├── codec │ ├── buffer.rs │ ├── bytes_codec.rs │ ├── length_codec.rs │ ├── mod.rs │ └── websocket.rs │ ├── connection.rs │ ├── endpoint.rs │ ├── error.rs │ ├── lib.rs │ ├── listener.rs │ ├── stream │ ├── mod.rs │ └── websocket.rs │ └── transports │ ├── mod.rs │ ├── tcp.rs │ ├── tls.rs │ ├── udp.rs │ ├── unix.rs │ └── ws.rs └── p2p ├── Cargo.toml ├── README.md ├── examples ├── chat.rs ├── chat_simulation.sh ├── net_simulation.sh ├── peer.rs ├── shared │ └── mod.rs └── tokio-example │ ├── .gitignore │ ├── Cargo.toml │ └── src │ └── main.rs └── src ├── backend.rs ├── codec.rs ├── config.rs ├── conn_queue.rs ├── connection.rs ├── connector.rs ├── discovery ├── lookup.rs ├── mod.rs └── refresh.rs ├── error.rs ├── lib.rs ├── listener.rs ├── message.rs ├── monitor ├── event.rs └── mod.rs ├── peer ├── mod.rs └── peer_id.rs ├── peer_pool.rs ├── protocol.rs ├── protocols ├── handshake.rs ├── mod.rs └── ping.rs ├── routing_table ├── bucket.rs ├── entry.rs └── mod.rs ├── slots.rs ├── tls_config.rs └── version.rs /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Build and Deploy Rust API Docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | docs: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | contents: write 13 | 14 | steps: 15 | - name: Checkout sources 16 | uses: actions/checkout@v2 17 | with: 18 | submodules: recursive 19 | 20 | - name: Get date for registry cache 21 | id: date 22 | run: echo "::set-output name=date::$(date +'%Y-%m-%d')" 23 | 24 | - name: Cargo registry cache 25 | uses: actions/cache@v4 26 | with: 27 | path: | 28 | ~/.cargo/registry/index 29 | ~/.cargo/registry/cache 30 | ~/.cargo/git 31 | key: ${{ runner.os }}-cargo-registry-${{ steps.date.outputs.date }} 32 | 33 | - name: Rust toolchain 34 | uses: actions-rs/toolchain@v1 35 | with: 36 | toolchain: stable 37 | override: true 38 | 39 | - name: Build Documentation 40 | uses: 
actions-rs/cargo@v1 41 | with: 42 | command: doc 43 | args: --no-deps --all --document-private-items 44 | 45 | - name: Deploy 46 | uses: peaceiris/actions-gh-pages@v3 47 | with: 48 | github_token: ${{ secrets.GITHUB_TOKEN }} 49 | publish_dir: ./target/doc 50 | publish_branch: gh-pages 51 | force_orphan: true 52 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Cargo Build & Test 2 | 3 | on: 4 | push: 5 | pull_request: 6 | 7 | env: 8 | CARGO_TERM_COLOR: always 9 | 10 | jobs: 11 | build_and_test: 12 | name: karyon - latest 13 | runs-on: ubuntu-latest 14 | strategy: 15 | matrix: 16 | toolchain: 17 | - stable 18 | steps: 19 | - uses: actions/checkout@v3 20 | - run: rustup update ${{ matrix.toolchain }} && rustup default ${{ matrix.toolchain }} 21 | - name: Build with smol 22 | run: cargo build --workspace --verbose 23 | - name: Build with tokio 24 | run: cargo build --workspace --no-default-features --features tokio --verbose 25 | - name: Run tests 26 | run: cargo test --workspace --verbose 27 | - name: Run clippy 28 | run: cargo clippy -- -D warnings 29 | - name: Run fmt 30 | run: cargo fmt --all -- --check 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | 4 | # Please ensure that each crate comes before any other crate that depends on it 5 | members = ["core", "net", "net/async_rustls", "jsonrpc", "jsonrpc/impl", "p2p"] 6 | 7 | [workspace.package] 8 | description = "A library for building p2p, decentralized, and collaborative software" 9 | version = "0.3.2" 10 | edition = "2021" 11 | homepage = "https://karyontech.net" 12 | repository = "https://github.com/karyontech/karyon" 13 | license = "GPL-3.0" 14 | authors = ["hozan23 "] 15 | 16 | [workspace.dependencies] 17 | karyon_core = { path = "core", version = "0.3.2", default-features = false } 18 | 19 | karyon_net = { path = "net", version = "0.3.2", default-features = false } 20 | karyon_async_rustls = { path = "net/async_rustls", version = "0.3.2", default-features = false } 21 | 22 | karyon_jsonrpc = { path = "jsonrpc", version = "0.3.2", default-features = false } 23 | karyon_jsonrpc_macro = { path = "jsonrpc/impl", version = "0.3.2", default-features = false } 24 | 25 | karyon_p2p = { path = "p2p", version = "0.3.2", default-features = false } 26 | 27 | log = "0.4" 28 | thiserror = "2.0" 29 | chrono = "0.4" 30 | rand = "0.8" 31 | url = "2.5" 32 | parking_lot = "0.12" 33 | once_cell = "1.21" 34 | semver = "1.0" 35 | sha2 = "0.10" 36 | dirs = "6.0" 37 | ed25519-dalek = "2.1" 38 | 39 | 40 | # async 41 | async-channel = "2.3" 42 | async-trait = "0.1" 43 | pin-project-lite = "0.2" 44 | async-process = "2.3" 45 | smol = "2.0" 46 | tokio = "1.45" 47 | futures-util = { version = "0.3", default-features = false } 48 | 49 | # encode 50 | bincode = "2.0" 51 | serde = "1.0" 52 | serde_json = "1.0" 53 | base64 = "0.22" 54 | 55 | # macros 56 | proc-macro2 = "1.0" 57 | quote = "1.0" 58 | syn = "2.0" 59 | 60 | # websocket 61 | async-tungstenite = { version = "0.29", default-features = false } 62 | 63 | # tls 64 | rustls-pki-types = "1.12" 65 | 
futures-rustls = "0.26" 66 | tokio-rustls = "0.26" 67 | rcgen = "0.13" 68 | yasna = "0.5" 69 | x509-parser = "0.17" 70 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Karyon 2 | 3 | [![Build](https://github.com/karyontech/karyon/actions/workflows/rust.yml/badge.svg)](https://github.com/karyontech/karyon/actions) 4 | [![License](https://img.shields.io/crates/l/karyon_core)](https://github.com/karyontech/karyon/blob/master/LICENSE) 5 | 6 | - [![karyon_jsonrpc crates.io](https://img.shields.io/crates/v/karyon_jsonrpc?label=karyon_jsonrpc%20crates.io)](https://crates.io/crates/karyon_jsonrpc) 7 | - [![karyon_jsonrpc docs.rs](https://img.shields.io/docsrs/karyon_jsonrpc?label=karyon_jsonrpc%20docs.rs)](https://docs.rs/karyon_jsonrpc/latest/karyon_jsonrpc/) 8 | 9 | A library for building p2p, decentralized, and collaborative software 10 | 11 | [Website](https://karyontech.net/) | [Discord](https://discord.gg/xuXRcrkz3p) | [irc](https://libera.chat/) #karyon on liberachat 12 | 13 | > In molecular biology, a Karyon is essentially "a part of the cell 14 | > containing DNA and RNA and responsible for growth and reproduction" 15 | 16 | ## Overview 17 | 18 | Building peer-to-peer (p2p), decentralized applications that are resilient, 19 | secure, and free from central control is a challenge for developers. There are 20 | not many libraries and tools available to build these applications. As 21 | a result, many developers either abandon their ideas or have to develop a new 22 | p2p network stack and tools from scratch. Instead of sharing common components 23 | and tools for building p2p systems, every p2p project seems to reinvent the 24 | wheel, which increases the effort required and the potential for vulnerabilities. 25 | 26 | Karyon addresses this issue by providing developers with the components and 27 | tools needed to create p2p and decentralized apps, simplifying the complexities 28 | associated with building them. Its primary goal is to make decentralization 29 | more accessible and efficient for developers everywhere. 30 | 31 | ## Crates 32 | 33 | - **[karyon core](./core)**: Essential utilities and core functionality. 34 | - **[karyon net](./net)**: Provides a network interface for TCP, UDP, TLS, WebSocket, and Unix, 35 | along with common network functionality. 36 | - **[karyon p2p](./p2p)**: A lightweight, extensible, and customizable 37 | peer-to-peer (p2p) network stack. 38 | - **[karyon jsonrpc](./jsonrpc)**: A fast and lightweight async 39 | [JSONRPC2.0](https://www.jsonrpc.org/specification) implementation. 40 | - **karyon crdt**: A [CRDT](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type) 41 | implementation for building collaborative software. 42 | - **karyon base**: A lightweight, extensible database that operates with **karyon crdt**. 43 | 44 | ## Choosing the async runtime 45 | 46 | All the crates support both **smol (async-std)** and **tokio** async runtimes. 47 | The default is **smol**, but if you want to use **tokio**, you need to disable 48 | the default features and then select the `tokio` feature.
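For example, to build against **tokio**, a downstream `Cargo.toml` entry could look like the sketch below. This is illustrative only: the feature names are taken from `jsonrpc/Cargo.toml` in this workspace, and `tcp`/`unix` are re-enabled here because disabling the defaults also drops them.

```toml
[dependencies]
# Sketch: opt out of the default smol runtime and select tokio instead.
# `tcp` and `unix` are re-added because they are part of the default feature set.
karyon_jsonrpc = { version = "0.3.2", default-features = false, features = ["tokio", "tcp", "unix"] }
```

The same pattern applies to the other karyon crates, since each of them exposes matching `smol`/`tokio` features.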
49 | 50 | ## Docs 51 | 52 | Online documentation for the main crates: 53 | - [karyon_p2p](https://karyontech.github.io/karyon/karyon_p2p), 54 | - [karyon_jsonrpc](https://karyontech.github.io/karyon/karyon_jsonrpc) 55 | 56 | For the internal crates: 57 | - [karyon_core](https://karyontech.github.io/karyon/karyon_core), 58 | - [karyon_net](https://karyontech.github.io/karyon/karyon_net) 59 | 60 | ## Status 61 | 62 | This project is a work in progress. The current focus is on shipping `karyon 63 | crdt` and `karyon base`, along with major changes to the network stack. You can 64 | check the [issues](https://github.com/karyontech/karyon/issues) for updates on 65 | ongoing tasks. 66 | 67 | ## Contribution 68 | 69 | Feel free to open a pull request or an [issue](https://github.com/karyontech/karyon/issues/new). 70 | 71 | ## License 72 | 73 | All the code in this repository is licensed under the GNU General Public 74 | License, version 3 (GPL-3.0). You can find a copy of the license in the 75 | [LICENSE](./LICENSE) file. 76 | -------------------------------------------------------------------------------- /core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "karyon_core" 3 | description = "Internal crate for Karyon library." 4 | version.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | repository.workspace = true 8 | license.workspace = true 9 | authors.workspace = true 10 | 11 | [features] 12 | default = ["smol"] 13 | crypto = ["ed25519-dalek"] 14 | tokio = ["dep:tokio"] 15 | smol = ["dep:smol", "async-process"] 16 | 17 | [dependencies] 18 | log = { workspace = true } 19 | thiserror = { workspace = true } 20 | chrono = { workspace = true } 21 | rand = { workspace = true } 22 | parking_lot = { workspace = true } 23 | once_cell = { workspace = true } 24 | bincode = { workspace = true } 25 | dirs = { workspace = true } 26 | 27 | async-channel = { workspace = true } 28 | pin-project-lite = { workspace = true } 29 | async-process = { workspace = true, optional = true } 30 | smol = { workspace = true, optional = true } 31 | tokio = { workspace = true, features = ["full"], optional = true } 32 | futures-util = { workspace = true, features = [ 33 | "alloc", 34 | ], default-features = false } 35 | 36 | 37 | ed25519-dalek = { workspace = true, features = ["rand_core"], optional = true } 38 | -------------------------------------------------------------------------------- /core/src/async_runtime/executor.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, panic::catch_unwind, sync::Arc, thread}; 2 | 3 | use once_cell::sync::OnceCell; 4 | 5 | #[cfg(feature = "smol")] 6 | pub use smol::Executor as SmolEx; 7 | 8 | #[cfg(feature = "tokio")] 9 | pub use tokio::runtime::Runtime; 10 | 11 | use super::Task; 12 | 13 | #[derive(Clone)] 14 | pub struct Executor { 15 | #[cfg(feature = "smol")] 16 | inner: Arc>, 17 | #[cfg(feature = "tokio")] 18 | inner: Arc, 19 | } 20 | 21 | impl Executor { 22 | pub fn spawn( 23 | &self, 24 | future: impl Future + Send + 'static, 25 | ) -> Task { 26 | self.inner.spawn(future).into() 27 | } 28 | 29 | #[cfg(feature = "tokio")] 30 | pub fn handle(&self) -> &tokio::runtime::Handle { 31 | self.inner.handle() 32 | } 33 | } 34 | 35 | static GLOBAL_EXECUTOR: OnceCell = OnceCell::new(); 36 | 37 | /// Returns a single-threaded global executor 38 | pub fn global_executor() -> Executor { 39 | #[cfg(feature = "smol")] 40 | fn 
init_executor() -> Executor { 41 | let ex = smol::Executor::new(); 42 | thread::Builder::new() 43 | .name("smol-executor".to_string()) 44 | .spawn(|| loop { 45 | catch_unwind(|| { 46 | smol::block_on(global_executor().inner.run(std::future::pending::<()>())) 47 | }) 48 | .ok(); 49 | }) 50 | .expect("cannot spawn executor thread"); 51 | // Prevent spawning another thread by running the process driver on this 52 | // thread. see https://github.com/smol-rs/smol/blob/master/src/spawn.rs 53 | ex.spawn(async_process::driver()).detach(); 54 | Executor { 55 | inner: Arc::new(ex), 56 | } 57 | } 58 | 59 | #[cfg(feature = "tokio")] 60 | fn init_executor() -> Executor { 61 | let ex = Arc::new(tokio::runtime::Runtime::new().expect("cannot build tokio runtime")); 62 | thread::Builder::new() 63 | .name("tokio-executor".to_string()) 64 | .spawn({ 65 | let ex = ex.clone(); 66 | move || { 67 | catch_unwind(|| ex.block_on(std::future::pending::<()>())).ok(); 68 | } 69 | }) 70 | .expect("cannot spawn tokio runtime thread"); 71 | Executor { inner: ex } 72 | } 73 | 74 | GLOBAL_EXECUTOR.get_or_init(init_executor).clone() 75 | } 76 | 77 | #[cfg(feature = "smol")] 78 | impl From>> for Executor { 79 | fn from(ex: Arc>) -> Executor { 80 | Executor { inner: ex } 81 | } 82 | } 83 | 84 | #[cfg(feature = "tokio")] 85 | impl From> for Executor { 86 | fn from(rt: Arc) -> Executor { 87 | Executor { inner: rt } 88 | } 89 | } 90 | 91 | #[cfg(feature = "smol")] 92 | impl From> for Executor { 93 | fn from(ex: smol::Executor<'static>) -> Executor { 94 | Executor { 95 | inner: Arc::new(ex), 96 | } 97 | } 98 | } 99 | 100 | #[cfg(feature = "tokio")] 101 | impl From for Executor { 102 | fn from(rt: tokio::runtime::Runtime) -> Executor { 103 | Executor { 104 | inner: Arc::new(rt), 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /core/src/async_runtime/io.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "smol")] 2 | pub use smol::io::{ 3 | split, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadHalf, WriteHalf, 4 | }; 5 | 6 | #[cfg(feature = "tokio")] 7 | pub use tokio::io::{ 8 | split, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadHalf, WriteHalf, 9 | }; 10 | -------------------------------------------------------------------------------- /core/src/async_runtime/lock.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "smol")] 2 | pub use smol::lock::{Mutex, MutexGuard, OnceCell, RwLock}; 3 | 4 | #[cfg(feature = "tokio")] 5 | pub use tokio::sync::{Mutex, MutexGuard, OnceCell, RwLock}; 6 | -------------------------------------------------------------------------------- /core/src/async_runtime/mod.rs: -------------------------------------------------------------------------------- 1 | mod executor; 2 | pub mod io; 3 | pub mod lock; 4 | pub mod net; 5 | mod spawn; 6 | mod task; 7 | mod timer; 8 | 9 | pub use executor::{global_executor, Executor}; 10 | pub use spawn::spawn; 11 | pub use task::Task; 12 | 13 | #[cfg(test)] 14 | pub fn block_on(future: impl std::future::Future) -> T { 15 | #[cfg(feature = "smol")] 16 | let result = smol::block_on(future); 17 | #[cfg(feature = "tokio")] 18 | let result = tokio::runtime::Builder::new_current_thread() 19 | .enable_all() 20 | .build() 21 | .unwrap() 22 | .block_on(future); 23 | 24 | result 25 | } 26 | -------------------------------------------------------------------------------- 
/core/src/async_runtime/net.rs: -------------------------------------------------------------------------------- 1 | #[cfg(target_family = "unix")] 2 | pub use std::os::unix::net::SocketAddr; 3 | 4 | #[cfg(all(feature = "smol", target_family = "unix"))] 5 | pub use smol::net::unix::{SocketAddr as UnixSocketAddr, UnixListener, UnixStream}; 6 | #[cfg(feature = "smol")] 7 | pub use smol::net::{TcpListener, TcpStream, UdpSocket}; 8 | 9 | #[cfg(all(feature = "tokio", target_family = "unix"))] 10 | pub use tokio::net::{unix::SocketAddr as UnixSocketAddr, UnixListener, UnixStream}; 11 | #[cfg(feature = "tokio")] 12 | pub use tokio::net::{TcpListener, TcpStream, UdpSocket}; 13 | -------------------------------------------------------------------------------- /core/src/async_runtime/spawn.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | 3 | use super::Task; 4 | 5 | pub fn spawn(future: impl Future + Send + 'static) -> Task { 6 | #[cfg(feature = "smol")] 7 | let result: Task = smol::spawn(future).into(); 8 | #[cfg(feature = "tokio")] 9 | let result: Task = tokio::spawn(future).into(); 10 | 11 | result 12 | } 13 | -------------------------------------------------------------------------------- /core/src/async_runtime/task.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::task::{Context, Poll}; 4 | 5 | use crate::error::Error; 6 | 7 | pub struct Task { 8 | #[cfg(feature = "smol")] 9 | inner_task: smol::Task, 10 | #[cfg(feature = "tokio")] 11 | inner_task: tokio::task::JoinHandle, 12 | } 13 | 14 | impl Task { 15 | pub async fn cancel(self) { 16 | #[cfg(feature = "smol")] 17 | self.inner_task.cancel().await; 18 | #[cfg(feature = "tokio")] 19 | self.inner_task.abort(); 20 | } 21 | } 22 | 23 | impl Future for Task { 24 | type Output = Result; 25 | 26 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 27 | #[cfg(feature = "smol")] 28 | let result = smol::Task::poll(Pin::new(&mut self.inner_task), cx); 29 | #[cfg(feature = "tokio")] 30 | let result = tokio::task::JoinHandle::poll(Pin::new(&mut self.inner_task), cx); 31 | 32 | #[cfg(feature = "smol")] 33 | return result.map(Ok); 34 | 35 | #[cfg(feature = "tokio")] 36 | return result.map_err(|e| e.into()); 37 | } 38 | } 39 | 40 | #[cfg(feature = "smol")] 41 | impl From> for Task { 42 | fn from(t: smol::Task) -> Task { 43 | Task { inner_task: t } 44 | } 45 | } 46 | 47 | #[cfg(feature = "tokio")] 48 | impl From> for Task { 49 | fn from(t: tokio::task::JoinHandle) -> Task { 50 | Task { inner_task: t } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /core/src/async_runtime/timer.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /core/src/async_util/backoff.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cmp::min, 3 | sync::atomic::{AtomicBool, AtomicU32, Ordering}, 4 | time::Duration, 5 | }; 6 | 7 | use super::sleep; 8 | 9 | /// Exponential backoff 10 | /// 11 | /// 12 | /// # Examples 13 | /// 14 | /// ``` 15 | /// use karyon_core::async_util::Backoff; 16 | /// 17 | /// async { 18 | /// let backoff = Backoff::new(300, 3000); 19 | /// 20 | /// loop { 21 | /// backoff.sleep().await; 22 | /// 23 | /// // do something 24 | /// break; 25 | /// } 26 | /// 27 
| /// backoff.reset(); 28 | /// 29 | /// // .... 30 | /// }; 31 | /// 32 | /// ``` 33 | /// 34 | pub struct Backoff { 35 | /// The base delay in milliseconds for the initial retry. 36 | base_delay: u64, 37 | /// The max delay in milliseconds allowed for a retry. 38 | max_delay: u64, 39 | /// Atomic counter 40 | retries: AtomicU32, 41 | /// Stop flag 42 | stop: AtomicBool, 43 | } 44 | 45 | impl Backoff { 46 | /// Creates a new Backoff. 47 | pub fn new(base_delay: u64, max_delay: u64) -> Self { 48 | Self { 49 | base_delay, 50 | max_delay, 51 | retries: AtomicU32::new(0), 52 | stop: AtomicBool::new(false), 53 | } 54 | } 55 | 56 | /// Sleep based on the current retry count and delay values. 57 | /// Retruns the delay value. 58 | pub async fn sleep(&self) -> u64 { 59 | if self.stop.load(Ordering::SeqCst) { 60 | sleep(Duration::from_millis(self.max_delay)).await; 61 | return self.max_delay; 62 | } 63 | 64 | let retries = self.retries.load(Ordering::SeqCst); 65 | let delay = self.base_delay * (2_u64).pow(retries); 66 | let delay = min(delay, self.max_delay); 67 | 68 | if delay == self.max_delay { 69 | self.stop.store(true, Ordering::SeqCst); 70 | } 71 | 72 | self.retries.store(retries + 1, Ordering::SeqCst); 73 | 74 | sleep(Duration::from_millis(delay)).await; 75 | delay 76 | } 77 | 78 | /// Reset the retry counter to 0. 79 | pub fn reset(&self) { 80 | self.retries.store(0, Ordering::SeqCst); 81 | self.stop.store(false, Ordering::SeqCst); 82 | } 83 | } 84 | 85 | #[cfg(test)] 86 | mod tests { 87 | use std::sync::Arc; 88 | 89 | use crate::async_runtime::{block_on, spawn}; 90 | 91 | use super::*; 92 | 93 | #[test] 94 | fn test_backoff() { 95 | block_on(async move { 96 | let backoff = Arc::new(Backoff::new(5, 15)); 97 | let backoff_c = backoff.clone(); 98 | spawn(async move { 99 | let delay = backoff_c.sleep().await; 100 | assert_eq!(delay, 5); 101 | 102 | let delay = backoff_c.sleep().await; 103 | assert_eq!(delay, 10); 104 | 105 | let delay = backoff_c.sleep().await; 106 | assert_eq!(delay, 15); 107 | }) 108 | .await 109 | .unwrap(); 110 | 111 | spawn(async move { 112 | backoff.reset(); 113 | let delay = backoff.sleep().await; 114 | assert_eq!(delay, 5); 115 | }) 116 | .await 117 | .unwrap(); 118 | }); 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /core/src/async_util/condwait.rs: -------------------------------------------------------------------------------- 1 | use super::CondVar; 2 | use crate::async_runtime::lock::Mutex; 3 | 4 | /// CondWait is a wrapper struct for CondVar with a Mutex boolean flag. 5 | /// 6 | /// # Example 7 | /// 8 | ///``` 9 | /// use std::sync::Arc; 10 | /// 11 | /// use karyon_core::async_util::CondWait; 12 | /// use karyon_core::async_runtime::spawn; 13 | /// 14 | /// async { 15 | /// let cond_wait = Arc::new(CondWait::new()); 16 | /// let task = spawn({ 17 | /// let cond_wait = cond_wait.clone(); 18 | /// async move { 19 | /// cond_wait.wait().await; 20 | /// // ... 21 | /// } 22 | /// }); 23 | /// 24 | /// cond_wait.signal().await; 25 | /// }; 26 | /// 27 | /// ``` 28 | /// 29 | pub struct CondWait { 30 | /// The CondVar 31 | condvar: CondVar, 32 | /// Boolean flag 33 | w: Mutex, 34 | } 35 | 36 | impl CondWait { 37 | /// Creates a new CondWait. 38 | pub fn new() -> Self { 39 | Self { 40 | condvar: CondVar::new(), 41 | w: Mutex::new(false), 42 | } 43 | } 44 | 45 | /// Waits for a signal or broadcast. 
46 | pub async fn wait(&self) { 47 | let mut w = self.w.lock().await; 48 | 49 | // While the boolean flag is false, wait for a signal. 50 | while !*w { 51 | w = self.condvar.wait(w).await; 52 | } 53 | } 54 | 55 | /// Signal a waiting task. 56 | pub async fn signal(&self) { 57 | *self.w.lock().await = true; 58 | self.condvar.signal(); 59 | } 60 | 61 | /// Signal all waiting tasks. 62 | pub async fn broadcast(&self) { 63 | *self.w.lock().await = true; 64 | self.condvar.broadcast(); 65 | } 66 | 67 | /// Reset the boolean flag value to false. 68 | pub async fn reset(&self) { 69 | *self.w.lock().await = false; 70 | } 71 | } 72 | 73 | impl Default for CondWait { 74 | fn default() -> Self { 75 | Self::new() 76 | } 77 | } 78 | 79 | #[cfg(test)] 80 | mod tests { 81 | use std::sync::{ 82 | atomic::{AtomicUsize, Ordering}, 83 | Arc, 84 | }; 85 | 86 | use crate::async_runtime::{block_on, spawn}; 87 | 88 | use super::*; 89 | 90 | #[test] 91 | fn test_cond_wait() { 92 | block_on(async { 93 | let cond_wait = Arc::new(CondWait::new()); 94 | let count = Arc::new(AtomicUsize::new(0)); 95 | 96 | let task = spawn({ 97 | let cond_wait = cond_wait.clone(); 98 | let count = count.clone(); 99 | async move { 100 | cond_wait.wait().await; 101 | count.fetch_add(1, Ordering::Relaxed); 102 | // do something 103 | } 104 | }); 105 | 106 | // Send a signal to the waiting task 107 | cond_wait.signal().await; 108 | 109 | let _ = task.await; 110 | 111 | // Reset the boolean flag 112 | cond_wait.reset().await; 113 | 114 | assert_eq!(count.load(Ordering::Relaxed), 1); 115 | 116 | let task1 = spawn({ 117 | let cond_wait = cond_wait.clone(); 118 | let count = count.clone(); 119 | async move { 120 | cond_wait.wait().await; 121 | count.fetch_add(1, Ordering::Relaxed); 122 | // do something 123 | } 124 | }); 125 | 126 | let task2 = spawn({ 127 | let cond_wait = cond_wait.clone(); 128 | let count = count.clone(); 129 | async move { 130 | cond_wait.wait().await; 131 | count.fetch_add(1, Ordering::Relaxed); 132 | // do something 133 | } 134 | }); 135 | 136 | // Broadcast a signal to all waiting tasks 137 | cond_wait.broadcast().await; 138 | 139 | let _ = task1.await; 140 | let _ = task2.await; 141 | assert_eq!(count.load(Ordering::Relaxed), 3); 142 | }); 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /core/src/async_util/mod.rs: -------------------------------------------------------------------------------- 1 | mod backoff; 2 | mod condvar; 3 | mod condwait; 4 | mod select; 5 | mod sleep; 6 | mod task_group; 7 | mod timeout; 8 | 9 | pub use backoff::Backoff; 10 | pub use condvar::CondVar; 11 | pub use condwait::CondWait; 12 | pub use select::{select, Either}; 13 | pub use sleep::sleep; 14 | pub use task_group::{TaskGroup, TaskResult}; 15 | pub use timeout::timeout; 16 | -------------------------------------------------------------------------------- /core/src/async_util/select.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::Pin; 3 | use std::task::{Context, Poll}; 4 | 5 | use pin_project_lite::pin_project; 6 | 7 | /// Returns the result of the future that completes first, preferring future1 8 | /// if both are ready. 
9 | /// 10 | /// # Examples 11 | /// 12 | /// ``` 13 | /// use std::future; 14 | /// 15 | /// use karyon_core::async_util::{select, Either}; 16 | /// 17 | /// async { 18 | /// let fut1 = future::pending::(); 19 | /// let fut2 = future::ready(0); 20 | /// let res = select(fut1, fut2).await; 21 | /// assert!(matches!(res, Either::Right(0))); 22 | /// // .... 23 | /// }; 24 | /// 25 | /// ``` 26 | /// 27 | pub fn select(future1: F1, future2: F2) -> Select 28 | where 29 | F1: Future, 30 | F2: Future, 31 | { 32 | Select { future1, future2 } 33 | } 34 | 35 | pin_project! { 36 | #[derive(Debug)] 37 | pub struct Select { 38 | #[pin] 39 | future1: F1, 40 | #[pin] 41 | future2: F2, 42 | } 43 | } 44 | 45 | /// The return value from the [`select`] function, indicating which future 46 | /// completed first. 47 | #[derive(Debug)] 48 | pub enum Either { 49 | Left(T1), 50 | Right(T2), 51 | } 52 | 53 | // Implement the Future trait for the Select struct. 54 | impl Future for Select 55 | where 56 | F1: Future, 57 | F2: Future, 58 | { 59 | type Output = Either; 60 | 61 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 62 | let this = self.project(); 63 | 64 | if let Poll::Ready(t) = this.future1.poll(cx) { 65 | return Poll::Ready(Either::Left(t)); 66 | } 67 | 68 | if let Poll::Ready(t) = this.future2.poll(cx) { 69 | return Poll::Ready(Either::Right(t)); 70 | } 71 | 72 | Poll::Pending 73 | } 74 | } 75 | 76 | #[cfg(test)] 77 | mod tests { 78 | use std::future; 79 | 80 | use crate::{async_runtime::block_on, async_util::sleep}; 81 | 82 | use super::{select, Either}; 83 | 84 | #[test] 85 | fn test_async_select() { 86 | block_on(async move { 87 | let fut = select(sleep(std::time::Duration::MAX), future::ready(0 as u32)).await; 88 | assert!(matches!(fut, Either::Right(0))); 89 | 90 | let fut1 = future::pending::(); 91 | let fut2 = future::ready(0); 92 | let res = select(fut1, fut2).await; 93 | assert!(matches!(res, Either::Right(0))); 94 | 95 | let fut1 = future::ready(0); 96 | let fut2 = future::pending::(); 97 | let res = select(fut1, fut2).await; 98 | assert!(matches!(res, Either::Left(_))); 99 | }); 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /core/src/async_util/sleep.rs: -------------------------------------------------------------------------------- 1 | pub async fn sleep(duration: std::time::Duration) { 2 | #[cfg(feature = "smol")] 3 | smol::Timer::after(duration).await; 4 | #[cfg(feature = "tokio")] 5 | tokio::time::sleep(duration).await; 6 | } 7 | -------------------------------------------------------------------------------- /core/src/async_util/timeout.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, time::Duration}; 2 | 3 | use crate::{error::Error, Result}; 4 | 5 | use super::{select, sleep, Either}; 6 | 7 | /// Waits for a future to complete or times out if it exceeds a specified 8 | /// duration. 
9 | /// 10 | /// # Example 11 | /// 12 | /// ``` 13 | /// use std::{future, time::Duration}; 14 | /// 15 | /// use karyon_core::async_util::timeout; 16 | /// 17 | /// async { 18 | /// let fut = future::pending::<()>(); 19 | /// assert!(timeout(Duration::from_millis(100), fut).await.is_err()); 20 | /// }; 21 | /// 22 | /// ``` 23 | /// 24 | pub async fn timeout(delay: Duration, future1: F) -> Result 25 | where 26 | F: Future, 27 | { 28 | let result = select(sleep(delay), future1).await; 29 | 30 | match result { 31 | Either::Left(_) => Err(Error::Timeout), 32 | Either::Right(res) => Ok(res), 33 | } 34 | } 35 | 36 | #[cfg(test)] 37 | mod tests { 38 | use super::*; 39 | use std::{future, time::Duration}; 40 | 41 | #[test] 42 | fn test_timeout() { 43 | crate::async_runtime::block_on(async move { 44 | let fut = future::pending::<()>(); 45 | assert!(timeout(Duration::from_millis(10), fut).await.is_err()); 46 | 47 | let fut = sleep(Duration::from_millis(10)); 48 | assert!(timeout(Duration::from_millis(50), fut).await.is_ok()) 49 | }); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /core/src/crypto/key_pair.rs: -------------------------------------------------------------------------------- 1 | use ed25519_dalek::{Signer as _, Verifier as _}; 2 | use rand::rngs::OsRng; 3 | 4 | use crate::{error::Error, Result}; 5 | 6 | /// key cryptography type 7 | pub enum KeyPairType { 8 | Ed25519, 9 | } 10 | 11 | /// A Secret key 12 | pub struct SecretKey(pub Vec); 13 | 14 | #[derive(Clone)] 15 | pub enum KeyPair { 16 | Ed25519(Ed25519KeyPair), 17 | } 18 | 19 | impl KeyPair { 20 | /// Generate a new random keypair. 21 | pub fn generate(kp_type: &KeyPairType) -> Self { 22 | match kp_type { 23 | KeyPairType::Ed25519 => Self::Ed25519(Ed25519KeyPair::generate()), 24 | } 25 | } 26 | 27 | /// Sign a message using the private key. 28 | pub fn sign(&self, msg: &[u8]) -> Vec { 29 | match self { 30 | KeyPair::Ed25519(kp) => kp.sign(msg), 31 | } 32 | } 33 | 34 | /// Get the public key of this keypair. 35 | pub fn public(&self) -> PublicKey { 36 | match self { 37 | KeyPair::Ed25519(kp) => kp.public(), 38 | } 39 | } 40 | 41 | /// Get the secret key of this keypair. 42 | pub fn secret(&self) -> SecretKey { 43 | match self { 44 | KeyPair::Ed25519(kp) => kp.secret(), 45 | } 46 | } 47 | } 48 | 49 | /// An extension trait, adding essential methods to all [`KeyPair`] types. 50 | trait KeyPairExt { 51 | /// Sign a message using the private key. 52 | fn sign(&self, msg: &[u8]) -> Vec; 53 | 54 | /// Get the public key of this keypair. 55 | fn public(&self) -> PublicKey; 56 | 57 | /// Get the secret key of this keypair. 
58 | fn secret(&self) -> SecretKey; 59 | } 60 | 61 | #[derive(Clone)] 62 | pub struct Ed25519KeyPair(ed25519_dalek::SigningKey); 63 | 64 | impl Ed25519KeyPair { 65 | fn generate() -> Self { 66 | Self(ed25519_dalek::SigningKey::generate(&mut OsRng)) 67 | } 68 | } 69 | 70 | impl KeyPairExt for Ed25519KeyPair { 71 | fn sign(&self, msg: &[u8]) -> Vec { 72 | self.0.sign(msg).to_bytes().to_vec() 73 | } 74 | 75 | fn public(&self) -> PublicKey { 76 | PublicKey::Ed25519(Ed25519PublicKey(self.0.verifying_key())) 77 | } 78 | 79 | fn secret(&self) -> SecretKey { 80 | SecretKey(self.0.to_bytes().to_vec()) 81 | } 82 | } 83 | 84 | #[derive(Debug)] 85 | pub enum PublicKey { 86 | Ed25519(Ed25519PublicKey), 87 | } 88 | 89 | impl PublicKey { 90 | pub fn from_bytes(kp_type: &KeyPairType, pk: &[u8]) -> Result { 91 | match kp_type { 92 | KeyPairType::Ed25519 => Ok(Self::Ed25519(Ed25519PublicKey::from_bytes(pk)?)), 93 | } 94 | } 95 | 96 | pub fn as_bytes(&self) -> &[u8] { 97 | match self { 98 | Self::Ed25519(pk) => pk.as_bytes(), 99 | } 100 | } 101 | 102 | /// Verify a signature on a message with this public key. 103 | pub fn verify(&self, msg: &[u8], signature: &[u8]) -> Result<()> { 104 | match self { 105 | Self::Ed25519(pk) => pk.verify(msg, signature), 106 | } 107 | } 108 | } 109 | 110 | /// An extension trait, adding essential methods to all [`PublicKey`] types. 111 | trait PublicKeyExt { 112 | fn as_bytes(&self) -> &[u8]; 113 | 114 | /// Verify a signature on a message with this public key. 115 | fn verify(&self, msg: &[u8], signature: &[u8]) -> Result<()>; 116 | } 117 | 118 | #[derive(Debug)] 119 | pub struct Ed25519PublicKey(ed25519_dalek::VerifyingKey); 120 | 121 | impl Ed25519PublicKey { 122 | pub fn from_bytes(pk: &[u8]) -> Result { 123 | let pk_bytes: [u8; 32] = pk 124 | .try_into() 125 | .map_err(|_| Error::TryInto("Failed to convert slice to [u8; 32]".to_string()))?; 126 | 127 | Ok(Self(ed25519_dalek::VerifyingKey::from_bytes(&pk_bytes)?)) 128 | } 129 | } 130 | 131 | impl PublicKeyExt for Ed25519PublicKey { 132 | fn as_bytes(&self) -> &[u8] { 133 | self.0.as_bytes() 134 | } 135 | 136 | fn verify(&self, msg: &[u8], signature: &[u8]) -> Result<()> { 137 | let sig_bytes: [u8; 64] = signature 138 | .try_into() 139 | .map_err(|_| Error::TryInto("Failed to convert slice to [u8; 64]".to_string()))?; 140 | self.0 141 | .verify(msg, &ed25519_dalek::Signature::from_bytes(&sig_bytes))?; 142 | Ok(()) 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /core/src/crypto/mod.rs: -------------------------------------------------------------------------------- 1 | mod key_pair; 2 | 3 | pub use key_pair::{KeyPair, KeyPairType, PublicKey, SecretKey}; 4 | -------------------------------------------------------------------------------- /core/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error as ThisError; 2 | 3 | pub type Result = std::result::Result; 4 | 5 | #[derive(ThisError, Debug)] 6 | pub enum Error { 7 | #[error(transparent)] 8 | IO(#[from] std::io::Error), 9 | 10 | #[error("TryInto Error: {0}")] 11 | TryInto(String), 12 | 13 | #[error("Timeout Error")] 14 | Timeout, 15 | 16 | #[error("Path Not Found Error: {0}")] 17 | PathNotFound(String), 18 | 19 | #[error("Event Emit Error: {0}")] 20 | EventEmitError(String), 21 | 22 | #[cfg(feature = "crypto")] 23 | #[error(transparent)] 24 | Ed25519(#[from] ed25519_dalek::ed25519::Error), 25 | 26 | #[cfg(feature = "tokio")] 27 | #[error(transparent)] 28 | 
TokioJoinError(#[from] tokio::task::JoinError), 29 | 30 | #[error("Channel Send Error: {0}")] 31 | ChannelSend(String), 32 | 33 | #[error(transparent)] 34 | ChannelRecv(#[from] async_channel::RecvError), 35 | 36 | #[error(transparent)] 37 | BincodeDecode(#[from] bincode::error::DecodeError), 38 | 39 | #[error(transparent)] 40 | BincodeEncode(#[from] bincode::error::EncodeError), 41 | } 42 | 43 | impl From> for Error { 44 | fn from(error: async_channel::SendError) -> Self { 45 | Error::ChannelSend(error.to_string()) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /core/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all(feature = "smol", feature = "tokio"))] 2 | compile_error!("Only one async runtime feature should be enabled"); 3 | 4 | #[cfg(not(any(feature = "smol", feature = "tokio")))] 5 | compile_error!("At least one async runtime feature must be enabled for this crate."); 6 | 7 | /// A set of helper tools and functions. 8 | pub mod util; 9 | 10 | /// A set of async utilities. 11 | pub mod async_util; 12 | 13 | /// Represents karyon's Core Error. 14 | pub mod error; 15 | 16 | /// [`event::EventEmitter`] implementation. 17 | pub mod event; 18 | 19 | /// A simple publish-subscribe system [`Read More`](./pubsub/struct.Publisher.html) 20 | pub mod pubsub; 21 | 22 | /// A cross-compatible async runtime 23 | pub mod async_runtime; 24 | 25 | #[cfg(feature = "crypto")] 26 | /// Collects common cryptographic tools 27 | pub mod crypto; 28 | 29 | pub use error::{Error, Result}; 30 | -------------------------------------------------------------------------------- /core/src/pubsub.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, sync::Arc}; 2 | 3 | use futures_util::stream::{FuturesUnordered, StreamExt}; 4 | use log::error; 5 | 6 | use crate::{async_runtime::lock::Mutex, util::random_32, Result}; 7 | 8 | const CHANNEL_BUFFER_SIZE: usize = 1000; 9 | 10 | pub type SubscriptionID = u32; 11 | 12 | /// A simple publish-subscribe system. 13 | // # Example 14 | /// 15 | /// ``` 16 | /// use karyon_core::pubsub::{Publisher}; 17 | /// 18 | /// async { 19 | /// let publisher = Publisher::new(); 20 | /// 21 | /// let sub = publisher.subscribe().await; 22 | /// 23 | /// publisher.notify(&String::from("MESSAGE")).await; 24 | /// 25 | /// let msg = sub.recv().await; 26 | /// 27 | /// // .... 28 | /// }; 29 | /// 30 | /// ``` 31 | pub struct Publisher { 32 | subs: Mutex>>, 33 | subscription_buffer_size: usize, 34 | } 35 | 36 | impl Publisher { 37 | /// Creates a new [`Publisher`] 38 | pub fn new() -> Arc> { 39 | Arc::new(Self { 40 | subs: Mutex::new(HashMap::new()), 41 | subscription_buffer_size: CHANNEL_BUFFER_SIZE, 42 | }) 43 | } 44 | 45 | /// Creates a new [`Publisher`] with the provided buffer size for the 46 | /// [`Subscription`] channel. 47 | /// 48 | /// This is important to control the memory used by the [`Subscription`] channel. 49 | /// If the subscriber can't keep up with the new messages coming, then the 50 | /// channel buffer will fill with new messages, and if the buffer is full, 51 | /// the emit function will block until the subscriber starts to process 52 | /// the buffered messages. 53 | /// 54 | /// If `size` is zero, this function will panic. 
55 | pub fn with_buffer_size(size: usize) -> Arc> { 56 | Arc::new(Self { 57 | subs: Mutex::new(HashMap::new()), 58 | subscription_buffer_size: size, 59 | }) 60 | } 61 | 62 | /// Subscribes and return a [`Subscription`] 63 | pub async fn subscribe(self: &Arc) -> Subscription { 64 | let mut subs = self.subs.lock().await; 65 | 66 | let chan = async_channel::bounded(self.subscription_buffer_size); 67 | 68 | let mut sub_id = random_32(); 69 | 70 | // Generate a new one if sub_id already exists 71 | while subs.contains_key(&sub_id) { 72 | sub_id = random_32(); 73 | } 74 | 75 | let sub = Subscription::new(sub_id, self.clone(), chan.1); 76 | subs.insert(sub_id, chan.0); 77 | 78 | sub 79 | } 80 | 81 | /// Unsubscribes by providing subscription id 82 | pub async fn unsubscribe(self: &Arc, id: &SubscriptionID) { 83 | self.subs.lock().await.remove(id); 84 | } 85 | 86 | /// Notifies all subscribers 87 | pub async fn notify(self: &Arc, value: &T) { 88 | let mut subs = self.subs.lock().await; 89 | 90 | let mut results = FuturesUnordered::new(); 91 | let mut closed_subs = vec![]; 92 | 93 | for (sub_id, sub) in subs.iter() { 94 | let result = async { (*sub_id, sub.send(value.clone()).await) }; 95 | results.push(result); 96 | } 97 | 98 | while let Some((id, fut_err)) = results.next().await { 99 | if let Err(err) = fut_err { 100 | error!("failed to notify {}: {}", id, err); 101 | closed_subs.push(id); 102 | } 103 | } 104 | drop(results); 105 | 106 | for sub_id in closed_subs.iter() { 107 | subs.remove(sub_id); 108 | } 109 | } 110 | } 111 | 112 | // Subscription 113 | pub struct Subscription { 114 | id: SubscriptionID, 115 | recv_chan: async_channel::Receiver, 116 | publisher: Arc>, 117 | } 118 | 119 | impl Subscription { 120 | /// Creates a new [`Subscription`] 121 | pub fn new( 122 | id: SubscriptionID, 123 | publisher: Arc>, 124 | recv_chan: async_channel::Receiver, 125 | ) -> Subscription { 126 | Self { 127 | id, 128 | recv_chan, 129 | publisher, 130 | } 131 | } 132 | 133 | /// Receive a message from the [`Publisher`] 134 | pub async fn recv(&self) -> Result { 135 | let msg = self.recv_chan.recv().await?; 136 | Ok(msg) 137 | } 138 | 139 | /// Unsubscribe from the [`Publisher`] 140 | pub async fn unsubscribe(&self) { 141 | self.publisher.unsubscribe(&self.id).await; 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /core/src/util/decode.rs: -------------------------------------------------------------------------------- 1 | use bincode::Decode; 2 | 3 | use crate::Result; 4 | 5 | /// Decodes a given type `T` from the given slice. returns the decoded value 6 | /// along with the number of bytes read. 7 | pub fn decode>(src: &[u8]) -> Result<(T, usize)> { 8 | let (result, bytes_read) = bincode::decode_from_slice(src, bincode::config::standard())?; 9 | Ok((result, bytes_read)) 10 | } 11 | -------------------------------------------------------------------------------- /core/src/util/encode.rs: -------------------------------------------------------------------------------- 1 | use bincode::Encode; 2 | 3 | use crate::{Error, Result}; 4 | 5 | /// Encode the given type `T` into a `Vec`. 6 | pub fn encode(src: &T) -> Result> { 7 | let vec = bincode::encode_to_vec(src, bincode::config::standard())?; 8 | Ok(vec) 9 | } 10 | 11 | /// Encode the given type `T` into the given slice.. 
12 | pub fn encode_into_slice(src: &T, dst: &mut [u8]) -> Result { 13 | bincode::encode_into_slice(src, dst, bincode::config::standard()).map_err(Error::from) 14 | } 15 | -------------------------------------------------------------------------------- /core/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | mod decode; 2 | mod encode; 3 | mod path; 4 | 5 | pub use decode::decode; 6 | pub use encode::{encode, encode_into_slice}; 7 | pub use path::{home_dir, tilde_expand}; 8 | 9 | use rand::{rngs::OsRng, Rng}; 10 | 11 | /// Generates and returns a random u32 using `rand::rngs::OsRng`. 12 | pub fn random_32() -> u32 { 13 | OsRng.gen() 14 | } 15 | 16 | /// Generates and returns a random u64 using `rand::rngs::OsRng`. 17 | pub fn random_64() -> u64 { 18 | OsRng.gen() 19 | } 20 | 21 | /// Generates and returns a random u16 using `rand::rngs::OsRng`. 22 | pub fn random_16() -> u16 { 23 | OsRng.gen() 24 | } 25 | -------------------------------------------------------------------------------- /core/src/util/path.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use crate::{error::Error, Result}; 4 | 5 | /// Returns the user's home directory as a `PathBuf`. 6 | #[allow(dead_code)] 7 | pub fn home_dir() -> Result { 8 | dirs::home_dir().ok_or(Error::PathNotFound("Home dir not found".to_string())) 9 | } 10 | 11 | /// Expands a tilde (~) in a path and returns the expanded `PathBuf`. 12 | #[allow(dead_code)] 13 | pub fn tilde_expand(path: &str) -> Result { 14 | match path { 15 | "~" => home_dir(), 16 | p if p.starts_with("~/") => Ok(home_dir()?.join(&path[2..])), 17 | _ => Ok(PathBuf::from(path)), 18 | } 19 | } 20 | 21 | #[cfg(test)] 22 | mod tests { 23 | use super::*; 24 | 25 | #[test] 26 | fn test_tilde_expand() { 27 | let path = "~/src"; 28 | let expanded_path = dirs::home_dir().unwrap().join("src"); 29 | assert_eq!(tilde_expand(path).unwrap(), expanded_path); 30 | 31 | let path = "~"; 32 | let expanded_path = dirs::home_dir().unwrap(); 33 | assert_eq!(tilde_expand(path).unwrap(), expanded_path); 34 | 35 | let path = ""; 36 | let expanded_path = PathBuf::from(""); 37 | assert_eq!(tilde_expand(path).unwrap(), expanded_path); 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /jsonrpc/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "karyon_jsonrpc" 3 | description = "A fast and lightweight async JSONRPC2.0 implementation." 
4 | version.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | repository.workspace = true 8 | license.workspace = true 9 | authors.workspace = true 10 | readme = "README.md" 11 | 12 | 13 | [features] 14 | default = ["smol", "tcp", "unix"] 15 | tcp = ["karyon_net/tcp"] 16 | tls = ["tcp", "karyon_net/tls"] 17 | ws = ["tcp", "karyon_net/ws", "async-tungstenite"] 18 | unix = ["karyon_net/unix"] 19 | smol = [ 20 | "karyon_core/smol", 21 | "karyon_net/smol", 22 | "karyon_jsonrpc_macro/smol", 23 | "async-tungstenite?/async-std-runtime", 24 | ] 25 | tokio = [ 26 | "karyon_core/tokio", 27 | "karyon_net/tokio", 28 | "karyon_jsonrpc_macro/tokio", 29 | "async-tungstenite?/tokio-runtime", 30 | ] 31 | 32 | [dependencies] 33 | karyon_core = { workspace = true } 34 | karyon_net = { workspace = true } 35 | karyon_jsonrpc_macro = { workspace = true } 36 | 37 | log = { workspace = true } 38 | rand = { workspace = true } 39 | thiserror = { workspace = true } 40 | 41 | # encode/decode 42 | serde = { workspace = true, features = ["derive"] } 43 | serde_json = { workspace = true } 44 | 45 | # async 46 | async-trait = { workspace = true } 47 | async-channel = { workspace = true } 48 | 49 | # websocket 50 | async-tungstenite = { workspace = true, optional = true } 51 | 52 | [dev-dependencies] 53 | smol = { workspace = true } 54 | env_logger = "0.11" 55 | -------------------------------------------------------------------------------- /jsonrpc/README.md: -------------------------------------------------------------------------------- 1 | # karyon jsonrpc 2 | 3 | A fast and lightweight async implementation of [JSON-RPC 4 | 2.0](https://www.jsonrpc.org/specification). 5 | 6 | Features: 7 | - Supports TCP, TLS, WebSocket, and Unix protocols (see the feature sketch below). 8 | - Uses `smol` (async-std) as the async runtime, with support for `tokio` via the 9 | `tokio` feature. 10 | - Enables the registration of multiple services (structs) on a single server. 11 | - Offers support for a custom JSON codec. 12 | - Includes support for pub/sub. 13 | - Allows the use of an `async_executors::Executor` or `tokio::Runtime` when building 14 | the server.
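The transports listed above map to Cargo features of this crate (`tcp` and `unix` are enabled by default, while `tls` and `ws` are opt-in; see `Cargo.toml`). A minimal sketch of enabling the WebSocket and TLS transports:

```toml
[dependencies]
# Sketch only: `ws` and `tls` are additive features on top of the defaults.
karyon_jsonrpc = { version = "0.3.2", features = ["ws", "tls"] }
```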
15 | 16 | 17 | ## Install 18 | 19 | ```bash 20 | 21 | $ cargo add karyon_jsonrpc 22 | 23 | ``` 24 | 25 | ## Example 26 | 27 | ```rust 28 | use std::{sync::Arc, time::Duration}; 29 | 30 | use serde_json::Value; 31 | use smol::stream::StreamExt; 32 | 33 | use karyon_jsonrpc::{ 34 | error::RPCError, server::{Server, ServerBuilder, Channel}, client::ClientBuilder, 35 | rpc_impl, rpc_pubsub_impl, rpc_method, message::SubscriptionID, 36 | }; 37 | 38 | struct HelloWorld {} 39 | 40 | // It is possible to change the service name by adding a `name` attribute 41 | #[rpc_impl] 42 | impl HelloWorld { 43 | async fn say_hello(&self, params: Value) -> Result { 44 | let msg: String = serde_json::from_value(params)?; 45 | Ok(serde_json::json!(format!("Hello {msg}!"))) 46 | } 47 | 48 | #[rpc_method(name = "foo_method")] 49 | async fn foo(&self, params: Value) -> Result { 50 | Ok(serde_json::json!("foo!")) 51 | } 52 | 53 | async fn bar(&self, params: Value) -> Result { 54 | Ok(serde_json::json!("bar!")) 55 | } 56 | } 57 | 58 | 59 | // It is possible to change the service name by adding a `name` attribute 60 | #[rpc_pubsub_impl] 61 | impl HelloWorld { 62 | async fn log_subscribe(&self, chan: Arc, method: String, _params: Value) -> Result { 63 | let sub = chan.new_subscription(&method, None).await.expect("Failed to subscribe"); 64 | let sub_id = sub.id.clone(); 65 | smol::spawn(async move { 66 | loop { 67 | smol::Timer::after(std::time::Duration::from_secs(1)).await; 68 | if let Err(err) = sub.notify(serde_json::json!("Hello")).await { 69 | println!("Failed to send notification: {err}"); 70 | break; 71 | } 72 | } 73 | }) 74 | .detach(); 75 | 76 | Ok(serde_json::json!(sub_id)) 77 | } 78 | 79 | async fn log_unsubscribe(&self, chan: Arc, method: String, params: Value) -> Result { 80 | let sub_id: SubscriptionID = serde_json::from_value(params)?; 81 | chan.remove_subscription(&sub_id).await; 82 | Ok(serde_json::json!(true)) 83 | } 84 | } 85 | 86 | 87 | // Server 88 | async { 89 | let service = Arc::new(HelloWorld {}); 90 | // Creates a new server 91 | 92 | let server = ServerBuilder::new("tcp://127.0.0.1:60000") 93 | .expect("create new server builder") 94 | .service(service.clone()) 95 | .pubsub_service(service) 96 | .build() 97 | .await 98 | .expect("build the server"); 99 | 100 | // Starts the server 101 | server.start_block() 102 | .await 103 | .expect("Start the server"); 104 | 105 | }; 106 | 107 | // Client 108 | async { 109 | // Creates a new client 110 | let client = ClientBuilder::new("tcp://127.0.0.1:60000") 111 | .expect("create new client builder") 112 | .build() 113 | .await 114 | .expect("build the client"); 115 | 116 | let result: String = client.call("HelloWorld.say_hello", "world".to_string()) 117 | .await 118 | .expect("send a request"); 119 | 120 | let result: String = client.call("HelloWorld.foo_method", ()) 121 | .await 122 | .expect("send a request"); 123 | 124 | let sub = client 125 | .subscribe("HelloWorld.log_subscribe", ()) 126 | .await 127 | .expect("Subscribe to log_subscribe method"); 128 | 129 | let sub_id = sub.id(); 130 | smol::spawn(async move { 131 | loop { 132 | let m = sub.recv().await.expect("Receive new log msg"); 133 | println!("Receive new log {m}"); 134 | } 135 | }) 136 | .detach(); 137 | 138 | // Unsubscribe after 5 seconds 139 | smol::Timer::after(std::time::Duration::from_secs(5)).await; 140 | 141 | client 142 | .unsubscribe("HelloWorld.log_unsubscribe", sub_id) 143 | .await 144 | .expect("Unsubscribe from log_unsubscirbe method"); 145 | }; 146 | 147 | ``` 148 | 149 | ## 
Supported Client Implementations 150 | 151 | - [X] [Golang](https://github.com/karyontech/karyon-go) 152 | - [ ] Python 153 | - [ ] JavaScript/TypeScript 154 | 155 | 156 | -------------------------------------------------------------------------------- /jsonrpc/examples/client.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import random 3 | import json 4 | 5 | HOST = "127.0.0.1" 6 | PORT = 6000 7 | 8 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 9 | s.connect((HOST, PORT)) 10 | 11 | req = { 12 | "jsonrpc": "2.0", 13 | "id": str(random.randint(0, 1000)), 14 | "method": "Calc.ping", 15 | "params": None, 16 | } 17 | print("Send: ", req) 18 | s.sendall((json.dumps(req)).encode()) 19 | res = s.recv(1024) 20 | res = json.loads(res) 21 | print("Received: ", res) 22 | 23 | req = { 24 | "jsonrpc": "2.0", 25 | "id": str(random.randint(0, 1000)), 26 | "method": "Calc.version", 27 | "params": None, 28 | } 29 | print("Send: ", req) 30 | s.sendall((json.dumps(req)).encode()) 31 | res = s.recv(1024) 32 | res = json.loads(res) 33 | print("Received: ", res) 34 | 35 | s.close() 36 | -------------------------------------------------------------------------------- /jsonrpc/examples/client.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use log::info; 4 | use serde::{Deserialize, Serialize}; 5 | use smol::Timer; 6 | 7 | use karyon_jsonrpc::client::ClientBuilder; 8 | 9 | #[derive(Deserialize, Serialize)] 10 | struct Req { 11 | x: u32, 12 | y: u32, 13 | } 14 | 15 | #[derive(Deserialize, Serialize, Debug)] 16 | struct Pong {} 17 | 18 | fn main() { 19 | env_logger::init(); 20 | smol::future::block_on(async { 21 | let client = ClientBuilder::new("tcp://127.0.0.1:7878") 22 | .expect("Create client builder") 23 | .build() 24 | .await 25 | .expect("Create rpc client"); 26 | 27 | let result: String = client 28 | .call("Calc.version", ()) 29 | .await 30 | .expect("Call Calc.version method"); 31 | info!("Version result: {result}"); 32 | 33 | loop { 34 | Timer::after(Duration::from_millis(100)).await; 35 | let result: Pong = client 36 | .call("Calc.ping", ()) 37 | .await 38 | .expect("Call Calc.ping method"); 39 | info!("Ping result: {:?}", result); 40 | } 41 | }); 42 | } 43 | -------------------------------------------------------------------------------- /jsonrpc/examples/client_custom_codec.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use log::info; 4 | use serde::{Deserialize, Serialize}; 5 | use smol::Timer; 6 | 7 | use karyon_jsonrpc::{ 8 | client::ClientBuilder, 9 | codec::{ByteBuffer, Codec, Decoder, Encoder}, 10 | error::Error, 11 | }; 12 | 13 | #[derive(Deserialize, Serialize)] 14 | struct Req { 15 | x: u32, 16 | y: u32, 17 | } 18 | 19 | #[derive(Deserialize, Serialize, Debug)] 20 | struct Pong {} 21 | 22 | #[derive(Clone)] 23 | pub struct CustomJsonCodec {} 24 | 25 | impl Codec for CustomJsonCodec { 26 | type Message = serde_json::Value; 27 | type Error = Error; 28 | } 29 | 30 | impl Encoder for CustomJsonCodec { 31 | type EnMessage = serde_json::Value; 32 | type EnError = Error; 33 | fn encode( 34 | &self, 35 | src: &Self::EnMessage, 36 | dst: &mut ByteBuffer, 37 | ) -> std::result::Result { 38 | let msg = match serde_json::to_string(src) { 39 | Ok(m) => m, 40 | Err(err) => return Err(Error::Encode(err.to_string())), 41 | }; 42 | let buf = msg.as_bytes(); 43 | dst.extend_from_slice(buf); 44 | 
Ok(buf.len()) 45 | } 46 | } 47 | 48 | impl Decoder for CustomJsonCodec { 49 | type DeMessage = serde_json::Value; 50 | type DeError = Error; 51 | fn decode( 52 | &self, 53 | src: &mut ByteBuffer, 54 | ) -> std::result::Result, Self::DeError> { 55 | let de = serde_json::Deserializer::from_slice(src.as_ref()); 56 | let mut iter = de.into_iter::(); 57 | 58 | let item = match iter.next() { 59 | Some(Ok(item)) => item, 60 | Some(Err(ref e)) if e.is_eof() => return Ok(None), 61 | Some(Err(e)) => return Err(Error::Decode(e.to_string())), 62 | None => return Ok(None), 63 | }; 64 | 65 | Ok(Some((iter.byte_offset(), item))) 66 | } 67 | } 68 | 69 | fn main() { 70 | env_logger::init(); 71 | smol::future::block_on(async { 72 | let client = ClientBuilder::new_with_codec("tcp://127.0.0.1:6000", CustomJsonCodec {}) 73 | .expect("Create client builder") 74 | .build() 75 | .await 76 | .expect("Create rpc client"); 77 | 78 | loop { 79 | Timer::after(Duration::from_millis(100)).await; 80 | let result: Pong = client 81 | .call("Calc.ping", ()) 82 | .await 83 | .expect("Call Calc.ping method"); 84 | info!("Ping result: {:?}", result); 85 | } 86 | }); 87 | } 88 | -------------------------------------------------------------------------------- /jsonrpc/examples/client_derive.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use log::info; 4 | use serde::{Deserialize, Serialize}; 5 | use smol::Timer; 6 | 7 | use karyon_jsonrpc::client::ClientBuilder; 8 | 9 | #[derive(Deserialize, Serialize)] 10 | struct Req { 11 | x: u32, 12 | y: u32, 13 | } 14 | 15 | #[derive(Deserialize, Serialize, Debug)] 16 | struct Pong {} 17 | 18 | fn main() { 19 | env_logger::init(); 20 | smol::future::block_on(async { 21 | let client = ClientBuilder::new("tcp://127.0.0.1:6000") 22 | .expect("Create client builder") 23 | .build() 24 | .await 25 | .expect("Create rpc client"); 26 | 27 | let params = Req { x: 10, y: 7 }; 28 | let result: u32 = client 29 | .call("calculator.math.add", params) 30 | .await 31 | .expect("Call calculator.math.add method"); 32 | info!("Add result: {result}"); 33 | 34 | let params = Req { x: 10, y: 7 }; 35 | let result: u32 = client 36 | .call("calculator.math.sub", params) 37 | .await 38 | .expect("Call calculator.math.sub method"); 39 | info!("Sub result: {result}"); 40 | 41 | let result: String = client 42 | .call("calculator.version", ()) 43 | .await 44 | .expect("Call calculator.version method"); 45 | info!("Version result: {result}"); 46 | 47 | loop { 48 | Timer::after(Duration::from_millis(100)).await; 49 | let result: Pong = client 50 | .call("calculator.ping", ()) 51 | .await 52 | .expect("Call calculator.ping method"); 53 | info!("Ping result: {:?}", result); 54 | } 55 | }); 56 | } 57 | -------------------------------------------------------------------------------- /jsonrpc/examples/pubsub_client.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use log::info; 4 | use serde::{Deserialize, Serialize}; 5 | use smol::Timer; 6 | 7 | use karyon_jsonrpc::client::ClientBuilder; 8 | 9 | #[derive(Deserialize, Serialize, Debug)] 10 | struct Pong {} 11 | 12 | async fn run_client() { 13 | let client = ClientBuilder::new("tcp://127.0.0.1:6000") 14 | .expect("Create client builder") 15 | .build() 16 | .await 17 | .expect("Build a client"); 18 | 19 | let sub = client 20 | .subscribe("Calc.log_subscribe", ()) 21 | .await 22 | .expect("Subscribe to log_subscribe method"); 23 | 24 
| smol::spawn(async move { 25 | loop { 26 | let m = sub.recv().await.expect("Receive new log msg"); 27 | info!("Receive new log {m}"); 28 | } 29 | }) 30 | .detach(); 31 | 32 | loop { 33 | Timer::after(Duration::from_millis(500)).await; 34 | let _: Pong = client 35 | .call("Calc.ping", ()) 36 | .await 37 | .expect("Send ping request"); 38 | } 39 | } 40 | 41 | fn main() { 42 | env_logger::init(); 43 | smol::future::block_on(async { 44 | smol::spawn(run_client()).await; 45 | }); 46 | } 47 | -------------------------------------------------------------------------------- /jsonrpc/examples/pubsub_server.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use log::error; 4 | use serde::{Deserialize, Serialize}; 5 | use serde_json::Value; 6 | 7 | use karyon_core::{async_util::sleep, util::random_32}; 8 | use karyon_jsonrpc::{ 9 | error::RPCError, 10 | message::SubscriptionID, 11 | rpc_impl, rpc_pubsub_impl, 12 | server::{channel::Channel, ServerBuilder}, 13 | }; 14 | 15 | struct Calc {} 16 | 17 | #[derive(Deserialize, Serialize)] 18 | struct Req { 19 | x: u32, 20 | y: u32, 21 | } 22 | 23 | #[derive(Deserialize, Serialize)] 24 | struct Pong {} 25 | 26 | #[rpc_impl] 27 | impl Calc { 28 | async fn ping(&self, _params: Value) -> Result { 29 | Ok(serde_json::json!(Pong {})) 30 | } 31 | } 32 | 33 | #[rpc_pubsub_impl] 34 | impl Calc { 35 | async fn log_subscribe( 36 | &self, 37 | chan: Arc, 38 | method: String, 39 | _params: Value, 40 | ) -> Result { 41 | let sub = chan 42 | .new_subscription(&method, Some(random_32())) 43 | .await 44 | .map_err(|_| RPCError::InvalidRequest("Duplicated subscription".into()))?; 45 | let sub_id = sub.id.clone(); 46 | smol::spawn(async move { 47 | loop { 48 | sleep(Duration::from_millis(500)).await; 49 | if let Err(err) = sub.notify(serde_json::json!("Hello")).await { 50 | error!("Send notification {err}"); 51 | break; 52 | } 53 | } 54 | }) 55 | .detach(); 56 | 57 | Ok(serde_json::json!(sub_id)) 58 | } 59 | 60 | async fn log_unsubscribe( 61 | &self, 62 | chan: Arc, 63 | _method: String, 64 | params: Value, 65 | ) -> Result { 66 | let sub_id: SubscriptionID = serde_json::from_value(params)?; 67 | let success = chan.remove_subscription(&sub_id).await.is_ok(); 68 | Ok(serde_json::json!(success)) 69 | } 70 | } 71 | 72 | fn main() { 73 | env_logger::init(); 74 | smol::block_on(async { 75 | let calc = Arc::new(Calc {}); 76 | 77 | // Creates a new server 78 | let server = ServerBuilder::new("tcp://127.0.0.1:6000") 79 | .expect("Create a new server builder") 80 | .service(calc.clone()) 81 | .pubsub_service(calc) 82 | .build() 83 | .await 84 | .expect("Build a new server"); 85 | 86 | // Start the server 87 | server.start_block().await.expect("Start the server"); 88 | }); 89 | } 90 | -------------------------------------------------------------------------------- /jsonrpc/examples/server.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use serde_json::Value; 5 | 6 | use karyon_jsonrpc::{ 7 | error::RPCError, 8 | server::{RPCMethod, RPCService, ServerBuilder}, 9 | }; 10 | 11 | struct Calc { 12 | version: String, 13 | } 14 | 15 | #[derive(Deserialize, Serialize)] 16 | struct Req { 17 | x: u32, 18 | y: u32, 19 | } 20 | 21 | #[derive(Deserialize, Serialize)] 22 | struct Pong {} 23 | 24 | impl RPCService for Calc { 25 | fn get_method(&self, name: &str) -> Option { 26 | match name { 27 | "ping" => 
Some(Box::new(move |params: Value| Box::pin(self.ping(params)))), 28 | "version" => Some(Box::new(move |params: Value| { 29 | Box::pin(self.version(params)) 30 | })), 31 | _ => unimplemented!(), 32 | } 33 | } 34 | 35 | fn name(&self) -> String { 36 | "Calc".to_string() 37 | } 38 | } 39 | 40 | impl Calc { 41 | async fn ping(&self, _params: Value) -> Result { 42 | Ok(serde_json::json!(Pong {})) 43 | } 44 | 45 | async fn version(&self, _params: Value) -> Result { 46 | Ok(serde_json::json!(self.version)) 47 | } 48 | } 49 | 50 | fn main() { 51 | env_logger::init(); 52 | smol::block_on(async { 53 | // Register the Calc service 54 | let calc = Calc { 55 | version: String::from("0.1"), 56 | }; 57 | 58 | // Creates a new server 59 | let server = ServerBuilder::new("tcp://127.0.0.1:7878") 60 | .expect("Create a new server builder") 61 | .service(Arc::new(calc)) 62 | .build() 63 | .await 64 | .expect("start a new server"); 65 | 66 | // Start the server 67 | server.start_block().await.expect("Start the server"); 68 | }); 69 | } 70 | -------------------------------------------------------------------------------- /jsonrpc/examples/server_custom_codec.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use serde_json::Value; 5 | 6 | use karyon_jsonrpc::{ 7 | codec::{ByteBuffer, Codec, Decoder, Encoder}, 8 | error::{Error, RPCError}, 9 | rpc_impl, 10 | server::ServerBuilder, 11 | }; 12 | 13 | struct Calc {} 14 | 15 | #[derive(Deserialize, Serialize)] 16 | struct Req { 17 | x: u32, 18 | y: u32, 19 | } 20 | 21 | #[derive(Deserialize, Serialize)] 22 | struct Pong {} 23 | 24 | #[rpc_impl] 25 | impl Calc { 26 | async fn ping(&self, _params: Value) -> Result { 27 | Ok(serde_json::json!(Pong {})) 28 | } 29 | } 30 | 31 | #[derive(Clone)] 32 | pub struct CustomJsonCodec {} 33 | 34 | impl Codec for CustomJsonCodec { 35 | type Message = serde_json::Value; 36 | type Error = Error; 37 | } 38 | 39 | impl Encoder for CustomJsonCodec { 40 | type EnMessage = serde_json::Value; 41 | type EnError = Error; 42 | fn encode( 43 | &self, 44 | src: &Self::EnMessage, 45 | dst: &mut ByteBuffer, 46 | ) -> std::result::Result { 47 | let msg = match serde_json::to_string(src) { 48 | Ok(m) => m, 49 | Err(err) => return Err(Error::Encode(err.to_string())), 50 | }; 51 | let buf = msg.as_bytes(); 52 | dst.extend_from_slice(buf); 53 | Ok(buf.len()) 54 | } 55 | } 56 | 57 | impl Decoder for CustomJsonCodec { 58 | type DeMessage = serde_json::Value; 59 | type DeError = Error; 60 | fn decode( 61 | &self, 62 | src: &mut ByteBuffer, 63 | ) -> std::result::Result, Self::DeError> { 64 | let de = serde_json::Deserializer::from_slice(src.as_ref()); 65 | let mut iter = de.into_iter::(); 66 | 67 | let item = match iter.next() { 68 | Some(Ok(item)) => item, 69 | Some(Err(ref e)) if e.is_eof() => return Ok(None), 70 | Some(Err(e)) => return Err(Error::Decode(e.to_string())), 71 | None => return Ok(None), 72 | }; 73 | 74 | Ok(Some((iter.byte_offset(), item))) 75 | } 76 | } 77 | 78 | fn main() { 79 | env_logger::init(); 80 | smol::block_on(async { 81 | // Register the Calc service 82 | let calc = Calc {}; 83 | 84 | // Creates a new server 85 | let server = ServerBuilder::new_with_codec("tcp://127.0.0.1:6000", CustomJsonCodec {}) 86 | .expect("Create a new server builder") 87 | .service(Arc::new(calc)) 88 | .build() 89 | .await 90 | .expect("start a new server"); 91 | 92 | // Start the server 93 | server.start_block().await.expect("Start the server"); 
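        // Note: this server only accepts the CustomJsonCodec framing defined above,
        // so it pairs with examples/client_custom_codec.rs, which builds its client
        // with ClientBuilder::new_with_codec("tcp://127.0.0.1:6000", CustomJsonCodec {}).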
94 | }); 95 | } 96 | -------------------------------------------------------------------------------- /jsonrpc/examples/server_derive.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use serde_json::Value; 5 | 6 | use karyon_jsonrpc::{error::RPCError, rpc_impl, rpc_method, server::ServerBuilder}; 7 | 8 | struct Calc { 9 | version: String, 10 | } 11 | 12 | #[derive(Deserialize, Serialize)] 13 | struct Req { 14 | x: u32, 15 | y: u32, 16 | } 17 | 18 | #[derive(Deserialize, Serialize)] 19 | struct Pong {} 20 | 21 | #[rpc_impl(name = "calculator")] 22 | impl Calc { 23 | async fn ping(&self, _params: Value) -> Result { 24 | Ok(serde_json::json!(Pong {})) 25 | } 26 | 27 | #[rpc_method(name = "math.add")] 28 | async fn add(&self, params: Value) -> Result { 29 | let params: Req = serde_json::from_value(params)?; 30 | Ok(serde_json::json!(params.x + params.y)) 31 | } 32 | 33 | #[rpc_method(name = "math.sub")] 34 | async fn sub(&self, params: Value) -> Result { 35 | let params: Req = serde_json::from_value(params)?; 36 | Ok(serde_json::json!(params.x - params.y)) 37 | } 38 | 39 | async fn version(&self, _params: Value) -> Result { 40 | Ok(serde_json::json!(self.version)) 41 | } 42 | } 43 | 44 | fn main() { 45 | env_logger::init(); 46 | smol::block_on(async { 47 | // Register the Calc service 48 | let calc = Calc { 49 | version: String::from("0.1"), 50 | }; 51 | 52 | // Creates a new server 53 | let server = ServerBuilder::new("tcp://127.0.0.1:6000") 54 | .expect("Create a new server builder") 55 | .service(Arc::new(calc)) 56 | .build() 57 | .await 58 | .expect("start a new server"); 59 | 60 | // Start the server 61 | server.start_block().await.expect("Start the server"); 62 | }); 63 | } 64 | -------------------------------------------------------------------------------- /jsonrpc/examples/tokio_server/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /jsonrpc/examples/tokio_server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "tokio_server" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [workspace] 7 | 8 | [dependencies] 9 | env_logger = "0.11" 10 | karyon_jsonrpc = { path = "../../", default-features = false, features = ["tokio", "ws"] } 11 | serde = { version = "1.0", features = ["derive"] } 12 | serde_json = "1.0" 13 | tokio = { version = "1.45", features = ["full"] } 14 | 15 | [profile.release] 16 | debug = true 17 | 18 | 19 | -------------------------------------------------------------------------------- /jsonrpc/examples/tokio_server/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use serde_json::Value; 5 | 6 | use karyon_jsonrpc::{ 7 | error::RPCError, 8 | message::SubscriptionID, 9 | rpc_impl, rpc_pubsub_impl, 10 | server::{Channel, ServerBuilder}, 11 | }; 12 | 13 | struct Calc { 14 | version: String, 15 | } 16 | 17 | #[derive(Deserialize, Serialize)] 18 | struct Req { 19 | x: u32, 20 | y: u32, 21 | } 22 | 23 | #[derive(Deserialize, Serialize)] 24 | struct Pong {} 25 | 26 | #[rpc_impl] 27 | impl Calc { 28 | async fn ping(&self, _params: Value) -> Result { 29 | Ok(serde_json::json!(Pong {})) 30 | } 31 | 32 | async fn add(&self, params: Value) 
-> Result { 33 | let params: Req = serde_json::from_value(params)?; 34 | Ok(serde_json::json!(params.x + params.y)) 35 | } 36 | 37 | async fn sub(&self, params: Value) -> Result { 38 | let params: Req = serde_json::from_value(params)?; 39 | Ok(serde_json::json!(params.x - params.y)) 40 | } 41 | 42 | async fn version(&self, _params: Value) -> Result { 43 | Ok(serde_json::json!(self.version)) 44 | } 45 | } 46 | 47 | #[rpc_pubsub_impl] 48 | impl Calc { 49 | async fn log_subscribe( 50 | &self, 51 | chan: Arc, 52 | method: String, 53 | _params: Value, 54 | ) -> Result { 55 | let sub = chan.new_subscription(&method, None).await.expect("Failed to subscribe"); 56 | let sub_id = sub.id; 57 | tokio::spawn(async move { 58 | loop { 59 | tokio::time::sleep(std::time::Duration::from_secs(1)).await; 60 | if sub.notify(serde_json::json!("Hello")).await.is_err() { 61 | break; 62 | } 63 | } 64 | }); 65 | 66 | Ok(serde_json::json!(sub_id)) 67 | } 68 | 69 | async fn log_unsubscribe( 70 | &self, 71 | chan: Arc, 72 | _method: String, 73 | params: Value, 74 | ) -> Result { 75 | let sub_id: SubscriptionID = serde_json::from_value(params)?; 76 | chan.remove_subscription(&sub_id).await; 77 | Ok(serde_json::json!(true)) 78 | } 79 | } 80 | 81 | #[tokio::main] 82 | async fn main() { 83 | env_logger::init(); 84 | // Register the Calc service 85 | let calc = Arc::new(Calc { 86 | version: String::from("0.1"), 87 | }); 88 | 89 | // Creates a new server 90 | let server = ServerBuilder::new("ws://127.0.0.1:6000") 91 | .expect("Create a new server builder") 92 | .service(calc.clone()) 93 | .pubsub_service(calc) 94 | .build() 95 | .await 96 | .expect("start a new server"); 97 | 98 | // Start the server 99 | server.start_block().await.expect("Start the server"); 100 | } 101 | -------------------------------------------------------------------------------- /jsonrpc/impl/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "karyon_jsonrpc_macro" 3 | description = "Internal crate for Karyon library." 4 | version.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | repository.workspace = true 8 | license.workspace = true 9 | authors.workspace = true 10 | 11 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 12 | 13 | [lib] 14 | proc-macro = true 15 | 16 | [features] 17 | default = ["smol"] 18 | smol = [] 19 | tokio = [] 20 | 21 | [dependencies] 22 | proc-macro2 = { workspace = true } 23 | quote = { workspace = true } 24 | syn = { workspace = true, features = ["full"] } 25 | 26 | serde_json = { workspace = true } 27 | -------------------------------------------------------------------------------- /jsonrpc/src/client/message_dispatcher.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use async_channel::{Receiver, Sender}; 4 | 5 | use karyon_core::async_runtime::lock::Mutex; 6 | 7 | use crate::{ 8 | error::{Error, Result}, 9 | message, 10 | }; 11 | 12 | use super::RequestID; 13 | 14 | /// Manages client requests 15 | pub(super) struct MessageDispatcher { 16 | chans: Mutex>>, 17 | } 18 | 19 | impl MessageDispatcher { 20 | /// Creates a new MessageDispatcher 21 | pub(super) fn new() -> Self { 22 | Self { 23 | chans: Mutex::new(HashMap::new()), 24 | } 25 | } 26 | 27 | /// Registers a new request with a given ID and returns a Receiver channel 28 | /// to wait for the response. 
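    ///
    /// A hypothetical usage sketch (illustrative only; `dispatcher` and `request_id`
    /// are placeholders, not part of the original source):
    /// ```ignore
    /// let rx = dispatcher.register(request_id).await;  // register before sending the request
    /// // ... write the request to the connection ...
    /// let response = rx.recv().await?;                 // completed later by `dispatch`
    /// dispatcher.unregister(&request_id).await;        // clean up on timeout or error paths
    /// ```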
29 | pub(super) async fn register(&self, id: RequestID) -> Receiver { 30 | let (tx, rx) = async_channel::bounded(1); 31 | self.chans.lock().await.insert(id, tx); 32 | rx 33 | } 34 | 35 | /// Unregisters the request with the provided ID 36 | pub(super) async fn unregister(&self, id: &RequestID) { 37 | self.chans.lock().await.remove(id); 38 | } 39 | 40 | /// Clear the registered channels. 41 | pub(super) async fn clear(&self) { 42 | let mut chans = self.chans.lock().await; 43 | for (_, tx) in chans.iter() { 44 | tx.close(); 45 | } 46 | chans.clear(); 47 | } 48 | 49 | /// Dispatches a response to the channel associated with the response's ID. 50 | /// 51 | /// If a channel is registered for the response's ID, the response is sent 52 | /// through that channel. If no channel is found for the ID, returns an error. 53 | pub(super) async fn dispatch(&self, res: message::Response) -> Result<()> { 54 | let res_id = match res.id { 55 | Some(ref rid) => rid.clone(), 56 | None => { 57 | return Err(Error::InvalidMsg("Response id is none".to_string())); 58 | } 59 | }; 60 | let id: RequestID = serde_json::from_value(res_id)?; 61 | let val = self.chans.lock().await.remove(&id); 62 | match val { 63 | Some(tx) => tx.send(res).await.map_err(Error::from), 64 | None => Err(Error::InvalidMsg("Receive unknown message".to_string())), 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /jsonrpc/src/client/subscriptions.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, sync::Arc}; 2 | 3 | use async_channel::{Receiver, Sender}; 4 | use serde_json::json; 5 | use serde_json::Value; 6 | 7 | use karyon_core::async_runtime::lock::Mutex; 8 | 9 | use crate::{ 10 | error::{Error, Result}, 11 | message::{Notification, NotificationResult, SubscriptionID}, 12 | }; 13 | 14 | /// A subscription established when the client's subscribe to a method 15 | pub struct Subscription { 16 | id: SubscriptionID, 17 | rx: Receiver, 18 | tx: Sender, 19 | } 20 | 21 | impl Subscription { 22 | fn new(id: SubscriptionID, buffer_size: usize) -> Arc { 23 | let (tx, rx) = async_channel::bounded(buffer_size); 24 | Arc::new(Self { tx, id, rx }) 25 | } 26 | 27 | pub async fn recv(&self) -> Result { 28 | self.rx.recv().await.map_err(|_| Error::SubscriptionClosed) 29 | } 30 | 31 | pub fn id(&self) -> SubscriptionID { 32 | self.id 33 | } 34 | 35 | async fn notify(&self, val: Value) -> Result<()> { 36 | if self.tx.is_full() { 37 | return Err(Error::SubscriptionBufferFull); 38 | } 39 | self.tx.send(val).await?; 40 | Ok(()) 41 | } 42 | 43 | fn close(&self) { 44 | self.tx.close(); 45 | } 46 | } 47 | 48 | /// Manages subscriptions for the client. 49 | pub(super) struct Subscriptions { 50 | subs: Mutex>>, 51 | sub_buffer_size: usize, 52 | } 53 | 54 | impl Subscriptions { 55 | /// Creates a new [`Subscriptions`]. 56 | pub(super) fn new(sub_buffer_size: usize) -> Arc { 57 | Arc::new(Self { 58 | subs: Mutex::new(HashMap::new()), 59 | sub_buffer_size, 60 | }) 61 | } 62 | 63 | /// Returns a new [`Subscription`] 64 | pub(super) async fn subscribe(&self, id: SubscriptionID) -> Arc { 65 | let sub = Subscription::new(id, self.sub_buffer_size); 66 | self.subs.lock().await.insert(id, sub.clone()); 67 | sub 68 | } 69 | 70 | /// Closes subscription channels and clear the inner map. 
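    ///
    /// A hypothetical lifecycle sketch for this manager (illustrative only; `sub_id`,
    /// `notification`, and `buffer_size` are placeholders):
    /// ```ignore
    /// let subs = Subscriptions::new(buffer_size);
    /// let sub = subs.subscribe(sub_id).await;  // register a subscription by id
    /// subs.notify(notification).await?;        // routed to `sub` via its id
    /// subs.unsubscribe(&sub_id).await;         // close a single subscription
    /// subs.clear().await;                      // or close everything at once
    /// ```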
71 | pub(super) async fn clear(&self) { 72 | let mut subs = self.subs.lock().await; 73 | for (_, sub) in subs.iter() { 74 | sub.close(); 75 | } 76 | subs.clear(); 77 | } 78 | 79 | /// Unsubscribe from the provided subscription id. 80 | pub(super) async fn unsubscribe(&self, id: &SubscriptionID) { 81 | if let Some(sub) = self.subs.lock().await.remove(id) { 82 | sub.close(); 83 | } 84 | } 85 | 86 | /// Notifies the subscription about the given notification. 87 | pub(super) async fn notify(&self, nt: Notification) -> Result<()> { 88 | let nt_res: NotificationResult = match nt.params { 89 | Some(ref p) => serde_json::from_value(p.clone())?, 90 | None => return Err(Error::InvalidMsg("Invalid notification msg".to_string())), 91 | }; 92 | 93 | match self.subs.lock().await.get(&nt_res.subscription) { 94 | Some(s) => s.notify(nt_res.result.unwrap_or(json!(""))).await?, 95 | None => { 96 | return Err(Error::InvalidMsg("Unknown notification".to_string())); 97 | } 98 | } 99 | 100 | Ok(()) 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /jsonrpc/src/codec.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "ws")] 2 | use async_tungstenite::tungstenite::Message; 3 | 4 | pub use karyon_net::codec::{ByteBuffer, Codec, Decoder, Encoder}; 5 | 6 | #[cfg(feature = "ws")] 7 | pub use karyon_net::codec::{WebSocketCodec, WebSocketDecoder, WebSocketEncoder}; 8 | 9 | use crate::error::Error; 10 | 11 | #[cfg(not(feature = "ws"))] 12 | pub trait ClonableJsonCodec: Codec + Clone {} 13 | #[cfg(not(feature = "ws"))] 14 | impl + Clone> ClonableJsonCodec for T {} 15 | 16 | #[cfg(feature = "ws")] 17 | pub trait ClonableJsonCodec: 18 | Codec 19 | + WebSocketCodec 20 | + Clone 21 | { 22 | } 23 | #[cfg(feature = "ws")] 24 | impl< 25 | T: Codec 26 | + WebSocketCodec 27 | + Clone, 28 | > ClonableJsonCodec for T 29 | { 30 | } 31 | 32 | #[derive(Clone)] 33 | pub struct JsonCodec {} 34 | 35 | impl Codec for JsonCodec { 36 | type Message = serde_json::Value; 37 | type Error = Error; 38 | } 39 | 40 | impl Encoder for JsonCodec { 41 | type EnMessage = serde_json::Value; 42 | type EnError = Error; 43 | fn encode(&self, src: &Self::EnMessage, dst: &mut ByteBuffer) -> Result { 44 | let msg = match serde_json::to_string(src) { 45 | Ok(m) => m, 46 | Err(err) => return Err(Error::Encode(err.to_string())), 47 | }; 48 | let buf = msg.as_bytes(); 49 | dst.extend_from_slice(buf); 50 | Ok(buf.len()) 51 | } 52 | } 53 | 54 | impl Decoder for JsonCodec { 55 | type DeMessage = serde_json::Value; 56 | type DeError = Error; 57 | fn decode( 58 | &self, 59 | src: &mut ByteBuffer, 60 | ) -> Result, Self::DeError> { 61 | let de = serde_json::Deserializer::from_slice(src.as_ref()); 62 | let mut iter = de.into_iter::(); 63 | 64 | let item = match iter.next() { 65 | Some(Ok(item)) => item, 66 | Some(Err(ref e)) if e.is_eof() => return Ok(None), 67 | Some(Err(e)) => return Err(Error::Decode(e.to_string())), 68 | None => return Ok(None), 69 | }; 70 | 71 | Ok(Some((iter.byte_offset(), item))) 72 | } 73 | } 74 | 75 | #[cfg(feature = "ws")] 76 | #[derive(Clone)] 77 | pub struct WsJsonCodec {} 78 | 79 | #[cfg(feature = "ws")] 80 | impl WebSocketCodec for JsonCodec { 81 | type Message = serde_json::Value; 82 | type Error = Error; 83 | } 84 | 85 | #[cfg(feature = "ws")] 86 | impl WebSocketEncoder for JsonCodec { 87 | type EnMessage = serde_json::Value; 88 | type EnError = Error; 89 | 90 | fn encode(&self, src: &Self::EnMessage) -> Result { 91 | let msg = 
match serde_json::to_string(src) { 92 | Ok(m) => m, 93 | Err(err) => return Err(Error::Encode(err.to_string())), 94 | }; 95 | Ok(Message::Text(msg.into())) 96 | } 97 | } 98 | 99 | #[cfg(feature = "ws")] 100 | impl WebSocketDecoder for JsonCodec { 101 | type DeMessage = serde_json::Value; 102 | type DeError = Error; 103 | 104 | fn decode(&self, src: &Message) -> Result, Self::DeError> { 105 | match src { 106 | Message::Text(s) => match serde_json::from_str(s) { 107 | Ok(m) => Ok(Some(m)), 108 | Err(err) => Err(Error::Decode(err.to_string())), 109 | }, 110 | Message::Binary(s) => match serde_json::from_slice(s) { 111 | Ok(m) => Ok(m), 112 | Err(err) => Err(Error::Decode(err.to_string())), 113 | }, 114 | Message::Close(_) => Err(Error::IO(std::io::ErrorKind::ConnectionAborted.into())), 115 | m => Err(Error::Decode(format!( 116 | "Receive unexpected message: {:?}", 117 | m 118 | ))), 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /jsonrpc/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error as ThisError; 2 | 3 | pub type Result = std::result::Result; 4 | 5 | /// Represents karyon's jsonrpc Error. 6 | #[derive(ThisError, Debug)] 7 | pub enum Error { 8 | #[error(transparent)] 9 | IO(#[from] std::io::Error), 10 | 11 | #[error("Call Error: code: {0} - msg: {1}")] 12 | CallError(i32, String), 13 | 14 | #[error("Subscribe Error: code: {0} - msg: {1}")] 15 | SubscribeError(i32, String), 16 | 17 | #[error("Encode Error: {0}")] 18 | Encode(String), 19 | 20 | #[error("Decode Error: {0}")] 21 | Decode(String), 22 | 23 | #[error("Invalid Message Error: {0}")] 24 | InvalidMsg(String), 25 | 26 | #[error(transparent)] 27 | ParseJSON(#[from] serde_json::Error), 28 | 29 | #[error("Unsupported Protocol: {0}")] 30 | UnsupportedProtocol(String), 31 | 32 | #[error("Tls config is required")] 33 | TLSConfigRequired, 34 | 35 | #[error("Receive Close Message From Connection: {0}")] 36 | CloseConnection(String), 37 | 38 | #[error("Subscription Not Found: {0}")] 39 | SubscriptionNotFound(String), 40 | 41 | #[error("Subscription Exceeds The Maximum Buffer Size")] 42 | SubscriptionBufferFull, 43 | 44 | #[error("Subscription Closed")] 45 | SubscriptionClosed, 46 | 47 | #[error("Subscription duplicated: {0}")] 48 | SubscriptionDuplicated(String), 49 | 50 | #[error("ClientDisconnected")] 51 | ClientDisconnected, 52 | 53 | #[error(transparent)] 54 | ChannelRecv(#[from] async_channel::RecvError), 55 | 56 | #[error("Channel send Error: {0}")] 57 | ChannelSend(String), 58 | 59 | #[cfg(feature = "ws")] 60 | #[error(transparent)] 61 | WebSocket(#[from] async_tungstenite::tungstenite::Error), 62 | 63 | #[error("Unexpected Error: {0}")] 64 | General(String), 65 | 66 | #[error(transparent)] 67 | KaryonCore(#[from] karyon_core::error::Error), 68 | 69 | #[error(transparent)] 70 | KaryonNet(#[from] karyon_net::Error), 71 | } 72 | 73 | impl From> for Error { 74 | fn from(error: async_channel::SendError) -> Self { 75 | Error::ChannelSend(error.to_string()) 76 | } 77 | } 78 | 79 | pub type RPCResult = std::result::Result; 80 | 81 | /// Represents RPC Error. 
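///
/// This is the error type returned by RPC service methods. A hypothetical handler
/// sketch (illustrative only, mirroring the examples in this repository):
/// ```ignore
/// async fn add(&self, params: Value) -> Result<Value, RPCError> {
///     // serde_json::Error converts into RPCError::ParseError via the From impl below
///     let req: Req = serde_json::from_value(params)?;
///     Ok(serde_json::json!(req.x + req.y))
/// }
/// ```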
82 | #[derive(ThisError, Debug)] 83 | pub enum RPCError { 84 | #[error("Custom Error: code: {0} msg: {1}")] 85 | CustomError(i32, String), 86 | 87 | #[error("Invalid Params: {0}")] 88 | InvalidParams(String), 89 | 90 | #[error("Invalid Request: {0}")] 91 | InvalidRequest(String), 92 | 93 | #[error("Parse Error: {0}")] 94 | ParseError(String), 95 | 96 | #[error("Internal Error")] 97 | InternalError, 98 | } 99 | 100 | impl From for RPCError { 101 | fn from(error: serde_json::Error) -> Self { 102 | RPCError::ParseError(error.to_string()) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /jsonrpc/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../README.md")] 2 | 3 | pub mod client; 4 | pub mod codec; 5 | pub mod error; 6 | pub mod message; 7 | pub mod net; 8 | pub mod server; 9 | 10 | pub use karyon_jsonrpc_macro::{rpc_impl, rpc_method, rpc_pubsub_impl}; 11 | -------------------------------------------------------------------------------- /jsonrpc/src/net.rs: -------------------------------------------------------------------------------- 1 | pub use karyon_net::{Addr, Endpoint, ToEndpoint}; 2 | 3 | #[cfg(feature = "tcp")] 4 | pub use karyon_net::tcp::TcpConfig; 5 | -------------------------------------------------------------------------------- /jsonrpc/src/server/channel.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | use std::sync::{Arc, Weak}; 3 | 4 | use karyon_core::{async_runtime::lock::Mutex, util::random_32}; 5 | 6 | use crate::{ 7 | error::{Error, Result}, 8 | message::SubscriptionID, 9 | }; 10 | 11 | #[derive(Debug)] 12 | pub struct NewNotification { 13 | pub sub_id: SubscriptionID, 14 | pub result: serde_json::Value, 15 | pub method: String, 16 | } 17 | 18 | /// Represents a new subscription 19 | #[derive(Clone)] 20 | pub struct Subscription { 21 | pub id: SubscriptionID, 22 | parent: Weak, 23 | chan: async_channel::Sender, 24 | method: String, 25 | } 26 | 27 | impl Subscription { 28 | /// Creates a new [`Subscription`] 29 | fn new( 30 | parent: Weak, 31 | id: SubscriptionID, 32 | chan: async_channel::Sender, 33 | method: &str, 34 | ) -> Self { 35 | Self { 36 | parent, 37 | id, 38 | chan, 39 | method: method.to_string(), 40 | } 41 | } 42 | 43 | /// Sends a notification to the subscriber 44 | pub async fn notify(&self, res: serde_json::Value) -> Result<()> { 45 | if self.still_subscribed().await { 46 | let nt = NewNotification { 47 | sub_id: self.id, 48 | result: res, 49 | method: self.method.clone(), 50 | }; 51 | self.chan.send(nt).await?; 52 | Ok(()) 53 | } else { 54 | Err(Error::SubscriptionNotFound(self.id.to_string())) 55 | } 56 | } 57 | 58 | /// Checks from the partent if this subscription is still subscribed 59 | async fn still_subscribed(&self) -> bool { 60 | match self.parent.upgrade() { 61 | Some(parent) => parent.subs.lock().await.contains(&self.id), 62 | None => false, 63 | } 64 | } 65 | } 66 | 67 | /// Represents a connection channel for creating/removing subscriptions 68 | pub struct Channel { 69 | chan: async_channel::Sender, 70 | subs: Mutex>, 71 | } 72 | 73 | impl Channel { 74 | /// Creates a new [`Channel`] 75 | pub(crate) fn new(chan: async_channel::Sender) -> Arc { 76 | Arc::new(Self { 77 | chan, 78 | subs: Mutex::new(HashSet::new()), 79 | }) 80 | } 81 | 82 | /// Creates a new [`Subscription`] 83 | pub async fn new_subscription( 84 | self: &Arc, 85 | method: &str, 
86 | sub_id: Option, 87 | ) -> Result { 88 | let sub_id = sub_id.unwrap_or_else(random_32); 89 | if !self.subs.lock().await.insert(sub_id) { 90 | return Err(Error::SubscriptionDuplicated(sub_id.to_string())); 91 | } 92 | 93 | let sub = Subscription::new(Arc::downgrade(self), sub_id, self.chan.clone(), method); 94 | Ok(sub) 95 | } 96 | 97 | /// Removes a [`Subscription`] 98 | pub async fn remove_subscription(&self, id: &SubscriptionID) -> Result<()> { 99 | let mut subs = self.subs.lock().await; 100 | if !subs.remove(id) { 101 | return Err(Error::SubscriptionNotFound(id.to_string())); 102 | } 103 | Ok(()) 104 | } 105 | 106 | /// Closes the [`Channel`] 107 | pub(crate) fn close(&self) { 108 | self.chan.close(); 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /jsonrpc/src/server/pubsub_service.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, pin::Pin, sync::Arc}; 2 | 3 | use crate::error::RPCResult; 4 | 5 | use super::channel::Channel; 6 | 7 | /// Represents the RPC method 8 | pub type PubSubRPCMethod<'a> = 9 | Box, String, serde_json::Value) -> PubSubRPCMethodOutput<'a> + Send + 'a>; 10 | type PubSubRPCMethodOutput<'a> = 11 | Pin> + Send + Sync + 'a>>; 12 | 13 | /// Defines the interface for an RPC service. 14 | pub trait PubSubRPCService: Sync + Send { 15 | fn get_pubsub_method(&self, name: &str) -> Option; 16 | fn name(&self) -> String; 17 | } 18 | -------------------------------------------------------------------------------- /jsonrpc/src/server/response_queue.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::VecDeque, sync::Arc}; 2 | 3 | use karyon_core::{async_runtime::lock::Mutex, async_util::CondVar}; 4 | 5 | /// A queue for handling responses 6 | pub(super) struct ResponseQueue { 7 | queue: Mutex>, 8 | condvar: CondVar, 9 | } 10 | 11 | impl ResponseQueue { 12 | pub(super) fn new() -> Arc { 13 | Arc::new(Self { 14 | queue: Mutex::new(VecDeque::new()), 15 | condvar: CondVar::new(), 16 | }) 17 | } 18 | 19 | /// Wait while the queue is empty, remove and return the item from the queue, 20 | /// panicking if empty (shouldn't happen) 21 | pub(super) async fn recv(&self) -> T { 22 | let mut queue = self.queue.lock().await; 23 | 24 | while queue.is_empty() { 25 | queue = self.condvar.wait(queue).await; 26 | } 27 | 28 | match queue.pop_front() { 29 | Some(v) => v, 30 | None => unreachable!(), 31 | } 32 | } 33 | 34 | /// Push an item into the queue, notify all waiting tasks that the 35 | /// condvar has changed 36 | pub(super) async fn push(&self, res: T) { 37 | self.queue.lock().await.push_back(res); 38 | self.condvar.signal(); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /jsonrpc/src/server/service.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, pin::Pin}; 2 | 3 | use crate::error::RPCResult; 4 | 5 | /// Represents the RPC method 6 | pub type RPCMethod<'a> = Box RPCMethodOutput<'a> + Send + 'a>; 7 | type RPCMethodOutput<'a> = 8 | Pin> + Send + Sync + 'a>>; 9 | 10 | /// Defines the interface for an RPC service. 
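///
/// This trait is normally generated by the `#[rpc_impl]` attribute macro, but it can
/// also be written by hand. A hypothetical manual implementation sketch (illustrative
/// only, adapted from examples/server.rs; unknown method names return `None` here):
/// ```ignore
/// impl RPCService for Calc {
///     fn get_method(&self, name: &str) -> Option<RPCMethod> {
///         match name {
///             "ping" => Some(Box::new(move |params| Box::pin(self.ping(params)))),
///             _ => None,
///         }
///     }
///     fn name(&self) -> String {
///         "Calc".to_string()
///     }
/// }
/// ```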
11 | pub trait RPCService: Sync + Send { 12 | fn get_method(&self, name: &str) -> Option; 13 | fn name(&self) -> String; 14 | } 15 | -------------------------------------------------------------------------------- /jsonrpc/tests/rpc_impl.rs: -------------------------------------------------------------------------------- 1 | use karyon_jsonrpc::{error::RPCError, rpc_impl, server::RPCService}; 2 | use serde_json::Value; 3 | 4 | #[test] 5 | fn rpc_impl_service() { 6 | struct Foo {} 7 | 8 | #[rpc_impl] 9 | impl Foo { 10 | async fn foo(&self, params: Value) -> Result { 11 | Ok(params) 12 | } 13 | } 14 | 15 | let f = Foo {}; 16 | 17 | assert!(f.get_method("foo").is_some()); 18 | assert!(f.get_method("bar").is_none()); 19 | 20 | let params = serde_json::json!("params"); 21 | 22 | smol::block_on(async { 23 | let foo_method = f.get_method("foo").expect("Get method foo"); 24 | assert_eq!( 25 | foo_method(params.clone()).await.expect("Call foo method"), 26 | params 27 | ); 28 | }); 29 | } 30 | -------------------------------------------------------------------------------- /jsonrpc/tests/rpc_pubsub_impl.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use karyon_jsonrpc::{ 4 | error::RPCError, 5 | rpc_pubsub_impl, 6 | server::{Channel, PubSubRPCService}, 7 | }; 8 | use serde_json::Value; 9 | 10 | #[test] 11 | fn rpc_pubsub_impl_service() { 12 | struct Foo {} 13 | 14 | #[rpc_pubsub_impl] 15 | impl Foo { 16 | async fn foo( 17 | &self, 18 | _channel: Arc, 19 | _method: String, 20 | params: Value, 21 | ) -> Result { 22 | Ok(params) 23 | } 24 | } 25 | 26 | let f = Arc::new(Foo {}); 27 | 28 | assert!(f.get_pubsub_method("foo").is_some()); 29 | assert!(f.get_pubsub_method("bar").is_none()); 30 | 31 | let _params = serde_json::json!("params"); 32 | 33 | // TODO add more tests here 34 | } 35 | -------------------------------------------------------------------------------- /net/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "karyon_net" 3 | description = "Internal crate for Karyon library." 
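# Note on features: the defaults below enable the smol runtime and all protocols. A
# downstream crate that wants the tokio runtime with only TCP support can depend on
# this crate the way examples/tcp_codec_tokio does (illustrative):
#   karyon_net = { path = "../../", default-features = false, features = ["tokio", "tcp"] }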
4 | version.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | repository.workspace = true 8 | license.workspace = true 9 | authors.workspace = true 10 | 11 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 12 | 13 | [features] 14 | default = ["smol", "all-protocols"] 15 | all-protocols = ["tcp", "tls", "ws", "udp", "unix"] 16 | stream = ["pin-project-lite", "futures-util"] 17 | tcp = ["stream"] 18 | tls = ["tcp", "karyon_async_rustls", "rustls-pki-types"] 19 | ws = ["tcp", "async-tungstenite"] 20 | udp = [] 21 | unix = ["stream"] 22 | smol = [ 23 | "karyon_core/smol", 24 | "async-tungstenite?/async-std-runtime", 25 | "karyon_async_rustls?/smol", 26 | ] 27 | tokio = [ 28 | "karyon_core/tokio", 29 | "async-tungstenite?/tokio-runtime", 30 | "karyon_async_rustls?/tokio", 31 | "dep:tokio", 32 | ] 33 | serde = ["dep:serde"] 34 | 35 | [dependencies] 36 | karyon_core = { workspace = true } 37 | karyon_async_rustls = { workspace = true, optional = true } 38 | 39 | log = { workspace = true } 40 | thiserror = { workspace = true } 41 | url = { workspace = true } 42 | serde = { workspace = true, features = ["derive"], optional = true } 43 | bincode = { workspace = true, features = ["derive"] } 44 | 45 | # async 46 | async-trait = { workspace = true } 47 | futures-util = { workspace = true, features = ["sink"], optional = true } 48 | pin-project-lite = { workspace = true, optional = true } 49 | tokio = { workspace = true, features = ["io-util"], optional = true } 50 | 51 | # websocket 52 | async-tungstenite = { workspace = true, optional = true, features = [ 53 | "futures-03-sink", 54 | ] } 55 | 56 | # tls 57 | rustls-pki-types = { workspace = true, optional = true } 58 | 59 | [dev-dependencies] 60 | smol = { workspace = true } 61 | -------------------------------------------------------------------------------- /net/async_rustls/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "karyon_async_rustls" 3 | description = "Internal crate for Karyon library." 
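# Note: enable exactly one of the `smol` or `tokio` features; src/lib.rs emits a
# compile_error! when both or neither runtime feature is selected.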
4 | version.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | repository.workspace = true 8 | license.workspace = true 9 | authors.workspace = true 10 | 11 | [features] 12 | default = ["smol"] 13 | smol = ["futures-rustls"] 14 | tokio = ["tokio-rustls"] 15 | 16 | [dependencies] 17 | futures-rustls = { workspace = true, optional = true } 18 | tokio-rustls = { workspace = true, optional = true } 19 | -------------------------------------------------------------------------------- /net/async_rustls/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all(feature = "smol", feature = "tokio"))] 2 | compile_error!("Only one async runtime feature should be enabled"); 3 | 4 | #[cfg(not(any(feature = "smol", feature = "tokio")))] 5 | compile_error!("At least one async runtime feature must be enabled for this crate."); 6 | 7 | #[cfg(feature = "smol")] 8 | pub use futures_rustls::{rustls, TlsAcceptor, TlsConnector, TlsStream}; 9 | 10 | #[cfg(feature = "tokio")] 11 | pub use tokio_rustls::{rustls, TlsAcceptor, TlsConnector, TlsStream}; 12 | -------------------------------------------------------------------------------- /net/examples/tcp_codec.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use karyon_core::async_util::sleep; 4 | 5 | use karyon_net::{ 6 | codec::{ByteBuffer, Codec, Decoder, Encoder}, 7 | tcp, ConnListener, Connection, Endpoint, Error, Result, 8 | }; 9 | 10 | #[derive(Clone)] 11 | struct NewLineCodec {} 12 | 13 | impl Codec for NewLineCodec { 14 | type Message = String; 15 | type Error = Error; 16 | } 17 | 18 | impl Encoder for NewLineCodec { 19 | type EnMessage = String; 20 | type EnError = Error; 21 | fn encode(&self, src: &Self::EnMessage, dst: &mut ByteBuffer) -> Result { 22 | dst.extend_from_slice(src.as_bytes()); 23 | Ok(src.len()) 24 | } 25 | } 26 | 27 | impl Decoder for NewLineCodec { 28 | type DeMessage = String; 29 | type DeError = Error; 30 | fn decode(&self, src: &mut ByteBuffer) -> Result> { 31 | match src.as_ref().iter().position(|&b| b == b'\n') { 32 | Some(i) => Ok(Some(( 33 | i + 1, 34 | String::from_utf8(src.as_ref()[..i].to_vec()).unwrap(), 35 | ))), 36 | None => Ok(None), 37 | } 38 | } 39 | } 40 | 41 | fn main() { 42 | smol::block_on(async { 43 | let endpoint: Endpoint = "tcp://127.0.0.1:3000".parse().unwrap(); 44 | 45 | let config = tcp::TcpConfig::default(); 46 | 47 | let listener = tcp::listen(&endpoint, config.clone(), NewLineCodec {}) 48 | .await 49 | .unwrap(); 50 | smol::spawn(async move { 51 | if let Ok(conn) = listener.accept().await { 52 | loop { 53 | let msg = conn.recv().await.unwrap(); 54 | println!("Receive a message: {:?}", msg); 55 | } 56 | }; 57 | }) 58 | .detach(); 59 | 60 | let conn = tcp::dial(&endpoint, config, NewLineCodec {}).await.unwrap(); 61 | conn.send("hello".to_string()).await.unwrap(); 62 | conn.send(" world\n".to_string()).await.unwrap(); 63 | sleep(Duration::from_secs(1)).await; 64 | }); 65 | } 66 | -------------------------------------------------------------------------------- /net/examples/tcp_codec_tokio/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /net/examples/tcp_codec_tokio/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "tcp_codec_tokio" 3 | version 
= "0.1.0" 4 | edition = "2021" 5 | 6 | [workspace] 7 | 8 | [dependencies] 9 | karyon_net = { path = "../../", default-features = false, features = ["tokio", "tcp"] } 10 | karyon_core = { path = "../../../core", default-features = false, features = ["tokio"] } 11 | tokio = { version = "1.45", features = ["full"] } 12 | 13 | -------------------------------------------------------------------------------- /net/examples/tcp_codec_tokio/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use karyon_core::async_util::sleep; 4 | 5 | use karyon_net::{ 6 | codec::{ByteBuffer, Codec, Decoder, Encoder}, 7 | tcp, ConnListener, Connection, Endpoint, Error, Result, 8 | }; 9 | 10 | #[derive(Clone)] 11 | struct NewLineCodec {} 12 | 13 | impl Codec for NewLineCodec { 14 | type Message = String; 15 | type Error = Error; 16 | } 17 | 18 | impl Encoder for NewLineCodec { 19 | type EnMessage = String; 20 | type EnError = Error; 21 | fn encode(&self, src: &Self::EnMessage, dst: &mut ByteBuffer) -> Result { 22 | dst.extend_from_slice(src.as_bytes()); 23 | Ok(src.len()) 24 | } 25 | } 26 | 27 | impl Decoder for NewLineCodec { 28 | type DeMessage = String; 29 | type DeError = Error; 30 | fn decode(&self, src: &mut ByteBuffer) -> Result> { 31 | match src.as_ref().iter().position(|&b| b == b'\n') { 32 | Some(i) => Ok(Some(( 33 | i + 1, 34 | String::from_utf8(src.consume(i).to_vec()).unwrap(), 35 | ))), 36 | None => Ok(None), 37 | } 38 | } 39 | } 40 | 41 | #[tokio::main] 42 | async fn main() { 43 | let endpoint: Endpoint = "tcp://127.0.0.1:3000".parse().unwrap(); 44 | 45 | let config = tcp::TcpConfig::default(); 46 | 47 | let listener = tcp::listen(&endpoint, config.clone(), NewLineCodec {}) 48 | .await 49 | .unwrap(); 50 | tokio::spawn(async move { 51 | if let Ok(conn) = listener.accept().await { 52 | loop { 53 | let msg = conn.recv().await.unwrap(); 54 | println!("Receive a message: {:?}", msg); 55 | } 56 | }; 57 | }); 58 | 59 | let conn = tcp::dial(&endpoint, config, NewLineCodec {}).await.unwrap(); 60 | conn.send("hello".to_string()).await.unwrap(); 61 | conn.send(" world\n".to_string()).await.unwrap(); 62 | sleep(Duration::from_secs(1)).await; 63 | } 64 | -------------------------------------------------------------------------------- /net/src/codec/buffer.rs: -------------------------------------------------------------------------------- 1 | pub type ByteBuffer = Buffer; 2 | 3 | #[derive(Debug)] 4 | pub struct Buffer { 5 | inner: Vec, 6 | len: usize, 7 | max_length: usize, 8 | } 9 | 10 | impl Buffer { 11 | /// Constructs a new, empty Buffer. 12 | pub fn new(max_length: usize) -> Self { 13 | Self { 14 | max_length, 15 | inner: Vec::new(), 16 | len: 0, 17 | } 18 | } 19 | 20 | /// Returns the number of elements in the buffer. 21 | #[allow(dead_code)] 22 | pub fn len(&self) -> usize { 23 | self.len 24 | } 25 | 26 | /// Resizes the buffer in-place so that `len` is equal to `new_size`. 27 | pub fn resize(&mut self, new_size: usize) { 28 | // Check the Buffer doesn't grow beyond its max length. 29 | assert!( 30 | self.max_length > new_size, 31 | "buffer resize to {} overflows the buffer max_length ({})", 32 | new_size, 33 | self.max_length 34 | ); 35 | // Make sure the vector can contain the data. 36 | // Note 1: reserve() is a no-op if the vector capacity is already large 37 | // enough, but we don't want to cause the unsigned to underflow. 
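        // (Clarification of Note 1: this is why the reserve() call below is guarded by
        // `new_size > self.len`; on a shrink, `new_size - self.len` would underflow
        // the unsigned subtraction.)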
38 | // Note 2: we don't shrink the vector memory if the length is reduced, 39 | // as this operation might be costly and the released memory expected to 40 | // be small. 41 | // Note 3: the vector capacity (aka allocated memory) might be larger 42 | // than max_length due to the allocator doing over-provisioning, but it 43 | // is guaranteed that the data length won't overflow. 44 | if new_size > self.len { 45 | self.inner.reserve(new_size - self.len); 46 | } 47 | // This is a no-op if the new_size is greater or equal to self.len 48 | self.inner.truncate(new_size); 49 | self.len = new_size; 50 | } 51 | 52 | /// Appends all elements in a slice to the buffer. 53 | pub fn extend_from_slice(&mut self, bytes: &[u8]) { 54 | self.resize(self.len + bytes.len()); 55 | self.inner.extend_from_slice(bytes); 56 | } 57 | 58 | /// Shortens the buffer, dropping the first `cnt` bytes and keeping the 59 | /// rest. 60 | pub fn advance(&mut self, cnt: usize) { 61 | assert!( 62 | self.len >= cnt, 63 | "buffer advance of {} underflows the buffer length ({})", 64 | cnt, 65 | self.len 66 | ); 67 | self.inner.rotate_left(cnt); 68 | self.resize(self.len - cnt); 69 | } 70 | 71 | /// Returns `true` if the buffer contains no elements. 72 | pub fn is_empty(&self) -> bool { 73 | self.len == 0 74 | } 75 | } 76 | 77 | impl AsMut<[u8]> for Buffer { 78 | fn as_mut(&mut self) -> &mut [u8] { 79 | &mut self.inner[..self.len] 80 | } 81 | } 82 | 83 | impl AsRef<[u8]> for Buffer { 84 | fn as_ref(&self) -> &[u8] { 85 | &self.inner[..self.len] 86 | } 87 | } 88 | 89 | #[cfg(test)] 90 | mod tests { 91 | use super::*; 92 | 93 | #[test] 94 | fn test_buffer() { 95 | let mut buf = Buffer::new(32); 96 | assert_eq!(&[] as &[u8], buf.as_ref()); 97 | assert_eq!(0, buf.len()); 98 | assert_eq!(true, buf.is_empty()); 99 | 100 | buf.extend_from_slice(&[1, 2, 3, 4, 5]); 101 | assert_eq!(&[1, 2, 3, 4, 5], buf.as_ref()); 102 | assert_eq!(5, buf.len()); 103 | assert_eq!(false, buf.is_empty()); 104 | 105 | buf.advance(2); 106 | assert_eq!(&[3, 4, 5], buf.as_ref()); 107 | assert_eq!(3, buf.len()); 108 | assert_eq!(false, buf.is_empty()); 109 | 110 | buf.extend_from_slice(&[6, 7, 8]); 111 | assert_eq!(&[3, 4, 5, 6, 7, 8], buf.as_ref()); 112 | assert_eq!(6, buf.len()); 113 | assert_eq!(false, buf.is_empty()); 114 | 115 | buf.advance(4); 116 | assert_eq!(&[7, 8], buf.as_ref()); 117 | assert_eq!(2, buf.len()); 118 | assert_eq!(false, buf.is_empty()); 119 | 120 | buf.advance(2); 121 | assert_eq!(&[] as &[u8], buf.as_ref()); 122 | assert_eq!(0, buf.len()); 123 | assert_eq!(true, buf.is_empty()); 124 | } 125 | 126 | #[test] 127 | #[should_panic(expected = "buffer resize to 9 overflows the buffer max_length (8)")] 128 | fn test_buffer_resize_overflow() { 129 | let mut buf = Buffer::new(8); 130 | buf.extend_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9]); 131 | } 132 | 133 | #[test] 134 | #[should_panic(expected = "buffer advance of 5 underflows the buffer length (4)")] 135 | fn test_buffer_advance_underflow() { 136 | let mut buf = Buffer::new(8); 137 | buf.extend_from_slice(&[1, 2, 3, 4]); 138 | buf.advance(5); 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /net/src/codec/bytes_codec.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | codec::{ByteBuffer, Codec, Decoder, Encoder}, 3 | Error, Result, 4 | }; 5 | 6 | #[derive(Clone)] 7 | pub struct BytesCodec {} 8 | impl Codec for BytesCodec { 9 | type Message = Vec; 10 | type Error = Error; 11 | } 12 | 13 | 
impl Encoder for BytesCodec { 14 | type EnMessage = Vec; 15 | type EnError = Error; 16 | fn encode(&self, src: &Self::EnMessage, dst: &mut ByteBuffer) -> Result { 17 | dst.extend_from_slice(src); 18 | Ok(src.len()) 19 | } 20 | } 21 | 22 | impl Decoder for BytesCodec { 23 | type DeMessage = Vec; 24 | type DeError = Error; 25 | fn decode(&self, src: &mut ByteBuffer) -> Result> { 26 | if src.is_empty() { 27 | Ok(None) 28 | } else { 29 | Ok(Some((src.len(), src.as_ref().to_vec()))) 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /net/src/codec/length_codec.rs: -------------------------------------------------------------------------------- 1 | use karyon_core::util::{decode, encode_into_slice}; 2 | 3 | use crate::{ 4 | codec::{ByteBuffer, Codec, Decoder, Encoder}, 5 | Error, Result, 6 | }; 7 | 8 | /// The size of the message length. 9 | const MSG_LENGTH_SIZE: usize = std::mem::size_of::(); 10 | 11 | #[derive(Clone)] 12 | pub struct LengthCodec {} 13 | impl Codec for LengthCodec { 14 | type Message = Vec; 15 | type Error = Error; 16 | } 17 | 18 | impl Encoder for LengthCodec { 19 | type EnMessage = Vec; 20 | type EnError = Error; 21 | fn encode(&self, src: &Self::EnMessage, dst: &mut ByteBuffer) -> Result { 22 | let length_buf = &mut [0; MSG_LENGTH_SIZE]; 23 | encode_into_slice(&(src.len() as u32), length_buf)?; 24 | 25 | dst.resize(MSG_LENGTH_SIZE); 26 | dst.extend_from_slice(length_buf); 27 | 28 | dst.resize(src.len()); 29 | dst.extend_from_slice(src); 30 | 31 | Ok(dst.len()) 32 | } 33 | } 34 | 35 | impl Decoder for LengthCodec { 36 | type DeMessage = Vec; 37 | type DeError = Error; 38 | fn decode(&self, src: &mut ByteBuffer) -> Result> { 39 | if src.len() < MSG_LENGTH_SIZE { 40 | return Ok(None); 41 | } 42 | 43 | let mut length = [0; MSG_LENGTH_SIZE]; 44 | length.copy_from_slice(&src.as_ref()[..MSG_LENGTH_SIZE]); 45 | let (length, _) = decode::(&length)?; 46 | let length = length as usize; 47 | 48 | if src.len() - MSG_LENGTH_SIZE >= length { 49 | Ok(Some(( 50 | length + MSG_LENGTH_SIZE, 51 | src.as_ref()[MSG_LENGTH_SIZE..length + MSG_LENGTH_SIZE].to_vec(), 52 | ))) 53 | } else { 54 | Ok(None) 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /net/src/codec/mod.rs: -------------------------------------------------------------------------------- 1 | mod buffer; 2 | mod bytes_codec; 3 | mod length_codec; 4 | #[cfg(feature = "ws")] 5 | mod websocket; 6 | 7 | pub use buffer::{Buffer, ByteBuffer}; 8 | pub use bytes_codec::BytesCodec; 9 | pub use length_codec::LengthCodec; 10 | 11 | #[cfg(feature = "ws")] 12 | pub use websocket::{WebSocketCodec, WebSocketDecoder, WebSocketEncoder}; 13 | 14 | pub trait Codec: 15 | Decoder 16 | + Encoder 17 | + Send 18 | + Sync 19 | + Unpin 20 | { 21 | type Message: Send + Sync; 22 | type Error; 23 | } 24 | 25 | pub trait Encoder { 26 | type EnMessage; 27 | type EnError: From; 28 | fn encode( 29 | &self, 30 | src: &Self::EnMessage, 31 | dst: &mut ByteBuffer, 32 | ) -> std::result::Result; 33 | } 34 | 35 | pub trait Decoder { 36 | type DeMessage; 37 | type DeError: From; 38 | fn decode( 39 | &self, 40 | src: &mut ByteBuffer, 41 | ) -> std::result::Result, Self::DeError>; 42 | } 43 | -------------------------------------------------------------------------------- /net/src/codec/websocket.rs: -------------------------------------------------------------------------------- 1 | use async_tungstenite::tungstenite::Message; 2 | 3 | pub trait WebSocketCodec: 4 
| WebSocketDecoder 5 | + WebSocketEncoder 6 | + Send 7 | + Sync 8 | + Unpin 9 | { 10 | type Message: Send + Sync; 11 | type Error; 12 | } 13 | 14 | pub trait WebSocketEncoder { 15 | type EnMessage; 16 | type EnError; 17 | fn encode(&self, src: &Self::EnMessage) -> std::result::Result; 18 | } 19 | 20 | pub trait WebSocketDecoder { 21 | type DeMessage; 22 | type DeError; 23 | fn decode(&self, src: &Message) -> std::result::Result, Self::DeError>; 24 | } 25 | -------------------------------------------------------------------------------- /net/src/connection.rs: -------------------------------------------------------------------------------- 1 | use std::result::Result; 2 | 3 | use async_trait::async_trait; 4 | 5 | use crate::Endpoint; 6 | 7 | /// Alias for `Box` 8 | pub type Conn = Box>; 9 | 10 | /// A trait for objects which can be converted to [`Conn`]. 11 | pub trait ToConn { 12 | type Message; 13 | type Error; 14 | fn to_conn(self) -> Conn; 15 | } 16 | 17 | /// Connection is a generic network connection interface for 18 | /// [`udp::UdpConn`], [`tcp::TcpConn`], [`tls::TlsConn`], [`ws::WsConn`], 19 | /// and [`unix::UnixConn`]. 20 | /// 21 | /// If you are familiar with the Go language, this is similar to the 22 | /// [Conn](https://pkg.go.dev/net#Conn) interface 23 | #[async_trait] 24 | pub trait Connection: Send + Sync { 25 | type Message; 26 | type Error; 27 | /// Returns the remote peer endpoint of this connection 28 | fn peer_endpoint(&self) -> Result; 29 | 30 | /// Returns the local socket endpoint of this connection 31 | fn local_endpoint(&self) -> Result; 32 | 33 | /// Recvs data from this connection. 34 | async fn recv(&self) -> Result; 35 | 36 | /// Sends data to this connection 37 | async fn send(&self, msg: Self::Message) -> Result<(), Self::Error>; 38 | } 39 | -------------------------------------------------------------------------------- /net/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error as ThisError; 2 | 3 | pub type Result = std::result::Result; 4 | 5 | #[derive(ThisError, Debug)] 6 | pub enum Error { 7 | #[error(transparent)] 8 | IO(#[from] std::io::Error), 9 | 10 | #[error("Try From Endpoint Error")] 11 | TryFromEndpoint, 12 | 13 | #[error("Unsupported Endpoint {0}")] 14 | UnsupportedEndpoint(String), 15 | 16 | #[error("Parse Endpoint Error {0}")] 17 | ParseEndpoint(String), 18 | 19 | #[cfg(feature = "ws")] 20 | #[error("Ws Error: {0}")] 21 | WsError(#[from] Box), 22 | 23 | #[cfg(feature = "tls")] 24 | #[error("Invalid DNS Name: {0}")] 25 | InvalidDnsNameError(#[from] rustls_pki_types::InvalidDnsNameError), 26 | 27 | #[error(transparent)] 28 | KaryonCore(#[from] karyon_core::Error), 29 | } 30 | -------------------------------------------------------------------------------- /net/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod codec; 2 | mod connection; 3 | mod endpoint; 4 | mod error; 5 | mod listener; 6 | #[cfg(feature = "stream")] 7 | mod stream; 8 | mod transports; 9 | 10 | pub use { 11 | connection::{Conn, Connection, ToConn}, 12 | endpoint::{Addr, Endpoint, Port, ToEndpoint}, 13 | listener::{ConnListener, Listener, ToListener}, 14 | }; 15 | 16 | #[cfg(feature = "tcp")] 17 | pub use transports::tcp; 18 | 19 | #[cfg(feature = "tls")] 20 | pub use transports::tls; 21 | 22 | #[cfg(feature = "ws")] 23 | pub use transports::ws; 24 | 25 | #[cfg(feature = "udp")] 26 | pub use transports::udp; 27 | 28 | #[cfg(all(feature = "unix", target_family 
= "unix"))] 29 | pub use transports::unix; 30 | 31 | #[cfg(feature = "tls")] 32 | pub use karyon_async_rustls as async_rustls; 33 | 34 | /// Represents karyon's Net Error 35 | pub use error::Error; 36 | 37 | /// Represents karyon's Net Result 38 | pub use error::Result; 39 | -------------------------------------------------------------------------------- /net/src/listener.rs: -------------------------------------------------------------------------------- 1 | use std::result::Result; 2 | 3 | use async_trait::async_trait; 4 | 5 | use crate::{Conn, Endpoint}; 6 | 7 | /// Alias for `Box` 8 | pub type Listener = Box>; 9 | 10 | /// A trait for objects which can be converted to [`Listener`]. 11 | pub trait ToListener { 12 | type Message; 13 | type Error; 14 | fn to_listener(self) -> Listener; 15 | } 16 | 17 | /// ConnListener is a generic network listener interface for 18 | /// [`tcp::TcpConn`], [`tls::TlsConn`], [`ws::WsConn`], and [`unix::UnixConn`]. 19 | #[async_trait] 20 | pub trait ConnListener: Send + Sync { 21 | type Message; 22 | type Error; 23 | fn local_endpoint(&self) -> Result; 24 | async fn accept(&self) -> Result, Self::Error>; 25 | } 26 | -------------------------------------------------------------------------------- /net/src/stream/websocket.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io::ErrorKind, 3 | pin::Pin, 4 | result::Result, 5 | task::{Context, Poll}, 6 | }; 7 | 8 | use async_tungstenite::tungstenite::Message; 9 | use futures_util::{ 10 | stream::{SplitSink, SplitStream}, 11 | Sink, SinkExt, Stream, StreamExt, TryStreamExt, 12 | }; 13 | use pin_project_lite::pin_project; 14 | 15 | use async_tungstenite::tungstenite::Error; 16 | 17 | #[cfg(feature = "tokio")] 18 | type WebSocketStream = 19 | async_tungstenite::WebSocketStream>; 20 | #[cfg(feature = "smol")] 21 | use async_tungstenite::WebSocketStream; 22 | 23 | use karyon_core::async_runtime::net::TcpStream; 24 | 25 | #[cfg(feature = "tls")] 26 | use crate::async_rustls::TlsStream; 27 | 28 | use crate::codec::WebSocketCodec; 29 | 30 | pub struct WsStream { 31 | inner: InnerWSConn, 32 | codec: C, 33 | } 34 | 35 | impl WsStream 36 | where 37 | C: WebSocketCodec + Clone, 38 | { 39 | pub fn new_ws(conn: WebSocketStream, codec: C) -> Self { 40 | Self { 41 | inner: InnerWSConn::Plain(Box::new(conn)), 42 | codec, 43 | } 44 | } 45 | 46 | #[cfg(feature = "tls")] 47 | pub fn new_wss(conn: WebSocketStream>, codec: C) -> Self { 48 | Self { 49 | inner: InnerWSConn::Tls(Box::new(conn)), 50 | codec, 51 | } 52 | } 53 | 54 | pub fn split(self) -> (ReadWsStream, WriteWsStream) { 55 | let (write, read) = self.inner.split(); 56 | 57 | ( 58 | ReadWsStream { 59 | codec: self.codec.clone(), 60 | inner: read, 61 | }, 62 | WriteWsStream { 63 | inner: write, 64 | codec: self.codec, 65 | }, 66 | ) 67 | } 68 | } 69 | 70 | pin_project! { 71 | pub struct ReadWsStream { 72 | #[pin] 73 | inner: SplitStream, 74 | codec: C, 75 | } 76 | } 77 | 78 | pin_project! { 79 | pub struct WriteWsStream { 80 | #[pin] 81 | inner: SplitSink, 82 | codec: C, 83 | } 84 | } 85 | 86 | impl ReadWsStream 87 | where 88 | C: WebSocketCodec, 89 | E: From, 90 | { 91 | pub async fn recv(&mut self) -> Result { 92 | match self.inner.next().await { 93 | Some(msg) => match self.codec.decode(&msg?)? 
{ 94 | Some(m) => Ok(m), 95 | None => todo!(), 96 | }, 97 | None => Err(Error::Io(std::io::Error::from(ErrorKind::ConnectionAborted)).into()), 98 | } 99 | } 100 | } 101 | 102 | impl WriteWsStream 103 | where 104 | C: WebSocketCodec, 105 | E: From, 106 | { 107 | pub async fn send(&mut self, msg: C::Message) -> Result<(), E> { 108 | let ws_msg = self.codec.encode(&msg)?; 109 | Ok(self.inner.send(ws_msg).await?) 110 | } 111 | } 112 | 113 | impl Sink for WriteWsStream { 114 | type Error = Error; 115 | 116 | fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 117 | self.project().inner.poll_ready(cx) 118 | } 119 | 120 | fn start_send(self: Pin<&mut Self>, item: Message) -> Result<(), Self::Error> { 121 | self.project().inner.start_send(item) 122 | } 123 | 124 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 125 | self.project().inner.poll_flush(cx) 126 | } 127 | 128 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 129 | self.project().inner.poll_close(cx) 130 | } 131 | } 132 | 133 | impl Stream for ReadWsStream { 134 | type Item = Result; 135 | 136 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 137 | self.inner.try_poll_next_unpin(cx) 138 | } 139 | } 140 | 141 | enum InnerWSConn { 142 | Plain(Box>), 143 | #[cfg(feature = "tls")] 144 | Tls(Box>>), 145 | } 146 | 147 | impl Sink for InnerWSConn { 148 | type Error = Error; 149 | 150 | fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 151 | match &mut *self { 152 | InnerWSConn::Plain(s) => Pin::new(s.as_mut()).poll_ready(cx), 153 | #[cfg(feature = "tls")] 154 | InnerWSConn::Tls(s) => Pin::new(s.as_mut()).poll_ready(cx), 155 | } 156 | } 157 | 158 | fn start_send(mut self: Pin<&mut Self>, item: Message) -> Result<(), Self::Error> { 159 | match &mut *self { 160 | InnerWSConn::Plain(s) => Pin::new(s.as_mut()).start_send(item), 161 | #[cfg(feature = "tls")] 162 | InnerWSConn::Tls(s) => Pin::new(s.as_mut()).start_send(item), 163 | } 164 | } 165 | 166 | fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 167 | match &mut *self { 168 | InnerWSConn::Plain(s) => Pin::new(s.as_mut()).poll_flush(cx), 169 | #[cfg(feature = "tls")] 170 | InnerWSConn::Tls(s) => Pin::new(s.as_mut()).poll_flush(cx), 171 | } 172 | } 173 | 174 | fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 175 | match &mut *self { 176 | InnerWSConn::Plain(s) => Pin::new(s.as_mut()).poll_close(cx), 177 | #[cfg(feature = "tls")] 178 | InnerWSConn::Tls(s) => Pin::new(s.as_mut()).poll_close(cx), 179 | } 180 | } 181 | } 182 | 183 | impl Stream for InnerWSConn { 184 | type Item = Result; 185 | 186 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 187 | match &mut *self { 188 | InnerWSConn::Plain(s) => Pin::new(s).poll_next(cx), 189 | #[cfg(feature = "tls")] 190 | InnerWSConn::Tls(s) => Pin::new(s.as_mut()).poll_next(cx), 191 | } 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /net/src/transports/mod.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "tcp")] 2 | pub mod tcp; 3 | #[cfg(feature = "tls")] 4 | pub mod tls; 5 | #[cfg(feature = "udp")] 6 | pub mod udp; 7 | #[cfg(all(feature = "unix", target_family = "unix"))] 8 | pub mod unix; 9 | #[cfg(feature = "ws")] 10 | pub mod ws; 11 | -------------------------------------------------------------------------------- /net/src/transports/tcp.rs: 
-------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | 3 | use async_trait::async_trait; 4 | use futures_util::SinkExt; 5 | 6 | use karyon_core::async_runtime::{ 7 | io::{split, ReadHalf, WriteHalf}, 8 | lock::Mutex, 9 | net::{TcpListener as AsyncTcpListener, TcpStream}, 10 | }; 11 | 12 | use crate::{ 13 | codec::Codec, 14 | connection::{Conn, Connection, ToConn}, 15 | endpoint::Endpoint, 16 | listener::{ConnListener, Listener, ToListener}, 17 | stream::{ReadStream, WriteStream}, 18 | Result, 19 | }; 20 | 21 | /// TCP configuration 22 | #[derive(Clone)] 23 | pub struct TcpConfig { 24 | pub nodelay: bool, 25 | } 26 | 27 | impl Default for TcpConfig { 28 | fn default() -> Self { 29 | Self { nodelay: true } 30 | } 31 | } 32 | 33 | /// TCP connection implementation of the [`Connection`] trait. 34 | pub struct TcpConn { 35 | read_stream: Mutex, C>>, 36 | write_stream: Mutex, C>>, 37 | peer_endpoint: Endpoint, 38 | local_endpoint: Endpoint, 39 | } 40 | 41 | impl TcpConn 42 | where 43 | C: Codec + Clone, 44 | { 45 | /// Creates a new TcpConn 46 | pub fn new( 47 | socket: TcpStream, 48 | codec: C, 49 | peer_endpoint: Endpoint, 50 | local_endpoint: Endpoint, 51 | ) -> Self { 52 | let (read, write) = split(socket); 53 | let read_stream = Mutex::new(ReadStream::new(read, codec.clone())); 54 | let write_stream = Mutex::new(WriteStream::new(write, codec)); 55 | Self { 56 | read_stream, 57 | write_stream, 58 | peer_endpoint, 59 | local_endpoint, 60 | } 61 | } 62 | } 63 | 64 | #[async_trait] 65 | impl Connection for TcpConn 66 | where 67 | C: Codec + Clone, 68 | { 69 | type Message = C::Message; 70 | type Error = E; 71 | fn peer_endpoint(&self) -> std::result::Result { 72 | Ok(self.peer_endpoint.clone()) 73 | } 74 | 75 | fn local_endpoint(&self) -> std::result::Result { 76 | Ok(self.local_endpoint.clone()) 77 | } 78 | 79 | async fn recv(&self) -> std::result::Result { 80 | self.read_stream.lock().await.recv().await 81 | } 82 | 83 | async fn send(&self, msg: Self::Message) -> std::result::Result<(), Self::Error> { 84 | self.write_stream.lock().await.send(msg).await 85 | } 86 | } 87 | 88 | pub struct TcpListener { 89 | inner: AsyncTcpListener, 90 | config: TcpConfig, 91 | codec: C, 92 | } 93 | 94 | impl TcpListener 95 | where 96 | C: Codec, 97 | { 98 | pub fn new(listener: AsyncTcpListener, config: TcpConfig, codec: C) -> Self { 99 | Self { 100 | inner: listener, 101 | config: config.clone(), 102 | codec, 103 | } 104 | } 105 | } 106 | 107 | #[async_trait] 108 | impl ConnListener for TcpListener 109 | where 110 | C: Codec + Clone + 'static, 111 | E: From, 112 | { 113 | type Message = C::Message; 114 | type Error = E; 115 | fn local_endpoint(&self) -> std::result::Result { 116 | Ok(Endpoint::new_tcp_addr(self.inner.local_addr()?)) 117 | } 118 | 119 | async fn accept(&self) -> std::result::Result, Self::Error> { 120 | let (socket, _) = self.inner.accept().await?; 121 | socket.set_nodelay(self.config.nodelay)?; 122 | 123 | let peer_endpoint = socket.peer_addr().map(Endpoint::new_tcp_addr)?; 124 | let local_endpoint = socket.local_addr().map(Endpoint::new_tcp_addr)?; 125 | 126 | Ok(Box::new(TcpConn::new( 127 | socket, 128 | self.codec.clone(), 129 | peer_endpoint, 130 | local_endpoint, 131 | ))) 132 | } 133 | } 134 | 135 | /// Connects to the given TCP address and port. 
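///
/// A minimal usage sketch (illustrative only; the endpoint string and the
/// `MyCodec` type are assumptions, not part of this function's contract):
///
/// ```ignore
/// // `Endpoint` parses URL-style strings such as "tcp://127.0.0.1:4000",
/// // and `MyCodec` stands in for any type implementing `Codec`.
/// let endpoint: Endpoint = "tcp://127.0.0.1:4000".parse()?;
/// let conn = dial(&endpoint, TcpConfig::default(), MyCodec {}).await?;
/// conn.send(my_message).await?;
/// ```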
136 | pub async fn dial(endpoint: &Endpoint, config: TcpConfig, codec: C) -> Result> 137 | where 138 | C: Codec + Clone, 139 | { 140 | let addr = SocketAddr::try_from(endpoint.clone())?; 141 | let socket = TcpStream::connect(addr).await?; 142 | socket.set_nodelay(config.nodelay)?; 143 | 144 | let peer_endpoint = socket.peer_addr().map(Endpoint::new_tcp_addr)?; 145 | let local_endpoint = socket.local_addr().map(Endpoint::new_tcp_addr)?; 146 | 147 | Ok(TcpConn::new(socket, codec, peer_endpoint, local_endpoint)) 148 | } 149 | 150 | /// Listens on the given TCP address and port. 151 | pub async fn listen(endpoint: &Endpoint, config: TcpConfig, codec: C) -> Result> 152 | where 153 | C: Codec, 154 | { 155 | let addr = SocketAddr::try_from(endpoint.clone())?; 156 | let listener = AsyncTcpListener::bind(addr).await?; 157 | Ok(TcpListener::new(listener, config, codec)) 158 | } 159 | 160 | impl From> for Box> 161 | where 162 | C: Clone + Codec + 'static, 163 | E: From, 164 | { 165 | fn from(listener: TcpListener) -> Self { 166 | Box::new(listener) 167 | } 168 | } 169 | 170 | impl ToConn for TcpConn 171 | where 172 | C: Codec + Clone + 'static, 173 | { 174 | type Message = C::Message; 175 | type Error = E; 176 | fn to_conn(self) -> Conn { 177 | Box::new(self) 178 | } 179 | } 180 | 181 | impl ToListener for TcpListener 182 | where 183 | C: Clone + Codec + 'static, 184 | E: From, 185 | { 186 | type Message = C::Message; 187 | type Error = E; 188 | fn to_listener(self) -> Listener { 189 | Box::new(self) 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /net/src/transports/udp.rs: -------------------------------------------------------------------------------- 1 | use std::net::SocketAddr; 2 | 3 | use async_trait::async_trait; 4 | use karyon_core::async_runtime::net::UdpSocket; 5 | 6 | use crate::{ 7 | codec::{Buffer, Codec}, 8 | connection::{Conn, Connection, ToConn}, 9 | endpoint::Endpoint, 10 | Result, 11 | }; 12 | 13 | const BUFFER_SIZE: usize = 64 * 1024; 14 | 15 | /// UDP configuration 16 | #[derive(Default)] 17 | pub struct UdpConfig {} 18 | 19 | /// UDP network connection implementation of the [`Connection`] trait. 20 | #[allow(dead_code)] 21 | pub struct UdpConn { 22 | inner: UdpSocket, 23 | codec: C, 24 | config: UdpConfig, 25 | } 26 | 27 | impl UdpConn 28 | where 29 | C: Codec + Clone, 30 | { 31 | /// Creates a new UdpConn 32 | fn new(socket: UdpSocket, config: UdpConfig, codec: C) -> Self { 33 | Self { 34 | inner: socket, 35 | codec, 36 | config, 37 | } 38 | } 39 | } 40 | 41 | #[async_trait] 42 | impl Connection for UdpConn 43 | where 44 | C: Codec + Clone, 45 | E: From, 46 | { 47 | type Message = (C::Message, Endpoint); 48 | type Error = E; 49 | fn peer_endpoint(&self) -> std::result::Result { 50 | Ok(self.inner.peer_addr().map(Endpoint::new_udp_addr)?) 51 | } 52 | 53 | fn local_endpoint(&self) -> std::result::Result { 54 | Ok(self.inner.local_addr().map(Endpoint::new_udp_addr)?) 55 | } 56 | 57 | async fn recv(&self) -> std::result::Result { 58 | let mut buf = Buffer::new(BUFFER_SIZE); 59 | let (_, addr) = self.inner.recv_from(buf.as_mut()).await?; 60 | match self.codec.decode(&mut buf)? 
{ 61 | Some((_, msg)) => Ok((msg, Endpoint::new_udp_addr(addr))), 62 | None => Err(std::io::Error::from(std::io::ErrorKind::ConnectionAborted).into()), 63 | } 64 | } 65 | 66 | async fn send(&self, msg: Self::Message) -> std::result::Result<(), Self::Error> { 67 | let (msg, out_addr) = msg; 68 | let mut buf = Buffer::new(BUFFER_SIZE); 69 | self.codec.encode(&msg, &mut buf)?; 70 | let addr: SocketAddr = out_addr 71 | .try_into() 72 | .map_err(|_| std::io::Error::other("Convert Endpoint to SocketAddress"))?; 73 | self.inner.send_to(buf.as_ref(), addr).await?; 74 | Ok(()) 75 | } 76 | } 77 | 78 | /// Connects to the given UDP address and port. 79 | pub async fn dial(endpoint: &Endpoint, config: UdpConfig, codec: C) -> Result> 80 | where 81 | C: Codec + Clone, 82 | { 83 | let addr = SocketAddr::try_from(endpoint.clone())?; 84 | 85 | // Let the operating system assign an available port to this socket 86 | let conn = UdpSocket::bind("[::]:0").await?; 87 | conn.connect(addr).await?; 88 | Ok(UdpConn::new(conn, config, codec)) 89 | } 90 | 91 | /// Listens on the given UDP address and port. 92 | pub async fn listen(endpoint: &Endpoint, config: UdpConfig, codec: C) -> Result> 93 | where 94 | C: Codec + Clone, 95 | { 96 | let addr = SocketAddr::try_from(endpoint.clone())?; 97 | let conn = UdpSocket::bind(addr).await?; 98 | Ok(UdpConn::new(conn, config, codec)) 99 | } 100 | 101 | impl ToConn for UdpConn 102 | where 103 | C: Codec + Clone + 'static, 104 | E: From, 105 | { 106 | type Message = (C::Message, Endpoint); 107 | type Error = E; 108 | fn to_conn(self) -> Conn { 109 | Box::new(self) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /net/src/transports/unix.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use futures_util::SinkExt; 3 | 4 | use karyon_core::async_runtime::{ 5 | io::{split, ReadHalf, WriteHalf}, 6 | lock::Mutex, 7 | net::{UnixListener as AsyncUnixListener, UnixStream}, 8 | }; 9 | 10 | use crate::{ 11 | codec::Codec, 12 | connection::{Conn, Connection, ToConn}, 13 | endpoint::Endpoint, 14 | listener::{ConnListener, Listener, ToListener}, 15 | stream::{ReadStream, WriteStream}, 16 | Result, 17 | }; 18 | 19 | /// Unix Conn config 20 | #[derive(Clone, Default)] 21 | pub struct UnixConfig {} 22 | 23 | /// Unix domain socket implementation of the [`Connection`] trait. 
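///
/// The peer and local endpoints are captured once at construction time; for
/// unnamed sockets they may be unavailable, in which case `peer_endpoint()`
/// and `local_endpoint()` return an `AddrNotAvailable` I/O error.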
24 | pub struct UnixConn { 25 | read_stream: Mutex, C>>, 26 | write_stream: Mutex, C>>, 27 | peer_endpoint: Option, 28 | local_endpoint: Option, 29 | } 30 | 31 | impl UnixConn 32 | where 33 | C: Codec + Clone, 34 | { 35 | /// Creates a new TcpConn 36 | pub fn new(conn: UnixStream, codec: C) -> Self { 37 | let peer_endpoint = conn 38 | .peer_addr() 39 | .and_then(|a| { 40 | Ok(Endpoint::new_unix_addr( 41 | a.as_pathname() 42 | .ok_or(std::io::ErrorKind::AddrNotAvailable)?, 43 | )) 44 | }) 45 | .ok(); 46 | let local_endpoint = conn 47 | .local_addr() 48 | .and_then(|a| { 49 | Ok(Endpoint::new_unix_addr( 50 | a.as_pathname() 51 | .ok_or(std::io::ErrorKind::AddrNotAvailable)?, 52 | )) 53 | }) 54 | .ok(); 55 | 56 | let (read, write) = split(conn); 57 | let read_stream = Mutex::new(ReadStream::new(read, codec.clone())); 58 | let write_stream = Mutex::new(WriteStream::new(write, codec)); 59 | Self { 60 | read_stream, 61 | write_stream, 62 | peer_endpoint, 63 | local_endpoint, 64 | } 65 | } 66 | } 67 | 68 | #[async_trait] 69 | impl Connection for UnixConn 70 | where 71 | C: Codec + Clone, 72 | E: From, 73 | { 74 | type Message = C::Message; 75 | type Error = E; 76 | fn peer_endpoint(&self) -> std::result::Result { 77 | Ok(self 78 | .peer_endpoint 79 | .clone() 80 | .ok_or(std::io::Error::from(std::io::ErrorKind::AddrNotAvailable))?) 81 | } 82 | 83 | fn local_endpoint(&self) -> std::result::Result { 84 | Ok(self 85 | .local_endpoint 86 | .clone() 87 | .ok_or(std::io::Error::from(std::io::ErrorKind::AddrNotAvailable))?) 88 | } 89 | 90 | async fn recv(&self) -> std::result::Result { 91 | self.read_stream.lock().await.recv().await 92 | } 93 | 94 | async fn send(&self, msg: Self::Message) -> std::result::Result<(), Self::Error> { 95 | self.write_stream.lock().await.send(msg).await 96 | } 97 | } 98 | 99 | #[allow(dead_code)] 100 | pub struct UnixListener { 101 | inner: AsyncUnixListener, 102 | config: UnixConfig, 103 | codec: C, 104 | } 105 | 106 | impl UnixListener 107 | where 108 | C: Codec + Clone, 109 | { 110 | pub fn new(listener: AsyncUnixListener, config: UnixConfig, codec: C) -> Self { 111 | Self { 112 | inner: listener, 113 | config, 114 | codec, 115 | } 116 | } 117 | } 118 | 119 | #[async_trait] 120 | impl ConnListener for UnixListener 121 | where 122 | C: Codec + Clone + 'static, 123 | E: From, 124 | { 125 | type Message = C::Message; 126 | type Error = E; 127 | fn local_endpoint(&self) -> std::result::Result { 128 | Ok(self.inner.local_addr().and_then(|a| { 129 | Ok(Endpoint::new_unix_addr( 130 | a.as_pathname() 131 | .ok_or(std::io::ErrorKind::AddrNotAvailable)?, 132 | )) 133 | })?) 134 | } 135 | 136 | async fn accept(&self) -> std::result::Result, Self::Error> { 137 | let (conn, _) = self.inner.accept().await?; 138 | Ok(Box::new(UnixConn::new(conn, self.codec.clone()))) 139 | } 140 | } 141 | 142 | /// Connects to the given Unix socket path. 143 | pub async fn dial(endpoint: &Endpoint, _config: UnixConfig, codec: C) -> Result> 144 | where 145 | C: Codec + Clone, 146 | { 147 | let path: std::path::PathBuf = endpoint.clone().try_into()?; 148 | let conn = UnixStream::connect(path).await?; 149 | Ok(UnixConn::new(conn, codec)) 150 | } 151 | 152 | /// Listens on the given Unix socket path. 
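///
/// Note that, unlike the TCP and UDP `listen` functions, this one is not
/// `async`: binding the Unix socket completes without awaiting.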
153 | pub fn listen(endpoint: &Endpoint, config: UnixConfig, codec: C) -> Result> 154 | where 155 | C: Codec + Clone, 156 | { 157 | let path: std::path::PathBuf = endpoint.clone().try_into()?; 158 | let listener = AsyncUnixListener::bind(path)?; 159 | Ok(UnixListener::new(listener, config, codec)) 160 | } 161 | 162 | // impl From for Box { 163 | // fn from(conn: UnixStream) -> Self { 164 | // Box::new(UnixConn::new(conn)) 165 | // } 166 | // } 167 | 168 | impl From> for Listener 169 | where 170 | C: Codec + Clone + 'static, 171 | E: From, 172 | { 173 | fn from(listener: UnixListener) -> Self { 174 | Box::new(listener) 175 | } 176 | } 177 | 178 | impl ToConn for UnixConn 179 | where 180 | C: Codec + Clone + 'static, 181 | E: From, 182 | { 183 | type Message = C::Message; 184 | type Error = E; 185 | fn to_conn(self) -> Conn { 186 | Box::new(self) 187 | } 188 | } 189 | 190 | impl ToListener for UnixListener 191 | where 192 | C: Codec + Clone + 'static, 193 | E: From, 194 | { 195 | type Message = C::Message; 196 | type Error = E; 197 | fn to_listener(self) -> Listener { 198 | Box::new(self) 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /p2p/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "karyon_p2p" 3 | description = "A lightweight, extensible, and customizable p2p network stack." 4 | version.workspace = true 5 | edition.workspace = true 6 | homepage.workspace = true 7 | repository.workspace = true 8 | license.workspace = true 9 | authors.workspace = true 10 | 11 | 12 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 13 | 14 | [features] 15 | default = ["smol"] 16 | smol = ["karyon_core/smol", "karyon_net/smol", "futures-rustls"] 17 | tokio = ["karyon_core/tokio", "karyon_net/tokio", "tokio-rustls"] 18 | serde = ["dep:serde", "karyon_net/serde"] 19 | 20 | [dependencies] 21 | karyon_core = { workspace = true, features = ["crypto"] } 22 | karyon_net = { workspace = true, features = ["tcp", "tls", "udp"] } 23 | 24 | 25 | log = { workspace = true } 26 | chrono = { workspace = true } 27 | rand = { workspace = true } 28 | thiserror = { workspace = true } 29 | semver = { workspace = true } 30 | sha2 = { workspace = true } 31 | parking_lot = { workspace = true } 32 | 33 | # encode/decode 34 | bincode = { workspace = true, features = ["derive"] } 35 | base64 = { workspace = true } 36 | serde = { workspace = true, features = ["derive"], optional = true } 37 | 38 | # async 39 | async-trait = { workspace = true } 40 | async-channel = { workspace = true } 41 | futures-util = { workspace = true, features = ["alloc"] } 42 | 43 | # tls 44 | rcgen = { workspace = true } 45 | yasna = { workspace = true } 46 | x509-parser = { workspace = true } 47 | futures-rustls = { workspace = true, features = ["aws-lc-rs"], optional = true } 48 | tokio-rustls = { workspace = true, features = ["aws-lc-rs"], optional = true } 49 | rustls-pki-types = { workspace = true } 50 | 51 | [dev-dependencies] 52 | smol = { workspace = true } 53 | blocking = "1.6" 54 | clap = { version = "4.5", features = ["derive"] } 55 | ctrlc = "3.4" 56 | easy-parallel = "3.3" 57 | env_logger = "0.11" 58 | -------------------------------------------------------------------------------- /p2p/README.md: -------------------------------------------------------------------------------- 1 | # Karyon p2p 2 | 3 | Karyon p2p serves as the foundational stack for the Karyon library. 
It offers 4 | a lightweight, extensible, and customizable peer-to-peer (p2p) network stack 5 | that seamlessly integrates with any p2p project. 6 | 7 | ## Architecture 8 | 9 | ### Discovery 10 | 11 | Karyon p2p uses a customized version of the Kademlia for discovering new peers 12 | in the network. This approach is based on Kademlia but with several significant 13 | differences and optimizations. Some of the main changes: 14 | 15 | 1. Karyon p2p uses TCP for the lookup process, while UDP is used for 16 | validating and refreshing the routing table. The reason for this choice is 17 | that the lookup process is infrequent, and the work required to manage 18 | messages with UDP is largely equivalent to using TCP for this purpose. 19 | However, for the periodic and efficient sending of numerous Ping messages to 20 | the entries in the routing table during refreshing, it makes sense to 21 | use UDP. 22 | 23 | 2. In contrast to traditional Kademlia, which often employs 160 buckets, 24 | Karyon p2p reduces the number of buckets to 32. This optimization is a 25 | result of the observation that most nodes tend to map into the last few 26 | buckets, with the majority of other buckets remaining empty. 27 | 28 | 3. While Kademlia typically uses a 160-bit key to identify a peer, Karyon p2p 29 | uses a 256-bit key. 30 | 31 | > Despite criticisms of Kademlia's vulnerabilities, particularly concerning 32 | > Sybil and Eclipse attacks [[1]](https://eprint.iacr.org/2018/236.pdf) 33 | > [[2]](https://arxiv.org/abs/1908.10141), we chose to use Kademlia because our 34 | > main goal is to build a network focused on sharing data. This choice 35 | > may also assist us in supporting sharding in the future. However, we have made 36 | > efforts to mitigate most of its vulnerabilities. Several projects, including 37 | > BitTorrent, Ethereum, IPFS, and Storj, still rely on Kademlia. 38 | 39 | ### Peer ID 40 | 41 | In the Karyon p2p network, each peer is identified by a 256-bit (32-byte) Peer ID. 42 | 43 | ### Seeding 44 | 45 | At the network's initiation, the client populates the routing table with peers 46 | closest to its key(PeerID) through a seeding process. Once this process is 47 | complete, and the routing table is filled, the client selects a random peer 48 | from the routing table and establishes an outbound connection. This process 49 | continues until all outbound slots are occupied. 50 | 51 | The client can optionally provide a listening endpoint to accept inbound 52 | connections. 53 | 54 | ### Refreshing 55 | 56 | The routing table undergoes periodic refreshment to validate the peers. This 57 | process involves opening a UDP connection with the peers listed in the routing 58 | table and sending a `PING` message. If the peer responds with a `PONG` message, 59 | it means that the peer is still alive. Otherwise, the peer will be removed from 60 | the routing table. 61 | 62 | ### Handshake 63 | 64 | When an inbound or outbound connection is established, the client initiates a 65 | handshake with that connection. If the handshake is successful, the connection 66 | is added to the `PeerPool`. 67 | 68 | ### Protocols 69 | 70 | In the Karyon p2p network, there are two types of protocols: core protocols and 71 | custom protocols. Core protocols, such as the Ping and Handshake protocols, 72 | come prebuilt into Karyon p2p. Custom protocols, however, are ones that you 73 | create to provide the specific functionality your application needs. 
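A custom protocol implements the `Protocol` trait and is registered with the
`Backend` through `attach_protocol`, which takes a constructor closure that
receives the connected `Peer`. A minimal registration sketch (using the
`NewProtocol` type defined in the example below, and assuming `backend` is the
`Arc<Backend>` returned by `Backend::new`) looks like this:

```rust
let ctor = |peer| NewProtocol::new(peer);
backend
    .attach_protocol::<NewProtocol>(ctor)
    .await
    .expect("Attach the new protocol");
```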
74 | 75 | Here's an example of a custom protocol: 76 | 77 | ```rust 78 | pub struct NewProtocol { 79 | peer: Arc, 80 | } 81 | 82 | impl NewProtocol { 83 | fn new(peer: Arc) -> Arc { 84 | Arc::new(Self { 85 | peer, 86 | }) 87 | } 88 | } 89 | 90 | #[async_trait] 91 | impl Protocol for NewProtocol { 92 | async fn start(self: Arc) -> Result<(), Error> { 93 | loop { 94 | match self.peer.recv::().await.expect("Receive msg") { 95 | ProtocolEvent::Message(msg) => { 96 | println!("{:?}", msg); 97 | } 98 | ProtocolEvent::Shutdown => { 99 | break; 100 | } 101 | } 102 | } 103 | Ok(()) 104 | } 105 | 106 | fn version() -> Result { 107 | "0.2.0, >0.1.0".parse() 108 | } 109 | 110 | fn id() -> ProtocolID { 111 | "NEWPROTOCOLID".into() 112 | } 113 | } 114 | 115 | ``` 116 | 117 | ## Network Security 118 | 119 | Using TLS is possible for all inbound and outbound connections by enabling the 120 | boolean `enable_tls` field in the configuration. However, implementing TLS for 121 | a p2p network is not trivial and is still unstable, requiring a comprehensive 122 | audit. 123 | 124 | 125 | ## Choosing the async runtime 126 | 127 | Karyon p2p currently supports both **smol(async-std)** and **tokio** async runtimes. 128 | The default is **smol**, but if you want to use **tokio**, you need to disable 129 | the default features and then select the `tokio` feature. 130 | 131 | ## Examples 132 | 133 | You can check out the examples [here](./examples). 134 | 135 | If you have tmux installed, you can run the network simulation script in the 136 | examples directory to run 12 peers simultaneously. 137 | 138 | ```bash 139 | $ RUST_LOG=karyon=info ./net_simulation.sh 140 | ``` 141 | -------------------------------------------------------------------------------- /p2p/examples/chat.rs: -------------------------------------------------------------------------------- 1 | mod shared; 2 | 3 | use std::sync::Arc; 4 | 5 | use async_trait::async_trait; 6 | use clap::Parser; 7 | use smol::{channel, Executor}; 8 | 9 | use karyon_p2p::{ 10 | endpoint::{Endpoint, Port}, 11 | keypair::{KeyPair, KeyPairType}, 12 | protocol::{Protocol, ProtocolEvent, ProtocolID}, 13 | Backend, Config, Error, Peer, Version, 14 | }; 15 | 16 | use shared::{read_line_async, run_executor}; 17 | 18 | #[derive(Parser)] 19 | #[command(author, version, about, long_about = None)] 20 | struct Cli { 21 | /// Optional list of bootstrap peers to start the seeding process. 22 | #[arg(short)] 23 | bootstrap_peers: Vec, 24 | 25 | /// Optional list of peer endpoints for manual connections. 26 | #[arg(short)] 27 | peer_endpoints: Vec, 28 | 29 | /// Optional endpoint for accepting incoming connections. 30 | #[arg(short)] 31 | listen_endpoint: Option, 32 | 33 | /// Optional TCP/UDP port for the discovery service. 
34 | #[arg(short)] 35 | discovery_port: Option, 36 | 37 | /// Username 38 | #[arg(long)] 39 | username: String, 40 | } 41 | 42 | pub struct ChatProtocol { 43 | username: String, 44 | peer: Arc, 45 | executor: Arc>, 46 | } 47 | 48 | impl ChatProtocol { 49 | fn new(username: &str, peer: Arc, executor: Arc>) -> Arc { 50 | Arc::new(Self { 51 | peer, 52 | username: username.to_string(), 53 | executor, 54 | }) 55 | } 56 | } 57 | 58 | #[async_trait] 59 | impl Protocol for ChatProtocol { 60 | async fn start(self: Arc) -> Result<(), Error> { 61 | let task = self.executor.spawn({ 62 | let this = self.clone(); 63 | async move { 64 | loop { 65 | let input = read_line_async().await.expect("Read line from stdin"); 66 | let msg = format!("> {}: {}", this.username, input.trim()); 67 | this.peer.broadcast(&Self::id(), &msg).await; 68 | } 69 | } 70 | }); 71 | 72 | loop { 73 | match self.peer.recv::().await? { 74 | ProtocolEvent::Message(msg) => { 75 | let msg = String::from_utf8(msg).expect("Convert received bytes to string"); 76 | println!("{msg}"); 77 | } 78 | ProtocolEvent::Shutdown => { 79 | break; 80 | } 81 | } 82 | } 83 | 84 | task.cancel().await; 85 | Ok(()) 86 | } 87 | 88 | fn version() -> Result { 89 | "0.1.0, 0.1.0".parse() 90 | } 91 | 92 | fn id() -> ProtocolID { 93 | "CHAT".into() 94 | } 95 | } 96 | 97 | fn main() { 98 | env_logger::init(); 99 | let cli = Cli::parse(); 100 | 101 | // Create a PeerID based on the username. 102 | let key_pair = KeyPair::generate(&KeyPairType::Ed25519); 103 | 104 | // Create the configuration for the backend. 105 | let config = Config { 106 | listen_endpoint: cli.listen_endpoint, 107 | peer_endpoints: cli.peer_endpoints, 108 | bootstrap_peers: cli.bootstrap_peers, 109 | discovery_port: cli.discovery_port.unwrap_or(0), 110 | enable_tls: true, 111 | ..Default::default() 112 | }; 113 | 114 | // Create a new Executor 115 | let ex = Arc::new(Executor::new()); 116 | 117 | // Create a new Backend 118 | let backend = Backend::new(&key_pair, config, ex.clone().into()); 119 | 120 | let (ctrlc_s, ctrlc_r) = channel::unbounded(); 121 | let handle = move || ctrlc_s.try_send(()).expect("Send ctrlc signal"); 122 | ctrlc::set_handler(handle).expect("ctrlc set handler"); 123 | 124 | run_executor( 125 | { 126 | let ex = ex.clone(); 127 | async { 128 | let username = cli.username; 129 | 130 | // Attach the ChatProtocol 131 | let c = move |peer| ChatProtocol::new(&username, peer, ex.clone().into()); 132 | backend 133 | .attach_protocol::(c) 134 | .await 135 | .expect("Attach chat protocol to the p2p backend"); 136 | 137 | // Run the backend 138 | backend.run().await.expect("Run the backend"); 139 | 140 | // Wait for ctrlc signal 141 | ctrlc_r.recv().await.expect("Receive ctrlc signal"); 142 | 143 | // Shutdown the backend 144 | backend.shutdown().await; 145 | } 146 | }, 147 | ex, 148 | ); 149 | } 150 | -------------------------------------------------------------------------------- /p2p/examples/chat_simulation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # build 4 | cargo build --release --example chat 5 | 6 | tmux new-session -d -s karyon_chat 7 | 8 | tmux send-keys -t karyon_chat "../../target/release/examples/chat --username 'user1'\ 9 | -l 'tcp://127.0.0.1:40000' -d '40010'" Enter 10 | 11 | tmux split-window -h -t karyon_chat 12 | tmux send-keys -t karyon_chat "../../target/release/examples/chat --username 'user2'\ 13 | -l 'tcp://127.0.0.1:40001' -d '40011' -b 'tcp://127.0.0.1:40010 ' " Enter 14 | 15 | tmux 
split-window -h -t karyon_chat 16 | tmux send-keys -t karyon_chat "../../target/release/examples/chat --username 'user3'\ 17 | -l 'tcp://127.0.0.1:40002' -d '40012' -b 'tcp://127.0.0.1:40010'" Enter 18 | 19 | tmux split-window -h -t karyon_chat 20 | tmux send-keys -t karyon_chat "../../target/release/examples/chat --username 'user4'\ 21 | -b 'tcp://127.0.0.1:40010'" Enter 22 | 23 | tmux select-layout tiled 24 | 25 | tmux attach -t karyon_chat 26 | -------------------------------------------------------------------------------- /p2p/examples/net_simulation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # build 4 | cargo build --release --example peer 5 | 6 | tmux new-session -d -s karyon_p2p 7 | 8 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 9 | -l 'tcp://127.0.0.1:30000' -d '30010'" Enter 10 | 11 | tmux split-window -h -t karyon_p2p 12 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 13 | -l 'tcp://127.0.0.1:30001' -d '30011' -b 'tcp://127.0.0.1:30010 ' " Enter 14 | 15 | tmux split-window -h -t karyon_p2p 16 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 17 | -l 'tcp://127.0.0.1:30002' -d '30012' -b 'tcp://127.0.0.1:30010'" Enter 18 | 19 | tmux split-window -h -t karyon_p2p 20 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 21 | -l 'tcp://127.0.0.1:30003' -d '30013' -b 'tcp://127.0.0.1:30010'" Enter 22 | 23 | tmux split-window -h -t karyon_p2p 24 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 25 | -l 'tcp://127.0.0.1:30004' -d '30014' -b 'tcp://127.0.0.1:30010'" Enter 26 | 27 | tmux split-window -h -t karyon_p2p 28 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 29 | -l 'tcp://127.0.0.1:30005' -d '30015' -b 'tcp://127.0.0.1:30010'" Enter 30 | 31 | tmux select-layout even-horizontal 32 | 33 | sleep 3; 34 | 35 | tmux select-pane -t karyon_p2p:0.0 36 | 37 | tmux split-window -v -t karyon_p2p 38 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 39 | -b 'tcp://127.0.0.1:30010' -b 'tcp://127.0.0.1:30011'" Enter 40 | 41 | tmux select-pane -t karyon_p2p:0.2 42 | 43 | tmux split-window -v -t karyon_p2p 44 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 45 | -b 'tcp://127.0.0.1:30010' -b 'tcp://127.0.0.1:30012' -p 'tcp://127.0.0.1:30005'" Enter 46 | 47 | tmux select-pane -t karyon_p2p:0.4 48 | 49 | tmux split-window -v -t karyon_p2p 50 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 51 | -b 'tcp://127.0.0.1:30010' -b 'tcp://127.0.0.1:30013'" Enter 52 | 53 | tmux select-pane -t karyon_p2p:0.6 54 | 55 | tmux split-window -v -t karyon_p2p 56 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 57 | -b 'tcp://127.0.0.1:30010' -b 'tcp://127.0.0.1:30014'" Enter 58 | 59 | tmux select-pane -t karyon_p2p:0.8 60 | 61 | tmux split-window -v -t karyon_p2p 62 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 63 | -b 'tcp://127.0.0.1:30010' -b 'tcp://127.0.0.1:30015'" Enter 64 | 65 | tmux select-pane -t karyon_p2p:0.10 66 | 67 | tmux split-window -v -t karyon_p2p 68 | tmux send-keys -t karyon_p2p "../../target/release/examples/peer\ 69 | -b 'tcp://127.0.0.1:30010' -b 'tcp://127.0.0.1:30015' -b 'tcp://127.0.0.1:30011'" Enter 70 | 71 | # tmux set-window-option -t karyon_p2p synchronize-panes on 72 | 73 | tmux attach -t karyon_p2p 74 | -------------------------------------------------------------------------------- /p2p/examples/peer.rs: 
-------------------------------------------------------------------------------- 1 | mod shared; 2 | 3 | use std::sync::Arc; 4 | 5 | use clap::Parser; 6 | use smol::{channel, Executor}; 7 | 8 | use karyon_p2p::{ 9 | endpoint::{Endpoint, Port}, 10 | keypair::{KeyPair, KeyPairType}, 11 | Backend, Config, 12 | }; 13 | 14 | use shared::run_executor; 15 | 16 | #[derive(Parser)] 17 | #[command(author, version, about, long_about = None)] 18 | struct Cli { 19 | /// Optional list of bootstrap peers to start the seeding process. 20 | #[arg(short)] 21 | bootstrap_peers: Vec, 22 | 23 | /// Optional list of peer endpoints for manual connections. 24 | #[arg(short)] 25 | peer_endpoints: Vec, 26 | 27 | /// Optional endpoint for accepting incoming connections. 28 | #[arg(short)] 29 | listen_endpoint: Option, 30 | 31 | /// Optional TCP/UDP port for the discovery service. 32 | #[arg(short)] 33 | discovery_port: Option, 34 | } 35 | 36 | fn main() { 37 | env_logger::init(); 38 | let cli = Cli::parse(); 39 | 40 | let key_pair = KeyPair::generate(&KeyPairType::Ed25519); 41 | 42 | // Create the configuration for the backend. 43 | let config = Config { 44 | listen_endpoint: cli.listen_endpoint, 45 | peer_endpoints: cli.peer_endpoints, 46 | bootstrap_peers: cli.bootstrap_peers, 47 | discovery_port: cli.discovery_port.unwrap_or(0), 48 | ..Default::default() 49 | }; 50 | 51 | // Create a new Executor 52 | let ex = Arc::new(Executor::new()); 53 | 54 | // Create a new Backend 55 | let backend = Backend::new(&key_pair, config, ex.clone().into()); 56 | 57 | let (ctrlc_s, ctrlc_r) = channel::unbounded(); 58 | let handle = move || ctrlc_s.try_send(()).expect("Send ctrlc signal"); 59 | ctrlc::set_handler(handle).expect("ctrlc set handler"); 60 | 61 | run_executor( 62 | async { 63 | // Run the backend 64 | backend.run().await.expect("Run the backend"); 65 | 66 | // Wait for ctrlc signal 67 | ctrlc_r.recv().await.expect("Receive ctrlc signal"); 68 | 69 | // Shutdown the backend 70 | backend.shutdown().await; 71 | }, 72 | ex, 73 | ); 74 | } 75 | -------------------------------------------------------------------------------- /p2p/examples/shared/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{io, num::NonZeroUsize, sync::Arc, thread}; 2 | 3 | use blocking::unblock; 4 | use easy_parallel::Parallel; 5 | use smol::{channel, future, future::Future, Executor}; 6 | 7 | #[allow(dead_code)] 8 | pub async fn read_line_async() -> Result { 9 | unblock(|| { 10 | let mut input = String::new(); 11 | std::io::stdin().read_line(&mut input)?; 12 | Ok(input) 13 | }) 14 | .await 15 | } 16 | 17 | /// Returns an estimate of the default amount of parallelism a program should use. 18 | /// see `std::thread::available_parallelism` 19 | pub fn available_parallelism() -> usize { 20 | thread::available_parallelism() 21 | .map(NonZeroUsize::get) 22 | .unwrap_or(1) 23 | } 24 | 25 | /// Run a multi-threaded executor 26 | pub fn run_executor(main_future: impl Future, ex: Arc>) { 27 | let (signal, shutdown) = channel::unbounded::<()>(); 28 | 29 | let num_threads = available_parallelism(); 30 | 31 | Parallel::new() 32 | .each(0..(num_threads), |_| { 33 | future::block_on(ex.run(shutdown.recv())) 34 | }) 35 | // Run the main future on the current thread. 
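// Dropping `signal` below closes the channel, so each worker's
// `shutdown.recv()` future completes and the executor threads return.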
36 | .finish(|| { 37 | future::block_on(async { 38 | main_future.await; 39 | drop(signal); 40 | }) 41 | }); 42 | } 43 | -------------------------------------------------------------------------------- /p2p/examples/tokio-example/.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /p2p/examples/tokio-example/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "tokio-example" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [workspace] 7 | 8 | [dependencies] 9 | karyon_p2p = { path = "../../", default-features = false, features = ["tokio"] } 10 | async-channel = "2.3" 11 | tokio = { version = "1.45", features = ["full"] } 12 | clap = { version = "4.5", features = ["derive"] } 13 | ctrlc = "3.4" 14 | env_logger = "0.11" 15 | -------------------------------------------------------------------------------- /p2p/examples/tokio-example/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use clap::Parser; 4 | 5 | use karyon_p2p::{ 6 | endpoint::{Endpoint, Port}, 7 | keypair::{KeyPair, KeyPairType}, 8 | Backend, Config, 9 | }; 10 | 11 | /// Returns an estimate of the default amount of parallelism a program should use. 12 | /// see `std::thread::available_parallelism` 13 | pub fn available_parallelism() -> usize { 14 | std::thread::available_parallelism() 15 | .map(std::num::NonZeroUsize::get) 16 | .unwrap_or(1) 17 | } 18 | 19 | #[derive(Parser)] 20 | #[command(author, version, about, long_about = None)] 21 | struct Cli { 22 | /// Optional list of bootstrap peers to start the seeding process. 23 | #[arg(short)] 24 | bootstrap_peers: Vec, 25 | 26 | /// Optional list of peer endpoints for manual connections. 27 | #[arg(short)] 28 | peer_endpoints: Vec, 29 | 30 | /// Optional endpoint for accepting incoming connections. 31 | #[arg(short)] 32 | listen_endpoint: Option, 33 | 34 | /// Optional TCP/UDP port for the discovery service. 35 | #[arg(short)] 36 | discovery_port: Option, 37 | } 38 | 39 | fn main() { 40 | env_logger::init(); 41 | let cli = Cli::parse(); 42 | 43 | let key_pair = KeyPair::generate(&KeyPairType::Ed25519); 44 | 45 | // Create the configuration for the backend. 
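// Fields left unset fall back to `Config::default()` (for example, 12 inbound
// and 12 outbound slots, and TLS disabled).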
46 | let config = Config { 47 | listen_endpoint: cli.listen_endpoint, 48 | peer_endpoints: cli.peer_endpoints, 49 | bootstrap_peers: cli.bootstrap_peers, 50 | discovery_port: cli.discovery_port.unwrap_or(0), 51 | ..Default::default() 52 | }; 53 | 54 | // Create a new tokio runtime 55 | let rt = Arc::new( 56 | tokio::runtime::Builder::new_multi_thread() 57 | .worker_threads(available_parallelism()) 58 | .enable_all() 59 | .build() 60 | .expect("Build a new tokio runtime"), 61 | ); 62 | 63 | // Create a new Backend 64 | let backend = Backend::new(&key_pair, config, rt.clone().into()); 65 | 66 | let (ctrlc_s, ctrlc_r) = async_channel::unbounded(); 67 | let handle = move || ctrlc_s.try_send(()).expect("Send ctrlc signal"); 68 | ctrlc::set_handler(handle).expect("ctrlc set handler"); 69 | 70 | rt.block_on(async { 71 | // Run the backend 72 | backend.run().await.expect("Run the backend"); 73 | 74 | // Wait for ctrlc signal 75 | ctrlc_r.recv().await.expect("Receive ctrlc signal"); 76 | 77 | // Shutdown the backend 78 | backend.shutdown().await; 79 | }); 80 | } 81 | -------------------------------------------------------------------------------- /p2p/src/backend.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, sync::Arc}; 2 | 3 | use log::info; 4 | 5 | use karyon_core::{async_runtime::Executor, crypto::KeyPair}; 6 | use karyon_net::Endpoint; 7 | 8 | use crate::{ 9 | config::Config, conn_queue::ConnQueue, discovery::Discovery, monitor::Monitor, peer::Peer, 10 | peer_pool::PeerPool, protocol::Protocol, PeerID, Result, 11 | }; 12 | 13 | /// Backend serves as the central entry point for initiating and managing 14 | /// the P2P network. 15 | pub struct Backend { 16 | /// The Configuration for the P2P network. 17 | config: Arc, 18 | 19 | /// Identity Key pair 20 | key_pair: KeyPair, 21 | 22 | /// Peer ID 23 | peer_id: PeerID, 24 | 25 | /// Responsible for network and system monitoring. 26 | monitor: Arc, 27 | 28 | /// Discovery instance. 29 | discovery: Arc, 30 | 31 | /// PeerPool instance. 32 | peer_pool: Arc, 33 | } 34 | 35 | impl Backend { 36 | /// Creates a new Backend. 37 | pub fn new(key_pair: &KeyPair, config: Config, ex: Executor) -> Arc { 38 | let config = Arc::new(config); 39 | let monitor = Arc::new(Monitor::new(config.clone())); 40 | let conn_queue = ConnQueue::new(); 41 | 42 | let peer_id = PeerID::try_from(key_pair.public()) 43 | .expect("Derive a peer id from the provided key pair."); 44 | info!("PeerID: {}", peer_id); 45 | 46 | let peer_pool = PeerPool::new( 47 | &peer_id, 48 | conn_queue.clone(), 49 | config.clone(), 50 | monitor.clone(), 51 | ex.clone(), 52 | ); 53 | 54 | let discovery = Discovery::new( 55 | key_pair, 56 | &peer_id, 57 | conn_queue, 58 | config.clone(), 59 | monitor.clone(), 60 | ex, 61 | ); 62 | 63 | Arc::new(Self { 64 | key_pair: key_pair.clone(), 65 | peer_id, 66 | monitor, 67 | discovery, 68 | config, 69 | peer_pool, 70 | }) 71 | } 72 | 73 | /// Run the Backend, starting the PeerPool and Discovery instances. 74 | pub async fn run(self: &Arc) -> Result<()> { 75 | self.peer_pool.start().await?; 76 | self.discovery.start().await?; 77 | Ok(()) 78 | } 79 | 80 | /// Attach a custom protocol to the network 81 | pub async fn attach_protocol( 82 | &self, 83 | c: impl Fn(Arc) -> Arc + Send + Sync + 'static, 84 | ) -> Result<()> { 85 | self.peer_pool.attach_protocol::
<P>
(Box::new(c)).await 86 | } 87 | 88 | /// Returns the number of currently connected peers. 89 | pub async fn peers(&self) -> usize { 90 | self.peer_pool.peers_len().await 91 | } 92 | 93 | /// Returns the `Config`. 94 | pub fn config(&self) -> Arc { 95 | self.config.clone() 96 | } 97 | 98 | /// Returns the `PeerID`. 99 | pub fn peer_id(&self) -> &PeerID { 100 | &self.peer_id 101 | } 102 | 103 | /// Returns the `KeyPair`. 104 | pub fn key_pair(&self) -> &KeyPair { 105 | &self.key_pair 106 | } 107 | 108 | /// Returns a map of inbound connected peers with their endpoints. 109 | pub async fn inbound_peers(&self) -> HashMap { 110 | self.peer_pool.inbound_peers().await 111 | } 112 | 113 | /// Returns a map of outbound connected peers with their endpoints. 114 | pub async fn outbound_peers(&self) -> HashMap { 115 | self.peer_pool.outbound_peers().await 116 | } 117 | 118 | /// Returns the monitor to receive system events. 119 | pub fn monitor(&self) -> Arc { 120 | self.monitor.clone() 121 | } 122 | 123 | /// Shuts down the Backend. 124 | pub async fn shutdown(&self) { 125 | self.discovery.shutdown().await; 126 | self.peer_pool.shutdown().await; 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /p2p/src/codec.rs: -------------------------------------------------------------------------------- 1 | use karyon_core::util::{decode, encode, encode_into_slice}; 2 | 3 | use karyon_net::codec::{ByteBuffer, Codec, Decoder, Encoder, LengthCodec}; 4 | 5 | use crate::{ 6 | message::{NetMsg, RefreshMsg}, 7 | Error, Result, 8 | }; 9 | 10 | #[derive(Clone)] 11 | pub struct NetMsgCodec { 12 | inner_codec: LengthCodec, 13 | } 14 | 15 | impl NetMsgCodec { 16 | pub fn new() -> Self { 17 | Self { 18 | inner_codec: LengthCodec {}, 19 | } 20 | } 21 | } 22 | 23 | impl Codec for NetMsgCodec { 24 | type Message = NetMsg; 25 | type Error = Error; 26 | } 27 | 28 | impl Encoder for NetMsgCodec { 29 | type EnMessage = NetMsg; 30 | type EnError = Error; 31 | fn encode(&self, src: &Self::EnMessage, dst: &mut ByteBuffer) -> Result { 32 | let src = encode(src)?; 33 | Ok(self.inner_codec.encode(&src, dst)?) 34 | } 35 | } 36 | 37 | impl Decoder for NetMsgCodec { 38 | type DeMessage = NetMsg; 39 | type DeError = Error; 40 | fn decode(&self, src: &mut ByteBuffer) -> Result> { 41 | match self.inner_codec.decode(src)? { 42 | Some((n, s)) => { 43 | let (m, _) = decode::(&s)?; 44 | Ok(Some((n, m))) 45 | } 46 | None => Ok(None), 47 | } 48 | } 49 | } 50 | 51 | #[derive(Clone)] 52 | pub struct RefreshMsgCodec {} 53 | 54 | impl Codec for RefreshMsgCodec { 55 | type Message = RefreshMsg; 56 | type Error = Error; 57 | } 58 | 59 | impl Encoder for RefreshMsgCodec { 60 | type EnMessage = RefreshMsg; 61 | type EnError = Error; 62 | fn encode(&self, src: &Self::EnMessage, dst: &mut ByteBuffer) -> Result { 63 | let n = encode_into_slice(src, dst.as_mut())?; 64 | Ok(n) 65 | } 66 | } 67 | 68 | impl Decoder for RefreshMsgCodec { 69 | type DeMessage = RefreshMsg; 70 | type DeError = Error; 71 | fn decode(&self, src: &mut ByteBuffer) -> Result> { 72 | let (m, n) = decode::(src.as_ref())?; 73 | Ok(Some((n, m))) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /p2p/src/config.rs: -------------------------------------------------------------------------------- 1 | use karyon_net::{Endpoint, Port}; 2 | 3 | use crate::Version; 4 | 5 | /// the Configuration for the P2P network. 6 | pub struct Config { 7 | /// Represents the network version. 
8 | pub version: Version, 9 | 10 | /// Enable monitor 11 | pub enable_monitor: bool, 12 | 13 | ///////////////// 14 | // PeerPool 15 | //////////////// 16 | /// Timeout duration for the handshake with new peers, in seconds. 17 | pub handshake_timeout: u64, 18 | /// Interval at which the ping protocol sends ping messages to a peer to 19 | /// maintain connections, in seconds. 20 | pub ping_interval: u64, 21 | /// Timeout duration for receiving the pong message corresponding to the 22 | /// sent ping message, in seconds. 23 | pub ping_timeout: u64, 24 | /// The maximum number of retries for outbound connection establishment. 25 | pub max_connect_retries: usize, 26 | 27 | ///////////////// 28 | // DISCOVERY 29 | //////////////// 30 | /// A list of bootstrap peers for the seeding process. 31 | pub bootstrap_peers: Vec, 32 | /// An optional listening endpoint to accept incoming connections. 33 | pub listen_endpoint: Option, 34 | /// A list of endpoints representing peers that the `Discovery` will 35 | /// manually connect to. 36 | pub peer_endpoints: Vec, 37 | /// The number of available inbound slots for incoming connections. 38 | pub inbound_slots: usize, 39 | /// The number of available outbound slots for outgoing connections. 40 | pub outbound_slots: usize, 41 | /// TCP/UDP port for lookup and refresh processes. 42 | pub discovery_port: Port, 43 | /// Time interval, in seconds, at which the Discovery restarts the 44 | /// seeding process. 45 | pub seeding_interval: u64, 46 | 47 | ///////////////// 48 | // LOOKUP 49 | //////////////// 50 | /// The number of available inbound slots for incoming connections during 51 | /// the lookup process. 52 | pub lookup_inbound_slots: usize, 53 | /// The number of available outbound slots for outgoing connections during 54 | /// the lookup process. 55 | pub lookup_outbound_slots: usize, 56 | /// Timeout duration for a peer response during the lookup process, in 57 | /// seconds. 58 | pub lookup_response_timeout: u64, 59 | /// Maximum allowable time for a live connection with a peer during the 60 | /// lookup process, in seconds. 61 | pub lookup_connection_lifespan: u64, 62 | /// The maximum number of retries for outbound connection establishment 63 | /// during the lookup process. 64 | pub lookup_connect_retries: usize, 65 | 66 | ///////////////// 67 | // REFRESH 68 | //////////////// 69 | /// Interval at which the table refreshes its entries, in seconds. 70 | pub refresh_interval: u64, 71 | /// Timeout duration for a peer response during the table refresh process, 72 | /// in seconds. 73 | pub refresh_response_timeout: u64, 74 | /// The maximum number of retries for outbound connection establishment 75 | /// during the refresh process. 76 | pub refresh_connect_retries: usize, 77 | 78 | /// Enables TLS for all connections. 
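/// As noted in the README's "Network Security" section, the TLS setup for the
/// p2p network is still considered unstable; this flag is disabled by default.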
79 | pub enable_tls: bool, 80 | } 81 | 82 | impl Default for Config { 83 | fn default() -> Self { 84 | Config { 85 | version: "0.1.0".parse().unwrap(), 86 | 87 | enable_monitor: false, 88 | 89 | handshake_timeout: 2, 90 | ping_interval: 20, 91 | ping_timeout: 2, 92 | 93 | bootstrap_peers: vec![], 94 | listen_endpoint: None, 95 | peer_endpoints: vec![], 96 | inbound_slots: 12, 97 | outbound_slots: 12, 98 | max_connect_retries: 3, 99 | discovery_port: 0, 100 | seeding_interval: 60, 101 | 102 | lookup_inbound_slots: 20, 103 | lookup_outbound_slots: 20, 104 | lookup_response_timeout: 1, 105 | lookup_connection_lifespan: 3, 106 | lookup_connect_retries: 3, 107 | 108 | refresh_interval: 1800, 109 | refresh_response_timeout: 1, 110 | refresh_connect_retries: 3, 111 | 112 | enable_tls: false, 113 | } 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /p2p/src/conn_queue.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::VecDeque, sync::Arc}; 2 | 3 | use karyon_core::{async_runtime::lock::Mutex, async_util::CondVar}; 4 | 5 | use crate::{connection::ConnDirection, connection::Connection, ConnRef, Result}; 6 | 7 | /// Connection queue 8 | pub struct ConnQueue { 9 | queue: Mutex>, 10 | conn_available: CondVar, 11 | } 12 | 13 | impl ConnQueue { 14 | pub fn new() -> Arc { 15 | Arc::new(Self { 16 | queue: Mutex::new(VecDeque::new()), 17 | conn_available: CondVar::new(), 18 | }) 19 | } 20 | 21 | /// Handle a connection by pushing it into the queue and wait for the disconnect signal 22 | pub async fn handle(&self, conn: ConnRef, direction: ConnDirection) -> Result<()> { 23 | let endpoint = conn.peer_endpoint()?; 24 | 25 | let (disconnect_tx, disconnect_rx) = async_channel::bounded(1); 26 | let new_conn = Connection::new(conn, disconnect_tx, direction, endpoint); 27 | 28 | // Push a new conn to the queue 29 | self.queue.lock().await.push_back(new_conn); 30 | self.conn_available.signal(); 31 | 32 | // Wait for the disconnect signal from the connection handler 33 | if let Ok(result) = disconnect_rx.recv().await { 34 | return result; 35 | } 36 | 37 | Ok(()) 38 | } 39 | 40 | /// Waits for the next connection in the queue 41 | pub async fn next(&self) -> Connection { 42 | let mut queue = self.queue.lock().await; 43 | while queue.is_empty() { 44 | queue = self.conn_available.wait(queue).await; 45 | } 46 | queue.pop_front().unwrap() 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /p2p/src/connection.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, fmt, sync::Arc}; 2 | 3 | use async_channel::Sender; 4 | use bincode::Encode; 5 | 6 | use karyon_core::{ 7 | event::{EventEmitter, EventListener}, 8 | util::encode, 9 | }; 10 | 11 | use karyon_net::Endpoint; 12 | 13 | use crate::{ 14 | message::{NetMsg, NetMsgCmd, ProtocolMsg, ShutdownMsg}, 15 | protocol::{Protocol, ProtocolEvent, ProtocolID}, 16 | ConnRef, Error, Result, 17 | }; 18 | 19 | /// Defines the direction of a network connection. 
20 | #[derive(Clone, Debug)] 21 | pub enum ConnDirection { 22 | Inbound, 23 | Outbound, 24 | } 25 | 26 | impl fmt::Display for ConnDirection { 27 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 28 | match self { 29 | ConnDirection::Inbound => write!(f, "Inbound"), 30 | ConnDirection::Outbound => write!(f, "Outbound"), 31 | } 32 | } 33 | } 34 | 35 | pub struct Connection { 36 | pub(crate) direction: ConnDirection, 37 | conn: ConnRef, 38 | disconnect_signal: Sender>, 39 | /// `EventEmitter` responsible for sending events to the registered protocols. 40 | protocol_events: Arc>, 41 | pub(crate) remote_endpoint: Endpoint, 42 | listeners: HashMap>, 43 | } 44 | 45 | impl Connection { 46 | pub fn new( 47 | conn: ConnRef, 48 | signal: Sender>, 49 | direction: ConnDirection, 50 | remote_endpoint: Endpoint, 51 | ) -> Self { 52 | Self { 53 | conn, 54 | direction, 55 | protocol_events: EventEmitter::new(), 56 | disconnect_signal: signal, 57 | remote_endpoint, 58 | listeners: HashMap::new(), 59 | } 60 | } 61 | 62 | pub async fn send(&self, protocol_id: ProtocolID, msg: T) -> Result<()> { 63 | let payload = encode(&msg)?; 64 | 65 | let proto_msg = ProtocolMsg { 66 | protocol_id, 67 | payload: payload.to_vec(), 68 | }; 69 | 70 | let msg = NetMsg::new(NetMsgCmd::Protocol, &proto_msg)?; 71 | self.conn.send(msg).await 72 | } 73 | 74 | pub async fn recv(&self) -> Result { 75 | match self.listeners.get(&P::id()) { 76 | Some(l) => l.recv().await.map_err(Error::from), 77 | None => Err(Error::UnsupportedProtocol(P::id())), 78 | } 79 | } 80 | 81 | /// Registers a listener for the given Protocol `P`. 82 | pub async fn register_protocol(&mut self, protocol_id: String) { 83 | let listener = self.protocol_events.register(&protocol_id).await; 84 | self.listeners.insert(protocol_id, listener); 85 | } 86 | 87 | pub async fn emit_msg(&self, id: &ProtocolID, event: &ProtocolEvent) -> Result<()> { 88 | self.protocol_events.emit_by_topic(id, event).await?; 89 | Ok(()) 90 | } 91 | 92 | pub async fn recv_inner(&self) -> Result { 93 | self.conn.recv().await 94 | } 95 | 96 | pub async fn send_inner(&self, msg: NetMsg) -> Result<()> { 97 | self.conn.send(msg).await 98 | } 99 | 100 | pub async fn disconnect(&self, res: Result<()>) -> Result<()> { 101 | self.protocol_events.clear().await; 102 | self.disconnect_signal.send(res).await?; 103 | 104 | let m = NetMsg::new(NetMsgCmd::Shutdown, ShutdownMsg(0)).expect("Create shutdown message"); 105 | self.conn.send(m).await?; 106 | 107 | Ok(()) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /p2p/src/connector.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, sync::Arc}; 2 | 3 | use log::{error, trace, warn}; 4 | 5 | use karyon_core::{ 6 | async_runtime::Executor, 7 | async_util::{Backoff, TaskGroup, TaskResult}, 8 | crypto::KeyPair, 9 | }; 10 | use karyon_net::{tcp, tls, Endpoint}; 11 | 12 | use crate::{ 13 | codec::NetMsgCodec, 14 | monitor::{ConnEvent, Monitor}, 15 | slots::ConnectionSlots, 16 | tls_config::tls_client_config, 17 | ConnRef, Error, PeerID, Result, 18 | }; 19 | 20 | static DNS_NAME: &str = "karyontech.net"; 21 | 22 | /// Responsible for creating outbound connections with other peers. 23 | pub struct Connector { 24 | /// Identity Key pair 25 | key_pair: KeyPair, 26 | 27 | /// Managing spawned tasks. 28 | task_group: TaskGroup, 29 | 30 | /// Manages available outbound slots. 
31 | connection_slots: Arc, 32 | 33 | /// The maximum number of retries allowed before successfully 34 | /// establishing a connection. 35 | max_retries: usize, 36 | 37 | /// Enables secure connection. 38 | enable_tls: bool, 39 | 40 | /// Responsible for network and system monitoring. 41 | monitor: Arc, 42 | } 43 | 44 | impl Connector { 45 | /// Creates a new Connector 46 | pub fn new( 47 | key_pair: &KeyPair, 48 | max_retries: usize, 49 | connection_slots: Arc, 50 | enable_tls: bool, 51 | monitor: Arc, 52 | ex: Executor, 53 | ) -> Arc { 54 | Arc::new(Self { 55 | key_pair: key_pair.clone(), 56 | max_retries, 57 | task_group: TaskGroup::with_executor(ex), 58 | monitor, 59 | connection_slots, 60 | enable_tls, 61 | }) 62 | } 63 | 64 | /// Shuts down the connector 65 | pub async fn shutdown(&self) { 66 | self.task_group.cancel().await; 67 | } 68 | 69 | /// Establish a connection to the specified `endpoint`. If the connection 70 | /// attempt fails, it performs a backoff and retries until the maximum allowed 71 | /// number of retries is exceeded. On a successful connection, it returns a 72 | /// `Conn` instance. 73 | /// 74 | /// This method will block until it finds an available slot. 75 | pub async fn connect(&self, endpoint: &Endpoint, peer_id: &Option) -> Result { 76 | self.connection_slots.wait_for_slot().await; 77 | self.connection_slots.add(); 78 | 79 | let mut retry = 0; 80 | let backoff = Backoff::new(500, 2000); 81 | while retry < self.max_retries { 82 | match self.dial(endpoint, peer_id).await { 83 | Ok(conn) => { 84 | self.monitor 85 | .notify(ConnEvent::Connected(endpoint.clone())) 86 | .await; 87 | return Ok(conn); 88 | } 89 | Err(err) => { 90 | error!("Failed to establish a connection to {endpoint}: {err}"); 91 | } 92 | } 93 | 94 | self.monitor 95 | .notify(ConnEvent::ConnectRetried(endpoint.clone())) 96 | .await; 97 | 98 | backoff.sleep().await; 99 | 100 | warn!("try to reconnect {endpoint}"); 101 | retry += 1; 102 | } 103 | 104 | self.monitor 105 | .notify(ConnEvent::ConnectFailed(endpoint.clone())) 106 | .await; 107 | 108 | self.connection_slots.remove().await; 109 | Err(Error::Timeout) 110 | } 111 | 112 | /// Establish a connection to the given `endpoint`. For each new connection, 113 | /// it invokes the provided `callback`, and pass the connection to the callback. 
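///
/// A hedged usage sketch (the endpoint value and the callback body are
/// assumptions for illustration):
///
/// ```ignore
/// connector
///     .connect_with_cback(&endpoint, &None, |conn| async move {
///         // drive the new connection here
///         Ok(())
///     })
///     .await?;
/// ```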
114 | pub async fn connect_with_cback( 115 | self: &Arc, 116 | endpoint: &Endpoint, 117 | peer_id: &Option, 118 | callback: impl FnOnce(ConnRef) -> Fut + Send + 'static, 119 | ) -> Result<()> 120 | where 121 | Fut: Future> + Send + 'static, 122 | { 123 | let conn = self.connect(endpoint, peer_id).await?; 124 | 125 | let endpoint = endpoint.clone(); 126 | let on_disconnect = { 127 | let this = self.clone(); 128 | |res| async move { 129 | if let TaskResult::Completed(Err(err)) = res { 130 | trace!("Outbound connection dropped: {err}"); 131 | } 132 | this.monitor 133 | .notify(ConnEvent::Disconnected(endpoint.clone())) 134 | .await; 135 | this.connection_slots.remove().await; 136 | } 137 | }; 138 | 139 | self.task_group.spawn(callback(conn), on_disconnect); 140 | 141 | Ok(()) 142 | } 143 | 144 | async fn dial(&self, endpoint: &Endpoint, peer_id: &Option) -> Result { 145 | if self.enable_tls { 146 | if !endpoint.is_tcp() && !endpoint.is_tls() { 147 | return Err(Error::UnsupportedEndpoint(endpoint.to_string())); 148 | } 149 | 150 | let tls_config = tls::ClientTlsConfig { 151 | tcp_config: Default::default(), 152 | client_config: tls_client_config(&self.key_pair, peer_id.clone())?, 153 | dns_name: DNS_NAME.to_string(), 154 | }; 155 | let c = tls::dial(endpoint, tls_config, NetMsgCodec::new()).await?; 156 | Ok(Box::new(c)) 157 | } else { 158 | if !endpoint.is_tcp() { 159 | return Err(Error::UnsupportedEndpoint(endpoint.to_string())); 160 | } 161 | 162 | let c = tcp::dial(endpoint, tcp::TcpConfig::default(), NetMsgCodec::new()).await?; 163 | Ok(Box::new(c)) 164 | } 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /p2p/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error as ThisError; 2 | 3 | pub type Result = std::result::Result; 4 | 5 | /// Represents karyon's p2p Error. 
6 | #[derive(ThisError, Debug)] 7 | pub enum Error { 8 | #[error(transparent)] 9 | IO(#[from] std::io::Error), 10 | 11 | #[error("Unsupported Protocol Error: {0}")] 12 | UnsupportedProtocol(String), 13 | 14 | #[error("Unsupported Endpoint: {0}")] 15 | UnsupportedEndpoint(String), 16 | 17 | #[error("PeerID Try From PublicKey Error")] 18 | PeerIDTryFromPublicKey, 19 | 20 | #[error("PeerID Try From String Error")] 21 | PeerIDTryFromString, 22 | 23 | #[error("Invalid Message Error: {0}")] 24 | InvalidMsg(String), 25 | 26 | #[error("Incompatible Peer")] 27 | IncompatiblePeer, 28 | 29 | #[error("Timeout Error")] 30 | Timeout, 31 | 32 | #[error(transparent)] 33 | ParseIntError(#[from] std::num::ParseIntError), 34 | 35 | #[error(transparent)] 36 | ParseIntError2(#[from] base64::DecodeError), 37 | 38 | #[error(transparent)] 39 | ParseFloatError(#[from] std::num::ParseFloatError), 40 | 41 | #[error(transparent)] 42 | SemverError(#[from] semver::Error), 43 | 44 | #[error("Parse Error: {0}")] 45 | ParseError(String), 46 | 47 | #[error("Incompatible Version Error: {0}")] 48 | IncompatibleVersion(String), 49 | 50 | #[error("Config Error: {0}")] 51 | Config(String), 52 | 53 | #[error("Peer Shutdown")] 54 | PeerShutdown, 55 | 56 | #[error("Invalid Pong Msg")] 57 | InvalidPongMsg, 58 | 59 | #[error("Discovery error: {0}")] 60 | Discovery(String), 61 | 62 | #[error("Lookup error: {0}")] 63 | Lookup(String), 64 | 65 | #[error("Peer Already Connected")] 66 | PeerAlreadyConnected, 67 | 68 | #[error("Yasna Error: {0}")] 69 | Yasna(#[from] yasna::ASN1Error), 70 | 71 | #[error("X509 Parser Error: {0}")] 72 | X509Parser(#[from] x509_parser::error::X509Error), 73 | 74 | #[error("Rcgen Error: {0}")] 75 | Rcgen(#[from] rcgen::Error), 76 | 77 | #[cfg(feature = "smol")] 78 | #[error("Tls Error: {0}")] 79 | Rustls(#[from] futures_rustls::rustls::Error), 80 | 81 | #[cfg(feature = "tokio")] 82 | #[error("Tls Error: {0}")] 83 | Rustls(#[from] tokio_rustls::rustls::Error), 84 | 85 | #[error("Invalid DNS Name: {0}")] 86 | InvalidDnsNameError(#[from] rustls_pki_types::InvalidDnsNameError), 87 | 88 | #[error("Channel Send Error: {0}")] 89 | ChannelSend(String), 90 | 91 | #[error(transparent)] 92 | ChannelRecv(#[from] async_channel::RecvError), 93 | 94 | #[error(transparent)] 95 | KaryonCore(#[from] karyon_core::error::Error), 96 | 97 | #[error(transparent)] 98 | KaryonNet(#[from] karyon_net::Error), 99 | 100 | #[error("Other Error: {0}")] 101 | Other(String), 102 | } 103 | 104 | impl From> for Error { 105 | fn from(error: async_channel::SendError) -> Self { 106 | Error::ChannelSend(error.to_string()) 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /p2p/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A lightweight, extensible, and customizable peer-to-peer (p2p) network stack. 2 | //! 3 | //! # Example 4 | //! ``` 5 | //! use std::sync::Arc; 6 | //! 7 | //! use easy_parallel::Parallel; 8 | //! use smol::{future, Executor}; 9 | //! 10 | //! use karyon_p2p::{Backend, Config, PeerID, keypair::{KeyPair, KeyPairType}}; 11 | //! 12 | //! // Generate a new keypair for the peer 13 | //! let key_pair = KeyPair::generate(&KeyPairType::Ed25519); 14 | //! 15 | //! // Create the configuration for the backend. 16 | //! let mut config = Config::default(); 17 | //! 18 | //! // Create a new Executor 19 | //! let ex = Arc::new(Executor::new()); 20 | //! 21 | //! // Create a new Backend 22 | //! 
let backend = Backend::new(&key_pair, config, ex.clone().into()); 23 | //! 24 | //! let task = async { 25 | //! // Run the backend 26 | //! backend.run() 27 | //! .await 28 | //! .expect("start the backend"); 29 | //! 30 | //! // .... 31 | //! 32 | //! // Shutdown the backend 33 | //! backend.shutdown().await; 34 | //! }; 35 | //! 36 | //! future::block_on(ex.run(task)); 37 | //! 38 | //! ``` 39 | //! 40 | mod backend; 41 | mod codec; 42 | mod config; 43 | mod conn_queue; 44 | mod connection; 45 | mod connector; 46 | mod discovery; 47 | mod error; 48 | mod listener; 49 | mod message; 50 | mod peer; 51 | mod peer_pool; 52 | mod protocols; 53 | mod routing_table; 54 | mod slots; 55 | mod tls_config; 56 | mod version; 57 | 58 | /// Responsible for network and system monitoring. 59 | /// [`Read More`](./monitor/struct.Monitor.html) 60 | pub mod monitor; 61 | /// Defines the protocol trait. 62 | /// [`Read More`](./protocol/trait.Protocol.html) 63 | pub mod protocol; 64 | 65 | pub use backend::Backend; 66 | pub use config::Config; 67 | pub use peer::{Peer, PeerID}; 68 | pub use version::Version; 69 | 70 | pub mod endpoint { 71 | pub use karyon_net::{Addr, Endpoint, Port}; 72 | } 73 | 74 | pub mod keypair { 75 | pub use karyon_core::crypto::{KeyPair, KeyPairType, PublicKey, SecretKey}; 76 | } 77 | 78 | pub use error::{Error, Result}; 79 | 80 | type ListenerRef = karyon_net::Listener; 81 | type ConnRef = karyon_net::Conn; 82 | -------------------------------------------------------------------------------- /p2p/src/message.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use bincode::{Decode, Encode}; 4 | 5 | use karyon_core::util::encode; 6 | use karyon_net::{Addr, Port}; 7 | 8 | use crate::{protocol::ProtocolID, routing_table::Entry, version::VersionInt, PeerID, Result}; 9 | 10 | /// Defines the main message in the karyon p2p network. 11 | /// 12 | /// This message structure consists of a header and payload, where the header 13 | /// typically contains essential information about the message, and the payload 14 | /// contains the actual data being transmitted. 15 | #[derive(Decode, Encode, Debug, Clone)] 16 | pub struct NetMsg { 17 | pub header: NetMsgHeader, 18 | pub payload: Vec, 19 | } 20 | 21 | impl NetMsg { 22 | pub fn new(command: NetMsgCmd, t: T) -> Result { 23 | Ok(Self { 24 | header: NetMsgHeader { command }, 25 | payload: encode(&t)?, 26 | }) 27 | } 28 | } 29 | 30 | /// Represents the header of a message. 31 | #[derive(Decode, Encode, Debug, Clone)] 32 | pub struct NetMsgHeader { 33 | pub command: NetMsgCmd, 34 | } 35 | 36 | /// Defines message commands. 37 | #[derive(Decode, Encode, Debug, Clone)] 38 | #[repr(u8)] 39 | pub enum NetMsgCmd { 40 | Version, 41 | Verack, 42 | Protocol, 43 | Shutdown, 44 | 45 | // The following commands are used during the lookup process. 46 | Ping, 47 | Pong, 48 | FindPeer, 49 | Peer, 50 | Peers, 51 | } 52 | 53 | #[derive(Decode, Encode, Debug, Clone)] 54 | pub enum RefreshMsg { 55 | Ping([u8; 32]), 56 | Pong([u8; 32]), 57 | } 58 | 59 | /// Defines a message related to a specific protocol. 60 | #[derive(Decode, Encode, Debug, Clone)] 61 | pub struct ProtocolMsg { 62 | pub protocol_id: ProtocolID, 63 | pub payload: Vec, 64 | } 65 | 66 | /// Version message, providing information about a peer's capabilities. 
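///
/// During the handshake, a `VerMsg` is wrapped in a [`NetMsg`] with the
/// [`NetMsgCmd::Version`] command before being sent. A crate-internal sketch
/// (the `peer_id`, `version`, and `protocols` values are assumed to be
/// prepared by the caller):
///
/// ```ignore
/// let vermsg = VerMsg { peer_id, version, protocols };
/// let msg = NetMsg::new(NetMsgCmd::Version, &vermsg)?;
/// ```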
67 | #[derive(Decode, Encode, Debug, Clone)] 68 | pub struct VerMsg { 69 | pub peer_id: PeerID, 70 | pub version: VersionInt, 71 | pub protocols: HashMap, 72 | } 73 | 74 | /// VerAck message acknowledges the receipt of a Version message. The message 75 | /// consists of the peer ID and an acknowledgment boolean value indicating 76 | /// whether the version is accepted. 77 | #[derive(Decode, Encode, Debug, Clone)] 78 | pub struct VerAckMsg { 79 | pub peer_id: PeerID, 80 | pub ack: bool, 81 | } 82 | 83 | /// Shutdown message. 84 | #[derive(Decode, Encode, Debug, Clone)] 85 | pub struct ShutdownMsg(pub u8); 86 | 87 | /// Ping message with a nonce and version information. 88 | #[derive(Decode, Encode, Debug, Clone)] 89 | pub struct PingMsg { 90 | pub nonce: [u8; 32], 91 | pub version: VersionInt, 92 | } 93 | 94 | /// Ping message with a nonce. 95 | #[derive(Decode, Encode, Debug)] 96 | pub struct PongMsg(pub [u8; 32]); 97 | 98 | /// FindPeer message used to find a specific peer. 99 | #[derive(Decode, Encode, Debug)] 100 | pub struct FindPeerMsg(pub PeerID); 101 | 102 | /// PeerMsg containing information about a peer. 103 | #[derive(Decode, Encode, Debug, Clone, PartialEq, Eq)] 104 | pub struct PeerMsg { 105 | pub peer_id: PeerID, 106 | pub addr: Addr, 107 | pub port: Port, 108 | pub discovery_port: Port, 109 | } 110 | 111 | /// PeersMsg a list of `PeerMsg`. 112 | #[derive(Decode, Encode, Debug)] 113 | pub struct PeersMsg { 114 | pub peers: Vec, 115 | } 116 | 117 | impl From for PeerMsg { 118 | fn from(entry: Entry) -> PeerMsg { 119 | PeerMsg { 120 | peer_id: PeerID(entry.key), 121 | addr: entry.addr, 122 | port: entry.port, 123 | discovery_port: entry.discovery_port, 124 | } 125 | } 126 | } 127 | 128 | impl From for Entry { 129 | fn from(peer: PeerMsg) -> Entry { 130 | Entry { 131 | key: peer.peer_id.0, 132 | addr: peer.addr, 133 | port: peer.port, 134 | discovery_port: peer.discovery_port, 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /p2p/src/monitor/event.rs: -------------------------------------------------------------------------------- 1 | use karyon_net::Endpoint; 2 | 3 | use crate::PeerID; 4 | 5 | /// Defines connection-related events. 6 | #[derive(Clone, Debug)] 7 | pub enum ConnEvent { 8 | Connected(Endpoint), 9 | ConnectRetried(Endpoint), 10 | ConnectFailed(Endpoint), 11 | Accepted(Endpoint), 12 | AcceptFailed, 13 | Disconnected(Endpoint), 14 | Listening(Endpoint), 15 | ListenFailed(Endpoint), 16 | } 17 | 18 | /// Defines `PP` events. 19 | #[derive(Clone, Debug)] 20 | pub enum PPEvent { 21 | NewPeer(PeerID), 22 | RemovePeer(PeerID), 23 | } 24 | 25 | /// Defines `Discovery` events. 
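///
/// `LookupStarted` and `LookupFailed` carry the endpoint associated with the
/// lookup, `LookupSucceeded` additionally carries the number of entries the
/// lookup returned, and `RefreshStarted` carries no data.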
26 | #[derive(Clone, Debug)] 27 | pub enum DiscvEvent { 28 | LookupStarted(Endpoint), 29 | LookupFailed(Endpoint), 30 | LookupSucceeded(Endpoint, usize), 31 | RefreshStarted, 32 | } 33 | 34 | impl ConnEvent { 35 | pub(super) fn get_endpoint(&self) -> Option<&Endpoint> { 36 | match self { 37 | ConnEvent::Connected(endpoint) 38 | | ConnEvent::ConnectRetried(endpoint) 39 | | ConnEvent::ConnectFailed(endpoint) 40 | | ConnEvent::Accepted(endpoint) 41 | | ConnEvent::Disconnected(endpoint) 42 | | ConnEvent::Listening(endpoint) 43 | | ConnEvent::ListenFailed(endpoint) => Some(endpoint), 44 | ConnEvent::AcceptFailed => None, 45 | } 46 | } 47 | 48 | pub(super) fn variant_name(&self) -> &'static str { 49 | match self { 50 | ConnEvent::Connected(_) => "Connected", 51 | ConnEvent::ConnectRetried(_) => "ConnectRetried", 52 | ConnEvent::ConnectFailed(_) => "ConnectFailed", 53 | ConnEvent::Accepted(_) => "Accepted", 54 | ConnEvent::AcceptFailed => "AcceptFailed", 55 | ConnEvent::Disconnected(_) => "Disconnected", 56 | ConnEvent::Listening(_) => "Listening", 57 | ConnEvent::ListenFailed(_) => "ListenFailed", 58 | } 59 | } 60 | } 61 | 62 | impl PPEvent { 63 | pub(super) fn get_peer_id(&self) -> Option<&PeerID> { 64 | match self { 65 | PPEvent::NewPeer(peer_id) | PPEvent::RemovePeer(peer_id) => Some(peer_id), 66 | } 67 | } 68 | pub(super) fn variant_name(&self) -> &'static str { 69 | match self { 70 | PPEvent::NewPeer(_) => "NewPeer", 71 | PPEvent::RemovePeer(_) => "RemovePeer", 72 | } 73 | } 74 | } 75 | 76 | impl DiscvEvent { 77 | pub(super) fn get_endpoint_and_size(&self) -> (Option<&Endpoint>, Option) { 78 | match self { 79 | DiscvEvent::LookupStarted(endpoint) | DiscvEvent::LookupFailed(endpoint) => { 80 | (Some(endpoint), None) 81 | } 82 | DiscvEvent::LookupSucceeded(endpoint, size) => (Some(endpoint), Some(*size)), 83 | DiscvEvent::RefreshStarted => (None, None), 84 | } 85 | } 86 | 87 | pub(super) fn variant_name(&self) -> &'static str { 88 | match self { 89 | DiscvEvent::LookupStarted(_) => "LookupStarted", 90 | DiscvEvent::LookupFailed(_) => "LookupFailed", 91 | DiscvEvent::LookupSucceeded(_, _) => "LookupSucceeded", 92 | DiscvEvent::RefreshStarted => "RefreshStarted", 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /p2p/src/peer/peer_id.rs: -------------------------------------------------------------------------------- 1 | use base64::{engine::general_purpose::STANDARD, Engine}; 2 | use bincode::{Decode, Encode}; 3 | use rand::{rngs::OsRng, RngCore}; 4 | use sha2::{Digest, Sha256}; 5 | 6 | #[cfg(feature = "serde")] 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use karyon_core::crypto::PublicKey; 10 | 11 | use crate::Error; 12 | 13 | /// Represents a unique identifier for a peer. 14 | #[derive(Clone, Debug, Eq, PartialEq, Hash, Decode, Encode)] 15 | #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] 16 | #[cfg_attr(feature = "serde", serde(into = "String"))] 17 | pub struct PeerID(pub [u8; 32]); 18 | 19 | impl std::fmt::Display for PeerID { 20 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 21 | let id = STANDARD.encode(self.0); 22 | write!(f, "{}", id) 23 | } 24 | } 25 | 26 | impl PeerID { 27 | /// Creates a new PeerID. 28 | pub fn new(src: &[u8]) -> Self { 29 | let mut hasher = Sha256::new(); 30 | hasher.update(src); 31 | Self(hasher.finalize().into()) 32 | } 33 | 34 | /// Generates a random PeerID. 
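///
/// A short usage example:
///
/// ```
/// use karyon_p2p::PeerID;
///
/// let id = PeerID::random();
/// println!("{id}");
/// ```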
35 | pub fn random() -> Self { 36 | let mut id: [u8; 32] = [0; 32]; 37 | OsRng.fill_bytes(&mut id); 38 | Self(id) 39 | } 40 | } 41 | 42 | impl From<[u8; 32]> for PeerID { 43 | fn from(b: [u8; 32]) -> Self { 44 | PeerID(b) 45 | } 46 | } 47 | 48 | impl From for String { 49 | fn from(pid: PeerID) -> Self { 50 | pid.to_string() 51 | } 52 | } 53 | 54 | impl TryFrom for PeerID { 55 | type Error = Error; 56 | 57 | fn try_from(i: String) -> Result { 58 | let result: [u8; 32] = STANDARD 59 | .decode(i)? 60 | .try_into() 61 | .map_err(|_| Error::PeerIDTryFromString)?; 62 | Ok(PeerID(result)) 63 | } 64 | } 65 | 66 | impl TryFrom for PeerID { 67 | type Error = Error; 68 | 69 | fn try_from(pk: PublicKey) -> Result { 70 | let pk: [u8; 32] = pk 71 | .as_bytes() 72 | .try_into() 73 | .map_err(|_| Error::PeerIDTryFromPublicKey)?; 74 | 75 | Ok(PeerID(pk)) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /p2p/src/protocol.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use async_trait::async_trait; 4 | 5 | use karyon_core::event::EventValue; 6 | 7 | use crate::{peer::Peer, version::Version, Result}; 8 | 9 | pub type ProtocolConstructor = dyn Fn(Arc) -> Arc + Send + Sync; 10 | 11 | pub type ProtocolID = String; 12 | 13 | /// Protocol event 14 | #[derive(Debug, Clone)] 15 | pub enum ProtocolEvent { 16 | /// Message event, contains a vector of bytes. 17 | Message(Vec), 18 | /// Shutdown event signals the protocol to gracefully shut down. 19 | Shutdown, 20 | } 21 | 22 | impl EventValue for ProtocolEvent { 23 | fn id() -> &'static str { 24 | "ProtocolEvent" 25 | } 26 | } 27 | 28 | /// The Protocol trait defines the interface for core protocols 29 | /// and custom protocols. 
30 | /// 31 | /// # Example 32 | /// ``` 33 | /// use std::sync::Arc; 34 | /// 35 | /// use async_trait::async_trait; 36 | /// use smol::Executor; 37 | /// 38 | /// use karyon_p2p::{ 39 | /// protocol::{Protocol, ProtocolID, ProtocolEvent}, 40 | /// Backend, PeerID, Config, Version, Error, Peer, 41 | /// keypair::{KeyPair, KeyPairType}, 42 | /// }; 43 | /// 44 | /// pub struct NewProtocol { 45 | /// peer: Arc, 46 | /// } 47 | /// 48 | /// impl NewProtocol { 49 | /// fn new(peer: Arc) -> Arc { 50 | /// Arc::new(Self { 51 | /// peer, 52 | /// }) 53 | /// } 54 | /// } 55 | /// 56 | /// #[async_trait] 57 | /// impl Protocol for NewProtocol { 58 | /// async fn start(self: Arc) -> Result<(), Error> { 59 | /// loop { 60 | /// match self.peer.recv::().await.expect("Receive msg") { 61 | /// ProtocolEvent::Message(msg) => { 62 | /// println!("{:?}", msg); 63 | /// } 64 | /// ProtocolEvent::Shutdown => { 65 | /// break; 66 | /// } 67 | /// } 68 | /// } 69 | /// Ok(()) 70 | /// } 71 | /// 72 | /// fn version() -> Result { 73 | /// "0.2.0, >0.1.0".parse() 74 | /// } 75 | /// 76 | /// fn id() -> ProtocolID { 77 | /// "NEWPROTOCOLID".into() 78 | /// } 79 | /// } 80 | /// 81 | /// async { 82 | /// let key_pair = KeyPair::generate(&KeyPairType::Ed25519); 83 | /// let config = Config::default(); 84 | /// 85 | /// // Create a new Executor 86 | /// let ex = Arc::new(Executor::new()); 87 | /// 88 | /// // Create a new Backend 89 | /// let backend = Backend::new(&key_pair, config, ex.into()); 90 | /// 91 | /// // Attach the NewProtocol 92 | /// let c = move |peer| NewProtocol::new(peer); 93 | /// backend.attach_protocol::(c).await.unwrap(); 94 | /// }; 95 | /// 96 | /// ``` 97 | #[async_trait] 98 | pub trait Protocol: Send + Sync { 99 | /// Start the protocol 100 | async fn start(self: Arc) -> Result<()>; 101 | 102 | /// Returns the version of the protocol. 103 | fn version() -> Result 104 | where 105 | Self: Sized; 106 | 107 | /// Returns the unique ProtocolID associated with the protocol. 108 | fn id() -> ProtocolID 109 | where 110 | Self: Sized; 111 | } 112 | 113 | #[async_trait] 114 | pub(crate) trait InitProtocol: Send + Sync { 115 | type T; 116 | /// Initialize the protocol 117 | async fn init(self: Arc) -> Self::T; 118 | } 119 | -------------------------------------------------------------------------------- /p2p/src/protocols/handshake.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, sync::Arc, time::Duration}; 2 | 3 | use async_trait::async_trait; 4 | use log::trace; 5 | 6 | use karyon_core::{async_util::timeout, util::decode}; 7 | 8 | use crate::{ 9 | message::{NetMsg, NetMsgCmd, VerAckMsg, VerMsg}, 10 | peer::Peer, 11 | protocol::{InitProtocol, ProtocolID}, 12 | version::{version_match, VersionInt}, 13 | Error, PeerID, Result, Version, 14 | }; 15 | 16 | pub struct HandshakeProtocol { 17 | peer: Arc, 18 | protocols: HashMap, 19 | } 20 | 21 | #[async_trait] 22 | impl InitProtocol for HandshakeProtocol { 23 | type T = Result; 24 | /// Initiate a handshake with a connection. 
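///
/// For an outbound connection, the local peer first sends a `VerMsg` and then
/// waits, bounded by `handshake_timeout`, for the remote reply; for an
/// inbound connection it only waits. A received `Version` command is
/// validated and answered with a `VerAckMsg` (`ack` set to `false` on an
/// incompatible version or unsupported protocol), while a received `Verack`
/// command is checked for `ack == true`. On success the remote `PeerID` is
/// returned.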
25 | async fn init(self: Arc) -> Self::T { 26 | trace!("Init Handshake: {}", self.peer.remote_endpoint()); 27 | 28 | if !self.peer.is_inbound() { 29 | self.send_vermsg().await?; 30 | } 31 | 32 | let t = Duration::from_secs(self.peer.config().handshake_timeout); 33 | let msg: NetMsg = timeout(t, self.peer.conn.recv_inner()).await??; 34 | match msg.header.command { 35 | NetMsgCmd::Version => { 36 | let result = self.validate_version_msg(&msg).await; 37 | match result { 38 | Ok(_) => { 39 | self.send_verack(true).await?; 40 | } 41 | Err(Error::IncompatibleVersion(_)) | Err(Error::UnsupportedProtocol(_)) => { 42 | self.send_verack(false).await?; 43 | } 44 | _ => {} 45 | }; 46 | result 47 | } 48 | NetMsgCmd::Verack => self.validate_verack_msg(&msg).await, 49 | cmd => Err(Error::InvalidMsg(format!("unexpected msg found {:?}", cmd))), 50 | } 51 | } 52 | } 53 | 54 | impl HandshakeProtocol { 55 | pub fn new(peer: Arc, protocols: HashMap) -> Arc { 56 | Arc::new(Self { peer, protocols }) 57 | } 58 | 59 | /// Sends a Version message 60 | async fn send_vermsg(&self) -> Result<()> { 61 | let protocols = self 62 | .protocols 63 | .clone() 64 | .into_iter() 65 | .map(|p| (p.0, p.1.v)) 66 | .collect(); 67 | 68 | let vermsg = VerMsg { 69 | peer_id: self.peer.own_id().clone(), 70 | protocols, 71 | version: self.peer.config().version.v.clone(), 72 | }; 73 | 74 | trace!("Send VerMsg"); 75 | self.peer 76 | .conn 77 | .send_inner(NetMsg::new(NetMsgCmd::Version, &vermsg)?) 78 | .await?; 79 | Ok(()) 80 | } 81 | 82 | /// Sends a Verack message 83 | async fn send_verack(&self, ack: bool) -> Result<()> { 84 | let verack = VerAckMsg { 85 | peer_id: self.peer.own_id().clone(), 86 | ack, 87 | }; 88 | 89 | trace!("Send VerAckMsg {:?}", verack); 90 | self.peer 91 | .conn 92 | .send_inner(NetMsg::new(NetMsgCmd::Verack, &verack)?) 93 | .await?; 94 | Ok(()) 95 | } 96 | 97 | /// Validates the given version msg 98 | async fn validate_version_msg(&self, msg: &NetMsg) -> Result { 99 | let (vermsg, _) = decode::(&msg.payload)?; 100 | 101 | if !version_match(&self.peer.config().version.req, &vermsg.version) { 102 | return Err(Error::IncompatibleVersion("system: {}".into())); 103 | } 104 | 105 | self.protocols_match(&vermsg.protocols).await?; 106 | 107 | trace!("Received VerMsg from: {}", vermsg.peer_id); 108 | Ok(vermsg.peer_id) 109 | } 110 | 111 | /// Validates the given verack msg 112 | async fn validate_verack_msg(&self, msg: &NetMsg) -> Result { 113 | let (verack, _) = decode::(&msg.payload)?; 114 | 115 | if !verack.ack { 116 | return Err(Error::IncompatiblePeer); 117 | } 118 | 119 | trace!("Received VerAckMsg from: {}", verack.peer_id); 120 | Ok(verack.peer_id) 121 | } 122 | 123 | /// Check if the new connection has compatible protocols. 
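///
/// Every protocol advertised by the remote peer must be present locally, and
/// its advertised version must satisfy the local `VersionReq`; otherwise this
/// returns `Error::UnsupportedProtocol` or `Error::IncompatibleVersion`.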
124 | async fn protocols_match(&self, protocols: &HashMap) -> Result<()> { 125 | for (n, pv) in protocols.iter() { 126 | match self.protocols.get(n) { 127 | Some(v) => { 128 | if !version_match(&v.req, pv) { 129 | return Err(Error::IncompatibleVersion(format!("{n} protocol: {pv}"))); 130 | } 131 | } 132 | None => { 133 | return Err(Error::UnsupportedProtocol(n.to_string())); 134 | } 135 | } 136 | } 137 | Ok(()) 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /p2p/src/protocols/mod.rs: -------------------------------------------------------------------------------- 1 | mod handshake; 2 | mod ping; 3 | 4 | pub(crate) use handshake::HandshakeProtocol; 5 | pub(crate) use ping::PingProtocol; 6 | -------------------------------------------------------------------------------- /p2p/src/protocols/ping.rs: -------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use async_channel::{Receiver, Sender}; 4 | use async_trait::async_trait; 5 | use bincode::{Decode, Encode}; 6 | use log::trace; 7 | use rand::{rngs::OsRng, RngCore}; 8 | 9 | use karyon_core::{ 10 | async_runtime::Executor, 11 | async_util::{select, sleep, timeout, Either, TaskGroup, TaskResult}, 12 | util::decode, 13 | }; 14 | 15 | use crate::{ 16 | peer::Peer, 17 | protocol::{Protocol, ProtocolEvent, ProtocolID}, 18 | version::Version, 19 | Error, Result, 20 | }; 21 | 22 | const MAX_FAILUERS: u32 = 3; 23 | 24 | #[derive(Clone, Debug, Encode, Decode)] 25 | enum PingProtocolMsg { 26 | Ping([u8; 32]), 27 | Pong([u8; 32]), 28 | } 29 | 30 | pub struct PingProtocol { 31 | peer: Arc, 32 | ping_interval: u64, 33 | ping_timeout: u64, 34 | task_group: TaskGroup, 35 | } 36 | 37 | impl PingProtocol { 38 | #[allow(clippy::new_ret_no_self)] 39 | pub fn new( 40 | peer: Arc, 41 | ping_interval: u64, 42 | ping_timeout: u64, 43 | executor: Executor, 44 | ) -> Arc { 45 | Arc::new(Self { 46 | peer, 47 | ping_interval, 48 | ping_timeout, 49 | task_group: TaskGroup::with_executor(executor), 50 | }) 51 | } 52 | 53 | async fn recv_loop(&self, pong_chan: Sender<[u8; 32]>) -> Result<()> { 54 | loop { 55 | let event = self.peer.recv::().await?; 56 | let msg_payload = match event.clone() { 57 | ProtocolEvent::Message(m) => m, 58 | ProtocolEvent::Shutdown => { 59 | break; 60 | } 61 | }; 62 | 63 | let (msg, _) = decode::(&msg_payload)?; 64 | 65 | match msg { 66 | PingProtocolMsg::Ping(nonce) => { 67 | trace!("Received Ping message {:?}", nonce); 68 | self.peer 69 | .send(Self::id(), &PingProtocolMsg::Pong(nonce)) 70 | .await?; 71 | trace!("Send back Pong message {:?}", nonce); 72 | } 73 | PingProtocolMsg::Pong(nonce) => { 74 | pong_chan.send(nonce).await?; 75 | } 76 | } 77 | } 78 | Ok(()) 79 | } 80 | 81 | async fn ping_loop(&self, chan: Receiver<[u8; 32]>) -> Result<()> { 82 | let rng = &mut OsRng; 83 | let mut retry = 0; 84 | 85 | while retry < MAX_FAILUERS { 86 | sleep(Duration::from_secs(self.ping_interval)).await; 87 | 88 | let mut ping_nonce: [u8; 32] = [0; 32]; 89 | rng.fill_bytes(&mut ping_nonce); 90 | 91 | trace!("Send Ping message {:?}", ping_nonce); 92 | self.peer 93 | .send(Self::id(), &PingProtocolMsg::Ping(ping_nonce)) 94 | .await?; 95 | 96 | // Wait for Pong message 97 | let d = Duration::from_secs(self.ping_timeout); 98 | let pong_msg = match timeout(d, chan.recv()).await { 99 | Ok(m) => m?, 100 | Err(_) => { 101 | retry += 1; 102 | continue; 103 | } 104 | }; 105 | trace!("Received Pong message {:?}", pong_msg); 106 | 107 | if 
pong_msg != ping_nonce { 108 | retry += 1; 109 | continue; 110 | } 111 | 112 | retry = 0; 113 | } 114 | 115 | Err(Error::Timeout) 116 | } 117 | } 118 | 119 | #[async_trait] 120 | impl Protocol for PingProtocol { 121 | async fn start(self: Arc) -> Result<()> { 122 | trace!("Start Ping protocol"); 123 | 124 | let stop_signal = async_channel::bounded::>(1); 125 | let (pong_chan, pong_chan_recv) = async_channel::bounded(1); 126 | 127 | self.task_group.spawn( 128 | { 129 | let this = self.clone(); 130 | async move { this.ping_loop(pong_chan_recv.clone()).await } 131 | }, 132 | |res| async move { 133 | if let TaskResult::Completed(result) = res { 134 | let _ = stop_signal.0.send(result).await; 135 | } 136 | }, 137 | ); 138 | 139 | let result = select(self.recv_loop(pong_chan), stop_signal.1.recv()).await; 140 | self.task_group.cancel().await; 141 | 142 | match result { 143 | Either::Left(res) => { 144 | trace!("Receive loop stopped {:?}", res); 145 | res 146 | } 147 | Either::Right(res) => { 148 | let res = res?; 149 | trace!("Ping loop stopped {:?}", res); 150 | res 151 | } 152 | } 153 | } 154 | 155 | fn version() -> Result { 156 | "0.1.0".parse() 157 | } 158 | 159 | fn id() -> ProtocolID { 160 | "PING".into() 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /p2p/src/routing_table/bucket.rs: -------------------------------------------------------------------------------- 1 | use super::{Entry, Key}; 2 | 3 | use rand::{rngs::OsRng, seq::SliceRandom}; 4 | 5 | /// BITFLAGS represent the status of an Entry within a bucket. 6 | pub type EntryStatusFlag = u16; 7 | 8 | /// The entry is connected. 9 | pub const CONNECTED_ENTRY: EntryStatusFlag = 0b000001; 10 | 11 | /// The entry is disconnected. This will increase the failure counter. 12 | pub const DISCONNECTED_ENTRY: EntryStatusFlag = 0b000010; 13 | 14 | /// The entry is ready to reconnect, meaning it has either been added and 15 | /// has no connection attempts, or it has been refreshed. 16 | pub const PENDING_ENTRY: EntryStatusFlag = 0b000100; 17 | 18 | /// The entry is unreachable. This will increase the failure counter. 19 | pub const UNREACHABLE_ENTRY: EntryStatusFlag = 0b001000; 20 | 21 | /// The entry is unstable. This will increase the failure counter. 22 | pub const UNSTABLE_ENTRY: EntryStatusFlag = 0b010000; 23 | 24 | /// The entry is incompatible. This entry will not contribute to an increase in 25 | /// failure attempts, instead, it will persist in the routing table for the 26 | /// lookup process and will only be removed in the presence of a new entry. 27 | pub const INCOMPATIBLE_ENTRY: EntryStatusFlag = 0b100000; 28 | 29 | #[allow(dead_code)] 30 | pub const ALL_ENTRY: EntryStatusFlag = 0b111111; 31 | 32 | /// A BucketEntry represents a peer in the routing table. 33 | #[derive(Clone, Debug)] 34 | pub struct BucketEntry { 35 | pub status: EntryStatusFlag, 36 | pub entry: Entry, 37 | pub failures: u32, 38 | pub last_seen: i64, 39 | } 40 | 41 | impl BucketEntry { 42 | pub fn is_connected(&self) -> bool { 43 | self.status ^ CONNECTED_ENTRY == 0 44 | } 45 | 46 | pub fn is_incompatible(&self) -> bool { 47 | self.status ^ INCOMPATIBLE_ENTRY == 0 48 | } 49 | 50 | pub fn is_unreachable(&self) -> bool { 51 | self.status ^ UNREACHABLE_ENTRY == 0 52 | } 53 | 54 | pub fn is_unstable(&self) -> bool { 55 | self.status ^ UNSTABLE_ENTRY == 0 56 | } 57 | } 58 | 59 | /// The number of entries that can be stored within a single bucket. 
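///
/// A crate-internal sketch of typical bucket usage against this capacity
/// (the `entry` value is an assumed [`Entry`]):
///
/// ```ignore
/// let mut bucket = Bucket::new();
/// if !bucket.contains_key(&entry.key) && bucket.len() < BUCKET_SIZE {
///     bucket.add(&entry);
/// }
/// bucket.update_entry(&entry.key, CONNECTED_ENTRY);
/// ```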
60 | pub const BUCKET_SIZE: usize = 20; 61 | 62 | /// A Bucket represents a group of entries in the routing table. 63 | #[derive(Debug, Clone)] 64 | pub struct Bucket { 65 | entries: Vec, 66 | } 67 | 68 | impl Bucket { 69 | /// Creates a new empty Bucket 70 | pub fn new() -> Self { 71 | Self { 72 | entries: Vec::with_capacity(BUCKET_SIZE), 73 | } 74 | } 75 | 76 | /// Add an entry to the bucket. 77 | pub fn add(&mut self, entry: &Entry) { 78 | self.entries.push(BucketEntry { 79 | status: PENDING_ENTRY, 80 | entry: entry.clone(), 81 | failures: 0, 82 | last_seen: chrono::Utc::now().timestamp(), 83 | }) 84 | } 85 | 86 | /// Get the number of entries in the bucket. 87 | pub fn len(&self) -> usize { 88 | self.entries.len() 89 | } 90 | 91 | /// Returns an iterator over the entries in the bucket. 92 | pub fn iter(&self) -> impl Iterator { 93 | self.entries.iter() 94 | } 95 | 96 | /// Remove an entry. 97 | pub fn remove(&mut self, key: &Key) { 98 | let position = self.entries.iter().position(|e| &e.entry.key == key); 99 | if let Some(i) = position { 100 | self.entries.remove(i); 101 | } 102 | } 103 | 104 | /// Returns an iterator of entries in random order. 105 | pub fn random_iter(&self, amount: usize) -> impl Iterator { 106 | self.entries.choose_multiple(&mut OsRng, amount) 107 | } 108 | 109 | /// Updates the status of an entry in the bucket identified by the given key. 110 | /// 111 | /// If the key is not found in the bucket, no action is taken. 112 | /// 113 | /// This will also update the last_seen field and increase the failures 114 | /// counter for the bucket entry according to the new status. 115 | pub fn update_entry(&mut self, key: &Key, entry_flag: EntryStatusFlag) { 116 | if let Some(e) = self.entries.iter_mut().find(|e| &e.entry.key == key) { 117 | e.status = entry_flag; 118 | if e.is_unreachable() || e.is_unstable() { 119 | e.failures += 1; 120 | } 121 | 122 | if !e.is_unreachable() { 123 | e.last_seen = chrono::Utc::now().timestamp(); 124 | } 125 | } 126 | } 127 | 128 | /// Check if the bucket contains the given key. 129 | pub fn contains_key(&self, key: &Key) -> bool { 130 | self.entries.iter().any(|e| &e.entry.key == key) 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /p2p/src/routing_table/entry.rs: -------------------------------------------------------------------------------- 1 | use bincode::{Decode, Encode}; 2 | 3 | use karyon_net::{Addr, Port}; 4 | 5 | /// Specifies the size of the key, in bytes. 6 | pub const KEY_SIZE: usize = 32; 7 | 8 | /// The unique key identifying the peer. 9 | pub type Key = [u8; KEY_SIZE]; 10 | 11 | /// An Entry represents a peer in the routing table. 12 | #[derive(Encode, Decode, Clone, Debug)] 13 | pub struct Entry { 14 | /// The unique key identifying the peer. 15 | pub key: Key, 16 | /// The IP address of the peer. 17 | pub addr: Addr, 18 | /// TCP port 19 | pub port: Port, 20 | /// UDP/TCP port 21 | pub discovery_port: Port, 22 | } 23 | 24 | impl PartialEq for Entry { 25 | fn eq(&self, other: &Self) -> bool { 26 | // XXX: should we compare both self.addr and other.addr??? 27 | self.key == other.key 28 | } 29 | } 30 | 31 | /// Calculates the XOR distance between two provided keys. 32 | /// 33 | /// The XOR distance is a metric used in Kademlia to measure the closeness 34 | /// of keys. 
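///
/// Keys that share a longer common prefix of leading bits have a smaller
/// distance. A small sketch (crate-internal, since `Key` and this function
/// are not exported):
///
/// ```ignore
/// let a: Key = [0u8; 32];
/// let mut b: Key = [0u8; 32];
/// b[0] = 0b0000_0100;
///
/// // The distance differs from zero only in the byte where the keys differ.
/// assert_eq!(xor_distance(&a, &b)[0], 0b0000_0100);
/// ```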
35 | pub fn xor_distance(key: &Key, other: &Key) -> Key { 36 | let mut res = [0; 32]; 37 | for (i, (k, o)) in key.iter().zip(other.iter()).enumerate() { 38 | res[i] = k ^ o; 39 | } 40 | res 41 | } 42 | -------------------------------------------------------------------------------- /p2p/src/slots.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | 3 | use karyon_core::async_util::CondWait; 4 | 5 | /// Manages available inbound and outbound slots. 6 | pub struct ConnectionSlots { 7 | /// A condvar for notifying when a slot become available. 8 | signal: CondWait, 9 | /// The number of occupied slots 10 | slots: AtomicUsize, 11 | /// The maximum number of slots. 12 | max_slots: usize, 13 | } 14 | 15 | impl ConnectionSlots { 16 | /// Creates a new ConnectionSlots 17 | pub fn new(max_slots: usize) -> Self { 18 | Self { 19 | signal: CondWait::new(), 20 | slots: AtomicUsize::new(0), 21 | max_slots, 22 | } 23 | } 24 | 25 | /// Increases the occupied slots by one. 26 | pub fn add(&self) { 27 | self.slots.fetch_add(1, Ordering::SeqCst); 28 | } 29 | 30 | /// Decreases the occupied slots by one and notifies the waiting signal 31 | /// to start accepting/connecting new connections. 32 | pub async fn remove(&self) { 33 | self.slots.fetch_sub(1, Ordering::SeqCst); 34 | if self.slots.load(Ordering::SeqCst) < self.max_slots { 35 | self.signal.signal().await; 36 | } 37 | } 38 | 39 | /// Waits for a slot to become available. 40 | pub async fn wait_for_slot(&self) { 41 | if self.slots.load(Ordering::SeqCst) < self.max_slots { 42 | return; 43 | } 44 | 45 | // Wait for a signal 46 | self.signal.wait().await; 47 | self.signal.reset().await; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /p2p/src/version.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use bincode::{Decode, Encode}; 4 | use semver::VersionReq; 5 | 6 | use crate::{Error, Result}; 7 | 8 | /// Represents the network version and protocol version used in karyon p2p. 
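///
/// The string form accepted by [`FromStr`] is either a single version
/// (`"0.2.0"`) or a version and a requirement separated by `", "`
/// (`"0.2.0, >0.1.0"`); when the requirement is omitted, the version itself
/// is reused as the requirement.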
9 | /// 10 | /// # Example 11 | /// 12 | /// ``` 13 | /// use karyon_p2p::Version; 14 | /// 15 | /// let version: Version = "0.2.0, >0.1.0".parse().unwrap(); 16 | /// 17 | /// let version: Version = "0.2.0".parse().unwrap(); 18 | /// 19 | /// ``` 20 | #[derive(Debug, Clone)] 21 | pub struct Version { 22 | pub v: VersionInt, 23 | pub req: VersionReq, 24 | } 25 | 26 | impl Version { 27 | /// Creates a new Version 28 | pub fn new(v: VersionInt, req: VersionReq) -> Self { 29 | Self { v, req } 30 | } 31 | } 32 | 33 | #[derive(Debug, Decode, Encode, Clone)] 34 | pub struct VersionInt { 35 | major: u64, 36 | minor: u64, 37 | patch: u64, 38 | } 39 | 40 | impl FromStr for Version { 41 | type Err = Error; 42 | 43 | fn from_str(s: &str) -> Result { 44 | let v: Vec<&str> = s.split(", ").collect(); 45 | if v.is_empty() || v.len() > 2 { 46 | return Err(Error::ParseError(format!("Invalid version{s}"))); 47 | } 48 | 49 | let version: VersionInt = v[0].parse()?; 50 | let req: VersionReq = if v.len() > 1 { v[1] } else { v[0] }.parse()?; 51 | 52 | Ok(Self { v: version, req }) 53 | } 54 | } 55 | 56 | impl FromStr for VersionInt { 57 | type Err = Error; 58 | 59 | fn from_str(s: &str) -> Result { 60 | let v: Vec<&str> = s.split('.').collect(); 61 | if v.len() < 2 || v.len() > 3 { 62 | return Err(Error::ParseError(format!("Invalid version{s}"))); 63 | } 64 | 65 | let major = v[0].parse::()?; 66 | let minor = v[1].parse::()?; 67 | let patch = v.get(2).unwrap_or(&"0").parse::()?; 68 | 69 | Ok(Self { 70 | major, 71 | minor, 72 | patch, 73 | }) 74 | } 75 | } 76 | 77 | impl std::fmt::Display for VersionInt { 78 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 79 | write!(f, "{}.{}.{}", self.major, self.minor, self.patch) 80 | } 81 | } 82 | 83 | impl From for semver::Version { 84 | fn from(v: VersionInt) -> Self { 85 | semver::Version::new(v.major, v.minor, v.patch) 86 | } 87 | } 88 | 89 | /// Check if a version satisfies a version request. 90 | pub fn version_match(version_req: &VersionReq, version: &VersionInt) -> bool { 91 | let version: semver::Version = version.clone().into(); 92 | version_req.matches(&version) 93 | } 94 | --------------------------------------------------------------------------------