├── .github └── workflows │ └── rust.yml ├── .gitignore ├── Cargo.toml ├── core ├── Cargo.toml ├── benches │ └── channel.rs └── src │ ├── api.rs │ ├── builder.rs │ ├── lib.rs │ ├── strategy.rs │ ├── util │ ├── async_runtime │ │ ├── mod.rs │ │ ├── park.rs │ │ ├── runtime.rs │ │ └── waker.rs │ ├── channel.rs │ ├── fx_hasher.rs │ ├── mod.rs │ ├── no_hasher.rs │ ├── spin.rs │ └── spsc.rs │ └── worker.rs ├── examples ├── Cargo.toml └── src │ ├── log.rs │ └── websocket.rs ├── log ├── Cargo.toml └── src │ ├── lib.rs │ ├── no_op.rs │ └── value.rs └── readme.md /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | 9 | jobs: 10 | build_and_test: 11 | name: Build and test 12 | runs-on: ${{ matrix.os }} 13 | strategy: 14 | matrix: 15 | # TODO: add other os 16 | os: [ubuntu-latest] 17 | rust: [nightly] 18 | 19 | steps: 20 | - uses: actions/checkout@master 21 | 22 | - name: Install ${{ matrix.rust }} 23 | uses: actions-rs/toolchain@v1 24 | with: 25 | toolchain: ${{ matrix.rust }} 26 | components: clippy, rustfmt 27 | override: true 28 | 29 | - name: check 30 | uses: actions-rs/cargo@v1 31 | with: 32 | command: check 33 | args: --all --bins --examples 34 | 35 | - name: check no-default-features 36 | uses: actions-rs/cargo@v1 37 | with: 38 | command: check 39 | args: --no-default-features 40 | 41 | - name: tests 42 | uses: actions-rs/cargo@v1 43 | with: 44 | command: test 45 | args: --all 46 | 47 | - name: clippy 48 | uses: actions-rs/clippy-check@v1 49 | with: 50 | token: ${{ secrets.GITHUB_TOKEN }} 51 | args: --all-features --tests --examples 52 | 53 | - name: fmt 54 | uses: actions-rs/cargo@v1 55 | with: 56 | command: fmt 57 | args: --all -- --check 58 | 59 | miri-test-unsafe: 60 | name: miri-test-unsafe 61 | runs-on: ubuntu-latest 62 | steps: 63 | - uses: actions/checkout@v3 64 | - name: Install Rust nightly 65 | uses: actions-rs/toolchain@v1 66 | with: 67 | toolchain: nightly 68 | components: miri 69 | override: true 70 | - name: miri 71 | run: cargo miri test util --no-fail-fast -- --nocapture 72 | env: 73 | MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-symbolic-alignment-check 74 | PROPTEST_CASES: 10 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | *.con 4 | .idea 5 | .github 6 | .vscode 7 | .gradle 8 | local*.rs -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "1" 3 | members = [ 4 | "core", 5 | "log", 6 | "examples" 7 | ] 8 | 9 | [patch.crates-io] 10 | flashfunk-core = { path = "./core" } 11 | owned-log = { path = "./log" } 12 | xitca-client = { git = "https://github.com/HFQR/xitca-web/" } 13 | http-ws = { git = "https://github.com/HFQR/xitca-web/" } 14 | 15 | [profile.release] 16 | lto = true 17 | opt-level = 3 18 | codegen-units = 1 19 | -------------------------------------------------------------------------------- /core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "flashfunk-core" 3 | version = "0.6.0" 4 | authors = ["somewheve "] 5 | edition = "2021" 6 | 7 | [[bench]] 8 | name = "channel" 9 | path = "benches/channel.rs" 10 | harness = false 11 | 12 | 
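# Feature notes: `std` enables pinning worker threads to cores via `core_affinity`
# (see `src/util/mod.rs`); `async` builds on `std` and switches the channel receivers to an
# async API driven by the `parking`-based runtime in `src/util/async_runtime`.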
[features] 13 | default = ["std"] 14 | std = ["core_affinity"] 15 | async = ["std", "futures-core", "parking"] 16 | 17 | [dependencies] 18 | crossbeam-utils = "0.8" 19 | core_affinity = { version = "0.8", optional = true } 20 | # async feature support 21 | futures-core = { version = "0.3", default-features = false, features = ["alloc"], optional = true } 22 | parking = { version = "2", optional = true } 23 | 24 | [dev-dependencies] 25 | criterion = "0.4" 26 | -------------------------------------------------------------------------------- /core/benches/channel.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate criterion; 3 | 4 | use std::sync::Mutex; 5 | 6 | use criterion::Criterion; 7 | use flashfunk_core::util::{channel::channel, spin::SpinLock}; 8 | 9 | fn send_recv(c: &mut Criterion) { 10 | let (mut tx, mut rx) = channel::(128); 11 | c.bench_function("channel_send_recv", move |b| { 12 | b.iter(|| { 13 | for i in 0..128u128 { 14 | let _ = tx.send(i); 15 | rx.recv().unwrap(); 16 | } 17 | }) 18 | }); 19 | } 20 | 21 | fn send_recv_spin_uncontend(c: &mut Criterion) { 22 | let (tx, mut rx) = channel::(128); 23 | let tx = SpinLock::new(tx); 24 | c.bench_function("channel_send_recv_spin_uncontend", move |b| { 25 | b.iter(|| { 26 | for i in 0..128u128 { 27 | let _ = tx.lock().send(i); 28 | rx.recv().unwrap(); 29 | } 30 | }) 31 | }); 32 | } 33 | 34 | fn send_recv_lock_uncontend(c: &mut Criterion) { 35 | let (tx, mut rx) = channel::(128); 36 | let tx = Mutex::new(tx); 37 | c.bench_function("channel_send_recv_lock_uncontend", move |b| { 38 | b.iter(|| { 39 | for i in 0..128u128 { 40 | let _ = tx.lock().unwrap().send(i); 41 | rx.recv().unwrap(); 42 | } 43 | }) 44 | }); 45 | } 46 | 47 | criterion_group!( 48 | bench, 49 | send_recv, 50 | send_recv_spin_uncontend, 51 | send_recv_lock_uncontend 52 | ); 53 | 54 | criterion_main!(bench); 55 | -------------------------------------------------------------------------------- /core/src/api.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | cmp::Eq, 3 | hash::{Hash, Hasher}, 4 | }; 5 | 6 | use super::builder::APIBuilder; 7 | use super::strategy::Strategy; 8 | use super::util::channel::{GroupReceiver, GroupSender}; 9 | 10 | pub trait API: Sized { 11 | /// symbol identifier for Strategy used for sending specific message to certain strategy. 12 | type Symbol: Hash + Eq + Clone; 13 | 14 | /// hasher used for storing strategy context with HashMap. 15 | /// 16 | /// flashfunk offers two utility hasher as: [`FxHasher`] and [`NoHasher`]. 17 | /// it's also possible to bring your own hasher by implement [Hasher] and [Default] traits for it. 18 | /// 19 | /// [`FxHasher`]: crate::util::fx_hasher::FxHasher 20 | /// [`NoHasher`]: crate::util::no_hasher::NoHasher 21 | type Hasher: Hasher + Default; 22 | 23 | /// message type from server to API and would be sent to strategies. 24 | type SndMessage: Send; 25 | 26 | /// message type from strategies to API and sent to server. 27 | type RecvMessage: Send; 28 | 29 | /// builder type where an array of strategies will be prepared to be hooked into api type. 30 | /// see [APIBuilder] for more. 31 | fn into_builder(self, strategies: [S; N]) -> APIBuilder 32 | where 33 | S: Strategy + 'static, 34 | Self: 'static, 35 | { 36 | APIBuilder::new(self, strategies) 37 | } 38 | 39 | /// callback function handles running of the api business logic. 
40 | /// sender is used to send message to strategy with [Self::Symbol] type as identifier. 41 | /// receiver is used to receive message from all strategy with iteration 42 | fn run( 43 | self, 44 | sender: GroupSender, 45 | receiver: GroupReceiver, 46 | ); 47 | } 48 | -------------------------------------------------------------------------------- /core/src/builder.rs: -------------------------------------------------------------------------------- 1 | use std::hash::BuildHasherDefault; 2 | 3 | use crate::util::channel::GroupIndex; 4 | use alloc::vec::Vec; 5 | 6 | use super::{ 7 | api::API, 8 | strategy::Strategy, 9 | util::{ 10 | channel::{channel, GroupReceiver, GroupSender}, 11 | pin_to_core, 12 | }, 13 | worker::Worker, 14 | }; 15 | 16 | // 通道容量设为3024.如果单tick中每个策略的消息数量超过这个数值(或者有消息积压),可以考虑放松此上限。 17 | // 只影响内存占用。 fixme: 开始启动的时候会导致消息过多 造成pusherror 18 | const MESSAGE_LIMIT: usize = 3024usize; 19 | 20 | pub struct APIBuilder { 21 | pub(crate) pin_to_core: bool, 22 | pub(crate) message_capacity: usize, 23 | pub(crate) api: A, 24 | pub(crate) strategies: [S; N], 25 | } 26 | 27 | impl APIBuilder 28 | where 29 | A: API + 'static, 30 | S: Strategy + 'static, 31 | { 32 | pub(super) fn new(api: A, strategies: [S; N]) -> Self { 33 | Self { 34 | pin_to_core: true, 35 | message_capacity: MESSAGE_LIMIT, 36 | api, 37 | strategies, 38 | } 39 | } 40 | 41 | /// Do not pin strategy worker thread to cpu cores. 42 | pub fn disable_pin_to_core(mut self) -> Self { 43 | self.pin_to_core = false; 44 | self 45 | } 46 | 47 | /// Change capacity of message channel between API thread and strategy worker threads. 48 | /// 49 | /// Capacity is for per strategy. 50 | pub fn message_capacity(mut self, cap: usize) -> Self { 51 | assert_ne!(cap, 0); 52 | self.message_capacity = cap; 53 | self 54 | } 55 | 56 | /// Build and start API on current thread. 57 | /// [API::run](crate::api::API::run) would be called when build finished. 58 | /// 59 | /// Every strategy would run on it's own dedicated thread. 60 | pub fn build(self) { 61 | let Self { 62 | pin_to_core, 63 | message_capacity, 64 | api, 65 | strategies, 66 | } = self; 67 | 68 | // 收集核心cid 69 | let mut cores = pin_to_core::get_core_ids(); 70 | 71 | // 单向spsc: 72 | // API -> Strategies. 73 | let mut senders = Vec::new(); 74 | // Strategies -> API. 75 | let mut receivers = Vec::new(); 76 | 77 | // groups为与symbols相对应(vec index)的策略们的发送端vec. 78 | let mut group = std::collections::HashMap::<_, _, BuildHasherDefault>::default(); 79 | 80 | for (st_idx, st) in strategies.into_iter().enumerate() { 81 | st.symbol().iter().for_each(|symbol| { 82 | let g = group 83 | .entry(symbol.clone()) 84 | .or_insert_with(GroupIndex::default); 85 | assert!(!g.contains(&st_idx)); 86 | g.push(st_idx); 87 | }); 88 | 89 | // API -> Strategies 90 | let (s1, r1) = channel(message_capacity); 91 | 92 | // Strategies -> API. 
93 | let (s2, r2) = channel(message_capacity); 94 | 95 | senders.push(s1); 96 | receivers.push(r2); 97 | 98 | let id = pin_to_core.then(|| cores.pop()).flatten(); 99 | Worker::new(st, s2, r1).run_in_core(id); 100 | } 101 | 102 | let group_senders = GroupSender::<_, _, _, N>::new(senders, group); 103 | let group_receivers = GroupReceiver::<_, N>::from_vec(receivers); 104 | 105 | // 分配最后一个核心给主线程 106 | let id = pin_to_core.then(|| cores.pop()).flatten(); 107 | pin_to_core::pin_to_core(id); 108 | 109 | api.run(group_senders, group_receivers); 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /core/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate alloc; 2 | 3 | mod worker; 4 | 5 | pub mod api; 6 | pub mod builder; 7 | pub mod strategy; 8 | pub mod util; 9 | 10 | #[cfg(test)] 11 | mod test { 12 | use super::api::API; 13 | use super::strategy::{Context, Strategy}; 14 | use super::util::channel::{channel, GroupReceiver, GroupSender, Sender}; 15 | 16 | use alloc::vec::Vec; 17 | 18 | struct Rem; 19 | 20 | #[derive(Default)] 21 | struct RemContext; 22 | 23 | struct APIMessage(Sender); 24 | 25 | struct StrategyMessage(u32); 26 | 27 | #[repr(u64)] 28 | #[derive(Debug, Clone, Hash, Eq, Copy, PartialEq, Default)] 29 | enum Symbol { 30 | #[default] 31 | BTCUSDT = 1, 32 | } 33 | 34 | impl API for Rem { 35 | type Symbol = Symbol; 36 | type Hasher = crate::util::no_hasher::NoHasher; 37 | type SndMessage = APIMessage; 38 | type RecvMessage = StrategyMessage; 39 | 40 | fn run( 41 | self, 42 | mut sender: GroupSender, 43 | mut receiver: GroupReceiver, 44 | ) { 45 | let group = sender.group().get(&Symbol::BTCUSDT).unwrap(); 46 | assert_eq!(group.len(), 1); 47 | 48 | let idx = group.iter().next().unwrap(); 49 | assert_eq!(*idx, 0); 50 | let (tx, mut rx) = channel(1); 51 | 52 | sender.send_to(APIMessage(tx), *idx); 53 | 54 | #[cfg(not(feature = "async"))] 55 | { 56 | loop { 57 | if let Ok(item) = rx.recv() { 58 | assert_eq!(996, item); 59 | break; 60 | } 61 | } 62 | 63 | receiver.iter_mut().for_each(|r| { 64 | if let Ok(m) = r.recv() { 65 | assert_eq!(m.0, 251); 66 | } 67 | }); 68 | } 69 | 70 | #[cfg(feature = "async")] 71 | { 72 | crate::util::async_runtime::StdRuntime::new().block_on(async move { 73 | assert_eq!(996, rx.recv().await.unwrap()); 74 | 75 | for r in receiver.iter_mut() { 76 | if let Ok(m) = r.recv().await { 77 | assert_eq!(m.0, 251); 78 | } 79 | } 80 | }); 81 | } 82 | } 83 | } 84 | 85 | struct RemStrategy { 86 | symbols: Vec, 87 | } 88 | 89 | impl Strategy for RemStrategy { 90 | fn symbol(&self) -> &[::Symbol] { 91 | self.symbols.as_slice() 92 | } 93 | 94 | fn call(&mut self, msg: ::SndMessage, ctx: &mut Context) { 95 | let mut tx = msg.0; 96 | 97 | ctx.sender().send(StrategyMessage(251)); 98 | 99 | tx.send(996u32); 100 | } 101 | } 102 | 103 | #[test] 104 | fn build() { 105 | let st = RemStrategy { 106 | symbols: vec![Symbol::BTCUSDT], 107 | }; 108 | let api = Rem; 109 | api.into_builder([st]) 110 | .disable_pin_to_core() 111 | .message_capacity(128) 112 | .build(); 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /core/src/strategy.rs: -------------------------------------------------------------------------------- 1 | use super::{api::API, util::channel::Sender}; 2 | 3 | /// Trait for single strategy of given NAME. 
4 | pub trait Strategy 5 | where 6 | A: API, 7 | Self: Send, 8 | { 9 | fn symbol(&self) -> &[A::Symbol]; 10 | 11 | /// Method called when strategy is about to start. 12 | #[allow(unused_variables)] 13 | fn on_start(&mut self, ctx: &mut Context) {} 14 | 15 | /// Method called when a new message is received by strategy. 16 | fn call(&mut self, msg: A::SndMessage, ctx: &mut Context); 17 | 18 | /// Method called when all message are processed by strategy and wait for next 19 | /// message to arrive. 20 | #[allow(unused_variables)] 21 | fn on_idle(&mut self, ctx: &mut Context) {} 22 | } 23 | 24 | impl Strategy for Box 25 | where 26 | S: Strategy + ?Sized, 27 | A: API, 28 | { 29 | #[inline] 30 | fn symbol(&self) -> &[A::Symbol] { 31 | (**self).symbol() 32 | } 33 | 34 | #[inline] 35 | fn on_start(&mut self, ctx: &mut Context) { 36 | (**self).on_start(ctx) 37 | } 38 | 39 | #[inline] 40 | fn call(&mut self, msg: A::SndMessage, ctx: &mut Context) { 41 | (**self).call(msg, ctx) 42 | } 43 | 44 | #[inline] 45 | fn on_idle(&mut self, ctx: &mut Context) { 46 | (**self).on_idle(ctx) 47 | } 48 | } 49 | 50 | /// Context type of a strategy. 51 | pub struct Context 52 | where 53 | A: API, 54 | { 55 | sender: Sender, 56 | } 57 | 58 | impl Context 59 | where 60 | A: API, 61 | { 62 | pub(super) fn new(sender: Sender) -> Self { 63 | Self { sender } 64 | } 65 | 66 | /// Get a sender type from strategy which can be used to send message 67 | /// to API. 68 | #[inline] 69 | pub fn sender(&mut self) -> &mut Sender { 70 | &mut self.sender 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /core/src/util/async_runtime/mod.rs: -------------------------------------------------------------------------------- 1 | mod waker; 2 | 3 | pub mod park; 4 | pub mod runtime; 5 | 6 | use core::future::Future; 7 | 8 | use self::{ 9 | park::{Park, Unpark}, 10 | runtime::Runtime, 11 | }; 12 | 13 | struct Parker(parking::Parker); 14 | 15 | struct Unparker(parking::Unparker); 16 | 17 | impl Park for Parker { 18 | type Unparker = Unparker; 19 | 20 | fn unparker(&mut self) -> Self::Unparker { 21 | Unparker(self.0.unparker()) 22 | } 23 | 24 | #[inline] 25 | fn park(&self) { 26 | self.0.park(); 27 | } 28 | } 29 | 30 | impl Unpark for Unparker { 31 | #[inline] 32 | fn unpark(&self) { 33 | self.0.unpark(); 34 | } 35 | } 36 | 37 | pub struct StdRuntime(Runtime); 38 | 39 | impl Default for StdRuntime { 40 | fn default() -> Self { 41 | Self::new() 42 | } 43 | } 44 | 45 | impl StdRuntime { 46 | pub fn new() -> Self { 47 | let parker = Parker(parking::Parker::new()); 48 | let rt = Runtime::new(parker); 49 | Self(rt) 50 | } 51 | 52 | #[inline] 53 | pub fn block_on(&mut self, fut: Fut) -> Fut::Output { 54 | self.0.block_on(fut) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /core/src/util/async_runtime/park.rs: -------------------------------------------------------------------------------- 1 | /// Trait for park a thread. 2 | pub trait Park { 3 | /// Unparker type associated with this Parker. 4 | type Unparker: Unpark; 5 | 6 | /// Parker is tasked with construct unparker. 7 | fn unparker(&mut self) -> Self::Unparker; 8 | 9 | fn park(&self); 10 | } 11 | 12 | /// Trait for unpark a thread. 
13 | pub trait Unpark: Send + Sync + 'static { 14 | fn unpark(&self); 15 | } 16 | -------------------------------------------------------------------------------- /core/src/util/async_runtime/runtime.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | future::Future, 3 | pin::Pin, 4 | task::{Context, Poll}, 5 | }; 6 | 7 | use super::{park::Park, waker::waker_fn}; 8 | 9 | pub struct Runtime<P> { 10 | parker: P, 11 | } 12 | 13 | impl<P: Park> Runtime<P>

{ 14 | pub fn new(parker: P) -> Self { 15 | Self { parker } 16 | } 17 | 18 | pub fn block_on(&mut self, mut fut: Fut) -> Fut::Output { 19 | let unparker = self.parker.unparker(); 20 | 21 | let waker = waker_fn(unparker); 22 | 23 | // SAFETY 24 | // Pinning is safe. Future is shadow named after Pin. 25 | // It's not possible to move Fut without going through the Pin. 26 | let mut fut = unsafe { Pin::new_unchecked(&mut fut) }; 27 | 28 | loop { 29 | let cx = &mut Context::from_waker(&waker); 30 | 31 | match fut.as_mut().poll(cx) { 32 | Poll::Ready(res) => return res, 33 | Poll::Pending => self.parker.park(), 34 | } 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /core/src/util/async_runtime/waker.rs: -------------------------------------------------------------------------------- 1 | use core::mem::{self, ManuallyDrop}; 2 | use core::task::{RawWaker, RawWakerVTable, Waker}; 3 | 4 | use alloc::sync::Arc; 5 | 6 | use super::park::Unpark; 7 | 8 | pub(crate) fn waker_fn(u: U) -> Waker { 9 | let raw = Arc::into_raw(Arc::new(u)) as *const (); 10 | let vtable = &Helper::::VTABLE; 11 | unsafe { Waker::from_raw(RawWaker::new(raw, vtable)) } 12 | } 13 | 14 | struct Helper(U); 15 | 16 | impl Helper { 17 | const VTABLE: RawWakerVTable = RawWakerVTable::new( 18 | Self::clone_waker, 19 | Self::wake, 20 | Self::wake_by_ref, 21 | Self::drop_waker, 22 | ); 23 | 24 | unsafe fn clone_waker(ptr: *const ()) -> RawWaker { 25 | let arc = ManuallyDrop::new(Arc::from_raw(ptr as *const U)); 26 | mem::forget(arc.clone()); 27 | RawWaker::new(ptr, &Self::VTABLE) 28 | } 29 | 30 | unsafe fn wake(ptr: *const ()) { 31 | let arc = Arc::from_raw(ptr as *const U); 32 | arc.unpark(); 33 | } 34 | 35 | unsafe fn wake_by_ref(ptr: *const ()) { 36 | let arc = ManuallyDrop::new(Arc::from_raw(ptr as *const U)); 37 | arc.unpark(); 38 | } 39 | 40 | unsafe fn drop_waker(ptr: *const ()) { 41 | drop(Arc::from_raw(ptr as *const U)); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /core/src/util/channel.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | cmp::Eq, 3 | fmt::{Debug, Display, Formatter, Result as FmtResult}, 4 | hash::{BuildHasherDefault, Hash, Hasher}, 5 | ops::{Deref, DerefMut}, 6 | ptr, 7 | }; 8 | 9 | use alloc::vec::Vec; 10 | 11 | #[cfg(feature = "async")] 12 | use {alloc::sync::Arc, futures_core::task::__internal::AtomicWaker}; 13 | 14 | use super::spsc::{new, Consumer, Producer}; 15 | 16 | pub enum ChannelError { 17 | RecvError, 18 | TrySendError(M), 19 | SenderOverFlow(M), 20 | SenderGroupNotFound(M), 21 | } 22 | 23 | impl Debug for ChannelError { 24 | fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { 25 | let mut fmt = f.debug_struct("ChannelError"); 26 | 27 | match self { 28 | ChannelError::SenderOverFlow(_) => fmt 29 | .field("cause", &"ChannelGroupSender") 30 | .field("description", &"Overflow on group sender's sender index"), 31 | ChannelError::SenderGroupNotFound(_) => { 32 | fmt.field("cause", &"ChannelGroupSender").field( 33 | "description", 34 | &"Overflow on group sender's group index(group not found)", 35 | ) 36 | } 37 | ChannelError::RecvError => fmt 38 | .field("cause", &"ChannelReceiver") 39 | .field("description", &"Failed to receive message from channel"), 40 | ChannelError::TrySendError(_) => fmt 41 | .field("cause", &"ChannelSender") 42 | .field("description", &"Failed to send message through channel"), 43 | }; 44 | 45 | fmt.finish() 46 | } 
47 | } 48 | 49 | impl Display for ChannelError { 50 | fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { 51 | write!(f, "{:?}", self) 52 | } 53 | } 54 | 55 | pub struct Sender { 56 | tx: Producer, 57 | #[cfg(feature = "async")] 58 | waker: Arc, 59 | } 60 | 61 | impl Sender { 62 | // 发送失败会panic 63 | #[inline] 64 | pub fn send(&mut self, m: impl Into) { 65 | self.tx.push(m.into()).unwrap(); 66 | #[cfg(feature = "async")] 67 | self.waker.wake(); 68 | } 69 | 70 | // 发送失败返回消息 71 | #[inline] 72 | pub fn try_send(&mut self, m: M) -> Result<(), ChannelError> { 73 | match self.tx.push(m) { 74 | Ok(_) => { 75 | #[cfg(feature = "async")] 76 | self.waker.wake(); 77 | Ok(()) 78 | } 79 | Err(e) => Err(ChannelError::TrySendError(e.0)), 80 | } 81 | } 82 | } 83 | 84 | pub struct Receiver { 85 | rx: Consumer, 86 | #[cfg(feature = "async")] 87 | waker: Arc, 88 | } 89 | 90 | #[cfg(not(feature = "async"))] 91 | pub use r#sync::*; 92 | 93 | #[cfg(not(feature = "async"))] 94 | mod r#sync { 95 | use super::*; 96 | 97 | pub fn channel(cap: usize) -> (Sender, Receiver) { 98 | let (tx, rx) = new(cap); 99 | (Sender { tx }, Receiver { rx }) 100 | } 101 | 102 | impl Receiver { 103 | #[inline] 104 | pub fn recv(&mut self) -> Result> { 105 | self.rx.pop().map_err(|_| ChannelError::RecvError) 106 | } 107 | } 108 | } 109 | 110 | #[cfg(feature = "async")] 111 | pub use r#async::*; 112 | 113 | #[cfg(feature = "async")] 114 | mod r#async { 115 | use super::*; 116 | 117 | use core::{ 118 | future::Future, 119 | pin::Pin, 120 | task::{Context, Poll}, 121 | }; 122 | 123 | pub fn channel(cap: usize) -> (Sender, Receiver) { 124 | let (tx, rx) = new(cap); 125 | let waker = Arc::new(AtomicWaker::new()); 126 | ( 127 | Sender { 128 | tx, 129 | waker: waker.clone(), 130 | }, 131 | Receiver { rx, waker }, 132 | ) 133 | } 134 | 135 | impl Receiver { 136 | #[inline] 137 | pub async fn recv(&mut self) -> Result> { 138 | struct Recv<'a, M>(&'a mut Receiver); 139 | 140 | impl Future for Recv<'_, M> { 141 | type Output = Result>; 142 | 143 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 144 | let this = self.get_mut(); 145 | match this.0.rx.pop() { 146 | Ok(msg) => Poll::Ready(Ok(msg)), 147 | Err(_) => { 148 | this.0.waker.register(cx.waker()); 149 | Poll::Pending 150 | } 151 | } 152 | } 153 | } 154 | 155 | Recv(self).await 156 | } 157 | } 158 | 159 | impl GroupReceiver { 160 | pub async fn recv(&mut self) -> Result> { 161 | struct GroupReceiveFut<'a, M, const N: usize> { 162 | group: &'a mut GroupReceiver, 163 | } 164 | 165 | impl Future for GroupReceiveFut<'_, M, N> { 166 | type Output = Result>; 167 | 168 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 169 | for rx in self.get_mut().group.iter_mut() { 170 | match rx.rx.pop() { 171 | Ok(msg) => return Poll::Ready(Ok(msg)), 172 | Err(_) => { 173 | rx.waker.register(cx.waker()); 174 | } 175 | } 176 | } 177 | 178 | Poll::Pending 179 | } 180 | } 181 | 182 | GroupReceiveFut { group: self }.await 183 | } 184 | } 185 | } 186 | 187 | type HashMap = 188 | std::collections::HashMap, BuildHasherDefault>; 189 | 190 | pub struct GroupSender { 191 | senders: [Sender; N], 192 | group: HashMap, 193 | } 194 | 195 | impl GroupSender 196 | where 197 | K: Hash + Eq, 198 | H: Hasher + Default, 199 | { 200 | pub fn new(sender: Vec>, group: HashMap) -> Self { 201 | let this = Self { 202 | senders: sender.try_into().ok().unwrap(), 203 | group, 204 | }; 205 | // IMPORTANT: 206 | // 207 | // Don't remove. See GroupSender::try_send_group method for reason. 
208 | this.bound_check(); 209 | this 210 | } 211 | 212 | #[inline] 213 | pub fn group(&self) -> &HashMap { 214 | &self.group 215 | } 216 | 217 | #[inline] 218 | pub fn senders(&self) -> &[Sender] { 219 | &self.senders 220 | } 221 | 222 | // 发送至所有sender 223 | #[inline] 224 | pub fn send_all(&mut self, mm: MM) 225 | where 226 | MM: Into + Clone, 227 | { 228 | self.senders 229 | .iter_mut() 230 | .for_each(|s| s.send(mm.clone().into())) 231 | } 232 | 233 | // 发送至指定index的sender. 失败会panic 234 | #[inline] 235 | pub fn send_to(&mut self, m: impl Into, sender_index: usize) { 236 | self.senders[sender_index].send(m.into()); 237 | } 238 | 239 | // 发送至指定index的sender. 失败会返回消息 240 | #[inline] 241 | pub fn try_send_to(&mut self, m: MM, sender_index: usize) -> Result<(), ChannelError> 242 | where 243 | MM: Into, 244 | { 245 | match self.senders.get_mut(sender_index) { 246 | Some(s) => { 247 | s.send(m.into()); 248 | Ok(()) 249 | } 250 | None => Err(ChannelError::SenderOverFlow(m)), 251 | } 252 | } 253 | 254 | // 发送至指定group. group查找失败失败会返回消息.(group内的sender发送失败会panic) 255 | #[inline] 256 | pub fn try_send_group(&mut self, mm: MM, symbol: &K) -> Result<(), ChannelError> 257 | where 258 | MM: Into + Clone, 259 | { 260 | match self.group.get(symbol) { 261 | Some(g) => { 262 | g.iter().for_each(|i| { 263 | // SAFETY: 264 | // 265 | // Self::bound_check guarantee i is in range of Sender's stack array. 266 | unsafe { 267 | self.senders.get_unchecked_mut(*i).send(mm.clone().into()); 268 | } 269 | }); 270 | Ok(()) 271 | } 272 | None => Err(ChannelError::SenderGroupNotFound(mm)), 273 | } 274 | } 275 | 276 | #[cold] 277 | #[inline(never)] 278 | fn bound_check(&self) { 279 | self.group 280 | .iter() 281 | .for_each(|(_, g)| g.iter().for_each(|i| assert!(*i < self.senders.len()))); 282 | } 283 | } 284 | 285 | pub struct GroupReceiver { 286 | receivers: [Receiver; N], 287 | } 288 | 289 | impl GroupReceiver { 290 | pub(crate) fn from_vec(vec: Vec>) -> Self { 291 | Self { 292 | receivers: vec.try_into().ok().unwrap(), 293 | } 294 | } 295 | } 296 | 297 | impl Deref for GroupReceiver { 298 | type Target = [Receiver]; 299 | 300 | fn deref(&self) -> &Self::Target { 301 | &self.receivers 302 | } 303 | } 304 | 305 | impl DerefMut for GroupReceiver { 306 | fn deref_mut(&mut self) -> &mut Self::Target { 307 | &mut self.receivers 308 | } 309 | } 310 | 311 | /// a collection of Index of [GroupSender]'s `[Sender; N]`. 312 | pub struct GroupIndex { 313 | idx: [usize; N], 314 | len: usize, 315 | } 316 | 317 | impl Default for GroupIndex { 318 | fn default() -> Self { 319 | Self { 320 | idx: [0; N], 321 | len: 0, 322 | } 323 | } 324 | } 325 | 326 | impl GroupIndex { 327 | #[cold] 328 | #[inline(never)] 329 | pub(crate) fn push(&mut self, i: usize) { 330 | assert_ne!(self.len, N, "GroupIndex is full"); 331 | self.idx[self.len] = i; 332 | self.len += 1; 333 | } 334 | 335 | #[cold] 336 | #[inline(never)] 337 | pub(crate) fn contains(&self, idx: &usize) -> bool { 338 | self.iter().any(|i| i == idx) 339 | } 340 | 341 | pub fn iter(&self) -> impl Iterator { 342 | // SAFETY: 343 | // 344 | // This is safe as self.len is bound checked against N with every GroupIndex::push call. 
345 | unsafe { &*ptr::slice_from_raw_parts(self.idx.as_ptr(), self.len) }.iter() 346 | } 347 | 348 | #[allow(clippy::len_without_is_empty)] 349 | #[inline] 350 | pub fn len(&self) -> usize { 351 | self.len 352 | } 353 | } 354 | 355 | #[cfg(test)] 356 | mod test { 357 | use super::*; 358 | 359 | #[test] 360 | #[should_panic] 361 | fn overflow() { 362 | let mut group = GroupIndex::<1>::default(); 363 | group.push(1); 364 | group.push(2); 365 | } 366 | 367 | #[test] 368 | fn iter() { 369 | let mut group = GroupIndex::<4>::default(); 370 | group.push(1); 371 | group.push(2); 372 | group.push(4); 373 | 374 | { 375 | let mut iter = group.iter(); 376 | 377 | assert_eq!(iter.next(), Some(&1)); 378 | assert_eq!(iter.next(), Some(&2)); 379 | assert_eq!(iter.next(), Some(&4)); 380 | assert_eq!(iter.next(), None); 381 | } 382 | 383 | group.push(8); 384 | 385 | let mut iter = group.iter(); 386 | 387 | assert_eq!(iter.next(), Some(&1)); 388 | assert_eq!(iter.next(), Some(&2)); 389 | assert_eq!(iter.next(), Some(&4)); 390 | assert_eq!(iter.next(), Some(&8)); 391 | assert_eq!(iter.next(), None); 392 | } 393 | 394 | #[test] 395 | fn len() { 396 | let mut group = GroupIndex::<4>::default(); 397 | 398 | group.push(1); 399 | assert_eq!(group.len(), 1); 400 | 401 | group.push(2); 402 | assert_eq!(group.len(), 2); 403 | 404 | group.push(4); 405 | assert_eq!(group.len(), 3); 406 | } 407 | } 408 | -------------------------------------------------------------------------------- /core/src/util/fx_hasher.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | hash::{BuildHasherDefault, Hasher}, 3 | mem::size_of, 4 | ops::BitXor, 5 | }; 6 | 7 | pub type FxHashMap = std::collections::HashMap>; 8 | 9 | /// A speedy hash algorithm for use within rustc. The hashmap in liballoc 10 | /// by default uses SipHash which isn't quite as speedy as we want. In the 11 | /// compiler we're not really worried about DOS attempts, so we use a fast 12 | /// non-cryptographic hash. 13 | /// 14 | /// This is the same as the algorithm used by Firefox -- which is a homespun 15 | /// one not based on any widely-known algorithm -- though modified to produce 16 | /// 64-bit hash values instead of 32-bit hash values. It consistently 17 | /// out-performs an FNV-based hash within rustc itself -- the collision rate is 18 | /// similar or slightly worse than FNV, but the speed of the hash function 19 | /// itself is much higher because it works on up to 8 bytes at a time. 
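/// 
/// A minimal usage sketch; `FxHashMap` is the alias defined above (a `HashMap` using this hasher):
/// ```
/// use flashfunk_core::util::fx_hasher::FxHashMap;
///
/// let mut map: FxHashMap<&str, u64> = FxHashMap::default();
/// map.insert("BTCUSDT", 1);
/// assert_eq!(map["BTCUSDT"], 1);
/// ```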
20 | #[derive(Default)] 21 | pub struct FxHasher { 22 | hash: usize, 23 | } 24 | 25 | #[cfg(target_pointer_width = "32")] 26 | const K: usize = 0x9e3779b9; 27 | #[cfg(target_pointer_width = "64")] 28 | const K: usize = 0x517cc1b727220a95; 29 | 30 | impl FxHasher { 31 | #[inline] 32 | fn add_to_hash(&mut self, i: usize) { 33 | self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K); 34 | } 35 | } 36 | 37 | impl Hasher for FxHasher { 38 | #[inline] 39 | fn write(&mut self, mut bytes: &[u8]) { 40 | #[cfg(target_pointer_width = "32")] 41 | let read_usize = |bytes: &[u8]| u32::from_ne_bytes(bytes[..4].try_into().unwrap()); 42 | #[cfg(target_pointer_width = "64")] 43 | let read_usize = |bytes: &[u8]| u64::from_ne_bytes(bytes[..8].try_into().unwrap()); 44 | 45 | let mut hash = FxHasher { hash: self.hash }; 46 | assert!(size_of::() <= 8); 47 | while bytes.len() >= size_of::() { 48 | hash.add_to_hash(read_usize(bytes) as usize); 49 | bytes = &bytes[size_of::()..]; 50 | } 51 | if (size_of::() > 4) && (bytes.len() >= 4) { 52 | hash.add_to_hash(u32::from_ne_bytes(bytes[..4].try_into().unwrap()) as usize); 53 | bytes = &bytes[4..]; 54 | } 55 | if (size_of::() > 2) && bytes.len() >= 2 { 56 | hash.add_to_hash(u16::from_ne_bytes(bytes[..2].try_into().unwrap()) as usize); 57 | bytes = &bytes[2..]; 58 | } 59 | if (size_of::() > 1) && !bytes.is_empty() { 60 | hash.add_to_hash(bytes[0] as usize); 61 | } 62 | self.hash = hash.hash; 63 | } 64 | 65 | #[inline] 66 | fn write_u8(&mut self, i: u8) { 67 | self.add_to_hash(i as usize); 68 | } 69 | 70 | #[inline] 71 | fn write_u16(&mut self, i: u16) { 72 | self.add_to_hash(i as usize); 73 | } 74 | 75 | #[inline] 76 | fn write_u32(&mut self, i: u32) { 77 | self.add_to_hash(i as usize); 78 | } 79 | 80 | #[cfg(target_pointer_width = "32")] 81 | #[inline] 82 | fn write_u64(&mut self, i: u64) { 83 | self.add_to_hash(i as usize); 84 | self.add_to_hash((i >> 32) as usize); 85 | } 86 | 87 | #[cfg(target_pointer_width = "64")] 88 | #[inline] 89 | fn write_u64(&mut self, i: u64) { 90 | self.add_to_hash(i as usize); 91 | } 92 | 93 | #[inline] 94 | fn write_usize(&mut self, i: usize) { 95 | self.add_to_hash(i); 96 | } 97 | 98 | #[inline] 99 | fn finish(&self) -> u64 { 100 | self.hash as u64 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /core/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | mod spsc; 2 | 3 | #[cfg(feature = "async")] 4 | pub mod async_runtime; 5 | 6 | pub mod channel; 7 | pub mod fx_hasher; 8 | pub mod no_hasher; 9 | pub mod spin; 10 | 11 | pub mod pin_to_core { 12 | use alloc::vec::Vec; 13 | 14 | #[cfg(feature = "std")] 15 | pub type CoreId = core_affinity::CoreId; 16 | 17 | #[cfg(not(feature = "std"))] 18 | pub struct CoreId; 19 | 20 | pub fn get_core_ids() -> Vec { 21 | #[cfg(feature = "std")] 22 | { 23 | core_affinity::get_core_ids().unwrap() 24 | } 25 | 26 | #[cfg(not(feature = "std"))] 27 | { 28 | Vec::new() 29 | } 30 | } 31 | 32 | pub fn pin_to_core(id: Option) { 33 | if let Some(id) = id { 34 | #[cfg(feature = "std")] 35 | { 36 | core_affinity::set_for_current(id); 37 | } 38 | 39 | #[cfg(not(feature = "std"))] 40 | { 41 | drop(id); 42 | } 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /core/src/util/no_hasher.rs: -------------------------------------------------------------------------------- 1 | use core::hash::{BuildHasherDefault, Hasher}; 2 | 3 | pub type NoHashMap = 
std::collections::HashMap>; 4 | 5 | /// A simple hasher that do hashing by not doing it. 6 | #[derive(Default)] 7 | pub struct NoHasher(u64); 8 | 9 | impl Hasher for NoHasher { 10 | #[inline] 11 | fn finish(&self) -> u64 { 12 | self.0 13 | } 14 | 15 | fn write(&mut self, _: &[u8]) { 16 | unreachable!("NoHasher only work for Key type that can be cast as u64") 17 | } 18 | 19 | #[inline] 20 | fn write_u8(&mut self, i: u8) { 21 | self.0 = i as u64 22 | } 23 | 24 | #[inline] 25 | fn write_u16(&mut self, i: u16) { 26 | self.0 = i as u64 27 | } 28 | 29 | #[inline] 30 | fn write_u32(&mut self, i: u32) { 31 | self.0 = i as u64 32 | } 33 | 34 | #[inline] 35 | fn write_u64(&mut self, i: u64) { 36 | self.0 = i 37 | } 38 | 39 | #[inline] 40 | fn write_usize(&mut self, i: usize) { 41 | self.0 = i as u64 42 | } 43 | 44 | #[inline] 45 | fn write_i8(&mut self, i: i8) { 46 | self.0 = i as u64 47 | } 48 | 49 | #[inline] 50 | fn write_i16(&mut self, i: i16) { 51 | self.0 = i as u64 52 | } 53 | 54 | #[inline] 55 | fn write_i32(&mut self, i: i32) { 56 | self.0 = i as u64 57 | } 58 | 59 | #[inline] 60 | fn write_i64(&mut self, i: i64) { 61 | self.0 = i as u64 62 | } 63 | 64 | #[inline] 65 | fn write_isize(&mut self, i: isize) { 66 | self.0 = i as u64 67 | } 68 | } 69 | 70 | #[cfg(test)] 71 | mod test { 72 | use super::*; 73 | 74 | use core::hash::{BuildHasher, BuildHasherDefault}; 75 | 76 | #[test] 77 | fn test() { 78 | let mut hasher = BuildHasherDefault::::default().build_hasher(); 79 | 80 | hasher.write_i8(7); 81 | assert_eq!(hasher.finish(), 7u64); 82 | 83 | hasher.write_u8(251); 84 | assert_eq!(hasher.finish(), 251u64); 85 | 86 | hasher.write_i16(996); 87 | assert_eq!(hasher.finish(), 996u64); 88 | 89 | hasher.write_u16(996); 90 | assert_eq!(hasher.finish(), 996u64); 91 | 92 | hasher.write_i32(996); 93 | assert_eq!(hasher.finish(), 996u64); 94 | 95 | hasher.write_u32(996); 96 | assert_eq!(hasher.finish(), 996u64); 97 | 98 | hasher.write_i64(996); 99 | assert_eq!(hasher.finish(), 996u64); 100 | 101 | hasher.write_u64(996); 102 | assert_eq!(hasher.finish(), 996u64); 103 | 104 | hasher.write_isize(996); 105 | assert_eq!(hasher.finish(), 996u64); 106 | 107 | hasher.write_usize(996); 108 | assert_eq!(hasher.finish(), 996u64); 109 | } 110 | 111 | #[test] 112 | #[should_panic] 113 | fn not_support() { 114 | let mut hasher = BuildHasherDefault::::default().build_hasher(); 115 | hasher.write_i128(996); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /core/src/util/spin.rs: -------------------------------------------------------------------------------- 1 | //! A strict spin lock. No yield, no spin loop hint. 2 | 3 | use core::{ 4 | cell::UnsafeCell, 5 | fmt, 6 | ops::{Deref, DerefMut}, 7 | sync::atomic::{AtomicBool, Ordering}, 8 | }; 9 | 10 | pub struct SpinLock { 11 | locked: AtomicBool, 12 | value: UnsafeCell, 13 | } 14 | 15 | impl fmt::Debug for SpinLock { 16 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 17 | match self.try_lock() { 18 | Some(guard) => write!(f, "SpinLock {{ value: ") 19 | .and_then(|()| (*guard).fmt(f)) 20 | .and_then(|()| write!(f, "}}")), 21 | None => write!(f, "SpinLock {{ }}"), 22 | } 23 | } 24 | } 25 | 26 | // SAFETY: As long as T is Send type the lock is Send is Sync. 
27 | unsafe impl Send for SpinLock {} 28 | unsafe impl Sync for SpinLock {} 29 | 30 | impl SpinLock { 31 | pub const fn new(value: T) -> Self { 32 | Self { 33 | locked: AtomicBool::new(false), 34 | value: UnsafeCell::new(value), 35 | } 36 | } 37 | 38 | #[inline] 39 | pub fn lock(&self) -> SpinGuard<'_, T> { 40 | while self 41 | .locked 42 | .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) 43 | .is_err() 44 | { 45 | #[allow(clippy::missing_spin_loop)] 46 | while self.locked.load(Ordering::Relaxed) {} 47 | } 48 | 49 | SpinGuard(self) 50 | } 51 | 52 | #[inline] 53 | pub fn into_inner(self) -> T { 54 | self.value.into_inner() 55 | } 56 | 57 | #[inline] 58 | pub fn try_lock(&self) -> Option> { 59 | if self.locked.swap(true, Ordering::Acquire) { 60 | None 61 | } else { 62 | Some(SpinGuard(self)) 63 | } 64 | } 65 | 66 | #[inline(always)] 67 | fn release(&self) { 68 | self.locked.store(false, Ordering::Release) 69 | } 70 | } 71 | 72 | pub struct SpinGuard<'a, T>(&'a SpinLock); 73 | 74 | impl fmt::Debug for SpinGuard<'_, T> { 75 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 76 | fmt::Debug::fmt(&self.0, f) 77 | } 78 | } 79 | 80 | impl Deref for SpinGuard<'_, T> { 81 | type Target = T; 82 | 83 | #[inline] 84 | fn deref(&self) -> &Self::Target { 85 | // SAFETY: 86 | // safe as spin guard is referencing the lock and have exclusive to value. 87 | unsafe { &*self.0.value.get() } 88 | } 89 | } 90 | 91 | impl DerefMut for SpinGuard<'_, T> { 92 | #[inline] 93 | fn deref_mut(&mut self) -> &mut Self::Target { 94 | // SAFETY: 95 | // same reason as Deref is safe. 96 | unsafe { &mut *self.0.value.get() } 97 | } 98 | } 99 | 100 | impl Drop for SpinGuard<'_, T> { 101 | #[inline] 102 | fn drop(&mut self) { 103 | self.0.release(); 104 | } 105 | } 106 | 107 | #[cfg(test)] 108 | mod test { 109 | use super::*; 110 | 111 | use core::ptr; 112 | 113 | use alloc::sync::Arc; 114 | 115 | use std::time::Duration; 116 | 117 | #[test] 118 | fn lock() { 119 | let spin = Arc::new(SpinLock::new(0usize)); 120 | 121 | let spin2 = spin.clone(); 122 | let handle = std::thread::spawn(move || { 123 | for _ in 0..99 { 124 | { 125 | let mut guard = spin.lock(); 126 | *guard += 1; 127 | } 128 | std::thread::sleep(Duration::from_nanos(1)); 129 | } 130 | }); 131 | 132 | let handle2 = std::thread::spawn(move || { 133 | while *spin2.lock() != 99 { 134 | std::thread::sleep(Duration::from_nanos(1)); 135 | } 136 | }); 137 | 138 | handle.join().unwrap(); 139 | handle2.join().unwrap(); 140 | } 141 | 142 | #[test] 143 | fn try_lock() { 144 | let spin = Arc::new(SpinLock::new(0usize)); 145 | 146 | let spin2 = spin.clone(); 147 | let handle = std::thread::spawn(move || { 148 | for _ in 0..99 { 149 | { 150 | let mut guard = spin.lock(); 151 | *guard += 1; 152 | } 153 | std::thread::sleep(Duration::from_nanos(1)); 154 | } 155 | }); 156 | 157 | let handle2 = std::thread::spawn(move || loop { 158 | if let Some(guard) = spin2.try_lock() { 159 | if *guard == 99 { 160 | return; 161 | } else { 162 | drop(guard); 163 | std::thread::sleep(Duration::from_nanos(1)); 164 | } 165 | } 166 | }); 167 | 168 | handle.join().unwrap(); 169 | handle2.join().unwrap(); 170 | } 171 | 172 | #[test] 173 | fn send_non_sync() { 174 | #[allow(dead_code)] 175 | struct Foo { 176 | v: *const (), 177 | } 178 | 179 | unsafe impl Send for Foo {} 180 | 181 | let spin = Arc::new(SpinLock::new(Foo { v: ptr::null() })); 182 | 183 | let spin2 = spin.clone(); 184 | let handle = std::thread::spawn(move || { 185 | let _guard = spin.lock(); 186 | }); 187 | 
188 | let handle2 = std::thread::spawn(move || { 189 | let _ = spin2.lock(); 190 | }); 191 | 192 | handle.join().unwrap(); 193 | handle2.join().unwrap(); 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /core/src/util/spsc.rs: -------------------------------------------------------------------------------- 1 | //! A bounded single-producer single-consumer queue. 2 | 3 | #![allow(clippy::non_send_fields_in_send_ty)] 4 | 5 | use core::{ 6 | fmt, 7 | marker::PhantomData, 8 | mem, 9 | sync::atomic::{AtomicUsize, Ordering}, 10 | }; 11 | 12 | use alloc::{sync::Arc, vec::Vec}; 13 | 14 | use crossbeam_utils::CachePadded; 15 | 16 | /// The inner representation of a single-producer single-consumer queue. 17 | struct Inner { 18 | /// The head of the queue. 19 | /// 20 | /// This integer is in range `0 .. 2 * cap`. 21 | head: CachePadded, 22 | 23 | /// The tail of the queue. 24 | /// 25 | /// This integer is in range `0 .. 2 * cap`. 26 | tail: CachePadded, 27 | 28 | /// The buffer holding slots. 29 | buffer: *mut T, 30 | 31 | /// The queue capacity. 32 | cap: usize, 33 | 34 | /// Indicates that dropping a `Buffer` may drop elements of type `T`. 35 | _marker: PhantomData, 36 | } 37 | 38 | impl Inner { 39 | /// Returns a pointer to the slot at position `pos`. 40 | /// 41 | /// The position must be in range `0 .. 2 * cap`. 42 | #[inline] 43 | unsafe fn slot(&self, pos: usize) -> *mut T { 44 | if pos < self.cap { 45 | self.buffer.add(pos) 46 | } else { 47 | self.buffer.add(pos - self.cap) 48 | } 49 | } 50 | 51 | /// Increments a position by going one slot forward. 52 | /// 53 | /// The position must be in range `0 .. 2 * cap`. 54 | #[inline] 55 | fn increment(&self, pos: usize) -> usize { 56 | if pos < 2 * self.cap - 1 { 57 | pos + 1 58 | } else { 59 | 0 60 | } 61 | } 62 | 63 | /// Returns the distance between two positions. 64 | /// 65 | /// Positions must be in range `0 .. 2 * cap`. 66 | #[inline] 67 | fn distance(&self, a: usize, b: usize) -> usize { 68 | if a <= b { 69 | b - a 70 | } else { 71 | 2 * self.cap - a + b 72 | } 73 | } 74 | } 75 | 76 | impl Drop for Inner { 77 | fn drop(&mut self) { 78 | let mut head = self.head.load(Ordering::Relaxed); 79 | let tail = self.tail.load(Ordering::Relaxed); 80 | 81 | // Loop over all slots that hold a value and drop them. 82 | while head != tail { 83 | unsafe { 84 | self.slot(head).drop_in_place(); 85 | } 86 | head = self.increment(head); 87 | } 88 | 89 | // Finally, deallocate the buffer, but don't run any destructors. 90 | unsafe { 91 | Vec::from_raw_parts(self.buffer, 0, self.cap); 92 | } 93 | } 94 | } 95 | 96 | /// Creates a bounded single-producer single-consumer queue with the given capacity. 97 | /// 98 | /// Returns the producer and the consumer side for the queue. 99 | /// 100 | /// # Panics 101 | /// 102 | /// Panics if the capacity is zero. 103 | /// 104 | pub fn new(cap: usize) -> (Producer, Consumer) { 105 | assert!(cap > 0, "capacity must be non-zero"); 106 | 107 | // Allocate a buffer of length `cap`. 
108 | let buffer = { 109 | let mut v = Vec::::with_capacity(cap); 110 | let ptr = v.as_mut_ptr(); 111 | mem::forget(v); 112 | ptr 113 | }; 114 | 115 | let inner = Arc::new(Inner { 116 | head: CachePadded::new(AtomicUsize::new(0)), 117 | tail: CachePadded::new(AtomicUsize::new(0)), 118 | buffer, 119 | cap, 120 | _marker: PhantomData, 121 | }); 122 | 123 | let p = Producer { 124 | inner: inner.clone(), 125 | head: 0, 126 | tail: 0, 127 | }; 128 | 129 | let c = Consumer { 130 | inner, 131 | head: 0, 132 | tail: 0, 133 | }; 134 | 135 | (p, c) 136 | } 137 | 138 | /// The producer side of a bounded single-producer single-consumer queue. 139 | pub struct Producer { 140 | /// The inner representation of the queue. 141 | inner: Arc>, 142 | 143 | /// A copy of `inner.head` for quick access. 144 | /// 145 | /// This value can be stale and sometimes needs to be resynchronized with `inner.head`. 146 | head: usize, 147 | 148 | /// A copy of `inner.tail` for quick access. 149 | /// 150 | /// This value is always in sync with `inner.tail`. 151 | tail: usize, 152 | } 153 | 154 | unsafe impl Send for Producer {} 155 | 156 | impl Producer { 157 | /// Attempts to push an element into the queue. 158 | /// 159 | /// If the queue is full, the element is returned back as an error. 160 | pub fn push(&mut self, value: T) -> Result<(), PushError> { 161 | let mut head = self.head; 162 | let mut tail = self.tail; 163 | 164 | // Check if the queue is *possibly* full. 165 | if self.inner.distance(head, tail) == self.inner.cap { 166 | // We need to refresh the head and check again if the queue is *really* full. 167 | head = self.inner.head.load(Ordering::Acquire); 168 | self.head = head; 169 | 170 | // Is the queue *really* full? 171 | if self.inner.distance(head, tail) == self.inner.cap { 172 | return Err(PushError(value)); 173 | } 174 | } 175 | 176 | // Write the value into the tail slot. 177 | unsafe { 178 | self.inner.slot(tail).write(value); 179 | } 180 | 181 | // Move the tail one slot forward. 182 | tail = self.inner.increment(tail); 183 | self.inner.tail.store(tail, Ordering::Release); 184 | self.tail = tail; 185 | 186 | Ok(()) 187 | } 188 | 189 | // /// Returns the capacity of the queue. 190 | // pub fn capacity(&self) -> usize { 191 | // self.inner.cap 192 | // } 193 | } 194 | 195 | impl fmt::Debug for Producer { 196 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 197 | f.pad("Producer { .. }") 198 | } 199 | } 200 | 201 | /// The consumer side of a bounded single-producer single-consumer queue. 202 | pub struct Consumer { 203 | /// The inner representation of the queue. 204 | inner: Arc>, 205 | 206 | /// A copy of `inner.head` for quick access. 207 | /// 208 | /// This value is always in sync with `inner.head`. 209 | head: usize, 210 | 211 | /// A copy of `inner.tail` for quick access. 212 | /// 213 | /// This value can be stale and sometimes needs to be resynchronized with `inner.tail`. 214 | tail: usize, 215 | } 216 | 217 | unsafe impl Send for Consumer {} 218 | 219 | impl Consumer { 220 | /// Attempts to pop an element from the queue. 221 | /// 222 | /// If the queue is empty, an error is returned. 223 | pub fn pop(&mut self) -> Result { 224 | let mut head = self.head; 225 | let mut tail = self.tail; 226 | 227 | // Check if the queue is *possibly* empty. 228 | if head == tail { 229 | // We need to refresh the tail and check again if the queue is *really* empty. 230 | tail = self.inner.tail.load(Ordering::Acquire); 231 | self.tail = tail; 232 | 233 | // Is the queue *really* empty? 
234 | if head == tail { 235 | return Err(PopError); 236 | } 237 | } 238 | 239 | // Read the value from the head slot. 240 | let value = unsafe { self.inner.slot(head).read() }; 241 | 242 | // Move the head one slot forward. 243 | head = self.inner.increment(head); 244 | self.inner.head.store(head, Ordering::Release); 245 | self.head = head; 246 | 247 | Ok(value) 248 | } 249 | 250 | // /// Returns the capacity of the queue. 251 | // pub fn capacity(&self) -> usize { 252 | // self.inner.cap 253 | // } 254 | } 255 | 256 | impl fmt::Debug for Consumer { 257 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 258 | f.pad("Consumer { .. }") 259 | } 260 | } 261 | 262 | /// Error which occurs when popping from an empty queue. 263 | #[derive(Clone, Copy, Eq, PartialEq)] 264 | pub struct PopError; 265 | 266 | impl fmt::Debug for PopError { 267 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 268 | "PopError".fmt(f) 269 | } 270 | } 271 | 272 | impl fmt::Display for PopError { 273 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 274 | "popping from an empty queue".fmt(f) 275 | } 276 | } 277 | 278 | /// Error which occurs when pushing into a full queue. 279 | #[derive(Clone, Copy, Eq, PartialEq)] 280 | pub struct PushError(pub T); 281 | 282 | impl fmt::Debug for PushError { 283 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 284 | "PushError(..)".fmt(f) 285 | } 286 | } 287 | 288 | impl fmt::Display for PushError { 289 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 290 | "pushing into a full queue".fmt(f) 291 | } 292 | } 293 | -------------------------------------------------------------------------------- /core/src/worker.rs: -------------------------------------------------------------------------------- 1 | use super::api::API; 2 | use super::strategy::{Context, Strategy}; 3 | use super::util::{ 4 | channel::{Receiver, Sender}, 5 | pin_to_core::{self, CoreId}, 6 | }; 7 | 8 | pub struct Worker 9 | where 10 | S: Strategy, 11 | A: API, 12 | { 13 | strategy: S, 14 | sender: Sender, 15 | receiver: Receiver, 16 | } 17 | 18 | impl Worker 19 | where 20 | S: Strategy + 'static, 21 | A: API + 'static, 22 | { 23 | pub(super) fn new( 24 | strategy: S, 25 | sender: Sender, 26 | receiver: Receiver, 27 | ) -> Self { 28 | Self { 29 | strategy, 30 | sender, 31 | receiver, 32 | } 33 | } 34 | 35 | #[inline] 36 | pub(super) fn run_in_core(self, id: Option) { 37 | std::thread::spawn(move || { 38 | pin_to_core::pin_to_core(id); 39 | 40 | #[cfg(feature = "async")] 41 | { 42 | crate::util::async_runtime::StdRuntime::new().block_on(self.run()) 43 | } 44 | 45 | #[cfg(not(feature = "async"))] 46 | { 47 | self.run() 48 | } 49 | }); 50 | } 51 | 52 | #[cfg(not(feature = "async"))] 53 | #[inline(always)] 54 | pub(super) fn run(self) { 55 | let Self { 56 | mut strategy, 57 | sender, 58 | mut receiver, 59 | } = self; 60 | 61 | let ctx = &mut Context::new(sender); 62 | 63 | strategy.on_start(ctx); 64 | 65 | loop { 66 | if let Ok(msg) = receiver.recv() { 67 | strategy.call(msg, ctx); 68 | } 69 | strategy.on_idle(ctx); 70 | } 71 | } 72 | 73 | #[cfg(feature = "async")] 74 | #[inline(always)] 75 | pub(super) async fn run(self) { 76 | let Self { 77 | mut strategy, 78 | sender, 79 | mut receiver, 80 | } = self; 81 | 82 | let ctx = &mut Context::new(sender); 83 | 84 | strategy.on_start(ctx); 85 | 86 | loop { 87 | if let Ok(msg) = receiver.recv().await { 88 | strategy.call(msg, ctx); 89 | } 90 | strategy.on_idle(ctx); 91 | } 92 | } 93 | } 94 | 
-------------------------------------------------------------------------------- /examples/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "examples" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [[example]] 7 | name = "websocket" 8 | path = "./src/websocket.rs" 9 | required-features = ["flashfunk-core/async"] 10 | 11 | [[example]] 12 | name = "log" 13 | path = "./src/log.rs" 14 | 15 | [dependencies] 16 | flashfunk-core = { version = "0.6" } 17 | owned-log = { version = "0.1" } 18 | 19 | crossbeam-queue = "0.3.5" 20 | core_affinity = "0.8" 21 | futures-util = { version = "0.3.30", features = ["sink"] } 22 | mimalloc = { version = "0.1.28", default-features = false } 23 | tokio = { version = "1.13", features = ["macros", "rt"] } 24 | xitca-client = { version = "0.1", features = ["rustls", "websocket"] } 25 | -------------------------------------------------------------------------------- /examples/src/log.rs: -------------------------------------------------------------------------------- 1 | #[global_allocator] 2 | static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc; 3 | 4 | use std::sync::{ 5 | atomic::{AtomicBool, Ordering}, 6 | Arc, 7 | }; 8 | 9 | use core_affinity::{set_for_current, CoreId}; 10 | use crossbeam_queue::ArrayQueue; 11 | use owned_log::{OwnedLog, Value}; 12 | 13 | fn main() { 14 | MyLogger::with_handler( 15 | |mut value| { 16 | let value = value.downcast_mut::().unwrap(); 17 | println!("value is {:?}", value.0); 18 | value.display(); 19 | }, 20 | Some(1), 21 | ); 22 | 23 | struct MyValue(usize); 24 | 25 | impl Value for MyValue { 26 | fn display(&mut self) { 27 | println!("are we slow??"); 28 | } 29 | } 30 | 31 | for _ in 0..99 { 32 | owned_log::log!(MyValue(1)); 33 | } 34 | 35 | let flag = Arc::new(AtomicBool::new(false)); 36 | 37 | let flag1 = flag.clone(); 38 | let handle = std::thread::spawn(move || { 39 | for _ in 0..8 { 40 | std::thread::sleep(std::time::Duration::from_millis(500)); 41 | flag1.store(true, Ordering::Relaxed); 42 | } 43 | }); 44 | 45 | let mut total = 0u128; 46 | let mut time = 0; 47 | 48 | while time < 8 { 49 | if flag.swap(false, Ordering::SeqCst) { 50 | let value = MyValue(2); 51 | let now = std::time::Instant::now(); 52 | owned_log::log!(value); 53 | total += now.elapsed().as_nanos(); 54 | time += 1; 55 | } 56 | } 57 | 58 | println!("average time is {:?} ns", total / 8); 59 | 60 | handle.join().unwrap(); 61 | } 62 | 63 | struct MyLogger(Arc>>); 64 | 65 | impl OwnedLog for MyLogger { 66 | fn log(&self, value: Box) { 67 | self.0.push(value).ok().unwrap(); 68 | } 69 | } 70 | 71 | impl MyLogger { 72 | pub fn with_handler(mut func: F, id: Option) 73 | where 74 | F: FnMut(Box) -> () + Send + 'static, 75 | { 76 | let queue = Arc::new(ArrayQueue::new(256)); 77 | 78 | owned_log::OWNED_LOGGER 79 | .set(Arc::new(MyLogger(queue.clone())) as _) 80 | .ok() 81 | .unwrap(); 82 | 83 | std::thread::spawn(move || { 84 | set_for_current(CoreId { 85 | id: id.unwrap_or(0), 86 | }); 87 | loop { 88 | match queue.pop() { 89 | Some(msg) => func(msg), 90 | None => std::thread::sleep(std::time::Duration::from_millis(100)), 91 | } 92 | } 93 | }); 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /examples/src/websocket.rs: -------------------------------------------------------------------------------- 1 | //! An example async API for driving strategy with websocket message. 
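//!
//! This example is gated on the `flashfunk-core/async` feature (see `required-features` in
//! `examples/Cargo.toml`); a typical invocation from the workspace root would be something like
//! `cargo run -p examples --example websocket --features flashfunk-core/async`.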
2 | 3 | use std::io; 4 | 5 | use flashfunk_core::{ 6 | api::API, 7 | strategy::{Context, Strategy}, 8 | util::{ 9 | channel::{GroupReceiver, GroupSender}, 10 | fx_hasher::FxHasher, 11 | }, 12 | }; 13 | use futures_util::{SinkExt, StreamExt}; 14 | use xitca_client::{bytes::Bytes, error::Error, http::Version, ws::Message, Client}; 15 | 16 | struct WsAPI; 17 | 18 | struct StrategyMessage(String); 19 | 20 | impl API for WsAPI { 21 | type Symbol = &'static str; 22 | type Hasher = FxHasher; 23 | type SndMessage = Bytes; 24 | type RecvMessage = StrategyMessage; 25 | 26 | fn run( 27 | self, 28 | mut sender: GroupSender, 29 | mut receiver: GroupReceiver, 30 | ) { 31 | let res = tokio::runtime::Builder::new_current_thread() 32 | .enable_all() 33 | .build() 34 | .unwrap() 35 | .block_on(async move { 36 | let client = Client::builder() 37 | .rustls() 38 | .set_max_http_version(Version::HTTP_11) 39 | .set_pool_capacity(8) 40 | .finish(); 41 | 42 | let mut ws = client.ws("wss://ws.kraken.com/").send().await?; 43 | 44 | let msg = ws.next().await.unwrap()?; 45 | println!("Connected: {:?}", msg); 46 | 47 | ws.send(Message::Text(Bytes::from("{\"event\":\"subscribe\", \"subscription\":{\"name\":\"ticker\"}, \"pair\":[\"BTC/USD\"]}"))).await?; 48 | 49 | loop { 50 | tokio::select! { 51 | res = ws.next() => { 52 | let msg = res.ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; 53 | match msg { 54 | Message::Text(bytes) | Message::Binary(bytes) => sender.send_all(bytes), 55 | Message::Ping(bytes) => ws.send(Message::Pong(bytes)).await?, 56 | Message::Close(reason) => { 57 | ws.send(Message::Close(reason)).await?; 58 | return Ok::<_, Error>(()) 59 | }, 60 | _ => {} 61 | } 62 | } 63 | res = receiver.recv() => { 64 | let msg = res.unwrap(); 65 | println!("Message from WsStrategy: {}", msg.0); 66 | } 67 | } 68 | } 69 | }); 70 | 71 | if let Err(e) = res { 72 | println!("API exit with error: {:?}", e); 73 | } 74 | } 75 | } 76 | 77 | struct WsStrategy { 78 | symbols: [&'static str; 1], 79 | } 80 | 81 | impl Strategy for WsStrategy { 82 | fn symbol(&self) -> &[&'static str] { 83 | self.symbols.as_slice() 84 | } 85 | 86 | fn call(&mut self, msg: Bytes, ctx: &mut Context) { 87 | println!("Message from WsAPI: {}\r\n", String::from_utf8_lossy(&msg)); 88 | ctx.sender().send(StrategyMessage( 89 | self.symbol() 90 | .first() 91 | .map(|s| s.to_string()) 92 | .unwrap_or_else(String::new), 93 | )) 94 | } 95 | } 96 | 97 | fn main() { 98 | let st = WsStrategy { 99 | symbols: ["da_gong_ren"], 100 | }; 101 | 102 | WsAPI 103 | .into_builder([st]) 104 | .disable_pin_to_core() 105 | .message_capacity(128) 106 | .build(); 107 | } 108 | -------------------------------------------------------------------------------- /log/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "owned-log" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | [features] 7 | default = [] 8 | 9 | [dependencies] 10 | once_cell = "1.10.0" 11 | -------------------------------------------------------------------------------- /log/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod no_op; 2 | mod value; 3 | 4 | use std::sync::Arc; 5 | 6 | use once_cell::sync::OnceCell; 7 | 8 | pub use crate::value::Value; 9 | 10 | /// Trait for customizable logger that take in ownership of the loggable value. 11 | pub trait OwnedLog: Send + Sync + 'static { 12 | fn log(&self, value: Box); 13 | } 14 | 15 | /// Global static instance of logger object. 
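///
/// Installation sketch (mirrors `examples/src/log.rs`; `MyLogger` and `MyValue` stand in for any
/// [`OwnedLog`] and [`Value`] implementors). Until a logger is set, the `log!` macro falls back to
/// the bundled no-op logger:
/// ```ignore
/// OWNED_LOGGER.set(Arc::new(MyLogger) as _).ok().unwrap();
/// owned_log::log!(MyValue(1));
/// ```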
16 | pub static OWNED_LOGGER: OnceCell> = OnceCell::new(); 17 | 18 | #[macro_export] 19 | macro_rules! log { 20 | ($value: expr) => { 21 | ::owned_log::__private::OWNED_LOGGER_LOCAL.with(|logger| logger.log(Box::new($value))); 22 | }; 23 | } 24 | 25 | // private public module to hide api call from exported macro. 26 | #[doc(hidden)] 27 | pub mod __private { 28 | use super::*; 29 | 30 | thread_local! { 31 | /// A thread local cache of global static [OWNED_LOGGER]. 32 | pub static OWNED_LOGGER_LOCAL: Arc = { 33 | OWNED_LOGGER.get_or_init(|| Arc::new(crate::no_op::NoOpLogger)).clone() 34 | }; 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /log/src/no_op.rs: -------------------------------------------------------------------------------- 1 | use crate::{OwnedLog, Value}; 2 | 3 | // default logger that do nothing. 4 | pub struct NoOpLogger; 5 | 6 | impl OwnedLog for NoOpLogger { 7 | fn log(&self, _: Box) {} 8 | } 9 | -------------------------------------------------------------------------------- /log/src/value.rs: -------------------------------------------------------------------------------- 1 | use std::any::TypeId; 2 | 3 | /// Trait of objectify loggable value. 4 | pub trait Value: Send + sealed::GetTypeId + 'static { 5 | fn display(&mut self); 6 | } 7 | 8 | #[doc(hidden)] 9 | mod sealed { 10 | use super::*; 11 | 12 | pub trait GetTypeId: 'static { 13 | fn get_type_id(&self) -> TypeId { 14 | TypeId::of::() 15 | } 16 | } 17 | 18 | impl GetTypeId for T {} 19 | } 20 | 21 | impl dyn Value { 22 | /// Downcast a dyn Value trait object to immutable reference of concrete type. 23 | pub fn downcast_ref(&self) -> Option<&T> { 24 | if sealed::GetTypeId::get_type_id(self) == TypeId::of::() { 25 | // SAFETY: 26 | // This is safe as cast only happen when TypeId is the same. 27 | unsafe { Some(&*(self as *const dyn Value as *const T)) } 28 | } else { 29 | None 30 | } 31 | } 32 | 33 | /// Downcast a dyn Value trait object to mutable reference of concrete type. 34 | pub fn downcast_mut(&mut self) -> Option<&mut T> { 35 | if sealed::GetTypeId::get_type_id(self) == TypeId::of::() { 36 | // SAFETY: 37 | // This is safe as cast only happen when TypeId is the same. 38 | unsafe { Some(&mut *(self as *mut dyn Value as *mut T)) } 39 | } else { 40 | None 41 | } 42 | } 43 | } 44 | 45 | #[cfg(test)] 46 | mod tests { 47 | use super::*; 48 | 49 | #[test] 50 | fn test_any_casting() { 51 | struct MyValue; 52 | 53 | impl Value for MyValue { 54 | fn display(&mut self) { 55 | todo!() 56 | } 57 | } 58 | 59 | let mut value = Box::new(MyValue) as Box; 60 | 61 | value.downcast_mut::().unwrap(); 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 |

--------------------------------------------------------------------------------