├── .gitignore
├── server
│   ├── src
│   │   ├── servers
│   │   │   ├── mod.rs
│   │   │   └── websocket_server.rs
│   │   ├── listeners
│   │   │   ├── mod.rs
│   │   │   ├── order_book
│   │   │   │   ├── state.rs
│   │   │   │   ├── utils.rs
│   │   │   │   └── mod.rs
│   │   │   └── directory.rs
│   │   ├── prelude.rs
│   │   ├── lib.rs
│   │   ├── types
│   │   │   ├── node_data.rs
│   │   │   ├── mod.rs
│   │   │   ├── inner.rs
│   │   │   └── subscription.rs
│   │   └── order_book
│   │       ├── types.rs
│   │       ├── levels.rs
│   │       ├── linked_list.rs
│   │       ├── mod.rs
│   │       └── multi_book.rs
│   └── Cargo.toml
├── rustfmt.toml
├── binaries
│   ├── Cargo.toml
│   └── src
│       └── bin
│           ├── websocket_server.rs
│           └── example_client.rs
├── README.md
└── Cargo.toml

/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | 
--------------------------------------------------------------------------------
/server/src/servers/mod.rs:
--------------------------------------------------------------------------------
1 | pub(crate) mod websocket_server;
2 | 
--------------------------------------------------------------------------------
/server/src/listeners/mod.rs:
--------------------------------------------------------------------------------
1 | pub(crate) mod directory;
2 | pub(crate) mod order_book;
3 | 
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | imports_granularity = "Crate"
2 | use_small_heuristics = "Max"
3 | max_width = 120
4 | group_imports = "StdExternalCrate"
5 | 
--------------------------------------------------------------------------------
/server/src/prelude.rs:
--------------------------------------------------------------------------------
1 | pub(crate) type Error = Box<dyn std::error::Error + Send + Sync>;
2 | pub type Result<T> = std::result::Result<T, Error>;
3 | pub(crate) use std::{fmt::Debug, fs, io};
--------------------------------------------------------------------------------
/server/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![cfg_attr(test, allow(clippy::unwrap_used, clippy::expect_used))]
2 | mod listeners;
3 | mod order_book;
4 | mod prelude;
5 | mod servers;
6 | mod types;
7 | 
8 | pub use prelude::Result;
9 | pub use servers::websocket_server::run_websocket_server;
10 | 
11 | pub const HL_NODE: &str = "hl-node";
12 | 
--------------------------------------------------------------------------------
/binaries/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "binaries"
3 | version = "0.1.0"
4 | edition = "2024"
5 | publish = false
6 | 
7 | [dependencies]
8 | server = { path = "../server" }
9 | env_logger = "0.11.8"
10 | tokio = { version = "1", features = ["full"] }
11 | futures-util = "0.3.31"
12 | tokio-tungstenite = "0.27.0"
13 | clap = { version = "4.5.42", features = ["derive"] }
14 | 
15 | [lints]
16 | workspace = true
17 | 
--------------------------------------------------------------------------------
/server/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "server"
3 | version = "0.1.0"
4 | edition = "2024"
5 | 
6 | [dependencies]
7 | chrono = { version = "0.4", features = ["serde", "clock"] }
8 | axum = { version = "0.8.4" }
9 | tokio = { version = "1", features = ["full"] }
10 | log = "0.4"
11 | notify = "8.0.0"
12 | slab = "0.4"
13 | itertools = "0.14.0"
14 | serde = { version = "1.0", features = ["derive"] }
15 | serde_json = "1.0"
16 | futures-util = "0.3.31"
17 | rayon = "1.10"
18 | alloy = "1.0.22"
19 | strum_macros = "0.27.2"
20 | reqwest = "0.12.22"
21 | yawc = { version = "0.2.6", features = ["axum"] }
22 | 
23 | [lints]
24 | workspace = true
25 | 
26 | [dev-dependencies]
27 | rand = "0.9.1"
28 | 
--------------------------------------------------------------------------------
/binaries/src/bin/websocket_server.rs:
--------------------------------------------------------------------------------
1 | #![allow(unused_crate_dependencies)]
2 | use std::net::Ipv4Addr;
3 | 
4 | use clap::Parser;
5 | use server::{Result, run_websocket_server};
6 | 
7 | #[derive(Debug, Parser)]
8 | #[command(author, version, about)]
9 | struct Args {
10 |     /// Server address (e.g., 0.0.0.0)
11 |     #[arg(long)]
12 |     address: Ipv4Addr,
13 | 
14 |     /// Server port (e.g., 8000)
15 |     #[arg(long)]
16 |     port: u16,
17 | 
18 |     /// Compression level for WebSocket connections.
19 |     /// Accepts values in the range `0..=9`.
20 |     /// * `0` – compression disabled.
21 |     /// * `1` – fastest compression, low compression ratio (default).
22 |     /// * `9` – slowest compression, highest compression ratio.
23 |     ///
24 |     /// The level is passed to `flate2::Compression::new(level)`; see the
25 |     /// `flate2` documentation for more info.
26 |     #[arg(long)]
27 |     websocket_compression_level: Option<u32>,
28 | }
29 | 
30 | #[tokio::main]
31 | async fn main() -> Result<()> {
32 |     env_logger::init();
33 | 
34 |     let args = Args::parse();
35 | 
36 |     let full_address = format!("{}:{}", args.address, args.port);
37 |     println!("Running websocket server on {full_address}");
38 | 
39 |     let compression_level = args.websocket_compression_level.unwrap_or(/* some compression */ 1);
40 |     run_websocket_server(&full_address, true, compression_level).await?;
41 | 
42 |     Ok(())
43 | }
44 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Local WebSocket Server
2 | 
3 | ## Disclaimer
4 | 
5 | This was a standalone project, not written by the Hyperliquid Labs core team. It is made available "as is", without warranty of any kind, express or implied, including but not limited to warranties of merchantability, fitness for a particular purpose, or noninfringement. Use at your own risk. It is intended for educational or illustrative purposes only and may be incomplete, insecure, or incompatible with future systems. No commitment is made to maintain, update, or fix any issues in this repository.
6 | 
7 | ## Functionality
8 | 
9 | This server provides the `l2book` and `trades` endpoints from [Hyperliquid’s official API](https://hyperliquid.gitbook.io/hyperliquid-docs/for-developers/api/websocket/subscriptions), with roughly the same interface.
10 | 
11 | - The `l2book` subscription now includes an optional field:
12 |   `n_levels`, which can be up to `100` and defaults to `20` (see the example after the Setup section below).
13 | - This server also introduces a new endpoint: `l4book`.
14 | 
15 | The `l4book` subscription first sends a snapshot of the entire book and then forwards order diffs by block. The subscription format is:
16 | 
17 | ```json
18 | {
19 |     "method": "subscribe",
20 |     "subscription": {
21 |         "type": "l4Book",
22 |         "coin": "<coin>"
23 |     }
24 | }
25 | ```
26 | 
27 | ## Setup
28 | 
29 | 1. Run a non-validating node (from [`hyperliquid-dex/node`](https://github.com/hyperliquid-dex/node)). The node must batch its output by block and must record fills, order statuses, and raw book diffs.
30 | 
31 | 2. Then run this local server:
32 | 
33 | ```bash
34 | cargo run --release --bin websocket_server -- --address 0.0.0.0 --port 8000
35 | ```
36 | 
37 | If this local server does not detect the node writing new events, it exits automatically after a timeout (currently set to 5 seconds).
38 | In addition, the local server periodically fetches order book snapshots from the node and compares them with its own internal state. If a difference is detected, it exits.
39 | 
40 | If you want logging, prepend the command with `RUST_LOG=info`.
41 | 
42 | The WebSocket server comes with compression built-in. The compression ratio can be tuned using the `--websocket-compression-level` flag, as shown below.
43 | 
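For example, a full invocation with logging and a custom compression level might look like this (a hypothetical invocation; the flag names come from the `Args` struct in `binaries/src/bin/websocket_server.rs`):

```bash
RUST_LOG=info cargo run --release --bin websocket_server -- \
    --address 0.0.0.0 --port 8000 --websocket-compression-level 6
```

And an `l2book` subscription exercising the optional aggregation fields described above might look like this (illustrative; the camelCase field names follow the wire format used in this repo's tests, and `BTC` is just a placeholder coin):

```json
{
    "method": "subscribe",
    "subscription": {
        "type": "l2Book",
        "coin": "BTC",
        "nSigFigs": 5,
        "mantissa": 5,
        "nLevels": 50
    }
}
```
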
44 | ## Caveats
45 | 
46 | - This server does **not** show untriggered trigger orders.
47 | - It currently **does not** support spot order books.
48 | - The current implementation batches node outputs by block, making the order book a few milliseconds slower than a streaming implementation.
49 | 
--------------------------------------------------------------------------------
/server/src/types/node_data.rs:
--------------------------------------------------------------------------------
1 | use std::path::{Path, PathBuf};
2 | 
3 | use alloy::primitives::Address;
4 | use chrono::NaiveDateTime;
5 | use serde::{Deserialize, Serialize};
6 | 
7 | use crate::{
8 |     order_book::{Coin, Oid},
9 |     types::{Fill, L4Order, OrderDiff},
10 | };
11 | 
12 | #[derive(Debug, Clone, Serialize, Deserialize)]
13 | pub(crate) struct NodeDataOrderDiff {
14 |     user: Address,
15 |     oid: u64,
16 |     px: String,
17 |     coin: String,
18 |     pub(crate) raw_book_diff: OrderDiff,
19 | }
20 | 
21 | impl NodeDataOrderDiff {
22 |     pub(crate) fn diff(&self) -> OrderDiff {
23 |         self.raw_book_diff.clone()
24 |     }
25 |     pub(crate) const fn oid(&self) -> Oid {
26 |         Oid::new(self.oid)
27 |     }
28 | 
29 |     pub(crate) fn coin(&self) -> Coin {
30 |         Coin::new(&self.coin)
31 |     }
32 | }
33 | 
34 | #[derive(Debug, Clone, Serialize, Deserialize)]
35 | pub(crate) struct NodeDataFill(pub Address, pub Fill);
36 | 
37 | #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
38 | pub(crate) struct NodeDataOrderStatus {
39 |     pub time: NaiveDateTime,
40 |     pub user: Address,
41 |     pub status: String,
42 |     pub order: L4Order,
43 | }
44 | 
45 | impl NodeDataOrderStatus {
46 |     pub(crate) fn is_inserted_into_book(&self) -> bool {
47 |         (self.status == "open" && !self.order.is_trigger && (self.order.tif != Some("Ioc".to_string())))
48 |             || (self.order.is_trigger && self.status == "triggered")
49 |     }
50 | }
51 | 
52 | #[derive(Clone, Copy, strum_macros::Display)]
53 | pub(crate) enum EventSource {
54 |     Fills,
55 |     OrderStatuses,
56 |     OrderDiffs,
57 | }
58 | 
59 | impl EventSource {
60 |     #[must_use]
61 |     pub(crate) fn event_source_dir(self, dir: &Path) -> PathBuf {
62 |         match self {
63 |             Self::Fills => dir.join("hl/data/node_fills_by_block"),
64 |             Self::OrderStatuses => dir.join("hl/data/node_order_statuses_by_block"),
65 |             Self::OrderDiffs => dir.join("hl/data/node_raw_book_diffs_by_block"),
66 |         }
67 |     }
68 | }
69 | 
70 | #[derive(Debug, Clone, Serialize, Deserialize)]
71 | pub(crate) struct Batch<T> {
72 |     local_time: NaiveDateTime,
73 |     block_time: NaiveDateTime,
74 |     block_number: u64,
75 |     events: Vec<T>,
76 | }
77 | 
78 | impl<T> Batch<T> {
79 |     #[allow(clippy::unwrap_used)]
80 |     pub(crate) fn block_time(&self) -> u64 {
81 |         self.block_time.and_utc().timestamp_millis().try_into().unwrap()
82 |     }
83 | 
84 |     pub(crate) const fn block_number(&self) -> u64 {
85 |         self.block_number
86 |     }
87 | 
88 |     pub(crate) fn events(self) -> Vec<T> {
89 |         self.events
90 |     }
91 | }
92 | 
--------------------------------------------------------------------------------
/binaries/src/bin/example_client.rs:
--------------------------------------------------------------------------------
1 | #![allow(unused_crate_dependencies)]
2 | use clap::{Parser, ValueEnum};
3 | use futures_util::{SinkExt, StreamExt};
4 | use server::Result;
5 | use tokio_tungstenite::{connect_async, tungstenite::Message};
6 | 
7 | #[derive(Debug, Clone, ValueEnum)]
8 | enum Subscription {
9 |     L2Book,
10 |     L4Book,
11 |     Trades,
12 | }
13 | 
14 | #[derive(Debug, Parser)]
15 | #[command(author, version, about)]
16 | struct Args {
17 |     /// WebSocket server address (e.g., 127.0.0.1)
18 |     #[arg(long)]
19 |     address: String,
20 | 
21 |     /// WebSocket server port (e.g., 8000)
22 |     #[arg(long)]
23 |     port: u16,
24 | 
25 |     /// Subscription type: l2-book, l4-book, trades
26 |     #[arg(long, value_enum, default_value_t = Subscription::L2Book)]
27 |     subscription: Subscription,
28 | }
29 | 
30 | #[tokio::main]
31 | async fn main() -> Result<()> {
32 |     let args = Args::parse();
33 | 
34 |     let url = format!("ws://{}:{}/ws", args.address, args.port);
35 |     println!("Connecting to {url}");
36 | 
37 |     let (ws_stream, _) = connect_async(url).await?;
38 |     println!("Connected!");
39 | 
40 |     let (mut write, mut read) = ws_stream.split();
41 | 
42 |     // Define subscription messages
43 |     let l2_book_sub =
44 |         r#"{"method":"subscribe","subscription":{"type":"l2Book","coin":"BTC","nSigFigs":5,"mantissa":5}}"#;
45 |     let l4_book_sub = r#"{"method":"subscribe","subscription":{"type":"l4Book","coin":"BTC"}}"#;
46 |     let trades_sub = r#"{"method":"subscribe","subscription":{"type":"trades","coin":"BTC"}}"#;
47 | 
48 |     // Choose subscription
49 |     match args.subscription {
50 |         Subscription::L2Book => write.send(Message::Text(l2_book_sub.into())).await?,
51 |         Subscription::L4Book => write.send(Message::Text(l4_book_sub.into())).await?,
52 |         Subscription::Trades => write.send(Message::Text(trades_sub.into())).await?,
53 |     }
54 | 
55 |     let mut msg_cnt = 0;
56 |     while let Some(msg) = read.next().await {
57 |         match msg {
58 |             Ok(Message::Text(txt)) => println!("Received text {msg_cnt}: {txt}"),
59 |             Ok(Message::Binary(bin)) => println!("Received binary: {bin:?}"),
60 |             Ok(Message::Ping(_)) => println!("Received ping"),
61 |             Ok(Message::Pong(_)) => println!("Received pong"),
62 |             Ok(Message::Close(frame)) => {
63 |                 println!("Received close: {frame:?}");
64 |                 break;
65 |             }
66 |             Ok(other) => println!("Received other message: {other:?}"),
67 |             Err(err) => {
68 |                 eprintln!("WebSocket error: {err}");
69 |                 break;
70 |             }
71 |         }
72 |         msg_cnt += 1;
73 |     }
74 | 
75 |     println!("Connection closed");
76 |     Ok(())
77 | }
78 | 
--------------------------------------------------------------------------------
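To exercise a running server with the bundled client, an invocation might look like this (illustrative; the `--subscription` values follow clap's kebab-case `ValueEnum` convention noted in the doc comment above):

```bash
cargo run --release --bin example_client -- \
    --address 127.0.0.1 --port 8000 --subscription l4-book
```
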
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = ["server", "binaries"]
3 | 
4 | resolver = "3"
5 | 
6 | [workspace.lints.clippy]
7 | correctness = { level = "warn", priority = -1 }
8 | suspicious = { level = "warn", priority = -1 }
9 | complexity = { level = "warn", priority = -1 }
10 | perf = { level = "warn", priority = -1 }
11 | style = { level = "warn", priority = -1 }
12 | pedantic = { level = "warn", priority = -1 }
13 | nursery = { level = "warn", priority = -1 }
14 | cargo = { level = "warn", priority = -1 }
15 | unwrap_used = { level = "warn", priority = -1 }
16 | expect_used = { level = "warn", priority = -1 }
17 | similar_names = { level = "warn", priority = -1 }
18 | cast_precision_loss = { level = "allow" }
19 | missing_panics_doc = { level = "allow" }
20 | module_name_repetitions = { level = "allow" }
21 | missing_errors_doc = { level = "allow" }
22 | needless_pass_by_value = { level = "allow" }
23 | enum_glob_use = { level = "allow" }
24 | float_cmp = { level = "allow" }
25 | iter_without_into_iter = { level = "allow" }
26 | into_iter_without_iter = { level = "allow" }
27 | derive_partial_eq_without_eq = { level = "allow" }
28 | fallible_impl_from = { level = "allow" }
29 | new_without_default = { level = "allow" }
30 | manual_range_contains = { level = "allow" }
31 | module_inception = { level = "allow" }
32 | cognitive_complexity = { level = "allow" }
33 | redundant_pub_crate = { level = "allow" }
34 | cargo_common_metadata = { level = "allow" }
35 | multiple_crate_versions = { level = "allow" }
36 | struct_field_names = { level = "allow" }
37 | struct_excessive_bools = { level = "allow" }
38 | 
39 | [workspace.lints.rust]
40 | absolute_paths_not_starting_with_crate = { level = "warn" }
41 | explicit_outlives_requirements = { level = "warn" }
42 | ffi_unwind_calls = { level = "warn" }
43 | keyword_idents = { level = "warn" }
44 | keyword_idents_2018 = { level = "warn" }
45 | keyword_idents_2024 = { level = "warn" }
46 | let_underscore_drop = { level = "warn" }
47 | macro_use_extern_crate = { level = "warn" }
48 | missing_abi = { level = "warn" }
49 | non_ascii_idents = { level = "warn" }
50 | non_local_definitions = { level = "warn" }
51 | redundant_lifetimes = { level = "warn" }
52 | rust_2021_incompatible_closure_captures = { level = "warn" }
53 | rust_2021_incompatible_or_patterns = { level = "warn" }
54 | rust_2021_prefixes_incompatible_syntax = { level = "warn" }
55 | rust_2021_prelude_collisions = { level = "warn" }
56 | single_use_lifetimes = { level = "warn" }
57 | trivial_casts = { level = "warn" }
58 | trivial_numeric_casts = { level = "warn" }
59 | unit_bindings = { level = "warn" }
60 | unreachable_pub = { level = "warn" }
61 | unsafe_code = { level = "warn" }
62 | unsafe_op_in_unsafe_fn = { level = "warn" }
63 | unstable_features = { level = "warn" }
64 | unused_crate_dependencies = { level = "warn" }
65 | unused_extern_crates = { level = "warn" }
66 | unused_lifetimes = { level = "warn" }
67 | unused_macro_rules = { level = "warn" }
68 | unused_qualifications = { level = "warn" }
69 | variant_size_differences = { level = "warn" }
70 | 
71 | 
--------------------------------------------------------------------------------
/server/src/order_book/types.rs:
--------------------------------------------------------------------------------
1 | use crate::prelude::*;
2 | use serde::{Deserialize, Serialize};
3 | use std::fmt::{Debug, Formatter};
4 | use std::ops::Add;
5 | 
6 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
7 | pub(crate) enum Side {
8 |     #[serde(rename = "A")]
9 |     Ask,
10 |     #[serde(rename = "B")]
11 |     Bid,
12 | }
13 | 
14 | #[derive(Debug, Clone, PartialEq, Eq, Hash)]
15 | pub(crate) struct Oid(u64);
16 | 
17 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
18 | pub(crate) struct Px(u64);
19 | 
20 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
21 | pub(crate) struct Sz(u64);
22 | 
23 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
24 | pub(crate) struct Coin(String);
25 | 
26 | impl Sz {
27 |     pub(crate) const fn new(value: u64) -> Self {
28 |         Self(value)
29 |     }
30 |     pub(super) const fn is_positive(self) -> bool {
31 |         self.0 > 0
32 |     }
33 |     pub(super) const fn is_zero(self) -> bool {
34 |         self.0 == 0
35 |     }
36 |     pub(crate) const fn value(self) -> u64 {
37 |         self.0
38 |     }
39 |     pub(crate) const fn decrement_sz(&mut self, dec: u64) {
40 |         self.0 = self.0.saturating_sub(dec);
41 |     }
42 | }
43 | 
44 | impl Px {
45 |     pub(crate) const fn new(value: u64) -> Self {
46 |         Self(value)
47 |     }
48 |     pub(crate) const fn value(self) -> u64 {
49 |         self.0
50 |     }
51 | }
52 | 
53 | impl Oid {
54 |     pub(crate) const fn new(value: u64) -> Self {
55 |         Self(value)
56 |     }
57 | }
58 | 
59 | pub(crate) trait InnerOrder: Clone {
60 |     fn coin(&self) -> Coin;
61 |     fn oid(&self) -> Oid;
62 |     fn side(&self) -> Side;
63 |     fn limit_px(&self) -> Px;
64 |     fn sz(&self) -> Sz;
65 |     fn decrement_sz(&mut self, dec: Sz);
66 |     fn fill(&mut self, maker_order: &mut Self) -> Sz;
67 |     fn modify_sz(&mut self, sz: Sz);
68 |     fn convert_trigger(&mut self, ts: u64);
69 | }
70 | 
71 | impl Coin {
72 |     pub(crate) fn new(coin: &str) -> Self {
73 |         Self(coin.to_string())
74 |     }
75 | 
76 |     pub(crate) fn value(&self) -> String {
77 |         self.0.clone()
78 |     }
79 | 
80 |     pub(crate) fn is_spot(&self) -> bool {
81 |         self.0.starts_with('@') || self.0 == "PURR/USDC"
82 |     }
83 | }
84 | 
85 | impl Add for Sz {
86 |     type Output = Self;
87 | 
88 |     fn add(self, rhs: Self) -> Self {
89 |         Self(self.0 + rhs.0)
90 |     }
91 | }
92 | 
93 | // Multiply all sizes and prices by 10^MAX_DECIMALS for ease of computation.
94 | const MULTIPLIER: f64 = 100_000_000.0;
95 | 
96 | impl Debug for Px {
97 |     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
98 |         write!(f, "{}", (self.value() as f64 / MULTIPLIER))
99 |     }
100 | }
101 | 
102 | impl Debug for Sz {
103 |     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
104 |         write!(f, "{}", (self.value() as f64 / MULTIPLIER))
105 |     }
106 | }
107 | 
108 | impl Px {
109 |     #[allow(clippy::cast_possible_truncation)]
110 |     #[allow(clippy::cast_sign_loss)]
111 |     pub(crate) fn parse_from_str(value: &str) -> Result<Self> {
112 |         let value = (value.parse::<f64>()? * MULTIPLIER).round() as u64;
113 |         Ok(Self::new(value))
114 |     }
115 | 
116 |     #[must_use]
117 |     pub(crate) fn to_str(self) -> String {
118 |         let s = format!("{:.8}", (self.value() as f64) / MULTIPLIER);
119 |         let s = s.trim_end_matches('0');
120 |         s.trim_end_matches('.').to_string()
121 |     }
122 | 
123 |     #[allow(clippy::cast_possible_truncation)]
124 |     #[allow(clippy::cast_sign_loss)]
125 |     pub(crate) fn num_digits(self) -> u32 {
126 |         if self.value() == 0 { 1 } else { (self.value() as f64).log10().floor() as u32 + 1 }
127 |     }
128 | }
129 | 
130 | impl Sz {
131 |     #[allow(clippy::cast_possible_truncation)]
132 |     #[allow(clippy::cast_sign_loss)]
133 |     pub(crate) fn parse_from_str(value: &str) -> Result<Self> {
134 |         let value = (value.parse::<f64>()? * MULTIPLIER).round() as u64;
135 |         Ok(Self::new(value))
136 |     }
137 | 
138 |     #[must_use]
139 |     pub(crate) fn to_str(self) -> String {
140 |         let s = format!("{:.8}", (self.value() as f64) / MULTIPLIER);
141 |         let s = s.trim_end_matches('0');
142 |         s.trim_end_matches('.').to_string()
143 |     }
144 | }
145 | 
--------------------------------------------------------------------------------
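Since every `Px` and `Sz` is a `u64` scaled by `1e8` (`MULTIPLIER`), parsing and formatting round-trip without float drift for up to eight decimals. A quick illustration, exercising only the functions defined above (not a test from the repo):

```rust
// Px stores prices as integers scaled by 1e8.
let px = Px::parse_from_str("106217.5")?;
assert_eq!(px.value(), 10_621_750_000_000); // 106217.5 * 1e8
assert_eq!(px.to_str(), "106217.5");        // trailing zeros are trimmed
assert_eq!(px.num_digits(), 14);            // digits of the *scaled* integer
```
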
/server/src/types/mod.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | 
3 | use alloy::primitives::Address;
4 | use serde::{Deserialize, Serialize};
5 | 
6 | use crate::{
7 |     order_book::types::Side,
8 |     types::node_data::{NodeDataFill, NodeDataOrderDiff, NodeDataOrderStatus},
9 | };
10 | 
11 | pub(crate) mod inner;
12 | pub(crate) mod node_data;
13 | pub(crate) mod subscription;
14 | 
15 | #[derive(Debug, Serialize, Deserialize)]
16 | pub(crate) struct Trade {
17 |     pub coin: String,
18 |     side: Side,
19 |     px: String,
20 |     sz: String,
21 |     hash: String,
22 |     time: u64,
23 |     tid: u64,
24 |     users: [Address; 2],
25 | }
26 | 
27 | #[derive(Debug, Eq, PartialEq, Serialize, Deserialize)]
28 | pub(crate) struct Level {
29 |     px: String,
30 |     sz: String,
31 |     n: usize,
32 | }
33 | 
34 | impl Level {
35 |     pub(crate) const fn new(px: String, sz: String, n: usize) -> Self {
36 |         Self { px, sz, n }
37 |     }
38 | }
39 | 
40 | #[derive(Debug, Serialize, Deserialize)]
41 | pub(crate) struct L2Book {
42 |     coin: String,
43 |     time: u64,
44 |     levels: [Vec<Level>; 2],
45 | }
46 | 
47 | #[derive(Debug, Serialize, Deserialize)]
48 | pub(crate) enum L4Book {
49 |     Snapshot { coin: String, time: u64, height: u64, levels: [Vec<L4Order>; 2] },
50 |     Updates(L4BookUpdates),
51 | }
52 | 
53 | impl L2Book {
54 |     pub(crate) const fn from_l2_snapshot(coin: String, snapshot: [Vec<Level>; 2], time: u64) -> Self {
55 |         Self { coin, time, levels: snapshot }
56 |     }
57 | }
58 | 
59 | impl Trade {
60 |     #[allow(clippy::unwrap_used)]
61 |     pub(crate) fn from_fills(mut fills: HashMap<Side, NodeDataFill>) -> Self {
62 |         let NodeDataFill(seller, ask_fill) = fills.remove(&Side::Ask).unwrap();
63 |         let NodeDataFill(buyer, bid_fill) = fills.remove(&Side::Bid).unwrap();
64 |         let ask_is_taker = ask_fill.crossed;
65 |         let side = if ask_is_taker { Side::Ask } else { Side::Bid };
66 |         let coin = ask_fill.coin.clone();
67 |         assert_eq!(coin, bid_fill.coin);
68 |         let tid = ask_fill.tid;
69 |         assert_eq!(tid, bid_fill.tid);
70 |         let px = ask_fill.px;
71 |         let sz = ask_fill.sz;
72 |         let hash = ask_fill.hash;
73 |         let time = ask_fill.time;
74 |         let users = [buyer, seller];
75 |         Self { coin, side, px, sz, hash, time, tid, users }
76 |     }
77 | }
78 | 
79 | #[derive(Debug, Serialize, Deserialize)]
80 | pub(crate) struct L4BookUpdates {
81 |     pub time: u64,
82 |     pub height: u64,
83 |     pub order_statuses: Vec<NodeDataOrderStatus>,
84 |     pub book_diffs: Vec<NodeDataOrderDiff>,
85 | }
86 | 
87 | impl L4BookUpdates {
88 |     pub(crate) const fn new(time: u64, height: u64) -> Self {
89 |         Self { time, height, order_statuses: Vec::new(), book_diffs: Vec::new() }
90 |     }
91 | }
92 | 
93 | // L4Order is the version of an order we want to serialize and deserialize directly
94 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
95 | #[serde(rename_all = "camelCase")]
96 | pub(crate) struct L4Order {
97 |     // when serializing, this field is found outside of this struct
98 |     // when deserializing, we move it into this struct
99 |     pub user: Option<Address>,
100 |     pub coin: String,
101 |     pub side: Side,
102 |     pub limit_px: String,
103 |     pub sz: String,
104 |     pub oid: u64,
105 |     pub timestamp: u64,
106 |     pub trigger_condition: String,
107 |     pub is_trigger: bool,
108 |     pub trigger_px: String,
109 |     pub is_position_tpsl: bool,
110 |     pub reduce_only: bool,
111 |     pub order_type: String,
112 |     pub tif: Option<String>,
113 |     pub cloid: Option<String>,
114 | }
115 | 
116 | #[derive(Debug, Clone, Serialize, Deserialize)]
117 | #[serde(rename_all = "camelCase")]
118 | pub(crate) enum OrderDiff {
119 |     #[serde(rename_all = "camelCase")]
120 |     New {
121 |         sz: String,
122 |     },
123 |     #[serde(rename_all = "camelCase")]
124 |     Update {
125 |         orig_sz: String,
126 |         new_sz: String,
127 |     },
128 |     Remove,
129 | }
130 | 
131 | #[derive(Debug, Clone, Serialize, Deserialize)]
132 | #[serde(rename_all = "camelCase")]
133 | pub(crate) struct Fill {
134 |     pub coin: String,
135 |     pub px: String,
136 |     pub sz: String,
137 |     pub side: Side,
138 |     pub time: u64,
139 |     pub start_position: String,
140 |     pub dir: String,
141 |     pub closed_pnl: String,
142 |     pub hash: String,
143 |     pub oid: u64,
144 |     pub crossed: bool,
145 |     pub fee: String,
146 |     pub tid: u64,
147 |     pub fee_token: String,
148 |     pub liquidation: Option<Liquidation>,
149 | }
150 | 
151 | #[derive(Debug, Clone, Serialize, Deserialize)]
152 | #[serde(rename_all = "camelCase")]
153 | pub(crate) struct Liquidation {
154 |     liquidated_user: String,
155 |     mark_px: String,
156 |     method: String,
157 | }
158 | 
--------------------------------------------------------------------------------
/server/src/order_book/levels.rs:
--------------------------------------------------------------------------------
1 | use crate::order_book::{InnerOrder, Oid, OrderBook, Px, Side, Snapshot, Sz, linked_list::LinkedList};
2 | use crate::types::Level;
3 | use crate::types::inner::InnerLevel;
4 | use std::collections::BTreeMap;
5 | 
6 | #[must_use]
7 | fn bucket(px: Px, side: Side, n_sig_figs: Option<u32>, mantissa: Option<u64>) -> Px {
8 |     let m = mantissa.unwrap_or(1);
9 |     n_sig_figs.map_or(px, |n| {
10 |         let digs = px.num_digits();
11 |         let p = digs.saturating_sub(n);
12 |         let inc = m * 10u64.pow(p);
13 |         match side {
14 |             Side::Ask => Px::new(px.value().div_ceil(inc) * inc),
15 |             Side::Bid => Px::new((px.value() / inc) * inc),
16 |         }
17 |     })
18 | }
19 | 
20 | impl<O: InnerOrder> OrderBook<O> {
21 |     #[must_use]
22 |     pub(crate) fn to_l2_snapshot(
23 |         &self,
24 |         n_levels: Option<usize>,
25 |         n_sig_figs: Option<u32>,
26 |         mantissa: Option<u64>,
27 |     ) -> Snapshot<InnerLevel> {
28 |         let bids = &self.bids;
29 |         let asks = &self.asks;
30 |         let bids = map_to_l2_levels(bids, Side::Bid, n_levels, n_sig_figs, mantissa);
31 |         let asks = map_to_l2_levels(asks, Side::Ask, n_levels, n_sig_figs, mantissa);
32 |         Snapshot([bids, asks])
33 |     }
34 | }
35 | 
36 | impl Snapshot<InnerLevel> {
37 |     #[must_use]
38 |     pub(crate) fn to_l2_snapshot(
39 |         &self,
40 |         n_levels: Option<usize>,
41 |         n_sig_figs: Option<u32>,
42 |         mantissa: Option<u64>,
43 |     ) -> Self {
44 |         let [bids, asks] = &self.0;
45 |         let bids = l2_levels_to_l2_levels(bids, Side::Bid, n_levels, n_sig_figs, mantissa);
46 |         let asks = l2_levels_to_l2_levels(asks, Side::Ask, n_levels, n_sig_figs, mantissa);
47 |         Self([bids, asks])
48 |     }
49 | 
50 |     pub(crate) fn export_inner_snapshot(self) -> [Vec<Level>; 2] {
51 |         self.0.map(|b| b.into_iter().map(Level::from).collect())
52 |     }
53 | }
54 | 
55 | #[must_use]
56 | fn l2_levels_to_l2_levels(
57 |     levels: &[InnerLevel],
58 |     side: Side,
59 |     n_levels: Option<usize>,
60 |     n_sig_figs: Option<u32>,
61 |     mantissa: Option<u64>,
62 | ) -> Vec<InnerLevel> {
63 |     let mut new_levels = Vec::new();
64 |     if n_levels == Some(0) {
65 |         return new_levels;
66 |     }
67 |     let mut cur_level: Option<InnerLevel> = None;
68 |     for level in levels {
69 |         if build_l2_level(&mut cur_level, &mut new_levels, n_levels, n_sig_figs, mantissa, side, level.clone()) {
70 |             break;
71 |         }
72 |     }
73 |     new_levels.extend(cur_level.take());
74 |     new_levels
75 | }
76 | 
77 | #[must_use]
78 | fn map_to_l2_levels<O: InnerOrder>(
79 |     orders: &BTreeMap<Px, LinkedList<Oid, O>>,
80 |     side: Side,
81 |     n_levels: Option<usize>,
82 |     n_sig_figs: Option<u32>,
83 |     mantissa: Option<u64>,
84 | ) -> Vec<InnerLevel> {
85 |     let mut levels = Vec::new();
86 |     if n_levels == Some(0) {
87 |         return levels;
88 |     }
89 |     let mut cur_level: Option<InnerLevel> = None;
90 |     let order_iter: Box<dyn Iterator<Item = (&Px, &LinkedList<Oid, O>)>> = match side {
91 |         Side::Ask => Box::new(orders.iter()),
92 |         Side::Bid => Box::new(orders.iter().rev()),
93 |     };
94 |     for (px, orders) in order_iter {
95 |         // could be done a bit more efficiently using caching
96 |         let sz = orders.fold(Sz::new(0), |sz, order| *sz = *sz + order.sz());
97 |         let n = orders.fold(0, |n, _| *n += 1);
98 |         if build_l2_level(
99 |             &mut cur_level,
100 |             &mut levels,
101 |             n_levels,
102 |             n_sig_figs,
103 |             mantissa,
104 |             side,
105 |             InnerLevel { px: *px, sz, n },
106 |         ) {
107 |             break;
108 |         }
109 |     }
110 |     levels.extend(cur_level.take());
111 |     levels
112 | }
113 | 
114 | pub(super) fn build_l2_level(
115 |     cur_level: &mut Option<InnerLevel>,
116 |     levels: &mut Vec<InnerLevel>,
117 |     n_levels: Option<usize>,
118 |     n_sig_figs: Option<u32>,
119 |     mantissa: Option<u64>,
120 |     side: Side,
121 |     level: InnerLevel,
122 | ) -> bool {
123 |     let new_bucket = cur_level.as_ref().is_none_or(|c| match side {
124 |         Side::Ask => level.px.value() > c.px.value(),
125 |         Side::Bid => level.px.value() < c.px.value(),
126 |     });
127 |     if new_bucket {
128 |         let bucket = bucket(level.px, side, n_sig_figs, mantissa);
129 |         levels.extend(cur_level.take());
130 |         if n_levels == Some(levels.len()) {
131 |             return true;
132 |         }
133 |         *cur_level = Some(InnerLevel { px: bucket, sz: level.sz, n: level.n });
134 |     } else if let Some(c) = cur_level.as_mut() {
135 |         c.sz = level.sz + c.sz;
136 |         c.n += level.n;
137 |     }
138 |     false
139 | }
140 | 
--------------------------------------------------------------------------------
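To make the rounding in `bucket` concrete, here is a small worked example (illustrative integers, ignoring the 1e8 price scaling; note the subscription validator only permits a mantissa together with five sig figs, so the mantissa lines are purely arithmetic illustration):

```rust
// px = 123456 has 6 digits; with n_sig_figs = 3 the increment is 10^(6-3) = 1000.
// Bids round down, asks round up via div_ceil.
assert_eq!(bucket(Px::new(123_456), Side::Bid, Some(3), None).value(), 123_000);
assert_eq!(bucket(Px::new(123_456), Side::Ask, Some(3), None).value(), 124_000);
// A mantissa of 5 widens the increment to 5 * 1000 = 5000:
assert_eq!(bucket(Px::new(123_456), Side::Bid, Some(3), Some(5)).value(), 120_000);
assert_eq!(bucket(Px::new(123_456), Side::Ask, Some(3), Some(5)).value(), 125_000);
```

Rounding bids down and asks up keeps aggregated levels conservative: a displayed bid is never better than a real bid, and likewise for asks.
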
/server/src/listeners/order_book/state.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     listeners::order_book::{L2Snapshots, TimedSnapshots, utils::compute_l2_snapshots},
3 |     order_book::{
4 |         Coin, InnerOrder, Oid,
5 |         multi_book::{OrderBooks, Snapshots},
6 |     },
7 |     prelude::*,
8 |     types::{
9 |         inner::{InnerL4Order, InnerOrderDiff},
10 |         node_data::{Batch, NodeDataOrderDiff, NodeDataOrderStatus},
11 |     },
12 | };
13 | use std::collections::{HashMap, HashSet, VecDeque};
14 | 
15 | #[derive(Clone)]
16 | pub(super) struct OrderBookState {
17 |     order_book: OrderBooks<InnerL4Order>,
18 |     height: u64,
19 |     time: u64,
20 |     snapped: bool,
21 |     ignore_spot: bool,
22 | }
23 | 
24 | impl OrderBookState {
25 |     pub(super) fn from_snapshot(
26 |         snapshot: Snapshots<InnerL4Order>,
27 |         height: u64,
28 |         time: u64,
29 |         ignore_triggers: bool,
30 |         ignore_spot: bool,
31 |     ) -> Self {
32 |         Self {
33 |             ignore_spot,
34 |             time,
35 |             height,
36 |             order_book: OrderBooks::from_snapshots(snapshot, ignore_triggers),
37 |             snapped: false,
38 |         }
39 |     }
40 | 
41 |     pub(super) const fn height(&self) -> u64 {
42 |         self.height
43 |     }
44 | 
45 |     // forcibly take snapshot - (time, height, snapshot)
46 |     pub(super) fn compute_snapshot(&self) -> TimedSnapshots {
47 |         TimedSnapshots { time: self.time, height: self.height, snapshot: self.order_book.to_snapshots_par() }
48 |     }
49 | 
50 |     // (time, snapshot)
51 |     pub(super) fn l2_snapshots(&mut self, prevent_future_snaps: bool) -> Option<(u64, L2Snapshots)> {
52 |         if self.snapped {
53 |             None
54 |         } else {
55 |             self.snapped = prevent_future_snaps || self.snapped;
56 |             Some((self.time, compute_l2_snapshots(&self.order_book)))
57 |         }
58 |     }
59 | 
60 |     pub(super) fn compute_universe(&self) -> HashSet<Coin> {
61 |         self.order_book.as_ref().keys().cloned().collect()
62 |     }
63 | 
64 |     pub(super) fn apply_updates(
65 |         &mut self,
66 |         order_statuses: Batch<NodeDataOrderStatus>,
67 |         order_diffs: Batch<NodeDataOrderDiff>,
68 |     ) -> Result<()> {
69 |         let height = order_statuses.block_number();
70 |         let time = order_statuses.block_time();
71 |         assert_eq!(order_statuses.block_number(), order_diffs.block_number());
72 |         if height > self.height + 1 {
73 |             return Err(format!("Expecting block {}, got block {}", self.height + 1, height).into());
74 |         } else if height <= self.height {
75 |             // This is not an error in case we started caching long before a snapshot is fetched
76 |             return Ok(());
77 |         }
78 |         let mut diffs = order_diffs.events().into_iter().collect::<VecDeque<_>>();
79 |         let mut order_map = order_statuses
80 |             .events()
81 |             .into_iter()
82 |             .filter_map(|order_status| {
83 |                 if order_status.is_inserted_into_book() {
84 |                     Some((Oid::new(order_status.order.oid), order_status))
85 |                 } else {
86 |                     None
87 |                 }
88 |             })
89 |             .collect::<HashMap<_, _>>();
90 |         while let Some(diff) = diffs.pop_front() {
91 |             let oid = diff.oid();
92 |             let coin = diff.coin();
93 |             if coin.is_spot() && self.ignore_spot {
94 |                 continue;
95 |             }
96 |             let inner_diff = diff.diff().try_into()?;
97 |             match inner_diff {
98 |                 InnerOrderDiff::New { sz } => {
99 |                     if let Some(order) = order_map.remove(&oid) {
100 |                         let time = order.time.and_utc().timestamp_millis();
101 |                         let mut inner_order: InnerL4Order = order.try_into()?;
102 |                         inner_order.modify_sz(sz);
103 |                         // must replace time with time of entering book, which is the timestamp of the order status update
104 |                         #[allow(clippy::unwrap_used)]
105 |                         inner_order.convert_trigger(time.try_into().unwrap());
106 |                         self.order_book.add_order(inner_order);
107 |                     } else {
108 |                         return Err(format!("Unable to find order opening status {diff:?}").into());
109 |                     }
110 |                 }
111 |                 InnerOrderDiff::Update { new_sz, .. } => {
112 |                     if !self.order_book.modify_sz(oid, coin, new_sz) {
113 |                         return Err(format!("Unable to find order on the book {diff:?}").into());
114 |                     }
115 |                 }
116 |                 InnerOrderDiff::Remove => {
117 |                     if !self.order_book.cancel_order(oid, coin) {
118 |                         return Err(format!("Unable to find order on the book {diff:?}").into());
119 |                     }
120 |                 }
121 |             }
122 |         }
123 |         self.height += 1;
124 |         self.time = time;
125 |         self.snapped = false;
126 |         Ok(())
127 |     }
128 | }
129 | 
--------------------------------------------------------------------------------
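A minimal sketch of how this state machine is meant to be driven (hypothetical glue, not the actual listener code): batches must arrive in increasing block order, stale blocks are skipped, and a gap aborts with an error.

```rust
// Hypothetical driver for OrderBookState, for illustration only.
fn replay(
    state: &mut OrderBookState,
    mut statuses: VecDeque<Batch<NodeDataOrderStatus>>,
    mut diffs: VecDeque<Batch<NodeDataOrderDiff>>,
) -> Result<()> {
    while let (Some(s), Some(d)) = (statuses.pop_front(), diffs.pop_front()) {
        // Heights <= state.height() are ignored inside apply_updates;
        // a gap (height > state.height() + 1) returns an error.
        state.apply_updates(s, d)?;
    }
    Ok(())
}
```
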
/server/src/types/inner.rs:
--------------------------------------------------------------------------------
1 | use alloy::primitives::Address;
2 | 
3 | use super::Level;
4 | use crate::{
5 |     order_book::{
6 |         Oid,
7 |         types::{Coin, InnerOrder, Px, Side, Sz},
8 |     },
9 |     prelude::*,
10 |     types::{L4Order, OrderDiff, node_data::NodeDataOrderStatus},
11 | };
12 | 
13 | // InnerL4Order: the struct we keep in the orderbook (computationally better)
14 | #[derive(Debug, Clone, PartialEq, Eq)]
15 | pub(crate) struct InnerL4Order {
16 |     pub user: Address,
17 |     pub coin: Coin,
18 |     pub side: Side,
19 |     pub limit_px: Px,
20 |     pub sz: Sz,
21 |     pub oid: u64,
22 |     pub timestamp: u64,
23 |     pub trigger_condition: String,
24 |     pub is_trigger: bool,
25 |     pub trigger_px: String,
26 |     pub is_position_tpsl: bool,
27 |     pub reduce_only: bool,
28 |     pub order_type: String,
29 |     pub tif: Option<String>,
30 |     pub cloid: Option<String>,
31 | }
32 | 
33 | impl InnerOrder for InnerL4Order {
34 |     fn oid(&self) -> Oid {
35 |         Oid::new(self.oid)
36 |     }
37 | 
38 |     fn side(&self) -> Side {
39 |         self.side
40 |     }
41 | 
42 |     fn limit_px(&self) -> Px {
43 |         self.limit_px
44 |     }
45 | 
46 |     fn sz(&self) -> Sz {
47 |         self.sz
48 |     }
49 | 
50 |     fn decrement_sz(&mut self, dec: Sz) {
51 |         self.sz.decrement_sz(dec.value());
52 |     }
53 | 
54 |     fn modify_sz(&mut self, sz: Sz) {
55 |         self.sz = sz;
56 |     }
57 | 
58 |     fn fill(&mut self, maker_order: &mut Self) -> Sz {
59 |         let match_sz = self.sz().min(maker_order.sz());
60 |         self.decrement_sz(match_sz);
61 |         maker_order.decrement_sz(match_sz);
62 |         match_sz
63 |     }
64 | 
65 |     fn convert_trigger(&mut self, ts: u64) {
66 |         if self.is_trigger {
67 |             self.trigger_px = "0.0".to_string();
68 |             self.trigger_condition = "Triggered".to_string();
69 |             self.is_trigger = false;
70 |             self.timestamp = ts;
71 |             self.tif = Some("Gtc".to_string());
72 |         }
73 |     }
74 | 
75 |     fn coin(&self) -> Coin {
76 |         self.coin.clone()
77 |     }
78 | }
79 | 
80 | impl TryFrom<(Address, L4Order)> for InnerL4Order {
81 |     type Error = Error;
82 | 
83 |     fn try_from(value: (Address, L4Order)) -> Result<Self> {
84 |         let L4Order {
85 |             coin,
86 |             side,
87 |             limit_px,
88 |             sz,
89 |             oid,
90 |             timestamp,
91 |             trigger_condition,
92 |             is_trigger,
93 |             trigger_px,
94 |             is_position_tpsl,
95 |             reduce_only,
96 |             order_type,
97 |             tif,
98 |             cloid,
99 |             ..
100 |         } = value.1;
101 |         let user = value.0;
102 |         let limit_px = Px::parse_from_str(&limit_px)?;
103 |         let sz = Sz::parse_from_str(&sz)?;
104 |         Ok(Self {
105 |             user,
106 |             coin: Coin::new(&coin),
107 |             side,
108 |             limit_px,
109 |             sz,
110 |             oid,
111 |             timestamp,
112 |             trigger_condition,
113 |             is_trigger,
114 |             trigger_px,
115 |             is_position_tpsl,
116 |             reduce_only,
117 |             order_type,
118 |             tif,
119 |             cloid,
120 |         })
121 |     }
122 | }
123 | 
124 | impl From<InnerL4Order> for L4Order {
125 |     fn from(value: InnerL4Order) -> Self {
126 |         let InnerL4Order {
127 |             user,
128 |             coin,
129 |             side,
130 |             limit_px,
131 |             sz,
132 |             oid,
133 |             timestamp,
134 |             trigger_condition,
135 |             is_trigger,
136 |             trigger_px,
137 |             is_position_tpsl,
138 |             reduce_only,
139 |             order_type,
140 |             tif,
141 |             cloid,
142 |         } = value;
143 |         let limit_px = limit_px.to_str();
144 |         let sz = sz.to_str();
145 |         Self {
146 |             user: Some(user),
147 |             coin: coin.value(),
148 |             side,
149 |             limit_px,
150 |             sz,
151 |             oid,
152 |             timestamp,
153 |             trigger_condition,
154 |             is_trigger,
155 |             trigger_px,
156 |             is_position_tpsl,
157 |             reduce_only,
158 |             order_type,
159 |             tif,
160 |             cloid,
161 |         }
162 |     }
163 | }
164 | 
165 | impl TryFrom<NodeDataOrderStatus> for InnerL4Order {
166 |     type Error = Error;
167 | 
168 |     fn try_from(value: NodeDataOrderStatus) -> Result<Self> {
169 |         (value.user, value.order).try_into()
170 |     }
171 | }
172 | 
173 | #[derive(Debug, Clone)]
174 | pub(crate) struct InnerLevel {
175 |     pub px: Px,
176 |     pub sz: Sz,
177 |     pub n: usize,
178 | }
179 | 
180 | impl From<InnerLevel> for Level {
181 |     fn from(value: InnerLevel) -> Self {
182 |         Self::new(value.px.to_str(), value.sz.to_str(), value.n)
183 |     }
184 | }
185 | 
186 | #[derive(Debug, Clone)]
187 | pub(crate) enum InnerOrderDiff {
188 |     New {
189 |         sz: Sz,
190 |     },
191 |     #[allow(dead_code)]
192 |     Update {
193 |         orig_sz: Sz,
194 |         new_sz: Sz,
195 |     },
196 |     Remove,
197 | }
198 | 
199 | impl TryFrom<OrderDiff> for InnerOrderDiff {
200 |     type Error = Error;
201 | 
202 |     fn try_from(value: OrderDiff) -> Result<Self> {
203 |         Ok(match value {
204 |             OrderDiff::New { sz } => Self::New { sz: Sz::parse_from_str(&sz)? },
205 |             OrderDiff::Update { orig_sz, new_sz } => {
206 |                 Self::Update { orig_sz: Sz::parse_from_str(&orig_sz)?, new_sz: Sz::parse_from_str(&new_sz)? }
207 |             }
208 |             OrderDiff::Remove => Self::Remove,
209 |         })
210 |     }
211 | }
212 | 
--------------------------------------------------------------------------------
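The conversions above keep strings on the wire and fixed-point integers inside the book. A hedged sketch of the round trip (`user` and `wire_order` are hypothetical values, not names from the repo):

```rust
// Wire -> book: parses the limit_px / sz strings into scaled u64 (fails on bad numbers).
let inner: InnerL4Order = (user, wire_order).try_into()?;
// Book -> wire: formats back to trimmed decimal strings; the user moves inside the struct.
let out: L4Order = inner.into();
assert_eq!(out.user, Some(user));
```
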
/server/src/listeners/order_book/utils.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 |     listeners::order_book::{L2SnapshotParams, L2Snapshots},
3 |     order_book::{
4 |         Snapshot,
5 |         multi_book::{OrderBooks, Snapshots},
6 |         types::InnerOrder,
7 |     },
8 |     prelude::*,
9 |     types::{
10 |         inner::InnerLevel,
11 |         node_data::{Batch, NodeDataFill, NodeDataOrderDiff, NodeDataOrderStatus},
12 |     },
13 | };
14 | use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
15 | use reqwest::Client;
16 | use serde_json::json;
17 | use std::collections::VecDeque;
18 | use std::{
19 |     collections::HashMap,
20 |     path::{Path, PathBuf},
21 | };
22 | 
23 | pub(super) async fn process_rmp_file(dir: &Path) -> Result<PathBuf> {
24 |     let output_path = dir.join("out.json");
25 |     let payload = json!({
26 |         "type": "fileSnapshot",
27 |         "request": {
28 |             "type": "l4Snapshots",
29 |             "includeUsers": true,
30 |             "includeTriggerOrders": false
31 |         },
32 |         "outPath": output_path,
33 |         "includeHeightInOutput": true
34 |     });
35 | 
36 |     let client = Client::new();
37 |     client
38 |         .post("http://localhost:3001/info")
39 |         .header("Content-Type", "application/json")
40 |         .json(&payload)
41 |         .send()
42 |         .await?
43 |         .error_for_status()?;
44 |     Ok(output_path)
45 | }
46 | 
47 | pub(super) fn validate_snapshot_consistency<O: InnerOrder + PartialEq + Debug>(
48 |     snapshot: &Snapshots<O>,
49 |     expected: Snapshots<O>,
50 |     ignore_spot: bool,
51 | ) -> Result<()> {
52 |     let mut snapshot_map: HashMap<_, _> =
53 |         expected.value().into_iter().filter(|(c, _)| !c.is_spot() || !ignore_spot).collect();
54 | 
55 |     for (coin, book) in snapshot.as_ref() {
56 |         if ignore_spot && coin.is_spot() {
57 |             continue;
58 |         }
59 |         let book1 = book.as_ref();
60 |         if let Some(book2) = snapshot_map.remove(coin) {
61 |             for (orders1, orders2) in book1.as_ref().iter().zip(book2.as_ref()) {
62 |                 for (order1, order2) in orders1.iter().zip(orders2.iter()) {
63 |                     if *order1 != *order2 {
64 |                         return Err(
65 |                             format!("Orders do not match, expected: {:?} received: {:?}", *order2, *order1).into()
66 |                         );
67 |                     }
68 |                 }
69 |             }
70 |         } else if !book1[0].is_empty() || !book1[1].is_empty() {
71 |             return Err(format!("Missing {} book", coin.value()).into());
72 |         }
73 |     }
74 |     if !snapshot_map.is_empty() {
75 |         return Err("Extra orderbooks detected".to_string().into());
76 |     }
77 |     Ok(())
78 | }
79 | 
80 | impl L2SnapshotParams {
81 |     pub(crate) const fn new(n_sig_figs: Option<u32>, mantissa: Option<u64>) -> Self {
82 |         Self { n_sig_figs, mantissa }
83 |     }
84 | }
85 | 
86 | pub(super) fn compute_l2_snapshots<O: InnerOrder + Sync>(order_books: &OrderBooks<O>) -> L2Snapshots {
87 |     L2Snapshots(
88 |         order_books
89 |             .as_ref()
90 |             .par_iter()
91 |             .map(|(coin, order_book)| {
92 |                 let mut entries = Vec::new();
93 |                 let snapshot = order_book.to_l2_snapshot(None, None, None);
94 |                 entries.push((L2SnapshotParams { n_sig_figs: None, mantissa: None }, snapshot));
95 |                 let mut add_new_snapshot = |n_sig_figs: Option<u32>, mantissa: Option<u64>, idx: usize| {
96 |                     if let Some((_, last_snapshot)) = &entries.get(entries.len() - idx) {
97 |                         let snapshot = last_snapshot.to_l2_snapshot(None, n_sig_figs, mantissa);
98 |                         entries.push((L2SnapshotParams { n_sig_figs, mantissa }, snapshot));
99 |                     }
100 |                 };
101 |                 for n_sig_figs in (2..=5).rev() {
102 |                     if n_sig_figs == 5 {
103 |                         for mantissa in [None, Some(2), Some(5)] {
104 |                             if mantissa == Some(5) {
105 |                                 // Some(2) is NOT a superset of this info!
106 |                                 add_new_snapshot(Some(n_sig_figs), mantissa, 2);
107 |                             } else {
108 |                                 add_new_snapshot(Some(n_sig_figs), mantissa, 1);
109 |                             }
110 |                         }
111 |                     } else {
112 |                         add_new_snapshot(Some(n_sig_figs), None, 1);
113 |                     }
114 |                 }
115 |                 (coin.clone(), entries.into_iter().collect::<HashMap<_, _>>())
116 |             })
117 |             .collect(),
118 |     )
119 | }
120 | 
121 | pub(super) enum EventBatch {
122 |     Orders(Batch<NodeDataOrderStatus>),
123 |     BookDiffs(Batch<NodeDataOrderDiff>),
124 |     Fills(Batch<NodeDataFill>),
125 | }
126 | 
127 | pub(super) struct BatchQueue<T> {
128 |     deque: VecDeque<Batch<T>>,
129 |     last_ts: Option<u64>,
130 | }
131 | 
132 | impl<T> BatchQueue<T> {
133 |     pub(super) const fn new() -> Self {
134 |         Self { deque: VecDeque::new(), last_ts: None }
135 |     }
136 | 
137 |     pub(super) fn push(&mut self, block: Batch<T>) -> bool {
138 |         if let Some(last_ts) = self.last_ts {
139 |             if last_ts >= block.block_number() {
140 |                 return false;
141 |             }
142 |         }
143 |         self.last_ts = Some(block.block_number());
144 |         self.deque.push_back(block);
145 |         true
146 |     }
147 | 
148 |     pub(super) fn pop_front(&mut self) -> Option<Batch<T>> {
149 |         self.deque.pop_front()
150 |     }
151 | 
152 |     pub(super) fn front(&self) -> Option<&Batch<T>> {
153 |         self.deque.front()
154 |     }
155 | }
156 | 
--------------------------------------------------------------------------------
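The snapshot request that `process_rmp_file` sends can also be issued by hand when debugging the node connection. An equivalent `curl` sketch (assuming the node's info server is on `localhost:3001`, as hard-coded above; `/tmp/out.json` is an arbitrary output path):

```bash
curl -s -X POST http://localhost:3001/info \
  -H 'Content-Type: application/json' \
  -d '{
        "type": "fileSnapshot",
        "request": {"type": "l4Snapshots", "includeUsers": true, "includeTriggerOrders": false},
        "outPath": "/tmp/out.json",
        "includeHeightInOutput": true
      }'
```
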
/server/src/order_book/linked_list.rs:
--------------------------------------------------------------------------------
1 | use crate::prelude::*;
2 | use slab::Slab;
3 | use std::{collections::HashMap, hash::Hash, marker::PhantomData};
4 | 
5 | #[derive(Clone)]
6 | struct Node<K, T> {
7 |     key: K,
8 |     value: T,
9 |     next: Option<usize>,
10 |     prev: Option<usize>,
11 | }
12 | 
13 | impl<K, T> Node<K, T> {
14 |     pub(crate) const fn new(key: K, value: T) -> Self {
15 |         Self { key, value, next: None, prev: None }
16 |     }
17 | }
18 | 
19 | #[derive(Clone)]
20 | // Implicit assumption is that when we remove a node, it is never used again
21 | pub(crate) struct LinkedList<K, T> {
22 |     key_to_sid: HashMap<K, usize>,
23 |     slab: Slab<Node<K, T>>,
24 |     head: Option<usize>,
25 |     tail: Option<usize>,
26 |     phantom_data: PhantomData<T>,
27 | }
28 | 
29 | impl<K: Clone + Eq + Hash, T> LinkedList<K, T> {
30 |     #[must_use]
31 |     pub(crate) fn new() -> Self {
32 |         Self { key_to_sid: HashMap::new(), slab: Slab::new(), head: None, tail: None, phantom_data: PhantomData }
33 |     }
34 | 
35 |     pub(crate) fn push_back(&mut self, key: K, value: T) -> bool {
36 |         if self.key_to_sid.contains_key(&key) {
37 |             false
38 |         } else {
39 |             let node = Node::new(key.clone(), value);
40 |             let sid = self.slab.insert(node);
41 |             self.key_to_sid.insert(key, sid);
42 |             match self.tail {
43 |                 None => {
44 |                     self.head = Some(sid);
45 |                     self.tail = Some(sid);
46 |                 }
47 |                 Some(t) => {
48 |                     let tail_order = &mut self.slab[t];
49 |                     tail_order.next = Some(sid);
50 |                     let new_order = &mut self.slab[sid];
51 |                     new_order.prev = Some(t);
52 |                     self.tail = Some(sid);
53 |                 }
54 |             }
55 |             true
56 |         }
57 |     }
58 | 
59 |     #[must_use]
60 |     pub(crate) const fn is_empty(&self) -> bool {
61 |         self.head.is_none()
62 |     }
63 | 
64 |     pub(crate) fn head_value_ref_mut_unsafe(&mut self) -> Option<&mut T> {
65 |         self.head.as_ref().map(|&h| &mut self.slab[h].value)
66 |     }
67 | 
68 |     pub(crate) fn remove_front(&mut self) -> Result<()> {
69 |         if let Some(h) = self.head {
70 |             let new_head = {
71 |                 let head_order = self.slab.remove(h);
72 |                 self.key_to_sid.remove(&head_order.key);
73 |                 head_order.next
74 |             };
75 |             match new_head {
76 |                 None => {
77 |                     self.head = None;
78 |                     self.tail = None;
79 |                 }
80 |                 Some(n) => {
81 |                     self.head = Some(n);
82 |                     let new_order = &mut self.slab[n];
83 |                     new_order.prev = None;
84 |                 }
85 |             }
86 |             Ok(())
87 |         } else {
88 |             Err("List is empty".into())
89 |         }
90 |     }
91 | 
92 |     pub(crate) fn remove_node(&mut self, key: K) -> bool {
93 |         if let Some((_, sid)) = self.key_to_sid.remove_entry(&key) {
94 |             let (prev, next) = {
95 |                 let order = self.slab.remove(sid);
96 |                 (order.prev, order.next)
97 |             };
98 |             if let Some(p) = prev {
99 |                 let prev_order = &mut self.slab[p];
100 |                 prev_order.next = next;
101 |             } else {
102 |                 self.head = next;
103 |             }
104 |             if let Some(n) = next {
105 |                 let next_order = &mut self.slab[n];
106 |                 next_order.prev = prev;
107 |             } else {
108 |                 self.tail = prev;
109 |             }
110 |             true
111 |         } else {
112 |             false
113 |         }
114 |     }
115 | 
116 |     pub(crate) fn node_value_mut(&mut self, key: &K) -> Option<&mut T> {
117 |         if let Some(sid) = self.key_to_sid.get(key) { Some(&mut self.slab[*sid].value) } else { None }
118 |     }
119 | 
120 |     #[must_use]
121 |     pub(crate) fn to_vec(&self) -> Vec<&T> {
122 |         let mut res = Vec::new();
123 |         let mut cur = self.head;
124 |         while let Some(c) = cur {
125 |             let node = &self.slab[c];
126 |             res.push(&node.value);
127 |             cur = node.next;
128 |         }
129 |         res
130 |     }
131 | 
132 |     pub(crate) fn fold<Acc, F>(&self, mut init: Acc, f: F) -> Acc
133 |     where
134 |         F: Fn(&mut Acc, &T),
135 |     {
136 |         let mut cur = self.head;
137 |         while let Some(c) = cur {
138 |             let node = &self.slab[c];
139 |             f(&mut init, &node.value);
140 |             cur = node.next;
141 |         }
142 |         init
143 |     }
144 | }
145 | 
146 | #[cfg(test)]
147 | mod tests {
148 |     use super::*;
149 |     use itertools::Itertools;
150 |     use std::collections::VecDeque;
151 | 
152 |     #[must_use]
153 |     fn to_rev_vec<K, T>(list: &LinkedList<K, T>) -> Vec<&T> {
154 |         let mut res = Vec::new();
155 |         let mut cur = list.tail;
156 |         while let Some(c) = cur {
157 |             let node = &list.slab[c];
158 |             res.push(&node.value);
159 |             cur = node.prev;
160 |         }
161 |         res
162 |     }
163 | 
164 |     #[test]
165 |     fn simple_linked_list_test() -> Result<()> {
166 |         let mut deque = (0..11).collect::<VecDeque<u64>>();
167 |         let mut keys = Vec::new();
168 |         let mut list = LinkedList::new();
169 |         for &elt in &deque {
170 |             keys.push(elt);
171 |             list.push_back(elt, elt);
172 |         }
173 | 
174 |         assert_vec_deque_list_eq(&deque, &list);
175 | 
176 |         list.remove_front()?;
177 |         deque.pop_front();
178 | 
179 |         assert_vec_deque_list_eq(&deque, &list);
180 | 
181 |         list.remove_front()?;
182 |         deque.pop_front();
183 | 
184 |         assert_vec_deque_list_eq(&deque, &list);
185 | 
186 |         list.remove_node(keys[4]);
187 |         deque.remove(2);
188 | 
189 |         assert_vec_deque_list_eq(&deque, &list);
190 | 
191 |         for _ in 0..5 {
192 |             list.remove_front()?;
193 |             deque.pop_front();
194 |             assert_vec_deque_list_eq(&deque, &list);
195 |         }
196 | 
197 |         for k in keys.iter().skip(8) {
198 |             list.remove_node(*k);
199 |             deque.pop_front();
200 |             assert_vec_deque_list_eq(&deque, &list);
201 |         }
202 | 
203 |         assert!(list.is_empty());
204 |         Ok(())
205 |     }
206 | 
207 |     fn assert_vec_deque_list_eq<T: Clone + Eq + Hash + Debug>(
208 |         deque: &VecDeque<T>,
209 |         list: &LinkedList<T, T>,
210 |     ) {
211 |         let evec = deque.iter().cloned().collect_vec();
212 |         for (a, b) in list.to_vec().iter().zip(evec.iter()) {
213 |             assert_eq!(**a, *b);
214 |         }
215 |         let mut rev = evec;
216 |         rev.reverse();
217 |         for (a, b) in to_rev_vec(list).iter().zip(rev.iter()) {
218 |             assert_eq!(**a, *b);
219 |         }
220 |     }
221 | }
222 | 
--------------------------------------------------------------------------------
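A sketch of the intended usage pattern — FIFO order with O(1) keyed removal, which is what lets the book cancel an arbitrary order without scanning its whole price level (illustrative values only):

```rust
let mut list: LinkedList<u64, &str> = LinkedList::new();
list.push_back(1, "first");
list.push_back(2, "second");
list.push_back(3, "third");
assert!(list.remove_node(2));              // unlink by key via the slab index, O(1)
list.remove_front()?;                      // pop "first" from the head
assert_eq!(list.to_vec(), vec![&"third"]); // only the tail element remains
```
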
/server/src/types/subscription.rs:
--------------------------------------------------------------------------------
1 | use crate::types::{L2Book, L4Book, Trade};
2 | use log::info;
3 | use serde::{Deserialize, Serialize};
4 | use std::collections::HashSet;
5 | 
6 | const MAX_LEVELS: usize = 100;
7 | pub(crate) const DEFAULT_LEVELS: usize = 20;
8 | 
9 | #[derive(Debug, Serialize, Deserialize)]
10 | #[serde(tag = "method")]
11 | #[serde(rename_all = "camelCase")]
12 | pub(crate) enum ClientMessage {
13 |     Subscribe { subscription: Subscription },
14 |     Unsubscribe { subscription: Subscription },
15 | }
16 | 
17 | #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
18 | #[serde(tag = "type")]
19 | #[serde(rename_all = "camelCase")]
20 | pub(crate) enum Subscription {
21 |     #[serde(rename_all = "camelCase")]
22 |     Trades { coin: String },
23 |     #[serde(rename_all = "camelCase")]
24 |     L2Book { coin: String, n_sig_figs: Option<u32>, n_levels: Option<usize>, mantissa: Option<u64> },
25 |     #[serde(rename_all = "camelCase")]
26 |     L4Book { coin: String },
27 | }
28 | 
29 | impl Subscription {
30 |     pub(crate) fn validate(&self, universe: &HashSet<String>) -> bool {
31 |         match self {
32 |             Self::Trades { coin } => universe.contains(coin),
33 |             Self::L2Book { coin, n_sig_figs, n_levels, mantissa } => {
34 |                 if !universe.contains(coin) || coin.starts_with('@') {
35 |                     info!("Invalid subscription: coin not found");
36 |                     return false;
37 |                 }
38 |                 if *n_levels == Some(DEFAULT_LEVELS) {
39 |                     info!("Invalid subscription: to request the default n_levels, pass null instead");
40 |                     return false;
41 |                 }
42 |                 let n_levels = n_levels.unwrap_or(DEFAULT_LEVELS);
43 |                 if n_levels > MAX_LEVELS {
44 |                     info!("Invalid subscription: n_levels too high");
45 |                     return false;
46 |                 }
47 |                 if let Some(n_sig_figs) = *n_sig_figs {
48 |                     if !(2..=5).contains(&n_sig_figs) {
49 |                         info!("Invalid subscription: sig figs aren't set correctly");
50 |                         return false;
51 |                     }
52 |                     if let Some(m) = *mantissa {
53 |                         if n_sig_figs < 5 || (m != 5 && m != 2) {
54 |                             return false;
55 |                         }
56 |                     }
57 |                 } else if mantissa.is_some() {
58 |                     info!("Invalid subscription: mantissa cannot be set if sig figs are not");
59 |                     return false;
60 |                 }
61 |                 info!("Valid subscription");
62 |                 true
63 |             }
64 |             Self::L4Book { coin } => {
65 |                 if !universe.contains(coin) || coin.starts_with('@') {
66 |                     info!("Invalid subscription: coin not found");
67 |                     return false;
68 |                 }
69 |                 info!("Valid subscription");
70 |                 true
71 |             }
72 |         }
73 |     }
74 | }
75 | 
76 | #[derive(Debug, Serialize, Deserialize)]
77 | #[serde(tag = "channel", content = "data")]
78 | #[serde(rename_all = "camelCase")]
79 | pub(crate) enum ServerResponse {
80 |     SubscriptionResponse(ClientMessage),
81 |     L2Book(L2Book),
82 |     L4Book(L4Book),
83 |     Trades(Vec<Trade>),
84 |     Error(String),
85 | }
86 | 
87 | #[derive(Default)]
88 | pub(crate) struct SubscriptionManager {
89 |     subscriptions: HashSet<Subscription>,
90 | }
91 | 
92 | impl SubscriptionManager {
93 |     pub(crate) fn subscribe(&mut self, sub: Subscription) -> bool {
94 |         self.subscriptions.insert(sub)
95 |     }
96 | 
97 |     pub(crate) fn unsubscribe(&mut self, sub: Subscription) -> bool {
98 |         self.subscriptions.remove(&sub)
99 |     }
100 | 
101 |     pub(crate) const fn subscriptions(&self) -> &HashSet<Subscription> {
102 |         &self.subscriptions
103 |     }
104 | }
105 | 
106 | #[cfg(test)]
107 | mod test {
108 |     use crate::types::subscription::Subscription;
109 | 
110 |     use super::{ClientMessage, ServerResponse};
111 | 
112 |     #[test]
113 |     fn test_message_deserialization_subscription_response() {
114 |         let message = r#"
115 |         {"channel":"subscriptionResponse","data":{"method":"subscribe","subscription":{"type":"l2Book","coin":"BTC","nSigFigs":null,"mantissa":null}}}
116 |         "#;
117 |         let msg = serde_json::from_str(message).unwrap();
118 |         assert!(matches!(msg, ServerResponse::SubscriptionResponse(_)));
119 |     }
120 | 
121 |     #[test]
122 |     fn test_message_deserialization_l2book() {
123 |         let message = r#"
124 |         {"channel":"l2Book","data":{"coin":"BTC","time":1751427259657,"levels":[[{"px":"106217.0","sz":"0.001","n":1},{"px":"106215.0","sz":"0.001","n":1},{"px":"106213.0","sz":"0.27739","n":1},{"px":"106193.0","sz":"0.49943","n":1},{"px":"106190.0","sz":"0.52899","n":1},{"px":"106162.0","sz":"0.55931","n":1},{"px":"106160.0","sz":"0.55023","n":1},{"px":"106140.0","sz":"0.001","n":1},{"px":"106137.0","sz":"0.001","n":1},{"px":"106131.0","sz":"0.001","n":1},{"px":"106111.0","sz":"0.01094","n":1},{"px":"106085.0","sz":"1.02207","n":2},{"px":"105916.0","sz":"0.001","n":1},{"px":"105913.0","sz":"1.01927","n":2},{"px":"105822.0","sz":"0.00474","n":1},{"px":"105698.0","sz":"0.51012","n":1},{"px":"105696.0","sz":"0.001","n":1},{"px":"105604.0","sz":"0.55072","n":1},{"px":"105579.0","sz":"0.00217","n":1},{"px":"105543.0","sz":"0.0197","n":1}],[{"px":"106233.0","sz":"0.26739","n":3},{"px":"106258.0","sz":"0.001","n":1},{"px":"106270.0","sz":"0.49128","n":2},{"px":"106306.0","sz":"0.27263","n":1},{"px":"106311.0","sz":"0.23837","n":1},{"px":"106350.0","sz":"0.001","n":1},{"px":"106396.0","sz":"0.24733","n":1},{"px":"106414.0","sz":"0.27088","n":1},{"px":"106560.0","sz":"0.0001","n":1},{"px":"106597.0","sz":"0.56981","n":1},{"px":"106637.0","sz":"0.57002","n":1},{"px":"106932.0","sz":"0.001","n":1},{"px":"107012.0","sz":"1.06873","n":2},{"px":"107094.0","sz":"0.0041","n":1},{"px":"107360.0","sz":"0.001","n":1},{"px":"107535.0","sz":"0.002","n":1},{"px":"107638.0","sz":"0.001","n":1},{"px":"107639.0","sz":"0.0007","n":1},{"px":"107650.0","sz":"0.00074","n":1},{"px":"107675.0","sz":"0.00083","n":1}]]}}
125 |         "#;
126 |         let msg: ServerResponse = serde_json::from_str(message).unwrap();
127 |         assert!(matches!(msg, ServerResponse::L2Book(_)));
128 |     }
129 | 
130 |     #[test]
131 |     fn test_message_deserialization_trade() {
132 |         let message = r#"
133 |         {"channel":"trades","data":[{"coin":"BTC","side":"A","px":"106296.0","sz":"0.00017","time":1751430933565,"hash":"0xde93a8a0729ade63d8840417805ba9010b008818422ddedb1285744426b73503","tid":293353986402527,"users":["0xcc0a3b6e3267c84361e91d8230868eea53431e4b","0xc64cc00b46101bd40aa1c3121195e85c0b0918d8"]}]}
134 |         "#;
135 |         let msg: ServerResponse = serde_json::from_str(message).unwrap();
136 |         assert!(matches!(msg, ServerResponse::Trades(_)));
137 |     }
138 | 
139 |     #[test]
140 |     fn test_client_message_deserialization() {
141 |         let message = r#"
142 |         { "method": "subscribe", "subscription":{ "type": "l2Book", "coin": "BTC" }}
143 |         "#;
144 |         let msg: ClientMessage = serde_json::from_str(message).unwrap();
145 |         assert!(matches!(
146 |             msg,
147 |             ClientMessage::Subscribe {
148 |                 subscription: Subscription::L2Book { n_sig_figs: None, n_levels: None, mantissa: None, .. },
149 |             }
150 |         ));
151 |     }
152 | }
153 | 
--------------------------------------------------------------------------------
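A few concrete cases of the `validate` rules above (illustrative checks, assuming "BTC" is in the universe):

```rust
let universe: HashSet<String> = ["BTC".to_string()].into();
// Valid: a mantissa is only allowed together with nSigFigs == 5, and must be 2 or 5.
let ok = Subscription::L2Book { coin: "BTC".into(), n_sig_figs: Some(5), n_levels: None, mantissa: Some(2) };
assert!(ok.validate(&universe));
// Invalid: mantissa without sig figs.
let bad = Subscription::L2Book { coin: "BTC".into(), n_sig_figs: None, n_levels: None, mantissa: Some(2) };
assert!(!bad.validate(&universe));
// Invalid: n_levels above MAX_LEVELS (100).
let deep = Subscription::L2Book { coin: "BTC".into(), n_sig_figs: None, n_levels: Some(101), mantissa: None };
assert!(!deep.validate(&universe));
```
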
/server/src/order_book/mod.rs:
--------------------------------------------------------------------------------
1 | use crate::prelude::*;
2 | use itertools::Itertools;
3 | use linked_list::LinkedList;
4 | use std::collections::{BTreeMap, HashMap, HashSet};
5 | 
6 | pub(crate) mod levels;
7 | mod linked_list;
8 | pub(crate) mod multi_book;
9 | pub(crate) mod types;
10 | 
11 | pub(crate) use types::{Coin, InnerOrder, Oid, Px, Side, Sz};
12 | 
13 | #[derive(Clone, Default)]
14 | pub(crate) struct OrderBook<O: InnerOrder> {
15 |     oid_to_side_px: HashMap<Oid, (Side, Px)>,
16 |     bids: BTreeMap<Px, LinkedList<Oid, O>>,
17 |     asks: BTreeMap<Px, LinkedList<Oid, O>>,
18 | }
19 | 
20 | #[derive(Debug, Clone)]
21 | pub(crate) struct Snapshot<T>([Vec<T>; 2]);
22 | 
23 | impl<T: Clone> Snapshot<T> {
24 |     pub(crate) const fn as_ref(&self) -> &[Vec<T>; 2] {
25 |         &self.0
26 |     }
27 | 
28 |     pub(crate) fn truncate(&self, n: usize) -> Self {
29 |         Self(self.0.clone().map(|orders| orders.into_iter().take(n).collect_vec()))
30 |     }
31 | }
32 | 
33 | impl<O: InnerOrder> Snapshot<O> {
34 |     pub(crate) fn remove_triggers(&mut self) {
35 |         #[allow(clippy::unwrap_used)]
36 |         let [bid_oids, ask_oids] = &self
37 |             .0
38 |             .iter()
39 |             .map(|orders| orders.iter().map(InnerOrder::oid).collect::<HashSet<_>>())
40 |             .collect::<Vec<_>>()
41 |             .try_into()
42 |             .unwrap();
43 |         for orders in &mut self.0 {
44 |             while let Some(order) = orders.last() {
45 |                 let oid = order.oid();
46 |                 if bid_oids.contains(&oid) && ask_oids.contains(&oid) {
47 |                     orders.pop();
48 |                 } else {
49 |                     break;
50 |                 }
51 |             }
52 |         }
53 |     }
54 | }
55 | 
56 | impl<O: InnerOrder> OrderBook<O> {
57 |     #[must_use]
58 |     pub(crate) fn new() -> Self {
59 |         Self { oid_to_side_px: HashMap::new(), bids: BTreeMap::new(), asks: BTreeMap::new() }
60 |     }
61 | 
62 |     pub(crate) fn add_order(&mut self, mut order: O) {
63 |         let (maker_orders, resting_book) = match order.side() {
64 |             Side::Ask => (&mut self.bids, &mut self.asks),
65 |             Side::Bid => (&mut self.asks, &mut self.bids),
66 |         };
67 |         let oids = match_order(maker_orders, &mut order);
68 |         for oid in oids {
69 |             self.oid_to_side_px.remove(&oid);
70 |         }
71 |         if order.sz().is_positive() {
72 |             self.oid_to_side_px.insert(order.oid(), (order.side(), order.limit_px()));
73 |             add_order_to_book(resting_book, order);
74 |         }
75 |     }
76 | 
77 |     pub(crate) fn cancel_order(&mut self, oid: Oid) -> bool {
78 |         if let Some((side, px)) = self.oid_to_side_px.remove(&oid) {
79 |             let map = match side {
80 |                 Side::Ask => &mut self.asks,
81 |                 Side::Bid => &mut self.bids,
82 |             };
83 |             let list = map.get_mut(&px);
84 |             if let Some(list) = list {
85 |                 let success = list.remove_node(oid.clone());
86 |                 if list.is_empty() {
87 |                     map.remove(&px);
88 |                 }
89 |                 return success;
90 |             }
91 |         }
92 |         false
93 |     }
94 | 
95 |     pub(crate) fn modify_sz(&mut self, oid: Oid, sz: Sz) -> bool {
96 |         if let Some((side, px)) = self.oid_to_side_px.get(&oid) {
97 |             let map = match side {
98 |                 Side::Ask => &mut self.asks,
99 |                 Side::Bid => &mut self.bids,
100 |             };
101 |             let list = map.get_mut(px);
102 |             if let Some(list) = list {
103 |                 let old_order = list.node_value_mut(&oid);
104 |                 if let Some(old_order) = old_order {
105 |                     old_order.modify_sz(sz);
106 |                     return true;
107 |                 }
108 |                 return false;
109 |             }
110 |         }
111 |         false
112 |     }
113 | 
114 |     // we go by the convention that prioritized orders go first in the vector; this makes the aggregation step later easier.
115 | pub(crate) fn to_snapshot(&self) -> Snapshot { 116 | let bids = self.bids.iter().rev().flat_map(|(_, l)| l.to_vec().into_iter().cloned()).collect_vec(); 117 | let asks = self.asks.iter().flat_map(|(_, l)| l.to_vec().into_iter().cloned()).collect_vec(); 118 | Snapshot([bids, asks]) 119 | } 120 | 121 | #[must_use] 122 | pub(crate) fn from_snapshot(mut snapshot: Snapshot, ignore_triggers: bool) -> Self { 123 | let mut book = Self::new(); 124 | if ignore_triggers { 125 | snapshot.remove_triggers(); 126 | } 127 | snapshot.0.into_iter().for_each(|orders| { 128 | for order in orders { 129 | book.add_order(order); 130 | } 131 | }); 132 | book 133 | } 134 | } 135 | 136 | fn add_order_to_book(map: &mut BTreeMap>, order: O) { 137 | let oid = order.oid(); 138 | let limit_px = order.limit_px(); 139 | map.entry(limit_px).or_insert_with(|| LinkedList::new()).push_back(oid, order); 140 | } 141 | 142 | fn match_order(maker_orders: &mut BTreeMap>, taker_order: &mut O) -> Vec { 143 | let mut filled_oids = Vec::new(); 144 | let mut keys_to_remove = Vec::new(); 145 | let taker_side = taker_order.side(); 146 | let limit_px = taker_order.limit_px(); 147 | let order_iter: Box)>> = match taker_side { 148 | Side::Ask => Box::new(maker_orders.iter_mut().rev()), 149 | Side::Bid => Box::new(maker_orders.iter_mut()), 150 | }; 151 | for (&px, list) in order_iter { 152 | let matches = match taker_side { 153 | Side::Ask => px >= limit_px, 154 | Side::Bid => px <= limit_px, 155 | }; 156 | if !matches { 157 | break; 158 | } 159 | while let Some(match_order) = list.head_value_ref_mut_unsafe() { 160 | taker_order.fill(match_order); 161 | if match_order.sz().is_zero() { 162 | filled_oids.push(match_order.oid()); 163 | let _unused = list.remove_front(); 164 | } 165 | if taker_order.sz().is_zero() { 166 | break; 167 | } 168 | } 169 | if list.is_empty() { 170 | keys_to_remove.push(px); 171 | } 172 | if taker_order.sz().is_zero() { 173 | break; 174 | } 175 | } 176 | for key in keys_to_remove { 177 | maker_orders.remove(&key); 178 | } 179 | filled_oids 180 | } 181 | 182 | #[cfg(test)] 183 | mod tests { 184 | use crate::order_book::types::{Coin, Sz}; 185 | 186 | use super::*; 187 | use std::collections::BTreeSet; 188 | 189 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] 190 | struct MinimalOrder { 191 | oid: u64, 192 | side: Side, 193 | sz: u64, 194 | limit_px: u64, 195 | } 196 | 197 | impl InnerOrder for MinimalOrder { 198 | fn oid(&self) -> Oid { 199 | Oid::new(self.oid) 200 | } 201 | 202 | fn side(&self) -> Side { 203 | self.side 204 | } 205 | 206 | fn limit_px(&self) -> Px { 207 | Px::new(self.limit_px) 208 | } 209 | 210 | fn sz(&self) -> Sz { 211 | Sz::new(self.sz) 212 | } 213 | 214 | fn decrement_sz(&mut self, dec: Sz) { 215 | self.sz = self.sz.saturating_sub(dec.value()); 216 | } 217 | 218 | fn fill(&mut self, maker_order: &mut Self) -> Sz { 219 | let match_sz = self.sz().min(maker_order.sz()); 220 | maker_order.decrement_sz(match_sz); 221 | self.decrement_sz(match_sz); 222 | match_sz 223 | } 224 | 225 | fn modify_sz(&mut self, sz: Sz) { 226 | self.sz = sz.value(); 227 | } 228 | 229 | fn convert_trigger(&mut self, _: u64) {} 230 | 231 | fn coin(&self) -> Coin { 232 | Coin::new("") 233 | } 234 | } 235 | 236 | impl MinimalOrder { 237 | fn new(oid: u64, sz: u64, limit_px: u64, side: Side) -> Self { 238 | Self { oid, side, sz, limit_px } 239 | } 240 | } 241 | 242 | #[derive(Default)] 243 | struct OrderFactory { 244 | next_oid: u64, 245 | } 246 | 247 | impl OrderFactory { 248 | fn order(&mut self, sz: u64, 
limit_px: u64, side: Side) -> MinimalOrder { 249 | let order = MinimalOrder::new(self.next_oid, sz, limit_px, side); 250 | self.next_oid += 1; 251 | order 252 | } 253 | 254 | fn batch_order(&mut self, sz: u64, limit_px: u64, side: Side, n: u64) -> Vec { 255 | (0..n).map(|_| self.order(sz, limit_px, side)).collect_vec() 256 | } 257 | } 258 | 259 | #[test] 260 | fn simple_book_test() { 261 | let mut factory = OrderFactory::default(); 262 | let buy_orders1 = factory.batch_order(100, 5, Side::Bid, 3); 263 | let buy_orders2 = factory.batch_order(200, 4, Side::Bid, 4); 264 | let sell_orders1 = factory.batch_order(150, 5, Side::Ask, 2); 265 | let sell_orders2 = factory.batch_order(500, 6, Side::Ask, 2); 266 | let mut book = OrderBook::new(); 267 | for order in buy_orders2.clone() { 268 | book.add_order(order); 269 | } 270 | for order in sell_orders2.clone() { 271 | book.add_order(order); 272 | } 273 | for order in buy_orders1.clone() { 274 | book.add_order(order); 275 | } 276 | book.add_order(sell_orders1[0].clone()); 277 | let mut bids = [buy_orders2, buy_orders1].concat(); 278 | let mut asks = [sell_orders1.clone(), sell_orders2].concat(); 279 | // remove index 4 and alter index 5 (matched) 280 | bids[5].sz -= 50; 281 | bids.remove(4); 282 | // remove index 0 (matched) and 1 (not inserted) 283 | asks.remove(1); 284 | asks.remove(0); 285 | 286 | assert_same_book(Snapshot([bids.clone(), asks.clone()]), book.to_snapshot()); 287 | 288 | assert!(book.cancel_order(Oid::new(3))); 289 | assert!(book.cancel_order(Oid::new(9))); 290 | book.add_order(sell_orders1[1].clone()); 291 | 292 | // index 4 and 5 both get matched, index 0 is canceled (first out of buy_orders2) 293 | bids.remove(5); 294 | bids.remove(4); 295 | bids.remove(0); 296 | 297 | // only thing changing in asks is that index 0 is canceled 298 | asks.remove(0); 299 | 300 | assert_same_book(Snapshot([bids.clone(), asks.clone()]), book.to_snapshot()); 301 | 302 | // test modify size 303 | book.modify_sz(Oid::new(10), Sz::new(450)); 304 | asks[0].sz = 450; 305 | 306 | assert_same_book(Snapshot([bids.clone(), asks.clone()]), book.to_snapshot()); 307 | } 308 | 309 | fn assert_same_book(s1: Snapshot, s2: Snapshot) { 310 | let [b1, a1] = s1.0.map(BTreeSet::from_iter); 311 | let [b2, a2] = s2.0.map(BTreeSet::from_iter); 312 | assert_eq!(b1, b2); 313 | assert_eq!(a1, a2); 314 | } 315 | } 316 | -------------------------------------------------------------------------------- /server/src/order_book/multi_book.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | order_book::{Coin, InnerOrder, Oid, OrderBook, Snapshot, Sz}, 3 | prelude::*, 4 | }; 5 | use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; 6 | use serde::{Deserialize, Serialize}; 7 | use std::{ 8 | collections::{BTreeMap, HashMap}, 9 | path::Path, 10 | }; 11 | use tokio::fs::read_to_string; 12 | 13 | pub(crate) struct Snapshots(HashMap>); 14 | 15 | impl Snapshots { 16 | pub(crate) const fn new(value: HashMap>) -> Self { 17 | Self(value) 18 | } 19 | 20 | pub(crate) const fn as_ref(&self) -> &HashMap> { 21 | &self.0 22 | } 23 | 24 | pub(crate) fn value(self) -> HashMap> { 25 | self.0 26 | } 27 | } 28 | 29 | #[derive(Clone)] 30 | pub(crate) struct OrderBooks { 31 | order_books: BTreeMap>, 32 | } 33 | 34 | impl OrderBooks { 35 | pub(crate) const fn as_ref(&self) -> &BTreeMap> { 36 | &self.order_books 37 | } 38 | #[must_use] 39 | pub(crate) fn from_snapshots(snapshot: Snapshots, ignore_triggers: bool) -> Self { 40 | Self { 41 | 
order_books: snapshot 42 | .value() 43 | .into_iter() 44 | .map(|(coin, book)| (coin, OrderBook::from_snapshot(book, ignore_triggers))) 45 | .collect(), 46 | } 47 | } 48 | 49 | pub(crate) fn add_order(&mut self, order: O) { 50 | let coin = &order.coin(); 51 | self.order_books.entry(coin.clone()).or_insert_with(OrderBook::new).add_order(order); 52 | } 53 | 54 | pub(crate) fn cancel_order(&mut self, oid: Oid, coin: Coin) -> bool { 55 | self.order_books.get_mut(&coin).is_some_and(|book| book.cancel_order(oid)) 56 | } 57 | 58 | // change size to reflect how much gets matched during the block 59 | pub(crate) fn modify_sz(&mut self, oid: Oid, coin: Coin, sz: Sz) -> bool { 60 | self.order_books.get_mut(&coin).is_some_and(|book| book.modify_sz(oid, sz)) 61 | } 62 | } 63 | 64 | impl OrderBooks { 65 | #[must_use] 66 | pub(crate) fn to_snapshots_par(&self) -> Snapshots { 67 | let snapshots = self.order_books.par_iter().map(|(c, book)| (c.clone(), book.to_snapshot())).collect(); 68 | Snapshots(snapshots) 69 | } 70 | } 71 | 72 | pub(crate) fn load_snapshots_from_str(str: &str) -> Result<(u64, Snapshots)> 73 | where 74 | O: TryFrom, 75 | R: Serialize + for<'a> Deserialize<'a>, 76 | { 77 | #[allow(clippy::type_complexity)] 78 | let (height, snapshot): (u64, Vec<(String, [Vec; 2])>) = serde_json::from_str(str)?; 79 | Ok(( 80 | height, 81 | Snapshots::new( 82 | snapshot 83 | .into_iter() 84 | .map(|(coin, [bids, asks])| { 85 | let bids: Vec = bids.into_iter().map(O::try_from).collect::>>()?; 86 | let asks: Vec = asks.into_iter().map(O::try_from).collect::>>()?; 87 | Ok((Coin::new(&coin), Snapshot([bids, asks]))) 88 | }) 89 | .collect::>>>()?, 90 | ), 91 | )) 92 | } 93 | 94 | pub(crate) async fn load_snapshots_from_json(path: &Path) -> Result<(u64, Snapshots)> 95 | where 96 | O: TryFrom, 97 | R: Serialize + for<'a> Deserialize<'a>, 98 | { 99 | let file_contents = read_to_string(path).await?; 100 | load_snapshots_from_str(&file_contents) 101 | } 102 | 103 | #[cfg(test)] 104 | mod tests { 105 | use crate::{ 106 | order_book::{ 107 | InnerOrder, OrderBook, Px, Side, Snapshot, Sz, 108 | levels::build_l2_level, 109 | multi_book::{Coin, Snapshots, load_snapshots_from_json, load_snapshots_from_str}, 110 | }, 111 | prelude::*, 112 | types::{ 113 | L4Order, Level, 114 | inner::{InnerL4Order, InnerLevel}, 115 | }, 116 | }; 117 | use alloy::primitives::Address; 118 | use itertools::Itertools; 119 | use std::{fs::create_dir_all, path::PathBuf}; 120 | 121 | #[must_use] 122 | fn snapshot_to_l2_snapshot( 123 | snapshot: &Snapshot, 124 | n_levels: Option, 125 | n_sig_figs: Option, 126 | mantissa: Option, 127 | ) -> Snapshot { 128 | let [bids, asks] = &snapshot.0; 129 | let bids = orders_to_l2_levels(bids, Side::Bid, n_levels, n_sig_figs, mantissa); 130 | let asks = orders_to_l2_levels(asks, Side::Ask, n_levels, n_sig_figs, mantissa); 131 | Snapshot([bids, asks]) 132 | } 133 | 134 | #[must_use] 135 | fn orders_to_l2_levels( 136 | orders: &[O], 137 | side: Side, 138 | n_levels: Option, 139 | n_sig_figs: Option, 140 | mantissa: Option, 141 | ) -> Vec { 142 | let mut levels = Vec::new(); 143 | if n_levels == Some(0) { 144 | return levels; 145 | } 146 | let mut cur_level: Option = None; 147 | 148 | for order in orders { 149 | if build_l2_level( 150 | &mut cur_level, 151 | &mut levels, 152 | n_levels, 153 | n_sig_figs, 154 | mantissa, 155 | side, 156 | InnerLevel { px: order.limit_px(), sz: order.sz(), n: 1 }, 157 | ) { 158 | break; 159 | } 160 | } 161 | levels.extend(cur_level.take()); 162 | levels 163 | } 164 | 165 | 
#[derive(Default)] 166 | struct OrderManager { 167 | next_oid: u64, 168 | } 169 | 170 | fn simple_inner_order(oid: u64, side: Side, sz: String, px: String) -> Result { 171 | let px = Px::parse_from_str(&px)?; 172 | let sz = Sz::parse_from_str(&sz)?; 173 | Ok(InnerL4Order { 174 | user: Address::new([0; 20]), 175 | coin: Coin::new(""), 176 | side, 177 | limit_px: px, 178 | sz, 179 | oid, 180 | timestamp: 0, 181 | trigger_condition: String::new(), 182 | is_trigger: false, 183 | trigger_px: String::new(), 184 | is_position_tpsl: false, 185 | reduce_only: false, 186 | order_type: String::new(), 187 | tif: None, 188 | cloid: None, 189 | }) 190 | } 191 | 192 | impl OrderManager { 193 | fn order(&mut self, sz: &str, limit_px: &str, side: Side) -> Result { 194 | let order = simple_inner_order(self.next_oid, side, sz.to_string(), limit_px.to_string())?; 195 | self.next_oid += 1; 196 | Ok(order) 197 | } 198 | 199 | fn batch_order(&mut self, sz: &str, limit_px: &str, side: Side, mult: u64) -> Result> { 200 | (0..mult).map(|_| self.order(sz, limit_px, side)).try_collect() 201 | } 202 | } 203 | 204 | fn setup_book(book: &mut OrderBook) -> Snapshots { 205 | let mut o = OrderManager::default(); 206 | let buy_orders1 = o.batch_order("100", "34.01", Side::Bid, 4).unwrap(); 207 | let buy_orders2 = o.batch_order("200", "34.5", Side::Bid, 2).unwrap(); 208 | let buy_orders3 = o.batch_order("300", "34.6", Side::Bid, 1).unwrap(); 209 | let sell_orders1 = o.batch_order("100", "35", Side::Ask, 4).unwrap(); 210 | let sell_orders2 = o.batch_order("200", "35.1", Side::Ask, 2).unwrap(); 211 | let sell_orders3 = o.batch_order("300", "35.5", Side::Ask, 1).unwrap(); 212 | for orders in [buy_orders1, buy_orders2, buy_orders3, sell_orders1, sell_orders2, sell_orders3] { 213 | for o in orders { 214 | book.add_order(o); 215 | } 216 | } 217 | Snapshots(vec![(Coin::new(""), book.to_snapshot()); 2].into_iter().collect()) 218 | } 219 | 220 | const SNAPSHOT_JSON: &str = r#"[100, 221 | [ 222 | [ 223 | "@1", 224 | [ 225 | [ 226 | [ 227 | "0x0000000000000000000000000000000000000000", 228 | { 229 | "coin": "@1", 230 | "side": "B", 231 | "limitPx": "30.444", 232 | "sz": "100.0", 233 | "oid": 105338503859, 234 | "timestamp": 1750660644034, 235 | "triggerCondition": "N/A", 236 | "isTrigger": false, 237 | "triggerPx": "0.0", 238 | "children": [], 239 | "isPositionTpsl": false, 240 | "reduceOnly": false, 241 | "orderType": "Limit", 242 | "origSz": "100.0", 243 | "tif": "Alo", 244 | "cloid": null 245 | } 246 | ], 247 | [ 248 | "0x0000000000000000000000000000000000000000", 249 | { 250 | "coin": "@1", 251 | "side": "B", 252 | "limitPx": "30.385", 253 | "sz": "5.45", 254 | "oid": 105337808436, 255 | "timestamp": 1750660453608, 256 | "triggerCondition": "N/A", 257 | "isTrigger": false, 258 | "triggerPx": "0.0", 259 | "children": [], 260 | "isPositionTpsl": false, 261 | "reduceOnly": false, 262 | "orderType": "Limit", 263 | "origSz": "5.45", 264 | "tif": "Gtc", 265 | "cloid": null 266 | } 267 | ] 268 | ], 269 | [] 270 | ] 271 | ] 272 | ] 273 | ]"#; 274 | 275 | #[tokio::test] 276 | async fn test_deserialization_from_json() -> Result<()> { 277 | create_dir_all("tmp/deserialization_test")?; 278 | fs::write("tmp/deserialization_test/out.json", SNAPSHOT_JSON)?; 279 | load_snapshots_from_json::(&PathBuf::from( 280 | "tmp/deserialization_test/out.json", 281 | )) 282 | .await?; 283 | Ok(()) 284 | } 285 | 286 | #[test] 287 | fn test_deserialization() -> Result<()> { 288 | load_snapshots_from_str::(SNAPSHOT_JSON)?; 289 | Ok(()) 290 | } 291 | 292 | #[test] 
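// Worked example (numbers taken from `setup_book` and the first assertion below):
// rounding the bids to two significant figures merges 4 x 100 @ 34.01, 2 x 200 @ 34.5
// and 1 x 300 @ 34.6 into the single aggregated level { px: "34", sz: "1100", n: 7 }.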
293 | fn test_l4_snapshot_to_l2_snapshot() { 294 | let mut book = OrderBook::new(); 295 | let coin = Coin::new(""); 296 | let snapshot = setup_book(&mut book); 297 | let levels = snapshot_to_l2_snapshot(snapshot.0.get(&coin).unwrap(), Some(2), Some(2), Some(1)); 298 | let raw_levels = levels.export_inner_snapshot(); 299 | let ans = [ 300 | vec![Level::new("34".to_string(), "1100".to_string(), 7)], 301 | vec![ 302 | Level::new("35".to_string(), "400".to_string(), 4), 303 | Level::new("36".to_string(), "700".to_string(), 3), 304 | ], 305 | ]; 306 | assert_eq!(ans, raw_levels); 307 | 308 | let levels = snapshot_to_l2_snapshot(snapshot.0.get(&coin).unwrap(), Some(2), Some(3), Some(5)); 309 | let raw_levels = levels.export_inner_snapshot(); 310 | let ans = [ 311 | vec![ 312 | Level::new("34.5".to_string(), "700".to_string(), 3), 313 | Level::new("34".to_string(), "400".to_string(), 4), 314 | ], 315 | vec![ 316 | Level::new("35".to_string(), "400".to_string(), 4), 317 | Level::new("35.5".to_string(), "700".to_string(), 3), 318 | ], 319 | ]; 320 | assert_eq!(ans, raw_levels); 321 | let snapshot_from_book = book.to_l2_snapshot(Some(2), Some(3), Some(5)); 322 | let raw_levels_from_book = snapshot_from_book.export_inner_snapshot(); 323 | let snapshot_from_book = book.to_l2_snapshot(None, None, None); 324 | let snapshot_from_snapshot = snapshot_from_book.to_l2_snapshot(Some(2), Some(3), Some(5)); 325 | let raw_levels_from_snapshot = snapshot_from_snapshot.export_inner_snapshot(); 326 | assert_eq!(raw_levels_from_book, ans); 327 | assert_eq!(raw_levels_from_snapshot, ans); 328 | 329 | let levels = snapshot_to_l2_snapshot(snapshot.0.get(&coin).unwrap(), Some(2), None, Some(5)); 330 | let raw_levels = levels.export_inner_snapshot(); 331 | let ans = [ 332 | vec![ 333 | Level::new("34.6".to_string(), "300".to_string(), 1), 334 | Level::new("34.5".to_string(), "400".to_string(), 2), 335 | ], 336 | vec![ 337 | Level::new("35".to_string(), "400".to_string(), 4), 338 | Level::new("35.1".to_string(), "400".to_string(), 2), 339 | ], 340 | ]; 341 | assert_eq!(ans, raw_levels); 342 | } 343 | } 344 | -------------------------------------------------------------------------------- /server/src/servers/websocket_server.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | listeners::order_book::{ 3 | InternalMessage, L2SnapshotParams, L2Snapshots, OrderBookListener, TimedSnapshots, hl_listen, 4 | }, 5 | order_book::{Coin, Snapshot}, 6 | prelude::*, 7 | types::{ 8 | L2Book, L4Book, L4BookUpdates, L4Order, Trade, 9 | inner::InnerLevel, 10 | node_data::{Batch, NodeDataFill, NodeDataOrderDiff, NodeDataOrderStatus}, 11 | subscription::{ClientMessage, DEFAULT_LEVELS, ServerResponse, Subscription, SubscriptionManager}, 12 | }, 13 | }; 14 | use axum::{Router, response::IntoResponse, routing::get}; 15 | use futures_util::{SinkExt, StreamExt}; 16 | use log::{error, info}; 17 | use std::{ 18 | collections::{HashMap, HashSet}, 19 | env::home_dir, 20 | sync::Arc, 21 | }; 22 | use tokio::select; 23 | use tokio::{ 24 | net::TcpListener, 25 | sync::{ 26 | Mutex, 27 | broadcast::{Sender, channel}, 28 | }, 29 | }; 30 | use yawc::{FrameView, OpCode, WebSocket}; 31 | 32 | pub async fn run_websocket_server(address: &str, ignore_spot: bool, compression_level: u32) -> Result<()> { 33 | let (internal_message_tx, _) = channel::>(100); 34 | 35 | // Central task: listen to messages and forward them for distribution 36 | let home_dir = home_dir().ok_or("Could not find home directory")?; 37 | 
let listener = { 38 | let internal_message_tx = internal_message_tx.clone(); 39 | OrderBookListener::new(Some(internal_message_tx), ignore_spot) 40 | }; 41 | let listener = Arc::new(Mutex::new(listener)); 42 | { 43 | let listener = listener.clone(); 44 | tokio::spawn(async move { 45 | if let Err(err) = hl_listen(listener, home_dir).await { 46 | error!("Listener fatal error: {err}"); 47 | std::process::exit(1); 48 | } 49 | }); 50 | } 51 | 52 | let websocket_opts = 53 | yawc::Options::default().with_compression_level(yawc::CompressionLevel::new(compression_level)); 54 | let app = Router::new().route( 55 | "/ws", 56 | get({ 57 | let internal_message_tx = internal_message_tx.clone(); 58 | async move |ws_upgrade| { 59 | ws_handler(ws_upgrade, internal_message_tx.clone(), listener.clone(), ignore_spot, websocket_opts) 60 | } 61 | }), 62 | ); 63 | 64 | let listener = TcpListener::bind(address).await?; 65 | info!("WebSocket server running at ws://{address}"); 66 | 67 | if let Err(err) = axum::serve(listener, app.into_make_service()).await { 68 | error!("Server fatal error: {err}"); 69 | std::process::exit(2); 70 | } 71 | 72 | Ok(()) 73 | } 74 | 75 | fn ws_handler( 76 | incoming: yawc::IncomingUpgrade, 77 | internal_message_tx: Sender>, 78 | listener: Arc>, 79 | ignore_spot: bool, 80 | websocket_opts: yawc::Options, 81 | ) -> impl IntoResponse { 82 | let (resp, fut) = incoming.upgrade(websocket_opts).unwrap(); 83 | tokio::spawn(async move { 84 | let ws = match fut.await { 85 | Ok(ok) => ok, 86 | Err(err) => { 87 | log::error!("failed to upgrade websocket connection: {err}"); 88 | return; 89 | } 90 | }; 91 | 92 | handle_socket(ws, internal_message_tx, listener, ignore_spot).await 93 | }); 94 | 95 | resp 96 | } 97 | 98 | async fn handle_socket( 99 | mut socket: WebSocket, 100 | internal_message_tx: Sender>, 101 | listener: Arc>, 102 | ignore_spot: bool, 103 | ) { 104 | let mut internal_message_rx = internal_message_tx.subscribe(); 105 | let is_ready = listener.lock().await.is_ready(); 106 | let mut manager = SubscriptionManager::default(); 107 | let mut universe = listener.lock().await.universe().into_iter().map(|c| c.value()).collect(); 108 | if !is_ready { 109 | let msg = ServerResponse::Error("Order book not ready for streaming (waiting for snapshot)".to_string()); 110 | send_socket_message(&mut socket, msg).await; 111 | return; 112 | } 113 | loop { 114 | select! 
{ 115 | recv_result = internal_message_rx.recv() => { 116 | match recv_result { 117 | Ok(msg) => { 118 | match msg.as_ref() { 119 | InternalMessage::Snapshot{ l2_snapshots, time } => { 120 | universe = new_universe(l2_snapshots, ignore_spot); 121 | for sub in manager.subscriptions() { 122 | send_ws_data_from_snapshot(&mut socket, sub, l2_snapshots.as_ref(), *time).await; 123 | } 124 | }, 125 | InternalMessage::Fills{ batch } => { 126 | let mut trades = coin_to_trades(batch); 127 | for sub in manager.subscriptions() { 128 | send_ws_data_from_trades(&mut socket, sub, &mut trades).await; 129 | } 130 | }, 131 | InternalMessage::L4BookUpdates{ diff_batch, status_batch } => { 132 | let mut book_updates = coin_to_book_updates(diff_batch, status_batch); 133 | for sub in manager.subscriptions() { 134 | send_ws_data_from_book_updates(&mut socket, sub, &mut book_updates).await; 135 | } 136 | }, 137 | } 138 | 139 | } 140 | Err(err) => { 141 | error!("Receiver error: {err}"); 142 | return; 143 | } 144 | } 145 | } 146 | 147 | msg = socket.next() => { 148 | if let Some(frame) = msg { 149 | match frame.opcode { 150 | OpCode::Text => { 151 | let text = match std::str::from_utf8(&frame.payload) { 152 | Ok(text) => text, 153 | Err(err) => { 154 | log::warn!("unable to parse websocket content: {err}: {:?}", frame.payload.as_ref()); 155 | // deserves to close the connection because the payload is not a valid utf8 string. 156 | return; 157 | } 158 | }; 159 | 160 | info!("Client message: {text}"); 161 | 162 | if let Ok(value) = serde_json::from_str::(text) { 163 | receive_client_message(&mut socket, &mut manager, value, &universe, listener.clone()).await; 164 | } 165 | else { 166 | let msg = ServerResponse::Error(format!("Error parsing JSON into valid websocket request: {text}")); 167 | send_socket_message(&mut socket, msg).await; 168 | } 169 | } 170 | OpCode::Close => { 171 | info!("Client disconnected"); 172 | return; 173 | } 174 | _ => {} 175 | } 176 | } else { 177 | info!("Client connection closed"); 178 | return; 179 | } 180 | } 181 | } 182 | } 183 | } 184 | 185 | async fn receive_client_message( 186 | socket: &mut WebSocket, 187 | manager: &mut SubscriptionManager, 188 | client_message: ClientMessage, 189 | universe: &HashSet, 190 | listener: Arc>, 191 | ) { 192 | let subscription = match &client_message { 193 | ClientMessage::Unsubscribe { subscription } | ClientMessage::Subscribe { subscription } => subscription.clone(), 194 | }; 195 | // this is used for display purposes only, hence unwrap_or_default. It also shouldn't fail 196 | let sub = serde_json::to_string(&subscription).unwrap_or_default(); 197 | if !subscription.validate(universe) { 198 | let msg = ServerResponse::Error(format!("Invalid subscription: {sub}")); 199 | send_socket_message(socket, msg).await; 200 | return; 201 | } 202 | let (word, success) = match &client_message { 203 | ClientMessage::Subscribe { .. } => ("", manager.subscribe(subscription)), 204 | ClientMessage::Unsubscribe { .. 
} => ("un", manager.unsubscribe(subscription)), 205 | }; 206 | if success { 207 | let snapshot_msg = if let ClientMessage::Subscribe { subscription } = &client_message { 208 | let msg = subscription.handle_immediate_snapshot(listener).await; 209 | match msg { 210 | Ok(msg) => msg, 211 | Err(err) => { 212 | manager.unsubscribe(subscription.clone()); 213 | let msg = ServerResponse::Error(format!("Unable to grab order book snapshot: {err}")); 214 | send_socket_message(socket, msg).await; 215 | return; 216 | } 217 | } 218 | } else { 219 | None 220 | }; 221 | let msg = ServerResponse::SubscriptionResponse(client_message); 222 | send_socket_message(socket, msg).await; 223 | if let Some(snapshot_msg) = snapshot_msg { 224 | send_socket_message(socket, snapshot_msg).await; 225 | } 226 | } else { 227 | let msg = ServerResponse::Error(format!("Already {word}subscribed: {sub}")); 228 | send_socket_message(socket, msg).await; 229 | } 230 | } 231 | 232 | async fn send_socket_message(socket: &mut WebSocket, msg: ServerResponse) { 233 | let msg = serde_json::to_string(&msg); 234 | match msg { 235 | Ok(msg) => { 236 | if let Err(err) = socket.send(FrameView::text(msg)).await { 237 | error!("Failed to send: {err}"); 238 | } 239 | } 240 | Err(err) => { 241 | error!("Server response serialization error: {err}"); 242 | } 243 | } 244 | } 245 | 246 | // derive it from l2_snapshots because thats convenient 247 | fn new_universe(l2_snapshots: &L2Snapshots, ignore_spot: bool) -> HashSet { 248 | l2_snapshots 249 | .as_ref() 250 | .iter() 251 | .filter_map(|(c, _)| if !c.is_spot() || !ignore_spot { Some(c.clone().value()) } else { None }) 252 | .collect() 253 | } 254 | 255 | async fn send_ws_data_from_snapshot( 256 | socket: &mut WebSocket, 257 | subscription: &Subscription, 258 | snapshot: &HashMap>>, 259 | time: u64, 260 | ) { 261 | if let Subscription::L2Book { coin, n_sig_figs, n_levels, mantissa } = subscription { 262 | let snapshot = snapshot.get(&Coin::new(coin)); 263 | if let Some(snapshot) = 264 | snapshot.and_then(|snapshot| snapshot.get(&L2SnapshotParams::new(*n_sig_figs, *mantissa))) 265 | { 266 | let n_levels = n_levels.unwrap_or(DEFAULT_LEVELS); 267 | let snapshot = snapshot.truncate(n_levels); 268 | let snapshot = snapshot.export_inner_snapshot(); 269 | let l2_book = L2Book::from_l2_snapshot(coin.clone(), snapshot, time); 270 | let msg = ServerResponse::L2Book(l2_book); 271 | send_socket_message(socket, msg).await; 272 | } else { 273 | error!("Coin {coin} not found"); 274 | } 275 | } 276 | } 277 | 278 | fn coin_to_trades(batch: &Batch) -> HashMap> { 279 | let mut fills = batch.clone().events(); 280 | let mut trades = HashMap::new(); 281 | while fills.len() >= 2 { 282 | let f2 = fills.pop(); 283 | let f1 = fills.pop(); 284 | if let Some(f1) = f1 { 285 | if let Some(f2) = f2 { 286 | let mut fills = HashMap::new(); 287 | fills.insert(f1.1.side, f1); 288 | fills.insert(f2.1.side, f2); 289 | let trade = Trade::from_fills(fills); 290 | let coin = trade.coin.clone(); 291 | trades.entry(coin).or_insert_with(Vec::new).push(trade); 292 | } 293 | } 294 | } 295 | for list in trades.values_mut() { 296 | list.reverse(); 297 | } 298 | trades 299 | } 300 | 301 | fn coin_to_book_updates( 302 | diff_batch: &Batch, 303 | status_batch: &Batch, 304 | ) -> HashMap { 305 | let diffs = diff_batch.clone().events(); 306 | let statuses = status_batch.clone().events(); 307 | let time = diff_batch.block_time(); 308 | let height = diff_batch.block_number(); 309 | let mut updates = HashMap::new(); 310 | for diff in diffs { 311 | let 
coin = diff.coin().value(); 312 | updates.entry(coin).or_insert_with(|| L4BookUpdates::new(time, height)).book_diffs.push(diff); 313 | } 314 | for status in statuses { 315 | let coin = status.order.coin.clone(); 316 | updates.entry(coin).or_insert_with(|| L4BookUpdates::new(time, height)).order_statuses.push(status); 317 | } 318 | updates 319 | } 320 | 321 | async fn send_ws_data_from_book_updates( 322 | socket: &mut WebSocket, 323 | subscription: &Subscription, 324 | book_updates: &mut HashMap, 325 | ) { 326 | if let Subscription::L4Book { coin } = subscription { 327 | if let Some(updates) = book_updates.remove(coin) { 328 | let msg = ServerResponse::L4Book(L4Book::Updates(updates)); 329 | send_socket_message(socket, msg).await; 330 | } 331 | } 332 | } 333 | 334 | async fn send_ws_data_from_trades( 335 | socket: &mut WebSocket, 336 | subscription: &Subscription, 337 | trades: &mut HashMap>, 338 | ) { 339 | if let Subscription::Trades { coin } = subscription { 340 | if let Some(trades) = trades.remove(coin) { 341 | let msg = ServerResponse::Trades(trades); 342 | send_socket_message(socket, msg).await; 343 | } 344 | } 345 | } 346 | 347 | impl Subscription { 348 | // snapshots that begin a stream 349 | async fn handle_immediate_snapshot( 350 | &self, 351 | listener: Arc>, 352 | ) -> Result> { 353 | if let Self::L4Book { coin } = self { 354 | let snapshot = listener.lock().await.compute_snapshot(); 355 | if let Some(TimedSnapshots { time, height, snapshot }) = snapshot { 356 | let snapshot = 357 | snapshot.value().into_iter().filter(|(c, _)| *c == Coin::new(coin)).collect::>().pop(); 358 | if let Some((coin, snapshot)) = snapshot { 359 | let snapshot = 360 | snapshot.as_ref().clone().map(|orders| orders.into_iter().map(L4Order::from).collect()); 361 | return Ok(Some(ServerResponse::L4Book(L4Book::Snapshot { 362 | coin: coin.value(), 363 | time, 364 | height, 365 | levels: snapshot, 366 | }))); 367 | } 368 | } 369 | return Err("Snapshot Failed".into()); 370 | } 371 | Ok(None) 372 | } 373 | } 374 | -------------------------------------------------------------------------------- /server/src/listeners/directory.rs: -------------------------------------------------------------------------------- 1 | use crate::{prelude::*, types::node_data::EventSource}; 2 | use fs::File; 3 | use io::Read; 4 | use std::path::PathBuf; 5 | 6 | // We want all of these functions to be synchronous just for ease of use since they are fast (for now) 7 | // Asynchronous stuff can be done in the listen function (waiting for next file event) 8 | pub(crate) trait DirectoryListener { 9 | // are we tracking a file right now 10 | fn is_reading(&self, event_source: EventSource) -> bool; 11 | // get file that we are tracking 12 | fn file_mut(&mut self, event_source: EventSource) -> &mut Option; 13 | // when file is created what do we do? 14 | fn on_file_creation(&mut self, new_file: PathBuf, event_source: EventSource) -> Result<()>; 15 | // how do we want to process data that we just processed? 
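// (For a minimal implementation, see `TestListener` in the tests below: it keeps one
// `Option<File>`, opens each newly created file, and appends every chunk handed to
// `process_data` onto a shared `String` history.)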
16 | fn process_data(&mut self, data: String, event_source: EventSource) -> Result<()>; 17 | 18 | fn on_file_modification(&mut self, event_source: EventSource) -> Result<()> { 19 | let mut buf = String::new(); 20 | let file = self.file_mut(event_source).as_mut().ok_or("No file being tracked")?; 21 | file.read_to_string(&mut buf)?; 22 | self.process_data(buf, event_source)?; 23 | Ok(()) 24 | } 25 | } 26 | 27 | #[cfg(test)] 28 | mod tests { 29 | use crate::{ 30 | listeners::directory::{DirectoryListener, EventSource}, 31 | prelude::*, 32 | }; 33 | use fs::{File, create_dir_all, read_dir, remove_dir_all, remove_file}; 34 | use log::{error, info}; 35 | use notify::{RecursiveMode, Watcher, recommended_watcher}; 36 | use rand::{Rng, SeedableRng, rngs::StdRng}; 37 | use std::{ 38 | io::{Seek, SeekFrom}, 39 | path::{Path, PathBuf}, 40 | sync::{Arc, Mutex}, 41 | time::Duration, 42 | }; 43 | use tokio::{fs::File as TokioFile, io::AsyncWriteExt, sync::mpsc::unbounded_channel, time::sleep}; 44 | 45 | const MOCK_HL_DIR: &str = "tmp/ws_listener_test"; 46 | const DATA: [&str; 2] = [ 47 | r#"{"coin":"@151","side":"A","time":"2025-06-24T02:56:36.172847427","px":"2393.9","sz":"0.1539","hash":"0x2b21750229be769650b604261eaac1018c00c45812652efbbdd35fe0ecb201a1","trade_dir_override":"Na","side_info":[{"user":"0xecb63caa47c7c4e77f60f1ce858cf28dc2b82b00","start_pos":"1166.565307356","oid":105686971733,"twap_id":null,"cloid":"0x1070fff92506b3ab5e5aec135e5a5ddd"},{"user":"0xb65117c1e1006e7b2413fa90e96fcbe3fa83ed75","start_pos":"0.153928559","oid":105686976226,"twap_id":null,"cloid":null}]} 48 | {"coin":"@166","side":"A","time":"2025-06-24T02:56:36.172847427","px":"1.0003","sz":"184.11","hash":"0x0ffc6896b2147680820e04261eaac1018c0101735014e44b56f038478b13ad8f","trade_dir_override":"Na","side_info":[{"user":"0x107332a1729ba0bcf6171117815a87b72a7e6082","start_pos":"36301.55539655","oid":105686050113,"twap_id":null,"cloid":null},{"user":"0xb65117c1e1006e7b2413fa90e96fcbe3fa83ed75","start_pos":"184.12704003","oid":105686976227,"twap_id":null,"cloid":null}]} 49 | {"coin":"@107","side":"B","time":"2025-06-24T02:56:36.172847427","px":"36.796","sz":"15.0","hash":"0x0f0fd1630a15ee0cfbf704261eaac1018c01021cbd299fd0aab0fc4b47dbfeb5","trade_dir_override":"Na","side_info":[{"user":"0xb65117c1e1006e7b2413fa90e96fcbe3fa83ed75","start_pos":"1438.73242692","oid":105686976228,"twap_id":null,"cloid":null},{"user":"0x91853bcefbdc3447768413e6db7d6e0a81c4be56","start_pos":"1230.04231931","oid":105686976111,"twap_id":null,"cloid":"0x0000000000001371000031839dda1c54"}]} 50 | {"coin":"HYPE","side":"B","time":"2025-06-24T02:56:36.241836440","px":"36.773","sz":"0.74","hash":"0xbc8b4f281951cd10af1704261eaac201910053453aba82fd6f64e9ad5cc3eab8","trade_dir_override":"Na","side_info":[{"user":"0xc2d25de009756d3e2ff8a69fd68a02dd7679a8a2","start_pos":"1.25","oid":105686976375,"twap_id":null,"cloid":null},{"user":"0xaf2ac71f62f341e5823d6985492409e92c940447","start_pos":"100.0","oid":105686976012,"twap_id":null,"cloid":"0x000000000000cba600002935c6911dbf"}]} 51 | {"coin":"@142","side":"A","time":"2025-06-24T02:56:36.241836440","px":"104924.0","sz":"0.01094","hash":"0x4e6ee8e4a2376fb83e1104261eaac2019a004ef0e8feaeee0c04557f2cb8faa1","trade_dir_override":"Na","side_info":[{"user":"0xdcac85ecae7148886029c20e661d848a4de99ce2","start_pos":"2.9973766089","oid":105686964903,"twap_id":null,"cloid":null},{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"1.4552651155","oid":105686976384,"twap_id":null,"cloid":null}]} 52 | 
{"coin":"@142","side":"A","time":"2025-06-24T02:56:36.241836440","px":"104924.0","sz":"0.00193","hash":"0x4e6ee8e4a2376fb83e1104261eaac2019a004ef0e8feaeee0c04557f2cb8faa1","trade_dir_override":"Na","side_info":[{"user":"0x15f99ee9c49d80851f3dfb9a899da42ec65a1b1b","start_pos":"0.0000145105","oid":105686976114,"twap_id":null,"cloid":null},{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"1.4443251155","oid":105686976384,"twap_id":null,"cloid":null}]} 53 | {"coin":"@151","side":"A","time":"2025-06-24T02:56:36.241836440","px":"2393.9","sz":"0.2641","hash":"0x95f3d0546bd85531a6d704261eaac2019b0034d53e7526933291402af6adebf7","trade_dir_override":"Na","side_info":[{"user":"0xecb63caa47c7c4e77f60f1ce858cf28dc2b82b00","start_pos":"1166.719207356","oid":105686971733,"twap_id":null,"cloid":"0x1070fff92506b3ab5e5aec135e5a5ddd"},{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"49.123091455","oid":105686976385,"twap_id":null,"cloid":null}]} 54 | {"coin":"AI16Z","side":"B","time":"2025-06-24T02:56:36.510075144","px":"0.15823","sz":"208.4","hash":"0xb791283313f742315be004261eaac4012a00bb7b07862d44cba4de6813f76602","trade_dir_override":"Na","side_info":[{"user":"0x010461c14e146ac35fe42271bdc1134ee31c703a","start_pos":"-305401.0","oid":105686976469,"twap_id":null,"cloid":null},{"user":"0xb4321b142b2a03ce20fcab2007ff6990b9acba93","start_pos":"18515.4","oid":105686969474,"twap_id":null,"cloid":"0x0000000000000000de5c5df32db69553"}]} 55 | {"coin":"PURR/USDC","side":"B","time":"2025-06-24T02:56:36.578815265","px":"0.20408","sz":"250.0","hash":"0x100515592168666abbbb04261eaac5018b005440d32a1bba306fc3ab0f0a7ff0","trade_dir_override":"Na","side_info":[{"user":"0x162cc7c861ebd0c06b3d72319201150482518185","start_pos":"1107212.2156400001","oid":105686976573,"twap_id":null,"cloid":"0x000000000000000033d1d30c32c2afaa"},{"user":"0xea419262a97e92b6eee39eb3d01138bf2949bb9d","start_pos":"694623.6427","oid":105686975104,"twap_id":null,"cloid":"0x00740ed68330003d00587dc670f10000"}]} 56 | {"coin":"PENDLE","side":"A","time":"2025-06-24T02:56:36.633936095","px":"3.5816","sz":"83.0","hash":"0xd02d3853fdde2296e34e04261eaac6014d00f37036a4ac1812d05fc71c260025","trade_dir_override":"Na","side_info":[{"user":"0x010461c14e146ac35fe42271bdc1134ee31c703a","start_pos":"25471.0","oid":105686972471,"twap_id":null,"cloid":null},{"user":"0x451333eb2f8230cda918db136623da9c26acea0b","start_pos":"-17530.0","oid":105686976632,"twap_id":null,"cloid":"0xaed6d063c14642c4a723b082c96e25c3"}]} 57 | {"coin":"TRX","side":"A","time":"2025-06-24T02:56:36.633936095","px":"0.27225","sz":"1835.0","hash":"0xee7211f542fb48054fd204261eaac60150000a0c8d0ce81ca4e9785d7027e910","trade_dir_override":"Na","side_info":[{"user":"0xecb63caa47c7c4e77f60f1ce858cf28dc2b82b00","start_pos":"-412654.0","oid":105686952032,"twap_id":null,"cloid":"0x6560df107f5fd7092a10871e274ef9e6"},{"user":"0xfc8c156428a8e48cb8d0356db16e59bec4c0ecea","start_pos":"212816.0","oid":105686976635,"twap_id":null,"cloid":"0xae9f6c0100000000c53a025834064b18"}]} 58 | 
{"coin":"PURR/USDC","side":"A","time":"2025-06-24T02:56:36.633936095","px":"0.20414","sz":"250.0","hash":"0xa35dc0ec4d0f1be7954104261eaac601660036c5486e43d1298fffb58ed3ef26","trade_dir_override":"Na","side_info":[{"user":"0x162cc7c861ebd0c06b3d72319201150482518185","start_pos":"1107462.1781500001","oid":105686976573,"twap_id":null,"cloid":"0x000000000000000033d1d30c32c2afaa"},{"user":"0x2afe745dfd735e24aae8ac39969140dd0511496b","start_pos":"219651.02052","oid":105686976661,"twap_id":null,"cloid":"0x00000000f373000690d9e43c68ee6b51"}]} 59 | {"coin":"ENA","side":"B","time":"2025-06-24T02:56:36.633936095","px":"0.2685","sz":"73.0","hash":"0xe017f0d464bbf330a7fd04261eaac60168007da9578ec00b69d3540c8e9efa0a","trade_dir_override":"Na","side_info":[{"user":"0x31ca8395cf837de08b24da3f660e77761dfb974b","start_pos":"59657.0","oid":105686976663,"twap_id":null,"cloid":null},{"user":"0x6ba889db7f923622d3548f621ecc2054b80c1817","start_pos":"23809.0","oid":105686931473,"twap_id":null,"cloid":"0x17507337595095386710000000000000"}]} 60 | "#, 61 | r#"{"coin":"STX","side":"B","time":"2025-06-24T02:56:36.894949789","px":"0.63591","sz":"30.6","hash":"0x7306fd0390c93a1cfdc504261eaac9010400e00556a0aef084a2a8161f025a09","trade_dir_override":"Na","side_info":[{"user":"0x31ca8395cf837de08b24da3f660e77761dfb974b","start_pos":"182153.4","oid":105686977410,"twap_id":null,"cloid":null},{"user":"0xee126cd566febeebee9a715812df601e1e69512c","start_pos":"4854.0","oid":105686843698,"twap_id":null,"cloid":"0x9d2c065a6cf37141775d522dcf38855e"}]} 62 | {"coin":"@107","side":"B","time":"2025-06-24T02:56:36.952195987","px":"36.801","sz":"1.56","hash":"0x5376478958639270d25b04261eaaca02011300b6823c88d3cdeeb6d1217e2b13","trade_dir_override":"Na","side_info":[{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"17674.17669565","oid":105686977611,"twap_id":null,"cloid":null},{"user":"0x8ac0ec32d282d0841f40c4be5a8ecbe0760e59e3","start_pos":"77.32102068","oid":105686977591,"twap_id":null,"cloid":"0x00000000000000001849c172509f24df"}]} 63 | {"coin":"@59","side":"A","time":"2025-06-24T02:56:36.952195987","px":"6.6472","sz":"8.68","hash":"0x0d39f6431f1c3338229c04261eaaca02011500b6669208468264a9aaa88ed3e6","trade_dir_override":"Na","side_info":[{"user":"0xffffffffffffffffffffffffffffffffffffffff","start_pos":"200293.72","oid":105684367994,"twap_id":null,"cloid":null},{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"856.17828753","oid":105686977613,"twap_id":null,"cloid":null}]} 64 | {"coin":"@166","side":"A","time":"2025-06-24T02:56:36.952195987","px":"1.0003","sz":"165.32","hash":"0xeed8d454cafed26151fd04261eaaca02011600de5df57ee78bab1b091b056b2e","trade_dir_override":"Na","side_info":[{"user":"0x107332a1729ba0bcf6171117815a87b72a7e6082","start_pos":"36485.61568686","oid":105686050113,"twap_id":null,"cloid":null},{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"215171.4145885","oid":105686977614,"twap_id":null,"cloid":null}]} 65 | {"coin":"@107","side":"B","time":"2025-06-24T02:56:36.952195987","px":"36.801","sz":"4.49","hash":"0x02c80a45cd822365816d04261eaaca02011700f3c0b5703cca97c90aaca4a785","trade_dir_override":"Na","side_info":[{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"17675.73631346","oid":105686977615,"twap_id":null,"cloid":null},{"user":"0x8ac0ec32d282d0841f40c4be5a8ecbe0760e59e3","start_pos":"75.76103627","oid":105686977591,"twap_id":null,"cloid":"0x00000000000000001849c172509f24df"}]} 66 | 
{"coin":"@166","side":"A","time":"2025-06-24T02:56:36.952195987","px":"1.0003","sz":"75.59","hash":"0x9c8b059c249d5b4f183c04261eaaca0201180081b2c5cedaa5a8e9c94ef563e5","trade_dir_override":"Na","side_info":[{"user":"0x107332a1729ba0bcf6171117815a87b72a7e6082","start_pos":"36650.89105047","oid":105686050113,"twap_id":null,"cloid":null},{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"215006.0945885","oid":105686977616,"twap_id":null,"cloid":null}]} 67 | {"coin":"@107","side":"B","time":"2025-06-24T02:56:36.952195987","px":"36.801","sz":"2.05","hash":"0xe3127dd9595b9aaa8d4e04261eaaca020119004ca6f9a8b1ed9c16358a32c78a","trade_dir_override":"Na","side_info":[{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"17680.22521342","oid":105686977617,"twap_id":null,"cloid":null},{"user":"0x8ac0ec32d282d0841f40c4be5a8ecbe0760e59e3","start_pos":"71.27108116","oid":105686977591,"twap_id":null,"cloid":"0x00000000000000001849c172509f24df"}]} 68 | {"coin":"@107","side":"B","time":"2025-06-24T02:56:36.952195987","px":"36.801","sz":"1.56","hash":"0x4023d33de912351b1c0704261eaaca02011a00b14e8ba255b1fd8dda6aca0f2a","trade_dir_override":"Na","side_info":[{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"17682.27471118","oid":105686977618,"twap_id":null,"cloid":null},{"user":"0x8ac0ec32d282d0841f40c4be5a8ecbe0760e59e3","start_pos":"69.22110165","oid":105686977591,"twap_id":null,"cloid":"0x00000000000000001849c172509f24df"}]} 69 | {"coin":"@107","side":"B","time":"2025-06-24T02:56:36.952195987","px":"36.801","sz":"5.95","hash":"0xe2bd19ead6bf48a9212604261eaaca02011b000266b78f2f1943e32da417a24d","trade_dir_override":"Na","side_info":[{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"17683.83432899","oid":105686977619,"twap_id":null,"cloid":null},{"user":"0x8ac0ec32d282d0841f40c4be5a8ecbe0760e59e3","start_pos":"67.66111724","oid":105686977591,"twap_id":null,"cloid":"0x00000000000000001849c172509f24df"}]} 70 | {"coin":"@166","side":"A","time":"2025-06-24T02:56:36.952195987","px":"1.0003","sz":"57.6","hash":"0x7a1100dee236e5a2693604261eaaca02011c00feab4d04df9d016ac3fc5a0bb2","trade_dir_override":"Na","side_info":[{"user":"0x107332a1729ba0bcf6171117815a87b72a7e6082","start_pos":"36726.46064118","oid":105686050113,"twap_id":null,"cloid":null},{"user":"0xefd3ab65915e35105caa462442c9ecc1346728df","start_pos":"214930.5045885","oid":105686977620,"twap_id":null,"cloid":null}]} 71 | {"coin":"@107","side":"A","time":"2025-06-24T02:56:36.952195987","px":"36.791","sz":"13.6","hash":"0x2165d9c82bb7c82b5a5204261eaaca02012500ffd6df104ef2bd49658b724ecd","trade_dir_override":"Na","side_info":[{"user":"0x023a3d058020fb76cca98f01b3c48c8938a22355","start_pos":"7694.15638066","oid":105686977112,"twap_id":null,"cloid":"0x00000000000008029538332006014039"},{"user":"0xa408cf373d04e8c562ea23c2f49acd88968e56ef","start_pos":"7833.93885203","oid":105686977629,"twap_id":null,"cloid":"0x078273e050854e298a3aada86fbc0020"}]} 72 | {"coin":"ETH","side":"A","time":"2025-06-24T02:56:37.026660548","px":"2391.9","sz":"0.0051","hash":"0x0000000000000000000000000000000000000000000000000000000000000000","trade_dir_override":"Na","side_info":[{"user":"0x31ca8395cf837de08b24da3f660e77761dfb974b","start_pos":"14.2048","oid":105686950111,"twap_id":null,"cloid":null},{"user":"0x937791d5ab5040402370f1fc0d9f8f014d80e90e","start_pos":"-2.6603","oid":105686977630,"twap_id":822427,"cloid":null}]} 73 | "#, 74 | ]; 75 | 76 | async fn listen(listener: &mut L, event_source: EventSource, dir: &Path) -> 
Result<()> { 77 | let event_source_dir = event_source.event_source_dir(dir).canonicalize()?; 78 | info!("Monitoring directory: {}", event_source_dir.display()); 79 | // monitoring the directory via the notify crate (gives file system events) 80 | let (fs_event_tx, mut fs_event_rx) = unbounded_channel(); 81 | let mut watcher = recommended_watcher(move |res| { 82 | let fs_event_tx = fs_event_tx.clone(); 83 | if let Err(err) = fs_event_tx.send(res) { 84 | error!("Error sending event to processor via channel: {err}"); 85 | } 86 | })?; 87 | 88 | watcher.watch(&event_source_dir, RecursiveMode::Recursive)?; 89 | loop { 90 | match fs_event_rx.recv().await { 91 | Some(Ok(event)) => { 92 | // if a new file is created, start tracking it. 93 | if event.kind.is_create() { 94 | let new_path = &event.paths[0]; 95 | if new_path.is_file() { 96 | info!("-- Event: {} created --", new_path.display()); 97 | listener.on_file_creation(new_path.clone(), event_source)?; 98 | } 99 | } 100 | // Check for `Modify` event (only if the file is already initialized) 101 | else if event.kind.is_modify() { 102 | let new_path = &event.paths[0]; 103 | if new_path.is_file() { 104 | // If we are not tracking anything right now, we treat a file update as declaring that it has been created. 105 | // Unfortunately, we miss the update that occurs at this time step. 106 | // We go to the end of the file to read for updates after that. 107 | if listener.is_reading(event_source) { 108 | info!("-- Event: {} modified --", new_path.display()); 109 | listener.on_file_modification(event_source)?; 110 | } else { 111 | info!("-- Event: {} created --", new_path.display()); 112 | let file = listener.file_mut(event_source); 113 | let mut new_file = File::open(new_path)?; 114 | new_file.seek(SeekFrom::End(0))?; 115 | *file = Some(new_file); 116 | } 117 | } 118 | } 119 | } 120 | Some(Err(err)) => { 121 | error!("Watcher error: {err}"); 122 | return Err(Box::new(err)); 123 | } 124 | None => { 125 | // The channel disconnected, likely because the sender (watcher) was dropped. 126 | // This usually means the program is shutting down or there's a problem. 127 | error!("Channel closed. Listener exiting"); 128 | return Err("Channel closed.".into()); // Exit the loop 129 | } 130 | } 131 | } 132 | } 133 | 134 | fn clear_dir_contents(path: &Path) -> Result<()> { 135 | if path.is_dir() { 136 | for entry in read_dir(path)? 
{ 137 | let entry = entry?; 138 | let path = entry.path(); 139 | if path.is_dir() { 140 | remove_dir_all(&path)?; 141 | } else { 142 | remove_file(&path)?; 143 | } 144 | } 145 | } 146 | Ok(()) 147 | } 148 | 149 | async fn create_mock_data(event_source: EventSource, mock_dir: &Path) -> Result { 150 | // set up so that the directory is initially empty 151 | let mut res = String::new(); 152 | sleep(Duration::from_millis(100)).await; 153 | let mut rng = StdRng::from_seed([42; 32]); 154 | let mock_dir = event_source.event_source_dir(mock_dir).canonicalize()?; 155 | clear_dir_contents(&mock_dir)?; 156 | let mock_dir = mock_dir.join("hourly/20250624"); 157 | create_dir_all(&mock_dir)?; 158 | 159 | // test that it works when we transition across files 160 | for (i, data) in DATA.iter().enumerate() { 161 | res += data; 162 | let lines = data.split_whitespace(); 163 | let mut mock_file = TokioFile::create(mock_dir.join((i + 1).to_string())).await?; 164 | for line in lines { 165 | mock_file.write_all((line.to_string() + "\n").as_bytes()).await?; 166 | mock_file.flush().await?; 167 | let wait = rng.random_bool(0.25); 168 | // simulate writing transactions in separate blocks by waiting 0.1 seconds. 169 | if wait { 170 | sleep(Duration::from_millis(100)).await; 171 | } 172 | } 173 | } 174 | Ok(res) 175 | } 176 | 177 | // will listen to file events and collect their results in the history field 178 | struct TestListener { 179 | file: Option, 180 | history: Arc>, 181 | } 182 | 183 | impl DirectoryListener for TestListener { 184 | fn is_reading(&self, _event_source: EventSource) -> bool { 185 | self.file.is_some() 186 | } 187 | 188 | fn file_mut(&mut self, _event_source: EventSource) -> &mut Option { 189 | &mut self.file 190 | } 191 | 192 | fn on_file_creation(&mut self, new_file: PathBuf, _event_source: EventSource) -> Result<()> { 193 | let file = File::open(new_file)?; 194 | self.file = Some(file); 195 | Ok(()) 196 | } 197 | 198 | #[allow(clippy::significant_drop_tightening)] 199 | fn process_data(&mut self, data: String, _event_source: EventSource) -> Result<()> { 200 | let mut history = self.history.lock().unwrap(); 201 | *history += &data; 202 | Ok(()) 203 | } 204 | } 205 | 206 | impl TestListener { 207 | fn new(history: Arc>) -> Self { 208 | Self { file: None, history } 209 | } 210 | } 211 | 212 | #[allow(clippy::unwrap_used)] 213 | #[allow(clippy::significant_drop_tightening)] 214 | #[tokio::test] 215 | async fn test_trade_listener() -> Result<()> { 216 | let mock_path = PathBuf::from(MOCK_HL_DIR); 217 | let event_source = EventSource::Fills; 218 | create_dir_all(event_source.event_source_dir(&mock_path))?; 219 | let history = Arc::new(Mutex::new(String::new())); 220 | let mut test_listener = TestListener::new(history.clone()); 221 | { 222 | let mock_path = mock_path.clone(); 223 | tokio::spawn(async move { 224 | if let Err(err) = listen(&mut test_listener, event_source, &mock_path).await { 225 | error!("Listener error: {err}"); 226 | } 227 | }); 228 | } 229 | 230 | // get desired output 231 | let expected = create_mock_data(event_source, &mock_path).await?; 232 | sleep(Duration::from_secs(2)).await; 233 | let history = history.lock().unwrap(); 234 | assert_eq!(*history, expected); 235 | Ok(()) 236 | } 237 | } 238 | -------------------------------------------------------------------------------- /server/src/listeners/order_book/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | HL_NODE, 3 | listeners::{directory::DirectoryListener, 
order_book::state::OrderBookState}, 4 | order_book::{ 5 | Coin, Snapshot, 6 | multi_book::{Snapshots, load_snapshots_from_json}, 7 | }, 8 | prelude::*, 9 | types::{ 10 | L4Order, 11 | inner::{InnerL4Order, InnerLevel}, 12 | node_data::{Batch, EventSource, NodeDataFill, NodeDataOrderDiff, NodeDataOrderStatus}, 13 | }, 14 | }; 15 | use alloy::primitives::Address; 16 | use fs::File; 17 | use log::{error, info}; 18 | use notify::{Event, RecursiveMode, Watcher, recommended_watcher}; 19 | use std::{ 20 | cmp::Ordering, 21 | collections::{HashMap, HashSet, VecDeque}, 22 | io::{Read, Seek, SeekFrom}, 23 | path::PathBuf, 24 | sync::Arc, 25 | time::Duration, 26 | }; 27 | use tokio::{ 28 | sync::{ 29 | Mutex, 30 | broadcast::Sender, 31 | mpsc::{UnboundedSender, unbounded_channel}, 32 | }, 33 | time::{Instant, interval_at, sleep}, 34 | }; 35 | use utils::{BatchQueue, EventBatch, process_rmp_file, validate_snapshot_consistency}; 36 | 37 | mod state; 38 | mod utils; 39 | 40 | // WARNING - this code assumes no other file system operations are occurring in the watched directories 41 | // if there are scripts running, this may not work as intended 42 | pub(crate) async fn hl_listen(listener: Arc>, dir: PathBuf) -> Result<()> { 43 | let order_statuses_dir = EventSource::OrderStatuses.event_source_dir(&dir).canonicalize()?; 44 | let fills_dir = EventSource::Fills.event_source_dir(&dir).canonicalize()?; 45 | let order_diffs_dir = EventSource::OrderDiffs.event_source_dir(&dir).canonicalize()?; 46 | info!("Monitoring order status directory: {}", order_statuses_dir.display()); 47 | info!("Monitoring order diffs directory: {}", order_diffs_dir.display()); 48 | info!("Monitoring fills directory: {}", fills_dir.display()); 49 | 50 | // monitoring the directory via the notify crate (gives file system events) 51 | let (fs_event_tx, mut fs_event_rx) = unbounded_channel(); 52 | let mut watcher = recommended_watcher(move |res| { 53 | let fs_event_tx = fs_event_tx.clone(); 54 | if let Err(err) = fs_event_tx.send(res) { 55 | error!("Error sending fs event to processor via channel: {err}"); 56 | } 57 | })?; 58 | 59 | let ignore_spot = { 60 | let listener = listener.lock().await; 61 | listener.ignore_spot 62 | }; 63 | 64 | // every so often, we fetch a new snapshot and the snapshot_fetch_task starts running. 65 | // Result is sent back along this channel (if error, we want to return to top level) 66 | let (snapshot_fetch_task_tx, mut snapshot_fetch_task_rx) = unbounded_channel::>(); 67 | 68 | watcher.watch(&order_statuses_dir, RecursiveMode::Recursive)?; 69 | watcher.watch(&fills_dir, RecursiveMode::Recursive)?; 70 | watcher.watch(&order_diffs_dir, RecursiveMode::Recursive)?; 71 | let start = Instant::now() + Duration::from_secs(5); 72 | let mut ticker = interval_at(start, Duration::from_secs(10)); 73 | loop { 74 | tokio::select! 
{ 75 | event = fs_event_rx.recv() => match event { 76 | Some(Ok(event)) => { 77 | if event.kind.is_create() || event.kind.is_modify() { 78 | let new_path = &event.paths[0]; 79 | if new_path.starts_with(&order_statuses_dir) && new_path.is_file() { 80 | listener 81 | .lock() 82 | .await 83 | .process_update(&event, new_path, EventSource::OrderStatuses) 84 | .map_err(|err| format!("Order status processing error: {err}"))?; 85 | } else if new_path.starts_with(&fills_dir) && new_path.is_file() { 86 | listener 87 | .lock() 88 | .await 89 | .process_update(&event, new_path, EventSource::Fills) 90 | .map_err(|err| format!("Fill update processing error: {err}"))?; 91 | } else if new_path.starts_with(&order_diffs_dir) && new_path.is_file() { 92 | listener 93 | .lock() 94 | .await 95 | .process_update(&event, new_path, EventSource::OrderDiffs) 96 | .map_err(|err| format!("Book diff processing error: {err}"))?; 97 | } 98 | } 99 | } 100 | Some(Err(err)) => { 101 | error!("Watcher error: {err}"); 102 | return Err(format!("Watcher error: {err}").into()); 103 | } 104 | None => { 105 | error!("Channel closed. Listener exiting"); 106 | return Err("Channel closed.".into()); 107 | } 108 | }, 109 | snapshot_fetch_res = snapshot_fetch_task_rx.recv() => { 110 | match snapshot_fetch_res { 111 | None => { 112 | return Err("Snapshot fetch task sender dropped".into()); 113 | } 114 | Some(Err(err)) => { 115 | return Err(format!("Abci state reading error: {err}").into()); 116 | } 117 | Some(Ok(())) => {} 118 | } 119 | } 120 | _ = ticker.tick() => { 121 | let listener = listener.clone(); 122 | let snapshot_fetch_task_tx = snapshot_fetch_task_tx.clone(); 123 | fetch_snapshot(dir.clone(), listener, snapshot_fetch_task_tx, ignore_spot); 124 | } 125 | () = sleep(Duration::from_secs(5)) => { 126 | let listener = listener.lock().await; 127 | if listener.is_ready() { 128 | return Err(format!("Stream has fallen behind ({HL_NODE} failed?)").into()); 129 | } 130 | } 131 | } 132 | } 133 | } 134 | 135 | fn fetch_snapshot( 136 | dir: PathBuf, 137 | listener: Arc>, 138 | tx: UnboundedSender>, 139 | ignore_spot: bool, 140 | ) { 141 | let tx = tx.clone(); 142 | tokio::spawn(async move { 143 | let res = match process_rmp_file(&dir).await { 144 | Ok(output_fln) => { 145 | let state = { 146 | let mut listener = listener.lock().await; 147 | listener.begin_caching(); 148 | listener.clone_state() 149 | }; 150 | let snapshot = load_snapshots_from_json::(&output_fln).await; 151 | info!("Snapshot fetched"); 152 | // sleep to let some updates build up. 
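// The fetched snapshot sits at some block height; pausing briefly lets the cache
// accumulate update batches past that height, so the replay further down can advance
// the cloned state to exactly the snapshot height before validating.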
153 | sleep(Duration::from_secs(1)).await; 154 | let mut cache = { 155 | let mut listener = listener.lock().await; 156 | listener.take_cache() 157 | }; 158 | info!("Cache has {} elements", cache.len()); 159 | match snapshot { 160 | Ok((height, expected_snapshot)) => { 161 | if let Some(mut state) = state { 162 | while state.height() < height { 163 | if let Some((order_statuses, order_diffs)) = cache.pop_front() { 164 | state.apply_updates(order_statuses, order_diffs)?; 165 | } else { 166 | return Err::<(), Error>("Not enough cached updates".into()); 167 | } 168 | } 169 | if state.height() > height { 170 | return Err("Fetched snapshot lagging stored state".into()); 171 | } 172 | let stored_snapshot = state.compute_snapshot().snapshot; 173 | info!("Validating snapshot"); 174 | validate_snapshot_consistency(&stored_snapshot, expected_snapshot, ignore_spot) 175 | } else { 176 | listener.lock().await.init_from_snapshot(expected_snapshot, height); 177 | Ok(()) 178 | } 179 | } 180 | Err(err) => Err(err), 181 | } 182 | } 183 | Err(err) => Err(err), 184 | }; 185 | let _unused = tx.send(res); 186 | Ok(()) 187 | }); 188 | } 189 | 190 | pub(crate) struct OrderBookListener { 191 | ignore_spot: bool, 192 | fill_status_file: Option, 193 | order_status_file: Option, 194 | order_diff_file: Option, 195 | // None if we haven't seen a valid snapshot yet 196 | order_book_state: Option, 197 | last_fill: Option, 198 | order_diff_cache: BatchQueue, 199 | order_status_cache: BatchQueue, 200 | // Only Some when we want it to collect updates 201 | fetched_snapshot_cache: Option, Batch)>>, 202 | internal_message_tx: Option>>, 203 | } 204 | 205 | impl OrderBookListener { 206 | pub(crate) const fn new(internal_message_tx: Option>>, ignore_spot: bool) -> Self { 207 | Self { 208 | ignore_spot, 209 | fill_status_file: None, 210 | order_status_file: None, 211 | order_diff_file: None, 212 | order_book_state: None, 213 | last_fill: None, 214 | fetched_snapshot_cache: None, 215 | internal_message_tx, 216 | order_diff_cache: BatchQueue::new(), 217 | order_status_cache: BatchQueue::new(), 218 | } 219 | } 220 | 221 | fn clone_state(&self) -> Option { 222 | self.order_book_state.clone() 223 | } 224 | 225 | pub(crate) const fn is_ready(&self) -> bool { 226 | self.order_book_state.is_some() 227 | } 228 | 229 | pub(crate) fn universe(&self) -> HashSet { 230 | self.order_book_state.as_ref().map_or_else(HashSet::new, OrderBookState::compute_universe) 231 | } 232 | 233 | #[allow(clippy::type_complexity)] 234 | // pops earliest pair of cached updates that have the same timestamp if possible 235 | fn pop_cache(&mut self) -> Option<(Batch, Batch)> { 236 | // synchronize to same block 237 | while let Some(t) = self.order_diff_cache.front() { 238 | if let Some(s) = self.order_status_cache.front() { 239 | match t.block_number().cmp(&s.block_number()) { 240 | Ordering::Less => { 241 | self.order_diff_cache.pop_front(); 242 | } 243 | Ordering::Equal => { 244 | return self 245 | .order_status_cache 246 | .pop_front() 247 | .and_then(|t| self.order_diff_cache.pop_front().map(|s| (t, s))); 248 | } 249 | Ordering::Greater => { 250 | self.order_status_cache.pop_front(); 251 | } 252 | } 253 | } else { 254 | break; 255 | } 256 | } 257 | None 258 | } 259 | 260 | fn receive_batch(&mut self, updates: EventBatch) -> Result<()> { 261 | match updates { 262 | EventBatch::Orders(batch) => { 263 | self.order_status_cache.push(batch); 264 | } 265 | EventBatch::BookDiffs(batch) => { 266 | self.order_diff_cache.push(batch); 267 | } 268 | EventBatch::Fills(batch) 
impl OrderBookListener {
    fn process_update(&mut self, event: &Event, new_path: &PathBuf, event_source: EventSource) -> Result<()> {
        if event.kind.is_create() {
            info!("-- Event: {} created --", new_path.display());
            self.on_file_creation(new_path.clone(), event_source)?;
        } else if self.is_reading(event_source) {
            // `Modify` event on a file we are already tracking.
            self.on_file_modification(event_source)?;
        } else {
            // `Modify` event on a file we are not tracking yet: treat it as if the file had just
            // been created. Unfortunately, we miss the update that triggered this event, because
            // we seek to the end of the file and only read updates written after that.
            info!("-- Event: {} modified, tracking it now --", new_path.display());
            let file = self.file_mut(event_source);
            let mut new_file = File::open(new_path)?;
            new_file.seek(SeekFrom::End(0))?;
            *file = Some(new_file);
        }
        Ok(())
    }
}
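
// Illustrative sketch (hypothetical names; assumes a tokio unbounded mpsc channel, which is
// consistent with the synchronous `send` calls in this module): the listener publishes
// `Arc`-wrapped messages so the websocket dispatcher can clone a pointer per subscriber
// instead of cloning the payload.
async fn fan_out_sketch() {
    use std::sync::Arc;
    use tokio::sync::mpsc;

    enum SketchMessage {
        Update(String),
    }

    let (tx, mut rx) = mpsc::unbounded_channel::<Arc<SketchMessage>>();
    // `UnboundedSender::send` never blocks, so no `.await` is needed.
    let _unused = tx.send(Arc::new(SketchMessage::Update("block 1".to_owned())));
    drop(tx); // close the channel so the loop below terminates
    while let Some(message) = rx.recv().await {
        // One cheap pointer copy per subscriber.
        let _for_subscriber = Arc::clone(&message);
    }
}
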
impl DirectoryListener for OrderBookListener {
    fn is_reading(&self, event_source: EventSource) -> bool {
        match event_source {
            EventSource::Fills => self.fill_status_file.is_some(),
            EventSource::OrderStatuses => self.order_status_file.is_some(),
            EventSource::OrderDiffs => self.order_diff_file.is_some(),
        }
    }

    fn file_mut(&mut self, event_source: EventSource) -> &mut Option<File> {
        match event_source {
            EventSource::Fills => &mut self.fill_status_file,
            EventSource::OrderStatuses => &mut self.order_status_file,
            EventSource::OrderDiffs => &mut self.order_diff_file,
        }
    }

    fn on_file_creation(&mut self, new_file: PathBuf, event_source: EventSource) -> Result<()> {
        // Drain whatever remains of the previous file before switching to the new one.
        if let Some(file) = self.file_mut(event_source).as_mut() {
            let mut buf = String::new();
            file.read_to_string(&mut buf)?;
            if !buf.is_empty() {
                self.process_data(buf, event_source)?;
            }
        }
        *self.file_mut(event_source) = Some(File::open(new_file)?);
        Ok(())
    }

    fn process_data(&mut self, data: String, event_source: EventSource) -> Result<()> {
        let total_len = data.len();
        let lines = data.lines();
        for line in lines {
            if line.is_empty() {
                continue;
            }
            let res = match event_source {
                EventSource::Fills => serde_json::from_str::<Batch<NodeDataFill>>(line).map(|batch| {
                    let height = batch.block_number();
                    (height, EventBatch::Fills(batch))
                }),
                EventSource::OrderStatuses => serde_json::from_str(line)
                    .map(|batch: Batch<NodeDataOrderStatus>| (batch.block_number(), EventBatch::Orders(batch))),
                EventSource::OrderDiffs => serde_json::from_str(line)
                    .map(|batch: Batch<NodeDataOrderDiff>| (batch.block_number(), EventBatch::BookDiffs(batch))),
            };
            let (height, event_batch) = match res {
                Ok(data) => data,
                Err(err) => {
                    // A deserialization error here usually means we read a partially written
                    // line (EOF mid-write). Seek back so the next read retries from the start
                    // of this chunk.
                    error!(
                        "{event_source} deserialization error {err}, height: {:?}, line: {:?}",
                        self.order_book_state.as_ref().map(OrderBookState::height),
                        line.get(..100).unwrap_or(line),
                    );
                    #[allow(clippy::unwrap_used)]
                    let total_len: i64 = total_len.try_into().unwrap();
                    if let Some(file) = self.file_mut(event_source).as_mut() {
                        // Best-effort rewind: if the seek fails, the next read surfaces the error.
                        let _unused = file.seek_relative(-total_len);
                    }
                    break;
                }
            };
            if height % 100 == 0 {
                info!("{event_source} block: {height}");
            }
            if let Err(err) = self.receive_batch(event_batch) {
                self.order_book_state = None;
                return Err(err);
            }
        }
        let snapshot = self.l2_snapshots(true);
        if let Some(snapshot) = snapshot {
            if let Some(tx) = &self.internal_message_tx {
                let tx = tx.clone();
                tokio::spawn(async move {
                    let snapshot = Arc::new(InternalMessage::Snapshot { l2_snapshots: snapshot.1, time: snapshot.0 });
                    let _unused = tx.send(snapshot);
                });
            }
        }
        Ok(())
    }
}
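
// Illustrative sketch (hypothetical helper): `process_data` above tolerates a torn final
// line by seeking the cursor backwards and retrying on the next filesystem event. The same
// tailing pattern in isolation, draining only newline-terminated lines:
fn drain_complete_lines(file: &mut std::fs::File) -> std::io::Result<Vec<String>> {
    use std::io::{Read, Seek, SeekFrom};

    let mut buf = String::new();
    file.read_to_string(&mut buf)?;
    let mut lines: Vec<String> = buf.lines().map(str::to_owned).collect();
    if !buf.ends_with('\n') {
        if let Some(partial) = lines.pop() {
            // The writer has not finished this line yet; rewind so the next read starts
            // again at the beginning of the torn tail.
            #[allow(clippy::cast_possible_wrap)]
            file.seek(SeekFrom::Current(-(partial.len() as i64)))?;
        }
    }
    Ok(lines)
}
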
pub(crate) struct L2Snapshots(HashMap<String, HashMap<L2SnapshotParams, Arc<L2BookData>>>);

impl L2Snapshots {
    pub(crate) const fn as_ref(&self) -> &HashMap<String, HashMap<L2SnapshotParams, Arc<L2BookData>>> {
        &self.0
    }
}

pub(crate) struct TimedSnapshots {
    pub(crate) time: u64,
    pub(crate) height: u64,
    pub(crate) snapshot: Snapshots,
}

// Messages sent from the node data listener to the websocket dispatcher to support streaming.
pub(crate) enum InternalMessage {
    Snapshot { l2_snapshots: L2Snapshots, time: u64 },
    Fills { batch: Batch<NodeDataFill> },
    L4BookUpdates { diff_batch: Batch<NodeDataOrderDiff>, status_batch: Batch<NodeDataOrderStatus> },
}

#[derive(Eq, PartialEq, Hash)]
pub(crate) struct L2SnapshotParams {
    n_sig_figs: Option<u32>,
    mantissa: Option<u32>,
}
--------------------------------------------------------------------------------
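
A note on `L2SnapshotParams` above: because it derives `Eq`, `PartialEq`, and `Hash`, it can key a per-coin map that caches one aggregated view of the book per requested precision. A standalone sketch of that shape (the field types and string payloads are hypothetical):

```rust
use std::collections::HashMap;

#[derive(Eq, PartialEq, Hash)]
struct Params {
    n_sig_figs: Option<u32>,
    mantissa: Option<u32>,
}

fn main() {
    let mut books: HashMap<String, HashMap<Params, &str>> = HashMap::new();
    let btc = books.entry("BTC".to_owned()).or_default();
    btc.insert(Params { n_sig_figs: None, mantissa: None }, "full-precision levels");
    btc.insert(Params { n_sig_figs: Some(5), mantissa: Some(2) }, "aggregated levels");
    // Each (n_sig_figs, mantissa) combination caches its own view of the book.
    assert_eq!(books["BTC"].len(), 2);
}
```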