├── .gitignore ├── src ├── lib.rs ├── cache.rs ├── config.rs ├── circuit_breaker.rs ├── polymarket.rs ├── main.rs ├── position_tracker.rs ├── kalshi.rs ├── discovery.rs ├── execution.rs └── polymarket_clob.rs ├── .github ├── TOPICS.md └── REPOSITORY_DESCRIPTION.md ├── Cargo.toml ├── scripts └── build_sports_cache.py └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .env 3 | .env.keys 4 | kalshi_private_key.txt 5 | claude.md 6 | .DS_Store 7 | positions.json 8 | .discovery_cache.json 9 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Prediction Market Arbitrage Trading System 2 | //! 3 | //! A high-performance, production-ready arbitrage trading system for cross-platform 4 | //! prediction markets with real-time price monitoring and execution. 5 | 6 | pub mod cache; 7 | pub mod circuit_breaker; 8 | pub mod config; 9 | pub mod discovery; 10 | pub mod execution; 11 | pub mod kalshi; 12 | pub mod polymarket; 13 | pub mod polymarket_clob; 14 | pub mod position_tracker; 15 | pub mod types; -------------------------------------------------------------------------------- /.github/TOPICS.md: -------------------------------------------------------------------------------- 1 | # GitHub Topics for SEO 2 | 3 | When setting up this repository on GitHub, add these topics to improve discoverability: 4 | 5 | ## Primary Topics 6 | - polymarket 7 | - kalshi 8 | - arbitrage 9 | - trading-bot 10 | - prediction-markets 11 | - rust 12 | 13 | ## Secondary Topics 14 | - polymarket-arbitrage 15 | - kalshi-arbitrage 16 | - cross-platform-trading 17 | - market-making 18 | - sports-betting 19 | - websocket 20 | - async-rust 21 | - trading-algorithms 22 | - financial-engineering 23 | - algorithmic-trading 24 | 25 | ## Search Terms 26 | These topics will help users find this repository when searching for: 27 | - "kalshi-poly arbitrage bot" 28 | - "poly-poly arbitrage bot" 29 | - "kalshi-kalshi arbitrage bot" 30 | - "polymarket arbitrage bot" 31 | - "polymarket-kalshi arbitrage bot" 32 | - "kalshi arbitrage" 33 | - "prediction market trading bot" 34 | - "cross-platform arbitrage" 35 | 36 | -------------------------------------------------------------------------------- /.github/REPOSITORY_DESCRIPTION.md: -------------------------------------------------------------------------------- 1 | # GitHub Repository Description 2 | 3 | Use this description when creating/updating the GitHub repository: 4 | 5 | ``` 6 | Kalshi-Poly / Poly-Poly / Kalshi-Kalshi arbitrage bot - Automated cross-platform prediction market trading system. High-performance Rust implementation for risk-free arbitrage opportunities. 7 | ``` 8 | 9 | ## Short Description (for GitHub) 10 | ``` 11 | Kalshi-Poly / Poly-Poly / Kalshi-Kalshi arbitrage bot - Automated prediction market trading 12 | ``` 13 | 14 | ## Full Description 15 | ``` 16 | Kalshi-Poly / Poly-Poly / Kalshi-Kalshi arbitrage bot for automated cross-platform trading. A high-performance, production-ready arbitrage trading system that monitors price discrepancies between Kalshi and Polymarket, executing risk-free arbitrage opportunities in real-time with sub-millisecond latency. 
17 | 18 | Features: 19 | - Real-time WebSocket price monitoring 20 | - Cross-platform market matching (Kalshi-Poly) 21 | - Same-platform arbitrage (Poly-Poly, Kalshi-Kalshi) 22 | - Concurrent order execution 23 | - Risk management with circuit breakers 24 | - Lock-free atomic orderbook cache 25 | - SIMD-accelerated arbitrage detection 26 | 27 | Supports: Kalshi-Poly, Poly-Poly, and Kalshi-Kalshi arbitrage opportunities. 28 | ``` 29 | 30 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "prediction-market-arbitrage" 3 | version = "2.0.0" 4 | edition = "2021" 5 | description = "Kalshi-Poly / Poly-Poly / Kalshi-Kalshi arbitrage bot - automated cross-platform prediction market trading system" 6 | keywords = ["polymarket", "kalshi", "arbitrage", "trading-bot", "prediction-markets", "kalshi-poly", "poly-poly", "kalshi-kalshi", "polymarket-arbitrage", "kalshi-arbitrage", "cross-platform-trading"] 7 | authors = ["teraus"] 8 | license = "MIT OR Apache-2.0" 9 | repository = "https://github.com/terauss/prediction-market-arbitrage" 10 | 11 | [dependencies] 12 | anyhow = "1.0" 13 | base64 = "0.22" 14 | chrono = "0.4" 15 | dotenvy = "0.15" 16 | ethers = { version = "2.0", features = ["legacy"] } 17 | futures-util = "0.3" 18 | hmac = "0.12" 19 | rand = "0.8" 20 | reqwest = { version = "0.11", features = ["json", "blocking"] } 21 | rsa = { version = "0.9", features = ["sha2"] } 22 | pkcs1 = { version = "0.7", features = ["pem"] } 23 | serde = { version = "1.0", features = ["derive", "rc"] } 24 | serde_json = "1.0" 25 | sha2 = "0.10" 26 | tokio = { version = "1.0", features = ["full"] } 27 | tokio-tungstenite = { version = "0.21", features = ["native-tls"] } 28 | tracing = "0.1" 29 | tracing-subscriber = { version = "0.3", features = ["env-filter"] } 30 | rustc-hash = "2.0" 31 | tiny-keccak = { version = "2.0", features = ["keccak"] } 32 | governor = "0.6" 33 | nonzero_ext = "0.3" 34 | arrayvec = "0.7" 35 | wide = "0.7" 36 | 37 | [dev-dependencies] 38 | criterion = { version = "0.5", features = ["html_reports"] } 39 | hex = "0.4" 40 | 41 | [profile.release] 42 | opt-level = 3 43 | lto = true 44 | codegen-units = 1 -------------------------------------------------------------------------------- /scripts/build_sports_cache.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Sports neg_risk cache warmer (async version). 
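
Fetches active sports events from the Polymarket Gamma API (tag_id 100639) and writes a
token_id -> negRisk map to .clob_market_cache.json.

Assumed invocation (from the repository root; requires aiohttp):

    python scripts/build_sports_cache.py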
4 | """ 5 | 6 | import asyncio 7 | import json 8 | from pathlib import Path 9 | import aiohttp 10 | 11 | GAMMA_API = "https://gamma-api.polymarket.com" 12 | GAMES_TAG = "100639" 13 | CACHE_FILE = ".clob_market_cache.json" 14 | CONCURRENT_REQUESTS = 5 # tune based on rate limits 15 | 16 | 17 | async def fetch_page(session: aiohttp.ClientSession, offset: int) -> list: 18 | async with session.get( 19 | f"{GAMMA_API}/events", 20 | params={"tag_id": GAMES_TAG, "active": "true", "closed": "false", "limit": 100, "offset": offset}, 21 | timeout=aiohttp.ClientTimeout(total=30) 22 | ) as resp: 23 | return await resp.json() 24 | 25 | 26 | async def main(): 27 | neg_risk_map = {} 28 | 29 | async with aiohttp.ClientSession() as session: 30 | # First request to estimate total pages 31 | first_page = await fetch_page(session, 0) 32 | if not first_page: 33 | return 34 | 35 | # Process first page 36 | all_events = first_page 37 | 38 | # Fetch remaining pages concurrently 39 | if len(first_page) == 100: 40 | # Optimistically fetch next several pages in parallel 41 | offsets = list(range(100, 2000, 100)) # adjust max based on typical event count 42 | 43 | sem = asyncio.Semaphore(CONCURRENT_REQUESTS) 44 | 45 | async def bounded_fetch(offset): 46 | async with sem: 47 | return await fetch_page(session, offset) 48 | 49 | tasks = [bounded_fetch(o) for o in offsets] 50 | results = await asyncio.gather(*tasks, return_exceptions=True) 51 | 52 | for result in results: 53 | if isinstance(result, list) and result: 54 | all_events.extend(result) 55 | elif isinstance(result, list) and not result: 56 | break # empty page, we're done 57 | 58 | # Process all events 59 | for event in all_events: 60 | neg_risk = event.get("negRisk", False) 61 | for market in event.get("markets", []): 62 | token_ids = json.loads(market.get("clobTokenIds", "[]")) 63 | for tid in token_ids: 64 | if tid: 65 | neg_risk_map[tid] = neg_risk 66 | 67 | Path(CACHE_FILE).write_text(json.dumps(neg_risk_map)) 68 | 69 | print(f"Cached {len(neg_risk_map)} tokens") 70 | 71 | 72 | if __name__ == "__main__": 73 | asyncio.run(main()) -------------------------------------------------------------------------------- /src/cache.rs: -------------------------------------------------------------------------------- 1 | //! Team code mapping cache for cross-platform market matching. 2 | //! 3 | //! This module provides bidirectional mapping between Polymarket and Kalshi 4 | //! team codes to enable accurate market discovery across platforms. 
5 | 
6 | use anyhow::Result;
7 | use serde::{Deserialize, Serialize};
8 | use std::collections::HashMap;
9 | use std::path::Path;
10 | 
11 | const CACHE_FILE: &str = "kalshi_team_cache.json";
12 | 
13 | /// Team code cache - bidirectional mapping between Poly and Kalshi team codes
14 | #[derive(Debug, Clone, Serialize, Deserialize, Default)]
15 | pub struct TeamCache {
16 |     /// Forward: "league:poly_code" -> "kalshi_code"
17 |     #[serde(serialize_with = "serialize_boxed_map", deserialize_with = "deserialize_boxed_map")]
18 |     forward: HashMap<Box<str>, Box<str>>,
19 |     /// Reverse: "league:kalshi_code" -> "poly_code"
20 |     #[serde(skip)]
21 |     reverse: HashMap<Box<str>, Box<str>>,
22 | }
23 | 
24 | fn serialize_boxed_map<S>(map: &HashMap<Box<str>, Box<str>>, serializer: S) -> Result<S::Ok, S::Error>
25 | where
26 |     S: serde::Serializer,
27 | {
28 |     use serde::ser::SerializeMap;
29 |     let mut ser_map = serializer.serialize_map(Some(map.len()))?;
30 |     for (k, v) in map {
31 |         ser_map.serialize_entry(k.as_ref(), v.as_ref())?;
32 |     }
33 |     ser_map.end()
34 | }
35 | 
36 | fn deserialize_boxed_map<'de, D>(deserializer: D) -> Result<HashMap<Box<str>, Box<str>>, D::Error>
37 | where
38 |     D: serde::Deserializer<'de>,
39 | {
40 |     let string_map: HashMap<String, String> = HashMap::deserialize(deserializer)?;
41 |     Ok(string_map
42 |         .into_iter()
43 |         .map(|(k, v)| (k.into_boxed_str(), v.into_boxed_str()))
44 |         .collect())
45 | }
46 | 
47 | #[allow(dead_code)]
48 | impl TeamCache {
49 |     /// Load cache from JSON file
50 |     pub fn load() -> Self {
51 |         Self::load_from(CACHE_FILE)
52 |     }
53 | 
54 |     /// Load from specific path
55 |     pub fn load_from<P: AsRef<Path>>(path: P) -> Self {
56 |         let mut cache = match std::fs::read_to_string(path.as_ref()) {
57 |             Ok(contents) => {
58 |                 serde_json::from_str(&contents).unwrap_or_else(|e| {
59 |                     tracing::warn!("Failed to parse team cache: {}", e);
60 |                     Self::default()
61 |                 })
62 |             }
63 |             Err(_) => {
64 |                 tracing::info!("No team cache found at {:?}, starting empty", path.as_ref());
65 |                 Self::default()
66 |             }
67 |         };
68 |         cache.rebuild_reverse();
69 |         cache
70 |     }
71 | 
72 |     /// Save cache to JSON file
73 |     pub fn save(&self) -> Result<()> {
74 |         self.save_to(CACHE_FILE)
75 |     }
76 | 
77 |     /// Save to specific path
78 |     pub fn save_to<P: AsRef<Path>>(&self, path: P) -> Result<()> {
79 |         let json = serde_json::to_string_pretty(&self)?;
80 |         std::fs::write(path, json)?;
81 |         Ok(())
82 |     }
83 | 
84 |     /// Get Kalshi code for a Polymarket team code
85 |     /// e.g., ("epl", "che") -> "cfc"
86 |     pub fn poly_to_kalshi(&self, league: &str, poly_code: &str) -> Option<String> {
87 |         let mut key_buf = String::with_capacity(league.len() + 1 + poly_code.len());
88 |         key_buf.push_str(&league.to_ascii_lowercase());
89 |         key_buf.push(':');
90 |         key_buf.push_str(&poly_code.to_ascii_lowercase());
91 |         self.forward.get(key_buf.as_str()).map(|s| s.to_string())
92 |     }
93 | 
94 |     /// Get Polymarket code for a Kalshi team code (reverse lookup)
95 |     /// e.g., ("epl", "cfc") -> "che"
96 |     pub fn kalshi_to_poly(&self, league: &str, kalshi_code: &str) -> Option<String> {
97 |         let mut key_buf = String::with_capacity(league.len() + 1 + kalshi_code.len());
98 |         key_buf.push_str(&league.to_ascii_lowercase());
99 |         key_buf.push(':');
100 |         key_buf.push_str(&kalshi_code.to_ascii_lowercase());
101 | 
102 |         self.reverse
103 |             .get(key_buf.as_str())
104 |             .map(|s| s.to_string())
105 |             .or_else(|| Some(kalshi_code.to_ascii_lowercase()))
106 |     }
107 | 
108 |     /// Add or update a mapping
109 |     pub fn insert(&mut self, league: &str, poly_code: &str, kalshi_code: &str) {
110 |         let league_lower = league.to_ascii_lowercase();
111 |         let poly_lower = poly_code.to_ascii_lowercase();
112 |         let kalshi_lower = kalshi_code.to_ascii_lowercase();
113 | 
114 |         let forward_key: Box<str> = format!("{}:{}", league_lower, poly_lower).into();
115 |         let reverse_key: Box<str> = format!("{}:{}", league_lower, kalshi_lower).into();
116 | 
117 |         self.forward.insert(forward_key, kalshi_lower.into());
118 |         self.reverse.insert(reverse_key, poly_lower.into());
119 |     }
120 | 
121 |     /// Number of mappings
122 |     pub fn len(&self) -> usize {
123 |         self.forward.len()
124 |     }
125 | 
126 |     pub fn is_empty(&self) -> bool {
127 |         self.forward.is_empty()
128 |     }
129 | 
130 |     /// Rebuild reverse lookup map from forward mappings
131 |     fn rebuild_reverse(&mut self) {
132 |         self.reverse.clear();
133 |         self.reverse.reserve(self.forward.len());
134 |         for (key, kalshi_code) in &self.forward {
135 |             if let Some((league, poly)) = key.split_once(':') {
136 |                 let reverse_key: Box<str> = format!("{}:{}", league, kalshi_code).into();
137 |                 self.reverse.insert(reverse_key, poly.into());
138 |             }
139 |         }
140 |     }
141 | }
142 | 
143 | #[cfg(test)]
144 | mod tests {
145 |     use super::*;
146 | 
147 |     #[test]
148 |     fn test_cache_lookup() {
149 |         let mut cache = TeamCache::default();
150 |         cache.insert("epl", "che", "cfc");
151 |         cache.insert("epl", "mun", "mun");
152 | 
153 |         assert_eq!(cache.poly_to_kalshi("epl", "che"), Some("cfc".to_string()));
154 |         assert_eq!(cache.poly_to_kalshi("epl", "CHE"), Some("cfc".to_string()));
155 |         assert_eq!(cache.kalshi_to_poly("epl", "cfc"), Some("che".to_string()));
156 |     }
157 | }
158 | 
--------------------------------------------------------------------------------
/src/config.rs:
--------------------------------------------------------------------------------
1 | //! System configuration and league mapping definitions.
2 | //!
3 | //! This module contains all configuration constants, league mappings, and
4 | //! environment variable parsing for the trading system.
5 | 
6 | /// Kalshi WebSocket URL
7 | pub const KALSHI_WS_URL: &str = "wss://api.elections.kalshi.com/trade-api/ws/v2";
8 | 
9 | /// Kalshi REST API base URL
10 | pub const KALSHI_API_BASE: &str = "https://api.elections.kalshi.com/trade-api/v2";
11 | 
12 | /// Polymarket WebSocket URL
13 | pub const POLYMARKET_WS_URL: &str = "wss://ws-subscriptions-clob.polymarket.com/ws/market";
14 | 
15 | /// Gamma API base URL (Polymarket market data)
16 | pub const GAMMA_API_BASE: &str = "https://gamma-api.polymarket.com";
17 | 
18 | /// Arb threshold: alert when total cost < this (e.g., 0.995 = 0.5% profit)
19 | pub const ARB_THRESHOLD: f64 = 0.995;
20 | 
21 | /// Polymarket ping interval (seconds) - keep connection alive
22 | pub const POLY_PING_INTERVAL_SECS: u64 = 30;
23 | 
24 | /// Kalshi API rate limit delay (milliseconds between requests)
25 | /// Kalshi limit: 20 req/sec = 50ms minimum. We use 60ms for safety margin.
26 | pub const KALSHI_API_DELAY_MS: u64 = 60; 27 | 28 | /// WebSocket reconnect delay (seconds) 29 | pub const WS_RECONNECT_DELAY_SECS: u64 = 5; 30 | 31 | /// Which leagues to monitor (empty slice = all) 32 | pub const ENABLED_LEAGUES: &[&str] = &[]; 33 | 34 | /// Price logging enabled (set PRICE_LOGGING=1 to enable) 35 | #[allow(dead_code)] 36 | pub fn price_logging_enabled() -> bool { 37 | static CACHED: std::sync::OnceLock = std::sync::OnceLock::new(); 38 | *CACHED.get_or_init(|| { 39 | std::env::var("PRICE_LOGGING") 40 | .map(|v| v == "1" || v.to_lowercase() == "true") 41 | .unwrap_or(false) 42 | }) 43 | } 44 | 45 | /// League configuration for market discovery 46 | #[derive(Debug, Clone)] 47 | pub struct LeagueConfig { 48 | pub league_code: &'static str, 49 | pub poly_prefix: &'static str, 50 | pub kalshi_series_game: &'static str, 51 | pub kalshi_series_spread: Option<&'static str>, 52 | pub kalshi_series_total: Option<&'static str>, 53 | pub kalshi_series_btts: Option<&'static str>, 54 | } 55 | 56 | /// Get all supported leagues with their configurations 57 | pub fn get_league_configs() -> Vec { 58 | vec![ 59 | // Major European leagues (full market types) 60 | LeagueConfig { 61 | league_code: "epl", 62 | poly_prefix: "epl", 63 | kalshi_series_game: "KXEPLGAME", 64 | kalshi_series_spread: Some("KXEPLSPREAD"), 65 | kalshi_series_total: Some("KXEPLTOTAL"), 66 | kalshi_series_btts: Some("KXEPLBTTS"), 67 | }, 68 | LeagueConfig { 69 | league_code: "bundesliga", 70 | poly_prefix: "bun", 71 | kalshi_series_game: "KXBUNDESLIGAGAME", 72 | kalshi_series_spread: Some("KXBUNDESLIGASPREAD"), 73 | kalshi_series_total: Some("KXBUNDESLIGATOTAL"), 74 | kalshi_series_btts: Some("KXBUNDESLIGABTTS"), 75 | }, 76 | LeagueConfig { 77 | league_code: "laliga", 78 | poly_prefix: "lal", 79 | kalshi_series_game: "KXLALIGAGAME", 80 | kalshi_series_spread: Some("KXLALIGASPREAD"), 81 | kalshi_series_total: Some("KXLALIGATOTAL"), 82 | kalshi_series_btts: Some("KXLALIGABTTS"), 83 | }, 84 | LeagueConfig { 85 | league_code: "seriea", 86 | poly_prefix: "sea", 87 | kalshi_series_game: "KXSERIEAGAME", 88 | kalshi_series_spread: Some("KXSERIEASPREAD"), 89 | kalshi_series_total: Some("KXSERIEATOTAL"), 90 | kalshi_series_btts: Some("KXSERIEABTTS"), 91 | }, 92 | LeagueConfig { 93 | league_code: "ligue1", 94 | poly_prefix: "fl1", 95 | kalshi_series_game: "KXLIGUE1GAME", 96 | kalshi_series_spread: Some("KXLIGUE1SPREAD"), 97 | kalshi_series_total: Some("KXLIGUE1TOTAL"), 98 | kalshi_series_btts: Some("KXLIGUE1BTTS"), 99 | }, 100 | LeagueConfig { 101 | league_code: "ucl", 102 | poly_prefix: "ucl", 103 | kalshi_series_game: "KXUCLGAME", 104 | kalshi_series_spread: Some("KXUCLSPREAD"), 105 | kalshi_series_total: Some("KXUCLTOTAL"), 106 | kalshi_series_btts: Some("KXUCLBTTS"), 107 | }, 108 | // Secondary European leagues (moneyline only) 109 | LeagueConfig { 110 | league_code: "uel", 111 | poly_prefix: "uel", 112 | kalshi_series_game: "KXUELGAME", 113 | kalshi_series_spread: None, 114 | kalshi_series_total: None, 115 | kalshi_series_btts: None, 116 | }, 117 | LeagueConfig { 118 | league_code: "eflc", 119 | poly_prefix: "elc", 120 | kalshi_series_game: "KXEFLCHAMPIONSHIPGAME", 121 | kalshi_series_spread: None, 122 | kalshi_series_total: None, 123 | kalshi_series_btts: None, 124 | }, 125 | // US Sports 126 | LeagueConfig { 127 | league_code: "nba", 128 | poly_prefix: "nba", 129 | kalshi_series_game: "KXNBAGAME", 130 | kalshi_series_spread: Some("KXNBASPREAD"), 131 | kalshi_series_total: Some("KXNBATOTAL"), 132 | kalshi_series_btts: 
None, 133 | }, 134 | LeagueConfig { 135 | league_code: "nfl", 136 | poly_prefix: "nfl", 137 | kalshi_series_game: "KXNFLGAME", 138 | kalshi_series_spread: Some("KXNFLSPREAD"), 139 | kalshi_series_total: Some("KXNFLTOTAL"), 140 | kalshi_series_btts: None, 141 | }, 142 | LeagueConfig { 143 | league_code: "nhl", 144 | poly_prefix: "nhl", 145 | kalshi_series_game: "KXNHLGAME", 146 | kalshi_series_spread: Some("KXNHLSPREAD"), 147 | kalshi_series_total: Some("KXNHLTOTAL"), 148 | kalshi_series_btts: None, 149 | }, 150 | LeagueConfig { 151 | league_code: "mlb", 152 | poly_prefix: "mlb", 153 | kalshi_series_game: "KXMLBGAME", 154 | kalshi_series_spread: Some("KXMLBSPREAD"), 155 | kalshi_series_total: Some("KXMLBTOTAL"), 156 | kalshi_series_btts: None, 157 | }, 158 | LeagueConfig { 159 | league_code: "mls", 160 | poly_prefix: "mls", 161 | kalshi_series_game: "KXMLSGAME", 162 | kalshi_series_spread: None, 163 | kalshi_series_total: None, 164 | kalshi_series_btts: None, 165 | }, 166 | LeagueConfig { 167 | league_code: "ncaaf", 168 | poly_prefix: "cfb", 169 | kalshi_series_game: "KXNCAAFGAME", 170 | kalshi_series_spread: Some("KXNCAAFSPREAD"), 171 | kalshi_series_total: Some("KXNCAAFTOTAL"), 172 | kalshi_series_btts: None, 173 | }, 174 | ] 175 | } 176 | 177 | /// Get config for a specific league 178 | pub fn get_league_config(league: &str) -> Option { 179 | get_league_configs() 180 | .into_iter() 181 | .find(|c| c.league_code == league || c.poly_prefix == league) 182 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kalshi-Poly / Poly-Poly / Kalshi-Kalshi Arbitrage Bot 2 | 3 | **Kalshi-Poly arbitrage bot**, **Poly-Poly arbitrage bot**, and **Kalshi-Kalshi arbitrage bot** for automated cross-platform trading. A high-performance, production-ready arbitrage trading system that monitors price discrepancies between Kalshi and Polymarket, executing risk-free arbitrage opportunities in real-time with sub-millisecond latency. 4 | 5 | > 🔍 **Search Keywords**: polymarket arbitrage bot, polymarket-kalshi arbitrage bot, kalshi-poly arbitrage, poly-poly arbitrage, kalshi-kalshi arbitrage, kalshi arbitrage, prediction market arbitrage, cross-platform trading bot 6 | 7 | ## Overview 8 | 9 | This **Kalshi-Poly / Poly-Poly / Kalshi-Kalshi arbitrage bot** identifies and executes arbitrage opportunities across: 10 | 11 | - **Kalshi-Poly markets** (cross-platform arbitrage between Kalshi and Polymarket) 12 | - **Poly-Poly markets** (same-platform arbitrage on Polymarket) 13 | - **Kalshi-Kalshi markets** (same-platform arbitrage on Kalshi) 14 | 15 | The bot takes both sides of a market when YES and NO prices add up to less than $1.00, guaranteeing a risk-free profit at market expiry. 16 | 17 | ### How It Works 18 | 19 | **Example Opportunity:** 20 | - YES = $0.40, NO = $0.58 21 | - Total cost = $0.98 22 | - At expiry: YES = $1.00 and NO = $0.00 (or vice versa) 23 | - **Result: 2.04% risk-free return** 24 | 25 | ### Market Insights 26 | 27 | When observing large traders like PN1 finding significant size in these opportunities, the initial assumption was that opportunities would be extremely fleeting with intense competition. 
However, the reality is quite different: 28 | 29 | - **Opportunities are persistent**: While concurrent dislocations aren't frequent, when they do occur, they persist long enough to execute manually 30 | - **Large traders use limit orders**: Whales typically fill positions via limit orders over extended periods, as odds don't fluctuate significantly before game time 31 | - **Manual execution is viable**: Opportunities remain available long enough for manual intervention if needed 32 | 33 | ### System Workflow 34 | 35 | The repository implements the following workflow: 36 | 37 | 1. **Market Scanning**: Scans sports markets that expire within the next couple of days 38 | 2. **Market Matching**: Matches Kalshi-Polymarket markets using: 39 | - Cached mapping of team names between platforms 40 | - Kalshi-Polymarket event slug building conventions 41 | 3. **Real-time Monitoring**: Subscribes to orderbook delta WebSockets to detect instances where YES + NO can be purchased for less than $1.00 42 | 4. **Order Execution**: Executes trades concurrently on both platforms 43 | 5. **Risk Management**: Includes position management and circuit breakers (note: not extensively battle-tested in production) 44 | 45 | ### Useful Components 46 | 47 | Beyond the complete arbitrage system, you may find these components particularly useful: 48 | 49 | - **Cross-platform market mapping**: The team code mapping system for matching markets across Kalshi and Polymarket 50 | - **Rust CLOB client**: A Rust rewrite of Polymarket's Python `py-clob-client` (focused on order submission only) 51 | 52 | ## Quick Start 53 | 54 | ### 1. Install Dependencies 55 | 56 | ```bash 57 | # Rust 1.75+ 58 | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh 59 | 60 | # Build 61 | cargo build --release 62 | ``` 63 | 64 | ### 2. Set Up Credentials 65 | 66 | Create a `.env` file: 67 | 68 | ```bash 69 | # === KALSHI CREDENTIALS === 70 | KALSHI_API_KEY_ID=your_kalshi_api_key_id 71 | KALSHI_PRIVATE_KEY_PATH=/path/to/kalshi_private_key.pem 72 | 73 | # === POLYMARKET CREDENTIALS === 74 | POLY_PRIVATE_KEY=0xYOUR_WALLET_PRIVATE_KEY 75 | POLY_FUNDER=0xYOUR_WALLET_ADDRESS 76 | 77 | # === SYSTEM CONFIGURATION === 78 | DRY_RUN=1 79 | RUST_LOG=info 80 | ``` 81 | 82 | ### 3. 
Run 83 | 84 | ```bash 85 | # Dry run (paper trading) 86 | dotenvx run -- cargo run --release 87 | 88 | # Live execution 89 | DRY_RUN=0 dotenvx run -- cargo run --release 90 | ``` 91 | 92 | --- 93 | 94 | ## Environment Variables 95 | 96 | ### Required 97 | 98 | | Variable | Description | 99 | | ------------------------- | ----------------------------------------------------------- | 100 | | `KALSHI_API_KEY_ID` | Your Kalshi API key ID | 101 | | `KALSHI_PRIVATE_KEY_PATH` | Path to RSA private key (PEM format) for Kalshi API signing | 102 | | `POLY_PRIVATE_KEY` | Ethereum private key (with 0x prefix) for Polymarket wallet | 103 | | `POLY_FUNDER` | Your Polymarket wallet address (with 0x prefix) | 104 | 105 | ### System Configuration 106 | 107 | | Variable | Default | Description | 108 | | ----------------- | ------- | ----------------------------------------------------- | 109 | | `DRY_RUN` | `1` | `1` = paper trading (no orders), `0` = live execution | 110 | | `RUST_LOG` | `info` | Log level: `error`, `warn`, `info`, `debug`, `trace` | 111 | | `FORCE_DISCOVERY` | `0` | `1` = re-fetch market mappings (ignore cache) | 112 | | `PRICE_LOGGING` | `0` | `1` = verbose price update logging | 113 | 114 | ### Test Mode 115 | 116 | | Variable | Default | Description | 117 | | --------------- | -------------------- | ---------------------------------------------------------------------------------------------- | 118 | | `TEST_ARB` | `0` | `1` = inject synthetic arb opportunity for testing | 119 | | `TEST_ARB_TYPE` | `poly_yes_kalshi_no` | Arb type: `poly_yes_kalshi_no`, `kalshi_yes_poly_no`, `poly_same_market`, `kalshi_same_market` | 120 | 121 | ### Circuit Breaker 122 | 123 | | Variable | Default | Description | 124 | | ---------------------------- | ------- | ------------------------------------------- | 125 | | `CB_ENABLED` | `true` | Enable/disable circuit breaker | 126 | | `CB_MAX_POSITION_PER_MARKET` | `100` | Max contracts per market | 127 | | `CB_MAX_TOTAL_POSITION` | `500` | Max total contracts across all markets | 128 | | `CB_MAX_DAILY_LOSS` | `5000` | Max daily loss in cents before halt | 129 | | `CB_MAX_CONSECUTIVE_ERRORS` | `5` | Consecutive errors before halt | 130 | | `CB_COOLDOWN_SECS` | `60` | Cooldown period after circuit breaker trips | 131 | 132 | --- 133 | 134 | ## Obtaining Credentials 135 | 136 | ### Kalshi 137 | 138 | 1. Log in to [Kalshi](https://kalshi.com) 139 | 2. Go to **Settings → API Keys** 140 | 3. Create a new API key with trading permissions 141 | 4. Download the private key (PEM file) 142 | 5. Note the API Key ID 143 | 144 | ### Polymarket 145 | 146 | 1. Create or import an Ethereum wallet (MetaMask, etc.) 147 | 2. Export the private key (include `0x` prefix) 148 | 3. Fund your wallet on Polygon network with USDC 149 | 4. 
The wallet address is your `POLY_FUNDER` 150 | 151 | --- 152 | 153 | ## Usage Examples 154 | 155 | ### Paper Trading (Development) 156 | 157 | ```bash 158 | # Full logging, dry run 159 | RUST_LOG=debug DRY_RUN=1 dotenvx run -- cargo run --release 160 | ``` 161 | 162 | ### Test Arbitrage Execution 163 | 164 | ```bash 165 | # Inject synthetic arb to test execution path 166 | TEST_ARB=1 DRY_RUN=0 dotenvx run -- cargo run --release 167 | ``` 168 | 169 | ### Production 170 | 171 | ```bash 172 | # Live trading with circuit breaker 173 | DRY_RUN=0 CB_MAX_DAILY_LOSS=10000 dotenvx run -- cargo run --release 174 | ``` 175 | 176 | ### Force Market Re-Discovery 177 | 178 | ```bash 179 | # Clear cache and re-fetch all market mappings 180 | FORCE_DISCOVERY=1 dotenvx run -- cargo run --release 181 | ``` 182 | 183 | --- 184 | 185 | ## How It Works 186 | 187 | ### Arbitrage Mechanics 188 | 189 | In prediction markets, the fundamental property holds: **YES + NO = $1.00** (guaranteed). 190 | 191 | This **Polymarket arbitrage bot** and **Polymarket-Kalshi arbitrage bot** exploits this property by detecting when: 192 | 193 | ``` 194 | Best YES ask (Platform A) + Best NO ask (Platform B) < $1.00 195 | ``` 196 | 197 | **Example Scenario (Kalshi-Poly Arbitrage):** 198 | 199 | ``` 200 | Kalshi YES ask: 42¢ 201 | Polymarket NO ask: 56¢ 202 | Total cost: 98¢ 203 | Guaranteed payout: 100¢ 204 | Net profit: 2¢ per contract (2.04% return) 205 | ``` 206 | 207 | The bot automatically executes both legs simultaneously, locking in the risk-free profit. 208 | 209 | ### Arbitrage Opportunity Types 210 | 211 | This **Kalshi-Poly / Poly-Poly / Kalshi-Kalshi arbitrage bot** supports four types of arbitrage opportunities: 212 | 213 | | Type | Execution Strategy | Frequency | Description | 214 | | -------------------- | ------------------------------------------- | --------- | ----------- | 215 | | `poly_yes_kalshi_no` | Buy Polymarket YES + Buy Kalshi NO | Common | **Kalshi-Poly**: Cross-platform arbitrage | 216 | | `kalshi_yes_poly_no` | Buy Kalshi YES + Buy Polymarket NO | Common | **Kalshi-Poly**: Cross-platform arbitrage | 217 | | `poly_only` | Buy Polymarket YES + Buy Polymarket NO | Rare | **Poly-Poly**: Same-platform arbitrage | 218 | | `kalshi_only` | Buy Kalshi YES + Buy Kalshi NO | Rare | **Kalshi-Kalshi**: Same-platform arbitrage | 219 | 220 | ### Fee Structure 221 | 222 | - **Kalshi**: Trading fees calculated as `ceil(0.07 × contracts × price × (1-price))` - automatically factored into arbitrage detection 223 | - **Polymarket**: Zero trading fees on all orders 224 | 225 | --- 226 | 227 | ## Architecture 228 | 229 | This **Kalshi-Poly / Poly-Poly / Kalshi-Kalshi arbitrage bot** is built with a modular, high-performance architecture optimized for low-latency execution: 230 | 231 | ``` 232 | src/ 233 | ├── main.rs # Application entry point and WebSocket orchestration 234 | ├── types.rs # Core type definitions and market state management 235 | ├── execution.rs # Concurrent order execution engine with position reconciliation 236 | ├── position_tracker.rs # Channel-based position tracking and P&L calculation 237 | ├── circuit_breaker.rs # Risk management with configurable limits and auto-halt 238 | ├── discovery.rs # Intelligent market discovery and matching system 239 | ├── cache.rs # Team code mapping cache for cross-platform matching 240 | ├── kalshi.rs # Kalshi REST API and WebSocket client 241 | ├── polymarket.rs # Polymarket WebSocket client and market data 242 | ├── polymarket_clob.rs # Polymarket CLOB order 
execution client 243 | └── config.rs # League configurations and system thresholds 244 | ``` 245 | 246 | ### Key Features 247 | 248 | - **Lock-free orderbook cache** using atomic operations for zero-copy updates 249 | - **SIMD-accelerated arbitrage detection** for sub-millisecond latency 250 | - **Concurrent order execution** with automatic position reconciliation 251 | - **Circuit breaker protection** with configurable risk limits 252 | - **Intelligent market discovery** with caching and incremental updates 253 | 254 | --- 255 | 256 | ## Development 257 | 258 | ### Run Tests 259 | 260 | ```bash 261 | cargo test 262 | ``` 263 | 264 | ### Enable Profiling 265 | 266 | ```bash 267 | cargo build --release --features profiling 268 | ``` 269 | 270 | ### Benchmarks 271 | 272 | ```bash 273 | cargo bench 274 | ``` 275 | 276 | --- 277 | 278 | ## Project Status 279 | 280 | ### ✅ Completed Features 281 | 282 | - [x] Kalshi REST API and WebSocket client 283 | - [x] Polymarket REST API and WebSocket client 284 | - [x] Lock-free atomic orderbook cache 285 | - [x] SIMD-accelerated arbitrage detection 286 | - [x] Concurrent multi-leg order execution 287 | - [x] Real-time position and P&L tracking 288 | - [x] Circuit breaker with configurable risk limits 289 | - [x] Intelligent market discovery with caching 290 | - [x] Automatic exposure management for mismatched fills 291 | 292 | ### 🚧 Future Enhancements 293 | 294 | - [ ] Web-based risk limit configuration UI 295 | - [ ] Multi-account support for portfolio management 296 | - [ ] Advanced order routing strategies 297 | - [ ] Historical performance analytics dashboard 298 | 299 | --- 300 | 301 | ## Topics & Keywords 302 | 303 | This **Kalshi-Poly / Poly-Poly / Kalshi-Kalshi arbitrage bot** repository covers: 304 | 305 | - **Kalshi-Poly arbitrage** - Cross-platform arbitrage between Kalshi and Polymarket 306 | - **Poly-Poly arbitrage** - Same-platform arbitrage on Polymarket markets 307 | - **Kalshi-Kalshi arbitrage** - Same-platform arbitrage on Kalshi markets 308 | - **Polymarket arbitrage** - Automated trading on Polymarket prediction markets 309 | - **Kalshi arbitrage** - Automated trading on Kalshi prediction markets 310 | - **Cross-platform arbitrage** - Exploiting price differences between Polymarket and Kalshi 311 | - **Prediction market trading** - Automated trading bot for prediction markets 312 | - **Arbitrage trading bot** - High-frequency arbitrage detection and execution 313 | - **Market making bot** - Risk-free market making via arbitrage 314 | - **Sports betting arbitrage** - Arbitrage opportunities in sports prediction markets 315 | - **Rust trading bot** - High-performance trading system written in Rust 316 | 317 | ### Related Technologies 318 | 319 | - Rust async/await for high-performance concurrent execution 320 | - WebSocket real-time price feeds (Kalshi & Polymarket) 321 | - REST API integration (Kalshi & Polymarket CLOB) 322 | - Atomic lock-free data structures for orderbook management 323 | - SIMD-accelerated arbitrage detection algorithms 324 | 325 | --- 326 | 327 | ## Contributing 328 | 329 | Contributions are welcome! This **Kalshi-Poly / Poly-Poly / Kalshi-Kalshi arbitrage bot** is open source and designed to help the prediction market trading community. 
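
If you want a feel for the core math before diving into the SIMD/fixed-point implementation, the tiny standalone sketch below reproduces the worked example from the *Arbitrage Mechanics* section. It is a simplified illustration, not the bot's actual detection path; the function names are made up for this snippet, and the Kalshi fee from the *Fee Structure* section would still need to be added on the Kalshi leg.

```rust
/// Locked-in profit per contract if YES + NO can be bought for less than the
/// guaranteed $1.00 payout (Kalshi fees not included in this simplified check).
fn arb_edge(yes_ask: f64, no_ask: f64) -> Option<f64> {
    let cost = yes_ask + no_ask;
    (cost < 1.00).then(|| 1.00 - cost)
}

fn main() {
    // Worked example from above: Kalshi YES at 42¢ + Polymarket NO at 56¢ = 98¢ total.
    let (yes_ask, no_ask) = (0.42, 0.56);
    if let Some(edge) = arb_edge(yes_ask, no_ask) {
        let ret = edge / (yes_ask + no_ask) * 100.0;
        println!("edge: {:.0}¢ per contract ({:.2}% return)", edge * 100.0, ret);
    }
}
```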
330 | 331 | ## License 332 | 333 | This project is licensed under either of 334 | 335 | - Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 336 | - MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 337 | 338 | at your option. 339 | -------------------------------------------------------------------------------- /src/circuit_breaker.rs: -------------------------------------------------------------------------------- 1 | //! Risk management and circuit breaker system. 2 | //! 3 | //! This module provides configurable risk limits, position tracking, and 4 | //! automatic trading halt mechanisms to protect against excessive losses. 5 | 6 | use std::sync::atomic::{AtomicBool, AtomicI64, Ordering}; 7 | use std::time::{Duration, Instant}; 8 | use tokio::sync::RwLock; 9 | use tracing::{error, warn, info}; 10 | 11 | /// Circuit breaker configuration from environment 12 | #[derive(Debug, Clone)] 13 | pub struct CircuitBreakerConfig { 14 | /// Maximum position size per market (in contracts) 15 | pub max_position_per_market: i64, 16 | 17 | /// Maximum total position across all markets (in contracts) 18 | pub max_total_position: i64, 19 | 20 | /// Maximum daily loss (in dollars) before halting 21 | pub max_daily_loss: f64, 22 | 23 | /// Maximum number of consecutive errors before halting 24 | pub max_consecutive_errors: u32, 25 | 26 | /// Cooldown period after a trip (seconds) 27 | pub cooldown_secs: u64, 28 | 29 | /// Whether circuit breakers are enabled 30 | pub enabled: bool, 31 | } 32 | 33 | impl CircuitBreakerConfig { 34 | pub fn from_env() -> Self { 35 | Self { 36 | max_position_per_market: std::env::var("CB_MAX_POSITION_PER_MARKET") 37 | .ok() 38 | .and_then(|v| v.parse().ok()) 39 | .unwrap_or(50000), 40 | 41 | max_total_position: std::env::var("CB_MAX_TOTAL_POSITION") 42 | .ok() 43 | .and_then(|v| v.parse().ok()) 44 | .unwrap_or(100000), 45 | 46 | max_daily_loss: std::env::var("CB_MAX_DAILY_LOSS") 47 | .ok() 48 | .and_then(|v| v.parse().ok()) 49 | .unwrap_or(500.0), 50 | 51 | max_consecutive_errors: std::env::var("CB_MAX_CONSECUTIVE_ERRORS") 52 | .ok() 53 | .and_then(|v| v.parse().ok()) 54 | .unwrap_or(5), 55 | 56 | cooldown_secs: std::env::var("CB_COOLDOWN_SECS") 57 | .ok() 58 | .and_then(|v| v.parse().ok()) 59 | .unwrap_or(300), // 5 minutes default 60 | 61 | enabled: std::env::var("CB_ENABLED") 62 | .map(|v| v == "1" || v == "true") 63 | .unwrap_or(true), // Enabled by default for safety 64 | } 65 | } 66 | } 67 | 68 | /// Reason why circuit breaker was tripped 69 | #[derive(Debug, Clone, PartialEq)] 70 | pub enum TripReason { 71 | MaxPositionPerMarket { market: String, position: i64, limit: i64 }, 72 | MaxTotalPosition { position: i64, limit: i64 }, 73 | MaxDailyLoss { loss: f64, limit: f64 }, 74 | ConsecutiveErrors { count: u32, limit: u32 }, 75 | ManualHalt, 76 | } 77 | 78 | impl std::fmt::Display for TripReason { 79 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 80 | match self { 81 | TripReason::MaxPositionPerMarket { market, position, limit } => { 82 | write!(f, "Max position per market: {} has {} contracts (limit: {})", market, position, limit) 83 | } 84 | TripReason::MaxTotalPosition { position, limit } => { 85 | write!(f, "Max total position: {} contracts (limit: {})", position, limit) 86 | } 87 | TripReason::MaxDailyLoss { loss, limit } => { 88 | write!(f, "Max daily loss: ${:.2} (limit: ${:.2})", loss, limit) 89 | } 90 | TripReason::ConsecutiveErrors { count, limit } => 
{
91 |                 write!(f, "Consecutive errors: {} (limit: {})", count, limit)
92 |             }
93 |             TripReason::ManualHalt => {
94 |                 write!(f, "Manual halt triggered")
95 |             }
96 |         }
97 |     }
98 | }
99 | 
100 | /// Position tracking for a single market
101 | #[derive(Debug, Default)]
102 | pub struct MarketPosition {
103 |     pub kalshi_yes: i64,
104 |     pub kalshi_no: i64,
105 |     pub poly_yes: i64,
106 |     pub poly_no: i64,
107 | }
108 | 
109 | #[allow(dead_code)]
110 | impl MarketPosition {
111 |     pub fn net_position(&self) -> i64 {
112 |         // Net exposure: positive = long YES, negative = long NO
113 |         (self.kalshi_yes + self.poly_yes) - (self.kalshi_no + self.poly_no)
114 |     }
115 | 
116 |     pub fn total_contracts(&self) -> i64 {
117 |         self.kalshi_yes + self.kalshi_no + self.poly_yes + self.poly_no
118 |     }
119 | }
120 | 
121 | /// Circuit breaker state
122 | pub struct CircuitBreaker {
123 |     config: CircuitBreakerConfig,
124 | 
125 |     /// Whether trading is currently halted
126 |     halted: AtomicBool,
127 | 
128 |     /// When the circuit breaker was tripped
129 |     tripped_at: RwLock<Option<Instant>>,
130 | 
131 |     /// Reason for trip
132 |     trip_reason: RwLock<Option<TripReason>>,
133 | 
134 |     /// Consecutive error count
135 |     consecutive_errors: AtomicI64,
136 | 
137 |     /// Daily P&L tracking (in cents)
138 |     daily_pnl_cents: AtomicI64,
139 | 
140 |     /// Positions per market
141 |     positions: RwLock<std::collections::HashMap<String, MarketPosition>>,
142 | }
143 | 
144 | impl CircuitBreaker {
145 |     pub fn new(config: CircuitBreakerConfig) -> Self {
146 |         info!("[CB] Circuit breaker initialized:");
147 |         info!("[CB] Enabled: {}", config.enabled);
148 |         info!("[CB] Max position per market: {} contracts", config.max_position_per_market);
149 |         info!("[CB] Max total position: {} contracts", config.max_total_position);
150 |         info!("[CB] Max daily loss: ${:.2}", config.max_daily_loss);
151 |         info!("[CB] Max consecutive errors: {}", config.max_consecutive_errors);
152 |         info!("[CB] Cooldown: {}s", config.cooldown_secs);
153 | 
154 |         Self {
155 |             config,
156 |             halted: AtomicBool::new(false),
157 |             tripped_at: RwLock::new(None),
158 |             trip_reason: RwLock::new(None),
159 |             consecutive_errors: AtomicI64::new(0),
160 |             daily_pnl_cents: AtomicI64::new(0),
161 |             positions: RwLock::new(std::collections::HashMap::new()),
162 |         }
163 |     }
164 | 
165 |     /// Check if trading is allowed
166 |     #[allow(dead_code)]
167 |     pub fn is_trading_allowed(&self) -> bool {
168 |         if !self.config.enabled {
169 |             return true;
170 |         }
171 |         !self.halted.load(Ordering::SeqCst)
172 |     }
173 | 
174 |     /// Check if we can execute a trade for a specific market
175 |     pub async fn can_execute(&self, market_id: &str, contracts: i64) -> Result<(), TripReason> {
176 |         if !self.config.enabled {
177 |             return Ok(());
178 |         }
179 | 
180 |         if self.halted.load(Ordering::SeqCst) {
181 |             let reason = self.trip_reason.read().await;
182 |             return Err(reason.clone().unwrap_or(TripReason::ManualHalt));
183 |         }
184 | 
185 |         // Check position limits
186 |         let positions = self.positions.read().await;
187 | 
188 |         // Per-market limit
189 |         if let Some(pos) = positions.get(market_id) {
190 |             let new_position = pos.total_contracts() + contracts;
191 |             if new_position > self.config.max_position_per_market {
192 |                 return Err(TripReason::MaxPositionPerMarket {
193 |                     market: market_id.to_string(),
194 |                     position: new_position,
195 |                     limit: self.config.max_position_per_market,
196 |                 });
197 |             }
198 |         }
199 | 
200 |         // Total position limit
201 |         let total: i64 = positions.values().map(|p| p.total_contracts()).sum();
202 |         if total + contracts > self.config.max_total_position {
203 |             return
Err(TripReason::MaxTotalPosition { 204 | position: total + contracts, 205 | limit: self.config.max_total_position, 206 | }); 207 | } 208 | 209 | // Daily loss limit 210 | let daily_loss = -self.daily_pnl_cents.load(Ordering::SeqCst) as f64 / 100.0; 211 | if daily_loss > self.config.max_daily_loss { 212 | return Err(TripReason::MaxDailyLoss { 213 | loss: daily_loss, 214 | limit: self.config.max_daily_loss, 215 | }); 216 | } 217 | 218 | Ok(()) 219 | } 220 | 221 | /// Record a successful execution 222 | pub async fn record_success(&self, market_id: &str, kalshi_contracts: i64, poly_contracts: i64, pnl: f64) { 223 | // Reset consecutive errors 224 | self.consecutive_errors.store(0, Ordering::SeqCst); 225 | 226 | // Update P&L 227 | let pnl_cents = (pnl * 100.0) as i64; 228 | self.daily_pnl_cents.fetch_add(pnl_cents, Ordering::SeqCst); 229 | 230 | // Update positions 231 | let mut positions = self.positions.write().await; 232 | let pos = positions.entry(market_id.to_string()).or_default(); 233 | pos.kalshi_yes += kalshi_contracts; 234 | pos.poly_no += poly_contracts; 235 | } 236 | 237 | /// Record an error 238 | pub async fn record_error(&self) { 239 | let errors = self.consecutive_errors.fetch_add(1, Ordering::SeqCst) + 1; 240 | 241 | if errors >= self.config.max_consecutive_errors as i64 { 242 | self.trip(TripReason::ConsecutiveErrors { 243 | count: errors as u32, 244 | limit: self.config.max_consecutive_errors, 245 | }).await; 246 | } 247 | } 248 | 249 | /// Record P&L update (for tracking without execution) 250 | #[allow(dead_code)] 251 | pub fn record_pnl(&self, pnl: f64) { 252 | let pnl_cents = (pnl * 100.0) as i64; 253 | self.daily_pnl_cents.fetch_add(pnl_cents, Ordering::SeqCst); 254 | } 255 | 256 | /// Trip the circuit breaker 257 | pub async fn trip(&self, reason: TripReason) { 258 | if !self.config.enabled { 259 | return; 260 | } 261 | 262 | error!("🚨 CIRCUIT BREAKER TRIPPED: {}", reason); 263 | 264 | self.halted.store(true, Ordering::SeqCst); 265 | *self.tripped_at.write().await = Some(Instant::now()); 266 | *self.trip_reason.write().await = Some(reason); 267 | } 268 | 269 | /// Manually halt trading 270 | #[allow(dead_code)] 271 | pub async fn halt(&self) { 272 | warn!("[CB] Manual halt triggered"); 273 | self.trip(TripReason::ManualHalt).await; 274 | } 275 | 276 | /// Reset the circuit breaker (after cooldown or manual reset) 277 | #[allow(dead_code)] 278 | pub async fn reset(&self) { 279 | info!("[CB] Circuit breaker reset"); 280 | self.halted.store(false, Ordering::SeqCst); 281 | *self.tripped_at.write().await = None; 282 | *self.trip_reason.write().await = None; 283 | self.consecutive_errors.store(0, Ordering::SeqCst); 284 | } 285 | 286 | /// Reset daily P&L (call at midnight) 287 | #[allow(dead_code)] 288 | pub fn reset_daily_pnl(&self) { 289 | info!("[CB] Daily P&L reset"); 290 | self.daily_pnl_cents.store(0, Ordering::SeqCst); 291 | } 292 | 293 | /// Check if cooldown has elapsed and auto-reset if so 294 | #[allow(dead_code)] 295 | pub async fn check_cooldown(&self) -> bool { 296 | if !self.halted.load(Ordering::SeqCst) { 297 | return true; 298 | } 299 | 300 | let tripped_at = self.tripped_at.read().await; 301 | if let Some(tripped) = *tripped_at { 302 | if tripped.elapsed() > Duration::from_secs(self.config.cooldown_secs) { 303 | drop(tripped_at); // Release read lock before reset 304 | self.reset().await; 305 | return true; 306 | } 307 | } 308 | 309 | false 310 | } 311 | 312 | /// Get current status 313 | #[allow(dead_code)] 314 | pub async fn status(&self) -> 
CircuitBreakerStatus { 315 | let positions = self.positions.read().await; 316 | let total_position: i64 = positions.values().map(|p| p.total_contracts()).sum(); 317 | 318 | CircuitBreakerStatus { 319 | enabled: self.config.enabled, 320 | halted: self.halted.load(Ordering::SeqCst), 321 | trip_reason: self.trip_reason.read().await.clone(), 322 | consecutive_errors: self.consecutive_errors.load(Ordering::SeqCst) as u32, 323 | daily_pnl: self.daily_pnl_cents.load(Ordering::SeqCst) as f64 / 100.0, 324 | total_position, 325 | market_count: positions.len(), 326 | } 327 | } 328 | } 329 | 330 | #[derive(Debug, Clone)] 331 | #[allow(dead_code)] 332 | pub struct CircuitBreakerStatus { 333 | pub enabled: bool, 334 | pub halted: bool, 335 | pub trip_reason: Option, 336 | pub consecutive_errors: u32, 337 | pub daily_pnl: f64, 338 | pub total_position: i64, 339 | pub market_count: usize, 340 | } 341 | 342 | impl std::fmt::Display for CircuitBreakerStatus { 343 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 344 | if !self.enabled { 345 | return write!(f, "Circuit Breaker: DISABLED"); 346 | } 347 | 348 | if self.halted { 349 | write!(f, "Circuit Breaker: 🛑 HALTED")?; 350 | if let Some(reason) = &self.trip_reason { 351 | write!(f, " ({})", reason)?; 352 | } 353 | } else { 354 | write!(f, "Circuit Breaker: ✅ OK")?; 355 | } 356 | 357 | write!(f, " | P&L: ${:.2} | Pos: {} contracts across {} markets | Errors: {}", 358 | self.daily_pnl, self.total_position, self.market_count, self.consecutive_errors) 359 | } 360 | } 361 | 362 | #[cfg(test)] 363 | mod tests { 364 | use super::*; 365 | 366 | #[tokio::test] 367 | async fn test_circuit_breaker_position_limit() { 368 | let config = CircuitBreakerConfig { 369 | max_position_per_market: 10, 370 | max_total_position: 50, 371 | max_daily_loss: 100.0, 372 | max_consecutive_errors: 3, 373 | cooldown_secs: 60, 374 | enabled: true, 375 | }; 376 | 377 | let cb = CircuitBreaker::new(config); 378 | 379 | // Should allow initial trade 380 | assert!(cb.can_execute("market1", 5).await.is_ok()); 381 | 382 | // Record the trade 383 | cb.record_success("market1", 5, 5, 0.0).await; 384 | 385 | // Should reject trade exceeding per-market limit 386 | let result = cb.can_execute("market1", 10).await; 387 | assert!(matches!(result, Err(TripReason::MaxPositionPerMarket { .. }))); 388 | } 389 | 390 | #[tokio::test] 391 | async fn test_consecutive_errors() { 392 | let config = CircuitBreakerConfig { 393 | max_position_per_market: 100, 394 | max_total_position: 500, 395 | max_daily_loss: 100.0, 396 | max_consecutive_errors: 3, 397 | cooldown_secs: 60, 398 | enabled: true, 399 | }; 400 | 401 | let cb = CircuitBreaker::new(config); 402 | 403 | // Record errors 404 | cb.record_error().await; 405 | cb.record_error().await; 406 | assert!(cb.is_trading_allowed()); 407 | 408 | // Third error should trip 409 | cb.record_error().await; 410 | assert!(!cb.is_trading_allowed()); 411 | } 412 | } -------------------------------------------------------------------------------- /src/polymarket.rs: -------------------------------------------------------------------------------- 1 | //! Polymarket platform integration client. 2 | //! 3 | //! This module provides WebSocket client for real-time Polymarket price feeds 4 | //! and REST API client for market discovery via the Gamma API. 
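//!
//! A minimal discovery sketch (the slug is illustrative; real slugs are assembled from
//! the league configs and team-code cache during discovery):
//!
//! ```ignore
//! use prediction_market_arbitrage::polymarket::GammaClient;
//!
//! # async fn demo() -> anyhow::Result<()> {
//! let gamma = GammaClient::new();
//! // Tries the exact slug first, then the next-day slug to absorb timezone skew.
//! if let Some((yes_token, no_token)) = gamma.lookup_market("epl-che-avl-2025-12-08").await? {
//!     println!("YES token: {yes_token}  NO token: {no_token}");
//! }
//! # Ok(())
//! # }
//! ```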
5 | 
6 | use anyhow::{Context, Result};
7 | use futures_util::{SinkExt, StreamExt};
8 | use serde::{Deserialize, Serialize};
9 | use std::sync::Arc;
10 | use std::time::Duration;
11 | use tokio::sync::mpsc;
12 | use tokio::time::{interval, Instant};
13 | use tokio_tungstenite::{connect_async, tungstenite::Message};
14 | use tracing::{error, info, warn};
15 | 
16 | use crate::config::{POLYMARKET_WS_URL, POLY_PING_INTERVAL_SECS, GAMMA_API_BASE};
17 | use crate::execution::NanoClock;
18 | use crate::types::{
19 |     GlobalState, FastExecutionRequest, ArbType, PriceCents, SizeCents,
20 |     parse_price, fxhash_str,
21 | };
22 | 
23 | // === WebSocket Message Types ===
24 | 
25 | #[derive(Deserialize, Debug)]
26 | pub struct BookSnapshot {
27 |     pub asset_id: String,
28 |     #[allow(dead_code)]
29 |     pub bids: Vec<PriceLevel>,
30 |     pub asks: Vec<PriceLevel>,
31 | }
32 | 
33 | #[derive(Deserialize, Debug)]
34 | pub struct PriceLevel {
35 |     pub price: String,
36 |     pub size: String,
37 | }
38 | 
39 | #[derive(Deserialize, Debug)]
40 | pub struct PriceChangeEvent {
41 |     pub event_type: Option<String>,
42 |     #[serde(default)]
43 |     pub price_changes: Option<Vec<PriceChangeItem>>,
44 | }
45 | 
46 | #[derive(Deserialize, Debug)]
47 | pub struct PriceChangeItem {
48 |     pub asset_id: String,
49 |     pub price: Option<String>,
50 |     pub side: Option<String>,
51 | }
52 | 
53 | #[derive(Serialize)]
54 | struct SubscribeCmd {
55 |     assets_ids: Vec<String>,
56 |     #[serde(rename = "type")]
57 |     sub_type: &'static str,
58 | }
59 | 
60 | // === Gamma API Client ===
61 | 
62 | pub struct GammaClient {
63 |     http: reqwest::Client,
64 | }
65 | 
66 | impl GammaClient {
67 |     pub fn new() -> Self {
68 |         Self {
69 |             http: reqwest::Client::builder()
70 |                 .timeout(Duration::from_secs(10))
71 |                 .build()
72 |                 .expect("Failed to build HTTP client"),
73 |         }
74 |     }
75 | 
76 |     /// Look up Polymarket market by slug, return (yes_token, no_token)
77 |     /// Tries both the exact date and next day (timezone handling)
78 |     pub async fn lookup_market(&self, slug: &str) -> Result<Option<(String, String)>> {
79 |         // Try exact slug first
80 |         if let Some(tokens) = self.try_lookup_slug(slug).await? {
81 |             return Ok(Some(tokens));
82 |         }
83 | 
84 |         // Try with next day (Polymarket may use local time)
85 |         if let Some(next_day_slug) = increment_date_in_slug(slug) {
86 |             if let Some(tokens) = self.try_lookup_slug(&next_day_slug).await? {
87 |                 info!(" 📅 Found with next-day slug: {}", next_day_slug);
88 |                 return Ok(Some(tokens));
89 |             }
90 |         }
91 | 
92 |         Ok(None)
93 |     }
94 | 
95 |     async fn try_lookup_slug(&self, slug: &str) -> Result<Option<(String, String)>> {
96 |         let url = format!("{}/markets?slug={}", GAMMA_API_BASE, slug);
97 | 
98 |         let resp = self.http.get(&url).send().await?;
99 | 
100 |         if !resp.status().is_success() {
101 |             return Ok(None);
102 |         }
103 | 
104 |         let markets: Vec<GammaMarket> = resp.json().await?;
105 | 
106 |         if markets.is_empty() {
107 |             return Ok(None);
108 |         }
109 | 
110 |         let market = &markets[0];
111 | 
112 |         // Check if active and not closed
113 |         if market.closed == Some(true) || market.active == Some(false) {
114 |             return Ok(None);
115 |         }
116 | 
117 |         // Parse clobTokenIds JSON array
118 |         let token_ids: Vec<String> = market.clob_token_ids
119 |             .as_ref()
120 |             .and_then(|s| serde_json::from_str(s).ok())
121 |             .unwrap_or_default();
122 | 
123 |         if token_ids.len() >= 2 {
124 |             Ok(Some((token_ids[0].clone(), token_ids[1].clone())))
125 |         } else {
126 |             Ok(None)
127 |         }
128 |     }
129 | }
130 | 
131 | #[derive(Debug, Deserialize)]
132 | struct GammaMarket {
133 |     #[serde(rename = "clobTokenIds")]
134 |     clob_token_ids: Option<String>,
135 |     active: Option<bool>,
136 |     closed: Option<bool>,
137 | }
138 | 
139 | /// Increment the date in a Polymarket slug by 1 day
140 | /// e.g., "epl-che-avl-2025-12-08" -> "epl-che-avl-2025-12-09"
141 | fn increment_date_in_slug(slug: &str) -> Option<String> {
142 |     let parts: Vec<&str> = slug.split('-').collect();
143 |     if parts.len() < 6 {
144 |         return None;
145 |     }
146 | 
147 |     let year: i32 = parts[3].parse().ok()?;
148 |     let month: u32 = parts[4].parse().ok()?;
149 |     let day: u32 = parts[5].parse().ok()?;
150 | 
151 |     // Compute next day
152 |     let days_in_month = match month {
153 |         1 | 3 | 5 | 7 | 8 | 10 | 12 => 31,
154 |         4 | 6 | 9 | 11 => 30,
155 |         2 => if year % 4 == 0 && (year % 100 != 0 || year % 400 == 0) { 29 } else { 28 },
156 |         _ => 31,
157 |     };
158 | 
159 |     let (new_year, new_month, new_day) = if day >= days_in_month {
160 |         if month == 12 { (year + 1, 1, 1) } else { (year, month + 1, 1) }
161 |     } else {
162 |         (year, month, day + 1)
163 |     };
164 | 
165 |     // Rebuild slug with owned strings
166 |     let prefix = parts[..3].join("-");
167 |     let suffix = if parts.len() > 6 { format!("-{}", parts[6..].join("-")) } else { String::new() };
168 | 
169 |     Some(format!("{}-{}-{:02}-{:02}{}", prefix, new_year, new_month, new_day, suffix))
170 | }
171 | 
172 | // =============================================================================
173 | // WebSocket Runner
174 | // =============================================================================
175 | 
176 | /// Parse size from Polymarket (format: "123.45" dollars)
177 | #[inline(always)]
178 | fn parse_size(s: &str) -> SizeCents {
179 |     // Parse as f64 and convert to cents
180 |     s.parse::<f64>()
181 |         .map(|size| (size * 100.0).round() as SizeCents)
182 |         .unwrap_or(0)
183 | }
184 | 
185 | /// WebSocket runner
186 | pub async fn run_ws(
187 |     state: Arc<GlobalState>,
188 |     exec_tx: mpsc::Sender<FastExecutionRequest>,
189 |     threshold_cents: PriceCents,
190 | ) -> Result<()> {
191 |     let tokens: Vec<String> = state.markets.iter()
192 |         .take(state.market_count())
193 |         .filter_map(|m| m.pair.as_ref())
194 |         .flat_map(|p| [p.poly_yes_token.to_string(), p.poly_no_token.to_string()])
195 |         .collect();
196 | 
197 |     if tokens.is_empty() {
198 |         info!("[POLY] No markets to monitor");
199 |         tokio::time::sleep(Duration::from_secs(u64::MAX)).await;
200 |         return Ok(());
201 |     }
202 | 
203 |     let (ws_stream, _) = connect_async(POLYMARKET_WS_URL)
204 |         .await
205 |
.context("Failed to connect to Polymarket")?; 206 | 207 | info!("[POLY] Connected"); 208 | 209 | let (mut write, mut read) = ws_stream.split(); 210 | 211 | // Subscribe 212 | let subscribe_msg = SubscribeCmd { 213 | assets_ids: tokens.clone(), 214 | sub_type: "market", 215 | }; 216 | 217 | write.send(Message::Text(serde_json::to_string(&subscribe_msg)?)).await?; 218 | info!("[POLY] Subscribed to {} tokens", tokens.len()); 219 | 220 | let clock = NanoClock::new(); 221 | let mut ping_interval = interval(Duration::from_secs(POLY_PING_INTERVAL_SECS)); 222 | let mut last_message = Instant::now(); 223 | 224 | loop { 225 | tokio::select! { 226 | _ = ping_interval.tick() => { 227 | if let Err(e) = write.send(Message::Ping(vec![])).await { 228 | error!("[POLY] Failed to send ping: {}", e); 229 | break; 230 | } 231 | } 232 | 233 | msg = read.next() => { 234 | match msg { 235 | Some(Ok(Message::Text(text))) => { 236 | last_message = Instant::now(); 237 | 238 | // Try book snapshot first 239 | if let Ok(books) = serde_json::from_str::>(&text) { 240 | for book in &books { 241 | process_book(&state, book, &exec_tx, threshold_cents, &clock).await; 242 | } 243 | } 244 | // Try price change event 245 | else if let Ok(event) = serde_json::from_str::(&text) { 246 | if event.event_type.as_deref() == Some("price_change") { 247 | if let Some(changes) = &event.price_changes { 248 | for change in changes { 249 | process_price_change(&state, change, &exec_tx, threshold_cents, &clock).await; 250 | } 251 | } 252 | } 253 | } 254 | // Log unknown message types at trace level for debugging 255 | else { 256 | tracing::trace!("[POLY] Unknown WS message: {}...", &text[..text.len().min(100)]); 257 | } 258 | } 259 | Some(Ok(Message::Ping(data))) => { 260 | let _ = write.send(Message::Pong(data)).await; 261 | last_message = Instant::now(); 262 | } 263 | Some(Ok(Message::Pong(_))) => { 264 | last_message = Instant::now(); 265 | } 266 | Some(Ok(Message::Close(frame))) => { 267 | warn!("[POLY] Server closed: {:?}", frame); 268 | break; 269 | } 270 | Some(Err(e)) => { 271 | error!("[POLY] WebSocket error: {}", e); 272 | break; 273 | } 274 | None => { 275 | warn!("[POLY] Stream ended"); 276 | break; 277 | } 278 | _ => {} 279 | } 280 | } 281 | } 282 | 283 | if last_message.elapsed() > Duration::from_secs(120) { 284 | warn!("[POLY] Stale connection, reconnecting..."); 285 | break; 286 | } 287 | } 288 | 289 | Ok(()) 290 | } 291 | 292 | /// Process book snapshot 293 | #[inline] 294 | async fn process_book( 295 | state: &GlobalState, 296 | book: &BookSnapshot, 297 | exec_tx: &mpsc::Sender, 298 | threshold_cents: PriceCents, 299 | clock: &NanoClock, 300 | ) { 301 | let token_hash = fxhash_str(&book.asset_id); 302 | 303 | // Find best ask (lowest price) 304 | let (best_ask, ask_size) = book.asks.iter() 305 | .filter_map(|l| { 306 | let price = parse_price(&l.price); 307 | let size = parse_size(&l.size); 308 | if price > 0 { Some((price, size)) } else { None } 309 | }) 310 | .min_by_key(|(p, _)| *p) 311 | .unwrap_or((0, 0)); 312 | 313 | // Check if YES token 314 | if let Some(&market_id) = state.poly_yes_to_id.get(&token_hash) { 315 | let market = &state.markets[market_id as usize]; 316 | market.poly.update_yes(best_ask, ask_size); 317 | 318 | // Check arbs 319 | let arb_mask = market.check_arbs(threshold_cents); 320 | if arb_mask != 0 { 321 | send_arb_request(market_id, market, arb_mask, exec_tx, clock).await; 322 | } 323 | } 324 | // Check if NO token 325 | else if let Some(&market_id) = state.poly_no_to_id.get(&token_hash) { 326 | let 
market = &state.markets[market_id as usize]; 327 | market.poly.update_no(best_ask, ask_size); 328 | 329 | // Check arbs 330 | let arb_mask = market.check_arbs(threshold_cents); 331 | if arb_mask != 0 { 332 | send_arb_request(market_id, market, arb_mask, exec_tx, clock).await; 333 | } 334 | } 335 | } 336 | 337 | /// Process price change 338 | #[inline] 339 | async fn process_price_change( 340 | state: &GlobalState, 341 | change: &PriceChangeItem, 342 | exec_tx: &mpsc::Sender, 343 | threshold_cents: PriceCents, 344 | clock: &NanoClock, 345 | ) { 346 | // Only process ASK side updates 347 | if !matches!(change.side.as_deref(), Some("ASK" | "ask")) { 348 | return; 349 | } 350 | 351 | let Some(price_str) = &change.price else { return }; 352 | let price = parse_price(price_str); 353 | if price == 0 { return; } 354 | 355 | let token_hash = fxhash_str(&change.asset_id); 356 | 357 | // Check YES token 358 | if let Some(&market_id) = state.poly_yes_to_id.get(&token_hash) { 359 | let market = &state.markets[market_id as usize]; 360 | let (current_yes, _, current_yes_size, _) = market.poly.load(); 361 | 362 | // Only update if new price is better (lower) 363 | if price < current_yes || current_yes == 0 { 364 | // Keep existing size - it may be stale but FAK orders handle partial fills. 365 | // Size is an upper bound anyway; better to attempt arb than miss it. 366 | market.poly.update_yes(price, current_yes_size); 367 | 368 | let arb_mask = market.check_arbs(threshold_cents); 369 | if arb_mask != 0 { 370 | send_arb_request(market_id, market, arb_mask, exec_tx, clock).await; 371 | } 372 | } 373 | } 374 | // Check NO token 375 | else if let Some(&market_id) = state.poly_no_to_id.get(&token_hash) { 376 | let market = &state.markets[market_id as usize]; 377 | let (_, current_no, _, current_no_size) = market.poly.load(); 378 | 379 | if price < current_no || current_no == 0 { 380 | market.poly.update_no(price, current_no_size); 381 | 382 | let arb_mask = market.check_arbs(threshold_cents); 383 | if arb_mask != 0 { 384 | send_arb_request(market_id, market, arb_mask, exec_tx, clock).await; 385 | } 386 | } 387 | } 388 | } 389 | 390 | /// Send arb request to execution engine 391 | #[inline] 392 | async fn send_arb_request( 393 | market_id: u16, 394 | market: &crate::types::AtomicMarketState, 395 | arb_mask: u8, 396 | exec_tx: &mpsc::Sender, 397 | clock: &NanoClock, 398 | ) { 399 | let (k_yes, k_no, k_yes_size, k_no_size) = market.kalshi.load(); 400 | let (p_yes, p_no, p_yes_size, p_no_size) = market.poly.load(); 401 | 402 | // Priority order: cross-platform arbs first (more reliable) 403 | let (yes_price, no_price, yes_size, no_size, arb_type) = if arb_mask & 1 != 0 { 404 | // Poly YES + Kalshi NO 405 | (p_yes, k_no, p_yes_size, k_no_size, ArbType::PolyYesKalshiNo) 406 | } else if arb_mask & 2 != 0 { 407 | // Kalshi YES + Poly NO 408 | (k_yes, p_no, k_yes_size, p_no_size, ArbType::KalshiYesPolyNo) 409 | } else if arb_mask & 4 != 0 { 410 | // Poly only (both sides) 411 | (p_yes, p_no, p_yes_size, p_no_size, ArbType::PolyOnly) 412 | } else if arb_mask & 8 != 0 { 413 | // Kalshi only (both sides) 414 | (k_yes, k_no, k_yes_size, k_no_size, ArbType::KalshiOnly) 415 | } else { 416 | return; 417 | }; 418 | 419 | let req = FastExecutionRequest { 420 | market_id, 421 | yes_price, 422 | no_price, 423 | yes_size, 424 | no_size, 425 | arb_type, 426 | detected_ns: clock.now_ns(), 427 | }; 428 | 429 | // send! 
~~ 430 | let _ = exec_tx.try_send(req); 431 | } -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | //! Prediction Market Arbitrage Trading System 2 | //! 3 | //! A high-performance, production-ready arbitrage trading system for cross-platform 4 | //! prediction markets. This system monitors price discrepancies between Kalshi and 5 | //! Polymarket, executing risk-free arbitrage opportunities in real-time. 6 | //! 7 | //! ## Strategy 8 | //! 9 | //! The core arbitrage strategy exploits the fundamental property of prediction markets: 10 | //! YES + NO = $1.00 (guaranteed). Arbitrage opportunities exist when: 11 | //! 12 | //! ``` 13 | //! Best YES ask (Platform A) + Best NO ask (Platform B) < $1.00 14 | //! ``` 15 | //! 16 | //! ## Architecture 17 | //! 18 | //! - **Real-time price monitoring** via WebSocket connections to both platforms 19 | //! - **Lock-free orderbook cache** using atomic operations for zero-copy updates 20 | //! - **SIMD-accelerated arbitrage detection** for sub-millisecond latency 21 | //! - **Concurrent order execution** with automatic position reconciliation 22 | //! - **Circuit breaker protection** with configurable risk limits 23 | //! - **Market discovery system** with intelligent caching and incremental updates 24 | 25 | mod cache; 26 | mod circuit_breaker; 27 | mod config; 28 | mod discovery; 29 | mod execution; 30 | mod kalshi; 31 | mod polymarket; 32 | mod polymarket_clob; 33 | mod position_tracker; 34 | mod types; 35 | 36 | use anyhow::{Context, Result}; 37 | use std::sync::Arc; 38 | use tokio::sync::RwLock; 39 | use tracing::{error, info, warn}; 40 | 41 | use cache::TeamCache; 42 | use circuit_breaker::{CircuitBreaker, CircuitBreakerConfig}; 43 | use config::{ARB_THRESHOLD, ENABLED_LEAGUES, WS_RECONNECT_DELAY_SECS}; 44 | use discovery::DiscoveryClient; 45 | use execution::{ExecutionEngine, create_execution_channel, run_execution_loop}; 46 | use kalshi::{KalshiConfig, KalshiApiClient}; 47 | use polymarket_clob::{PolymarketAsyncClient, PreparedCreds, SharedAsyncClient}; 48 | use position_tracker::{PositionTracker, create_position_channel, position_writer_loop}; 49 | use types::{GlobalState, PriceCents}; 50 | 51 | /// Polymarket CLOB API host 52 | const POLY_CLOB_HOST: &str = "https://clob.polymarket.com"; 53 | /// Polygon chain ID 54 | const POLYGON_CHAIN_ID: u64 = 137; 55 | 56 | #[tokio::main] 57 | async fn main() -> Result<()> { 58 | // Initialize logging 59 | tracing_subscriber::fmt() 60 | .with_env_filter( 61 | tracing_subscriber::EnvFilter::from_default_env() 62 | .add_directive("arb_bot=info".parse().unwrap()), 63 | ) 64 | .init(); 65 | 66 | info!("🚀 Prediction Market Arbitrage System v2.0"); 67 | info!(" Profit threshold: <{:.1}¢ ({:.1}% minimum profit)", 68 | ARB_THRESHOLD * 100.0, (1.0 - ARB_THRESHOLD) * 100.0); 69 | info!(" Monitored leagues: {:?}", ENABLED_LEAGUES); 70 | 71 | // Check for dry run mode 72 | let dry_run = std::env::var("DRY_RUN").map(|v| v == "1" || v == "true").unwrap_or(true); 73 | if dry_run { 74 | info!(" Mode: DRY RUN (set DRY_RUN=0 to execute)"); 75 | } else { 76 | warn!(" Mode: LIVE EXECUTION"); 77 | } 78 | 79 | // Load Kalshi credentials 80 | let kalshi_config = KalshiConfig::from_env()?; 81 | info!("[KALSHI] API key loaded"); 82 | 83 | // Load Polymarket credentials 84 | dotenvy::dotenv().ok(); 85 | let poly_private_key = std::env::var("POLY_PRIVATE_KEY") 86 | .context("POLY_PRIVATE_KEY not set")?; 87 
| let poly_funder = std::env::var("POLY_FUNDER") 88 | .context("POLY_FUNDER not set (your wallet address)")?; 89 | 90 | // Create async Polymarket client and derive API credentials 91 | info!("[POLYMARKET] Creating async client and deriving API credentials..."); 92 | let poly_async_client = PolymarketAsyncClient::new( 93 | POLY_CLOB_HOST, 94 | POLYGON_CHAIN_ID, 95 | &poly_private_key, 96 | &poly_funder, 97 | )?; 98 | let api_creds = poly_async_client.derive_api_key(0).await?; 99 | let prepared_creds = PreparedCreds::from_api_creds(&api_creds)?; 100 | let poly_async = Arc::new(SharedAsyncClient::new(poly_async_client, prepared_creds, POLYGON_CHAIN_ID)); 101 | 102 | // Load neg_risk cache from Python script output 103 | match poly_async.load_cache(".clob_market_cache.json") { 104 | Ok(count) => info!("[POLYMARKET] Loaded {} neg_risk entries from cache", count), 105 | Err(e) => warn!("[POLYMARKET] Could not load neg_risk cache: {}", e), 106 | } 107 | 108 | info!("[POLYMARKET] Client ready for {}", &poly_funder[..10]); 109 | 110 | // Load team code mapping cache 111 | let team_cache = TeamCache::load(); 112 | info!("📂 Loaded {} team code mappings", team_cache.len()); 113 | 114 | // Create Kalshi API client 115 | let kalshi_api = Arc::new(KalshiApiClient::new(kalshi_config)); 116 | 117 | // Run discovery (with caching support) 118 | let force_discovery = std::env::var("FORCE_DISCOVERY") 119 | .map(|v| v == "1" || v == "true") 120 | .unwrap_or(false); 121 | 122 | info!("🔍 Market discovery{}...", 123 | if force_discovery { " (forced refresh)" } else { "" }); 124 | 125 | let discovery = DiscoveryClient::new( 126 | KalshiApiClient::new(KalshiConfig::from_env()?), 127 | team_cache 128 | ); 129 | 130 | let result = if force_discovery { 131 | discovery.discover_all_force(ENABLED_LEAGUES).await 132 | } else { 133 | discovery.discover_all(ENABLED_LEAGUES).await 134 | }; 135 | 136 | info!("📊 Market discovery complete:"); 137 | info!(" - Matched market pairs: {}", result.pairs.len()); 138 | 139 | if !result.errors.is_empty() { 140 | for err in &result.errors { 141 | warn!(" ⚠️ {}", err); 142 | } 143 | } 144 | 145 | if result.pairs.is_empty() { 146 | error!("No market pairs found!"); 147 | return Ok(()); 148 | } 149 | 150 | // Display discovered market pairs 151 | info!("📋 Discovered market pairs:"); 152 | for pair in &result.pairs { 153 | info!(" ✅ {} | {} | Kalshi: {}", 154 | pair.description, 155 | pair.market_type, 156 | pair.kalshi_market_ticker); 157 | } 158 | 159 | // Build global state 160 | let state = Arc::new({ 161 | let mut s = GlobalState::new(); 162 | for pair in result.pairs { 163 | s.add_pair(pair); 164 | } 165 | info!("📡 Global state initialized: tracking {} markets", s.market_count()); 166 | s 167 | }); 168 | 169 | // Initialize execution infrastructure 170 | let (exec_tx, exec_rx) = create_execution_channel(); 171 | let circuit_breaker = Arc::new(CircuitBreaker::new(CircuitBreakerConfig::from_env())); 172 | 173 | let position_tracker = Arc::new(RwLock::new(PositionTracker::new())); 174 | let (position_channel, position_rx) = create_position_channel(); 175 | 176 | tokio::spawn(position_writer_loop(position_rx, position_tracker)); 177 | 178 | let threshold_cents: PriceCents = ((ARB_THRESHOLD * 100.0).round() as u16).max(1); 179 | info!(" Execution threshold: {} cents", threshold_cents); 180 | 181 | let engine = Arc::new(ExecutionEngine::new( 182 | kalshi_api.clone(), 183 | poly_async, 184 | state.clone(), 185 | circuit_breaker.clone(), 186 | position_channel, 187 | dry_run, 188 | )); 189 | 
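// Worked example of the `threshold_cents` conversion above, with an assumed
// value (the real ARB_THRESHOLD constant lives in config.rs and is not part of
// this listing): if ARB_THRESHOLD = 0.97, then
//   threshold_cents = ((0.97 * 100.0).round() as u16).max(1) = 97.
// Judging from the heartbeat diagnostics further down, the comparison is
// fee-inclusive: an opportunity is only worth routing to the execution engine
// when YES ask + NO ask + any Kalshi taker fee comes in under 97¢, leaving
// roughly 3¢ or more of gross profit per matched contract pair.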
190 | let exec_handle = tokio::spawn(run_execution_loop(exec_rx, engine)); 191 | 192 | // === TEST MODE: Synthetic arbitrage injection === 193 | // TEST_ARB=1 to enable, TEST_ARB_TYPE=poly_yes_kalshi_no|kalshi_yes_poly_no|poly_only|kalshi_only 194 | let test_arb = std::env::var("TEST_ARB").map(|v| v == "1" || v == "true").unwrap_or(false); 195 | if test_arb { 196 | let test_state = state.clone(); 197 | let test_exec_tx = exec_tx.clone(); 198 | let test_dry_run = dry_run; 199 | 200 | // Parse arb type from environment (default: poly_yes_kalshi_no) 201 | let arb_type_str = std::env::var("TEST_ARB_TYPE").unwrap_or_else(|_| "poly_yes_kalshi_no".to_string()); 202 | 203 | tokio::spawn(async move { 204 | use types::{FastExecutionRequest, ArbType}; 205 | 206 | // Wait for WebSocket connections to establish and populate orderbooks 207 | info!("[TEST] Injecting synthetic arbitrage opportunity in 10 seconds..."); 208 | tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; 209 | 210 | // Parse arb type 211 | let arb_type = match arb_type_str.to_lowercase().as_str() { 212 | "poly_yes_kalshi_no" | "pykn" | "0" => ArbType::PolyYesKalshiNo, 213 | "kalshi_yes_poly_no" | "kypn" | "1" => ArbType::KalshiYesPolyNo, 214 | "poly_only" | "poly" | "2" => ArbType::PolyOnly, 215 | "kalshi_only" | "kalshi" | "3" => ArbType::KalshiOnly, 216 | _ => { 217 | warn!("[TEST] Unknown TEST_ARB_TYPE='{}', defaulting to PolyYesKalshiNo", arb_type_str); 218 | warn!("[TEST] Valid values: poly_yes_kalshi_no, kalshi_yes_poly_no, poly_only, kalshi_only"); 219 | ArbType::PolyYesKalshiNo 220 | } 221 | }; 222 | 223 | // Set prices based on arb type for realistic test scenarios 224 | let (yes_price, no_price, description) = match arb_type { 225 | ArbType::PolyYesKalshiNo => (40, 50, "P_yes=40¢ + K_no=50¢ + fee≈2¢ = 92¢ → 8¢ profit"), 226 | ArbType::KalshiYesPolyNo => (40, 50, "K_yes=40¢ + P_no=50¢ + fee≈2¢ = 92¢ → 8¢ profit"), 227 | ArbType::PolyOnly => (48, 50, "P_yes=48¢ + P_no=50¢ + fee=0¢ = 98¢ → 2¢ profit (NO FEES!)"), 228 | ArbType::KalshiOnly => (44, 44, "K_yes=44¢ + K_no=44¢ + fee≈4¢ = 92¢ → 8¢ profit (DOUBLE FEES)"), 229 | }; 230 | 231 | // Find first market with valid state 232 | let market_count = test_state.market_count(); 233 | for market_id in 0..market_count { 234 | if let Some(market) = test_state.get_by_id(market_id as u16) { 235 | if let Some(pair) = &market.pair { 236 | // SIZE: 1000 cents = 10 contracts (Poly $1 min requires ~3 contracts at 40¢) 237 | let fake_req = FastExecutionRequest { 238 | market_id: market_id as u16, 239 | yes_price, 240 | no_price, 241 | yes_size: 1000, // 1000¢ = 10 contracts 242 | no_size: 1000, // 1000¢ = 10 contracts 243 | arb_type, 244 | detected_ns: 0, 245 | }; 246 | 247 | warn!("[TEST] 🧪 Injecting synthetic {:?} arbitrage for: {}", arb_type, pair.description); 248 | warn!("[TEST] Scenario: {}", description); 249 | warn!("[TEST] Position size capped to 10 contracts for safety"); 250 | warn!("[TEST] Execution mode: DRY_RUN={}", test_dry_run); 251 | 252 | if let Err(e) = test_exec_tx.send(fake_req).await { 253 | error!("[TEST] Failed to send fake arb: {}", e); 254 | } 255 | break; 256 | } 257 | } 258 | } 259 | }); 260 | } 261 | 262 | // Initialize Kalshi WebSocket connection (config reused on reconnects) 263 | let kalshi_state = state.clone(); 264 | let kalshi_exec_tx = exec_tx.clone(); 265 | let kalshi_threshold = threshold_cents; 266 | let kalshi_ws_config = KalshiConfig::from_env()?; 267 | let kalshi_handle = tokio::spawn(async move { 268 | loop { 269 | if let Err(e) = 
kalshi::run_ws(&kalshi_ws_config, kalshi_state.clone(), kalshi_exec_tx.clone(), kalshi_threshold).await { 270 | error!("[KALSHI] WebSocket disconnected: {} - reconnecting...", e); 271 | } 272 | tokio::time::sleep(tokio::time::Duration::from_secs(WS_RECONNECT_DELAY_SECS)).await; 273 | } 274 | }); 275 | 276 | // Initialize Polymarket WebSocket connection 277 | let poly_state = state.clone(); 278 | let poly_exec_tx = exec_tx.clone(); 279 | let poly_threshold = threshold_cents; 280 | let poly_handle = tokio::spawn(async move { 281 | loop { 282 | if let Err(e) = polymarket::run_ws(poly_state.clone(), poly_exec_tx.clone(), poly_threshold).await { 283 | error!("[POLYMARKET] WebSocket disconnected: {} - reconnecting...", e); 284 | } 285 | tokio::time::sleep(tokio::time::Duration::from_secs(WS_RECONNECT_DELAY_SECS)).await; 286 | } 287 | }); 288 | 289 | // System health monitoring and arbitrage diagnostics 290 | let heartbeat_state = state.clone(); 291 | let heartbeat_threshold = threshold_cents; 292 | let heartbeat_handle = tokio::spawn(async move { 293 | use crate::types::kalshi_fee_cents; 294 | let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(60)); 295 | loop { 296 | interval.tick().await; 297 | let market_count = heartbeat_state.market_count(); 298 | let mut with_kalshi = 0; 299 | let mut with_poly = 0; 300 | let mut with_both = 0; 301 | // Track best arbitrage opportunity: (total_cost, market_id, p_yes, k_no, k_yes, p_no, fee, is_poly_yes_kalshi_no) 302 | let mut best_arb: Option<(u16, u16, u16, u16, u16, u16, u16, bool)> = None; 303 | 304 | for market in heartbeat_state.markets.iter().take(market_count) { 305 | let (k_yes, k_no, _, _) = market.kalshi.load(); 306 | let (p_yes, p_no, _, _) = market.poly.load(); 307 | let has_k = k_yes > 0 && k_no > 0; 308 | let has_p = p_yes > 0 && p_no > 0; 309 | if k_yes > 0 || k_no > 0 { with_kalshi += 1; } 310 | if p_yes > 0 || p_no > 0 { with_poly += 1; } 311 | if has_k && has_p { 312 | with_both += 1; 313 | 314 | let fee1 = kalshi_fee_cents(k_no); 315 | let cost1 = p_yes + k_no + fee1; 316 | 317 | let fee2 = kalshi_fee_cents(k_yes); 318 | let cost2 = k_yes + fee2 + p_no; 319 | 320 | let (best_cost, best_fee, is_poly_yes) = if cost1 <= cost2 { 321 | (cost1, fee1, true) 322 | } else { 323 | (cost2, fee2, false) 324 | }; 325 | 326 | if best_arb.is_none() || best_cost < best_arb.as_ref().unwrap().0 { 327 | best_arb = Some((best_cost, market.market_id, p_yes, k_no, k_yes, p_no, best_fee, is_poly_yes)); 328 | } 329 | } 330 | } 331 | 332 | info!("💓 System heartbeat | Markets: {} total, {} with Kalshi prices, {} with Polymarket prices, {} with both | threshold={}¢", 333 | market_count, with_kalshi, with_poly, with_both, heartbeat_threshold); 334 | 335 | if let Some((cost, market_id, p_yes, k_no, k_yes, p_no, fee, is_poly_yes)) = best_arb { 336 | let gap = cost as i16 - heartbeat_threshold as i16; 337 | let desc = heartbeat_state.get_by_id(market_id) 338 | .and_then(|m| m.pair.as_ref()) 339 | .map(|p| &*p.description) 340 | .unwrap_or("Unknown"); 341 | let leg_breakdown = if is_poly_yes { 342 | format!("P_yes({}¢) + K_no({}¢) + K_fee({}¢) = {}¢", p_yes, k_no, fee, cost) 343 | } else { 344 | format!("K_yes({}¢) + P_no({}¢) + K_fee({}¢) = {}¢", k_yes, p_no, fee, cost) 345 | }; 346 | if gap <= 10 { 347 | info!(" 📊 Best opportunity: {} | {} | gap={:+}¢ | [Poly_yes={}¢ Kalshi_no={}¢ Kalshi_yes={}¢ Poly_no={}¢]", 348 | desc, leg_breakdown, gap, p_yes, k_no, k_yes, p_no); 349 | } else { 350 | info!(" 📊 Best opportunity: {} | {} | gap={:+}¢ (market 
efficient)", 351 | desc, leg_breakdown, gap); 352 | } 353 | } else if with_both == 0 { 354 | warn!(" ⚠️ No markets with both Kalshi and Polymarket prices - verify WebSocket connections"); 355 | } 356 | } 357 | }); 358 | 359 | // Main event loop - run until termination 360 | info!("✅ All systems operational - entering main event loop"); 361 | let _ = tokio::join!(kalshi_handle, poly_handle, heartbeat_handle, exec_handle); 362 | 363 | Ok(()) 364 | } 365 | -------------------------------------------------------------------------------- /src/position_tracker.rs: -------------------------------------------------------------------------------- 1 | //! Position tracking and P&L calculation system. 2 | //! 3 | //! This module tracks all positions across both platforms, calculates cost basis, 4 | //! and maintains real-time profit and loss calculations. 5 | 6 | use anyhow::Result; 7 | use serde::{Deserialize, Serialize}; 8 | use std::collections::HashMap; 9 | use std::path::Path; 10 | use std::sync::Arc; 11 | use std::time::Duration; 12 | use tokio::sync::{mpsc, RwLock}; 13 | use tracing::{info, warn}; 14 | 15 | const POSITION_FILE: &str = "positions.json"; 16 | 17 | /// A single position leg on one platform 18 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 19 | pub struct PositionLeg { 20 | /// Number of contracts held 21 | pub contracts: f64, 22 | /// Total cost paid (in dollars) 23 | pub cost_basis: f64, 24 | /// Average price per contract 25 | pub avg_price: f64, 26 | } 27 | 28 | #[allow(dead_code)] 29 | impl PositionLeg { 30 | pub fn add(&mut self, contracts: f64, price: f64) { 31 | let new_cost = contracts * price; 32 | self.cost_basis += new_cost; 33 | self.contracts += contracts; 34 | if self.contracts > 0.0 { 35 | self.avg_price = self.cost_basis / self.contracts; 36 | } 37 | } 38 | 39 | /// Unrealized P&L based on current market price 40 | pub fn unrealized_pnl(&self, current_price: f64) -> f64 { 41 | let current_value = self.contracts * current_price; 42 | current_value - self.cost_basis 43 | } 44 | 45 | /// Value if this position wins (pays $1 per contract) 46 | pub fn value_if_win(&self) -> f64 { 47 | self.contracts * 1.0 48 | } 49 | 50 | /// Profit if this position wins 51 | pub fn profit_if_win(&self) -> f64 { 52 | self.value_if_win() - self.cost_basis 53 | } 54 | } 55 | 56 | /// A paired position (arb position spans both platforms) 57 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 58 | pub struct ArbPosition { 59 | /// Market identifier (Kalshi ticker) 60 | pub market_id: String, 61 | 62 | /// Description for logging 63 | pub description: String, 64 | 65 | /// Kalshi YES position 66 | pub kalshi_yes: PositionLeg, 67 | 68 | /// Kalshi NO position 69 | pub kalshi_no: PositionLeg, 70 | 71 | /// Polymarket YES position 72 | pub poly_yes: PositionLeg, 73 | 74 | /// Polymarket NO position 75 | pub poly_no: PositionLeg, 76 | 77 | /// Total fees paid (Kalshi fees) 78 | pub total_fees: f64, 79 | 80 | /// Timestamp when position was opened 81 | pub opened_at: String, 82 | 83 | /// Status: "open", "closed", "resolved" 84 | pub status: String, 85 | 86 | /// Realized P&L (set when position closes/resolves) 87 | pub realized_pnl: Option, 88 | } 89 | 90 | #[allow(dead_code)] 91 | impl ArbPosition { 92 | pub fn new(market_id: &str, description: &str) -> Self { 93 | Self { 94 | market_id: market_id.to_string(), 95 | description: description.to_string(), 96 | status: "open".to_string(), 97 | opened_at: chrono::Utc::now().to_rfc3339(), 98 | ..Default::default() 99 | } 100 | } 
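// Worked example of how the accessors below combine (it mirrors the unit tests
// at the bottom of this file): buying 10 Polymarket YES contracts at 45¢ and
// 10 Kalshi NO contracts at 50¢ gives
//   total_cost()        = 10 * 0.45 + 10 * 0.50 = $9.50 (plus any fees),
//   matched_contracts() = min(10 YES, 10 NO)    = 10,
//   guaranteed_profit() = 10.00 - 9.50          = $0.50,
// and that $0.50 is locked in regardless of which side resolves, because one
// of the two legs always pays out $1 per contract.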
101 | 102 | /// Total contracts across all legs 103 | pub fn total_contracts(&self) -> f64 { 104 | self.kalshi_yes.contracts + self.kalshi_no.contracts + 105 | self.poly_yes.contracts + self.poly_no.contracts 106 | } 107 | 108 | /// Total cost basis across all legs 109 | pub fn total_cost(&self) -> f64 { 110 | self.kalshi_yes.cost_basis + self.kalshi_no.cost_basis + 111 | self.poly_yes.cost_basis + self.poly_no.cost_basis + 112 | self.total_fees 113 | } 114 | 115 | /// For a proper arb (YES on one platform + NO on other), one side always wins 116 | /// This calculates the guaranteed profit assuming the arb is balanced 117 | pub fn guaranteed_profit(&self) -> f64 { 118 | // In a balanced arb: we hold equal YES on platform A and NO on platform B 119 | // Regardless of outcome, we get $1 per contract pair 120 | let balanced_contracts = self.matched_contracts(); 121 | balanced_contracts - self.total_cost() 122 | } 123 | 124 | /// Number of matched contract pairs (min of YES and NO across platforms) 125 | pub fn matched_contracts(&self) -> f64 { 126 | let yes_total = self.kalshi_yes.contracts + self.poly_yes.contracts; 127 | let no_total = self.kalshi_no.contracts + self.poly_no.contracts; 128 | yes_total.min(no_total) 129 | } 130 | 131 | /// Unmatched exposure (contracts without offsetting position) 132 | pub fn unmatched_exposure(&self) -> f64 { 133 | let yes_total = self.kalshi_yes.contracts + self.poly_yes.contracts; 134 | let no_total = self.kalshi_no.contracts + self.poly_no.contracts; 135 | (yes_total - no_total).abs() 136 | } 137 | 138 | /// Mark position as resolved with outcome 139 | pub fn resolve(&mut self, outcome_yes_won: bool) { 140 | let payout = if outcome_yes_won { 141 | // YES won: Kalshi YES + Poly YES pay out 142 | self.kalshi_yes.contracts + self.poly_yes.contracts 143 | } else { 144 | // NO won: Kalshi NO + Poly NO pay out 145 | self.kalshi_no.contracts + self.poly_no.contracts 146 | }; 147 | 148 | self.realized_pnl = Some(payout - self.total_cost()); 149 | self.status = "resolved".to_string(); 150 | } 151 | } 152 | 153 | /// Summary of all positions 154 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 155 | #[allow(dead_code)] 156 | pub struct PositionSummary { 157 | /// Total cost basis across all open positions 158 | pub total_cost_basis: f64, 159 | 160 | /// Total guaranteed profit from matched arbs 161 | pub total_guaranteed_profit: f64, 162 | 163 | /// Total unmatched exposure (risk) 164 | pub total_unmatched_exposure: f64, 165 | 166 | /// Total realized P&L from closed/resolved positions 167 | pub realized_pnl: f64, 168 | 169 | /// Number of open positions 170 | pub open_positions: usize, 171 | 172 | /// Number of resolved positions 173 | pub resolved_positions: usize, 174 | 175 | /// Total contracts held 176 | pub total_contracts: f64, 177 | } 178 | 179 | /// Position tracker with persistence 180 | #[derive(Debug, Serialize, Deserialize)] 181 | pub struct PositionTracker { 182 | /// All positions keyed by market_id 183 | positions: HashMap, 184 | 185 | /// Daily realized P&L 186 | pub daily_realized_pnl: f64, 187 | 188 | /// Daily trading date (for reset) 189 | pub trading_date: String, 190 | 191 | /// Cumulative all-time P&L 192 | pub all_time_pnl: f64, 193 | } 194 | 195 | /// Data structure for serialization 196 | #[derive(Serialize)] 197 | struct SaveData { 198 | positions: HashMap, 199 | daily_realized_pnl: f64, 200 | trading_date: String, 201 | all_time_pnl: f64, 202 | } 203 | 204 | impl Default for PositionTracker { 205 | fn default() -> Self { 206 
| Self::new() 207 | } 208 | } 209 | 210 | #[allow(dead_code)] 211 | impl PositionTracker { 212 | pub fn new() -> Self { 213 | Self { 214 | positions: HashMap::new(), 215 | daily_realized_pnl: 0.0, 216 | trading_date: today_string(), 217 | all_time_pnl: 0.0, 218 | } 219 | } 220 | 221 | /// Load from file or create new 222 | pub fn load() -> Self { 223 | Self::load_from(POSITION_FILE) 224 | } 225 | 226 | pub fn load_from>(path: P) -> Self { 227 | match std::fs::read_to_string(path.as_ref()) { 228 | Ok(contents) => { 229 | match serde_json::from_str::(&contents) { 230 | Ok(mut tracker) => { 231 | // Check if we need to reset daily P&L 232 | let today = today_string(); 233 | if tracker.trading_date != today { 234 | info!("[POSITIONS] New trading day, resetting daily P&L"); 235 | tracker.daily_realized_pnl = 0.0; 236 | tracker.trading_date = today; 237 | } 238 | info!("[POSITIONS] Loaded {} positions from {:?}", 239 | tracker.positions.len(), path.as_ref()); 240 | tracker 241 | } 242 | Err(e) => { 243 | warn!("[POSITIONS] Failed to parse positions file: {}", e); 244 | Self::new() 245 | } 246 | } 247 | } 248 | Err(_) => { 249 | info!("[POSITIONS] No positions file found, starting fresh"); 250 | Self::new() 251 | } 252 | } 253 | } 254 | 255 | /// Save to file 256 | pub fn save(&self) -> Result<()> { 257 | self.save_to(POSITION_FILE) 258 | } 259 | 260 | pub fn save_to>(&self, path: P) -> Result<()> { 261 | let json = serde_json::to_string_pretty(self)?; 262 | std::fs::write(path, json)?; 263 | Ok(()) 264 | } 265 | 266 | /// Save positions 267 | pub fn save_async(&self) { 268 | // Clone data for serialization 269 | let data = SaveData { 270 | positions: self.positions.clone(), 271 | daily_realized_pnl: self.daily_realized_pnl, 272 | trading_date: self.trading_date.clone(), 273 | all_time_pnl: self.all_time_pnl, 274 | }; 275 | // Try to spawn on runtime; if no runtime, save synchronously 276 | if tokio::runtime::Handle::try_current().is_ok() { 277 | tokio::spawn(async move { 278 | if let Ok(json) = serde_json::to_string_pretty(&data) { 279 | let _ = tokio::fs::write(POSITION_FILE, json).await; 280 | } 281 | }); 282 | } else if let Ok(json) = serde_json::to_string_pretty(&data) { 283 | let _ = std::fs::write(POSITION_FILE, json); 284 | } 285 | } 286 | 287 | /// Record a fill 288 | pub fn record_fill(&mut self, fill: &FillRecord) { 289 | self.record_fill_internal(fill); 290 | self.save_async(); 291 | } 292 | 293 | /// Record a fill without saving 294 | pub fn record_fill_internal(&mut self, fill: &FillRecord) { 295 | let position = self.positions 296 | .entry(fill.market_id.clone()) 297 | .or_insert_with(|| ArbPosition::new(&fill.market_id, &fill.description)); 298 | 299 | match (fill.platform.as_str(), fill.side.as_str()) { 300 | ("kalshi", "yes") => position.kalshi_yes.add(fill.contracts, fill.price), 301 | ("kalshi", "no") => position.kalshi_no.add(fill.contracts, fill.price), 302 | ("polymarket", "yes") => position.poly_yes.add(fill.contracts, fill.price), 303 | ("polymarket", "no") => position.poly_no.add(fill.contracts, fill.price), 304 | _ => warn!("[POSITIONS] Unknown platform/side: {}/{}", fill.platform, fill.side), 305 | } 306 | 307 | position.total_fees += fill.fees; 308 | 309 | info!("[POSITIONS] Recorded fill: {} {} {} @{:.1}¢ x{:.0} (fees: ${:.4})", 310 | fill.platform, fill.side, fill.market_id, 311 | fill.price * 100.0, fill.contracts, fill.fees); 312 | } 313 | 314 | /// Get or create position for a market 315 | pub fn get_or_create(&mut self, market_id: &str, description: &str) -> 
&mut ArbPosition { 316 | self.positions 317 | .entry(market_id.to_string()) 318 | .or_insert_with(|| ArbPosition::new(market_id, description)) 319 | } 320 | 321 | /// Get position (if exists) 322 | pub fn get(&self, market_id: &str) -> Option<&ArbPosition> { 323 | self.positions.get(market_id) 324 | } 325 | 326 | /// Mark a position as resolved 327 | pub fn resolve_position(&mut self, market_id: &str, yes_won: bool) -> Option { 328 | if let Some(position) = self.positions.get_mut(market_id) { 329 | position.resolve(yes_won); 330 | let pnl = position.realized_pnl.unwrap_or(0.0); 331 | 332 | self.daily_realized_pnl += pnl; 333 | self.all_time_pnl += pnl; 334 | 335 | info!("[POSITIONS] Resolved {}: {} won, P&L: ${:.2}", 336 | market_id, if yes_won { "YES" } else { "NO" }, pnl); 337 | 338 | self.save_async(); 339 | Some(pnl) 340 | } else { 341 | None 342 | } 343 | } 344 | 345 | /// Get summary statistics 346 | pub fn summary(&self) -> PositionSummary { 347 | let mut summary = PositionSummary::default(); 348 | 349 | for position in self.positions.values() { 350 | match position.status.as_str() { 351 | "open" => { 352 | summary.open_positions += 1; 353 | summary.total_cost_basis += position.total_cost(); 354 | summary.total_guaranteed_profit += position.guaranteed_profit(); 355 | summary.total_unmatched_exposure += position.unmatched_exposure(); 356 | summary.total_contracts += position.total_contracts(); 357 | } 358 | "resolved" => { 359 | summary.resolved_positions += 1; 360 | summary.realized_pnl += position.realized_pnl.unwrap_or(0.0); 361 | } 362 | _ => {} 363 | } 364 | } 365 | 366 | summary 367 | } 368 | 369 | /// Get all open positions 370 | pub fn open_positions(&self) -> Vec<&ArbPosition> { 371 | self.positions.values() 372 | .filter(|p| p.status == "open") 373 | .collect() 374 | } 375 | 376 | /// Daily P&L (realized only) 377 | pub fn daily_pnl(&self) -> f64 { 378 | self.daily_realized_pnl 379 | } 380 | 381 | /// Reset daily counters (call at midnight) 382 | pub fn reset_daily(&mut self) { 383 | self.daily_realized_pnl = 0.0; 384 | self.trading_date = today_string(); 385 | self.save_async(); 386 | } 387 | } 388 | 389 | /// Record of a single fill 390 | #[derive(Debug, Clone)] 391 | pub struct FillRecord { 392 | pub market_id: String, 393 | pub description: String, 394 | pub platform: String, // "kalshi" or "polymarket" 395 | pub side: String, // "yes" or "no" 396 | pub contracts: f64, 397 | pub price: f64, 398 | pub fees: f64, 399 | #[allow(dead_code)] 400 | pub order_id: String, 401 | #[allow(dead_code)] 402 | pub timestamp: String, 403 | } 404 | 405 | impl FillRecord { 406 | pub fn new( 407 | market_id: &str, 408 | description: &str, 409 | platform: &str, 410 | side: &str, 411 | contracts: f64, 412 | price: f64, 413 | fees: f64, 414 | order_id: &str, 415 | ) -> Self { 416 | Self { 417 | market_id: market_id.to_string(), 418 | description: description.to_string(), 419 | platform: platform.to_string(), 420 | side: side.to_string(), 421 | contracts, 422 | price, 423 | fees, 424 | order_id: order_id.to_string(), 425 | timestamp: chrono::Utc::now().to_rfc3339(), 426 | } 427 | } 428 | } 429 | 430 | #[allow(dead_code)] 431 | pub type SharedPositionTracker = Arc>; 432 | 433 | #[allow(dead_code)] 434 | pub fn create_position_tracker() -> SharedPositionTracker { 435 | Arc::new(RwLock::new(PositionTracker::load())) 436 | } 437 | 438 | fn today_string() -> String { 439 | chrono::Utc::now().format("%Y-%m-%d").to_string() 440 | } 441 | 442 | #[derive(Clone)] 443 | pub struct PositionChannel { 444 
| tx: mpsc::UnboundedSender, 445 | } 446 | 447 | impl PositionChannel { 448 | pub fn new(tx: mpsc::UnboundedSender) -> Self { 449 | Self { tx } 450 | } 451 | 452 | #[inline] 453 | pub fn record_fill(&self, fill: FillRecord) { 454 | let _ = self.tx.send(fill); 455 | } 456 | } 457 | 458 | pub fn create_position_channel() -> (PositionChannel, mpsc::UnboundedReceiver) { 459 | let (tx, rx) = mpsc::unbounded_channel(); 460 | (PositionChannel::new(tx), rx) 461 | } 462 | 463 | pub async fn position_writer_loop( 464 | mut rx: mpsc::UnboundedReceiver, 465 | tracker: Arc>, 466 | ) { 467 | let mut batch = Vec::with_capacity(16); 468 | let mut interval = tokio::time::interval(Duration::from_millis(100)); 469 | 470 | loop { 471 | tokio::select! { 472 | biased; 473 | 474 | Some(fill) = rx.recv() => { 475 | batch.push(fill); 476 | if batch.len() >= 16 { 477 | let mut guard = tracker.write().await; 478 | for fill in batch.drain(..) { 479 | guard.record_fill_internal(&fill); 480 | } 481 | guard.save_async(); 482 | } 483 | } 484 | _ = interval.tick() => { 485 | if !batch.is_empty() { 486 | let mut guard = tracker.write().await; 487 | for fill in batch.drain(..) { 488 | guard.record_fill_internal(&fill); 489 | } 490 | guard.save_async(); 491 | } 492 | } 493 | } 494 | } 495 | } 496 | 497 | #[cfg(test)] 498 | mod tests { 499 | use super::*; 500 | 501 | #[test] 502 | fn test_position_leg() { 503 | let mut leg = PositionLeg::default(); 504 | leg.add(10.0, 0.45); // Buy 10 contracts at 45¢ 505 | 506 | assert_eq!(leg.contracts, 10.0); 507 | assert!((leg.cost_basis - 4.50).abs() < 0.001); 508 | assert!((leg.avg_price - 0.45).abs() < 0.001); 509 | 510 | // Profit if this leg wins 511 | assert!((leg.profit_if_win() - 5.50).abs() < 0.001); // $10 payout - $4.50 cost 512 | } 513 | 514 | #[test] 515 | fn test_arb_position_guaranteed_profit() { 516 | let mut pos = ArbPosition::new("TEST-MARKET", "Test"); 517 | 518 | // Buy 10 YES on Poly at 45¢ 519 | pos.poly_yes.add(10.0, 0.45); 520 | 521 | // Buy 10 NO on Kalshi at 50¢ 522 | pos.kalshi_no.add(10.0, 0.50); 523 | 524 | // Total cost: $4.50 + $5.00 = $9.50 525 | // Guaranteed payout: $10.00 (one side wins) 526 | // Guaranteed profit: $0.50 527 | 528 | assert!((pos.total_cost() - 9.50).abs() < 0.001); 529 | assert!((pos.matched_contracts() - 10.0).abs() < 0.001); 530 | assert!((pos.guaranteed_profit() - 0.50).abs() < 0.001); 531 | assert!((pos.unmatched_exposure() - 0.0).abs() < 0.001); 532 | } 533 | 534 | #[test] 535 | fn test_unmatched_exposure() { 536 | let mut pos = ArbPosition::new("TEST-MARKET", "Test"); 537 | 538 | // Buy 10 YES on Poly 539 | pos.poly_yes.add(10.0, 0.45); 540 | 541 | // Buy only 8 NO on Kalshi (partial fill) 542 | pos.kalshi_no.add(8.0, 0.50); 543 | 544 | // Matched: 8, Unmatched: 2 545 | assert!((pos.matched_contracts() - 8.0).abs() < 0.001); 546 | assert!((pos.unmatched_exposure() - 2.0).abs() < 0.001); 547 | } 548 | 549 | #[test] 550 | fn test_resolution() { 551 | let mut pos = ArbPosition::new("TEST-MARKET", "Test"); 552 | pos.poly_yes.add(10.0, 0.45); 553 | pos.kalshi_no.add(10.0, 0.50); 554 | 555 | // YES wins 556 | pos.resolve(true); 557 | 558 | // Payout: 10 (poly_yes wins) 559 | // Cost: 9.50 560 | // P&L: +0.50 561 | assert!((pos.realized_pnl.unwrap() - 0.50).abs() < 0.001); 562 | assert_eq!(pos.status, "resolved"); 563 | } 564 | } -------------------------------------------------------------------------------- /src/kalshi.rs: -------------------------------------------------------------------------------- 1 | //! 
Kalshi platform integration client. 2 | //! 3 | //! This module provides REST API and WebSocket clients for interacting with 4 | //! the Kalshi prediction market platform, including order execution and 5 | //! real-time price feed management. 6 | 7 | use anyhow::{Context, Result}; 8 | use base64::{engine::general_purpose::STANDARD as BASE64, Engine}; 9 | use futures_util::{SinkExt, StreamExt}; 10 | use pkcs1::DecodeRsaPrivateKey; 11 | use rsa::{ 12 | pss::SigningKey, 13 | sha2::Sha256, 14 | signature::{RandomizedSigner, SignatureEncoding}, 15 | RsaPrivateKey, 16 | }; 17 | use serde::{Deserialize, Serialize}; 18 | use std::sync::Arc; 19 | use std::time::{Duration, SystemTime, UNIX_EPOCH}; 20 | use tokio::sync::mpsc; 21 | use tokio_tungstenite::{connect_async, tungstenite::{http::Request, Message}}; 22 | use tracing::{debug, error, info}; 23 | 24 | use crate::config::{KALSHI_WS_URL, KALSHI_API_BASE, KALSHI_API_DELAY_MS}; 25 | use crate::execution::NanoClock; 26 | use crate::types::{ 27 | KalshiEventsResponse, KalshiMarketsResponse, KalshiEvent, KalshiMarket, 28 | GlobalState, FastExecutionRequest, ArbType, PriceCents, SizeCents, fxhash_str, 29 | }; 30 | 31 | // === Order Types === 32 | 33 | use std::borrow::Cow; 34 | use std::fmt::Write; 35 | use arrayvec::ArrayString; 36 | 37 | #[derive(Debug, Clone, Serialize)] 38 | pub struct KalshiOrderRequest<'a> { 39 | pub ticker: Cow<'a, str>, 40 | pub action: &'static str, 41 | pub side: &'static str, 42 | #[serde(rename = "type")] 43 | pub order_type: &'static str, 44 | pub count: i64, 45 | #[serde(skip_serializing_if = "Option::is_none")] 46 | pub yes_price: Option, 47 | #[serde(skip_serializing_if = "Option::is_none")] 48 | pub no_price: Option, 49 | pub client_order_id: Cow<'a, str>, 50 | #[serde(skip_serializing_if = "Option::is_none")] 51 | pub expiration_ts: Option, 52 | #[serde(skip_serializing_if = "Option::is_none")] 53 | pub time_in_force: Option<&'static str>, 54 | } 55 | 56 | impl<'a> KalshiOrderRequest<'a> { 57 | /// Create an IOC (immediate-or-cancel) buy order 58 | pub fn ioc_buy(ticker: Cow<'a, str>, side: &'static str, price_cents: i64, count: i64, client_order_id: Cow<'a, str>) -> Self { 59 | let (yes_price, no_price) = if side == "yes" { 60 | (Some(price_cents), None) 61 | } else { 62 | (None, Some(price_cents)) 63 | }; 64 | 65 | Self { 66 | ticker, 67 | action: "buy", 68 | side, 69 | order_type: "limit", 70 | count, 71 | yes_price, 72 | no_price, 73 | client_order_id, 74 | expiration_ts: None, 75 | time_in_force: Some("immediate_or_cancel"), 76 | } 77 | } 78 | 79 | /// Create an IOC (immediate-or-cancel) sell order 80 | pub fn ioc_sell(ticker: Cow<'a, str>, side: &'static str, price_cents: i64, count: i64, client_order_id: Cow<'a, str>) -> Self { 81 | let (yes_price, no_price) = if side == "yes" { 82 | (Some(price_cents), None) 83 | } else { 84 | (None, Some(price_cents)) 85 | }; 86 | 87 | Self { 88 | ticker, 89 | action: "sell", 90 | side, 91 | order_type: "limit", 92 | count, 93 | yes_price, 94 | no_price, 95 | client_order_id, 96 | expiration_ts: None, 97 | time_in_force: Some("immediate_or_cancel"), 98 | } 99 | } 100 | } 101 | 102 | #[derive(Debug, Clone, Deserialize)] 103 | pub struct KalshiOrderResponse { 104 | pub order: KalshiOrderDetails, 105 | } 106 | 107 | #[allow(dead_code)] 108 | #[derive(Debug, Clone, Deserialize)] 109 | pub struct KalshiOrderDetails { 110 | pub order_id: String, 111 | pub ticker: String, 112 | pub status: String, // "resting", "canceled", "executed", "pending" 113 | #[serde(default)] 114 | pub 
remaining_count: Option, 115 | #[serde(default)] 116 | pub queue_position: Option, 117 | pub action: String, 118 | pub side: String, 119 | #[serde(rename = "type")] 120 | pub order_type: String, 121 | pub yes_price: Option, 122 | pub no_price: Option, 123 | pub created_time: Option, 124 | #[serde(default)] 125 | pub taker_fill_count: Option, 126 | #[serde(default)] 127 | pub maker_fill_count: Option, 128 | #[serde(default)] 129 | pub place_count: Option, 130 | #[serde(default)] 131 | pub taker_fill_cost: Option, 132 | #[serde(default)] 133 | pub maker_fill_cost: Option, 134 | } 135 | 136 | #[allow(dead_code)] 137 | impl KalshiOrderDetails { 138 | /// Total filled contracts 139 | pub fn filled_count(&self) -> i64 { 140 | self.taker_fill_count.unwrap_or(0) + self.maker_fill_count.unwrap_or(0) 141 | } 142 | 143 | /// Check if order was fully filled 144 | pub fn is_filled(&self) -> bool { 145 | self.status == "executed" || self.remaining_count == Some(0) 146 | } 147 | 148 | /// Check if order was partially filled 149 | pub fn is_partial(&self) -> bool { 150 | self.filled_count() > 0 && !self.is_filled() 151 | } 152 | } 153 | 154 | // === Kalshi Auth Config === 155 | 156 | pub struct KalshiConfig { 157 | pub api_key_id: String, 158 | pub private_key: RsaPrivateKey, 159 | } 160 | 161 | impl KalshiConfig { 162 | pub fn from_env() -> Result { 163 | dotenvy::dotenv().ok(); 164 | let api_key_id = std::env::var("KALSHI_API_KEY_ID").context("KALSHI_API_KEY_ID not set")?; 165 | // Support both KALSHI_PRIVATE_KEY_PATH and KALSHI_PRIVATE_KEY_FILE for compatibility 166 | let key_path = std::env::var("KALSHI_PRIVATE_KEY_PATH") 167 | .or_else(|_| std::env::var("KALSHI_PRIVATE_KEY_FILE")) 168 | .unwrap_or_else(|_| "kalshi_private_key.txt".to_string()); 169 | let private_key_pem = std::fs::read_to_string(&key_path) 170 | .with_context(|| format!("Failed to read private key from {}", key_path))? 
171 | .trim() 172 | .to_owned(); 173 | let private_key = RsaPrivateKey::from_pkcs1_pem(&private_key_pem) 174 | .context("Failed to parse private key PEM")?; 175 | Ok(Self { api_key_id, private_key }) 176 | } 177 | 178 | pub fn sign(&self, message: &str) -> Result { 179 | tracing::debug!("[KALSHI-DEBUG] Signing message: {}", message); 180 | let signing_key = SigningKey::::new(self.private_key.clone()); 181 | let signature = signing_key.sign_with_rng(&mut rand::thread_rng(), message.as_bytes()); 182 | let sig_b64 = BASE64.encode(signature.to_bytes()); 183 | tracing::debug!("[KALSHI-DEBUG] Signature (first 50 chars): {}...", &sig_b64[..50.min(sig_b64.len())]); 184 | Ok(sig_b64) 185 | } 186 | } 187 | 188 | // === Kalshi REST API Client === 189 | 190 | /// Timeout for order requests (shorter than general API timeout) 191 | const ORDER_TIMEOUT: Duration = Duration::from_secs(5); 192 | 193 | use std::sync::atomic::{AtomicU32, Ordering}; 194 | 195 | /// Global order counter for unique client_order_id generation 196 | static ORDER_COUNTER: AtomicU32 = AtomicU32::new(0); 197 | 198 | pub struct KalshiApiClient { 199 | http: reqwest::Client, 200 | pub config: KalshiConfig, 201 | } 202 | 203 | impl KalshiApiClient { 204 | pub fn new(config: KalshiConfig) -> Self { 205 | Self { 206 | http: reqwest::Client::builder() 207 | .timeout(Duration::from_secs(10)) 208 | .build() 209 | .expect("Failed to build HTTP client"), 210 | config, 211 | } 212 | } 213 | 214 | #[inline] 215 | fn next_order_id() -> ArrayString<24> { 216 | let counter = ORDER_COUNTER.fetch_add(1, Ordering::Relaxed); 217 | let ts = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); 218 | let mut buf = ArrayString::<24>::new(); 219 | let _ = write!(&mut buf, "a{}{}", ts, counter); 220 | buf 221 | } 222 | 223 | /// Generic authenticated GET request with retry on rate limit 224 | async fn get(&self, path: &str) -> Result { 225 | let mut retries = 0; 226 | const MAX_RETRIES: u32 = 5; 227 | 228 | loop { 229 | let url = format!("{}{}", KALSHI_API_BASE, path); 230 | let timestamp_ms = SystemTime::now() 231 | .duration_since(UNIX_EPOCH) 232 | .unwrap() 233 | .as_millis() as u64; 234 | // Kalshi signature uses FULL path including /trade-api/v2 prefix 235 | let full_path = format!("/trade-api/v2{}", path); 236 | let signature = self.config.sign(&format!("{}GET{}", timestamp_ms, full_path))?; 237 | 238 | let resp = self.http 239 | .get(&url) 240 | .header("KALSHI-ACCESS-KEY", &self.config.api_key_id) 241 | .header("KALSHI-ACCESS-SIGNATURE", &signature) 242 | .header("KALSHI-ACCESS-TIMESTAMP", timestamp_ms.to_string()) 243 | .send() 244 | .await?; 245 | 246 | let status = resp.status(); 247 | 248 | // Handle rate limit with exponential backoff 249 | if status == reqwest::StatusCode::TOO_MANY_REQUESTS { 250 | retries += 1; 251 | if retries > MAX_RETRIES { 252 | anyhow::bail!("Kalshi API rate limited after {} retries", MAX_RETRIES); 253 | } 254 | let backoff_ms = 2000 * (1 << retries); // 4s, 8s, 16s, 32s, 64s 255 | debug!("[KALSHI] Rate limited, backing off {}ms (retry {}/{})", 256 | backoff_ms, retries, MAX_RETRIES); 257 | tokio::time::sleep(Duration::from_millis(backoff_ms)).await; 258 | continue; 259 | } 260 | 261 | if !status.is_success() { 262 | let body = resp.text().await.unwrap_or_default(); 263 | anyhow::bail!("Kalshi API error {}: {}", status, body); 264 | } 265 | 266 | let data: T = resp.json().await?; 267 | tokio::time::sleep(Duration::from_millis(KALSHI_API_DELAY_MS)).await; 268 | return Ok(data); 269 | } 270 | } 271 | 272 | pub 
async fn get_events(&self, series_ticker: &str, limit: u32) -> Result> { 273 | let path = format!("/events?series_ticker={}&limit={}&status=open", series_ticker, limit); 274 | let resp: KalshiEventsResponse = self.get(&path).await?; 275 | Ok(resp.events) 276 | } 277 | 278 | pub async fn get_markets(&self, event_ticker: &str) -> Result> { 279 | let path = format!("/markets?event_ticker={}", event_ticker); 280 | let resp: KalshiMarketsResponse = self.get(&path).await?; 281 | Ok(resp.markets) 282 | } 283 | 284 | /// Generic authenticated POST request 285 | async fn post(&self, path: &str, body: &B) -> Result { 286 | let url = format!("{}{}", KALSHI_API_BASE, path); 287 | let timestamp_ms = SystemTime::now() 288 | .duration_since(UNIX_EPOCH) 289 | .unwrap() 290 | .as_millis() as u64; 291 | // Kalshi signature uses FULL path including /trade-api/v2 prefix 292 | let full_path = format!("/trade-api/v2{}", path); 293 | let msg = format!("{}POST{}", timestamp_ms, full_path); 294 | let signature = self.config.sign(&msg)?; 295 | 296 | let resp = self.http 297 | .post(&url) 298 | .header("KALSHI-ACCESS-KEY", &self.config.api_key_id) 299 | .header("KALSHI-ACCESS-SIGNATURE", &signature) 300 | .header("KALSHI-ACCESS-TIMESTAMP", timestamp_ms.to_string()) 301 | .header("Content-Type", "application/json") 302 | .timeout(ORDER_TIMEOUT) 303 | .json(body) 304 | .send() 305 | .await?; 306 | 307 | let status = resp.status(); 308 | if !status.is_success() { 309 | let body = resp.text().await.unwrap_or_default(); 310 | anyhow::bail!("Kalshi API error {}: {}", status, body); 311 | } 312 | 313 | let data: T = resp.json().await?; 314 | Ok(data) 315 | } 316 | 317 | /// Create an order on Kalshi 318 | pub async fn create_order(&self, order: &KalshiOrderRequest<'_>) -> Result { 319 | let path = "/portfolio/orders"; 320 | self.post(path, order).await 321 | } 322 | 323 | /// Create an IOC buy order (convenience method) 324 | pub async fn buy_ioc( 325 | &self, 326 | ticker: &str, 327 | side: &str, // "yes" or "no" 328 | price_cents: i64, 329 | count: i64, 330 | ) -> Result { 331 | debug_assert!(!ticker.is_empty(), "ticker must not be empty"); 332 | debug_assert!(price_cents >= 1 && price_cents <= 99, "price must be 1-99"); 333 | debug_assert!(count >= 1, "count must be >= 1"); 334 | 335 | let side_static: &'static str = if side == "yes" { "yes" } else { "no" }; 336 | let order_id = Self::next_order_id(); 337 | let order = KalshiOrderRequest::ioc_buy( 338 | Cow::Borrowed(ticker), 339 | side_static, 340 | price_cents, 341 | count, 342 | Cow::Borrowed(&order_id) 343 | ); 344 | debug!("[KALSHI] IOC {} {} @{}¢ x{}", side, ticker, price_cents, count); 345 | 346 | let resp = self.create_order(&order).await?; 347 | debug!("[KALSHI] {} filled={}", resp.order.status, resp.order.filled_count()); 348 | Ok(resp) 349 | } 350 | 351 | pub async fn sell_ioc( 352 | &self, 353 | ticker: &str, 354 | side: &str, 355 | price_cents: i64, 356 | count: i64, 357 | ) -> Result { 358 | debug_assert!(!ticker.is_empty(), "ticker must not be empty"); 359 | debug_assert!(price_cents >= 1 && price_cents <= 99, "price must be 1-99"); 360 | debug_assert!(count >= 1, "count must be >= 1"); 361 | 362 | let side_static: &'static str = if side == "yes" { "yes" } else { "no" }; 363 | let order_id = Self::next_order_id(); 364 | let order = KalshiOrderRequest::ioc_sell( 365 | Cow::Borrowed(ticker), 366 | side_static, 367 | price_cents, 368 | count, 369 | Cow::Borrowed(&order_id) 370 | ); 371 | debug!("[KALSHI] SELL {} {} @{}¢ x{}", side, ticker, price_cents, 
count); 372 | 373 | let resp = self.create_order(&order).await?; 374 | debug!("[KALSHI] {} filled={}", resp.order.status, resp.order.filled_count()); 375 | Ok(resp) 376 | } 377 | } 378 | 379 | // === WebSocket Message Types === 380 | 381 | #[derive(Deserialize, Debug)] 382 | pub struct KalshiWsMessage { 383 | #[serde(rename = "type")] 384 | pub msg_type: String, 385 | pub msg: Option, 386 | } 387 | 388 | #[allow(dead_code)] 389 | #[derive(Deserialize, Debug)] 390 | pub struct KalshiWsMsgBody { 391 | pub market_ticker: Option, 392 | // Snapshot fields - arrays of [price_cents, quantity] 393 | pub yes: Option>>, 394 | pub no: Option>>, 395 | // Delta fields 396 | pub price: Option, 397 | pub delta: Option, 398 | pub side: Option, 399 | } 400 | 401 | #[derive(Serialize)] 402 | struct SubscribeCmd { 403 | id: i32, 404 | cmd: &'static str, 405 | params: SubscribeParams, 406 | } 407 | 408 | #[derive(Serialize)] 409 | struct SubscribeParams { 410 | channels: Vec<&'static str>, 411 | market_tickers: Vec, 412 | } 413 | 414 | // ============================================================================= 415 | // WebSocket Runner 416 | // ============================================================================= 417 | 418 | /// WebSocket runner 419 | pub async fn run_ws( 420 | config: &KalshiConfig, 421 | state: Arc, 422 | exec_tx: mpsc::Sender, 423 | threshold_cents: PriceCents, 424 | ) -> Result<()> { 425 | let tickers: Vec = state.markets.iter() 426 | .take(state.market_count()) 427 | .filter_map(|m| m.pair.as_ref().map(|p| p.kalshi_market_ticker.to_string())) 428 | .collect(); 429 | 430 | if tickers.is_empty() { 431 | info!("[KALSHI] No markets to monitor"); 432 | tokio::time::sleep(Duration::from_secs(u64::MAX)).await; 433 | return Ok(()); 434 | } 435 | 436 | let timestamp = SystemTime::now() 437 | .duration_since(UNIX_EPOCH)? 
438 | .as_millis() 439 | .to_string(); 440 | 441 | let signature = config.sign(&format!("{}GET/trade-api/ws/v2", timestamp))?; 442 | 443 | let request = Request::builder() 444 | .uri(KALSHI_WS_URL) 445 | .header("KALSHI-ACCESS-KEY", &config.api_key_id) 446 | .header("KALSHI-ACCESS-SIGNATURE", &signature) 447 | .header("KALSHI-ACCESS-TIMESTAMP", ×tamp) 448 | .header("Host", "api.elections.kalshi.com") 449 | .header("Connection", "Upgrade") 450 | .header("Upgrade", "websocket") 451 | .header("Sec-WebSocket-Version", "13") 452 | .header("Sec-WebSocket-Key", tokio_tungstenite::tungstenite::handshake::client::generate_key()) 453 | .body(())?; 454 | 455 | let (ws_stream, _) = connect_async(request).await.context("Failed to connect to Kalshi")?; 456 | info!("[KALSHI] Connected"); 457 | 458 | let (mut write, mut read) = ws_stream.split(); 459 | 460 | // Subscribe to all tickers 461 | let subscribe_msg = SubscribeCmd { 462 | id: 1, 463 | cmd: "subscribe", 464 | params: SubscribeParams { 465 | channels: vec!["orderbook_delta"], 466 | market_tickers: tickers.clone(), 467 | }, 468 | }; 469 | 470 | write.send(Message::Text(serde_json::to_string(&subscribe_msg)?)).await?; 471 | info!("[KALSHI] Subscribed to {} markets", tickers.len()); 472 | 473 | let clock = NanoClock::new(); 474 | 475 | while let Some(msg) = read.next().await { 476 | match msg { 477 | Ok(Message::Text(text)) => { 478 | match serde_json::from_str::(&text) { 479 | Ok(kalshi_msg) => { 480 | let ticker = kalshi_msg.msg.as_ref() 481 | .and_then(|m| m.market_ticker.as_ref()); 482 | 483 | let Some(ticker) = ticker else { continue }; 484 | let ticker_hash = fxhash_str(ticker); 485 | 486 | let Some(&market_id) = state.kalshi_to_id.get(&ticker_hash) else { continue }; 487 | let market = &state.markets[market_id as usize]; 488 | 489 | match kalshi_msg.msg_type.as_str() { 490 | "orderbook_snapshot" => { 491 | if let Some(body) = &kalshi_msg.msg { 492 | process_kalshi_snapshot(market, body); 493 | 494 | // Check for arbs 495 | let arb_mask = market.check_arbs(threshold_cents); 496 | if arb_mask != 0 { 497 | send_kalshi_arb_request(market_id, market, arb_mask, &exec_tx, &clock).await; 498 | } 499 | } 500 | } 501 | "orderbook_delta" => { 502 | if let Some(body) = &kalshi_msg.msg { 503 | process_kalshi_delta(market, body); 504 | 505 | let arb_mask = market.check_arbs(threshold_cents); 506 | if arb_mask != 0 { 507 | send_kalshi_arb_request(market_id, market, arb_mask, &exec_tx, &clock).await; 508 | } 509 | } 510 | } 511 | _ => {} 512 | } 513 | } 514 | Err(e) => { 515 | // Log at trace level - unknown message types are normal 516 | tracing::trace!("[KALSHI] WS parse error: {} (msg: {}...)", e, &text[..text.len().min(100)]); 517 | } 518 | } 519 | } 520 | Ok(Message::Ping(data)) => { 521 | let _ = write.send(Message::Pong(data)).await; 522 | } 523 | Err(e) => { 524 | error!("[KALSHI] WebSocket error: {}", e); 525 | break; 526 | } 527 | _ => {} 528 | } 529 | } 530 | 531 | Ok(()) 532 | } 533 | 534 | /// Process Kalshi orderbook snapshot 535 | /// Note: Kalshi sends BIDS - to buy YES you pay (100 - best_NO_bid), to buy NO you pay (100 - best_YES_bid) 536 | #[inline] 537 | fn process_kalshi_snapshot(market: &crate::types::AtomicMarketState, body: &KalshiWsMsgBody) { 538 | // Find best YES bid (highest price) - this determines NO ask 539 | let (no_ask, no_size) = body.yes.as_ref() 540 | .and_then(|levels| { 541 | levels.iter() 542 | .filter_map(|l| { 543 | if l.len() >= 2 && l[1] > 0 { // Has quantity 544 | Some((l[0], l[1])) // (price, qty) 545 | } else { 
546 | None 547 | } 548 | }) 549 | .max_by_key(|(p, _)| *p) // Highest bid 550 | .map(|(price, qty)| { 551 | let ask = (100 - price) as PriceCents; // To buy NO, pay 100 - YES_bid 552 | let size = (qty * price / 100) as SizeCents; 553 | (ask, size) 554 | }) 555 | }) 556 | .unwrap_or((0, 0)); 557 | 558 | // Find best NO bid (highest price) - this determines YES ask 559 | let (yes_ask, yes_size) = body.no.as_ref() 560 | .and_then(|levels| { 561 | levels.iter() 562 | .filter_map(|l| { 563 | if l.len() >= 2 && l[1] > 0 { 564 | Some((l[0], l[1])) 565 | } else { 566 | None 567 | } 568 | }) 569 | .max_by_key(|(p, _)| *p) 570 | .map(|(price, qty)| { 571 | let ask = (100 - price) as PriceCents; // To buy YES, pay 100 - NO_bid 572 | let size = (qty * price / 100) as SizeCents; 573 | (ask, size) 574 | }) 575 | }) 576 | .unwrap_or((0, 0)); 577 | 578 | // Store 579 | market.kalshi.store(yes_ask, no_ask, yes_size, no_size); 580 | } 581 | 582 | /// Process Kalshi orderbook delta 583 | /// Note: Deltas update bid levels; we recompute asks from best bids 584 | #[inline] 585 | fn process_kalshi_delta(market: &crate::types::AtomicMarketState, body: &KalshiWsMsgBody) { 586 | // For deltas, recompute from snapshot-like format 587 | // Kalshi deltas have yes/no as arrays of [price, new_qty] 588 | let (current_yes, current_no, current_yes_size, current_no_size) = market.kalshi.load(); 589 | 590 | // Process YES bid updates (affects NO ask) 591 | let (no_ask, no_size) = if let Some(levels) = &body.yes { 592 | // Find best (highest) YES bid with non-zero quantity 593 | levels.iter() 594 | .filter_map(|l| { 595 | if l.len() >= 2 && l[1] > 0 { 596 | Some((l[0], l[1])) 597 | } else { 598 | None 599 | } 600 | }) 601 | .max_by_key(|(p, _)| *p) 602 | .map(|(price, qty)| { 603 | let ask = (100 - price) as PriceCents; 604 | let size = (qty * price / 100) as SizeCents; 605 | (ask, size) 606 | }) 607 | .unwrap_or((current_no, current_no_size)) 608 | } else { 609 | (current_no, current_no_size) 610 | }; 611 | 612 | // Process NO bid updates (affects YES ask) 613 | let (yes_ask, yes_size) = if let Some(levels) = &body.no { 614 | levels.iter() 615 | .filter_map(|l| { 616 | if l.len() >= 2 && l[1] > 0 { 617 | Some((l[0], l[1])) 618 | } else { 619 | None 620 | } 621 | }) 622 | .max_by_key(|(p, _)| *p) 623 | .map(|(price, qty)| { 624 | let ask = (100 - price) as PriceCents; 625 | let size = (qty * price / 100) as SizeCents; 626 | (ask, size) 627 | }) 628 | .unwrap_or((current_yes, current_yes_size)) 629 | } else { 630 | (current_yes, current_yes_size) 631 | }; 632 | 633 | market.kalshi.store(yes_ask, no_ask, yes_size, no_size); 634 | } 635 | 636 | /// Send arb request from Kalshi handler 637 | #[inline] 638 | async fn send_kalshi_arb_request( 639 | market_id: u16, 640 | market: &crate::types::AtomicMarketState, 641 | arb_mask: u8, 642 | exec_tx: &mpsc::Sender, 643 | clock: &NanoClock, 644 | ) { 645 | let (k_yes, k_no, k_yes_size, k_no_size) = market.kalshi.load(); 646 | let (p_yes, p_no, p_yes_size, p_no_size) = market.poly.load(); 647 | 648 | let (yes_price, no_price, yes_size, no_size, arb_type) = if arb_mask & 1 != 0 { 649 | (p_yes, k_no, p_yes_size, k_no_size, ArbType::PolyYesKalshiNo) 650 | } else if arb_mask & 2 != 0 { 651 | (k_yes, p_no, k_yes_size, p_no_size, ArbType::KalshiYesPolyNo) 652 | } else if arb_mask & 4 != 0 { 653 | (p_yes, p_no, p_yes_size, p_no_size, ArbType::PolyOnly) 654 | } else if arb_mask & 8 != 0 { 655 | (k_yes, k_no, k_yes_size, k_no_size, ArbType::KalshiOnly) 656 | } else { 657 | return; 658 | }; 659 | 
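// Bit layout of `arb_mask`, as used in the branch above and in the equivalent
// send_arb_request() in polymarket.rs:
//   bit 0 (0b0001) -> Poly YES + Kalshi NO   (cross-platform)
//   bit 1 (0b0010) -> Kalshi YES + Poly NO   (cross-platform)
//   bit 2 (0b0100) -> Poly YES + Poly NO     (single platform)
//   bit 3 (0b1000) -> Kalshi YES + Kalshi NO (single platform)
// When several bits are set, the cross-platform combinations are taken first,
// matching the "more reliable" priority noted in the Polymarket handler. Both
// handlers then emit the same FastExecutionRequest shape, so the execution
// engine does not care which feed detected the edge.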
660 | let req = FastExecutionRequest { 661 | market_id, 662 | yes_price, 663 | no_price, 664 | yes_size, 665 | no_size, 666 | arb_type, 667 | detected_ns: clock.now_ns(), 668 | }; 669 | 670 | let _ = exec_tx.try_send(req); 671 | } -------------------------------------------------------------------------------- /src/discovery.rs: -------------------------------------------------------------------------------- 1 | //! Intelligent market discovery and matching system. 2 | //! 3 | //! This module handles the discovery of matching markets between Kalshi and Polymarket, 4 | //! with support for caching, incremental updates, and parallel processing. 5 | 6 | use anyhow::Result; 7 | use futures_util::{stream, StreamExt}; 8 | use governor::{Quota, RateLimiter, state::NotKeyed, clock::DefaultClock, middleware::NoOpMiddleware}; 9 | use serde::{Serialize, Deserialize}; 10 | use std::num::NonZeroU32; 11 | use std::sync::Arc; 12 | use std::time::{SystemTime, UNIX_EPOCH}; 13 | use tokio::sync::Semaphore; 14 | use tracing::{info, warn}; 15 | 16 | use crate::cache::TeamCache; 17 | use crate::config::{LeagueConfig, get_league_configs, get_league_config}; 18 | use crate::kalshi::KalshiApiClient; 19 | use crate::polymarket::GammaClient; 20 | use crate::types::{MarketPair, MarketType, DiscoveryResult, KalshiMarket, KalshiEvent}; 21 | 22 | /// Max concurrent Gamma API requests 23 | const GAMMA_CONCURRENCY: usize = 20; 24 | 25 | /// Kalshi rate limit: 2 requests per second (very conservative - they rate limit aggressively) 26 | /// Must be conservative because discovery runs many leagues/series in parallel 27 | const KALSHI_RATE_LIMIT_PER_SEC: u32 = 2; 28 | 29 | /// Max concurrent Kalshi API requests GLOBALLY across all leagues/series 30 | /// This is the hard cap - prevents bursting even when rate limiter has tokens 31 | const KALSHI_GLOBAL_CONCURRENCY: usize = 1; 32 | 33 | /// Cache file path 34 | const DISCOVERY_CACHE_PATH: &str = ".discovery_cache.json"; 35 | 36 | /// Cache TTL in seconds (2 hours - new markets appear every ~2 hours) 37 | const CACHE_TTL_SECS: u64 = 2 * 60 * 60; 38 | 39 | /// Task for parallel Gamma lookup 40 | struct GammaLookupTask { 41 | event: Arc, 42 | market: KalshiMarket, 43 | poly_slug: String, 44 | market_type: MarketType, 45 | league: String, 46 | } 47 | 48 | /// Type alias for Kalshi rate limiter 49 | type KalshiRateLimiter = RateLimiter; 50 | 51 | /// Persistent cache for discovered market pairs 52 | #[derive(Debug, Clone, Serialize, Deserialize)] 53 | struct DiscoveryCache { 54 | /// Unix timestamp when cache was created 55 | timestamp_secs: u64, 56 | /// Cached market pairs 57 | pairs: Vec, 58 | /// Set of known Kalshi market tickers (for incremental updates) 59 | known_kalshi_tickers: Vec, 60 | } 61 | 62 | impl DiscoveryCache { 63 | fn new(pairs: Vec) -> Self { 64 | let known_kalshi_tickers: Vec = pairs.iter() 65 | .map(|p| p.kalshi_market_ticker.to_string()) 66 | .collect(); 67 | Self { 68 | timestamp_secs: current_unix_secs(), 69 | pairs, 70 | known_kalshi_tickers, 71 | } 72 | } 73 | 74 | fn is_expired(&self) -> bool { 75 | let now = current_unix_secs(); 76 | now.saturating_sub(self.timestamp_secs) > CACHE_TTL_SECS 77 | } 78 | 79 | fn age_secs(&self) -> u64 { 80 | current_unix_secs().saturating_sub(self.timestamp_secs) 81 | } 82 | 83 | fn has_ticker(&self, ticker: &str) -> bool { 84 | self.known_kalshi_tickers.iter().any(|t| t == ticker) 85 | } 86 | } 87 | 88 | fn current_unix_secs() -> u64 { 89 | SystemTime::now() 90 | .duration_since(UNIX_EPOCH) 91 | .unwrap_or_default() 92 
| .as_secs() 93 | } 94 | 95 | /// Market discovery and matching client for cross-platform market identification 96 | pub struct DiscoveryClient { 97 | kalshi: Arc, 98 | gamma: Arc, 99 | pub team_cache: Arc, 100 | kalshi_limiter: Arc, 101 | kalshi_semaphore: Arc, // Global concurrency limit for Kalshi 102 | gamma_semaphore: Arc, 103 | } 104 | 105 | impl DiscoveryClient { 106 | pub fn new(kalshi: KalshiApiClient, team_cache: TeamCache) -> Self { 107 | // Create token bucket rate limiter for Kalshi 108 | let quota = Quota::per_second(NonZeroU32::new(KALSHI_RATE_LIMIT_PER_SEC).unwrap()); 109 | let kalshi_limiter = Arc::new(RateLimiter::direct(quota)); 110 | 111 | Self { 112 | kalshi: Arc::new(kalshi), 113 | gamma: Arc::new(GammaClient::new()), 114 | team_cache: Arc::new(team_cache), 115 | kalshi_limiter, 116 | kalshi_semaphore: Arc::new(Semaphore::new(KALSHI_GLOBAL_CONCURRENCY)), 117 | gamma_semaphore: Arc::new(Semaphore::new(GAMMA_CONCURRENCY)), 118 | } 119 | } 120 | 121 | /// Load cache from disk (async) 122 | async fn load_cache() -> Option { 123 | let data = tokio::fs::read_to_string(DISCOVERY_CACHE_PATH).await.ok()?; 124 | serde_json::from_str(&data).ok() 125 | } 126 | 127 | /// Save cache to disk (async) 128 | async fn save_cache(cache: &DiscoveryCache) -> Result<()> { 129 | let data = serde_json::to_string_pretty(cache)?; 130 | tokio::fs::write(DISCOVERY_CACHE_PATH, data).await?; 131 | Ok(()) 132 | } 133 | 134 | /// Discover all market pairs with caching support 135 | /// 136 | /// Strategy: 137 | /// 1. Try to load cache from disk 138 | /// 2. If cache exists and is fresh (<2 hours), use it directly 139 | /// 3. If cache exists but is stale, load it + fetch incremental updates 140 | /// 4. If no cache, do full discovery 141 | pub async fn discover_all(&self, leagues: &[&str]) -> DiscoveryResult { 142 | // Try to load existing cache 143 | let cached = Self::load_cache().await; 144 | 145 | match cached { 146 | Some(cache) if !cache.is_expired() => { 147 | // Cache is fresh - use it directly 148 | info!("📂 Loaded {} pairs from cache (age: {}s)", 149 | cache.pairs.len(), cache.age_secs()); 150 | return DiscoveryResult { 151 | pairs: cache.pairs, 152 | kalshi_events_found: 0, // From cache 153 | poly_matches: 0, 154 | poly_misses: 0, 155 | errors: vec![], 156 | }; 157 | } 158 | Some(cache) => { 159 | // Cache is stale - do incremental discovery 160 | info!("📂 Cache expired (age: {}s), doing incremental refresh...", cache.age_secs()); 161 | return self.discover_incremental(leagues, cache).await; 162 | } 163 | None => { 164 | // No cache - do full discovery 165 | info!("📂 No cache found, doing full discovery..."); 166 | } 167 | } 168 | 169 | // Full discovery (no cache) 170 | let result = self.discover_full(leagues).await; 171 | 172 | // Save to cache 173 | if !result.pairs.is_empty() { 174 | let cache = DiscoveryCache::new(result.pairs.clone()); 175 | if let Err(e) = Self::save_cache(&cache).await { 176 | warn!("Failed to save discovery cache: {}", e); 177 | } else { 178 | info!("💾 Saved {} pairs to cache", result.pairs.len()); 179 | } 180 | } 181 | 182 | result 183 | } 184 | 185 | /// Force full discovery (ignores cache) 186 | pub async fn discover_all_force(&self, leagues: &[&str]) -> DiscoveryResult { 187 | info!("🔄 Forced full discovery (ignoring cache)..."); 188 | let result = self.discover_full(leagues).await; 189 | 190 | // Save to cache 191 | if !result.pairs.is_empty() { 192 | let cache = DiscoveryCache::new(result.pairs.clone()); 193 | if let Err(e) = Self::save_cache(&cache).await 
{ 194 | warn!("Failed to save discovery cache: {}", e); 195 | } else { 196 | info!("💾 Saved {} pairs to cache", result.pairs.len()); 197 | } 198 | } 199 | 200 | result 201 | } 202 | 203 | /// Full discovery without cache 204 | async fn discover_full(&self, leagues: &[&str]) -> DiscoveryResult { 205 | let configs: Vec<_> = if leagues.is_empty() { 206 | get_league_configs() 207 | } else { 208 | leagues.iter() 209 | .filter_map(|l| get_league_config(l)) 210 | .collect() 211 | }; 212 | 213 | // Parallel discovery across all leagues 214 | let league_futures: Vec<_> = configs.iter() 215 | .map(|config| self.discover_league(config, None)) 216 | .collect(); 217 | 218 | let league_results = futures_util::future::join_all(league_futures).await; 219 | 220 | // Merge results 221 | let mut result = DiscoveryResult::default(); 222 | for league_result in league_results { 223 | result.pairs.extend(league_result.pairs); 224 | result.poly_matches += league_result.poly_matches; 225 | result.errors.extend(league_result.errors); 226 | } 227 | result.kalshi_events_found = result.pairs.len(); 228 | 229 | result 230 | } 231 | 232 | /// Incremental discovery - merge cached pairs with newly discovered ones 233 | async fn discover_incremental(&self, leagues: &[&str], cache: DiscoveryCache) -> DiscoveryResult { 234 | let configs: Vec<_> = if leagues.is_empty() { 235 | get_league_configs() 236 | } else { 237 | leagues.iter() 238 | .filter_map(|l| get_league_config(l)) 239 | .collect() 240 | }; 241 | 242 | // Discover with filter for known tickers 243 | let league_futures: Vec<_> = configs.iter() 244 | .map(|config| self.discover_league(config, Some(&cache))) 245 | .collect(); 246 | 247 | let league_results = futures_util::future::join_all(league_futures).await; 248 | 249 | // Merge cached pairs with newly discovered ones 250 | let mut all_pairs = cache.pairs; 251 | let mut new_count = 0; 252 | 253 | for league_result in league_results { 254 | for pair in league_result.pairs { 255 | if !all_pairs.iter().any(|p| *p.kalshi_market_ticker == *pair.kalshi_market_ticker) { 256 | all_pairs.push(pair); 257 | new_count += 1; 258 | } 259 | } 260 | } 261 | 262 | if new_count > 0 { 263 | info!("🆕 Found {} new market pairs", new_count); 264 | 265 | // Update cache 266 | let new_cache = DiscoveryCache::new(all_pairs.clone()); 267 | if let Err(e) = Self::save_cache(&new_cache).await { 268 | warn!("Failed to update discovery cache: {}", e); 269 | } else { 270 | info!("💾 Updated cache with {} total pairs", all_pairs.len()); 271 | } 272 | } else { 273 | info!("✅ No new markets found, using {} cached pairs", all_pairs.len()); 274 | 275 | // Just update timestamp to extend TTL 276 | let refreshed_cache = DiscoveryCache::new(all_pairs.clone()); 277 | let _ = Self::save_cache(&refreshed_cache).await; 278 | } 279 | 280 | DiscoveryResult { 281 | pairs: all_pairs, 282 | kalshi_events_found: new_count, 283 | poly_matches: new_count, 284 | poly_misses: 0, 285 | errors: vec![], 286 | } 287 | } 288 | 289 | /// Discover all market types for a single league (PARALLEL) 290 | /// If cache is provided, only discovers markets not already in cache 291 | async fn discover_league(&self, config: &LeagueConfig, cache: Option<&DiscoveryCache>) -> DiscoveryResult { 292 | info!("🔍 Discovering {} markets...", config.league_code); 293 | 294 | let market_types = [MarketType::Moneyline, MarketType::Spread, MarketType::Total, MarketType::Btts]; 295 | 296 | // Parallel discovery across market types 297 | let type_futures: Vec<_> = market_types.iter() 298 | 
.filter_map(|market_type| { 299 | let series = self.get_series_for_type(config, *market_type)?; 300 | Some(self.discover_series(config, series, *market_type, cache)) 301 | }) 302 | .collect(); 303 | 304 | let type_results = futures_util::future::join_all(type_futures).await; 305 | 306 | let mut result = DiscoveryResult::default(); 307 | for (pairs_result, market_type) in type_results.into_iter().zip(market_types.iter()) { 308 | match pairs_result { 309 | Ok(pairs) => { 310 | let count = pairs.len(); 311 | if count > 0 { 312 | info!(" ✅ {} {}: {} pairs", config.league_code, market_type, count); 313 | } 314 | result.poly_matches += count; 315 | result.pairs.extend(pairs); 316 | } 317 | Err(e) => { 318 | result.errors.push(format!("{} {}: {}", config.league_code, market_type, e)); 319 | } 320 | } 321 | } 322 | 323 | result 324 | } 325 | 326 | fn get_series_for_type(&self, config: &LeagueConfig, market_type: MarketType) -> Option<&'static str> { 327 | match market_type { 328 | MarketType::Moneyline => Some(config.kalshi_series_game), 329 | MarketType::Spread => config.kalshi_series_spread, 330 | MarketType::Total => config.kalshi_series_total, 331 | MarketType::Btts => config.kalshi_series_btts, 332 | } 333 | } 334 | 335 | /// Discover markets for a specific series (PARALLEL Kalshi + Gamma lookups) 336 | /// If cache is provided, skips markets already in cache 337 | async fn discover_series( 338 | &self, 339 | config: &LeagueConfig, 340 | series: &str, 341 | market_type: MarketType, 342 | cache: Option<&DiscoveryCache>, 343 | ) -> Result> { 344 | // Fetch Kalshi events 345 | { 346 | let _permit = self.kalshi_semaphore.acquire().await.map_err(|e| anyhow::anyhow!("semaphore closed: {}", e))?; 347 | self.kalshi_limiter.until_ready().await; 348 | } 349 | let events = self.kalshi.get_events(series, 50).await?; 350 | 351 | // PHASE 2: Parallel market fetching 352 | let kalshi = self.kalshi.clone(); 353 | let limiter = self.kalshi_limiter.clone(); 354 | let semaphore = self.kalshi_semaphore.clone(); 355 | 356 | // Parse events first, filtering out unparseable ones 357 | let parsed_events: Vec<_> = events.into_iter() 358 | .filter_map(|event| { 359 | let parsed = match parse_kalshi_event_ticker(&event.event_ticker) { 360 | Some(p) => p, 361 | None => { 362 | warn!(" ⚠️ Could not parse event ticker {}", event.event_ticker); 363 | return None; 364 | } 365 | }; 366 | Some((parsed, event)) 367 | }) 368 | .collect(); 369 | 370 | // Execute market fetches with GLOBAL concurrency limit 371 | let market_results: Vec<_> = stream::iter(parsed_events) 372 | .map(|(parsed, event)| { 373 | let kalshi = kalshi.clone(); 374 | let limiter = limiter.clone(); 375 | let semaphore = semaphore.clone(); 376 | let event_ticker = event.event_ticker.clone(); 377 | async move { 378 | let _permit = semaphore.acquire().await.ok(); 379 | // rate limit 380 | limiter.until_ready().await; 381 | let markets_result = kalshi.get_markets(&event_ticker).await; 382 | (parsed, Arc::new(event), markets_result) 383 | } 384 | }) 385 | .buffer_unordered(KALSHI_GLOBAL_CONCURRENCY * 2) // Allow some buffering, semaphore is the real limit 386 | .collect() 387 | .await; 388 | 389 | // Collect all (event, market) pairs 390 | let mut event_markets = Vec::with_capacity(market_results.len() * 3); 391 | for (parsed, event, markets_result) in market_results { 392 | match markets_result { 393 | Ok(markets) => { 394 | for market in markets { 395 | // Skip if already in cache 396 | if let Some(c) = cache { 397 | if c.has_ticker(&market.ticker) { 398 | 
continue; 399 | } 400 | } 401 | event_markets.push((parsed.clone(), event.clone(), market)); 402 | } 403 | } 404 | Err(e) => { 405 | warn!(" ⚠️ Failed to get markets for {}: {}", event.event_ticker, e); 406 | } 407 | } 408 | } 409 | 410 | // Parallel Gamma lookups with semaphore 411 | let lookup_futures: Vec<_> = event_markets 412 | .into_iter() 413 | .map(|(parsed, event, market)| { 414 | let poly_slug = self.build_poly_slug(config.poly_prefix, &parsed, market_type, &market); 415 | 416 | GammaLookupTask { 417 | event, 418 | market, 419 | poly_slug, 420 | market_type, 421 | league: config.league_code.to_string(), 422 | } 423 | }) 424 | .collect(); 425 | 426 | // Execute lookups in parallel 427 | let pairs: Vec = stream::iter(lookup_futures) 428 | .map(|task| { 429 | let gamma = self.gamma.clone(); 430 | let semaphore = self.gamma_semaphore.clone(); 431 | async move { 432 | let _permit = semaphore.acquire().await.ok()?; 433 | match gamma.lookup_market(&task.poly_slug).await { 434 | Ok(Some((yes_token, no_token))) => { 435 | let team_suffix = extract_team_suffix(&task.market.ticker); 436 | Some(MarketPair { 437 | pair_id: format!("{}-{}", task.poly_slug, task.market.ticker).into(), 438 | league: task.league.into(), 439 | market_type: task.market_type, 440 | description: format!("{} - {}", task.event.title, task.market.title).into(), 441 | kalshi_event_ticker: task.event.event_ticker.clone().into(), 442 | kalshi_market_ticker: task.market.ticker.into(), 443 | poly_slug: task.poly_slug.into(), 444 | poly_yes_token: yes_token.into(), 445 | poly_no_token: no_token.into(), 446 | line_value: task.market.floor_strike, 447 | team_suffix: team_suffix.map(|s| s.into()), 448 | }) 449 | } 450 | Ok(None) => None, 451 | Err(e) => { 452 | warn!(" ⚠️ Gamma lookup failed for {}: {}", task.poly_slug, e); 453 | None 454 | } 455 | } 456 | } 457 | }) 458 | .buffer_unordered(GAMMA_CONCURRENCY) 459 | .filter_map(|x| async { x }) 460 | .collect() 461 | .await; 462 | 463 | Ok(pairs) 464 | } 465 | 466 | /// Build Polymarket slug from Kalshi event data 467 | fn build_poly_slug( 468 | &self, 469 | poly_prefix: &str, 470 | parsed: &ParsedKalshiTicker, 471 | market_type: MarketType, 472 | market: &KalshiMarket, 473 | ) -> String { 474 | // Convert Kalshi team codes to Polymarket codes using cache 475 | let poly_team1 = self.team_cache 476 | .kalshi_to_poly(poly_prefix, &parsed.team1) 477 | .unwrap_or_else(|| parsed.team1.to_lowercase()); 478 | let poly_team2 = self.team_cache 479 | .kalshi_to_poly(poly_prefix, &parsed.team2) 480 | .unwrap_or_else(|| parsed.team2.to_lowercase()); 481 | 482 | // Convert date from "25DEC27" to "2025-12-27" 483 | let date_str = kalshi_date_to_iso(&parsed.date); 484 | 485 | // Base slug: league-team1-team2-date 486 | let base = format!("{}-{}-{}-{}", poly_prefix, poly_team1, poly_team2, date_str); 487 | 488 | match market_type { 489 | MarketType::Moneyline => { 490 | if let Some(suffix) = extract_team_suffix(&market.ticker) { 491 | if suffix.to_lowercase() == "tie" { 492 | format!("{}-draw", base) 493 | } else { 494 | let poly_suffix = self.team_cache 495 | .kalshi_to_poly(poly_prefix, &suffix) 496 | .unwrap_or_else(|| suffix.to_lowercase()); 497 | format!("{}-{}", base, poly_suffix) 498 | } 499 | } else { 500 | base 501 | } 502 | } 503 | MarketType::Spread => { 504 | if let Some(floor) = market.floor_strike { 505 | let floor_str = format!("{:.1}", floor).replace(".", "pt"); 506 | format!("{}-spread-{}", base, floor_str) 507 | } else { 508 | format!("{}-spread", base) 509 | } 510 | } 511 | 
MarketType::Total => { 512 | if let Some(floor) = market.floor_strike { 513 | let floor_str = format!("{:.1}", floor).replace(".", "pt"); 514 | format!("{}-total-{}", base, floor_str) 515 | } else { 516 | format!("{}-total", base) 517 | } 518 | } 519 | MarketType::Btts => { 520 | format!("{}-btts", base) 521 | } 522 | } 523 | } 524 | } 525 | 526 | // === Helpers === 527 | 528 | #[derive(Debug, Clone)] 529 | struct ParsedKalshiTicker { 530 | date: String, // "25DEC27" 531 | team1: String, // "CFC" 532 | team2: String, // "AVL" 533 | } 534 | 535 | /// Parse Kalshi event ticker like "KXEPLGAME-25DEC27CFCAVL" or "KXNCAAFGAME-25DEC27M-OHFRES" 536 | fn parse_kalshi_event_ticker(ticker: &str) -> Option { 537 | let parts: Vec<&str> = ticker.split('-').collect(); 538 | if parts.len() < 2 { 539 | return None; 540 | } 541 | 542 | // Handle two formats: 543 | // 1. "KXEPLGAME-25DEC27CFCAVL" - date+teams in parts[1] 544 | // 2. "KXNCAAFGAME-25DEC27M-OHFRES" - date in parts[1], teams in parts[2] 545 | let (date, teams_part) = if parts.len() >= 3 && parts[2].len() >= 4 { 546 | // Format 2: 3-part ticker with separate teams section 547 | // parts[1] is like "25DEC27M" (date + optional suffix) 548 | let date_part = parts[1]; 549 | let date = if date_part.len() >= 7 { 550 | date_part[..7].to_uppercase() 551 | } else { 552 | return None; 553 | }; 554 | (date, parts[2]) 555 | } else { 556 | // Format 1: 2-part ticker with combined date+teams 557 | let date_teams = parts[1]; 558 | // Minimum: 7 (date) + 2 + 2 (min team codes) = 11 559 | if date_teams.len() < 11 { 560 | return None; 561 | } 562 | let date = date_teams[..7].to_uppercase(); 563 | let teams = &date_teams[7..]; 564 | (date, teams) 565 | }; 566 | 567 | // Split team codes - try to find the best split point 568 | // Team codes range from 2-4 chars (e.g., OM, CFC, FRES) 569 | let (team1, team2) = split_team_codes(teams_part); 570 | 571 | Some(ParsedKalshiTicker { date, team1, team2 }) 572 | } 573 | 574 | /// Split a combined team string into two team codes 575 | /// Tries multiple split strategies based on string length 576 | fn split_team_codes(teams: &str) -> (String, String) { 577 | let len = teams.len(); 578 | 579 | // For 6 chars, could be 3+3, 2+4, or 4+2 580 | // For 5 chars, could be 2+3 or 3+2 581 | // For 4 chars, must be 2+2 582 | // For 7 chars, could be 3+4 or 4+3 583 | // For 8 chars, could be 4+4, 3+5, 5+3 584 | 585 | match len { 586 | 4 => (teams[..2].to_uppercase(), teams[2..].to_uppercase()), 587 | 5 => { 588 | // Prefer 2+3 (common for OM+ASM, OL+PSG) 589 | (teams[..2].to_uppercase(), teams[2..].to_uppercase()) 590 | } 591 | 6 => { 592 | // Check if it looks like 2+4 pattern (e.g., OHFRES = OH+FRES) 593 | // Common 2-letter codes: OM, OL, OH, SF, LA, NY, KC, TB, etc. 
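            // Worked examples: "OHFRES" hits the 2+4 branch below -> ("OH", "FRES"),
            // while "CFCAVL" falls through to the default 3+3 split -> ("CFC", "AVL").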
594 | let first_two = &teams[..2].to_uppercase(); 595 | if is_likely_two_letter_code(first_two) { 596 | (first_two.clone(), teams[2..].to_uppercase()) 597 | } else { 598 | // Default to 3+3 599 | (teams[..3].to_uppercase(), teams[3..].to_uppercase()) 600 | } 601 | } 602 | 7 => { 603 | // Could be 3+4 or 4+3 - prefer 3+4 604 | (teams[..3].to_uppercase(), teams[3..].to_uppercase()) 605 | } 606 | _ if len >= 8 => { 607 | // 4+4 or longer 608 | (teams[..4].to_uppercase(), teams[4..].to_uppercase()) 609 | } 610 | _ => { 611 | let mid = len / 2; 612 | (teams[..mid].to_uppercase(), teams[mid..].to_uppercase()) 613 | } 614 | } 615 | } 616 | 617 | /// Check if a 2-letter code is a known/likely team abbreviation 618 | fn is_likely_two_letter_code(code: &str) -> bool { 619 | matches!( 620 | code, 621 | // European football (Ligue 1, etc.) 622 | "OM" | "OL" | "FC" | 623 | // US sports common abbreviations 624 | "OH" | "SF" | "LA" | "NY" | "KC" | "TB" | "GB" | "NE" | "NO" | "LV" | 625 | // Generic short codes 626 | "BC" | "SC" | "AC" | "AS" | "US" 627 | ) 628 | } 629 | 630 | /// Convert Kalshi date "25DEC27" to ISO "2025-12-27" 631 | fn kalshi_date_to_iso(kalshi_date: &str) -> String { 632 | if kalshi_date.len() != 7 { 633 | return kalshi_date.to_string(); 634 | } 635 | 636 | let year = format!("20{}", &kalshi_date[..2]); 637 | let month = match &kalshi_date[2..5].to_uppercase()[..] { 638 | "JAN" => "01", "FEB" => "02", "MAR" => "03", "APR" => "04", 639 | "MAY" => "05", "JUN" => "06", "JUL" => "07", "AUG" => "08", 640 | "SEP" => "09", "OCT" => "10", "NOV" => "11", "DEC" => "12", 641 | _ => "01", 642 | }; 643 | let day = &kalshi_date[5..7]; 644 | 645 | format!("{}-{}-{}", year, month, day) 646 | } 647 | 648 | /// Extract team suffix from market ticker (e.g., "KXEPLGAME-25DEC27CFCAVL-CFC" -> "CFC") 649 | fn extract_team_suffix(ticker: &str) -> Option { 650 | let mut splits = ticker.splitn(3, '-'); 651 | splits.next()?; // series 652 | splits.next()?; // event 653 | splits.next().map(|s| s.to_uppercase()) 654 | } 655 | 656 | #[cfg(test)] 657 | mod tests { 658 | use super::*; 659 | 660 | #[test] 661 | fn test_parse_kalshi_ticker() { 662 | let parsed = parse_kalshi_event_ticker("KXEPLGAME-25DEC27CFCAVL").unwrap(); 663 | assert_eq!(parsed.date, "25DEC27"); 664 | assert_eq!(parsed.team1, "CFC"); 665 | assert_eq!(parsed.team2, "AVL"); 666 | } 667 | 668 | #[test] 669 | fn test_kalshi_date_to_iso() { 670 | assert_eq!(kalshi_date_to_iso("25DEC27"), "2025-12-27"); 671 | assert_eq!(kalshi_date_to_iso("25JAN01"), "2025-01-01"); 672 | } 673 | } 674 | -------------------------------------------------------------------------------- /src/execution.rs: -------------------------------------------------------------------------------- 1 | //! High-performance order execution engine for arbitrage opportunities. 2 | //! 3 | //! This module handles concurrent order execution across both platforms, 4 | //! position reconciliation, and automatic exposure management. 
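//!
//! A minimal wiring sketch (illustrative only; it assumes `kalshi`, `poly_async`, `state`,
//! `circuit_breaker`, and `position_channel` have already been constructed elsewhere, e.g. in `main.rs`):
//!
//! ```ignore
//! // Bounded channel that the detection hot path pushes FastExecutionRequest values into.
//! let (exec_tx, exec_rx) = create_execution_channel();
//! let engine = std::sync::Arc::new(ExecutionEngine::new(
//!     kalshi, poly_async, state, circuit_breaker, position_channel, /* dry_run */ true,
//! ));
//! // Drain the channel and execute each opportunity in its own spawned task.
//! tokio::spawn(run_execution_loop(exec_rx, engine));
//! ```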
5 | 6 | use anyhow::{Result, anyhow}; 7 | use std::sync::Arc; 8 | use std::sync::atomic::{AtomicU64, Ordering}; 9 | use std::time::{Duration, Instant}; 10 | use tokio::sync::mpsc; 11 | use tracing::{info, warn, error}; 12 | 13 | use crate::kalshi::KalshiApiClient; 14 | use crate::polymarket_clob::SharedAsyncClient; 15 | use crate::types::{ 16 | ArbType, MarketPair, 17 | FastExecutionRequest, GlobalState, 18 | cents_to_price, 19 | }; 20 | use crate::circuit_breaker::CircuitBreaker; 21 | use crate::position_tracker::{FillRecord, PositionChannel}; 22 | 23 | // ============================================================================= 24 | // EXECUTION ENGINE 25 | // ============================================================================= 26 | 27 | /// High-precision monotonic clock for latency measurement and performance tracking 28 | pub struct NanoClock { 29 | start: Instant, 30 | } 31 | 32 | impl NanoClock { 33 | pub fn new() -> Self { 34 | Self { start: Instant::now() } 35 | } 36 | 37 | #[inline(always)] 38 | pub fn now_ns(&self) -> u64 { 39 | self.start.elapsed().as_nanos() as u64 40 | } 41 | } 42 | 43 | impl Default for NanoClock { 44 | fn default() -> Self { 45 | Self::new() 46 | } 47 | } 48 | 49 | /// Core execution engine for processing arbitrage opportunities 50 | pub struct ExecutionEngine { 51 | kalshi: Arc, 52 | poly_async: Arc, 53 | state: Arc, 54 | circuit_breaker: Arc, 55 | position_channel: PositionChannel, 56 | in_flight: Arc<[AtomicU64; 8]>, 57 | clock: NanoClock, 58 | pub dry_run: bool, 59 | test_mode: bool, 60 | } 61 | 62 | impl ExecutionEngine { 63 | pub fn new( 64 | kalshi: Arc, 65 | poly_async: Arc, 66 | state: Arc, 67 | circuit_breaker: Arc, 68 | position_channel: PositionChannel, 69 | dry_run: bool, 70 | ) -> Self { 71 | let test_mode = std::env::var("TEST_ARB") 72 | .map(|v| v == "1" || v == "true") 73 | .unwrap_or(false); 74 | 75 | Self { 76 | kalshi, 77 | poly_async, 78 | state, 79 | circuit_breaker, 80 | position_channel, 81 | in_flight: Arc::new(std::array::from_fn(|_| AtomicU64::new(0))), 82 | clock: NanoClock::new(), 83 | dry_run, 84 | test_mode, 85 | } 86 | } 87 | 88 | /// Process an execution request 89 | #[inline] 90 | pub async fn process(&self, req: FastExecutionRequest) -> Result { 91 | let market_id = req.market_id; 92 | 93 | // Deduplication check (512 markets via 8x u64 bitmask) 94 | if market_id < 512 { 95 | let slot = (market_id / 64) as usize; 96 | let bit = market_id % 64; 97 | let mask = 1u64 << bit; 98 | let prev = self.in_flight[slot].fetch_or(mask, Ordering::AcqRel); 99 | if prev & mask != 0 { 100 | return Ok(ExecutionResult { 101 | market_id, 102 | success: false, 103 | profit_cents: 0, 104 | latency_ns: self.clock.now_ns() - req.detected_ns, 105 | error: Some("Already in-flight"), 106 | }); 107 | } 108 | } 109 | 110 | // Get market pair 111 | let market = self.state.get_by_id(market_id) 112 | .ok_or_else(|| anyhow!("Unknown market_id {}", market_id))?; 113 | 114 | let pair = market.pair.as_ref() 115 | .ok_or_else(|| anyhow!("No pair for market_id {}", market_id))?; 116 | 117 | // Calculate profit 118 | let profit_cents = req.profit_cents(); 119 | if profit_cents < 1 { 120 | self.release_in_flight(market_id); 121 | return Ok(ExecutionResult { 122 | market_id, 123 | success: false, 124 | profit_cents: 0, 125 | latency_ns: self.clock.now_ns() - req.detected_ns, 126 | error: Some("Profit below threshold"), 127 | }); 128 | } 129 | 130 | // Calculate max contracts from size (min of both sides) 131 | let mut max_contracts = 
(req.yes_size.min(req.no_size) / 100) as i64; 132 | 133 | // Safety: In test mode, cap position size at 10 contracts 134 | // Note: Polymarket enforces a $1 minimum order value. At 40¢ per contract, 135 | // a single contract ($0.40) would be rejected. Using 10 contracts ensures 136 | // we meet the minimum requirement at any reasonable price level. 137 | if self.test_mode && max_contracts > 10 { 138 | warn!("[EXEC] ⚠️ TEST_MODE: Position size capped from {} to 10 contracts", max_contracts); 139 | max_contracts = 10; 140 | } 141 | 142 | if max_contracts < 1 { 143 | warn!( 144 | "[EXEC] Liquidity fail: {:?} | yes_size={}¢ no_size={}¢", 145 | req.arb_type, req.yes_size, req.no_size 146 | ); 147 | self.release_in_flight(market_id); 148 | return Ok(ExecutionResult { 149 | market_id, 150 | success: false, 151 | profit_cents: 0, 152 | latency_ns: self.clock.now_ns() - req.detected_ns, 153 | error: Some("Insufficient liquidity"), 154 | }); 155 | } 156 | 157 | // Circuit breaker check 158 | if let Err(_reason) = self.circuit_breaker.can_execute(&pair.pair_id, max_contracts).await { 159 | self.release_in_flight(market_id); 160 | return Ok(ExecutionResult { 161 | market_id, 162 | success: false, 163 | profit_cents: 0, 164 | latency_ns: self.clock.now_ns() - req.detected_ns, 165 | error: Some("Circuit breaker"), 166 | }); 167 | } 168 | 169 | let latency_to_exec = self.clock.now_ns() - req.detected_ns; 170 | info!( 171 | "[EXEC] 🎯 {} | {:?} y={}¢ n={}¢ | profit={}¢ | {}x | {}µs", 172 | pair.description, 173 | req.arb_type, 174 | req.yes_price, 175 | req.no_price, 176 | profit_cents, 177 | max_contracts, 178 | latency_to_exec / 1000 179 | ); 180 | 181 | if self.dry_run { 182 | info!("[EXEC] 🏃 DRY RUN - would execute {} contracts", max_contracts); 183 | self.release_in_flight_delayed(market_id); 184 | return Ok(ExecutionResult { 185 | market_id, 186 | success: true, 187 | profit_cents, 188 | latency_ns: latency_to_exec, 189 | error: Some("DRY_RUN"), 190 | }); 191 | } 192 | 193 | // Execute both legs concurrently 194 | let result = self.execute_both_legs_async(&req, pair, max_contracts).await; 195 | 196 | // Release in-flight after delay 197 | self.release_in_flight_delayed(market_id); 198 | 199 | match result { 200 | // Note: For same-platform arbs (PolyOnly/KalshiOnly), these are YES/NO fills, not platform fills 201 | Ok((yes_filled, no_filled, yes_cost, no_cost, yes_order_id, no_order_id)) => { 202 | let matched = yes_filled.min(no_filled); 203 | let success = matched > 0; 204 | let actual_profit = matched as i16 * 100 - (yes_cost + no_cost) as i16; 205 | 206 | // === Automatic exposure management for mismatched fills === 207 | // If one leg fills more than the other, automatically close the excess 208 | // to maintain market-neutral exposure (non-blocking background task) 209 | if yes_filled != no_filled && (yes_filled > 0 || no_filled > 0) { 210 | let excess = (yes_filled - no_filled).abs(); 211 | let (leg1_name, leg2_name) = match req.arb_type { 212 | ArbType::PolyYesKalshiNo => ("P_yes", "K_no"), 213 | ArbType::KalshiYesPolyNo => ("K_yes", "P_no"), 214 | ArbType::PolyOnly => ("P_yes", "P_no"), 215 | ArbType::KalshiOnly => ("K_yes", "K_no"), 216 | }; 217 | warn!("[EXEC] ⚠️ Fill mismatch: {}={} {}={} (excess={})", 218 | leg1_name, yes_filled, leg2_name, no_filled, excess); 219 | 220 | // Spawn auto-close in background (don't block hot path with 2s sleep) 221 | let kalshi = self.kalshi.clone(); 222 | let poly_async = self.poly_async.clone(); 223 | let arb_type = req.arb_type; 224 | let yes_price = 
req.yes_price; 225 | let no_price = req.no_price; 226 | let poly_yes_token = pair.poly_yes_token.clone(); 227 | let poly_no_token = pair.poly_no_token.clone(); 228 | let kalshi_ticker = pair.kalshi_market_ticker.clone(); 229 | let original_cost_per_contract = if yes_filled > no_filled { 230 | if yes_filled > 0 { yes_cost / yes_filled } else { 0 } 231 | } else { 232 | if no_filled > 0 { no_cost / no_filled } else { 0 } 233 | }; 234 | 235 | tokio::spawn(async move { 236 | Self::auto_close_background( 237 | kalshi, poly_async, arb_type, yes_filled, no_filled, 238 | yes_price, no_price, poly_yes_token, poly_no_token, 239 | kalshi_ticker, original_cost_per_contract 240 | ).await; 241 | }); 242 | } 243 | 244 | if success { 245 | self.circuit_breaker.record_success(&pair.pair_id, matched, matched, actual_profit as f64 / 100.0).await; 246 | } 247 | 248 | if matched > 0 { 249 | let (platform1, side1, platform2, side2) = match req.arb_type { 250 | ArbType::PolyYesKalshiNo => ("polymarket", "yes", "kalshi", "no"), 251 | ArbType::KalshiYesPolyNo => ("kalshi", "yes", "polymarket", "no"), 252 | ArbType::PolyOnly => ("polymarket", "yes", "polymarket", "no"), 253 | ArbType::KalshiOnly => ("kalshi", "yes", "kalshi", "no"), 254 | }; 255 | 256 | self.position_channel.record_fill(FillRecord::new( 257 | &pair.pair_id, &pair.description, platform1, side1, 258 | matched as f64, yes_cost as f64 / 100.0 / yes_filled.max(1) as f64, 259 | 0.0, &yes_order_id, 260 | )); 261 | self.position_channel.record_fill(FillRecord::new( 262 | &pair.pair_id, &pair.description, platform2, side2, 263 | matched as f64, no_cost as f64 / 100.0 / no_filled.max(1) as f64, 264 | 0.0, &no_order_id, 265 | )); 266 | } 267 | 268 | Ok(ExecutionResult { 269 | market_id, 270 | success, 271 | profit_cents: actual_profit, 272 | latency_ns: self.clock.now_ns() - req.detected_ns, 273 | error: if success { None } else { Some("Partial/no fill") }, 274 | }) 275 | } 276 | Err(_e) => { 277 | self.circuit_breaker.record_error().await; 278 | Ok(ExecutionResult { 279 | market_id, 280 | success: false, 281 | profit_cents: 0, 282 | latency_ns: self.clock.now_ns() - req.detected_ns, 283 | error: Some("Execution failed"), 284 | }) 285 | } 286 | } 287 | } 288 | 289 | async fn execute_both_legs_async( 290 | &self, 291 | req: &FastExecutionRequest, 292 | pair: &MarketPair, 293 | contracts: i64, 294 | ) -> Result<(i64, i64, i64, i64, String, String)> { 295 | match req.arb_type { 296 | // === CROSS-PLATFORM: Poly YES + Kalshi NO === 297 | ArbType::PolyYesKalshiNo => { 298 | let kalshi_fut = self.kalshi.buy_ioc( 299 | &pair.kalshi_market_ticker, 300 | "no", 301 | req.no_price as i64, 302 | contracts, 303 | ); 304 | let poly_fut = self.poly_async.buy_fak( 305 | &pair.poly_yes_token, 306 | cents_to_price(req.yes_price), 307 | contracts as f64, 308 | ); 309 | let (kalshi_res, poly_res) = tokio::join!(kalshi_fut, poly_fut); 310 | self.extract_cross_results(kalshi_res, poly_res) 311 | } 312 | 313 | // === CROSS-PLATFORM: Kalshi YES + Poly NO === 314 | ArbType::KalshiYesPolyNo => { 315 | let kalshi_fut = self.kalshi.buy_ioc( 316 | &pair.kalshi_market_ticker, 317 | "yes", 318 | req.yes_price as i64, 319 | contracts, 320 | ); 321 | let poly_fut = self.poly_async.buy_fak( 322 | &pair.poly_no_token, 323 | cents_to_price(req.no_price), 324 | contracts as f64, 325 | ); 326 | let (kalshi_res, poly_res) = tokio::join!(kalshi_fut, poly_fut); 327 | self.extract_cross_results(kalshi_res, poly_res) 328 | } 329 | 330 | // === SAME-PLATFORM: Poly YES + Poly NO === 331 | 
ArbType::PolyOnly => { 332 | let yes_fut = self.poly_async.buy_fak( 333 | &pair.poly_yes_token, 334 | cents_to_price(req.yes_price), 335 | contracts as f64, 336 | ); 337 | let no_fut = self.poly_async.buy_fak( 338 | &pair.poly_no_token, 339 | cents_to_price(req.no_price), 340 | contracts as f64, 341 | ); 342 | let (yes_res, no_res) = tokio::join!(yes_fut, no_fut); 343 | self.extract_poly_only_results(yes_res, no_res) 344 | } 345 | 346 | // === SAME-PLATFORM: Kalshi YES + Kalshi NO === 347 | ArbType::KalshiOnly => { 348 | let yes_fut = self.kalshi.buy_ioc( 349 | &pair.kalshi_market_ticker, 350 | "yes", 351 | req.yes_price as i64, 352 | contracts, 353 | ); 354 | let no_fut = self.kalshi.buy_ioc( 355 | &pair.kalshi_market_ticker, 356 | "no", 357 | req.no_price as i64, 358 | contracts, 359 | ); 360 | let (yes_res, no_res) = tokio::join!(yes_fut, no_fut); 361 | self.extract_kalshi_only_results(yes_res, no_res) 362 | } 363 | } 364 | } 365 | 366 | /// Extract results from cross-platform execution 367 | fn extract_cross_results( 368 | &self, 369 | kalshi_res: Result, 370 | poly_res: Result, 371 | ) -> Result<(i64, i64, i64, i64, String, String)> { 372 | let (kalshi_filled, kalshi_cost, kalshi_order_id) = match kalshi_res { 373 | Ok(resp) => { 374 | let filled = resp.order.filled_count(); 375 | let cost = resp.order.taker_fill_cost.unwrap_or(0) + resp.order.maker_fill_cost.unwrap_or(0); 376 | (filled, cost, resp.order.order_id) 377 | } 378 | Err(e) => { 379 | warn!("[EXEC] Kalshi failed: {}", e); 380 | (0, 0, String::new()) 381 | } 382 | }; 383 | 384 | let (poly_filled, poly_cost, poly_order_id) = match poly_res { 385 | Ok(fill) => { 386 | ((fill.filled_size as i64), (fill.fill_cost * 100.0) as i64, fill.order_id) 387 | } 388 | Err(e) => { 389 | warn!("[EXEC] Poly failed: {}", e); 390 | (0, 0, String::new()) 391 | } 392 | }; 393 | 394 | Ok((kalshi_filled, poly_filled, kalshi_cost, poly_cost, kalshi_order_id, poly_order_id)) 395 | } 396 | 397 | /// Extract results from Poly-only execution (same-platform) 398 | fn extract_poly_only_results( 399 | &self, 400 | yes_res: Result, 401 | no_res: Result, 402 | ) -> Result<(i64, i64, i64, i64, String, String)> { 403 | let (yes_filled, yes_cost, yes_order_id) = match yes_res { 404 | Ok(fill) => { 405 | ((fill.filled_size as i64), (fill.fill_cost * 100.0) as i64, fill.order_id) 406 | } 407 | Err(e) => { 408 | warn!("[EXEC] Poly YES failed: {}", e); 409 | (0, 0, String::new()) 410 | } 411 | }; 412 | 413 | let (no_filled, no_cost, no_order_id) = match no_res { 414 | Ok(fill) => { 415 | ((fill.filled_size as i64), (fill.fill_cost * 100.0) as i64, fill.order_id) 416 | } 417 | Err(e) => { 418 | warn!("[EXEC] Poly NO failed: {}", e); 419 | (0, 0, String::new()) 420 | } 421 | }; 422 | 423 | // For same-platform, return YES as "kalshi" slot and NO as "poly" slot 424 | // This keeps the existing result handling logic working 425 | Ok((yes_filled, no_filled, yes_cost, no_cost, yes_order_id, no_order_id)) 426 | } 427 | 428 | /// Extract results from Kalshi-only execution (same-platform) 429 | fn extract_kalshi_only_results( 430 | &self, 431 | yes_res: Result, 432 | no_res: Result, 433 | ) -> Result<(i64, i64, i64, i64, String, String)> { 434 | let (yes_filled, yes_cost, yes_order_id) = match yes_res { 435 | Ok(resp) => { 436 | let filled = resp.order.filled_count(); 437 | let cost = resp.order.taker_fill_cost.unwrap_or(0) + resp.order.maker_fill_cost.unwrap_or(0); 438 | (filled, cost, resp.order.order_id) 439 | } 440 | Err(e) => { 441 | warn!("[EXEC] Kalshi YES failed: 
{}", e); 442 | (0, 0, String::new()) 443 | } 444 | }; 445 | 446 | let (no_filled, no_cost, no_order_id) = match no_res { 447 | Ok(resp) => { 448 | let filled = resp.order.filled_count(); 449 | let cost = resp.order.taker_fill_cost.unwrap_or(0) + resp.order.maker_fill_cost.unwrap_or(0); 450 | (filled, cost, resp.order.order_id) 451 | } 452 | Err(e) => { 453 | warn!("[EXEC] Kalshi NO failed: {}", e); 454 | (0, 0, String::new()) 455 | } 456 | }; 457 | 458 | // For same-platform, return YES as "kalshi" slot and NO as "poly" slot 459 | Ok((yes_filled, no_filled, yes_cost, no_cost, yes_order_id, no_order_id)) 460 | } 461 | 462 | /// Background task to automatically close excess exposure from mismatched fills 463 | async fn auto_close_background( 464 | kalshi: Arc, 465 | poly_async: Arc, 466 | arb_type: ArbType, 467 | yes_filled: i64, 468 | no_filled: i64, 469 | yes_price: u16, 470 | no_price: u16, 471 | poly_yes_token: Arc, 472 | poly_no_token: Arc, 473 | kalshi_ticker: Arc, 474 | original_cost_per_contract: i64, 475 | ) { 476 | let excess = (yes_filled - no_filled).abs(); 477 | if excess == 0 { 478 | return; 479 | } 480 | 481 | // Helper to log P&L after close 482 | let log_close_pnl = |platform: &str, closed: i64, proceeds: i64| { 483 | if closed > 0 { 484 | let close_pnl = proceeds - (original_cost_per_contract * excess); 485 | info!("[EXEC] ✅ Closed {} {} contracts for {}¢ (P&L: {}¢)", 486 | closed, platform, proceeds, close_pnl); 487 | } else { 488 | warn!("[EXEC] ⚠️ Failed to close {} excess - 0 filled", platform); 489 | } 490 | }; 491 | 492 | match arb_type { 493 | ArbType::PolyOnly => { 494 | let (token, side, price) = if yes_filled > no_filled { 495 | (&poly_yes_token, "yes", yes_price) 496 | } else { 497 | (&poly_no_token, "no", no_price) 498 | }; 499 | let close_price = cents_to_price((price as i16).saturating_sub(10).max(1) as u16); 500 | 501 | info!("[EXEC] 🔄 Waiting 2s for Poly settlement before auto-close ({} {} contracts)", excess, side); 502 | tokio::time::sleep(Duration::from_secs(2)).await; 503 | 504 | match poly_async.sell_fak(token, close_price, excess as f64).await { 505 | Ok(fill) => log_close_pnl("Poly", fill.filled_size as i64, (fill.fill_cost * 100.0) as i64), 506 | Err(e) => warn!("[EXEC] ⚠️ Failed to close Poly excess: {}", e), 507 | } 508 | } 509 | 510 | ArbType::KalshiOnly => { 511 | let (side, price) = if yes_filled > no_filled { 512 | ("yes", yes_price as i64) 513 | } else { 514 | ("no", no_price as i64) 515 | }; 516 | let close_price = price.saturating_sub(10).max(1); 517 | 518 | match kalshi.sell_ioc(&kalshi_ticker, side, close_price, excess).await { 519 | Ok(resp) => { 520 | let proceeds = resp.order.taker_fill_cost.unwrap_or(0) + resp.order.maker_fill_cost.unwrap_or(0); 521 | log_close_pnl("Kalshi", resp.order.filled_count(), proceeds); 522 | } 523 | Err(e) => warn!("[EXEC] ⚠️ Failed to close Kalshi excess: {}", e), 524 | } 525 | } 526 | 527 | ArbType::PolyYesKalshiNo => { 528 | if yes_filled > no_filled { 529 | // Poly YES excess 530 | let close_price = cents_to_price((yes_price as i16).saturating_sub(10).max(1) as u16); 531 | info!("[EXEC] 🔄 Waiting 2s for Poly settlement before auto-close ({} yes contracts)", excess); 532 | tokio::time::sleep(Duration::from_secs(2)).await; 533 | 534 | match poly_async.sell_fak(&poly_yes_token, close_price, excess as f64).await { 535 | Ok(fill) => log_close_pnl("Poly", fill.filled_size as i64, (fill.fill_cost * 100.0) as i64), 536 | Err(e) => warn!("[EXEC] ⚠️ Failed to close Poly excess: {}", e), 537 | } 538 | } else { 539 | 
// Kalshi NO excess 540 | let close_price = (no_price as i64).saturating_sub(10).max(1); 541 | match kalshi.sell_ioc(&kalshi_ticker, "no", close_price, excess).await { 542 | Ok(resp) => { 543 | let proceeds = resp.order.taker_fill_cost.unwrap_or(0) + resp.order.maker_fill_cost.unwrap_or(0); 544 | log_close_pnl("Kalshi", resp.order.filled_count(), proceeds); 545 | } 546 | Err(e) => warn!("[EXEC] ⚠️ Failed to close Kalshi excess: {}", e), 547 | } 548 | } 549 | } 550 | 551 | ArbType::KalshiYesPolyNo => { 552 | if yes_filled > no_filled { 553 | // Kalshi YES excess 554 | let close_price = (yes_price as i64).saturating_sub(10).max(1); 555 | match kalshi.sell_ioc(&kalshi_ticker, "yes", close_price, excess).await { 556 | Ok(resp) => { 557 | let proceeds = resp.order.taker_fill_cost.unwrap_or(0) + resp.order.maker_fill_cost.unwrap_or(0); 558 | log_close_pnl("Kalshi", resp.order.filled_count(), proceeds); 559 | } 560 | Err(e) => warn!("[EXEC] ⚠️ Failed to close Kalshi excess: {}", e), 561 | } 562 | } else { 563 | // Poly NO excess 564 | let close_price = cents_to_price((no_price as i16).saturating_sub(10).max(1) as u16); 565 | info!("[EXEC] 🔄 Waiting 2s for Poly settlement before auto-close ({} no contracts)", excess); 566 | tokio::time::sleep(Duration::from_secs(2)).await; 567 | 568 | match poly_async.sell_fak(&poly_no_token, close_price, excess as f64).await { 569 | Ok(fill) => log_close_pnl("Poly", fill.filled_size as i64, (fill.fill_cost * 100.0) as i64), 570 | Err(e) => warn!("[EXEC] ⚠️ Failed to close Poly excess: {}", e), 571 | } 572 | } 573 | } 574 | } 575 | } 576 | 577 | #[inline(always)] 578 | fn release_in_flight(&self, market_id: u16) { 579 | if market_id < 512 { 580 | let slot = (market_id / 64) as usize; 581 | let bit = market_id % 64; 582 | let mask = !(1u64 << bit); 583 | self.in_flight[slot].fetch_and(mask, Ordering::Release); 584 | } 585 | } 586 | 587 | fn release_in_flight_delayed(&self, market_id: u16) { 588 | if market_id < 512 { 589 | let in_flight = self.in_flight.clone(); 590 | let slot = (market_id / 64) as usize; 591 | let bit = market_id % 64; 592 | tokio::spawn(async move { 593 | tokio::time::sleep(Duration::from_secs(10)).await; 594 | let mask = !(1u64 << bit); 595 | in_flight[slot].fetch_and(mask, Ordering::Release); 596 | }); 597 | } 598 | } 599 | } 600 | 601 | /// Result of an execution attempt 602 | #[derive(Debug, Clone, Copy)] 603 | pub struct ExecutionResult { 604 | /// Market identifier 605 | pub market_id: u16, 606 | /// Whether execution was successful 607 | pub success: bool, 608 | /// Realized profit in cents 609 | pub profit_cents: i16, 610 | /// Total latency from detection to completion in nanoseconds 611 | pub latency_ns: u64, 612 | /// Error message if execution failed 613 | pub error: Option<&'static str>, 614 | } 615 | 616 | /// Create a new execution request channel with bounded capacity 617 | pub fn create_execution_channel() -> (mpsc::Sender, mpsc::Receiver) { 618 | mpsc::channel(256) 619 | } 620 | 621 | /// Main execution event loop - processes arbitrage opportunities as they arrive 622 | pub async fn run_execution_loop( 623 | mut rx: mpsc::Receiver, 624 | engine: Arc, 625 | ) { 626 | info!("[EXEC] Execution engine started (dry_run={})", engine.dry_run); 627 | 628 | while let Some(req) = rx.recv().await { 629 | let engine = engine.clone(); 630 | 631 | // Process immediately in spawned task 632 | tokio::spawn(async move { 633 | match engine.process(req).await { 634 | Ok(result) if result.success => { 635 | info!( 636 | "[EXEC] ✅ market_id={} 
profit={}¢ latency={}µs", 637 | result.market_id, result.profit_cents, result.latency_ns / 1000 638 | ); 639 | } 640 | Ok(result) => { 641 | if result.error != Some("Already in-flight") { 642 | warn!( 643 | "[EXEC] ⚠️ market_id={}: {:?}", 644 | result.market_id, result.error 645 | ); 646 | } 647 | } 648 | Err(e) => { 649 | error!("[EXEC] ❌ Error: {}", e); 650 | } 651 | } 652 | }); 653 | } 654 | 655 | info!("[EXEC] Execution engine stopped"); 656 | } -------------------------------------------------------------------------------- /src/polymarket_clob.rs: -------------------------------------------------------------------------------- 1 | //! Polymarket CLOB (Central Limit Order Book) order execution client. 2 | //! 3 | //! This module provides high-performance order execution for the Polymarket CLOB, 4 | //! including pre-computed authentication credentials and optimized request handling. 5 | 6 | use std::time::{SystemTime, UNIX_EPOCH}; 7 | 8 | use anyhow::{Result, anyhow}; 9 | use base64::Engine; 10 | use base64::engine::general_purpose::URL_SAFE; 11 | use ethers::signers::{LocalWallet, Signer}; 12 | use ethers::types::H256; 13 | use ethers::types::transaction::eip712::{Eip712, TypedData}; 14 | use ethers::types::U256; 15 | use hmac::{Hmac, Mac}; 16 | use reqwest::header::{HeaderMap, HeaderValue}; 17 | use serde::{Deserialize, Serialize}; 18 | use serde_json::json; 19 | use sha2::Sha256; 20 | use std::collections::HashMap; 21 | use std::sync::Arc; 22 | 23 | const USER_AGENT: &str = "py_clob_client"; 24 | const MSG_TO_SIGN: &str = "This message attests that I control the given wallet"; 25 | const ZERO_ADDRESS: &str = "0x0000000000000000000000000000000000000000"; 26 | 27 | // ============================================================================ 28 | // PRE-COMPUTED EIP712 CONSTANTS 29 | // ============================================================================ 30 | 31 | type HmacSha256 = Hmac; 32 | 33 | #[derive(Debug, Clone, Serialize, Deserialize)] 34 | pub struct ApiCreds { 35 | #[serde(rename = "apiKey")] 36 | pub api_key: String, 37 | #[serde(rename = "secret")] 38 | pub api_secret: String, 39 | #[serde(rename = "passphrase")] 40 | pub api_passphrase: String, 41 | } 42 | 43 | // ============================================================================ 44 | // PREPARED CREDENTIALS 45 | // ============================================================================ 46 | 47 | #[derive(Clone)] 48 | pub struct PreparedCreds { 49 | pub api_key: String, 50 | hmac_template: HmacSha256, 51 | api_key_header: HeaderValue, 52 | passphrase_header: HeaderValue, 53 | } 54 | 55 | impl PreparedCreds { 56 | pub fn from_api_creds(creds: &ApiCreds) -> Result { 57 | let decoded_secret = URL_SAFE.decode(&creds.api_secret)?; 58 | let hmac_template = HmacSha256::new_from_slice(&decoded_secret) 59 | .map_err(|e| anyhow!("Invalid HMAC key: {}", e))?; 60 | 61 | let api_key_header = HeaderValue::from_str(&creds.api_key) 62 | .map_err(|e| anyhow!("Invalid API key for header: {}", e))?; 63 | let passphrase_header = HeaderValue::from_str(&creds.api_passphrase) 64 | .map_err(|e| anyhow!("Invalid passphrase for header: {}", e))?; 65 | 66 | Ok(Self { 67 | api_key: creds.api_key.clone(), 68 | hmac_template, 69 | api_key_header, 70 | passphrase_header, 71 | }) 72 | } 73 | 74 | /// Sign message using prewarmed HMAC 75 | #[inline] 76 | pub fn sign(&self, message: &[u8]) -> Vec { 77 | let mut mac = self.hmac_template.clone(); 78 | mac.update(message); 79 | mac.finalize().into_bytes().to_vec() 80 | } 81 | 82 | 
/// Sign and return base64 (for L2 headers) 83 | #[inline] 84 | pub fn sign_b64(&self, message: &[u8]) -> String { 85 | URL_SAFE.encode(self.sign(message)) 86 | } 87 | 88 | /// Get cached API key header 89 | #[inline] 90 | pub fn api_key_header(&self) -> HeaderValue { 91 | self.api_key_header.clone() 92 | } 93 | 94 | /// Get cached passphrase header 95 | #[inline] 96 | pub fn passphrase_header(&self) -> HeaderValue { 97 | self.passphrase_header.clone() 98 | } 99 | } 100 | 101 | fn add_default_headers(headers: &mut HeaderMap) { 102 | headers.insert("User-Agent", HeaderValue::from_static(USER_AGENT)); 103 | headers.insert("Accept", HeaderValue::from_static("*/*")); 104 | headers.insert("Connection", HeaderValue::from_static("keep-alive")); 105 | headers.insert("Content-Type", HeaderValue::from_static("application/json")); 106 | } 107 | 108 | #[inline(always)] 109 | fn current_unix_ts() -> u64 { 110 | SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() 111 | } 112 | 113 | fn clob_auth_digest(chain_id: u64, address_str: &str, timestamp: u64, nonce: u64) -> Result { 114 | let typed_json = json!({ 115 | "types": { 116 | "EIP712Domain": [ 117 | {"name": "name", "type": "string"}, 118 | {"name": "version", "type": "string"}, 119 | {"name": "chainId", "type": "uint256"} 120 | ], 121 | "ClobAuth": [ 122 | {"name": "address", "type": "address"}, 123 | {"name": "timestamp", "type": "string"}, 124 | {"name": "nonce", "type": "uint256"}, 125 | {"name": "message", "type": "string"} 126 | ] 127 | }, 128 | "primaryType": "ClobAuth", 129 | "domain": { "name": "ClobAuthDomain", "version": "1", "chainId": chain_id }, 130 | "message": { "address": address_str, "timestamp": timestamp.to_string(), "nonce": nonce, "message": MSG_TO_SIGN } 131 | }); 132 | let typed: TypedData = serde_json::from_value(typed_json)?; 133 | Ok(typed.encode_eip712()?.into()) 134 | } 135 | 136 | #[derive(Debug, Clone)] 137 | #[allow(dead_code)] 138 | pub struct OrderArgs { 139 | pub token_id: String, 140 | pub price: f64, 141 | pub size: f64, 142 | pub side: String, 143 | pub fee_rate_bps: Option, 144 | pub nonce: Option, 145 | pub expiration: Option, 146 | pub taker: Option, 147 | } 148 | 149 | /// Order data for EIP712 signing (references to avoid clones in hot path) 150 | struct OrderData<'a> { 151 | maker: &'a str, 152 | taker: &'a str, 153 | token_id: &'a str, 154 | maker_amount: &'a str, 155 | taker_amount: &'a str, 156 | side: i32, 157 | fee_rate_bps: &'a str, 158 | nonce: &'a str, 159 | signer: &'a str, 160 | expiration: &'a str, 161 | signature_type: i32, 162 | salt: u128 163 | } 164 | 165 | #[derive(Debug, Clone, Serialize)] 166 | pub struct OrderStruct { 167 | pub salt: u128, 168 | pub maker: String, 169 | pub signer: String, 170 | pub taker: String, 171 | #[serde(rename = "tokenId")] 172 | pub token_id: String, 173 | #[serde(rename = "makerAmount")] 174 | pub maker_amount: String, 175 | #[serde(rename = "takerAmount")] 176 | pub taker_amount: String, 177 | pub expiration: String, 178 | pub nonce: String, 179 | #[serde(rename = "feeRateBps")] 180 | pub fee_rate_bps: String, 181 | pub side: i32, 182 | #[serde(rename = "signatureType")] 183 | pub signature_type: i32, 184 | } 185 | 186 | #[derive(Debug, Clone, Serialize)] 187 | pub struct SignedOrder { 188 | pub order: OrderStruct, 189 | pub signature: String 190 | } 191 | 192 | impl SignedOrder { 193 | pub fn post_body(&self, owner: &str, order_type: &str) -> String { 194 | let side_str = if self.order.side == 0 { "BUY" } else { "SELL" }; 195 | let mut buf = 
String::with_capacity(512); 196 | buf.push_str(r#"{"order":{"salt":"#); 197 | buf.push_str(&self.order.salt.to_string()); 198 | buf.push_str(r#","maker":""#); 199 | buf.push_str(&self.order.maker); 200 | buf.push_str(r#"","signer":""#); 201 | buf.push_str(&self.order.signer); 202 | buf.push_str(r#"","taker":""#); 203 | buf.push_str(&self.order.taker); 204 | buf.push_str(r#"","tokenId":""#); 205 | buf.push_str(&self.order.token_id); 206 | buf.push_str(r#"","makerAmount":""#); 207 | buf.push_str(&self.order.maker_amount); 208 | buf.push_str(r#"","takerAmount":""#); 209 | buf.push_str(&self.order.taker_amount); 210 | buf.push_str(r#"","expiration":""#); 211 | buf.push_str(&self.order.expiration); 212 | buf.push_str(r#"","nonce":""#); 213 | buf.push_str(&self.order.nonce); 214 | buf.push_str(r#"","feeRateBps":""#); 215 | buf.push_str(&self.order.fee_rate_bps); 216 | buf.push_str(r#"","side":""#); 217 | buf.push_str(side_str); 218 | buf.push_str(r#"","signatureType":"#); 219 | buf.push_str(&self.order.signature_type.to_string()); 220 | buf.push_str(r#","signature":""#); 221 | buf.push_str(&self.signature); 222 | buf.push_str(r#""},"owner":""#); 223 | buf.push_str(owner); 224 | buf.push_str(r#"","orderType":""#); 225 | buf.push_str(order_type); 226 | buf.push_str(r#""}"#); 227 | buf 228 | } 229 | } 230 | 231 | #[inline(always)] 232 | fn generate_seed() -> u128 { 233 | (SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos() % u128::from(u32::MAX)) as u128 234 | } 235 | 236 | // ============================================================================ 237 | // ORDER CALCULATIONS 238 | // ============================================================================ 239 | 240 | /// Convert f64 price (0.0-1.0) to basis points (0-10000) 241 | /// e.g., 0.65 -> 6500 242 | #[inline(always)] 243 | pub fn price_to_bps(price: f64) -> u64 { 244 | ((price * 10000.0).round() as i64).max(0) as u64 245 | } 246 | 247 | /// Convert f64 size to micro-units (6 decimal places) 248 | /// e.g., 100.5 -> 100_500_000 249 | #[inline(always)] 250 | pub fn size_to_micro(size: f64) -> u64 { 251 | ((size * 1_000_000.0).floor() as i64).max(0) as u64 252 | } 253 | 254 | /// BUY order calculation 255 | /// Input: size in micro-units, price in basis points 256 | /// Output: (side=0, maker_amount, taker_amount) in token decimals (6 dp) 257 | #[inline(always)] 258 | pub fn get_order_amounts_buy(size_micro: u64, price_bps: u64) -> (i32, u128, u128) { 259 | // For BUY: taker = size (what we receive), maker = size * price (what we pay) 260 | let taker = size_micro as u128; 261 | // maker = size * price / 10000 (convert bps to ratio) 262 | let maker = (size_micro as u128 * price_bps as u128) / 10000; 263 | (0, maker, taker) 264 | } 265 | 266 | /// SELL order calculation 267 | /// Input: size in micro-units, price in basis points 268 | /// Output: (side=1, maker_amount, taker_amount) in token decimals (6 dp) 269 | #[inline(always)] 270 | pub fn get_order_amounts_sell(size_micro: u64, price_bps: u64) -> (i32, u128, u128) { 271 | // For SELL: maker = size (what we give), taker = size * price (what we receive) 272 | let maker = size_micro as u128; 273 | // taker = size * price / 10000 (convert bps to ratio) 274 | let taker = (size_micro as u128 * price_bps as u128) / 10000; 275 | (1, maker, taker) 276 | } 277 | 278 | /// Validate price is within allowed range for tick=0.01 279 | #[inline(always)] 280 | pub fn price_valid(price_bps: u64) -> bool { 281 | // For tick=0.01: price must be >= 0.01 (100 bps) and <= 0.99 (9900 bps) 
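        // e.g. price_to_bps(0.65) == 6500, which passes, while price_to_bps(0.995) == 9950
        // is outside the 100..=9900 window and fails the check.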
282 | price_bps >= 100 && price_bps <= 9900 283 | } 284 | 285 | fn order_typed_data(chain_id: u64, exchange: &str, data: &OrderData<'_>) -> Result { 286 | let typed_json = json!({ 287 | "types": { 288 | "EIP712Domain": [ 289 | {"name": "name", "type": "string"}, 290 | {"name": "version", "type": "string"}, 291 | {"name": "chainId", "type": "uint256"}, 292 | {"name": "verifyingContract", "type": "address"} 293 | ], 294 | "Order": [ 295 | {"name":"salt","type":"uint256"}, 296 | {"name":"maker","type":"address"}, 297 | {"name":"signer","type":"address"}, 298 | {"name":"taker","type":"address"}, 299 | {"name":"tokenId","type":"uint256"}, 300 | {"name":"makerAmount","type":"uint256"}, 301 | {"name":"takerAmount","type":"uint256"}, 302 | {"name":"expiration","type":"uint256"}, 303 | {"name":"nonce","type":"uint256"}, 304 | {"name":"feeRateBps","type":"uint256"}, 305 | {"name":"side","type":"uint8"}, 306 | {"name":"signatureType","type":"uint8"} 307 | ] 308 | }, 309 | "primaryType": "Order", 310 | "domain": { "name": "Polymarket CTF Exchange", "version": "1", "chainId": chain_id, "verifyingContract": exchange }, 311 | "message": { 312 | "salt": U256::from(data.salt), 313 | "maker": data.maker, 314 | "signer": data.signer, 315 | "taker": data.taker, 316 | "tokenId": U256::from_dec_str(data.token_id)?, 317 | "makerAmount": U256::from_dec_str(data.maker_amount)?, 318 | "takerAmount": U256::from_dec_str(data.taker_amount)?, 319 | "expiration": U256::from_dec_str(data.expiration)?, 320 | "nonce": U256::from_dec_str(data.nonce)?, 321 | "feeRateBps": U256::from_dec_str(data.fee_rate_bps)?, 322 | "side": data.side, 323 | "signatureType": data.signature_type, 324 | } 325 | }); 326 | Ok(serde_json::from_value(typed_json)?) 327 | } 328 | 329 | fn get_exchange_address(chain_id: u64, neg_risk: bool) -> Result { 330 | match (chain_id, neg_risk) { 331 | (137, true) => Ok("0xC5d563A36AE78145C45a50134d48A1215220f80a".into()), 332 | (137, false) => Ok("0x4bFb41d5B3570DeFd03C39a9A4D8dE6Bd8B8982E".into()), 333 | (80002, true) => Ok("0xd91E80cF2E7be2e162c6513ceD06f1dD0dA35296".into()), 334 | (80002, false) => Ok("0xdFE02Eb6733538f8Ea35D585af8DE5958AD99E40".into()), 335 | _ => Err(anyhow!("unsupported chain")), 336 | } 337 | } 338 | 339 | // ============================================================================ 340 | // ORDER TYPES FOR FAK/FOK 341 | // ============================================================================ 342 | 343 | /// Order type for Polymarket 344 | #[derive(Debug, Clone, Copy)] 345 | #[allow(dead_code)] 346 | pub enum PolyOrderType { 347 | /// Good Till Cancelled (default) 348 | GTC, 349 | /// Good Till Time 350 | GTD, 351 | /// Fill Or Kill - must fill entirely or cancel 352 | FOK, 353 | /// Fill And Kill - fill what you can, cancel rest 354 | FAK, 355 | } 356 | 357 | impl PolyOrderType { 358 | pub fn as_str(&self) -> &'static str { 359 | match self { 360 | PolyOrderType::GTC => "GTC", 361 | PolyOrderType::GTD => "GTD", 362 | PolyOrderType::FOK => "FOK", 363 | PolyOrderType::FAK => "FAK", 364 | } 365 | } 366 | } 367 | 368 | // ============================================================================ 369 | // GET ORDER RESPONSE 370 | // ============================================================================ 371 | 372 | /// Response from GET /data/order/{order_id} 373 | #[derive(Debug, Clone, Deserialize)] 374 | #[allow(dead_code)] 375 | pub struct PolymarketOrderResponse { 376 | pub id: String, 377 | pub status: String, 378 | pub market: Option, 379 | pub outcome: Option, 380 | 
pub price: String, 381 | pub side: String, 382 | pub size_matched: String, 383 | pub original_size: String, 384 | pub maker_address: Option, 385 | pub asset_id: Option, 386 | #[serde(default)] 387 | pub associate_trades: Vec, 388 | #[serde(default)] 389 | pub created_at: Option, // Can be string or integer 390 | #[serde(default)] 391 | pub expiration: Option, // Can be string or integer 392 | #[serde(rename = "type")] 393 | pub order_type: Option, 394 | pub owner: Option, 395 | } 396 | 397 | // ============================================================================ 398 | // ASYNC CLIENT 399 | // ============================================================================ 400 | 401 | /// Async Polymarket client for execution 402 | pub struct PolymarketAsyncClient { 403 | host: String, 404 | chain_id: u64, 405 | http: reqwest::Client, // Async client with connection pooling 406 | wallet: Arc, 407 | funder: String, 408 | wallet_address_str: String, 409 | address_header: HeaderValue, 410 | } 411 | 412 | impl PolymarketAsyncClient { 413 | pub fn new(host: &str, chain_id: u64, private_key: &str, funder: &str) -> Result { 414 | let wallet = private_key.parse::()?.with_chain_id(chain_id); 415 | let wallet_address_str = format!("{:?}", wallet.address()); 416 | let address_header = HeaderValue::from_str(&wallet_address_str) 417 | .map_err(|e| anyhow!("Invalid wallet address for header: {}", e))?; 418 | 419 | // Build async client with connection pooling and keepalive 420 | let http = reqwest::Client::builder() 421 | .pool_max_idle_per_host(10) 422 | .pool_idle_timeout(std::time::Duration::from_secs(90)) 423 | .tcp_keepalive(std::time::Duration::from_secs(30)) 424 | .tcp_nodelay(true) 425 | .timeout(std::time::Duration::from_secs(10)) 426 | .build()?; 427 | 428 | Ok(Self { 429 | host: host.trim_end_matches('/').to_string(), 430 | chain_id, 431 | http, 432 | wallet: Arc::new(wallet), 433 | funder: funder.to_string(), 434 | wallet_address_str, 435 | address_header, 436 | }) 437 | } 438 | 439 | /// Build L1 headers for authentication (derive-api-key) 440 | /// wallet.sign_hash() is CPU-bound (~1ms), safe to call in async context 441 | fn build_l1_headers(&self, nonce: u64) -> Result { 442 | let timestamp = current_unix_ts(); 443 | let digest = clob_auth_digest(self.chain_id, &self.wallet_address_str, timestamp, nonce)?; 444 | let sig = self.wallet.sign_hash(digest)?; 445 | let mut headers = HeaderMap::new(); 446 | headers.insert("POLY_ADDRESS", self.address_header.clone()); 447 | headers.insert("POLY_SIGNATURE", HeaderValue::from_str(&format!("0x{}", sig))?); 448 | headers.insert("POLY_TIMESTAMP", HeaderValue::from_str(&timestamp.to_string())?); 449 | headers.insert("POLY_NONCE", HeaderValue::from_str(&nonce.to_string())?); 450 | add_default_headers(&mut headers); 451 | Ok(headers) 452 | } 453 | 454 | /// Derive API credentials from L1 wallet signature 455 | pub async fn derive_api_key(&self, nonce: u64) -> Result { 456 | let url = format!("{}/auth/derive-api-key", self.host); 457 | let headers = self.build_l1_headers(nonce)?; 458 | let resp = self.http.get(&url).headers(headers).send().await?; 459 | if !resp.status().is_success() { 460 | let status = resp.status(); 461 | let body = resp.text().await.unwrap_or_default(); 462 | return Err(anyhow!("derive-api-key failed: {} {}", status, body)); 463 | } 464 | Ok(resp.json().await?)
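        // Callers typically wrap the returned ApiCreds in PreparedCreds::from_api_creds so the
        // HMAC secret is decoded once and reused for every L2-signed request.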
454 |     /// Derive API credentials from L1 wallet signature; the response is returned as raw JSON
455 |     pub async fn derive_api_key(&self, nonce: u64) -> Result<serde_json::Value> {
456 |         let url = format!("{}/auth/derive-api-key", self.host);
457 |         let headers = self.build_l1_headers(nonce)?;
458 |         let resp = self.http.get(&url).headers(headers).send().await?;
459 |         if !resp.status().is_success() {
460 |             let status = resp.status();
461 |             let body = resp.text().await.unwrap_or_default();
462 |             return Err(anyhow!("derive-api-key failed: {} {}", status, body));
463 |         }
464 |         Ok(resp.json().await?)
465 |     }
466 | 
467 |     /// Build L2 headers for authenticated requests
468 |     fn build_l2_headers(&self, method: &str, path: &str, body: Option<&str>, creds: &PreparedCreds) -> Result<HeaderMap> {
469 |         let timestamp = current_unix_ts();
470 |         let mut message = format!("{}{}{}", timestamp, method, path);
471 |         if let Some(b) = body { message.push_str(b); }
472 | 
473 |         let sig_b64 = creds.sign_b64(message.as_bytes());
474 | 
475 |         let mut headers = HeaderMap::with_capacity(9);
476 |         headers.insert("POLY_ADDRESS", self.address_header.clone());
477 |         headers.insert("POLY_SIGNATURE", HeaderValue::from_str(&sig_b64)?);
478 |         headers.insert("POLY_TIMESTAMP", HeaderValue::from_str(&timestamp.to_string())?);
479 |         headers.insert("POLY_API_KEY", creds.api_key_header());
480 |         headers.insert("POLY_PASSPHRASE", creds.passphrase_header());
481 |         add_default_headers(&mut headers);
482 |         Ok(headers)
483 |     }
484 |
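    // Illustrative sketch: the L2 preimage is the plain concatenation built above,
    // timestamp + method + path + raw body with no separators. For a POST /order
    // at unix time 1700000000 it would be:
    //
    //     "1700000000POST/order{\"order\":{...},\"owner\":\"...\",\"orderType\":\"FAK\"}"
    //
    // `PreparedCreds::sign_b64` (defined elsewhere) is assumed to HMAC-SHA256 this
    // string with the API secret and base64-encode the result for POLY_SIGNATURE.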
485 |     /// Post order
486 |     pub async fn post_order_async(&self, body: String, creds: &PreparedCreds) -> Result<reqwest::Response> {
487 |         let path = "/order";
488 |         let url = format!("{}{}", self.host, path);
489 |         let headers = self.build_l2_headers("POST", path, Some(&body), creds)?;
490 | 
491 |         let resp = self.http
492 |             .post(&url)
493 |             .headers(headers)
494 |             .body(body)
495 |             .send()
496 |             .await?;
497 | 
498 |         Ok(resp)
499 |     }
500 | 
501 |     /// Get order by ID
502 |     pub async fn get_order_async(&self, order_id: &str, creds: &PreparedCreds) -> Result<PolymarketOrderResponse> {
503 |         let path = format!("/data/order/{}", order_id);
504 |         let url = format!("{}{}", self.host, path);
505 |         let headers = self.build_l2_headers("GET", &path, None, creds)?;
506 | 
507 |         let resp = self.http
508 |             .get(&url)
509 |             .headers(headers)
510 |             .send()
511 |             .await?;
512 | 
513 |         if !resp.status().is_success() {
514 |             let status = resp.status();
515 |             let body = resp.text().await.unwrap_or_default();
516 |             return Err(anyhow!("get_order failed {}: {}", status, body));
517 |         }
518 | 
519 |         Ok(resp.json().await?)
520 |     }
521 | 
522 |     /// Check neg_risk for a token (uncached here; SharedAsyncClient caches the result)
523 |     pub async fn check_neg_risk(&self, token_id: &str) -> Result<bool> {
524 |         let url = format!("{}/neg-risk?token_id={}", self.host, token_id);
525 |         let resp = self.http
526 |             .get(&url)
527 |             .header("User-Agent", USER_AGENT)
528 |             .send()
529 |             .await?;
530 | 
531 |         let val: serde_json::Value = resp.json().await?;
532 |         Ok(val["neg_risk"].as_bool().unwrap_or(false))
533 |     }
534 | 
535 |     #[allow(dead_code)]
536 |     pub fn wallet_address(&self) -> &str {
537 |         &self.wallet_address_str
538 |     }
539 | 
540 |     #[allow(dead_code)]
541 |     pub fn funder(&self) -> &str {
542 |         &self.funder
543 |     }
544 | 
545 |     #[allow(dead_code)]
546 |     pub fn wallet(&self) -> &LocalWallet {
547 |         &self.wallet
548 |     }
549 | }
550 | 
551 | /// Shared async client wrapper for use in execution engine
552 | pub struct SharedAsyncClient {
553 |     inner: Arc<PolymarketAsyncClient>,
554 |     creds: PreparedCreds,
555 |     chain_id: u64,
556 |     /// Pre-cached neg_risk lookups
557 |     neg_risk_cache: std::sync::RwLock<HashMap<String, bool>>,
558 | }
559 | 
560 | impl SharedAsyncClient {
561 |     pub fn new(client: PolymarketAsyncClient, creds: PreparedCreds, chain_id: u64) -> Self {
562 |         Self {
563 |             inner: Arc::new(client),
564 |             creds,
565 |             chain_id,
566 |             neg_risk_cache: std::sync::RwLock::new(HashMap::new()),
567 |         }
568 |     }
569 | 
570 |     /// Load neg_risk cache from JSON file (output of build_sports_cache.py)
571 |     pub fn load_cache(&self, path: &str) -> Result<usize> {
572 |         let data = std::fs::read_to_string(path)?;
573 |         let map: HashMap<String, bool> = serde_json::from_str(&data)?;
574 |         let count = map.len();
575 |         let mut cache = self.neg_risk_cache.write().unwrap();
576 |         *cache = map;
577 |         Ok(count)
578 |     }
579 | 
580 |     /// Execute a FAK buy order
581 |     pub async fn buy_fak(&self, token_id: &str, price: f64, size: f64) -> Result<PolyFillAsync> {
582 |         debug_assert!(!token_id.is_empty(), "token_id must not be empty");
583 |         debug_assert!(price > 0.0 && price < 1.0, "price must be 0 < p < 1");
584 |         debug_assert!(size >= 1.0, "size must be >= 1");
585 |         self.execute_order(token_id, price, size, "BUY").await
586 |     }
587 | 
588 |     /// Execute a FAK sell order
589 |     pub async fn sell_fak(&self, token_id: &str, price: f64, size: f64) -> Result<PolyFillAsync> {
590 |         debug_assert!(!token_id.is_empty(), "token_id must not be empty");
591 |         debug_assert!(price > 0.0 && price < 1.0, "price must be 0 < p < 1");
592 |         debug_assert!(size >= 1.0, "size must be >= 1");
593 |         self.execute_order(token_id, price, size, "SELL").await
594 |     }
595 |
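    // Illustrative sketch (cache filename and sizes are hypothetical): typical
    // call pattern from the execution engine.
    //
    //     let shared = SharedAsyncClient::new(client, creds, 137);
    //     // Warm the neg_risk cache produced by scripts/build_sports_cache.py so
    //     // the hot path never has to block on the /neg-risk endpoint.
    //     let warmed = shared.load_cache("neg_risk_cache.json")?;
    //     tracing::info!("pre-cached neg_risk for {} tokens", warmed);
    //
    //     // Buy up to 25 shares at $0.47; FAK takes what is available, drops the rest.
    //     let fill = shared.buy_fak(&token_id, 0.47, 25.0).await?;
    //     // fill_cost = filled_size * executed price, e.g. 25 * 0.47 = 11.75 USDC on a full fill.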
596 |     async fn execute_order(&self, token_id: &str, price: f64, size: f64, side: &str) -> Result<PolyFillAsync> {
597 |         // Check neg_risk cache first
598 |         let neg_risk = {
599 |             let cache = self.neg_risk_cache.read().unwrap();
600 |             cache.get(token_id).copied()
601 |         };
602 | 
603 |         let neg_risk = match neg_risk {
604 |             Some(nr) => nr,
605 |             None => {
606 |                 let nr = self.inner.check_neg_risk(token_id).await?;
607 |                 let mut cache = self.neg_risk_cache.write().unwrap();
608 |                 cache.insert(token_id.to_string(), nr);
609 |                 nr
610 |             }
611 |         };
612 | 
613 |         // Build signed order
614 |         let signed = self.build_signed_order(token_id, price, size, side, neg_risk)?;
615 |         // Owner must be the API key (not wallet address or funder!)
616 |         let body = signed.post_body(&self.creds.api_key, PolyOrderType::FAK.as_str());
617 | 
618 |         // Post order
619 |         let resp = self.inner.post_order_async(body, &self.creds).await?;
620 | 
621 |         if !resp.status().is_success() {
622 |             let status = resp.status();
623 |             let body = resp.text().await.unwrap_or_default();
624 |             return Err(anyhow!("Polymarket order failed {}: {}", status, body));
625 |         }
626 | 
627 |         let resp_json: serde_json::Value = resp.json().await?;
628 |         let order_id = resp_json["orderID"].as_str().unwrap_or("unknown").to_string();
629 | 
630 |         // Query fill status
631 |         let order_info = self.inner.get_order_async(&order_id, &self.creds).await?;
632 |         let filled_size: f64 = order_info.size_matched.parse().unwrap_or(0.0);
633 |         let order_price: f64 = order_info.price.parse().unwrap_or(price);
634 | 
635 |         tracing::debug!(
636 |             "[POLY-ASYNC] FAK {} {}: status={}, filled={:.2}/{:.2}, price={:.4}",
637 |             side, order_id, order_info.status, filled_size, size, order_price
638 |         );
639 | 
640 |         Ok(PolyFillAsync {
641 |             order_id,
642 |             filled_size,
643 |             fill_cost: filled_size * order_price,
644 |         })
645 |     }
646 | 
647 |     /// Build a signed order
648 |     fn build_signed_order(
649 |         &self,
650 |         token_id: &str,
651 |         price: f64,
652 |         size: f64,
653 |         side: &str,
654 |         neg_risk: bool,
655 |     ) -> Result<SignedOrder> {
656 |         let price_bps = price_to_bps(price);
657 |         let size_micro = size_to_micro(size);
658 | 
659 |         if !price_valid(price_bps) {
660 |             return Err(anyhow!("price {} ({}bps) outside allowed range", price, price_bps));
661 |         }
662 | 
663 |         let (side_code, maker_amt, taker_amt) = if side.eq_ignore_ascii_case("BUY") {
664 |             get_order_amounts_buy(size_micro, price_bps)
665 |         } else if side.eq_ignore_ascii_case("SELL") {
666 |             get_order_amounts_sell(size_micro, price_bps)
667 |         } else {
668 |             return Err(anyhow!("side must be BUY or SELL"));
669 |         };
670 | 
671 |         let salt = generate_seed();
672 |         let maker_amount_str = maker_amt.to_string();
673 |         let taker_amount_str = taker_amt.to_string();
674 | 
675 |         // Use references for EIP712 signing
676 |         let data = OrderData {
677 |             maker: &self.inner.funder,
678 |             taker: ZERO_ADDRESS,
679 |             token_id,
680 |             maker_amount: &maker_amount_str,
681 |             taker_amount: &taker_amount_str,
682 |             side: side_code,
683 |             fee_rate_bps: "0",
684 |             nonce: "0",
685 |             signer: &self.inner.wallet_address_str,
686 |             expiration: "0",
687 |             signature_type: 1,
688 |             salt,
689 |         };
690 |         let exchange = get_exchange_address(self.chain_id, neg_risk)?;
691 |         let typed = order_typed_data(self.chain_id, &exchange, &data)?;
692 |         let digest = typed.encode_eip712()?;
693 | 
694 |         let sig = self.inner.wallet.sign_hash(H256::from(digest))?;
695 | 
696 |         // Only allocate strings once for the final OrderStruct (serialization needs owned)
697 |         Ok(SignedOrder {
698 |             order: OrderStruct {
699 |                 salt,
700 |                 maker: self.inner.funder.clone(),
701 |                 signer: self.inner.wallet_address_str.clone(),
702 |                 taker: ZERO_ADDRESS.to_string(),
703 |                 token_id: token_id.to_string(),
704 |                 maker_amount: maker_amount_str,
705 |                 taker_amount: taker_amount_str,
706 |                 expiration: "0".to_string(),
707 |                 nonce: "0".to_string(),
708 |                 fee_rate_bps: "0".to_string(),
709 |                 side: side_code,
710 |                 signature_type: 1,
711 |             },
712 |             signature: format!("0x{}", sig),
713 |         })
714 |     }
715 | }
716 | 
717 | /// Async fill result
718 | #[derive(Debug, Clone)]
719 | pub struct PolyFillAsync {
720 |     pub order_id: String,
721 |     pub filled_size: f64,
722 |     pub fill_cost: f64,
723 | }
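// A minimal test sketch (module name and cases are additions; assumes `price_valid`
// takes the integer bps value produced by `price_to_bps`). Only the pure helpers
// are exercised here - everything else needs live credentials.
#[cfg(test)]
mod fak_order_tests {
    use super::*;

    #[test]
    fn price_valid_enforces_1c_to_99c_band() {
        // 10000 bps == $1.00, so the allowed band is $0.01..=$0.99.
        assert!(price_valid(100));
        assert!(price_valid(9900));
        assert!(!price_valid(99));
        assert!(!price_valid(9901));
    }

    #[test]
    fn order_type_maps_to_clob_strings() {
        assert_eq!(PolyOrderType::FAK.as_str(), "FAK");
        assert_eq!(PolyOrderType::FOK.as_str(), "FOK");
        assert_eq!(PolyOrderType::GTC.as_str(), "GTC");
        assert_eq!(PolyOrderType::GTD.as_str(), "GTD");
    }
}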
--------------------------------------------------------------------------------