├── python-backend
│   └── app
│       ├── data
│       │   ├── __init__.py
│       │   ├── __pycache__
│       │   │   ├── __init__.cpython-312.pyc
│       │   │   └── etl_alchemy.cpython-312.pyc
│       │   ├── etl_alchemy.py
│       │   └── etl_goldsky.py
│       ├── analytics
│       │   ├── __init__.py
│       │   ├── gas_predictor.py
│       │   ├── size_opt.py
│       │   ├── arb_formula.py
│       │   └── evaluator.py
│       ├── arb
│       │   ├── __init__.py
│       │   ├── __pycache__
│       │   │   ├── costs.cpython-312.pyc
│       │   │   ├── sizing.cpython-312.pyc
│       │   │   └── __init__.cpython-312.pyc
│       │   ├── sizing.py
│       │   └── costs.py
│       ├── config_store.py
│       ├── deepseek_client.py
│       ├── executor.py
│       └── goldrush_client.py
├── requirements.txt
├── tsconfig.refinement.json
├── src
│   ├── api
│   │   ├── server.ts
│   │   ├── routes
│   │   │   ├── config.ts
│   │   │   ├── bot.ts
│   │   │   ├── strategy.ts
│   │   │   └── eval.ts
│   │   ├── middleware
│   │   │   ├── logging.ts
│   │   │   ├── rateLimit.ts
│   │   │   └── metrics.ts
│   │   ├── app.ts
│   │   └── state.ts
│   ├── chain
│   │   ├── providers.ts
│   │   ├── univ3_abi.ts
│   │   └── univ3_fetch.ts
│   ├── eval
│   │   ├── latency.ts
│   │   ├── montecarlo.ts
│   │   ├── slippage.ts
│   │   ├── types.ts
│   │   ├── model.ts
│   │   └── arb_math.ts
│   ├── feeds
│   │   └── types.ts
│   ├── policy
│   │   └── strategyGate.ts
│   ├── config
│   │   └── env.ts
│   ├── execution
│   │   └── types.ts
│   ├── metrics
│   │   └── execution.ts
│   ├── data
│   │   └── markets.ts
│   └── ml
│       └── finbloom.ts
├── requirements-dev.txt
├── tsconfig.feeds.json
├── mvp_py
│   └── src
│       └── arbitrage
│           ├── pricing.py
│           ├── signer_stub.py
│           ├── sizing.py
│           ├── profit_gate.py
│           ├── watchdog.py
│           ├── gas_oracle.py
│           └── exec_engine.py
├── tsconfig.json
├── config.json
├── data
│   ├── backtests.json
│   └── strategies.json
├── config
│   ├── runtime_config.json
│   ├── strategies.json
│   └── sample.env
├── web
│   ├── favicon.svg
│   └── styles.css
├── vitest.config.ts
├── rust-engine
│   ├── Cargo.toml
│   └── src
│       └── main.rs
├── prometheus.yml
├── tests
│   ├── latency.spec.ts
│   ├── slippage.spec.ts
│   ├── strategyGate.spec.ts
│   ├── univ3.spec.ts
│   ├── integration
│   │   └── eval.api.spec.ts
│   ├── model.flash.spec.ts
│   ├── strategyApi.spec.ts
│   ├── univ3_math_validation.spec.ts
│   └── model.spec.ts
├── research
│   ├── plots.py
│   ├── data_loader.py
│   ├── optimize_scipy.py
│   └── backtest_np.py
├── scripts
│   ├── smoke_eval.ts
│   ├── seed_strategy.ts
│   ├── flashloan.ts
│   ├── setup_and_run.ps1
│   └── deploy.sh
├── tools
│   └── engine_shim.py
├── Dockerfile
├── package.json
├── contracts
│   └── HyperliquidArbitrageEngine.sol
├── docker-compose.yml
├── check-system.js
├── PRODUCTION_CHECKLIST.md
├── README.md
└── PROGRESS_REPORT.md
/python-backend/app/data/__init__.py:
--------------------------------------------------------------------------------
1 | # data package
2 |
--------------------------------------------------------------------------------
/python-backend/app/analytics/__init__.py:
--------------------------------------------------------------------------------
1 | # analytics package
2 |
--------------------------------------------------------------------------------
/python-backend/app/arb/__init__.py:
--------------------------------------------------------------------------------
1 | # Package marker for app.arb
2 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.26.0
2 | pandas==2.0.3
3 | scipy==1.11.0; sys_platform != 'win32'
4 | scipy==1.10.1; sys_platform == 'win32'
5 | requests==2.31.0
6 |
--------------------------------------------------------------------------------
/python-backend/app/arb/__pycache__/costs.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kallie45s/hyperliquid-arbitrage-bot/HEAD/python-backend/app/arb/__pycache__/costs.cpython-312.pyc
--------------------------------------------------------------------------------
/python-backend/app/arb/__pycache__/sizing.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kallie45s/hyperliquid-arbitrage-bot/HEAD/python-backend/app/arb/__pycache__/sizing.cpython-312.pyc
--------------------------------------------------------------------------------
/python-backend/app/arb/__pycache__/__init__.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kallie45s/hyperliquid-arbitrage-bot/HEAD/python-backend/app/arb/__pycache__/__init__.cpython-312.pyc
--------------------------------------------------------------------------------
/python-backend/app/data/__pycache__/__init__.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kallie45s/hyperliquid-arbitrage-bot/HEAD/python-backend/app/data/__pycache__/__init__.cpython-312.pyc
--------------------------------------------------------------------------------
/python-backend/app/data/__pycache__/etl_alchemy.cpython-312.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kallie45s/hyperliquid-arbitrage-bot/HEAD/python-backend/app/data/__pycache__/etl_alchemy.cpython-312.pyc
--------------------------------------------------------------------------------
/tsconfig.refinement.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "./tsconfig.json",
3 | "compilerOptions": {
4 | "noEmit": true
5 | },
6 | "include": [
7 | "src/refinement/**/*.ts"
8 | ],
9 | "exclude": []
10 | }
11 |
--------------------------------------------------------------------------------
/src/api/server.ts:
--------------------------------------------------------------------------------
1 | import { app } from "./app";
2 |
3 | const port = Number(process.env.TS_API_PORT || 8082);
4 | app.listen(port, () => {
5 | // eslint-disable-next-line no-console
6 | console.log(`TS eval server listening on http://127.0.0.1:${port}`);
7 | });
8 |
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | # Development/Research dependencies (do not install in prod runtime)
2 | # Pin reasonably recent stable versions
3 | pandas==2.2.2
4 | numpy==2.0.1
5 | scipy==1.13.1
6 | matplotlib==3.9.0
7 | seaborn==0.13.2
8 | requests==2.32.3
9 | pyarrow==16.1.0
10 |
--------------------------------------------------------------------------------
/src/chain/providers.ts:
--------------------------------------------------------------------------------
1 | import { providers } from 'ethers';
2 |
3 | let cached: providers.JsonRpcProvider | null = null;
4 |
5 | export function getProvider(rpcUrl?: string): providers.JsonRpcProvider {
6 | if (!cached) {
7 | const url = rpcUrl || process.env.RPC_URL || 'http://127.0.0.1:8545';
8 | cached = new providers.JsonRpcProvider(url);
9 | }
10 | return cached;
11 | }
12 |
--------------------------------------------------------------------------------
/tsconfig.feeds.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "./tsconfig.json",
3 | "compilerOptions": {
4 | "noEmit": true,
5 | "target": "ES2020",
6 | "module": "commonjs",
7 | "lib": ["ES2020", "DOM"],
8 | "esModuleInterop": true,
9 | "downlevelIteration": true,
10 | "skipLibCheck": true
11 | },
12 | "include": [
13 | "src/feeds/SignalGenerator.ts",
14 | "src/feeds/index.ts"
15 | ],
16 | "exclude": []
17 | }
18 |
--------------------------------------------------------------------------------
/mvp_py/src/arbitrage/pricing.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | def usd_from_native(amount_native: float, native_usd: float) -> float:
4 | return float(amount_native) * float(native_usd)
5 |
6 | def apply_fee(amount: float, fee_bps: float) -> float:
7 | return float(amount) * (1.0 - float(fee_bps)/10000.0)
8 |
9 | def add_fee(amount: float, fee_bps: float) -> float:
10 | return float(amount) * (1.0 + float(fee_bps)/10000.0)
11 |
--------------------------------------------------------------------------------
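The fee helpers above are bps-denominated multipliers. A quick sanity check, assuming the module is importable as mvp_py.src.arbitrage.pricing (numbers illustrative only):

from mvp_py.src.arbitrage.pricing import usd_from_native, apply_fee, add_fee  # assumed import path

# 30 bps taken off a 10_000 USD notional leaves 9_970 USD
assert abs(apply_fee(10_000.0, 30.0) - 9_970.0) < 1e-9
# grossing the same notional up by 30 bps adds 30 USD
assert abs(add_fee(10_000.0, 30.0) - 10_030.0) < 1e-9
# 0.5 native units priced at 2_500 USD each
assert usd_from_native(0.5, 2_500.0) == 1_250.0
--------------------------------------------------------------------------------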
/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "ES2020",
4 | "module": "CommonJS",
5 | "moduleResolution": "Node",
6 | "outDir": "dist",
7 | "rootDir": "src",
8 | "strict": true,
9 | "esModuleInterop": true,
10 | "forceConsistentCasingInFileNames": true,
11 | "resolveJsonModule": true,
12 | "skipLibCheck": true
13 | },
14 | "include": ["src/**/*.ts"],
15 | "exclude": ["node_modules", "dist"]
16 | }
17 |
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "max_capital": 1000000,
3 | "risk_free_rate": 0.05,
4 | "slippage_model": "volume_weighted",
5 | "fee_structure": {
6 | "exchange": 0.001,
7 | "router": 0.0005,
8 | "protocol": 0.0002
9 | },
10 | "flash_loan_params": {
11 | "base_fee": 10,
12 | "variable_fee_bps": 5,
13 | "base_gas_cost": 0.001
14 | },
15 | "risk_aversion": 0.7,
16 | "cache_dir": "./market_data_cache"
17 | }
18 |
--------------------------------------------------------------------------------
/src/eval/latency.ts:
--------------------------------------------------------------------------------
1 | export function decayEdge(edgeBps: number, latencySec: number, edgeDecayBpsPerSec: number): number {
2 | const e = Math.max(0, edgeBps - Math.max(0, latencySec) * Math.max(0, edgeDecayBpsPerSec));
3 | return e;
4 | }
5 |
6 | export function fillProb(baseFillProb: number, latencySec: number, theta: number = 0.15): number {
7 | const p = baseFillProb * Math.exp(-Math.max(0, theta) * Math.max(0, latencySec));
8 | return Math.max(0, Math.min(1, p));
9 | }
10 |
--------------------------------------------------------------------------------
/data/backtests.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "id": "dedd8b2e-1b1c-4dd1-9edf-2a0b11e471e4",
4 | "strategyId": "74336e08-00a1-4cf8-b409-577d594acf4d",
5 | "startedAt": 1754872846666,
6 | "endedAt": 1754966446666,
7 | "coverageHours": 26,
8 | "stats": {
9 | "evAdjUsd": 10,
10 | "pSuccess": 0.8,
11 | "maxDrawdown": 0,
12 | "hitRate": 0.75,
13 | "pnlUsd": 100,
14 | "samples": 1000
15 | },
16 | "createdAt": 1754966446720
17 | }
18 | ]
--------------------------------------------------------------------------------
/mvp_py/src/arbitrage/signer_stub.py:
--------------------------------------------------------------------------------
1 | # Minimal signer stub. Replace with eth-account or your wallet lib.
2 | import os, hashlib
3 | class SignerStub:
4 | def __init__(self, privkey_hex: str):
5 | self.priv=privkey_hex; self.address=os.getenv("BOT_ADDRESS","0xYourAddress")
6 | def sign_tx(self, tx, chain_id=1) -> str:
7 | # TODO: implement EIP-1559 signing (use eth_account in production)
8 | h=hashlib.sha256(str(tx).encode()).hexdigest()
9 | return "0x"+h # placeholder, not a real signed tx!
10 |
--------------------------------------------------------------------------------
/config/runtime_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "min_profit_usd": 5.0,
3 | "min_spread_bps": 10.0,
4 | "min_liquidity_usd": 10000.0,
5 | "max_position_eth": 1.0,
6 | "risk_level": 5,
7 | "slippage_bps": 30.0,
8 | "gas_multiplier": 1.0,
9 | "fees_bps": 5.0,
10 | "max_trade_usd": 50000.0,
11 | "chain_name": "hyperevm-mainnet",
12 | "default_gas_limit": 250000,
13 | "assets": [
14 | "ETH",
15 | "HYPE"
16 | ],
17 | "run_duration_sec": 3600,
18 | "target_profit_bps_min": 50.0,
19 | "target_profit_bps_max": 500.0
20 | }
--------------------------------------------------------------------------------
/web/favicon.svg:
--------------------------------------------------------------------------------
(SVG markup not preserved in this dump)
--------------------------------------------------------------------------------
/vitest.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vitest/config';
2 |
3 | export default defineConfig({
4 | test: {
5 | globals: true,
6 | include: [
7 | 'tests/**/*.spec.ts',
8 | ],
9 | setupFiles: ['src/tests/setup-env.ts'],
10 | environment: 'node',
11 | coverage: {
12 | provider: 'v8',
13 | reporter: ['text', 'lcov'],
14 | reportsDirectory: './coverage',
15 | exclude: [
16 | 'src/api/server.ts',
17 | 'src/api/routes/**',
18 | 'scripts/**',
19 | ],
20 | },
21 | },
22 | });
23 |
--------------------------------------------------------------------------------
/rust-engine/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "hyperliquid-arb-engine"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [dependencies]
7 | tokio = { version = "1.39", features = ["full"] }
8 | reqwest = { version = "0.12", features = ["json", "rustls-tls"] }
9 | serde = { version = "1.0", features = ["derive"] }
10 | serde_json = "1.0"
11 | anyhow = "1.0"
12 | tracing = "0.1"
13 | tracing-subscriber = { version = "0.3", features = ["env-filter"] }
14 | dotenvy = "0.15"
15 | redis = { version = "0.26", features = ["tokio-comp", "connection-manager"] }
16 | chrono = { version = "0.4", features = ["clock", "std"] }
17 |
--------------------------------------------------------------------------------
/src/api/routes/config.ts:
--------------------------------------------------------------------------------
1 | import { Router, Request, Response } from 'express';
2 | import { env } from '../../config/env';
3 |
4 | const router = Router();
5 |
6 | router.get('/', (_req: Request, res: Response) => {
7 | res.json({
8 | policy: {
9 | minBacktestHours: env.MIN_BACKTEST_HOURS,
10 | minPSuccess: env.MIN_P_SUCCESS,
11 | minEvAdjUsd: env.MIN_EV_ADJ_USD,
12 | maxDrawdown: env.MAX_DRAWDOWN,
13 | },
14 | api: {
15 | totalFeesBps: env.TOTAL_FEES_BPS,
16 | },
17 | // Safe exposure only; no secrets returned
18 | updatedAt: Date.now(),
19 | });
20 | });
21 |
22 | export default router;
23 |
--------------------------------------------------------------------------------
/data/strategies.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "id": "74336e08-00a1-4cf8-b409-577d594acf4d",
4 | "name": "Auto triangular 2025-08-12T02:40:46.666Z",
5 | "kind": "triangular",
6 | "params": {},
7 | "status": "approved",
8 | "tags": [],
9 | "approvals": [
10 | {
11 | "at": 1754966446728,
12 | "status": "approved",
13 | "coverageHours": 26,
14 | "metrics": {
15 | "evAdjUsd": 10,
16 | "pSuccess": 0.8,
17 | "maxDrawdown": 0,
18 | "hitRate": 0.75,
19 | "pnlUsd": 100,
20 | "samples": 1000
21 | }
22 | }
23 | ],
24 | "createdAt": 1754966446705,
25 | "updatedAt": 1754966446729
26 | }
27 | ]
--------------------------------------------------------------------------------
/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 15s
3 | evaluation_interval: 15s
4 | external_labels:
5 | monitor: 'hyperliquid-bot'
6 | environment: 'production'
7 |
8 | scrape_configs:
9 | # Bot metrics
10 | - job_name: 'hyperliquid-bot'
11 | static_configs:
12 | - targets: ['bot:4000']
13 | metrics_path: '/api/prometheus'
14 | scrape_interval: 5s
15 |
16 | # Node Exporter for system metrics
17 | - job_name: 'node-exporter'
18 | static_configs:
19 | - targets: ['node-exporter:9100']
20 |
21 | # Redis metrics
22 | - job_name: 'redis'
23 | static_configs:
24 | - targets: ['redis:6379']
25 |
26 | alerting:
27 | alertmanagers:
28 | - static_configs:
29 | - targets: []
30 |
31 | rule_files:
32 | - '/etc/prometheus/alerts.yml'
33 |
--------------------------------------------------------------------------------
/python-backend/app/analytics/gas_predictor.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | from typing import Tuple
4 |
5 | class GasPredictor:
6 | """
7 | Simple model: gas_used = a + b*size + c*size^2 (quadratic)
8 | Fit with numpy.polyfit; predict gas units; convert to USD outside this class.
9 | """
10 | def __init__(self):
11 | self.coef = None # np.poly1d
12 |
13 | def fit(self, df: pd.DataFrame, size_col: str = "size", gas_col: str = "gas_used") -> None:
14 | if len(df) < 3:
15 | return
16 | x = df[size_col].to_numpy(dtype=float)
17 | y = df[gas_col].to_numpy(dtype=float)
18 | self.coef = np.poly1d(np.polyfit(x, y, deg=2))
19 |
20 | def predict_units(self, size: float) -> float:
21 | if self.coef is None:
22 | return float("nan")
23 | return float(self.coef(size))
24 |
--------------------------------------------------------------------------------
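A minimal usage sketch for the quadratic fit above, with synthetic (size, gas_used) observations; the import path is assumed from the repo layout:

import pandas as pd
from app.analytics.gas_predictor import GasPredictor  # assumed import path

history = pd.DataFrame({
    "size": [1_000.0, 5_000.0, 10_000.0, 20_000.0],
    "gas_used": [120_000.0, 150_000.0, 185_000.0, 260_000.0],
})
gp = GasPredictor()
gp.fit(history)                         # fits gas_used ~ a + b*size + c*size^2
gas_units = gp.predict_units(15_000.0)  # predicted gas units for a 15k-size trade
print(gas_units)                        # convert to USD downstream via a gas price feed
--------------------------------------------------------------------------------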
/tests/latency.spec.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { decayEdge, fillProb } from '../src/eval/latency';
3 |
4 | describe('latency', () => {
5 | it('decayEdge decreases with latency', () => {
6 | const edge0 = 50; // bps
7 | const d0 = decayEdge(edge0, 0, 2);
8 | const d1 = decayEdge(edge0, 1, 2);
9 | const d2 = decayEdge(edge0, 2, 2);
10 | expect(d0).toBeGreaterThan(d1);
11 | expect(d1).toBeGreaterThan(d2);
12 | expect(d2).toBeGreaterThanOrEqual(0);
13 | });
14 |
15 | it('fillProb decreases with latency and bounded [0,1]', () => {
16 | const p0 = fillProb(0.9, 0, 0.2);
17 | const p1 = fillProb(0.9, 1, 0.2);
18 | const p2 = fillProb(0.9, 5, 0.2);
19 | expect(p0).toBeLessThanOrEqual(1);
20 | expect(p0).toBeGreaterThanOrEqual(0);
21 | expect(p0).toBeGreaterThan(p1);
22 | expect(p1).toBeGreaterThan(p2);
23 | });
24 | });
25 |
--------------------------------------------------------------------------------
/mvp_py/src/arbitrage/sizing.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple
2 |
3 | def max_in_for_slip_bps(reserve_in: float, reserve_out: float, slip_bps: float) -> float:
4 | """
5 | For constant product AMM: price impact ~ dx / (Rin + dx)
6 | We find dx such that impact <= slip_bps.
7 | Approx: dx <= Rin * slip_bps/10000 / (1 - slip_bps/10000)
8 | """
9 | s = float(slip_bps)/10000.0
10 | if s <= 0 or s >= 1:
11 | return 0.0
12 | return float(reserve_in) * s / (1.0 - s)
13 |
14 | def uni_v2_out_given_in(dx: float, Rin: float, Rout: float, fee_bps: float) -> float:
15 | fee = 1.0 - float(fee_bps)/10000.0
16 | dx_f = float(dx) * fee
17 | return (dx_f * float(Rout)) / (float(Rin) + dx_f)
18 |
19 | def uni_v2_in_given_out(dy: float, Rin: float, Rout: float, fee_bps: float) -> float:
20 | fee = 1.0 - float(fee_bps)/10000.0
21 | return (float(Rin) * float(dy)) / (fee * (float(Rout) - float(dy)))
22 |
--------------------------------------------------------------------------------
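A worked pass through the approximation in the docstring, with illustrative reserves: cap the input so price impact stays within 30 bps, then quote the output through a 30 bps pool fee (import path assumed):

from mvp_py.src.arbitrage.sizing import max_in_for_slip_bps, uni_v2_out_given_in  # assumed import path

Rin, Rout = 1_000_000.0, 500.0                 # e.g. stable / volatile reserves
dx_cap = max_in_for_slip_bps(Rin, Rout, 30.0)  # ~= 1_000_000 * 0.003 / 0.997 ≈ 3_009
dy = uni_v2_out_given_in(dx_cap, Rin, Rout, fee_bps=30.0)
print(dx_cap, dy)                              # ≈ 3_009 in, ≈ 1.4955 out
--------------------------------------------------------------------------------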
/src/api/middleware/logging.ts:
--------------------------------------------------------------------------------
1 | import { Request, Response, NextFunction } from "express";
2 |
3 | function genId() {
4 | return Math.random().toString(36).slice(2, 10);
5 | }
6 |
7 | export function loggingMiddleware(req: Request, res: Response, next: NextFunction) {
8 | const start = Date.now();
9 | const reqId = (req.headers["x-request-id"] as string) || genId();
10 | (res as any).locals = (res as any).locals || {};
11 | (res as any).locals.requestId = reqId;
12 | res.setHeader("x-request-id", reqId);
13 |
14 | res.on("finish", () => {
15 | const ms = Date.now() - start;
16 | // eslint-disable-next-line no-console
17 | console.log(JSON.stringify({
18 | level: "info",
19 | msg: "http_request",
20 | reqId,
21 | method: req.method,
22 | path: req.originalUrl,
23 | status: res.statusCode,
24 | ms,
25 | len: res.getHeader("content-length") || 0,
26 | }));
27 | });
28 | next();
29 | }
30 |
--------------------------------------------------------------------------------
/tests/slippage.spec.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { effectiveSlipBps } from '../src/eval/slippage';
3 |
4 | // Note: empirical slippage is monotone increasing w.r.t size in our model.
5 |
6 | describe('slippage', () => {
7 | it('empirical slippage increases with trade size', () => {
8 | const model = { kind: 'empirical' as const, k: 1.0, alpha: 1.2, liquidityRefUsd: 1_000_000 };
9 | const s1 = effectiveSlipBps(model, 1_000);
10 | const s2 = effectiveSlipBps(model, 10_000);
11 | const s3 = effectiveSlipBps(model, 100_000);
12 | expect(s1).toBeGreaterThanOrEqual(0);
13 | expect(s2).toBeGreaterThanOrEqual(s1);
14 | expect(s3).toBeGreaterThanOrEqual(s2);
15 | });
16 |
17 | it('empirical slippage is near-zero if k=0 (degenerate)', () => {
18 | const model = { kind: 'empirical' as const, k: 0.0, alpha: 1.0, liquidityRefUsd: 1_000_000 };
19 | const s = effectiveSlipBps(model, 50_000);
20 | expect(s).toBe(0);
21 | });
22 | });
23 |
--------------------------------------------------------------------------------
/tests/strategyGate.spec.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { checkApproval } from '../src/policy/strategyGate';
3 |
4 | describe('strategyGate.checkApproval', () => {
5 | it('approves when coverage and metrics meet thresholds', () => {
6 | const res = checkApproval(30, { pSuccess: 0.8, evAdjUsd: 1, maxDrawdown: 0 });
7 | expect(res.status).toBe('approved');
8 | expect(res.coverageHours).toBeGreaterThanOrEqual(24);
9 | });
10 |
11 | it('rejects for insufficient coverage hours', () => {
12 | const res = checkApproval(12, { pSuccess: 0.9, evAdjUsd: 10, maxDrawdown: 0 });
13 | expect(res.status).toBe('rejected');
14 | expect(res.reason).toBe('insufficient_coverage_hours');
15 | });
16 |
17 | it('rejects when pSuccess below threshold', () => {
18 | const res = checkApproval(48, { pSuccess: 0.5, evAdjUsd: 10, maxDrawdown: 0 });
19 | expect(res.status).toBe('rejected');
20 | expect(res.reason).toBe('p_success_below_threshold');
21 | });
22 | });
23 |
--------------------------------------------------------------------------------
/python-backend/app/config_store.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from typing import Any
4 | from .models import Config
5 |
6 | CONFIG_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "config", "runtime_config.json"))
7 |
8 | def _ensure_dir(path: str) -> None:
9 | d = os.path.dirname(path)
10 | if not os.path.isdir(d):
11 | os.makedirs(d, exist_ok=True)
12 |
13 | async def load_config() -> Config:
14 | # lightweight sync I/O is fine on startup, file is tiny
15 | try:
16 | with open(CONFIG_PATH, "r", encoding="utf-8") as f:
17 | data = json.load(f)
18 | return Config(**data)
19 | except FileNotFoundError:
20 | cfg = Config()
21 | await save_config(cfg)
22 | return cfg
23 | except Exception:
24 | # fallback to defaults on parse errors
25 | return Config()
26 |
27 | async def save_config(cfg: Config) -> None:
28 | _ensure_dir(CONFIG_PATH)
29 | with open(CONFIG_PATH, "w", encoding="utf-8") as f:
30 | json.dump(cfg.dict(), f, indent=2)
31 |
--------------------------------------------------------------------------------
/src/api/routes/bot.ts:
--------------------------------------------------------------------------------
1 | import { Router, Request, Response } from 'express';
2 | import { strategyStore } from '../../storage/strategyStore';
3 |
4 | const router = Router();
5 |
6 | // Derive simple status for now
7 | router.get('/status', async (_req: Request, res: Response) => {
8 | const approved = await strategyStore.listApprovedByKind('triangular'); // placeholder kind aggregation
9 | const approvedAll = (await strategyStore.listStrategies()).filter(s => s.status === 'approved');
10 | res.json({
11 | running: false, // no lifecycle manager yet
12 | mode: 'idle',
13 | approvedStrategies: approvedAll.map(s => s.id),
14 | updatedAt: Date.now(),
15 | });
16 | });
17 |
18 | // Control stubs
19 | const notImplemented = (_req: Request, res: Response) => res.status(501).json({ ok: false, reason: 'not_implemented', timestamp: Date.now() });
20 | router.post('/start', notImplemented);
21 | router.post('/stop', notImplemented);
22 | router.post('/pause', notImplemented);
23 | router.post('/resume', notImplemented);
24 | router.post('/emergency-stop', notImplemented);
25 |
26 | export default router;
27 |
--------------------------------------------------------------------------------
/python-backend/app/deepseek_client.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Any, Dict
3 |
4 | import httpx
5 |
6 | class DeepSeekClient:
7 | def __init__(self, api_key: str | None = None, base_url: str | None = None):
8 | self.api_key = api_key or os.getenv("DEEPSEEK_API_KEY", "")
9 | self.base_url = base_url or os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com")
10 | self._client = httpx.AsyncClient(timeout=10)
11 |
12 | async def analyze(self, payload: Dict[str, Any]) -> Dict[str, Any]:
13 | # Placeholder: wire to actual DeepSeek endpoint
14 | # resp = await self._client.post(f"{self.base_url}/analyze", json=payload, headers=self._headers())
15 | # resp.raise_for_status()
16 | # return resp.json()
17 | return {"confidence_score": 0.9, "detail": "stub"}
18 |
19 | async def optimize_strategy(self, params: Dict[str, Any], historical: Any) -> Dict[str, Any]:
20 | # Placeholder optimization stub
21 | return {"optimized": True, "params": params}
22 |
23 | def _headers(self) -> Dict[str, str]:
24 | return {"Authorization": f"Bearer {self.api_key}"}
25 |
--------------------------------------------------------------------------------
/mvp_py/src/arbitrage/profit_gate.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from typing import Optional, Dict
3 |
4 | @dataclass
5 | class Costs:
6 | gas_wei: int
7 | gas_limit: int
8 | native_usd: float
9 | router_fee_bps: float = 0.0
10 | extra_usd: float = 0.0 # e.g. MEV tip, relayer fee
11 |
12 | @dataclass
13 | class Quote:
14 | gross_usd: float # expected gross profit before gas/fees
15 | slip_bps: float # worst case slippage assumed
16 | lp_fees_bps: float # total LP fees across route (sum)
17 | route: str
18 |
19 | def net_profit_usd(quote: Quote, costs: Costs) -> float:
20 | fee_mult = 1.0 - (float(quote.lp_fees_bps) + float(costs.router_fee_bps))/10000.0
21 | gross_after_fees = float(quote.gross_usd) * fee_mult
22 | gas_usd = (int(costs.gas_wei) * int(costs.gas_limit)) / 1e18 * float(costs.native_usd)
23 | return float(gross_after_fees) - float(gas_usd) - float(costs.extra_usd)
24 |
25 | def is_viable(quote: Quote, costs: Costs, min_profit_usd: float, max_slip_bps: float) -> bool:
26 | if float(quote.slip_bps) > float(max_slip_bps):
27 | return False
28 | return net_profit_usd(quote, costs) >= float(min_profit_usd)
29 |
--------------------------------------------------------------------------------
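A worked check of the gate with illustrative numbers; gas cost is gas_wei * gas_limit converted to native units and then to USD (import path assumed):

from mvp_py.src.arbitrage.profit_gate import Quote, Costs, net_profit_usd, is_viable  # assumed import path

q = Quote(gross_usd=40.0, slip_bps=20.0, lp_fees_bps=60.0, route="USDC->ETH->USDC")
c = Costs(gas_wei=30_000_000_000, gas_limit=250_000, native_usd=2_500.0, router_fee_bps=5.0)

# gas: 30 gwei * 250k gas = 0.0075 native ≈ 18.75 USD
# fees: 65 bps off the 40 USD gross ≈ 39.74 USD after fees
print(net_profit_usd(q, c))                                    # ≈ 20.99 USD
print(is_viable(q, c, min_profit_usd=5.0, max_slip_bps=30.0))  # True
--------------------------------------------------------------------------------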
/src/api/middleware/rateLimit.ts:
--------------------------------------------------------------------------------
1 | import { Request, Response, NextFunction } from 'express';
2 |
3 | interface Bucket {
4 | tokens: number;
5 | lastRefill: number;
6 | }
7 |
8 | // Simple token bucket per IP
9 | export function rateLimit({ capacity, refillPerMs }: { capacity: number; refillPerMs: number }) {
10 |   const buckets = new Map<string, Bucket>();
11 |
12 | return (req: Request, res: Response, next: NextFunction) => {
13 | const key = req.ip || req.headers['x-forwarded-for']?.toString() || 'unknown';
14 | const now = Date.now();
15 | const bucket = buckets.get(key) || { tokens: capacity, lastRefill: now };
16 |
17 | // Refill tokens
18 | const elapsed = now - bucket.lastRefill;
19 | const refill = elapsed / refillPerMs; // tokens per ms * elapsed ms
20 | bucket.tokens = Math.min(capacity, bucket.tokens + refill);
21 | bucket.lastRefill = now;
22 |
23 | if (bucket.tokens < 1) {
24 | buckets.set(key, bucket);
25 | res.status(429).json({ error: 'rate_limited', retryInMs: Math.ceil((1 - bucket.tokens) * refillPerMs), timestamp: now });
26 | return;
27 | }
28 |
29 | bucket.tokens -= 1;
30 | buckets.set(key, bucket);
31 | next();
32 | };
33 | }
34 |
--------------------------------------------------------------------------------
/src/api/middleware/metrics.ts:
--------------------------------------------------------------------------------
1 | import { Request, Response, NextFunction } from "express";
2 | import client from "prom-client";
3 |
4 | export const register = new client.Registry();
5 |
6 | const httpRequestDurationMs = new client.Histogram({
7 | name: "http_request_duration_ms",
8 | help: "HTTP request duration in milliseconds",
9 | labelNames: ["method", "route", "status"],
10 | buckets: [5, 10, 25, 50, 100, 200, 500, 1000, 2000],
11 | });
12 |
13 | const httpRequestCount = new client.Counter({
14 | name: "http_request_count",
15 | help: "HTTP request count",
16 | labelNames: ["method", "route", "status"],
17 | });
18 |
19 | register.registerMetric(httpRequestDurationMs);
20 | register.registerMetric(httpRequestCount);
21 |
22 | client.collectDefaultMetrics({ register });
23 |
24 | export function metricsMiddleware(req: Request, res: Response, next: NextFunction) {
25 | const start = Date.now();
26 | res.on("finish", () => {
27 | const route = (req.route && req.route.path) || req.path || "unknown";
28 | const labels = { method: req.method, route, status: String(res.statusCode) } as const;
29 | httpRequestCount.inc(labels);
30 | httpRequestDurationMs.observe(labels, Date.now() - start);
31 | });
32 | next();
33 | }
34 |
--------------------------------------------------------------------------------
/tests/univ3.spec.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { simulateUniV3SlipBps, Q96 } from '../src/eval/univ3_math';
3 |
4 | // Construct a plausible pool state.
5 | const sqrtPriceX96 = ((): string => {
6 | // price ~ 1.0 => sqrtPrice ~1.0 in Q96
7 | return Q96.toString();
8 | })();
9 | const liquidity = (1_000_000n * Q96); // arbitrary large liquidity scaled by Q96
10 |
11 | function slip(amountInRaw: bigint, feeBps = 30, zeroForOne = true) {
12 | return simulateUniV3SlipBps({
13 | sqrtPriceX96,
14 | liquidity: liquidity.toString(),
15 | feeTierBps: feeBps,
16 | amountIn: amountInRaw.toString(),
17 | zeroForOne,
18 | }).slipBps;
19 | }
20 |
21 | describe('UniV3 no-cross simulator', () => {
22 | it('slippage increases with amountIn', () => {
23 | const s1 = slip(10n * 10n ** 18n); // 10 tokens
24 | const s2 = slip(100n * 10n ** 18n); // 100 tokens
25 | const s3 = slip(1_000n * 10n ** 18n); // 1,000 tokens
26 | expect(s1).toBeGreaterThan(0);
27 | expect(s2).toBeGreaterThan(s1);
28 | expect(s3).toBeGreaterThan(s2);
29 | });
30 |
31 | it('higher fee tier yields higher effective slippage', () => {
32 | const lowFee = slip(100n * 10n ** 18n, 5);
33 | const hiFee = slip(100n * 10n ** 18n, 100);
34 | expect(hiFee).toBeGreaterThan(lowFee);
35 | });
36 | });
37 |
--------------------------------------------------------------------------------
/research/plots.py:
--------------------------------------------------------------------------------
1 | """
2 | Plotting helpers for research analysis using Matplotlib/Seaborn.
3 | """
4 | from __future__ import annotations
5 | import typing as t
6 | import pandas as pd
7 | import numpy as np
8 | import matplotlib.pyplot as plt
9 | import seaborn as sns
10 |
11 | sns.set_context("talk")
12 |
13 |
14 | def plot_drawdown_curve(pnl_series: pd.Series, ax: plt.Axes | None = None) -> plt.Axes:
15 | ax = ax or plt.gca()
16 | pnl = pnl_series.sort_index()
17 | eq = pnl.cumsum()
18 | roll_max = eq.cummax()
19 | dd = eq - roll_max
20 | eq.plot(ax=ax, color='steelblue', label='Equity')
21 | dd.plot(ax=ax, color='crimson', label='Drawdown')
22 | ax.legend()
23 | ax.set_title('Equity and Drawdown')
24 | return ax
25 |
26 |
27 | def heatmap_param_response(df_grid: pd.DataFrame, x: str, y: str, z: str = 'total_net_profit', cmap: str = 'viridis', annot: bool = False) -> plt.Axes:
28 | pivot = df_grid.pivot_table(index=y, columns=x, values=z, aggfunc='mean')
29 | ax = sns.heatmap(pivot, cmap=cmap, annot=annot, fmt='.2f')
30 | ax.set_title(f'Response: {z} by {x} x {y}')
31 | return ax
32 |
33 |
34 | def distribution_plot(df: pd.DataFrame, col: str = 'trade_net_usd') -> plt.Axes:
35 | ax = sns.histplot(df[col], kde=True, stat='density', bins=50)
36 | ax.set_title(f'Distribution: {col}')
37 | return ax
38 |
--------------------------------------------------------------------------------
/python-backend/app/data/etl_alchemy.py:
--------------------------------------------------------------------------------
1 | import os, httpx
2 | import pandas as pd
3 | from typing import Any
4 |
5 | ALCHEMY_RPC_URL = os.getenv("ALCHEMY_RPC_URL", "")
6 |
7 | async def rpc(method: str, params: list) -> Any:
8 | if not ALCHEMY_RPC_URL:
9 | raise RuntimeError("ALCHEMY_RPC_URL not set")
10 | async with httpx.AsyncClient(timeout=10.0) as c:
11 | r = await c.post(ALCHEMY_RPC_URL, json={"jsonrpc":"2.0","id":1,"method":method,"params":params})
12 | r.raise_for_status()
13 | return r.json().get("result")
14 |
15 | async def get_recent_blocks(n: int = 50) -> pd.DataFrame:
16 | latest_hex = await rpc("eth_blockNumber", [])
17 | latest = int(latest_hex, 16)
18 | rows = []
19 | for i in range(n):
20 | num = latest - i
21 | blk = await rpc("eth_getBlockByNumber", [hex(num), False])
22 | rows.append({
23 | "number": int(blk["number"],16),
24 | "baseFeePerGas_gwei": (int(blk.get("baseFeePerGas","0x0"),16) / 1e9),
25 | "timestamp": int(blk["timestamp"],16)
26 | })
27 | df = pd.DataFrame(rows).sort_values("number").reset_index(drop=True)
28 | return df
29 |
30 | def base_fee_ema(df: pd.DataFrame, span:int=12) -> float:
31 | if df.empty: return 0.0
32 | return float(df["baseFeePerGas_gwei"].ewm(span=span, adjust=False).mean().iloc[-1])
33 |
--------------------------------------------------------------------------------
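A minimal driver sketch, assuming ALCHEMY_RPC_URL is exported and the package is importable as app.data.etl_alchemy:

import asyncio
from app.data.etl_alchemy import get_recent_blocks, base_fee_ema  # assumed import path

async def main() -> None:
    df = await get_recent_blocks(n=20)  # latest 20 blocks, sorted oldest-first
    print("base fee EMA (gwei):", base_fee_ema(df, span=12))

asyncio.run(main())
--------------------------------------------------------------------------------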
/mvp_py/src/arbitrage/watchdog.py:
--------------------------------------------------------------------------------
1 | import time
2 | from dataclasses import dataclass
3 |
4 | @dataclass
5 | class HealthCfg:
6 | min_tick_hz: float = 2.0
7 | max_error_rate: float = 0.2 # last 60s
8 | max_gas_usd: float = 20.0
9 | stale_ms: int = 3000
10 |
11 | class Watchdog:
12 | def __init__(self, cfg: HealthCfg):
13 | self.cfg=cfg
14 | self._ticks=[]; self._errs=[]; self._last_ts=0; self._gas_usd=0.0
15 | self._paused=False
16 |
17 | def record_tick(self, ts_ms:int): self._ticks.append(ts_ms); self._last_ts=ts_ms; self._trim()
18 | def record_error(self, ts_ms:int): self._errs.append(ts_ms); self._trim()
19 | def set_gas_usd(self, v:float): self._gas_usd=float(v)
20 |
21 | def _trim(self):
22 | now=int(time.time()*1000)
23 | self._ticks=[t for t in self._ticks if now-t<60000]
24 | self._errs=[t for t in self._errs if now-t<60000]
25 |
26 | def _tick_hz(self)->float:
27 | n=len(self._ticks); return n/60.0
28 |
29 | def _err_rate(self)->float:
30 | n=len(self._errs); return n/max(1,len(self._ticks))
31 |
32 | def should_pause(self)->bool:
33 | now=int(time.time()*1000)
34 | stale = (now - self._last_ts) > self.cfg.stale_ms
35 |         return stale or (self._tick_hz()<self.cfg.min_tick_hz) or (self._err_rate()>self.cfg.max_error_rate) or (self._gas_usd>self.cfg.max_gas_usd)
36 |
--------------------------------------------------------------------------------
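A hedged usage sketch of the watchdog; thresholds come from the HealthCfg defaults above (import path assumed). should_pause() stays True until the tick rate climbs above min_tick_hz.

import time
from mvp_py.src.arbitrage.watchdog import Watchdog, HealthCfg  # assumed import path

wd = Watchdog(HealthCfg())
wd.record_tick(int(time.time() * 1000))  # call on every successful price/quote tick
wd.set_gas_usd(4.2)                      # latest estimated gas cost per trade, USD

if wd.should_pause():
    print("pausing execution: stale feed, low tick rate, error burst, or gas too high")
--------------------------------------------------------------------------------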
/scripts/smoke_eval.ts:
--------------------------------------------------------------------------------
1 | // Quick smoke test against TS eval server
2 | // Assumes server running on TS_API_PORT (falls back to 8083 here). Override via env.
3 |
4 | const port = Number(process.env.TS_API_PORT || 8083);
5 | const url = `http://127.0.0.1:${port}/api/eval/batch`;
6 |
7 | async function main() {
8 | const body = {
9 | items: [
10 | {
11 | edgeBps: 25,
12 | notionalUsd: 10000,
13 | fees: { totalFeesBps: 8, flashFeeBps: 5, referralBps: 2, executorFeeUsd: 0.5, flashFixedUsd: 0.2 },
14 | frictions: { gasUsdMean: 0.2, adverseUsdMean: 1.0 },
15 | latency: { latencySec: 1.2, edgeDecayBpsPerSec: 2.0, baseFillProb: 0.8, theta: 0.15 },
16 | slippage: { kind: "empirical", k: 0.9, alpha: 1.25, liquidityRefUsd: 1_500_000 },
17 | failures: { failBeforeFillProb: 0.02, failBetweenLegsProb: 0.01, reorgOrMevProb: 0.0 },
18 | flashEnabled: true,
19 | riskAversion: 0.0001,
20 | capitalUsd: 20000,
21 | },
22 | ],
23 | defaults: { varCvar: false },
24 | };
25 |
26 | try {
27 | const res = await fetch(url, {
28 | method: "POST",
29 | headers: { "Content-Type": "application/json" },
30 | body: JSON.stringify(body),
31 | });
32 | const json = await res.json();
33 | console.log(JSON.stringify(json, null, 2));
34 | } catch (e) {
35 | console.error("smoke_eval error:", e);
36 | process.exitCode = 1;
37 | }
38 | }
39 |
40 | main();
41 |
--------------------------------------------------------------------------------
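The same smoke request can be issued from the Python side; a hedged httpx sketch mirroring the payload above (port 8083 and the /api/eval/batch route as used by the TS script):

import httpx

payload = {
    "items": [{
        "edgeBps": 25, "notionalUsd": 10_000,
        "fees": {"totalFeesBps": 8, "flashFeeBps": 5, "referralBps": 2, "executorFeeUsd": 0.5, "flashFixedUsd": 0.2},
        "frictions": {"gasUsdMean": 0.2, "adverseUsdMean": 1.0},
        "latency": {"latencySec": 1.2, "edgeDecayBpsPerSec": 2.0, "baseFillProb": 0.8, "theta": 0.15},
        "slippage": {"kind": "empirical", "k": 0.9, "alpha": 1.25, "liquidityRefUsd": 1_500_000},
        "failures": {"failBeforeFillProb": 0.02, "failBetweenLegsProb": 0.01, "reorgOrMevProb": 0.0},
        "flashEnabled": True, "riskAversion": 0.0001, "capitalUsd": 20_000,
    }],
    "defaults": {"varCvar": False},
}

resp = httpx.post("http://127.0.0.1:8083/api/eval/batch", json=payload, timeout=10.0)
resp.raise_for_status()
print(resp.json())
--------------------------------------------------------------------------------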
/src/chain/univ3_abi.ts:
--------------------------------------------------------------------------------
1 | export const IUniswapV3PoolABI = [
2 | // slot0: sqrtPriceX96, tick, observationIndex, observationCardinality, observationCardinalityNext, feeProtocol, unlocked
3 | {
4 | "inputs": [],
5 | "name": "slot0",
6 | "outputs": [
7 | { "internalType": "uint160", "name": "sqrtPriceX96", "type": "uint160" },
8 | { "internalType": "int24", "name": "tick", "type": "int24" },
9 | { "internalType": "uint16", "name": "observationIndex", "type": "uint16" },
10 | { "internalType": "uint16", "name": "observationCardinality", "type": "uint16" },
11 | { "internalType": "uint16", "name": "observationCardinalityNext", "type": "uint16" },
12 | { "internalType": "uint8", "name": "feeProtocol", "type": "uint8" },
13 | { "internalType": "bool", "name": "unlocked", "type": "bool" }
14 | ],
15 | "stateMutability": "view",
16 | "type": "function"
17 | },
18 | {
19 | "inputs": [],
20 | "name": "liquidity",
21 | "outputs": [ { "internalType": "uint128", "name": "", "type": "uint128" } ],
22 | "stateMutability": "view",
23 | "type": "function"
24 | },
25 | { "inputs": [], "name": "fee", "outputs": [ { "internalType": "uint24", "name": "", "type": "uint24" } ], "stateMutability": "view", "type": "function" },
26 | { "inputs": [], "name": "tickSpacing", "outputs": [ { "internalType": "int24", "name": "", "type": "int24" } ], "stateMutability": "view", "type": "function" },
27 | ];
28 |
--------------------------------------------------------------------------------
/src/chain/univ3_fetch.ts:
--------------------------------------------------------------------------------
1 | import { Contract } from 'ethers';
2 | import { getProvider } from './providers';
3 | import { IUniswapV3PoolABI } from './univ3_abi';
4 |
5 | export type PoolState = {
6 | sqrtPriceX96: string;
7 | tick: number;
8 | liquidity: string;
9 | fee: number;
10 | tickSpacing: number;
11 | };
12 |
13 | export async function fetchPoolState(poolAddress: string, rpcUrl?: string): Promise<PoolState> {
14 | const provider = getProvider(rpcUrl);
15 | const pool = new Contract(poolAddress, IUniswapV3PoolABI, provider);
16 | const [slot0, liquidity, fee, tickSpacing] = await Promise.all([
17 | pool.slot0(),
18 | pool.liquidity(),
19 | pool.fee(),
20 | pool.tickSpacing(),
21 | ]);
22 | return {
23 | sqrtPriceX96: slot0.sqrtPriceX96.toString(),
24 | tick: Number(slot0.tick),
25 | liquidity: liquidity.toString(),
26 | fee: Number(fee),
27 | tickSpacing: Number(tickSpacing),
28 | };
29 | }
30 |
31 | // Placeholder: fetching initialized ticks requires TickLens helper or pool methods via range queries.
32 | // Implementations often use Uniswap's TickLens or subgraph. This is a stub to keep module shape.
33 | export type InitializedTick = { index: number; liquidityNet: string; sqrtPriceX96?: string };
34 |
35 | export async function fetchInitializedTicks(_poolAddress: string, _rpcUrl?: string): Promise<InitializedTick[]> {
36 | // TODO: implement via TickLens/multicall; return empty for now
37 | return [];
38 | }
39 |
--------------------------------------------------------------------------------
/tools/engine_shim.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | import random
4 | from datetime import datetime
5 | import httpx
6 |
7 | BACKEND_URL = os.getenv("BACKEND_URL", "http://127.0.0.1:8000")
8 | POST_URL = f"{BACKEND_URL}/api/ingest/opportunity"
9 |
10 | PAIRS = ["PRJX/USDC", "HYPE/USDC", "PRJX/ETH"]
11 | ROUTES = ["PRJX->HyperSwap", "HyperSwap->PRJX"]
12 |
13 | async def send_once(client: httpx.AsyncClient) -> None:
14 | pair = random.choice(PAIRS)
15 | opp = {
16 | "pair": pair,
17 | "spread_bps": round(random.uniform(5.0, 35.0), 2),
18 | "est_gas_usd": round(random.uniform(0.05, 1.2), 2),
19 | "est_profit_usd": round(random.uniform(-1.0, 30.0), 2),
20 | "liquidity_usd": round(random.uniform(10_000, 300_000), 2),
21 | "confidence": round(random.uniform(0.6, 0.99), 2),
22 | "route": random.choice(ROUTES),
23 | "note": f"shim@{datetime.utcnow().isoformat()}"
24 | }
25 | try:
26 | r = await client.post(POST_URL, json=opp, timeout=5.0)
27 | r.raise_for_status()
28 | except Exception as e:
29 | # Best-effort logging; keep going
30 | print(f"ingest error: {e}")
31 |
32 | async def main():
33 | interval = float(os.getenv("SHIM_INTERVAL", "0.8"))
34 | async with httpx.AsyncClient() as client:
35 | while True:
36 | await send_once(client)
37 | await asyncio.sleep(interval)
38 |
39 | if __name__ == "__main__":
40 | asyncio.run(main())
41 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Multi-stage build for production deployment
2 | FROM node:18-alpine AS builder
3 |
4 | # Install build dependencies
5 | RUN apk add --no-cache python3 make g++
6 |
7 | WORKDIR /app
8 |
9 | # Copy package files
10 | COPY package*.json ./
11 | COPY tsconfig.json ./
12 |
13 | # Install dependencies
14 | RUN npm ci --only=production && \
15 | npm install -g typescript
16 |
17 | # Copy source code
18 | COPY src ./src
19 |
20 | # Build TypeScript
21 | RUN npm run build
22 |
23 | # Production stage
24 | FROM node:18-alpine
25 |
26 | # Install runtime dependencies
27 | RUN apk add --no-cache tini curl
28 |
29 | # Create non-root user
30 | RUN addgroup -g 1001 -S nodejs && \
31 | adduser -S nodejs -u 1001
32 |
33 | WORKDIR /app
34 |
35 | # Copy built application
36 | COPY --from=builder --chown=nodejs:nodejs /app/dist ./dist
37 | COPY --from=builder --chown=nodejs:nodejs /app/node_modules ./node_modules
38 | COPY --chown=nodejs:nodejs package*.json ./
39 |
40 | # Create necessary directories
41 | RUN mkdir -p logs && \
42 | chown -R nodejs:nodejs logs
43 |
44 | # Health check
45 | HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
46 | CMD node -e "require('http').get('http://localhost:4000/api/health', (r) => r.statusCode === 200 ? process.exit(0) : process.exit(1))"
47 |
48 | # Switch to non-root user
49 | USER nodejs
50 |
51 | # Use tini for proper signal handling
52 | ENTRYPOINT ["/sbin/tini", "--"]
53 |
54 | # Default command
55 | CMD ["node", "dist/integration/main.js"]
56 |
57 | # Expose ports
58 | EXPOSE 4000
59 |
--------------------------------------------------------------------------------
/tests/integration/eval.api.spec.ts:
--------------------------------------------------------------------------------
1 | import request from 'supertest';
2 | import { describe, it, expect } from 'vitest';
3 | import { app } from '../../src/api/app';
4 |
5 | describe('API /api/eval/batch', () => {
6 | it('responds with extended metrics and legacy fields', async () => {
7 | const res = await request(app)
8 | .post('/api/eval/batch')
9 | .send({
10 | items: [
11 | {
12 | edgeBps: 20,
13 | notionalUsd: 10_000,
14 | fees: { totalFeesBps: 8, flashFeeBps: 0, referralBps: 0, executorFeeUsd: 0, flashFixedUsd: 0 },
15 | frictions: { gasUsdMean: 0.2, adverseUsdMean: 0.8 },
16 | latency: { latencySec: 0.8, edgeDecayBpsPerSec: 1.2, baseFillProb: 0.85, theta: 0.15 },
17 | slippage: { kind: 'empirical', k: 0.9, alpha: 1.2, liquidityRefUsd: 1_500_000 },
18 | failures: { failBeforeFillProb: 0.02, failBetweenLegsProb: 0.01, reorgOrMevProb: 0.0 },
19 | flashEnabled: false,
20 | riskAversion: 0.00005,
21 | capitalUsd: 20_000,
22 | },
23 | ],
24 | })
25 | .expect(200);
26 |
27 | expect(Array.isArray(res.body?.items)).toBe(true);
28 | const item = res.body.items[0];
29 |
30 | // Extended fields
31 | expect(typeof item.ev_per_sec).toBe('number');
32 | expect(typeof item.size_opt_usd).toBe('number');
33 | expect(typeof item.p_success).toBe('number');
34 |
35 | // Legacy compatibility
36 | expect(typeof item.gas_usd).toBe('number');
37 | expect(typeof item.seconds).toBe('number');
38 | expect(typeof item.slip_bps_eff).toBe('number');
39 | });
40 | });
41 |
--------------------------------------------------------------------------------
/python-backend/app/analytics/size_opt.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from typing import Callable, Tuple
3 | from math import isfinite
4 | from scipy.optimize import minimize_scalar
5 |
6 | def xyk_out_given_in(dx: float, Rin: float, Rout: float, fee_bps: float) -> float:
7 | fee = 1.0 - fee_bps/10000.0
8 | dx_eff = dx * fee
9 | return (dx_eff * Rout) / (Rin + dx_eff)
10 |
11 | def slip_bps(dx: float, Rin: float) -> float:
12 | if dx <= 0 or Rin <= 0: return 0.0
13 | return 10000.0 * dx / (Rin + dx)
14 |
15 | def solve_best_dx(
16 | Rin: float, Rout: float, fee_bps: float,
17 | px_out_usd: float,
18 | notional_cap_usd: float,
19 | slip_cap_bps: float,
20 | expected_net_fn: Callable[[float], float],
21 | dx_hi_frac: float = 0.25
22 | ) -> Tuple[float, float, float]:
23 | if Rin<=0 or Rout<=0 or px_out_usd<=0:
24 | return 0.0, 0.0, 0.0
25 |
26 | dx_slip = (Rin * (slip_cap_bps/10000.0)) / max(1e-9, (1.0 - slip_cap_bps/10000.0))
27 | dx_max = min(Rin*dx_hi_frac, dx_slip)
28 |
29 | def obj(dx):
30 | if dx<=0: return 1e12
31 | if slip_bps(dx, Rin) > slip_cap_bps: return 1e11
32 | dy = xyk_out_given_in(dx, Rin, Rout, fee_bps)
33 | notional = dy * px_out_usd
34 | if notional > notional_cap_usd: return 1e10
35 | net = expected_net_fn(notional)
36 | return -net
37 |
38 | res = minimize_scalar(obj, bounds=(1e-12, dx_max), method="bounded", options={"xatol":1e-9})
39 | if not res.success or not isfinite(res.x):
40 | return 0.0, 0.0, 0.0
41 | best_dx = float(res.x)
42 | best_dy = xyk_out_given_in(best_dx, Rin, Rout, fee_bps)
43 | best_notional = best_dy * px_out_usd
44 | best_slip = slip_bps(best_dx, Rin)
45 | best_net = -float(res.fun)
46 | return best_dx, best_net, best_slip
47 |
--------------------------------------------------------------------------------
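A hedged usage sketch: plug a simple expected-net callback (flat edge minus fees and gas, illustrative only) into solve_best_dx (import path assumed):

from app.analytics.size_opt import solve_best_dx  # assumed import path

def expected_net(notional_usd: float) -> float:
    # illustrative: 25 bps edge, 8 bps total fees, 1.50 USD gas
    return notional_usd * (25.0 - 8.0) / 10_000.0 - 1.50

best_dx, best_net, best_slip = solve_best_dx(
    Rin=1_000_000.0, Rout=500.0, fee_bps=30.0,
    px_out_usd=2_500.0,
    notional_cap_usd=50_000.0,
    slip_cap_bps=30.0,
    expected_net_fn=expected_net,
)
print(best_dx, best_net, best_slip)  # input size, expected net USD, realized slippage (bps)
--------------------------------------------------------------------------------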
/src/api/app.ts:
--------------------------------------------------------------------------------
1 | import express, { Request, Response } from "express";
2 | import dotenv from "dotenv";
3 | import evalRouter from "./routes/eval";
4 | import strategyRouter from "./routes/strategy";
5 | import botRouter from "./routes/bot";
6 | import configRouter from "./routes/config";
7 | import { z } from "zod";
8 | import { loggingMiddleware } from "./middleware/logging";
9 | import { metricsMiddleware, register } from "./middleware/metrics";
10 | import { State } from "./state";
11 | import { rateLimit } from "./middleware/rateLimit";
12 |
13 | dotenv.config();
14 |
15 | export const app = express();
16 | app.use(express.json({ limit: "1mb" }));
17 | app.use(loggingMiddleware);
18 | app.use(metricsMiddleware);
19 |
20 | app.get("/health", (_req: Request, res: Response) => res.json({ ok: true }));
21 | app.use("/api/eval", evalRouter);
22 | app.get("/metrics", async (_req: Request, res: Response) => {
23 | res.setHeader("Content-Type", register.contentType);
24 | res.end(await register.metrics());
25 | });
26 |
27 | // Read-only endpoints for UI
28 | app.get("/api/stats", (_req: Request, res: Response) => {
29 | res.json(State.getStats());
30 | });
31 |
32 | app.get("/api/signals/active", (_req: Request, res: Response) => {
33 | res.json(State.getActiveSignals());
34 | });
35 |
36 | app.get("/api/opportunities/recent", (req: Request, res: Response) => {
37 | const limitSchema = z.coerce.number().int().min(1).max(200).default(50);
38 | const parse = limitSchema.safeParse(req.query.limit);
39 | const limit = parse.success ? parse.data : 50;
40 | res.json(State.getRecentOpportunities(limit));
41 | });
42 |
43 | // Strategy + Bot routes
44 | app.use("/api/strategy", rateLimit({ capacity: 20, refillPerMs: 250 }), strategyRouter);
45 | app.use("/api/bot", botRouter);
46 | app.use("/api/config", configRouter);
47 |
--------------------------------------------------------------------------------
/tests/model.flash.spec.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { evaluateArb } from '../src/eval/model';
3 | import { ArbInputs } from '../src/eval/types';
4 |
5 | function makeInputs(): ArbInputs {
6 | return {
7 | edgeBps: 20,
8 | notionalUsd: 10_000,
9 | fees: { totalFeesBps: 8, flashFeeBps: 0, referralBps: 0, executorFeeUsd: 0, flashFixedUsd: 0 },
10 | frictions: { gasUsdMean: 0.2, adverseUsdMean: 0.8 },
11 | latency: { latencySec: 0.6, edgeDecayBpsPerSec: 1.0, baseFillProb: 0.85, theta: 0.15 },
12 | slippage: { kind: 'empirical', k: 1.0, alpha: 1.2, liquidityRefUsd: 1_200_000 },
13 | failures: { failBeforeFillProb: 0.02, failBetweenLegsProb: 0.01, reorgOrMevProb: 0.0 },
14 | flashEnabled: false,
15 | riskAversion: 0.00005,
16 | capitalUsd: 25_000,
17 | };
18 | }
19 |
20 | describe('model flash fee toggles', () => {
21 | it('flash on with zero flash fees ~ equal to flash off', () => {
22 | const a = makeInputs();
23 | a.flashEnabled = false;
24 | const ra = evaluateArb(a);
25 |
26 | const b = makeInputs();
27 | b.flashEnabled = true;
28 | b.fees.flashFeeBps = 0; b.fees.referralBps = 0; b.fees.executorFeeUsd = 0; b.fees.flashFixedUsd = 0;
29 | const rb = evaluateArb(b);
30 |
31 | // Expect close results
32 | expect(Math.abs(ra.ev_per_sec - rb.ev_per_sec)).toBeLessThan(1e-6);
33 | expect(Math.abs(ra.size_opt_usd - rb.size_opt_usd)).toBeLessThan(1e-6);
34 | });
35 |
36 | it('higher risk aversion reduces optimal size', () => {
37 | const low = makeInputs();
38 | low.riskAversion = 0.0;
39 | const rLow = evaluateArb(low);
40 |
41 | const high = makeInputs();
42 | high.riskAversion = 0.005;
43 | const rHigh = evaluateArb(high);
44 |
45 | expect(rHigh.size_opt_usd).toBeLessThanOrEqual(rLow.size_opt_usd);
46 | });
47 | });
48 |
--------------------------------------------------------------------------------
/scripts/seed_strategy.ts:
--------------------------------------------------------------------------------
1 | import axios from 'axios';
2 |
3 | async function main() {
4 | const base = process.env.API_BASE || 'http://127.0.0.1:8083';
5 | const kind = process.env.STRAT_KIND || 'triangular';
6 | const hours = Number(process.env.COVERAGE_HOURS || 26);
7 | const now = Date.now();
8 | const start = now - hours * 3600_000;
9 |
10 | console.log(`Seeding strategy on ${base} kind=${kind}, coverage=${hours}h`);
11 |
12 | // 1) Create strategy
13 | const createRes = await axios.post(`${base}/api/strategy`, {
14 | name: `Auto ${kind} ${new Date().toISOString()}`,
15 | kind,
16 | params: {}
17 | });
18 | const strat = createRes.data;
19 | console.log('Created strategy:', strat.id);
20 |
21 | // 2) Upload backtest
22 | const btRes = await axios.post(`${base}/api/strategy/${strat.id}/backtest`, {
23 | startedAt: start,
24 | endedAt: now,
25 | stats: {
26 | evAdjUsd: 10,
27 | pSuccess: 0.8,
28 | maxDrawdown: 0,
29 | hitRate: 0.75,
30 | pnlUsd: 100,
31 | samples: 1000
32 | }
33 | });
34 | const run = btRes.data;
35 | console.log('Backtest added:', run.id, 'coverageHours=', run.coverageHours.toFixed(2));
36 |
37 | // 3) Approve per policy
38 | const approveRes = await axios.post(`${base}/api/strategy/${strat.id}/approve`);
39 | console.log('Approval decision:', approveRes.data.status, approveRes.data.reason || '');
40 | console.log('Thresholds:', approveRes.data.thresholds);
41 |
42 | // 4) Fetch final strategy
43 | const final = await axios.get(`${base}/api/strategy/${strat.id}`);
44 | console.log(JSON.stringify(final.data, null, 2));
45 | }
46 |
47 | main().catch((e) => {
48 | if (axios.isAxiosError(e)) {
49 | console.error('HTTP Error:', e.response?.status, e.response?.data);
50 | } else {
51 | console.error(e);
52 | }
53 | process.exit(1);
54 | });
55 |
--------------------------------------------------------------------------------
/mvp_py/src/arbitrage/gas_oracle.py:
--------------------------------------------------------------------------------
1 | import os, time, httpx
2 | from typing import Optional
3 |
4 | # Simple gas oracle with 3 sources:
5 | # 1) ETH_GAS_WEI env override
6 | # 2) JSON-RPC eth_gasPrice
7 | # 3) Public REST fallback (optional)
8 |
9 | class GasOracle:
10 | def __init__(self, rpc_url: str, fallback_url: Optional[str]=None, safety_mult: float = 1.2):
11 | self.rpc_url = rpc_url
12 | self.fallback_url = fallback_url
13 | self.safety_mult = safety_mult
14 | self._last=(0,0)
15 |
16 | async def wei(self) -> int:
17 | override=os.getenv("ETH_GAS_WEI")
18 | if override:
19 | try:
20 | return int(override)
21 | except Exception:
22 | pass
23 | now=int(time.time())
24 | if now-self._last[0] < 3:
25 | return self._last[1]
26 | # RPC
27 | try:
28 | async with httpx.AsyncClient(timeout=2.5) as c:
29 | r=await c.post(self.rpc_url,json={"jsonrpc":"2.0","method":"eth_gasPrice","params":[],"id":1})
30 | r.raise_for_status()
31 | v=int(r.json()["result"],16)
32 | v=int(v*self.safety_mult)
33 | self._last=(now,v); return v
34 | except Exception:
35 | pass
36 | # Fallback (optional)
37 | if self.fallback_url:
38 | try:
39 | async with httpx.AsyncClient(timeout=2.5) as c:
40 | r=await c.get(self.fallback_url)
41 | r.raise_for_status()
42 | g=int(float(r.json()["standard"]) * 1e9) # gwei -> wei
43 | v=int(g*self.safety_mult)
44 | self._last=(now,v); return v
45 | except Exception:
46 | pass
47 | # last resort
48 | return int(30e9) # 30 gwei
49 |
--------------------------------------------------------------------------------
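A hedged usage sketch; the RPC URL is an assumption, and the ETH_GAS_WEI env override short-circuits everything:

import asyncio
from mvp_py.src.arbitrage.gas_oracle import GasOracle  # assumed import path

async def main() -> None:
    oracle = GasOracle(rpc_url="http://127.0.0.1:8545", safety_mult=1.2)
    gas_wei = await oracle.wei()  # cached for ~3s between calls
    print("gas price (gwei):", gas_wei / 1e9)

asyncio.run(main())
--------------------------------------------------------------------------------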
/python-backend/app/analytics/arb_formula.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from typing import Optional, Dict, Any, Tuple
3 | import numpy as np
4 |
5 | @dataclass
6 | class GasModel:
7 | base_fee_gwei: float
8 | tip_gwei: float
9 | native_usd: float
10 | gas_limit: int
11 | max_gas_usd_per_trade: float = 1e9
12 | def usd(self) -> float:
13 | usd = (self.base_fee_gwei + self.tip_gwei) * 1e9 * self.gas_limit / 1e18 * self.native_usd
14 | return float(min(usd, self.max_gas_usd_per_trade))
15 |
16 | @dataclass
17 | class LatencyModel:
18 | decision_to_submit_ms: float = 200.0
19 | submit_to_inclusion_blocks: int = 1
20 | seconds_per_block: float = 1.0
21 | k_vol: float = 0.0
22 | notional_beta: float = 1.0
23 | def inclusion_seconds(self) -> float:
24 | return (self.decision_to_submit_ms/1000.0) + self.submit_to_inclusion_blocks*self.seconds_per_block
25 | def adverse_usd(self, notional_usd: float) -> float:
26 | dt = max(self.inclusion_seconds(), 1e-6)
27 | return float(self.k_vol) * np.sqrt(dt) * float(self.notional_beta) * float(notional_usd)
28 |
29 | def gross_from_edge_bps(edge_bps: float, notional_usd: float) -> float:
30 | return (edge_bps/10000.0) * notional_usd
31 |
32 | def apply_fees_bps(amount_usd: float, total_fees_bps: float) -> float:
33 | return amount_usd * (1.0 - total_fees_bps/10000.0)
34 |
35 | def expected_net_usd(edge_bps: float, notional_usd: float, total_fees_bps: float, gas_usd: float, adverse_usd: float, extra_usd: float=0.0, fail_prob: float=0.0) -> float:
36 | gross = gross_from_edge_bps(edge_bps, notional_usd)
37 | after_fees = apply_fees_bps(gross, total_fees_bps) - extra_usd
38 | net = after_fees - gas_usd - adverse_usd
39 | p = max(0.0, min(1.0, fail_prob))
40 | return (1.0-p)*net + p*(-gas_usd)
41 |
42 | def score_hft(net_usd: float, gas_usd: float, seconds: float, w_net: float=1.0, w_ppg: float=0.6, w_pps: float=0.6) -> float:
43 | ppg = net_usd / max(gas_usd, 1e-9)
44 | pps = net_usd / max(seconds, 1e-3)
45 | return w_net*net_usd + w_ppg*ppg + w_pps*pps
46 |
--------------------------------------------------------------------------------
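A worked pass through the formulas above with illustrative inputs: a 25 bps edge on 10k USD notional, 8 bps total fees, and gas / adverse-selection costs taken from the two dataclasses (import path assumed):

from app.analytics.arb_formula import GasModel, LatencyModel, expected_net_usd, score_hft  # assumed import path

gas = GasModel(base_fee_gwei=20.0, tip_gwei=2.0, native_usd=2_500.0, gas_limit=250_000)
lat = LatencyModel(decision_to_submit_ms=200.0, submit_to_inclusion_blocks=1,
                   seconds_per_block=1.0, k_vol=0.0002, notional_beta=1.0)

notional = 10_000.0
gas_usd = gas.usd()                  # (20 + 2) gwei * 250k gas ≈ 13.75 USD
adv_usd = lat.adverse_usd(notional)  # 0.0002 * sqrt(1.2 s) * 10_000 ≈ 2.19 USD
net = expected_net_usd(edge_bps=25.0, notional_usd=notional, total_fees_bps=8.0,
                       gas_usd=gas_usd, adverse_usd=adv_usd, fail_prob=0.02)
score = score_hft(net, gas_usd, seconds=lat.inclusion_seconds())
print(net, score)                    # ≈ 8.58 USD expected net; composite HFT score
--------------------------------------------------------------------------------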
/tests/strategyApi.spec.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect, beforeAll, afterAll } from 'vitest';
2 | import request from 'supertest';
3 | import os from 'os';
4 | import path from 'path';
5 | import fs from 'fs/promises';
6 |
7 | let tmpDir: string;
8 |
9 | beforeAll(async () => {
10 | tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), 'arb-strat-'));
11 | process.env.DATA_DIR = tmpDir; // ensure file-backed store uses isolated dir
12 | });
13 |
14 | afterAll(async () => {
15 | if (tmpDir) {
16 | try { await fs.rm(tmpDir, { recursive: true, force: true }); } catch {}
17 | }
18 | });
19 |
20 | describe('Strategy API integration', () => {
21 | it('creates, backtests, and approves a strategy', async () => {
22 | const { app } = await import('../src/api/app');
23 |
24 | // 1) Create strategy
25 | const createRes = await request(app)
26 | .post('/api/strategy')
27 | .send({ name: 'Test Tri', kind: 'triangular', params: {} })
28 | .expect(201);
29 |
30 | const id = createRes.body.id as string;
31 | expect(id).toBeTruthy();
32 |
33 | // 2) Upload backtest with >=24h coverage
34 | const now = Date.now();
35 | const start = now - 26 * 3600_000;
36 | const btRes = await request(app)
37 | .post(`/api/strategy/${id}/backtest`)
38 | .send({
39 | startedAt: start,
40 | endedAt: now,
41 | stats: {
42 | evAdjUsd: 10,
43 | pSuccess: 0.8,
44 | maxDrawdown: 0,
45 | hitRate: 0.75,
46 | pnlUsd: 100,
47 | samples: 1000,
48 | },
49 | })
50 | .expect(201);
51 |
52 | expect(btRes.body.coverageHours).toBeGreaterThanOrEqual(24);
53 |
54 | // 3) Approve per policy
55 | const approve = await request(app)
56 | .post(`/api/strategy/${id}/approve`)
57 | .expect(200);
58 |
59 | expect(approve.body.status).toBe('approved');
60 | expect(approve.body.strategy.status).toBe('approved');
61 |
62 | // 4) Fetch strategy
63 | const getRes = await request(app).get(`/api/strategy/${id}`).expect(200);
64 | expect(getRes.body.status).toBe('approved');
65 | });
66 | });
67 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "hyperliquid-arb-ts",
3 | "version": "0.1.0",
4 | "private": true,
5 | "scripts": {
6 | "dev": "tsx watch src/index.ts",
7 | "build": "tsc",
8 | "start": "node dist/index.js",
9 | "test": "jest",
10 | "typecheck": "tsc --noEmit",
11 | "test:refinement": "tsx src/refinement/test-init.ts",
12 | "test:execution": "tsx src/execution/test-execution.ts",
13 | "start:execution": "tsx src/execution/main.ts",
14 | "test:e2e": "tsx src/tests/integration/test-e2e-pipeline.ts",
15 | "test:full-pipeline": "tsx src/tests/test-full-pipeline.ts",
16 | "start:integration": "tsx src/integration/main.ts",
17 | "monitor": "tsx src/monitoring/dashboard.ts",
18 | "lint": "echo 'no lint configured'",
19 | "dev:8083": "set TS_API_PORT=8083&& ts-node-dev --respawn --transpile-only src/api/server.ts",
20 | "smoke": "ts-node -T scripts/smoke_eval.ts",
21 | "feeds": "ts-node src/feeds/index.ts",
22 | "feeds:dev": "ts-node-dev --respawn --transpile-only src/feeds/index.ts",
23 | "test:watch": "vitest",
24 | "test:cov": "vitest run --coverage",
25 | "seed:strategy": "tsx scripts/seed_strategy.ts"
26 | },
27 | "dependencies": {
28 | "axios": "^1.11.0",
29 | "compression": "^1.7.4",
30 | "cors": "^2.8.5",
31 | "dotenv": "^16.6.1",
32 | "ethers": "^5.7.2",
33 | "express": "^4.21.2",
34 | "express-rate-limit": "^7.1.5",
35 | "helmet": "^7.1.0",
36 | "node-fetch": "^2.7.0",
37 | "prom-client": "^15.1.3",
38 | "proper-lockfile": "^4.1.2",
39 | "tsx": "^3.14.0",
40 | "viem": "^2.33.3",
41 | "winston": "^3.17.0",
42 | "ws": "^8.14.2",
43 | "zod": "^3.23.8"
44 | },
45 | "devDependencies": {
46 | "@types/compression": "^1.8.1",
47 | "@types/cors": "^2.8.19",
48 | "@types/express": "^4.17.21",
49 | "@types/node": "^20.12.12",
50 | "@types/node-fetch": "^2.6.13",
51 | "@types/supertest": "^2.0.16",
52 | "@types/ws": "^8.18.1",
53 | "@vitest/coverage-v8": "^1.6.0",
54 | "supertest": "^7.0.0",
55 | "ts-node": "^10.9.2",
56 | "ts-node-dev": "^2.0.0",
57 | "typescript": "^5.4.5",
58 | "vitest": "^1.6.0"
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/config/strategies.json:
--------------------------------------------------------------------------------
1 | {
2 | "baseline": {
3 | "min_profit_usd": 5.0,
4 | "min_spread_bps": 10.0,
5 | "min_liquidity_usd": 10000.0,
6 | "slippage_bps": 30.0,
7 | "gas_multiplier": 1.0
8 | },
9 | "HYPE_to_KHYPE": {
10 | "min_profit_usd": 8.0,
11 | "min_spread_bps": 12.0,
12 | "min_liquidity_usd": 15000.0,
13 | "fees_bps": 6.0,
14 | "slippage_bps": 20.0,
15 | "include_assets": [
16 | "wstHYPE",
17 | "HYPE",
18 | "stHYPE",
19 | "KHYPE"
20 | ],
21 | "include_routes_contains": [
22 | "KHYPE"
23 | ],
24 | "include_chain_names": [
25 | "eth-mainnet",
26 | "hyperevm-mainnet"
27 | ],
28 | "note": "Focus HYPE family conversions that end in KHYPE on HyperEVM"
29 | },
30 | "ETH_multichain_to_HyperEVM": {
31 | "min_profit_usd": 10.0,
32 | "min_spread_bps": 10.0,
33 | "min_liquidity_usd": 20000.0,
34 | "fees_bps": 5.0,
35 | "slippage_bps": 18.0,
36 | "include_assets": [
37 | "ETH",
38 | "uETH"
39 | ],
40 | "include_routes_contains": [
41 | "HyperEVM",
42 | "uETH"
43 | ],
44 | "include_chain_names": [
45 | "eth-mainnet",
46 | "arbitrum-mainnet",
47 | "optimism-mainnet",
48 | "polygon-mainnet",
49 | "hyperevm-mainnet"
50 | ],
51 | "note": "Seek cross-chain ETH arbitrage that settles on HyperEVM and direct uETH opps on HyperEVM"
52 | },
53 | "auto_high_spread": {
54 | "min_profit_usd": 5.0,
55 | "min_spread_bps": 28.44,
56 | "min_liquidity_usd": 98274.89,
57 | "include_assets": [
58 | "PRJX",
59 | "USDC"
60 | ],
61 | "include_chain_names": [
62 | "hyperevm-mainnet"
63 | ],
64 | "note": "Auto: favor higher spread opps from last 30m"
65 | },
66 | "auto_liquidity_focus": {
67 | "min_profit_usd": 8.0,
68 | "min_spread_bps": 10.0,
69 | "min_liquidity_usd": 122843.61,
70 | "include_assets": [
71 | "PRJX",
72 | "USDC"
73 | ],
74 | "include_chain_names": [
75 | "hyperevm-mainnet"
76 | ],
77 | "note": "Auto: prioritize higher-liquidity routes"
78 | }
79 | }
--------------------------------------------------------------------------------
/src/feeds/types.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Shared types for feed infrastructure
3 | */
4 |
5 | export interface ArbitrageOpportunity {
6 | id: string;
7 | timestamp: number;
8 | type: 'triangular' | 'cross_venue' | 'direct';
9 |
10 | // Path information
11 | path: string[]; // Token addresses or pool addresses in order
12 | pools: string[]; // Pool addresses involved
13 | routers: string[]; // Router addresses for execution
14 |
15 | // Financial metrics
16 | estimatedProfitUsd: number;
17 | optimalSizeUsd: number;
18 | maxSizeUsd: number;
19 | minSizeUsd: number;
20 | estimatedGasUsd: number;
21 | netProfitUsd: number;
22 |
23 | // Risk metrics
24 | confidence: number; // 0-1 score
25 | competitionLevel: number; // 0-1, higher = more competition
26 | latencyRequirementMs: number; // Max latency before opportunity degrades
27 |
28 | // Market data
29 | prices: Record<string, number>; // Token prices at detection time
30 | liquidity: Record<string, number>; // Pool liquidity levels
31 | volumes: Record<string, number>; // Recent volume data
32 |
33 | // Metadata
34 | source: 'hyperEVM' | 'goldRush' | 'combined';
35 | blockNumber?: number;
36 | transactionHash?: string;
37 | }
38 |
39 | export interface PoolState {
40 | address: string;
41 | token0: string;
42 | token1: string;
43 | reserve0: bigint;
44 | reserve1: bigint;
45 | fee: number; // In basis points * 100 (e.g., 3000 for 0.3%)
46 | liquidity: bigint;
47 | sqrtPrice?: bigint;
48 | tick?: number;
49 | lastUpdate: number;
50 | }
51 |
52 | export interface TokenPrice {
53 | address: string;
54 | symbol: string;
55 | priceUsd: number;
56 | timestamp: number;
57 | source: string;
58 | }
59 |
60 | export interface ExecutionResult {
61 | signalId: string;
62 | success: boolean;
63 | transactionHash?: string;
64 | actualProfitUsd?: number;
65 | gasUsedUsd?: number;
66 | error?: string;
67 | timestamp: number;
68 | }
69 |
70 | // Signal type for feed layer
71 | export interface Signal {
72 | id: string;
73 | opportunity: ArbitrageOpportunity;
74 | createdAt: number;
75 | updatedAt: number;
76 | status: 'active' | 'executing' | 'executed' | 'expired' | 'invalidated';
77 | expirationTime: number;
78 | priority: number;
79 | metadata?: Record<string, unknown>;
80 | }
81 |
--------------------------------------------------------------------------------
/python-backend/app/arb/sizing.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from typing import Tuple
3 |
4 |
5 | def slip_bps_from_dx(dx: float, Rin: float) -> float:
6 | # Approximate price impact for XYK: dx/(Rin+dx)
7 | if dx <= 0 or Rin <= 0:
8 | return 0.0
9 | impact = float(dx) / (float(Rin) + float(dx))
10 | return float(impact * 10000.0)
11 |
12 |
13 | def out_given_in(dx: float, Rin: float, Rout: float, fee_bps: float) -> float:
14 | fee = 1.0 - float(fee_bps)/10000.0
15 | dx_eff = float(dx) * fee
16 | denom = float(Rin) + dx_eff
17 | if denom <= 0:
18 | return 0.0
19 | return (dx_eff * float(Rout)) / denom
20 |
21 |
22 | def solve_size_for_max_net(
23 | Rin: float,
24 | Rout: float,
25 | fee_bps: float,
26 | px_out_usd: float,
27 | slip_cap_bps: float,
28 | notional_cap_usd: float,
29 | net_fn,
30 | ) -> Tuple[float, float, float]:
31 | """
32 | Grid-search the input size (in input token units) that maximizes net_fn(notional_usd).
33 | Returns: (best_dx_in_units, best_net_usd, slip_at_best_bps)
34 | """
35 | if Rin <= 0 or Rout <= 0 or px_out_usd <= 0:
36 | return 0.0, 0.0, 0.0
37 | candidates = []
38 | # conservative slip bound approximation: dx such that slip_bps_from_dx(dx, Rin) <= slip_cap
39 | if slip_cap_bps > 0:
40 | # invert approx: slip = dx/(Rin+dx) => dx = slip*Rin/(1-slip)
41 | s = float(slip_cap_bps)/10000.0
42 | if s >= 0.999999:
43 | s = 0.999999
44 | dx_slip = (float(Rin) * s) / (1.0 - s)
45 | else:
46 | dx_slip = float(Rin) * 0.25
47 |
48 | for i in range(1, 25):
49 | frac = i / 25.0
50 | dx_guess = min(float(Rin) * frac * 0.25, dx_slip)
51 | if dx_guess <= 0:
52 | continue
53 | dy = out_given_in(dx_guess, Rin, Rout, fee_bps)
54 | notional_usd = float(dy) * float(px_out_usd)
55 | if notional_cap_usd > 0 and notional_usd > notional_cap_usd:
56 | notional_usd = notional_cap_usd
57 | net_usd = float(net_fn(notional_usd))
58 | slip_bps = slip_bps_from_dx(dx_guess, Rin)
59 | candidates.append((net_usd, dx_guess, slip_bps))
60 |
61 | if not candidates:
62 | return 0.0, 0.0, 0.0
63 | best = max(candidates, key=lambda t: t[0])
64 | return float(best[1]), float(best[0]), float(best[2])
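
A usage sketch for the grid search above (annotation, not part of the file). The reserves, price, and net model are made up; net_fn is whatever maps a USD notional to expected net USD, e.g. the cost model in app.arb.costs.

from app.arb.sizing import solve_size_for_max_net  # assumes python-backend/ on PYTHONPATH

def net_fn(notional_usd: float) -> float:
    # toy net model: 12 bps of edge minus a flat $1.50 gas estimate
    return notional_usd * 0.0012 - 1.50

dx, net_usd, slip_bps = solve_size_for_max_net(
    Rin=1_000_000.0,            # input-token reserve
    Rout=500_000.0,             # output-token reserve
    fee_bps=30.0,               # 0.30% LP fee
    px_out_usd=2.0,             # USD price of the output token
    slip_cap_bps=50.0,          # cap price impact at 50 bps
    notional_cap_usd=25_000.0,
    net_fn=net_fn,
)
print(dx, net_usd, slip_bps)    # best input size, its net USD, and the impact at that size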
65 |
--------------------------------------------------------------------------------
/scripts/flashloan.ts:
--------------------------------------------------------------------------------
1 | import { ethers } from "ethers";
2 |
3 | // ENV: RPC_URL, OWNER_PK, EXECUTOR, ASSET, AMOUNT
4 | // Build encoded params for the two legs and profit check.
5 | // Note: keep calldata opaque; routers/spenders are provider-specific.
6 |
7 | type FlashParams = {
8 | buyRouter: string;
9 | buySpender: string;
10 | buyCalldata: string; // 0x...
11 | sellRouter: string;
12 | sellSpender: string;
13 | sellCalldata: string; // 0x...
14 | tokenBorrowed: string;
15 | tokenIntermediate: string;
16 | profitToken: string;
17 | minProfit: bigint;
18 | };
19 |
20 | async function main() {
21 | const provider = new ethers.JsonRpcProvider(process.env.RPC_URL!);
22 | const signer = new ethers.Wallet(process.env.OWNER_PK!, provider);
23 |
24 | const executor = new ethers.Contract(
25 | process.env.EXECUTOR!,
26 | [
27 | "function initiateFlashArb(address asset,uint256 amount,bytes params,uint16 referralCode) external",
28 | ],
29 | signer
30 | );
31 |
32 | // TODO: fill addresses and pre-built calldata for your two swaps
33 | const params: FlashParams = {
34 | buyRouter: "0xBUY_ROUTER",
35 | buySpender: "0xBUY_SPENDER",
36 | buyCalldata: "0x",
37 | sellRouter: "0xSELL_ROUTER",
38 | sellSpender: "0xSELL_SPENDER",
39 | sellCalldata: "0x",
40 | tokenBorrowed: process.env.ASSET!,
41 | tokenIntermediate: "0xTOKEN_INTERMEDIATE",
42 | profitToken: process.env.ASSET!, // or a different token
43 | minProfit: BigInt(10_000_000_000_000), // example wei
44 | };
45 |
46 | const encoded = ethers.AbiCoder.defaultAbiCoder().encode(
47 | [
48 | "tuple(address,address,bytes,address,address,bytes,address,address,address,uint256)",
49 | ],
50 | [[
51 | params.buyRouter,
52 | params.buySpender,
53 | params.buyCalldata,
54 | params.sellRouter,
55 | params.sellSpender,
56 | params.sellCalldata,
57 | params.tokenBorrowed,
58 | params.tokenIntermediate,
59 | params.profitToken,
60 | params.minProfit,
61 | ]]
62 | );
63 |
64 | const tx = await executor.initiateFlashArb(
65 | process.env.ASSET!,
66 | ethers.getBigInt(process.env.AMOUNT!),
67 | encoded,
68 | 0 // referralCode
69 | );
70 | console.log("flashloan tx:", tx.hash);
71 | const rc = await tx.wait();
72 | console.log("receipt block:", rc.blockNumber);
73 | }
74 |
75 | main().catch((e) => {
76 | console.error(e);
77 | process.exit(1);
78 | });
79 |
--------------------------------------------------------------------------------
/contracts/HyperliquidArbitrageEngine.sol:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: MIT
2 | pragma solidity ^0.8.19;
3 |
4 | interface IERC20 {
5 | function balanceOf(address account) external view returns (uint256);
6 | function transfer(address to, uint256 value) external returns (bool);
7 | function approve(address spender, uint256 value) external returns (bool);
8 | }
9 |
10 | contract HyperliquidArbitrageEngine {
11 | address public constant HYPERSWAP_ROUTER = 0xD81F56576B1FF2f3Ef18e9Cc71Adaa42516fD990;
12 | address public constant PRJX_ROUTER = address(0); // TODO: set
13 |
14 | bool public emergencyStop;
15 | address public owner;
16 |
17 | event ArbitrageExecuted(address tokenA, address tokenB, uint256 amountIn, uint256 profit);
18 | event EmergencyStop(bool active);
19 |
20 | modifier onlyOwner() {
21 | require(msg.sender == owner, "Unauthorized");
22 | _;
23 | }
24 |
25 | constructor() {
26 | owner = msg.sender;
27 | }
28 |
29 | function setEmergencyStop(bool active) external onlyOwner {
30 | emergencyStop = active;
31 | emit EmergencyStop(active);
32 | }
33 |
34 | function executeArbitrage(
35 | address tokenA,
36 | address tokenB,
37 | uint256 amountIn,
38 | bytes calldata hyperswapData,
39 | bytes calldata prjxData,
40 | uint256 minProfitWei
41 | ) external onlyOwner {
42 | require(!emergencyStop, "Emergency stop active");
43 |
44 | uint256 initialBalance = IERC20(tokenA).balanceOf(address(this));
45 |
46 | _buyOnCheaperDEX(tokenA, tokenB, amountIn, hyperswapData, prjxData);
47 | _sellOnExpensiveDEX(tokenA, tokenB, hyperswapData, prjxData);
48 |
49 | uint256 finalBalance = IERC20(tokenA).balanceOf(address(this));
50 | uint256 profit = finalBalance - initialBalance;
51 | require(profit >= minProfitWei, "Insufficient profit");
52 |
53 | emit ArbitrageExecuted(tokenA, tokenB, amountIn, profit);
54 | }
55 |
56 | function _buyOnCheaperDEX(
57 | address /*tokenA*/, address /*tokenB*/, uint256 /*amountIn*/, bytes calldata /*hyperswapData*/, bytes calldata /*prjxData*/
58 | ) internal {
59 | // TODO: implement router calls
60 | }
61 |
62 | function _sellOnExpensiveDEX(
63 | address /*tokenA*/, address /*tokenB*/, bytes calldata /*hyperswapData*/, bytes calldata /*prjxData*/
64 | ) internal {
65 | // TODO: implement router calls
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/mvp_py/src/arbitrage/exec_engine.py:
--------------------------------------------------------------------------------
1 | import os, httpx
2 | from typing import Optional, Dict, Any, Tuple
3 |
4 | class ExecEngine:
5 | def __init__(self, rpc_url: str, privkey_hex: Optional[str]=None, chain_id: int=1, private_tx: bool=False):
6 | self.rpc=rpc_url; self.priv=privkey_hex; self.chain_id=chain_id; self.private_tx=private_tx
7 |
8 | async def _rpc(self, method: str, params: list):
9 | async with httpx.AsyncClient(timeout=8.0) as c:
10 | r=await c.post(self.rpc,json={"jsonrpc":"2.0","id":1,"method":method,"params":params})
11 | r.raise_for_status(); return r.json()["result"]
12 |
13 | async def nonce(self, sender: str) -> int:
14 | n=await self._rpc("eth_getTransactionCount",[sender,"pending"]); return int(n,16)
15 |
16 | async def estimate_gas(self, tx: Dict[str,Any]) -> int:
17 | g=await self._rpc("eth_estimateGas",[tx]); return int(g,16)
18 |
19 | async def call_static(self, tx: Dict[str,Any]) -> Tuple[bool,str]:
20 | try:
21 | _=await self._rpc("eth_call",[tx,"latest"])
22 | return True,"OK"
23 | except httpx.HTTPError as e:
24 | return False,f"http error: {e}"
25 | except Exception as e:
26 | return False,str(e)
27 |
28 | async def send_raw(self, raw_tx: str) -> str:
29 | if self.private_tx:
30 | # TODO: post raw_tx to your relay endpoint
31 | raise NotImplementedError("Private relay not configured")
32 | h=await self._rpc("eth_sendRawTransaction",[raw_tx]); return h
33 |
34 | async def execute_route(self, built_tx: Dict[str,Any], signer) -> Dict[str,Any]:
35 | """
36 | built_tx must include: to, data, value, from (sender)
37 | signer: object with sign_tx(tx_dict, chain_id=...) -> raw signed tx hex
38 | """
39 | tx=dict(built_tx)
40 | tx.setdefault("value","0x0")
41 | tx.setdefault("from", getattr(signer, 'address', os.getenv("BOT_ADDRESS","0xYourAddress")))
42 |
43 | # Estimate gas + simulate
44 | gas = await self.estimate_gas(tx); tx["gas"]=hex(gas)
45 | ok,reason = await self.call_static(tx)
46 | if not ok: return {"ok":False,"stage":"simulate","reason":reason}
47 |
48 | # Sign & send
49 | if os.getenv("EXECUTE_DRY_RUN","1") == "1":
50 | return {"ok":True,"stage":"dry_run","gas":gas}
51 |
52 | raw = signer.sign_tx(tx, chain_id=self.chain_id)
53 | txhash = await self.send_raw(raw)
54 | return {"ok":True,"stage":"sent","gas":gas,"txhash":txhash}
55 |
--------------------------------------------------------------------------------
/web/styles.css:
--------------------------------------------------------------------------------
1 | .dashboard { display: grid; grid-template-columns: 1fr 1fr; gap: 20px; padding: 20px; }
2 | .card { border: 1px solid #ddd; padding: 20px; border-radius: 8px; box-shadow: 0 1px 2px rgba(0,0,0,0.05); }
3 | .status-green { color: #28a745; }
4 | .status-red { color: #dc3545; }
5 | button { margin-right: 8px; }
6 |
7 | /* Table and badges */
8 | .grid { width: 100%; border-collapse: collapse; }
9 | .grid thead th { position: sticky; top: 0; background: #f9fafb; cursor: pointer; text-align: left; }
10 | .grid th, .grid td { padding: 6px 8px; border-bottom: 1px solid #eee; }
11 | .badge { display: inline-block; padding: 2px 8px; background: #e5e7eb; border-radius: 999px; font-size: 12px; }
12 | .badge-route-router { background: #dbeafe; color: #1e3a8a; }
13 | .badge-route-amm { background: #dcfce7; color: #166534; }
14 | .badge-route-rfq { background: #fee2e2; color: #991b1b; }
15 |
16 | /* Flash animations for cell updates */
17 | @keyframes flashUp { from { background: #ecfdf5; } to { background: transparent; } }
18 | @keyframes flashDown { from { background: #fef2f2; } to { background: transparent; } }
19 | .flash-up { animation: flashUp 0.8s ease-out; }
20 | .flash-down { animation: flashDown 0.8s ease-out; }
21 |
22 | /* Monospace + truncation + compact cells */
23 | .mono { font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; }
24 | .truncate { max-width: 320px; display: inline-block; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; vertical-align: bottom; }
25 | .truncate-narrow { max-width: 200px; }
26 | .cell-tight { white-space: nowrap; }
27 |
28 | /* Icon-like light buttons */
29 | .btn-icon { margin-left: 6px; padding: 2px 6px; font-size: 12px; border: 1px solid #e5e7eb; background: #fafafa; border-radius: 4px; cursor: pointer; }
30 | .btn-sm { padding: 4px 8px; font-size: 12px; }
31 |
32 | /* Alerts */
33 | .alert { border: 1px solid #fee2e2; background: #fef2f2; color: #991b1b; padding: 12px; border-radius: 8px; }
34 | .alert .alert-title { font-weight: 700; margin-bottom: 6px; }
35 | .alert .alert-body { font-size: 12px; color: #7f1d1d; margin-bottom: 8px; }
36 | .alert .alert-actions > button { margin-right: 8px; }
37 |
38 | /* Backtest stat cards grid */
39 | .card-grid { display: grid; grid-template-columns: repeat(auto-fill, minmax(160px, 1fr)); gap: 10px; }
40 | .stat-card { display: flex; flex-direction: column; align-items: flex-start; gap: 4px; }
41 | .stat-value { font-size: 18px; font-weight: 700; }
42 | .stat-label { font-size: 12px; color: #6b7280; }
43 | .stat-sub { font-size: 11px; color: #9ca3af; }
44 |
--------------------------------------------------------------------------------
/src/policy/strategyGate.ts:
--------------------------------------------------------------------------------
1 | import { StrategyParams } from '../types/strategy';
2 | import { strategyStore } from '../storage/strategyStore';
3 | import { env } from '../config/env';
4 |
5 | export interface GateDecision {
6 | allowed: boolean;
7 | reason?: string;
8 | params?: StrategyParams;
9 | strategyId?: string;
10 | }
11 |
12 | export async function enforceGate(kind: string, preferredStrategyId?: string): Promise<GateDecision> {
13 | // If preferred strategy is supplied, check it first
14 | if (preferredStrategyId) {
15 | const s = await strategyStore.getStrategy(preferredStrategyId);
16 | if (!s) return { allowed: false, reason: 'strategy_not_found' };
17 | if (s.status !== 'approved') return { allowed: false, reason: 'strategy_not_approved' };
18 | return { allowed: true, params: s.params, strategyId: s.id };
19 | }
20 |
21 | // Otherwise, find any approved for kind
22 | const approved = await strategyStore.listApprovedByKind(kind);
23 | if (!approved.length) return { allowed: false, reason: 'no_approved_strategy_for_kind' };
24 |
25 | // Choose the most recently approved
26 | const chosen = approved.sort((a, b) => (b.updatedAt || 0) - (a.updatedAt || 0))[0];
27 | return { allowed: true, params: chosen.params, strategyId: chosen.id };
28 | }
29 |
30 | export interface ApprovalCheck {
31 | status: 'approved' | 'rejected';
32 | coverageHours: number;
33 | thresholds: { minHours: number; minPSuccess: number; minEvAdjUsd: number; maxDrawdown: number };
34 | reason?: string;
35 | }
36 |
37 | export function checkApproval(coverageHours: number, stats: { pSuccess: number; evAdjUsd: number; maxDrawdown: number }): ApprovalCheck {
38 | const minHours = env.MIN_BACKTEST_HOURS;
39 | const minPSuccess = env.MIN_P_SUCCESS;
40 | const minEvAdjUsd = env.MIN_EV_ADJ_USD;
41 | const maxDrawdown = env.MAX_DRAWDOWN;
42 |
43 | if (coverageHours < minHours) {
44 | return { status: 'rejected', coverageHours, thresholds: { minHours, minPSuccess, minEvAdjUsd, maxDrawdown }, reason: 'insufficient_coverage_hours' };
45 | }
46 | if (stats.pSuccess < minPSuccess) {
47 | return { status: 'rejected', coverageHours, thresholds: { minHours, minPSuccess, minEvAdjUsd, maxDrawdown }, reason: 'p_success_below_threshold' };
48 | }
49 | if (stats.evAdjUsd < minEvAdjUsd) {
50 | return { status: 'rejected', coverageHours, thresholds: { minHours, minPSuccess, minEvAdjUsd, maxDrawdown }, reason: 'ev_adj_usd_below_threshold' };
51 | }
52 | if (stats.maxDrawdown > maxDrawdown) {
53 | return { status: 'rejected', coverageHours, thresholds: { minHours, minPSuccess, minEvAdjUsd, maxDrawdown }, reason: 'max_drawdown_above_threshold' };
54 | }
55 |
56 | return { status: 'approved', coverageHours, thresholds: { minHours, minPSuccess, minEvAdjUsd, maxDrawdown } };
57 | }
58 |
--------------------------------------------------------------------------------
/research/data_loader.py:
--------------------------------------------------------------------------------
1 | """
2 | Data loading utilities for offline research/backtesting.
3 |
4 | - Pulls recent opportunities from the backend `/api/backtest/export` endpoint.
5 | - Converts to pandas DataFrame with typed columns and computed helpers.
6 | - Provides save/load to Parquet/CSV for reproducibility.
7 |
8 | Usage:
9 | from research.data_loader import fetch_recent, to_dataframe
10 | rows = fetch_recent("http://127.0.0.1:8000", window_minutes=180)
11 | df = to_dataframe(rows)
12 | """
13 | from __future__ import annotations
14 | import typing as t
15 | import requests
16 | import pandas as pd
17 | from datetime import datetime
18 |
19 | DEFAULT_BACKEND = "http://127.0.0.1:8000"
20 |
21 | COLUMNS = [
22 | "ts",
23 | "pair",
24 | "route",
25 | "chain_name",
26 | "spread_bps",
27 | "est_gas_usd",
28 | "est_profit_usd",
29 | "liquidity_usd",
30 | "confidence",
31 | ]
32 |
33 |
34 | def fetch_recent(base_url: str = DEFAULT_BACKEND, window_minutes: int = 180) -> t.List[dict]:
35 | url = f"{base_url.rstrip('/')}/api/backtest/export?window_minutes={int(window_minutes)}"
36 | resp = requests.get(url, timeout=20)
37 | resp.raise_for_status()
38 | data = resp.json()
39 | return t.cast(t.List[dict], data.get("rows", []))
40 |
41 |
42 | def to_dataframe(rows: t.List[dict]) -> pd.DataFrame:
43 | if not rows:
44 | return pd.DataFrame(columns=COLUMNS)
45 | df = pd.DataFrame(rows)
46 | # Ensure typed columns and defaults
47 | def to_ts(x: t.Any) -> pd.Timestamp:
48 | try:
49 | return pd.to_datetime(x, utc=True)
50 | except Exception:
51 | return pd.Timestamp.utcnow()
52 | df["ts"] = df.get("ts", pd.Series([None]*len(df))).map(to_ts)
53 | for col, dtype, default in [
54 | ("pair", "string", ""),
55 | ("route", "string", ""),
56 | ("chain_name", "string", ""),
57 | ]:
58 | df[col] = (df[col] if col in df.columns else pd.Series([default] * len(df), index=df.index)).fillna(default).astype(dtype)
59 | for col in ["spread_bps", "est_gas_usd", "est_profit_usd", "liquidity_usd", "confidence"]:
60 | df[col] = pd.to_numeric(df[col], errors="coerce").fillna(0.0) if col in df.columns else 0.0
61 | df = df.sort_values("ts").reset_index(drop=True)
62 |
63 | # helpers
64 | df["gross_usd"] = df["est_profit_usd"]
65 | df["gas_usd"] = df["est_gas_usd"]
66 | df["notional_usd"] = df["liquidity_usd"]
67 | return df
68 |
69 |
70 | def save_parquet(df: pd.DataFrame, path: str) -> None:
71 | df.to_parquet(path, index=False)
72 |
73 |
74 | def save_csv(df: pd.DataFrame, path: str) -> None:
75 | df.to_csv(path, index=False)
76 |
77 |
78 | def load_parquet(path: str) -> pd.DataFrame:
79 | return pd.read_parquet(path)
80 |
81 |
82 | def load_csv(path: str) -> pd.DataFrame:
83 | return pd.read_csv(path, parse_dates=["ts"])
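
An offline sketch complementing the docstring's network-based usage (annotation, not part of the file; the row values are made up). It builds a frame from literal rows and round-trips it through CSV without hitting the backend export endpoint.

from research.data_loader import to_dataframe, save_csv, load_csv  # assumes repo root on PYTHONPATH

rows = [{
    "ts": "2024-01-01T00:00:00Z", "pair": "HYPE/USDC", "route": "hyperswap->prjx",
    "chain_name": "hyperevm-mainnet", "spread_bps": 14.2, "est_gas_usd": 0.6,
    "est_profit_usd": 7.9, "liquidity_usd": 42_000.0, "confidence": 0.8,
}]
df = to_dataframe(rows)
save_csv(df, "opps_sample.csv")
print(load_csv("opps_sample.csv")[["pair", "spread_bps", "gross_usd"]])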
84 |
--------------------------------------------------------------------------------
/tests/univ3_math_validation.spec.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { simulateUniV3SlipBps } from '../src/eval/univ3_math';
3 |
4 | const Q96 = BigInt(2) ** BigInt(96);
5 |
6 | describe('UniV3 Math Validation', () => {
7 | const TEST_VECTORS = [
8 | // Base case: minimal input
9 | {
10 | liquidity: 10n ** 14n,
11 | amountIn: 10n ** 15n,
12 | feeBps: 5,
13 | zeroForOne: true,
14 | description: "Minimal input - low liquidity"
15 | },
16 | {
17 | liquidity: 10n ** 18n,
18 | amountIn: 10n ** 15n,
19 | feeBps: 5,
20 | zeroForOne: true,
21 | description: "Minimal input - high liquidity"
22 | },
23 | // Typical cases
24 | {
25 | liquidity: 10n ** 16n,
26 | amountIn: 10n ** 18n,
27 | feeBps: 30,
28 | zeroForOne: true,
29 | description: "Typical input - medium liquidity"
30 | },
31 | {
32 | liquidity: 10n ** 17n,
33 | amountIn: 10n ** 19n,
34 | feeBps: 100,
35 | zeroForOne: false,
36 | description: "Large input - high fee"
37 | },
38 | // Edge cases
39 | {
40 | liquidity: 10n ** 12n,
41 | amountIn: 10n ** 20n,
42 | feeBps: 1,
43 | zeroForOne: true,
44 | description: "High input/low liquidity"
45 | },
46 | {
47 | liquidity: 10n ** 20n,
48 | amountIn: 10n ** 10n,
49 | feeBps: 0,
50 | zeroForOne: false,
51 | description: "Near-zero input",
52 | acceptZero: true
53 | }
54 | ];
55 |
56 | TEST_VECTORS.forEach(vector => {
57 | it(vector.description, () => {
58 | const { slipBps } = simulateUniV3SlipBps({
59 | sqrtPriceX96: Q96.toString(),
60 | liquidity: vector.liquidity.toString(),
61 | feeTierBps: vector.feeBps,
62 | amountIn: vector.amountIn.toString(),
63 | zeroForOne: vector.zeroForOne
64 | });
65 |
66 | // Validate slippage properties
67 | expect(slipBps).toBeGreaterThanOrEqual(0);
68 | expect(slipBps).toBeLessThanOrEqual(10000);
69 |
70 | // For non-zero fee, slippage should exceed the fee (price impact adds on top)
71 | if (vector.feeBps > 0) {
72 | expect(slipBps).toBeGreaterThan(vector.feeBps);
73 | } else if (vector.description !== "Near-zero input") {
74 | // With zero fee, slippage should be positive due to impact
75 | // except for near-zero inputs where truncation may cause zero slippage
76 | expect(slipBps).toBeGreaterThan(0);
77 | }
78 |
79 | // Additional checks for directional consistency
80 | // (We expect slippage to be positive for any non-zero input, but allow zero for near-zero due to truncation)
81 | if (vector.amountIn > 0 && vector.description !== "Near-zero input") {
82 | expect(slipBps).toBeGreaterThan(0);
83 | }
84 | });
85 | });
86 | });
87 |
--------------------------------------------------------------------------------
/python-backend/app/arb/costs.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from dataclasses import dataclass
3 | from typing import Optional
4 | import math
5 |
6 |
7 | @dataclass
8 | class GasInputs:
9 | base_fee_gwei: float # average recent base fee
10 | priority_tip_gwei: float # tip for inclusion
11 | gas_limit: int # expected gas units
12 | native_usd: float # ETH/USD (or native)
13 | max_gas_usd_per_trade: float = 1e9
14 |
15 | def gas_usd(self) -> float:
16 | fee_gwei = float(self.base_fee_gwei) + float(self.priority_tip_gwei)
17 | usd = fee_gwei * 1e9 * int(self.gas_limit) / 1e18 * float(self.native_usd)
18 | return float(min(usd, self.max_gas_usd_per_trade))
19 |
20 |
21 | @dataclass
22 | class LatencyInputs:
23 | decision_to_submit_ms: int = 250 # time from signal to tx sign/broadcast
24 | submit_to_inclusion_blocks: int = 1 # blocks until inclusion
25 | seconds_per_block: float = 1.0 # L2 default; set ~12 for mainnet
26 | k_vol: float = 0.0 # $ adverse drift per sqrt(second) per $1 notional
27 | notional_beta: float = 1.0 # linear coefficient on notional for drift
28 |
29 | def inclusion_seconds(self) -> float:
30 | return (float(self.decision_to_submit_ms)/1000.0) + float(self.submit_to_inclusion_blocks)*float(self.seconds_per_block)
31 |
32 | def adverse_selection_usd(self, notional_usd: float) -> float:
33 | # Simple Brownian-drift style penalty: k * sqrt(Δt) * beta * notional
34 | dt = max(self.inclusion_seconds(), 1e-6)
35 | return float(self.k_vol) * math.sqrt(dt) * float(self.notional_beta) * float(notional_usd)
36 |
37 |
38 | @dataclass
39 | class Frictions:
40 | lp_fees_bps: float = 0.0
41 | router_fees_bps: float = 0.0
42 | extra_usd: float = 0.0 # MEV tip, relayer, etc.
43 |
44 |
45 | def net_after_fees(gross_usd: float, fr: Frictions) -> float:
46 | fee_mult = 1.0 - (float(fr.lp_fees_bps) + float(fr.router_fees_bps))/10000.0
47 | return float(gross_usd) * fee_mult - float(fr.extra_usd)
48 |
49 |
50 | def expected_net_usd(gross_usd: float, notional_usd: float, gas: GasInputs, lat: LatencyInputs, fr: Frictions) -> float:
51 | # (1) fees, (2) gas, (3) adverse selection
52 | after_fees = net_after_fees(gross_usd, fr)
53 | gas_cost = gas.gas_usd()
54 | adv = lat.adverse_selection_usd(notional_usd)
55 | return float(after_fees) - float(gas_cost) - float(adv)
56 |
57 |
58 | def apply_fail_probability(net_usd: float, gas_usd: float, fail_prob: float) -> float:
59 | """
60 | With probability fail_prob, we lose gas (no PnL).
61 | Expected value = (1-p)*net + p*(-gas)
62 | """
63 | p = max(0.0, min(1.0, float(fail_prob)))
64 | return float((1.0 - p) * float(net_usd) + p * (-float(gas_usd)))
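
A short composition example (annotation, not part of the file; assumes python-backend/ on PYTHONPATH and purely illustrative inputs). It wires the structured inputs above into expected_net_usd and then applies the fail-probability adjustment separately.

from app.arb.costs import (
    GasInputs, LatencyInputs, Frictions, expected_net_usd, apply_fail_probability,
)

gas = GasInputs(base_fee_gwei=0.5, priority_tip_gwei=0.05, gas_limit=300_000, native_usd=30.0)
lat = LatencyInputs(decision_to_submit_ms=250, submit_to_inclusion_blocks=1, k_vol=1e-5)
fr = Frictions(lp_fees_bps=30.0, router_fees_bps=5.0, extra_usd=0.10)

net = expected_net_usd(gross_usd=12.0, notional_usd=5_000.0, gas=gas, lat=lat, fr=fr)
ev = apply_fail_probability(net, gas.gas_usd(), fail_prob=0.08)
print(gas.gas_usd(), net, ev)  # gas cost, net after fees/gas/drift, EV after reverts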
65 |
--------------------------------------------------------------------------------
/src/eval/montecarlo.ts:
--------------------------------------------------------------------------------
1 | // Minimal Monte Carlo for VaR/CVaR estimation
2 | import { ArbInputs } from "./types";
3 | import { effectiveSlipBps } from "./slippage";
4 | import { decayEdge, fillProb } from "./latency";
5 |
6 | function randn(): number {
7 | // Box-Muller
8 | let u = 0, v = 0;
9 | while (u === 0) u = Math.random();
10 | while (v === 0) v = Math.random();
11 | return Math.sqrt(-2.0 * Math.log(u)) * Math.cos(2.0 * Math.PI * v);
12 | }
13 |
14 | export function simulatePayouts(inputs: ArbInputs, samples = 2000): number[] {
15 | const latSec = Math.max(0, inputs.latency.latencySec);
16 | const edgeEffBps = decayEdge(inputs.edgeBps, latSec, inputs.latency.edgeDecayBpsPerSec);
17 | const size = Math.max(0, inputs.notionalUsd);
18 | const theta = inputs.latency.theta ?? 0.15;
19 | const pS = Math.max(0, Math.min(1, fillProb(inputs.latency.baseFillProb, latSec, theta)));
20 | const pF0 = Math.max(0, Math.min(1, inputs.failures.failBeforeFillProb));
21 | const pF1 = Math.max(0, Math.min(1, inputs.failures.failBetweenLegsProb));
22 | const pFR = Math.max(0, Math.min(1, inputs.failures.reorgOrMevProb));
23 | const psum = pS + pF0 + pF1 + pFR || 1;
24 | const qS = pS/psum, qF0=pF0/psum, qF1=pF1/psum, qFR=pFR/psum;
25 |
26 | const flashVarBps = inputs.flashEnabled ? (inputs.fees.flashFeeBps + inputs.fees.referralBps)/1e4 : 0;
27 | const flashFixed = inputs.flashEnabled ? (inputs.fees.executorFeeUsd + inputs.fees.flashFixedUsd) : 0;
28 |
29 | const out: number[] = new Array(samples);
30 | for (let i = 0; i < samples; i++) {
31 | // Sample stochastic frictions for this path
32 | const gasUsd = Math.max(0, inputs.frictions.gasUsdMean + (inputs.frictions.gasUsdStd ?? 0) * randn());
33 | const advUsd = Math.max(0, inputs.frictions.adverseUsdMean + (inputs.frictions.adverseUsdStd ?? 0) * randn());
34 | const slipBps = effectiveSlipBps(inputs.slippage, size);
35 | const grossUsd = (edgeEffBps / 1e4) * size;
36 | const feeUsd = (inputs.fees.totalFeesBps / 1e4) * size;
37 | const slipUsd = (slipBps / 1e4) * size;
38 | const flashUsd = flashVarBps * size + flashFixed;
39 | const extraUsd = inputs.frictions.extraUsd ?? 0;
40 | const mevUsd = inputs.frictions.mevPenaltyUsd ?? 0;
41 | // Draw the outcome: success, fail-before-fill, fail-between-legs, reorg/MEV
42 | const u = Math.random();
43 | if (u < qS) {
44 | out[i] = grossUsd - feeUsd - slipUsd - gasUsd - advUsd - extraUsd - flashUsd;
45 | } else if (u < qS + qF0) {
46 | out[i] = -gasUsd; // failed before fill: gas lost, no fills
47 | } else if (u < qS + qF0 + qF1) {
48 | out[i] = -(gasUsd + slipUsd * 0.7); // failed between legs: gas plus partial unwind
49 | } else {
50 | out[i] = -(gasUsd + mevUsd); // reorg / MEV capture
51 | }
52 | }
53 | return out;
54 | }
55 |
56 | export function riskMetrics(payouts: number[], alpha = 0.95): { var: number; cvar: number } {
57 | const sorted = payouts.slice().sort((a, b) => a - b);
58 | const idx = Math.max(0, Math.min(sorted.length-1, Math.floor((1-alpha)*sorted.length)));
59 | const VaR = sorted[idx];
60 | const tail = sorted.slice(0, idx+1);
61 | const CVaR = tail.length ? tail.reduce((s,x)=>s+x,0)/tail.length : VaR;
62 | return { var: VaR, cvar: CVaR };
63 | }
64 |
--------------------------------------------------------------------------------
/src/config/env.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Environment configuration with validation
3 | */
4 |
5 | import { z } from 'zod';
6 | import * as dotenv from 'dotenv';
7 |
8 | // Load environment variables
9 | dotenv.config();
10 |
11 | // Environment schema
12 | const envSchema = z.object({
13 | // GoldRush API
14 | GOLDRUSH_HTTP_URL: z.string().url(),
15 | GOLDRUSH_WS_URL: z.string().url(),
16 | GOLDRUSH_API_KEY: z.string().min(1),
17 |
18 | // Blockchain
19 | RPC_URL: z.string().url(),
20 | CHAIN_ID: z.coerce.number(),
21 |
22 | // ML Models
23 | MODEL_FINBLOOM_ENDPOINT: z.string().url(),
24 | MODEL_FINBLOOM_KEY: z.string().min(1),
25 | MODEL_DEEPSEEK_ENDPOINT: z.string().url(),
26 | MODEL_DEEPSEEK_KEY: z.string().min(1),
27 |
28 | // Trading Parameters
29 | EDGE_DECAY_BPS_PER_SEC: z.coerce.number().default(3),
30 | BASE_FILL_PROB: z.coerce.number().min(0).max(1).default(0.9),
31 | FILL_THETA: z.coerce.number().default(0.15),
32 | SLIP_ALPHA: z.coerce.number().default(1.25),
33 | SLIP_K: z.coerce.number().default(0.9),
34 |
35 | // Fees
36 | FLASH_FEE_BPS: z.coerce.number().default(4),
37 | REFERRAL_BPS: z.coerce.number().default(0),
38 | FLASH_FIXED_USD: z.coerce.number().default(0),
39 | EXECUTOR_FEE_USD: z.coerce.number().default(0),
40 |
41 | // Risk Parameters
42 | RISK_AVERSION_LAMBDA: z.coerce.number().default(0.0),
43 | GAS_USD_MEAN: z.coerce.number().default(1.5),
44 | GAS_USD_STD: z.coerce.number().default(0.3),
45 | ADVERSE_USD_MEAN: z.coerce.number().default(0.0),
46 | ADVERSE_USD_STD: z.coerce.number().default(0.3),
47 | MEV_PENALTY_USD: z.coerce.number().default(0),
48 | MAX_NOTIONAL_USD: z.coerce.number().default(10000),
49 |
50 | // Operational
51 | LOG_LEVEL: z.enum(['debug', 'info', 'warn', 'error']).default('info'),
52 | NODE_ENV: z.enum(['development', 'production', 'test']).default('development'),
53 |
54 | // API Configuration
55 | TOTAL_FEES_BPS: z.coerce.number().default(30),
56 | ALLOWED_ORIGINS: z.string().optional(),
57 |
58 | // Storage
59 | DATA_DIR: z.string().optional(),
60 |
61 | // Strategy Approval Policy
62 | MIN_BACKTEST_HOURS: z.coerce.number().default(24),
63 | MIN_P_SUCCESS: z.coerce.number().min(0).max(1).default(0.75),
64 | MIN_EV_ADJ_USD: z.coerce.number().default(0),
65 | MAX_DRAWDOWN: z.coerce.number().default(Number.POSITIVE_INFINITY),
66 | });
67 |
68 | // Parse and validate environment
69 | const parseEnv = () => {
70 | try {
71 | return envSchema.parse(process.env);
72 | } catch (error) {
73 | if (error instanceof z.ZodError) {
74 | console.error('❌ Invalid environment variables:');
75 | error.errors.forEach(err => {
76 | console.error(` ${err.path.join('.')}: ${err.message}`);
77 | });
78 | process.exit(1);
79 | }
80 | throw error;
81 | }
82 | };
83 |
84 | // Export validated config
85 | export const env = parseEnv();
86 | // Backwards compatibility: some modules import { ENV }
87 | export const ENV = env;
88 |
89 | // Export type for use in other modules
90 | export type Env = z.infer<typeof envSchema>;
91 |
--------------------------------------------------------------------------------
/python-backend/app/executor.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from dataclasses import dataclass
3 | from typing import Any, Dict
4 |
5 |
6 | @dataclass
7 | class SafetyLimits:
8 | max_amount_in: float = 1_000_000.0
9 | max_gas_limit: int = 1_000_000
10 | max_slippage_bps: float = 500.0 # 5%
11 |
12 |
13 | class TradeExecutor:
14 | """Minimal trade executor skeleton with safety checks and dry-run support."""
15 |
16 | def __init__(self, limits: SafetyLimits | None = None):
17 | self.limits = limits or SafetyLimits()
18 |
19 | def _check_safety(self, amount_in: float, gas_limit: int, slippage_bps: float) -> None:
20 | if amount_in <= 0:
21 | raise ValueError("amount_in must be positive")
22 | if amount_in > self.limits.max_amount_in:
23 | raise ValueError(f"amount_in exceeds safety limit: {amount_in} > {self.limits.max_amount_in}")
24 | if gas_limit <= 0:
25 | raise ValueError("gas_limit must be positive")
26 | if gas_limit > self.limits.max_gas_limit:
27 | raise ValueError(f"gas_limit exceeds safety limit: {gas_limit} > {self.limits.max_gas_limit}")
28 | if slippage_bps < 0 or slippage_bps > self.limits.max_slippage_bps:
29 | raise ValueError(
30 | f"slippage_bps out of range: {slippage_bps} not in [0, {self.limits.max_slippage_bps}]"
31 | )
32 |
33 | def quote(self, amount_in: float, slippage_bps: float) -> Dict[str, Any]:
34 | """Return a conservative quote (worst-case given slippage)."""
35 | worst_case_out = float(amount_in) * (1.0 - float(slippage_bps) / 10_000.0)
36 | return {
37 | "amount_in": float(amount_in),
38 | "slippage_bps": float(slippage_bps),
39 | "amount_out_min": float(max(0.0, worst_case_out)),
40 | }
41 |
42 | def simulate(self, amount_in: float, gas_price_wei: int, gas_limit: int, native_price_usd: float, slippage_bps: float) -> Dict[str, Any]:
43 | self._check_safety(amount_in, gas_limit, slippage_bps)
44 | q = self.quote(amount_in, slippage_bps)
45 | gas_cost_usd = (float(gas_price_wei) / 1e18) * float(native_price_usd) * float(gas_limit)
46 | # In a real implementation, slippage applies to out token; here we show net in "USD-equivalent" terms only as demo
47 | net_value_usd = float(amount_in) - float(gas_cost_usd)
48 | return {
49 | **q,
50 | "gas_limit": int(gas_limit),
51 | "gas_price_wei": int(gas_price_wei),
52 | "native_price_usd": float(native_price_usd),
53 | "gas_cost_usd": float(gas_cost_usd),
54 | "net_value_usd": float(net_value_usd),
55 | }
56 |
57 | def execute(self, *, dry_run: bool = True, **kwargs: Any) -> Dict[str, Any]:
58 | if dry_run:
59 | return {"status": "dry_run", "tx": None, "note": "Execution skipped (dry-run)"}
60 | # Placeholder for on-chain execution integration (signing, routing, etc.)
61 | raise NotImplementedError("Live execution not implemented. Enable dry_run for simulation.")
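
A dry-run sketch of the executor (annotation, not part of the file; assumes python-backend/ on PYTHONPATH, illustrative numbers throughout).

from app.executor import SafetyLimits, TradeExecutor

ex = TradeExecutor(SafetyLimits(max_amount_in=50_000.0, max_gas_limit=500_000, max_slippage_bps=300.0))
sim = ex.simulate(
    amount_in=10_000.0,
    gas_price_wei=30_000_000_000,  # 30 gwei
    gas_limit=250_000,
    native_price_usd=3_500.0,
    slippage_bps=25.0,
)
print(sim["amount_out_min"], sim["gas_cost_usd"])  # 9975.0 and 26.25
print(ex.execute(dry_run=True))                    # {"status": "dry_run", ...}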
62 |
--------------------------------------------------------------------------------
/tests/model.spec.ts:
--------------------------------------------------------------------------------
1 | import { describe, it, expect } from 'vitest';
2 | import { evaluateArb } from '../src/eval/model';
3 | import { effectiveSlipBps } from '../src/eval/slippage';
4 | import { fillProb } from '../src/eval/latency';
5 | import { ArbInputs } from '../src/eval/types';
6 |
7 | function baseInputs(): ArbInputs {
8 | return {
9 | edgeBps: 25,
10 | notionalUsd: 10_000,
11 | fees: { totalFeesBps: 8, flashFeeBps: 0, referralBps: 0, executorFeeUsd: 0, flashFixedUsd: 0 },
12 | frictions: { gasUsdMean: 0.2, adverseUsdMean: 0.5 },
13 | latency: { latencySec: 0.5, edgeDecayBpsPerSec: 1.5, baseFillProb: 0.85, theta: 0.15 },
14 | slippage: { kind: 'empirical', k: 0.9, alpha: 1.2, liquidityRefUsd: 1_500_000 },
15 | failures: { failBeforeFillProb: 0.02, failBetweenLegsProb: 0.01, reorgOrMevProb: 0.0 },
16 | flashEnabled: false,
17 | riskAversion: 0.00005,
18 | capitalUsd: 20_000,
19 | };
20 | }
21 |
22 | describe('model.evaluateArb', () => {
23 | it('breakeven sanity: EV near zero when solving EV=0 edge at fixed size', () => {
24 | const inp = baseInputs();
25 | const size = inp.notionalUsd;
26 | const slip = effectiveSlipBps(inp.slippage, size);
27 | const feeUsd = (inp.fees.totalFeesBps / 1e4) * size;
28 | const slipUsd = (slip / 1e4) * size;
29 | const gasUsd = inp.frictions.gasUsdMean;
30 | const advUsd = inp.frictions.adverseUsdMean;
31 | const extraUsd = inp.frictions.extraUsd ?? 0;
32 | const mevUsd = inp.frictions.mevPenaltyUsd ?? 0;
33 | const theta = inp.latency.theta ?? 0.15;
34 | const pS = Math.max(0, Math.min(1, fillProb(inp.latency.baseFillProb, inp.latency.latencySec, theta)));
35 | const pF0 = Math.max(0, Math.min(1, inp.failures.failBeforeFillProb));
36 | const pF1 = Math.max(0, Math.min(1, inp.failures.failBetweenLegsProb));
37 | const pFR = Math.max(0, Math.min(1, inp.failures.reorgOrMevProb));
38 | const unwindUsd = slipUsd * 0.7;
39 | const failCostsWeighted = pF1 * unwindUsd + pF0 * gasUsd + pFR * (gasUsd + mevUsd);
40 | // Solve EV=0 for edgeBps at this fixed size
41 | const numeratorUsd = feeUsd + slipUsd + gasUsd + advUsd + extraUsd + (failCostsWeighted / Math.max(1e-9, pS));
42 | const overheadBps = (numeratorUsd / Math.max(1e-9, size)) * 1e4;
43 | inp.edgeBps = overheadBps;
44 | // Constrain size search to this notional so breakeven is evaluated at this size
45 | inp.capitalUsd = size;
46 | const res = evaluateArb(inp);
47 | expect(Math.abs(res.ev_per_sec)).toBeLessThan(0.05);
48 | });
49 |
50 | it('higher latency reduces success prob and EV', () => {
51 | const a = baseInputs();
52 | a.latency.latencySec = 0.2;
53 | const ra = evaluateArb(a);
54 |
55 | const b = baseInputs();
56 | b.latency.latencySec = 2.0;
57 | const rb = evaluateArb(b);
58 |
59 | expect(ra.p_success).toBeGreaterThan(rb.p_success);
60 | expect(ra.net_usd_est).toBeGreaterThan(rb.net_usd_est);
61 | });
62 |
63 | it('size search picks non-zero but bounded optimal size', () => {
64 | const inp = baseInputs();
65 | inp.capitalUsd = 50_000;
66 | const res = evaluateArb(inp);
67 | expect(res.size_opt_usd).toBeGreaterThan(0);
68 | expect(res.size_opt_usd).toBeLessThanOrEqual(inp.capitalUsd);
69 | });
70 | });
71 |
--------------------------------------------------------------------------------
/src/eval/slippage.ts:
--------------------------------------------------------------------------------
1 | import { SlippageModel } from "./types";
2 | import { simulateUniV3SlipBps, simulateUniV3WithTicksSlipBps } from "./univ3_math";
3 |
4 | // Empirical nonlinear slippage: slip_bps(size) = k * (size/usd_ref)^alpha
5 | export function estimateSlippageBpsEmpirical(
6 | sizeUsd: number,
7 | k: number,
8 | alpha: number,
9 | liquidityRefUsd: number
10 | ): number {
11 | const ref = Math.max(1e-9, liquidityRefUsd);
12 | const ratio = Math.max(0, sizeUsd) / ref;
13 | const slip = Math.max(0, k) * Math.pow(ratio, Math.max(1.0, alpha));
14 | return slip;
15 | }
16 |
17 | // Placeholder for UniV3 exact sim; requires live pool state (ticks/liquidity)
18 | // For now, we expose the API and return zeros. Integrate with on-chain or subgraph later.
19 | export function estimateSlippageUniV3(_: any): { slipInBps: number; slipOutBps: number; minOuts: bigint[] } {
20 | return { slipInBps: 0, slipOutBps: 0, minOuts: [] };
21 | }
22 |
23 | export function effectiveSlipBps(model: SlippageModel, sizeUsd: number): number {
24 | if (model.kind === "empirical" || model.kind === "amm_v2") {
25 | const k = model.k ?? 0;
26 | const alpha = model.alpha ?? 1.25;
27 | const L = model.liquidityRefUsd ?? 1_000_000; // sane default
28 | return estimateSlippageBpsEmpirical(sizeUsd, k, alpha, L);
29 | }
30 | if (model.kind === "univ3") {
31 | const { sqrtPriceX96, liquidity, feeTierBps, usdPerTokenIn, zeroForOne } = model as any;
32 | if (sqrtPriceX96 && liquidity && usdPerTokenIn && Number(usdPerTokenIn) > 0) {
33 | const fee = Number.isFinite(feeTierBps) ? Number(feeTierBps) : (model.k ? Math.max(0, model.k) : 30);
34 | const tokenAmountIn = (Math.max(0, sizeUsd) / Number(usdPerTokenIn));
35 | // assume 18 decimals for tokens by default; callers can pre-scale in the future
36 | const amtInRaw = BigInt(Math.floor(tokenAmountIn * 1e18));
37 | if (Array.isArray((model as any).ticks) && (model as any).ticks.length > 0) {
38 | const uni = simulateUniV3WithTicksSlipBps({
39 | sqrtPriceX96: String(sqrtPriceX96),
40 | liquidity: String(liquidity),
41 | feeTierBps: fee,
42 | amountIn: amtInRaw.toString(),
43 | zeroForOne: Boolean(zeroForOne ?? true),
44 | ticks: (model as any).ticks.map((t: any) => ({
45 | index: Number(t.index),
46 | liquidityNet: BigInt(t.liquidityNet),
47 | sqrtPriceX96: t.sqrtPriceX96 ? BigInt(t.sqrtPriceX96) : undefined,
48 | })),
49 | tickSpacing: (model as any).tickSpacing,
50 | });
51 | return Math.max(0, uni.slipBps);
52 | } else {
53 | const uni = simulateUniV3SlipBps({
54 | sqrtPriceX96: String(sqrtPriceX96),
55 | liquidity: String(liquidity),
56 | feeTierBps: fee,
57 | amountIn: amtInRaw.toString(),
58 | zeroForOne: Boolean(zeroForOne ?? true),
59 | });
60 | return Math.max(0, uni.slipBps);
61 | }
62 | }
63 | // fallback if insufficient data
64 | const k = model.k ?? 1.0; // slight conservatism
65 | const alpha = model.alpha ?? 1.25;
66 | const L = model.liquidityRefUsd ?? 1_000_000;
67 | return estimateSlippageBpsEmpirical(sizeUsd, k, alpha, L);
68 | }
69 | return 0;
70 | }
71 |
--------------------------------------------------------------------------------
/rust-engine/src/main.rs:
--------------------------------------------------------------------------------
1 | use anyhow::Result;
2 | use dotenvy::dotenv;
3 | use reqwest::Client;
4 | use serde::{Deserialize, Serialize};
5 | use serde_json::json;
6 | use std::env;
7 | use tracing::{info, Level};
8 | use tracing_subscriber::FmtSubscriber;
9 | use std::time::Duration;
10 |
11 | use redis::aio::ConnectionManager;
12 | use redis::AsyncCommands;
13 |
14 | #[derive(Debug, Serialize, Deserialize, Clone)]
15 | struct Opportunity {
16 | pair: String,
17 | spread_bps: f64,
18 | est_gas_usd: f64,
19 | est_profit_usd: f64,
20 | liquidity_usd: f64,
21 | confidence: f64,
22 | route: String,
23 | }
24 |
25 | #[tokio::main]
26 | async fn main() -> Result<()> {
27 | dotenv().ok();
28 | let subscriber = FmtSubscriber::builder()
29 | .with_max_level(Level::INFO)
30 | .with_env_filter("info")
31 | .finish();
32 | tracing::subscriber::set_global_default(subscriber).ok();
33 |
34 | info!("starting hyperliquid arbitrage engine (stub)");
35 |
36 | let subgraph = env::var("PRJX_SUBGRAPH").unwrap_or_else(|_|
37 | "https://api.goldsky.com/api/public/project_cmbbm2iwckb1b01t39xed236t/subgraphs/uniswap-v3-hyperevm-position/prod/gn".to_string()
38 | );
39 |
40 | // Optional Redis wiring
41 | let redis_url = env::var("REDIS_URL").ok();
42 | let redis_channel = env::var("REDIS_CHANNEL").unwrap_or_else(|_| "arb:realtime".to_string());
43 | let mut redis_mgr: Option<ConnectionManager> = None;
44 | if let Some(url) = redis_url.clone() {
45 | match redis::Client::open(url) {
46 | Ok(client) => match client.get_tokio_connection_manager().await {
47 | Ok(conn) => {
48 | info!("connected to redis");
49 | redis_mgr = Some(conn);
50 | }
51 | Err(e) => info!(error = %e, "failed to connect redis"),
52 | },
53 | Err(e) => info!(error = %e, "invalid REDIS_URL"),
54 | }
55 | } else {
56 | info!("REDIS_URL not set; publisher disabled");
57 | }
58 |
59 | let client = Client::new();
60 |
61 | let query = r#"query Pools($first: Int!) { pools(first: $first, orderBy: volumeUSD, orderDirection: desc) { id liquidity sqrtPrice tick feeTier } }"#;
62 | let body = json!({"query": query, "variables": {"first": 1}});
63 |
64 | let resp = client.post(&subgraph).json(&body).send().await?;
65 | let status = resp.status();
66 | let text = resp.text().await.unwrap_or_default();
67 | info!(%status, %text, "sample PRJX GraphQL response");
68 |
69 | // TODO: connect to HyperSwap SDK/Router via RPC and price checks
70 | // TODO: opportunity detection + signaling to backend
71 |
72 | // Heartbeat publishing loop (if Redis connected)
73 | if let Some(mut conn) = redis_mgr {
74 | loop {
75 | let payload = json!({
76 | "ts": chrono::Utc::now().to_rfc3339(),
77 | "pnl": 12.34,
78 | "opportunities": [],
79 | "engine": {"status": "running"}
80 | })
81 | .to_string();
82 | let _: () = conn.publish::<_, _, ()>(&redis_channel, payload).await.unwrap_or(());
83 | tokio::time::sleep(Duration::from_millis(800)).await;
84 | }
85 | }
86 |
87 | Ok(())
88 | }
89 |
--------------------------------------------------------------------------------
/research/optimize_scipy.py:
--------------------------------------------------------------------------------
1 | """
2 | Parameter optimization utilities using SciPy.
3 |
4 | - Calibrates HFT cost parameters to data (optional)
5 | - Optimizes backtest parameters for objectives like Net/(1+DD), Sharpe proxy
6 |
7 | Usage:
8 | from research.data_loader import fetch_recent, to_dataframe
9 | from research.optimize_scipy import optimize_params
10 |
11 | rows = fetch_recent()
12 | df = to_dataframe(rows)
13 | best, history = optimize_params(df, init_params={
14 | 'min_spread_bps': 10,
15 | 'min_liquidity_usd': 10000,
16 | 'slippage_bps': 30,
17 | 'fees_bps': 5,
18 | 'gas_multiplier': 1.0,
19 | 'max_trade_usd': 50000,
20 | }, objective='net_over_dd')
21 | """
22 | from __future__ import annotations
23 | import typing as t
24 | import numpy as np
25 | import pandas as pd
26 | from scipy.optimize import minimize
27 |
28 | from .backtest_np import vector_backtest
29 |
30 |
31 | def _objective_wrapper(df: pd.DataFrame, objective: str):
32 | def score(params_vec: np.ndarray, keys: list[str]) -> float:
33 | p = {k: float(v) for k, v in zip(keys, params_vec)}
34 | m = vector_backtest(df, p)
35 | if objective == 'winrate':
36 | return -float(m.get('winrate', 0.0))
37 | if objective == 'sharpe_proxy':
38 | return -float(m.get('sharpe_proxy', 0.0))
39 | if objective == 'avg_profit_per_gas':
40 | return -float(m.get('avg_profit_per_gas', 0.0))
41 | if objective == 'net_over_dd':
42 | net = float(m.get('total_net_profit', 0.0))
43 | dd = abs(float(m.get('max_drawdown_usd', 0.0)))
44 | return -(net / (1.0 + dd))
45 | # default: maximize net
46 | return -float(m.get('total_net_profit', 0.0))
47 | return score
48 |
49 |
50 | def optimize_params(
51 | df: pd.DataFrame,
52 | init_params: dict,
53 | bounds: dict | None = None,
54 | objective: str = 'total_net_profit',
55 | keys: list[str] | None = None,
56 | ) -> tuple[dict, list[tuple[dict, dict]]]:
57 | """Optimize a subset of params using SciPy L-BFGS-B.
58 |
59 | Returns (best_params, history) where history contains (params, metrics).
60 | """
61 | if keys is None:
62 | keys = [
63 | 'min_spread_bps', 'min_liquidity_usd', 'slippage_bps', 'fees_bps',
64 | 'gas_multiplier', 'max_trade_usd', 'notional_cap_usd'
65 | ]
66 | p0 = np.array([float(init_params.get(k, 0.0)) for k in keys], dtype=float)
67 | bspec = []
68 | bounds = bounds or {}
69 | for k in keys:
70 | lo, hi = bounds.get(k, (None, None))
71 | if lo is None: lo = -1e6
72 | if hi is None: hi = 1e6
73 | bspec.append((float(lo), float(hi)))
74 |
75 | history: list[tuple[dict, dict]] = []
76 | obj = _objective_wrapper(df, objective)
77 |
78 | def cb(xk: np.ndarray):
79 | params = {k: float(v) for k, v in zip(keys, xk)}
80 | m = vector_backtest(df, params)
81 | history.append((params, m))
82 |
83 | res = minimize(lambda x: obj(x, keys), p0, method='L-BFGS-B', bounds=bspec, callback=cb, options={'maxiter': 50})
84 | best = {k: float(v) for k, v in zip(keys, res.x)}
85 | best_metrics = vector_backtest(df, best)
86 | history.append((best, best_metrics))
87 | return best, history
88 |
--------------------------------------------------------------------------------
/src/api/state.ts:
--------------------------------------------------------------------------------
1 | import { ArbitrageOpportunity } from "../feeds/types";
2 | import { Signal } from "../feeds/SignalGenerator";
3 |
4 | export interface SystemStats {
5 | activeSignals: number;
6 | recentOpportunities: number;
7 | updatedAt: number;
8 | }
9 |
10 | class InMemoryState {
11 | private _signals: Signal[] = [];
12 | private _opps: ArbitrageOpportunity[] = [];
13 | private _updated = Date.now();
14 |
15 | getActiveSignals(): Signal[] {
16 | return this._signals;
17 | }
18 |
19 | getRecentOpportunities(limit = 50): ArbitrageOpportunity[] {
20 | return this._opps.slice(-limit).reverse();
21 | }
22 |
23 | getStats(): SystemStats {
24 | return {
25 | activeSignals: this._signals.length,
26 | recentOpportunities: this._opps.length,
27 | updatedAt: this._updated,
28 | };
29 | }
30 |
31 | // Hooks for future wiring to feeds pipeline
32 | upsertSignal(sig: Signal) {
33 | const idx = this._signals.findIndex(s => s.id === sig.id);
34 | if (idx >= 0) this._signals[idx] = sig; else this._signals.push(sig);
35 | this._updated = Date.now();
36 | }
37 |
38 | addOpportunity(opp: ArbitrageOpportunity) {
39 | this._opps.push(opp);
40 | if (this._opps.length > 500) this._opps.shift();
41 | this._updated = Date.now();
42 | }
43 | }
44 |
45 | export const State = new InMemoryState();
46 |
47 | // Optional seed for UI/dev
48 | (function seed() {
49 | if (process.env.UI_SEED !== "1") return;
50 | const now = Date.now();
51 | const opp: ArbitrageOpportunity = {
52 | id: `seed-${now}`,
53 | timestamp: now,
54 | type: "cross_venue",
55 | path: ["ETH", "USDC"],
56 | pools: [
57 | "0x1111111111111111111111111111111111111111",
58 | "0x2222222222222222222222222222222222222222"
59 | ],
60 | routers: [],
61 | estimatedProfitUsd: 25,
62 | optimalSizeUsd: 10000,
63 | maxSizeUsd: 20000,
64 | minSizeUsd: 1000,
65 | estimatedGasUsd: 5,
66 | netProfitUsd: 20,
67 | confidence: 0.8,
68 | competitionLevel: 0.5,
69 | latencyRequirementMs: 500,
70 | prices: { "ETH/USDC": 3500 },
71 | liquidity: { buy: 100000, sell: 120000 },
72 | volumes: {},
73 | source: "combined"
74 | } as ArbitrageOpportunity;
75 |
76 | const evaluation = {
77 | net_usd_est: 18.5,
78 | ev_per_sec: 10.2,
79 | size_opt_usd: 9000,
80 | p_success: 0.92,
81 | slip_bps_eff: 2.1,
82 | breakeven_bps: 14.2,
83 | score: 10.2,
84 | gas_usd: 5,
85 | seconds: 0.2,
86 | flash_fee_bps: 9,
87 | referral_bps: 0,
88 | flash_fixed_usd: 0,
89 | executor_fee_usd: 0,
90 | flash_cost_usd: 0,
91 | components: {
92 | edge_eff_bps: 20,
93 | after_router_lp_usd: 30,
94 | slip_cost_usd: 1.2,
95 | }
96 | } as any; // keep loose for seed
97 |
98 | const signal: Signal = {
99 | id: `sig-${now}`,
100 | timestamp: now,
101 | opportunity: opp,
102 | evaluation,
103 | shouldExecute: true,
104 | executionSize: 9000,
105 | expectedValue: evaluation.net_usd_est,
106 | riskScore: 0.2,
107 | confidenceScore: opp.confidence,
108 | validUntil: now + 60_000
109 | } as Signal;
110 |
111 | State.addOpportunity(opp);
112 | State.upsertSignal(signal);
113 | })();
114 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | # Main bot application
5 | bot:
6 | build: .
7 | container_name: hyperliquid-arb-bot
8 | restart: unless-stopped
9 | env_file:
10 | - .env
11 | - .env.execution
12 | environment:
13 | - NODE_ENV=production
14 | - BOT_MODE=${BOT_MODE:-testnet}
15 | - DASHBOARD_PORT=4000
16 | ports:
17 | - "4000:4000" # Dashboard
18 | volumes:
19 | - ./logs:/app/logs
20 | - ./data:/app/data
21 | networks:
22 | - bot-network
23 | depends_on:
24 | - redis
25 | healthcheck:
26 | test: ["CMD", "curl", "-f", "http://localhost:4000/api/health"]
27 | interval: 30s
28 | timeout: 10s
29 | retries: 3
30 | start_period: 40s
31 | deploy:
32 | resources:
33 | limits:
34 | cpus: '2'
35 | memory: 2G
36 | reservations:
37 | cpus: '1'
38 | memory: 1G
39 |
40 | # Redis for caching and state management
41 | redis:
42 | image: redis:7-alpine
43 | container_name: hyperliquid-redis
44 | restart: unless-stopped
45 | ports:
46 | - "6379:6379"
47 | volumes:
48 | - redis-data:/data
49 | networks:
50 | - bot-network
51 | command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
52 |
53 | # Prometheus for metrics collection
54 | prometheus:
55 | image: prom/prometheus:latest
56 | container_name: hyperliquid-prometheus
57 | restart: unless-stopped
58 | ports:
59 | - "9090:9090"
60 | volumes:
61 | - ./prometheus.yml:/etc/prometheus/prometheus.yml
62 | - prometheus-data:/prometheus
63 | networks:
64 | - bot-network
65 | command:
66 | - '--config.file=/etc/prometheus/prometheus.yml'
67 | - '--storage.tsdb.path=/prometheus'
68 | - '--web.console.libraries=/usr/share/prometheus/console_libraries'
69 | - '--web.console.templates=/usr/share/prometheus/consoles'
70 | - '--storage.tsdb.retention.time=30d'
71 |
72 | # Grafana for visualization
73 | grafana:
74 | image: grafana/grafana:latest
75 | container_name: hyperliquid-grafana
76 | restart: unless-stopped
77 | ports:
78 | - "3000:3000"
79 | volumes:
80 | - grafana-data:/var/lib/grafana
81 | - ./grafana/provisioning:/etc/grafana/provisioning
82 | environment:
83 | - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:-admin}
84 | - GF_INSTALL_PLUGINS=redis-datasource
85 | networks:
86 | - bot-network
87 | depends_on:
88 | - prometheus
89 |
90 | # Node Exporter for system metrics
91 | node-exporter:
92 | image: prom/node-exporter:latest
93 | container_name: hyperliquid-node-exporter
94 | restart: unless-stopped
95 | ports:
96 | - "9100:9100"
97 | networks:
98 | - bot-network
99 | command:
100 | - '--path.procfs=/host/proc'
101 | - '--path.sysfs=/host/sys'
102 | - '--path.rootfs=/rootfs'
103 | volumes:
104 | - /proc:/host/proc:ro
105 | - /sys:/host/sys:ro
106 | - /:/rootfs:ro
107 |
108 | volumes:
109 | redis-data:
110 | prometheus-data:
111 | grafana-data:
112 |
113 | networks:
114 | bot-network:
115 | driver: bridge
116 |
--------------------------------------------------------------------------------
/config/sample.env:
--------------------------------------------------------------------------------
1 | DEEPSEEK_API_KEY=
2 | DEEPSEEK_BASE_URL=https://api.deepseek.com
3 | HYPEREVM_RPC=https://api.hyperliquid-testnet.xyz/evm
4 | PRJX_SUBGRAPH=https://api.goldsky.com/api/public/project_cmbbm2iwckb1b01t39xed236t/subgraphs/uniswap-v3-hyperevm-position/prod/gn
5 | REDIS_URL=redis://127.0.0.1:6379/0
6 | REDIS_CHANNEL=arb:realtime
7 |
8 | # ================= Goldsky / GraphQL =================
9 | # Mode: 'graphql' to query Goldsky subgraphs via HTTP POST
10 | GOLDSKY_MODE=graphql
11 | # Public or private GN endpoint (fill in your values)
12 | # Example (public):
13 | # GOLDSKY_GQL_URL=https://api.goldsky.com/api/public/{project_id}/subgraphs/{subgraph_name}/{tag}/gn
14 | # Example (private):
15 | # GOLDSKY_GQL_URL=https://api.goldsky.com/api/private/{project_id}/subgraphs/{subgraph_name}/{tag}/gn
16 |
17 | # Auth header configuration
18 | # For private endpoints: Authorization: Bearer <token>
19 | GOLDSKY_API_HEADER=Authorization
20 | GOLDSKY_API_PREFIX=Bearer
21 | GOLDSKY_API_KEY=
22 |
23 | # Optional: customize query and items path if your schema differs
24 | # Default query expects pool(id) { reserves(first: $limit, orderBy: timestamp, orderDirection: desc) { ... } }
25 | #GOLDSKY_GQL_QUERY="""
26 | #query ReserveData($poolId: ID!, $limit: Int!) {
27 | # pool(id: $poolId) {
28 | # reserves(first: $limit, orderBy: timestamp, orderDirection: desc) {
29 | # timestamp
30 | # reserve0
31 | # reserve1
32 | # }
33 | # }
34 | #}
35 | #"""
36 | # Path to the array of items inside the GraphQL response
37 | GOLDSKY_GQL_ITEMS_PATH=data.pool.reserves
38 |
39 | # Cache TTL for pool history (seconds)
40 | GOLDSKY_CACHE_TTL_SEC=30
41 |
42 | # ================= Goldsky / REST (optional) =================
43 | # Set mode to 'rest' to use REST-style endpoints
44 | # GOLDSKY_MODE=rest
45 | # Base API URL for REST provider
46 | # GOLDSKY_API_URL=
47 | # Path template to fetch reserves for a pool (must include {pool_id})
48 | # GOLDSKY_POOL_RES_PATH=pools/{pool_id}/reserves
49 |
50 | # ================= Pool IDs (reference) =================
51 | # These are example placeholders for Hyperliquid EVM pool IDs. Replace with real IDs.
52 | # Not directly consumed by the backend; used as reference for UI/manual testing.
53 | # HYPE_USDC_POOL_ID=
54 | # HYPE_uETH_POOL_ID=
55 | # KHYPE_HYPE_POOL_ID=
56 |
57 | # ================= HyperLend / Flash Loan (placeholders) =================
58 | # DO NOT hardcode production keys in this file. Replace with real addresses per network.
59 | # HyperLend pool and deployed executor contract
60 | # HYPERLEND_POOL=
61 | # ARB_EXECUTOR_ADDRESS=
62 |
63 | # Evaluator cost params (USD/percentage)
64 | # FLASH_FEE_BPS=0 # proportional fee on notional (basis points)
65 | # REFERRAL_BPS=0 # optional referral on notional (basis points)
66 | # FLASH_FIXED_USD=0 # fixed overhead per flash loan in USD
67 | # EXECUTOR_FEE_USD=0 # off-chain/on-chain service fee in USD
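68 | # Example: with FLASH_FEE_BPS=9 on a $10,000 notional, the proportional flash cost is
69 | # 10000 * 9 / 10000 = $9, before any FLASH_FIXED_USD or EXECUTOR_FEE_USD overhead.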
68 |
69 | # ================= TS Eval Service / Risk Model =================
70 | # TypeScript evaluation server port
71 | # TS_API_PORT=8082
72 |
73 | # Latency & fill
74 | # EDGE_DECAY_BPS_PER_SEC=3
75 | # BASE_FILL_PROB=0.9
76 | # FILL_THETA=0.15
77 |
78 | # Slippage (empirical defaults)
79 | # SLIP_ALPHA=1.25
80 | # SLIP_K=0.9
81 |
82 | # Stochastic frictions
83 | # GAS_USD_STD=0.2
84 | # ADVERSE_USD_STD=0.2
85 | # MEV_PENALTY_USD=0
86 |
87 | # Risk preference (mean-variance)
88 | # RISK_AVERSION_LAMBDA=0
89 |
--------------------------------------------------------------------------------
/check-system.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Simple JavaScript System Check
3 | * No TypeScript compilation needed
4 | */
5 |
6 | const fs = require('fs');
7 | const path = require('path');
8 |
9 | console.log('🔍 Hyperliquid Arbitrage Bot - System Check\n');
10 | console.log('='.repeat(50));
11 |
12 | // Check critical files exist
13 | const criticalFiles = [
14 | // Core Components
15 | { path: 'src/risk/RiskManager.ts', name: 'Risk Manager' },
16 | { path: 'src/risk/PositionMonitor.ts', name: 'Position Monitor' },
17 | { path: 'src/integration/SignalBridge.ts', name: 'Signal Bridge' },
18 | { path: 'src/integration/main.ts', name: 'Main Integration' },
19 |
20 | // Monitoring
21 | { path: 'src/monitoring/MetricsCollector.ts', name: 'Metrics Collector' },
22 | { path: 'src/monitoring/dashboard.ts', name: 'Dashboard' },
23 |
24 | // Configuration
25 | { path: '.env', name: 'Environment Config' },
26 | { path: '.env.execution', name: 'Execution Config' },
27 |
28 | // Docker
29 | { path: 'Dockerfile', name: 'Dockerfile' },
30 | { path: 'docker-compose.yml', name: 'Docker Compose' },
31 | { path: 'prometheus.yml', name: 'Prometheus Config' }
32 | ];
33 |
34 | let allFilesExist = true;
35 | console.log('📁 Checking Critical Files:\n');
36 |
37 | criticalFiles.forEach(file => {
38 | const fullPath = path.join(__dirname, file.path);
39 | const exists = fs.existsSync(fullPath);
40 | console.log(` ${exists ? '✅' : '❌'} ${file.name.padEnd(25)} - ${file.path}`);
41 | if (!exists) allFilesExist = false;
42 | });
43 |
44 | // Check package dependencies
45 | console.log('\n📦 Checking Dependencies:\n');
46 | try {
47 | const packageJson = JSON.parse(fs.readFileSync('package.json', 'utf8'));
48 | const requiredDeps = [
49 | 'express',
50 | 'ws',
51 | 'winston',
52 | 'prom-client',
53 | 'dotenv',
54 | 'ethers',
55 | 'viem'
56 | ];
57 |
58 | requiredDeps.forEach(dep => {
59 |     const installed = (packageJson.dependencies || {})[dep] || (packageJson.devDependencies || {})[dep];
60 | console.log(` ${installed ? '✅' : '❌'} ${dep.padEnd(15)} ${installed ? `(${installed})` : '- NOT FOUND'}`);
61 | });
62 | } catch (error) {
63 | console.log(' ❌ Could not read package.json');
64 | }
65 |
66 | // Check monitoring dashboard
67 | console.log('\n🖥️ Checking Services:\n');
68 |
69 | const http = require('http');
70 |
71 | // Check dashboard
72 | const checkDashboard = new Promise((resolve) => {
73 | http.get('http://localhost:4000/api/health', (res) => {
74 | if (res.statusCode === 200) {
75 | console.log(' ✅ Monitoring Dashboard - RUNNING on port 4000');
76 | resolve(true);
77 | } else {
78 | console.log(' ❌ Monitoring Dashboard - NOT RESPONDING');
79 | resolve(false);
80 | }
81 | }).on('error', () => {
82 | console.log(' ⚠️ Monitoring Dashboard - NOT RUNNING (start with: npm run monitor)');
83 | resolve(false);
84 | });
85 | });
86 |
87 | // Run checks
88 | Promise.all([checkDashboard]).then(results => {
89 | console.log('\n' + '='.repeat(50));
90 | console.log('📊 SYSTEM STATUS SUMMARY\n');
91 |
92 | if (allFilesExist) {
93 | console.log('✅ All critical files present');
94 | } else {
95 | console.log('⚠️ Some files missing - check above');
96 | }
97 |
98 | console.log('\n🎯 NEXT STEPS:\n');
99 | console.log('1. Start monitoring dashboard:');
100 | console.log(' npm run monitor\n');
101 | console.log('2. Run in dry-run mode:');
102 | console.log(' BOT_MODE=dry-run npm run start:integration\n');
103 | console.log('3. Deploy with Docker:');
104 | console.log(' docker-compose up --build\n');
105 |
106 | console.log('📚 Full documentation: PRODUCTION_README.md');
107 | console.log('='.repeat(50));
108 | });
109 |
--------------------------------------------------------------------------------
/PRODUCTION_CHECKLIST.md:
--------------------------------------------------------------------------------
1 | # 🚀 Production Readiness Checklist
2 |
3 | ## ✅ Completed Components
4 |
5 | ### Core Infrastructure
6 | - [x] Risk Management System
7 | - [x] Position Monitoring
8 | - [x] Metrics Collection (Prometheus)
9 | - [x] Monitoring Dashboard
10 | - [x] Docker Containerization
11 | - [x] Deployment Scripts
12 |
13 | ### Safety Features
14 | - [x] Dry-run mode
15 | - [x] Circuit breakers
16 | - [x] Rate limiting
17 | - [x] Error recovery
18 | - [x] Graceful shutdown
19 |
20 | ### Monitoring & Alerts
21 | - [x] Prometheus metrics
22 | - [x] Health endpoints
23 | - [x] WebSocket status
24 | - [x] PnL tracking
25 | - [x] Risk score monitoring
26 |
27 | ---
28 |
29 | ## ⚠️ Known Issues to Fix
30 |
31 | ### Type Safety (Non-Critical)
32 | - [ ] Feed Signal vs Execution Signal type alignment
33 | - [ ] Integration test type updates
34 | - [ ] SignalExecutor property references
35 |
36 | ### Testing
37 | - [ ] End-to-end integration tests
38 | - [ ] Load testing
39 | - [ ] Failover testing
40 |
41 | ---
42 |
43 | ## 📋 Pre-Deployment Checklist
44 |
45 | ### 1. Environment Setup
46 | Check that all environment files exist:
47 |
48 | - [ ] .env (main config)
49 | - [ ] .env.execution (execution params)
50 | - [ ] .env.testnet (testnet config)
51 |
52 |
53 | ### 2. API Keys & Credentials
54 | Verify in .env:
55 |
56 | - [ ] HYPERLIQUID_PRIVATE_KEY set
57 | - [ ] HYPERLIQUID_TESTNET=true for testing
58 | - [ ] REDIS_URL configured
59 |
60 |
61 | ### 3. Risk Parameters
62 | Verify in .env.execution:
63 |
64 | - [ ] MAX_POSITION_SIZE appropriate
65 | - [ ] STOP_LOSS_PERCENT conservative
66 | - [ ] MIN_PROFIT_THRESHOLD realistic
67 |
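68 | For reference, a minimal sketch of what this file might contain, with illustrative values only (the units are assumptions; tune everything to your own capital and risk tolerance):
69 |
70 | ```bash
71 | # .env.execution (illustrative values, not recommendations)
72 | MAX_POSITION_SIZE=1000       # per-position size cap (assumed USD notional)
73 | STOP_LOSS_PERCENT=2          # exit when an open position moves 2% against entry
74 | MIN_PROFIT_THRESHOLD=5       # skip signals below this expected net profit (assumed USD)
75 | ```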
68 |
69 | ### 4. Test Sequence
70 | ```bash
71 | # 1. Start monitoring
72 | npm run monitor
73 |
74 | # 2. Run dry-run mode
75 | BOT_MODE=dry-run npm run start:integration
76 |
77 | # 3. Test with small amounts on testnet
78 | HYPERLIQUID_TESTNET=true npm run start:integration
79 |
80 | # 4. Production deployment
81 | docker-compose up --build
82 | ```
83 |
84 | ---
85 |
86 | ## 🔧 Quick Commands
87 |
88 | ### Start Monitoring Dashboard
89 | ```bash
90 | npm run monitor
91 | # Visit http://localhost:4000
92 | ```
93 |
94 | ### Run in Dry-Run Mode
95 | ```bash
96 | BOT_MODE=dry-run npm run start:integration
97 | ```
98 |
99 | ### Docker Deployment
100 | ```bash
101 | # Build and run
102 | docker-compose up --build
103 |
104 | # Check logs
105 | docker-compose logs -f bot
106 |
107 | # Stop
108 | docker-compose down
109 | ```
110 |
111 | ### Python Backend (Alternative)
112 | ```bash
113 | cd python-backend
114 | pip install -r requirements.txt
115 | python -m uvicorn app.main:app --reload
116 | ```
117 |
118 | ---
119 |
120 | ## 📊 Monitoring URLs
121 |
122 | - **Dashboard**: http://localhost:4000
123 | - **Prometheus**: http://localhost:9090
124 | - **Grafana**: http://localhost:3000
125 | - **API Health**: http://localhost:4000/api/health
126 |
127 | ---
128 |
129 | ## 🚨 Emergency Procedures
130 |
131 | ### Stop All Trading
132 | ```bash
133 | docker-compose stop bot
134 | # or
135 | kill -SIGTERM <bot-pid>
136 | ```
137 |
138 | ### Reset State
139 | ```bash
140 | redis-cli FLUSHDB
141 | rm -rf logs/*.log
142 | ```
143 |
144 | ### Rollback
145 | ```bash
146 | git checkout <previous-version>
147 | docker-compose up --build
148 | ```
149 |
150 | ---
151 |
152 | ## 📈 Success Metrics
153 |
154 | Monitor these KPIs after deployment:
155 |
156 | 1. **Uptime**: > 99%
157 | 2. **Win Rate**: > 60%
158 | 3. **Daily PnL**: Positive
159 | 4. **Risk Score**: < 0.7
160 | 5. **Error Rate**: < 1%
161 |
162 | ---
163 |
164 | ## 🎯 Final Steps
165 |
166 | 1. **Review** this checklist completely
167 | 2. **Test** in dry-run mode first
168 | 3. **Monitor** closely for first 24 hours
169 | 4. **Scale** gradually based on performance
170 |
171 | ---
172 |
173 | **Last Updated**: 2025-01-11
174 | **Status**: READY FOR DRY-RUN TESTING
175 |
--------------------------------------------------------------------------------
/src/eval/types.ts:
--------------------------------------------------------------------------------
1 | // Strong types and explicit units for evaluation
2 |
3 | export type FeesConfig = {
4 | totalFeesBps: number; // router + LP total, bps
5 | flashFeeBps: number; // ENV: FLASH_FEE_BPS
6 | referralBps: number; // ENV: REFERRAL_BPS
7 | executorFeeUsd: number; // ENV: EXECUTOR_FEE_USD
8 | flashFixedUsd: number; // ENV: FLASH_FIXED_USD
9 | };
10 |
11 | export type MarketFrictions = {
12 | gasUsdMean: number;
13 | gasUsdStd?: number; // stddev for stochastic gas (USD)
14 | adverseUsdMean: number; // expected adverse selection cost (USD)
15 | adverseUsdStd?: number; // stddev (USD)
16 | extraUsd?: number; // any other fixed overheads per attempt (USD)
17 | mevPenaltyUsd?: number; // penalize reorg/MEV failures (USD)
18 | };
19 |
20 | export type LatencyExec = {
21 | latencySec: number; // time from signal -> submit -> mined
22 | edgeDecayBpsPerSec: number; // how fast edge decays with latency
23 | baseFillProb: number; // baseline prob of getting both legs (no latency)
24 | partialFillShape?: "linear" | "concave" | "convex";
25 | theta?: number; // decay parameter for fill prob; default via env
26 | };
27 |
28 | export type SlippageModel = {
29 | kind: "amm_v2" | "univ3" | "empirical";
30 | k?: number; // invariant proxy or empirical slope
31 | alpha?: number; // curvature exponent (>1 convex)
32 | liquidityRefUsd?: number; // reference depth for scaling
33 | // UniV3 optional parameters (if present, attempt exact-ish sim)
34 | sqrtPriceX96?: string; // current sqrtPrice in Q96 (as decimal string)
35 | liquidity?: string; // current in-range liquidity (as decimal string)
36 | feeTierBps?: number; // 5, 30, 100
37 | tickSpacing?: number; // pool tick spacing
38 | // Optional minimal tick map for offline sim (coarse)
39 | ticks?: Array<{ index: number; liquidityNet: string; sqrtPriceX96?: string }>; // subset of initialized ticks
40 | // Optional USD conversion hints for mapping sizeUsd to token amounts
41 | usdPerTokenIn?: number; // price of input token in USD
42 | zeroForOne?: boolean; // swap direction: token0 -> token1 when true
43 | };
44 |
45 | export type FailureTree = {
46 | failBeforeFillProb: number; // tx fails/replaced, bundle not landed
47 | failBetweenLegsProb: number; // first swap fills, second not
48 | reorgOrMevProb: number; // reorgs/sandwich, etc.
49 | };
50 |
51 | export type ArbInputs = {
52 | edgeBps: number; // instantaneous bps edge at signal time
53 | notionalUsd: number; // proposed size (USD)
54 | fees: FeesConfig;
55 | frictions: MarketFrictions;
56 | latency: LatencyExec;
57 | slippage: SlippageModel;
58 | failures: FailureTree;
59 | flashEnabled: boolean;
60 | riskAversion?: number; // lambda for mean-variance penalty
61 | capitalUsd?: number; // optional capital for utilization metrics
62 | secondsPerBlock?: number; // for convenience if needed by callers
63 | };
64 |
65 | export type ArbResult = {
66 | net_usd_est: number; // EV in USD
67 | ev_per_sec: number; // EV divided by expected seconds
68 | size_opt_usd: number; // argmax of EV_adj subject to constraints
69 | p_success: number; // prob of both legs success
70 | slip_bps_eff: number; // effective slippage bps used
71 | breakeven_bps: number; // all-in break-even bps at size_opt
72 | var95?: number; // optional VaR at 95%
73 | cvar95?: number; // optional CVaR at 95%
74 | score: number; // rank metric (e.g., EV_adj/sec)
75 | // legacy/breakdown
76 | gas_usd: number;
77 | seconds: number;
78 | flash_fee_bps: number;
79 | referral_bps: number;
80 | flash_fixed_usd: number;
81 | executor_fee_usd: number;
82 | flash_cost_usd: number;
83 | components?: Record<string, number>; // optional diagnostics
84 | };
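85 | // Note: breakeven_bps combines effective slippage and router/LP fees with the fixed USD
86 | // frictions (gas, adverse selection, flash-loan costs) expressed in bps of the chosen size.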
85 |
--------------------------------------------------------------------------------
/src/api/routes/strategy.ts:
--------------------------------------------------------------------------------
1 | import { Router, Request, Response } from 'express';
2 | import { z } from 'zod';
3 | import { zBacktestRun, zCreateStrategy, zUpdateStrategy } from '../../types/strategy';
4 | import { strategyStore } from '../../storage/strategyStore';
5 | import { checkApproval } from '../../policy/strategyGate';
6 |
7 | const router = Router();
8 |
9 | // Create strategy
10 | router.post('/', async (req: Request, res: Response) => {
11 | try {
12 | const input = zCreateStrategy.parse(req.body);
13 | const created = await strategyStore.createStrategy(input);
14 | res.status(201).json(created);
15 | } catch (e: any) {
16 | if (e instanceof z.ZodError) return res.status(400).json({ error: 'invalid_input', details: e.errors, timestamp: Date.now() });
17 | console.error(e);
18 | res.status(500).json({ error: 'internal_error', timestamp: Date.now() });
19 | }
20 | });
21 |
22 | // List strategies
23 | router.get('/', async (_req: Request, res: Response) => {
24 | const items = await strategyStore.listStrategies();
25 | res.json(items);
26 | });
27 |
28 | // Get strategy
29 | router.get('/:id', async (req: Request, res: Response) => {
30 | const item = await strategyStore.getStrategy(req.params.id);
31 | if (!item) return res.status(404).json({ error: 'not_found', timestamp: Date.now() });
32 | res.json(item);
33 | });
34 |
35 | // Update strategy (limited when approved)
36 | router.patch('/:id', async (req: Request, res: Response) => {
37 | try {
38 | const patch = zUpdateStrategy.parse(req.body);
39 | const updated = await strategyStore.updateStrategy(req.params.id, patch);
40 | if (!updated) return res.status(404).json({ error: 'not_found', timestamp: Date.now() });
41 | res.json(updated);
42 | } catch (e: any) {
43 | if (e instanceof z.ZodError) return res.status(400).json({ error: 'invalid_input', details: e.errors, timestamp: Date.now() });
44 | console.error(e);
45 | res.status(500).json({ error: 'internal_error', timestamp: Date.now() });
46 | }
47 | });
48 |
49 | // Archive strategy
50 | router.delete('/:id', async (req: Request, res: Response) => {
51 | const updated = await strategyStore.archiveStrategy(req.params.id);
52 | if (!updated) return res.status(404).json({ error: 'not_found', timestamp: Date.now() });
53 | res.json(updated);
54 | });
55 |
56 | // Register backtest
57 | router.post('/:id/backtest', async (req: Request, res: Response) => {
58 | try {
59 | const input = zBacktestRun.parse(req.body);
60 | const s = await strategyStore.getStrategy(req.params.id);
61 | if (!s) return res.status(404).json({ error: 'not_found', timestamp: Date.now() });
62 | const run = await strategyStore.addBacktest(req.params.id, input);
63 | res.status(201).json(run);
64 | } catch (e: any) {
65 | if (e instanceof z.ZodError) return res.status(400).json({ error: 'invalid_input', details: e.errors, timestamp: Date.now() });
66 | console.error(e);
67 | res.status(500).json({ error: 'internal_error', timestamp: Date.now() });
68 | }
69 | });
70 |
71 | // Approve strategy (or reject based on policy)
72 | router.post('/:id/approve', async (req: Request, res: Response) => {
73 | try {
74 | const s = await strategyStore.getStrategy(req.params.id);
75 | if (!s) return res.status(404).json({ error: 'not_found', timestamp: Date.now() });
76 | const runs = await strategyStore.listBacktests(s.id);
77 | if (!runs.length) return res.status(400).json({ error: 'no_backtests', timestamp: Date.now() });
78 | const latest = runs.sort((a, b) => b.createdAt - a.createdAt)[0];
79 | const decision = checkApproval(latest.coverageHours, latest.stats);
80 | const updated = await strategyStore.approveStrategy(s.id, {
81 | at: Date.now(),
82 | status: decision.status,
83 | coverageHours: decision.coverageHours,
84 | metrics: latest.stats,
85 | reason: decision.reason,
86 | });
87 | res.json({
88 | status: decision.status,
89 | coverageHours: decision.coverageHours,
90 | thresholds: decision.thresholds,
91 | reason: decision.reason,
92 | strategy: updated,
93 | });
94 | } catch (e: any) {
95 | console.error(e);
96 | res.status(500).json({ error: 'internal_error', timestamp: Date.now() });
97 | }
98 | });
99 |
100 | export default router;
101 |
--------------------------------------------------------------------------------
/src/execution/types.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Execution System Types
3 | * Bridges the gap between feed types and execution requirements
4 | */
5 |
6 | import { ArbitrageOpportunity as FeedOpportunity } from '../feeds/types';
7 |
8 | /**
9 | * Extended ArbitrageOpportunity for execution
10 | */
11 | export interface ExecutableOpportunity extends FeedOpportunity {
12 | // Additional fields for execution
13 | pair: string; // Trading pair (e.g., "ETH-USDC")
14 | expectedPrice: number; // Expected execution price
15 | exchanges: {
16 | buy: string;
17 | sell: string;
18 | };
19 | priceDiff: number;
20 | expectedProfit: number;
21 | requiredCapital: number;
22 | estimatedGas: number;
23 | priceImpact: number;
24 | volume24h: number;
25 | venueLiquidity: {
26 | buy: number;
27 | sell: number;
28 | };
29 | }
30 |
31 | /**
32 | * Signal for execution
33 | */
34 | export interface Signal {
35 | id: string;
36 | opportunity: ExecutableOpportunity;
37 | timestamp: number;
38 | expectedValue: number;
39 | confidenceScore: number;
40 | riskScore: number;
41 | executionSize: number;
42 | priority: number;
43 | shouldExecute: boolean;
44 | validUntil: number;
45 | metadata: {
46 | source: string;
47 | model: string;
48 | gasEstimate: number;
49 | [key: string]: any;
50 | };
51 | }
52 |
53 | /**
54 | * Convert feed opportunity to executable opportunity
55 | */
56 | export function toExecutableOpportunity(
57 | feedOpp: FeedOpportunity,
58 |   additionalData?: Partial<ExecutableOpportunity>
59 | ): ExecutableOpportunity {
60 | // Extract pair from path (first and last tokens)
61 | const pair = feedOpp.path.length >= 2
62 | ? `${feedOpp.path[0]}-${feedOpp.path[feedOpp.path.length - 1]}`
63 | : 'UNKNOWN-PAIR';
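64 |   // e.g. path ["ETH", "USDC"] -> pair "ETH-USDC"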
64 |
65 | // Calculate expected price from prices object
66 | const prices = Object.values(feedOpp.prices);
67 | const expectedPrice = prices.length > 0 ? prices[0] : 0;
68 |
69 | // Determine exchanges based on type
70 | const exchanges = {
71 | buy: feedOpp.type === 'cross_venue' ? 'hyperliquid' : 'dex',
72 | sell: feedOpp.type === 'cross_venue' ? 'uniswap' : 'dex'
73 | };
74 |
75 | return {
76 | ...feedOpp,
77 | pair,
78 | expectedPrice,
79 | exchanges,
80 | priceDiff: feedOpp.netProfitUsd,
81 | expectedProfit: feedOpp.netProfitUsd,
82 | requiredCapital: feedOpp.optimalSizeUsd,
83 | estimatedGas: feedOpp.estimatedGasUsd,
84 | priceImpact: 0.001, // Default 0.1%
85 | volume24h: Object.values(feedOpp.volumes).reduce((a, b) => a + b, 0),
86 | venueLiquidity: {
87 | buy: Object.values(feedOpp.liquidity)[0] || 0,
88 | sell: Object.values(feedOpp.liquidity)[1] || 0
89 | },
90 | ...additionalData
91 | };
92 | }
93 |
94 | /**
95 | * Order types
96 | */
97 | export interface OrderRequest {
98 | coin: string;
99 | is_buy: boolean;
100 | sz: number;
101 | limit_px: number;
102 | order_type: 'limit' | 'market';
103 | reduce_only?: boolean;
104 | post_only?: boolean;
105 | ioc?: boolean;
106 | cloid?: string;
107 | }
108 |
109 | export interface OrderResponse {
110 | status: 'ok' | 'error';
111 | response?: {
112 | type: 'order';
113 | data: {
114 | statuses: Array<{
115 | resting?: { oid: number };
116 | filled?: { totalSz: string; avgPx: string };
117 | error?: string;
118 | }>;
119 | };
120 | };
121 | error?: string;
122 | }
123 |
124 | export interface ExecutionResult {
125 | signalId: string;
126 | orderId?: string;
127 | status: 'success' | 'failed' | 'partial' | 'rejected';
128 | executedSize: number;
129 | executedPrice: number;
130 | slippage: number;
131 | fees: number;
132 | timestamp: number;
133 | error?: string;
134 | }
135 |
136 | export interface Position {
137 | coin: string;
138 | szi: number; // signed size
139 | entryPx: number;
140 | positionValue: number;
141 | unrealizedPnl: number;
142 | returnOnEquity: number;
143 | funding: number;
144 | }
145 |
146 | export interface AccountState {
147 | marginSummary: {
148 | accountValue: number;
149 | totalMarginUsed: number;
150 | totalNtlPos: number;
151 | totalRawUsd: number;
152 | withdrawable: number;
153 | };
154 | crossMarginSummary: {
155 | accountValue: number;
156 | totalMarginUsed: number;
157 | };
158 | assetPositions: Position[];
159 | }
160 |
--------------------------------------------------------------------------------
/scripts/setup_and_run.ps1:
--------------------------------------------------------------------------------
1 | Param(
2 | [switch]$NoInstall
3 | )
4 |
5 | $ErrorActionPreference = 'Stop'
6 |
7 | # Resolve paths
8 | $ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
9 | $ProjectRoot = Split-Path -Parent $ScriptDir
10 | $BackendPath = Join-Path $ProjectRoot 'python-backend'
11 | $WebPath = Join-Path $ProjectRoot 'web'
12 | $VenvPath = Join-Path $ProjectRoot '.venv'
13 | $PythonExe = Join-Path $VenvPath 'Scripts\python.exe'
14 |
15 | Write-Host "Project root: $ProjectRoot"
16 |
17 | # 1) Ensure venv
18 | if (!(Test-Path $VenvPath)) {
19 | Write-Host 'Creating virtual environment (.venv)...'
20 | python -m venv $VenvPath
21 | }
22 | if (!(Test-Path $PythonExe)) {
23 | throw "Python executable not found in venv: $PythonExe"
24 | }
25 |
26 | # 2) Install dependencies (unless skipped)
27 | if (-not $NoInstall) {
28 | Write-Host 'Upgrading pip...'
29 | & $PythonExe -m pip install --upgrade pip
30 | Write-Host 'Installing backend requirements...'
31 | & $PythonExe -m pip install -r (Join-Path $BackendPath 'requirements.txt')
32 | }
33 |
34 | # 3) Set environment variables (session-only)
35 | $env:ALCHEMY_RPC_URL = 'https://hyperliquid-mainnet.g.alchemy.com/v2/alcht_ejTTQ7WxJAnNw8yYbNSjRvlJyQ9gul'
36 |
37 | # --- Goldsky GraphQL configuration ---
38 | $env:GOLDSKY_MODE = 'graphql'
39 | # Fill with your actual GN endpoint (public or private)
40 | # Public example:
41 | # $env:GOLDSKY_GQL_URL = 'https://api.goldsky.com/api/public/{project_id}/subgraphs/{subgraph_name}/{tag}/gn'
42 | # Private example:
43 | # $env:GOLDSKY_GQL_URL = 'https://api.goldsky.com/api/private/{project_id}/subgraphs/{subgraph_name}/{tag}/gn'
44 | # Defaults below to the public subgraph URL used in config/sample.env for quick start; replace with your private GN URL if needed.
45 | $env:GOLDSKY_GQL_URL = 'https://api.goldsky.com/api/public/project_cmbbm2iwckb1b01t39xed236t/subgraphs/uniswap-v3-hyperevm-position/prod/gn'
46 |
47 | # Auth header scheme
48 | # Private endpoints typically authenticate with "Authorization: Bearer <token>".
49 | # For public endpoints, no header is required and the key stays empty.
50 | # For private endpoints, set GOLDSKY_API_KEY to your token before running.
51 | $env:GOLDSKY_API_HEADER = 'Authorization'
52 | $env:GOLDSKY_API_PREFIX = 'Bearer '
53 | $env:GOLDSKY_API_KEY = '' # public subgraph: leave empty; private: set token here or before running
54 |
55 | # Optional overrides for schema differences
56 | # Default to a Uniswap V3-style poolHourData query, which many subgraphs expose.
57 | $env:GOLDSKY_GQL_QUERY = @'
58 | query PoolHour($poolId: ID!, $limit: Int!) {
59 | pool(id: $poolId) {
60 | poolHourData(first: $limit, orderBy: periodStartUnix, orderDirection: desc) {
61 | periodStartUnix
62 | token0Price
63 | token1Price
64 | liquidity
65 | sqrtPrice
66 | tvlUSD
67 | }
68 | }
69 | }
70 | '@
71 | $env:GOLDSKY_GQL_ITEMS_PATH = 'data.pool.poolHourData'
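72 | # The items path is walked through the GraphQL JSON response, e.g.
73 | # {"data":{"pool":{"poolHourData":[...]}}} resolves to the poolHourData array consumed by the backend.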
72 |
73 | # Cache TTL for pool history endpoint (seconds)
74 | $env:GOLDSKY_CACHE_TTL_SEC = '30'
75 |
76 | Write-Host 'Environment variables set for this session:'
77 | Write-Host " ALCHEMY_RPC_URL=$($env:ALCHEMY_RPC_URL)"
78 | Write-Host " GOLDSKY_MODE=$($env:GOLDSKY_MODE)"
79 | Write-Host " GOLDSKY_GQL_URL=$($env:GOLDSKY_GQL_URL)"
80 | Write-Host " GOLDSKY_API_KEY=(hidden)"
81 | Write-Host " GOLDSKY_API_HEADER=$($env:GOLDSKY_API_HEADER)"
82 | Write-Host " GOLDSKY_API_PREFIX=$($env:GOLDSKY_API_PREFIX)"
83 | Write-Host " GOLDSKY_GQL_ITEMS_PATH=$($env:GOLDSKY_GQL_ITEMS_PATH)"
84 | Write-Host " GOLDSKY_CACHE_TTL_SEC=$($env:GOLDSKY_CACHE_TTL_SEC)"
85 |
86 | # 4) Start backend (Uvicorn) in a new window
87 | $UvicornArgs = @('-m','uvicorn','app.main:app','--host','0.0.0.0','--port','9011','--reload')
88 | Write-Host 'Starting backend on http://127.0.0.1:9011 ...'
89 | Start-Process -FilePath $PythonExe -ArgumentList $UvicornArgs -WorkingDirectory $BackendPath -WindowStyle Normal
90 |
91 | # 5) Start frontend static server in a new window
92 | $HttpArgs = @('-m','http.server','9010')
93 | Write-Host 'Starting frontend server on http://127.0.0.1:9010 ...'
94 | Start-Process -FilePath $PythonExe -ArgumentList $HttpArgs -WorkingDirectory $WebPath -WindowStyle Normal
95 |
96 | Write-Host ''
97 | Write-Host 'Done. Open the dashboard:'
98 | Write-Host ' Frontend: http://127.0.0.1:9010'
99 | Write-Host ' Backend : http://127.0.0.1:9011'
100 | Write-Host ''
101 | Write-Host 'Notes:'
102 | Write-Host ' - To skip reinstall on subsequent runs, use: -NoInstall'
103 | Write-Host ' - Frontend: ensure Backend Base is http://127.0.0.1:9011 (clear localStorage backend_base if needed).'
104 | Write-Host ' - Goldsky: using GraphQL mode; set GOLDSKY_GQL_URL and GOLDSKY_API_KEY as needed.'
105 |
--------------------------------------------------------------------------------
/python-backend/app/analytics/evaluator.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 | import pandas as pd
3 | from .arb_formula import GasModel, LatencyModel, expected_net_usd, score_hft
4 | from .size_opt import solve_best_dx
5 |
6 | def evaluate_batch(opps: List[Dict[str,Any]], params: Dict[str,Any]) -> pd.DataFrame:
7 | rows = []
8 | for opp in opps:
9 | pair = opp.get("pair","?")
10 | route = opp.get("route","?")
11 | Rin = float(opp.get("rin") or 0.0)
12 | Rout = float(opp.get("rout") or 0.0)
13 | fee_bps = float(opp.get("fee_bps", 0.0))
14 | px_out_usd = float(opp.get("px_out_usd", opp.get("native_usd",1.0)))
15 | edge_bps = float(opp.get("edge_bps", 0.0))
16 | gas = GasModel(
17 | base_fee_gwei=float(params.get("base_fee_gwei",0.5)),
18 | tip_gwei=float(params.get("priority_tip_gwei",0.5)),
19 | native_usd=float(opp.get("native_usd",1.0)),
20 | gas_limit=int(opp.get("gas_limit", opp.get("est_gas_limit",200000))),
21 | max_gas_usd_per_trade=float(params.get("max_gas_usd_per_trade",100.0))
22 | )
23 | lat = LatencyModel(
24 | decision_to_submit_ms=float(params.get("decision_to_submit_ms",200)),
25 | submit_to_inclusion_blocks=int(params.get("submit_to_inclusion_blocks",1)),
26 | seconds_per_block=float(params.get("seconds_per_block",1.0)),
27 | k_vol=float(params.get("k_vol",0.0)),
28 | notional_beta=float(params.get("notional_beta",1.0))
29 | )
30 | slip_cap_bps = float(params.get("slip_cap_bps",50.0))
31 | notional_cap = float(params.get("max_trade_usd",5e4))
32 | total_fees_bps = float(params.get("total_fees_bps", opp.get("lp_fees_bps",0.0) + opp.get("router_fees_bps",0.0)))
33 | fail_prob = float(params.get("fail_prob",0.0))
34 |
35 | # --- HyperLend flash-loan costs (all in USD space) ---
36 | # Modeled as extra_usd costs added to expected_net_usd
37 | flash_fee_bps = float(params.get("flash_fee_bps", 0.0)) # fee proportional to notional
38 | referral_bps = float(params.get("referral_bps", 0.0)) # optional referral on notional
39 | flash_fixed_usd = float(params.get("flash_fixed_usd", 0.0)) # fixed overhead per flash
40 | executor_fee_usd = float(params.get("executor_fee_usd", 0.0)) # onchain executor service fee
41 |
42 | def flash_cost_usd(notional_usd: float) -> float:
43 | var_fee = (flash_fee_bps + referral_bps) / 10000.0 * notional_usd
44 | return float(var_fee + flash_fixed_usd + executor_fee_usd)
45 |
46 | def net_fn(notional_usd: float)->float:
47 | adv = lat.adverse_usd(notional_usd)
48 | extra = float(params.get("extra_usd",0.0)) + flash_cost_usd(notional_usd)
49 | return expected_net_usd(edge_bps, notional_usd, total_fees_bps, gas.usd(), adv, extra_usd=extra, fail_prob=fail_prob)
50 | if Rin>0 and Rout>0:
51 | best_dx, best_net, best_slip = solve_best_dx(Rin,Rout,fee_bps,px_out_usd,notional_cap,slip_cap_bps,net_fn)
52 | else:
53 | best_dx, best_net, best_slip = 0.0, net_fn(notional_cap), 0.0
54 | seconds = max(lat.inclusion_seconds(), 1e-3)
55 | s = score_hft(best_net, gas.usd(), seconds,
56 | float(params.get("w_net",1.0)),
57 | float(params.get("w_ppg",0.6)),
58 | float(params.get("w_pps",0.6)))
59 | # Expose key components for observability
60 | # For the selected best_dx, approximate flash costs using notional ~ best_dx * px_out_usd (bounded by notional_cap)
61 | approx_notional_usd = min(notional_cap, max(0.0, float(best_dx) * float(px_out_usd))) if Rin>0 and Rout>0 else notional_cap
62 | rows.append({
63 | "pair":pair, "route":route,
64 | "net_usd":round(best_net,6),
65 | "gas_usd":round(gas.usd(),6),
66 | "seconds":round(seconds,4),
67 | "slip_bps":round(best_slip,4),
68 | "score":round(s,6),
69 | "flash_fee_bps":flash_fee_bps,
70 | "referral_bps":referral_bps,
71 | "flash_fixed_usd":flash_fixed_usd,
72 | "executor_fee_usd":executor_fee_usd,
73 | "flash_cost_usd":round(flash_cost_usd(approx_notional_usd),6)
74 | })
75 |     df = pd.DataFrame(rows)
76 |     return df if df.empty else df.sort_values("score", ascending=False).reset_index(drop=True)
77 |
78 | def greedy_knapsack_by_score(df: pd.DataFrame, gas_budget_usd: float) -> pd.DataFrame:
79 | sel = []
80 | total_gas = 0.0
81 | for _,r in df.iterrows():
82 | if total_gas + r["gas_usd"] <= gas_budget_usd:
83 | sel.append(r)
84 | total_gas += r["gas_usd"]
85 | return pd.DataFrame(sel)
86 |
--------------------------------------------------------------------------------
/scripts/deploy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Hyperliquid Arbitrage Bot Deployment Script
4 | # Handles deployment to production/testnet environments
5 |
6 | set -e
7 |
8 | # Colors for output
9 | RED='\033[0;31m'
10 | GREEN='\033[0;32m'
11 | YELLOW='\033[1;33m'
12 | NC='\033[0m' # No Color
13 |
14 | # Default values
15 | ENVIRONMENT="testnet"
16 | ACTION="deploy"
17 | FORCE_BUILD=false
18 |
19 | # Parse arguments
20 | while [[ $# -gt 0 ]]; do
21 | case $1 in
22 | --env)
23 | ENVIRONMENT="$2"
24 | shift 2
25 | ;;
26 | --action)
27 | ACTION="$2"
28 | shift 2
29 | ;;
30 | --force-build)
31 | FORCE_BUILD=true
32 | shift
33 | ;;
34 | --help)
35 | echo "Usage: ./deploy.sh [OPTIONS]"
36 | echo ""
37 | echo "Options:"
38 | echo " --env [production|testnet|dry-run] Deployment environment (default: testnet)"
39 | echo " --action [deploy|stop|restart|status] Action to perform (default: deploy)"
40 | echo " --force-build Force Docker image rebuild"
41 | echo " --help Show this help message"
42 | exit 0
43 | ;;
44 | *)
45 | echo -e "${RED}Unknown option: $1${NC}"
46 | exit 1
47 | ;;
48 | esac
49 | done
50 |
51 | echo -e "${GREEN}========================================${NC}"
52 | echo -e "${GREEN} Hyperliquid Arbitrage Bot Deployer${NC}"
53 | echo -e "${GREEN}========================================${NC}"
54 | echo ""
55 | echo -e "${YELLOW}Environment:${NC} $ENVIRONMENT"
56 | echo -e "${YELLOW}Action:${NC} $ACTION"
57 | echo ""
58 |
59 | # Validate environment
60 | if [[ ! "$ENVIRONMENT" =~ ^(production|testnet|dry-run)$ ]]; then
61 | echo -e "${RED}Invalid environment: $ENVIRONMENT${NC}"
62 | exit 1
63 | fi
64 |
65 | # Check for required files
66 | check_requirements() {
67 | local missing=false
68 |
69 | if [ ! -f ".env" ]; then
70 | echo -e "${RED}Missing .env file${NC}"
71 | missing=true
72 | fi
73 |
74 | if [ ! -f ".env.execution" ]; then
75 | echo -e "${RED}Missing .env.execution file${NC}"
76 | missing=true
77 | fi
78 |
79 | if [ ! -f "docker-compose.yml" ]; then
80 | echo -e "${RED}Missing docker-compose.yml file${NC}"
81 | missing=true
82 | fi
83 |
84 | if [ "$missing" = true ]; then
85 | echo -e "${RED}Please ensure all required files exist${NC}"
86 | exit 1
87 | fi
88 | }
89 |
90 | # Deploy function
91 | deploy() {
92 | echo -e "${YELLOW}Starting deployment...${NC}"
93 |
94 | # Set environment variable
95 | export BOT_MODE=$ENVIRONMENT
96 |
97 | # Build if needed
98 | if [ "$FORCE_BUILD" = true ] || [ ! "$(docker images -q hyperliquid-arb-bot 2> /dev/null)" ]; then
99 | echo -e "${YELLOW}Building Docker image...${NC}"
100 | docker-compose build --no-cache bot
101 | fi
102 |
103 | # Production warning
104 | if [ "$ENVIRONMENT" = "production" ]; then
105 | echo -e "${RED}⚠️ WARNING: Deploying to PRODUCTION${NC}"
106 | echo -e "${RED}⚠️ Real money will be at risk!${NC}"
107 | echo ""
108 | read -p "Type 'CONFIRM' to proceed: " confirm
109 | if [ "$confirm" != "CONFIRM" ]; then
110 | echo -e "${YELLOW}Deployment cancelled${NC}"
111 | exit 0
112 | fi
113 | fi
114 |
115 | # Start services
116 | echo -e "${YELLOW}Starting services...${NC}"
117 | docker-compose up -d
118 |
119 | # Wait for health check
120 | echo -e "${YELLOW}Waiting for services to be healthy...${NC}"
121 | sleep 10
122 |
123 | # Check status
124 | docker-compose ps
125 |
126 | echo ""
127 | echo -e "${GREEN}✅ Deployment complete!${NC}"
128 | echo ""
129 | echo -e "${GREEN}Dashboard:${NC} http://localhost:4000"
130 | echo -e "${GREEN}Grafana:${NC} http://localhost:3000 (admin/admin)"
131 | echo -e "${GREEN}Prometheus:${NC} http://localhost:9090"
132 | echo ""
133 | echo -e "${YELLOW}View logs:${NC} docker-compose logs -f bot"
134 | echo -e "${YELLOW}Stop services:${NC} ./deploy.sh --action stop"
135 | }
136 |
137 | # Stop function
138 | stop() {
139 | echo -e "${YELLOW}Stopping services...${NC}"
140 | docker-compose down
141 | echo -e "${GREEN}✅ Services stopped${NC}"
142 | }
143 |
144 | # Restart function
145 | restart() {
146 | echo -e "${YELLOW}Restarting services...${NC}"
147 | docker-compose restart
148 | echo -e "${GREEN}✅ Services restarted${NC}"
149 | }
150 |
151 | # Status function
152 | status() {
153 | echo -e "${YELLOW}Service status:${NC}"
154 | docker-compose ps
155 | echo ""
156 | echo -e "${YELLOW}Resource usage:${NC}"
157 | docker stats --no-stream
158 | }
159 |
160 | # Main execution
161 | check_requirements
162 |
163 | case $ACTION in
164 | deploy)
165 | deploy
166 | ;;
167 | stop)
168 | stop
169 | ;;
170 | restart)
171 | restart
172 | ;;
173 | status)
174 | status
175 | ;;
176 | *)
177 | echo -e "${RED}Invalid action: $ACTION${NC}"
178 | exit 1
179 | ;;
180 | esac
181 |
--------------------------------------------------------------------------------
/src/metrics/execution.ts:
--------------------------------------------------------------------------------
1 | import client from 'prom-client';
2 | import { register as apiRegister } from '../api/middleware/metrics';
3 |
4 | // Reuse the API metrics registry for a single /metrics endpoint
5 | export const register = apiRegister;
6 |
7 | // Hyperliquid HTTP metrics
8 | const hlHttpLatency = new client.Histogram({
9 | name: 'hyperliquid_http_latency_ms',
10 | help: 'Latency of Hyperliquid HTTP calls',
11 | labelNames: ['endpoint', 'status'],
12 | buckets: [50, 100, 200, 400, 800, 1500, 3000, 5000, 10000],
13 | });
14 |
15 | const hlHttpCount = new client.Counter({
16 | name: 'hyperliquid_http_requests_total',
17 | help: 'Total Hyperliquid HTTP requests',
18 | labelNames: ['endpoint', 'status'],
19 | });
20 |
21 | // WS reconnects
22 | const wsReconnects = new client.Counter({
23 | name: 'hyperliquid_ws_reconnects_total',
24 | help: 'Total number of WebSocket reconnect attempts',
25 | });
26 |
27 | // Execution metrics
28 | const execOrderLatency = new client.Histogram({
29 | name: 'execution_order_latency_ms',
30 | help: 'Per-order execution latency until REST response',
31 | buckets: [20, 50, 100, 200, 400, 800, 1500, 3000, 5000, 10000],
32 | });
33 |
34 | const execOrders = new client.Counter({
35 | name: 'execution_orders_total',
36 | help: 'Execution orders by status and side',
37 | labelNames: ['status', 'side', 'reason'],
38 | });
39 |
40 | const execActiveTasks = new client.Gauge({
41 | name: 'execution_active_tasks',
42 | help: 'Number of active execution tasks',
43 | });
44 |
45 | const execQueueLength = new client.Gauge({
46 | name: 'execution_queue_length',
47 | help: 'Number of signals waiting in the queue',
48 | });
49 |
50 | register.registerMetric(hlHttpLatency);
51 | register.registerMetric(hlHttpCount);
52 | register.registerMetric(wsReconnects);
53 | register.registerMetric(execOrderLatency);
54 | register.registerMetric(execOrders);
55 | register.registerMetric(execActiveTasks);
56 | register.registerMetric(execQueueLength);
57 |
58 | export function recordHttpRequest(endpoint: string, status: string, latencyMs: number) {
59 | const labels = { endpoint, status } as const;
60 | hlHttpCount.inc(labels);
61 | hlHttpLatency.observe(labels, latencyMs);
62 | }
63 |
64 | export function incWsReconnect() {
65 | wsReconnects.inc();
66 | }
67 |
68 | export function observeOrderLatency(ms: number) {
69 | execOrderLatency.observe(ms);
70 | }
71 |
72 | export function incOrder(status: 'success' | 'failed' | 'rejected', side: 'buy' | 'sell' | 'n/a', reason = '') {
73 | execOrders.inc({ status, side, reason });
74 | }
75 |
76 | export function setExecutionGauges(active: number, queueLen: number) {
77 | execActiveTasks.set(active);
78 | execQueueLength.set(queueLen);
79 | }
80 |
81 | // ===== Risk & Equity Metrics =====
82 | const riskRejections = new client.Counter({
83 | name: 'risk_rejections_total',
84 | help: 'Total number of risk-based rejections',
85 | labelNames: ['reason'],
86 | });
87 |
88 | const circuitBreakerTrips = new client.Counter({
89 | name: 'circuit_breaker_trips_total',
90 | help: 'Number of times the circuit breaker was tripped',
91 | labelNames: ['reason'],
92 | });
93 |
94 | const circuitBreakerState = new client.Gauge({
95 | name: 'circuit_breaker_state',
96 | help: 'Circuit breaker state: 1=tripped, 0=normal',
97 | });
98 |
99 | const equityLast = new client.Gauge({
100 | name: 'equity_last_usd',
101 | help: 'Last observed account equity in USD',
102 | });
103 |
104 | const equityPeak = new client.Gauge({
105 | name: 'equity_peak_usd',
106 | help: 'Peak observed account equity in USD',
107 | });
108 |
109 | const equityDailyBaseline = new client.Gauge({
110 | name: 'equity_daily_baseline_usd',
111 | help: 'Daily baseline equity at start of day in USD',
112 | });
113 |
114 | const drawdownGauge = new client.Gauge({
115 | name: 'drawdown_fraction',
116 | help: 'Current drawdown fraction (0-1) from peak equity',
117 | });
118 |
119 | const dailyLossGauge = new client.Gauge({
120 | name: 'daily_loss_usd',
121 | help: 'Current daily loss versus baseline in USD',
122 | });
123 |
124 | register.registerMetric(riskRejections);
125 | register.registerMetric(circuitBreakerTrips);
126 | register.registerMetric(circuitBreakerState);
127 | register.registerMetric(equityLast);
128 | register.registerMetric(equityPeak);
129 | register.registerMetric(equityDailyBaseline);
130 | register.registerMetric(drawdownGauge);
131 | register.registerMetric(dailyLossGauge);
132 |
133 | export function incRiskRejection(reason: string) {
134 | riskRejections.inc({ reason });
135 | }
136 |
137 | export function incCircuitBreakerTrip(reason: string) {
138 | circuitBreakerTrips.inc({ reason });
139 | }
140 |
141 | export function setCircuitBreakerState(on: boolean) {
142 | circuitBreakerState.set(on ? 1 : 0);
143 | }
144 |
145 | export function setEquityMetrics(equityUsd: number, peakUsd: number, baselineUsd: number) {
146 | equityLast.set(equityUsd);
147 | if (peakUsd > 0) equityPeak.set(peakUsd);
148 | if (baselineUsd > 0) equityDailyBaseline.set(baselineUsd);
149 | }
150 |
151 | export function setDrawdown(fraction: number) {
152 | drawdownGauge.set(Math.max(0, fraction));
153 | }
154 |
155 | export function setDailyLoss(usd: number) {
156 | dailyLossGauge.set(Math.max(0, usd));
157 | }
158 |
--------------------------------------------------------------------------------
/python-backend/app/goldrush_client.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Any, Dict, Optional
3 |
4 | import httpx
5 |
6 |
7 | class GoldRushClient:
8 | """Minimal GoldRush SDK wrapper for HyperEVM data.
9 |
10 | Notes:
11 | - Endpoints are thin wrappers; if unavailable, we return None and let callers fallback.
12 | - To reduce latency, callers should cache results between requests when feasible.
13 | """
14 |
15 | def __init__(self, api_key: Optional[str] = None, base_url: Optional[str] = None, timeout: float = 8.0):
16 | self.api_key = api_key or os.getenv("GOLD_RUSH_API_KEY", "")
17 | self.base_url = (base_url or os.getenv("GOLD_RUSH_BASE_URL") or "https://goldrush.dev")
18 | self._client = httpx.AsyncClient(timeout=timeout)
19 |
20 | def _headers(self) -> Dict[str, str]:
21 | headers = {}
22 | if self.api_key:
23 | headers["Authorization"] = f"Bearer {self.api_key}"
24 | return headers
25 |
26 | async def _rpc_gas_price(self) -> Optional[int]:
27 | """Fallback: fetch gas price via JSON-RPC eth_gasPrice using RPC_URL env."""
28 | rpc = os.getenv("RPC_URL", "").strip() or os.getenv("HYPEREVM_RPC", "").strip()
29 | if not rpc:
30 | return None
31 | try:
32 | payload = {"jsonrpc": "2.0", "method": "eth_gasPrice", "params": [], "id": 1}
33 | r = await self._client.post(rpc, json=payload, headers={"Content-Type": "application/json"})
34 | r.raise_for_status()
35 | data = r.json()
36 | if isinstance(data, dict) and isinstance(data.get("result"), str):
37 | return int(data["result"], 16)
38 | except Exception:
39 | return None
40 | return None
41 |
42 | async def _coingecko_price_usd(self) -> Optional[float]:
43 | """Fallback: CoinGecko simple price using COINGECKO_NATIVE_ID env."""
44 | coin_id = os.getenv("COINGECKO_NATIVE_ID", "").strip()
45 | if not coin_id:
46 | return None
47 | try:
48 | url = f"https://api.coingecko.com/api/v3/simple/price?ids={coin_id}&vs_currencies=usd"
49 | r = await self._client.get(url)
50 | r.raise_for_status()
51 | data = r.json()
52 | if isinstance(data, dict) and coin_id in data and isinstance(data[coin_id], dict):
53 | val = data[coin_id].get("usd")
54 | return float(val) if val is not None else None
55 | except Exception:
56 | return None
57 | return None
58 |
59 | async def get_native_gas_price_wei(self, chain: str = "hyperevm-mainnet") -> Optional[int]:
60 | """Return current native gas price in wei for the chain."""
61 | try:
62 | # Placeholder endpoint path; adjust to official GoldRush gas endpoint when known.
63 | url = f"{self.base_url}/api/v1/{chain}/gas-price"
64 | r = await self._client.get(url, headers=self._headers())
65 | r.raise_for_status()
66 | data = r.json()
67 | # Expecting something like {"gas_price_wei": 1234567890}
68 | val = int(data.get("gas_price_wei")) if isinstance(data, dict) and data.get("gas_price_wei") is not None else None
69 | if val is not None:
70 | return val
71 | except Exception:
72 | pass
73 | # Fallback to RPC gas price
74 | return await self._rpc_gas_price()
75 |
76 | async def get_native_price_usd(self, chain: str = "hyperevm-mainnet") -> Optional[float]:
77 | """Return USD price for the chain's native token (e.g., HYPE)."""
78 | # Immediate override via env for MVP
79 | try:
80 | override = os.getenv("GOLD_RUSH_NATIVE_PRICE_USD", "").strip()
81 | if override:
82 | return float(override)
83 | except Exception:
84 | pass
85 | try:
86 | url = f"{self.base_url}/api/v1/{chain}/native-price-usd"
87 | r = await self._client.get(url, headers=self._headers())
88 | r.raise_for_status()
89 | data = r.json()
90 | # Expecting {"price_usd": 1.23}
91 | val = float(data.get("price_usd")) if isinstance(data, dict) and data.get("price_usd") is not None else None
92 | if val is not None:
93 | return val
94 | except Exception:
95 | pass
96 | # Fallback to CoinGecko if configured, else last-resort: 1.0
97 | cg = await self._coingecko_price_usd()
98 | if cg is not None:
99 | return cg
100 | return 1.0
101 |
102 | async def estimate_tx_gas_usd(self, chain: str = "hyperevm-mainnet", gas_limit: int = 250_000) -> Optional[float]:
103 | """Estimate USD cost for a transaction with the given gas_limit.
104 |
105 | gas_usd = gas_price_wei * gas_limit / 1e18 * native_price_usd
106 | """
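107 |         # Example: a 1 gwei gas price (1e9 wei) with gas_limit=250_000 and a $1.00 native
108 |         # token price gives 1e9 * 250_000 / 1e18 * 1.0 = $0.00025 per transaction.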
107 | try:
108 | gp_wei = await self.get_native_gas_price_wei(chain)
109 | price_usd = await self.get_native_price_usd(chain)
110 | if gp_wei is None or price_usd is None:
111 | return None
112 | eth_cost = (gp_wei * gas_limit) / 1e18
113 | return float(eth_cost * price_usd)
114 | except Exception:
115 | return None
116 |
--------------------------------------------------------------------------------
/src/data/markets.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Market data aggregator and snapshot builder
3 | */
4 |
5 | import { DexQuote, MarketSnapshot } from '../types/market.js';
6 | import { goldRushClient } from './goldrush.js';
7 |
8 | export class MarketAggregator {
9 | private readonly STALE_DATA_THRESHOLD_MS = 2000; // 2 seconds
10 |
11 | /**
12 | * Build a complete market snapshot for a trading pair
13 | */
14 |   async buildSnapshot(pair: string): Promise<MarketSnapshot> {
15 | const startTime = Date.now();
16 |
17 | // Fetch data in parallel
18 | const [quotes, refData] = await Promise.all([
19 | goldRushClient.getQuotes(pair),
20 | goldRushClient.getReferenceData(pair),
21 | ]);
22 |
23 | // Check for stale quotes
24 | const now = Date.now();
25 | const freshQuotes = quotes.filter(q => {
26 | const age = now - q.ts;
27 | if (age > this.STALE_DATA_THRESHOLD_MS) {
28 | console.warn(`Stale quote detected for ${q.dex}: age=${age}ms`);
29 | return false;
30 | }
31 | return true;
32 | });
33 |
34 | if (freshQuotes.length === 0) {
35 | throw new Error(`No fresh quotes available for ${pair}`);
36 | }
37 |
38 | // Calculate cross-venue edge
39 | const { bestBuy, bestSell, edgeBps } = this.calculateCrossVenueEdge(freshQuotes);
40 |
41 | const snapshot: MarketSnapshot = {
42 | quotes: freshQuotes,
43 | refPriceUsd: refData.ref_price_usd,
44 | volatility: refData.volatility_24h,
45 | funding: refData.funding_rate,
46 | wsLatencyMs: goldRushClient.getWsLatency(),
47 | timestamp: now,
48 | };
49 |
50 | // Add metadata
51 | Object.assign(snapshot, {
52 | bestBuyVenue: bestBuy?.dex,
53 | bestSellVenue: bestSell?.dex,
54 | crossVenueEdgeBps: edgeBps,
55 | computeTimeMs: Date.now() - startTime,
56 | });
57 |
58 | return snapshot;
59 | }
60 |
61 | /**
62 | * Calculate the best cross-venue arbitrage opportunity
63 | */
64 | calculateCrossVenueEdge(quotes: DexQuote[]): {
65 | bestBuy: DexQuote | null;
66 | bestSell: DexQuote | null;
67 | edgeBps: number;
68 | } {
69 | if (quotes.length < 2) {
70 | return { bestBuy: null, bestSell: null, edgeBps: 0 };
71 | }
72 |
73 | // Find best buy (lowest ask) and best sell (highest bid)
74 | let bestBuy: DexQuote | null = null;
75 | let bestSell: DexQuote | null = null;
76 |
77 | for (const quote of quotes) {
78 | // Adjust price for fees
79 | const buyPrice = quote.price * (1 + quote.feeBps / 10000);
80 | const sellPrice = quote.price * (1 - quote.feeBps / 10000);
81 |
82 | if (!bestBuy || buyPrice < bestBuy.price * (1 + bestBuy.feeBps / 10000)) {
83 | bestBuy = quote;
84 | }
85 |
86 | if (!bestSell || sellPrice > bestSell.price * (1 - bestSell.feeBps / 10000)) {
87 | bestSell = quote;
88 | }
89 | }
90 |
91 | if (!bestBuy || !bestSell || bestBuy.dex === bestSell.dex) {
92 | return { bestBuy, bestSell, edgeBps: 0 };
93 | }
94 |
95 | // Calculate edge in basis points
96 | const buyPriceAdjusted = bestBuy.price * (1 + bestBuy.feeBps / 10000);
97 | const sellPriceAdjusted = bestSell.price * (1 - bestSell.feeBps / 10000);
98 | const edgeBps = ((sellPriceAdjusted - buyPriceAdjusted) / buyPriceAdjusted) * 10000;
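99 |     // e.g. buying at 3500 with a 5 bps fee (3501.75) and selling at 3505 with a 5 bps fee
100 |     // (3503.2475) leaves roughly a 4.3 bps edge.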
99 |
100 | return { bestBuy, bestSell, edgeBps: Math.max(0, edgeBps) };
101 | }
102 |
103 | /**
104 | * Get aggregated depth at a price level
105 | */
106 | getAggregatedDepth(quotes: DexQuote[], priceLevel: number, tolerance: number = 0.001): number {
107 | return quotes.reduce((total, quote) => {
108 | const priceDiff = Math.abs(quote.price - priceLevel) / priceLevel;
109 | if (priceDiff <= tolerance) {
110 | return total + quote.depthUsd;
111 | }
112 | return total;
113 | }, 0);
114 | }
115 |
116 | /**
117 | * Check if market data is fresh enough for trading
118 | */
119 | isDataFresh(snapshot: MarketSnapshot): boolean {
120 | const age = Date.now() - snapshot.timestamp;
121 | return age < this.STALE_DATA_THRESHOLD_MS;
122 | }
123 |
124 | /**
125 | * Calculate effective liquidity for a given size
126 | */
127 | calculateEffectiveLiquidity(quotes: DexQuote[], sizeUsd: number): {
128 | effectivePrice: number;
129 | slippageBps: number;
130 | availableLiquidity: number;
131 | } {
132 | // Sort quotes by price (best first)
133 | const sortedQuotes = [...quotes].sort((a, b) => a.price - b.price);
134 |
135 | let remainingSize = sizeUsd;
136 | let totalCost = 0;
137 | let filledSize = 0;
138 |
139 | for (const quote of sortedQuotes) {
140 | const fillSize = Math.min(remainingSize, quote.depthUsd);
141 | totalCost += fillSize * quote.price * (1 + quote.feeBps / 10000);
142 | filledSize += fillSize;
143 | remainingSize -= fillSize;
144 |
145 | if (remainingSize <= 0) break;
146 | }
147 |
148 | if (filledSize === 0) {
149 | return {
150 | effectivePrice: 0,
151 | slippageBps: 0,
152 | availableLiquidity: 0,
153 | };
154 | }
155 |
156 | const effectivePrice = totalCost / filledSize;
157 | const bestPrice = sortedQuotes[0].price;
158 | const slippageBps = ((effectivePrice - bestPrice) / bestPrice) * 10000;
159 |
160 | return {
161 | effectivePrice,
162 | slippageBps: Math.max(0, slippageBps),
163 | availableLiquidity: quotes.reduce((sum, q) => sum + q.depthUsd, 0),
164 | };
165 | }
166 | }
167 |
168 | // Export singleton instance
169 | export const marketAggregator = new MarketAggregator();
170 |
--------------------------------------------------------------------------------
/src/eval/model.ts:
--------------------------------------------------------------------------------
1 | import { ArbInputs, ArbResult } from "./types";
2 | import { effectiveSlipBps } from "./slippage";
3 | import { decayEdge, fillProb } from "./latency";
4 |
5 | // Re-export types for external use
6 | export type { ArbInputs as ArbitrageInput, ArbResult as ArbitrageResult } from "./types";
7 |
8 | function clamp01(x: number) { return Math.max(0, Math.min(1, x)); }
9 | function normalVar(std?: number) { return Math.max(0, (std ?? 0) ** 2); }
10 |
11 | function flashCostUsd(sizeUsd: number, fees: { flashFeeBps: number; referralBps: number; executorFeeUsd: number; flashFixedUsd: number; }): number {
12 | const varBps = (fees.flashFeeBps + fees.referralBps) / 1e4;
13 | return sizeUsd * varBps + (fees.executorFeeUsd || 0) + (fees.flashFixedUsd || 0);
14 | }
15 |
16 | function postRouterFees(grossUsd: number, totalFeesBps: number): number {
17 | return grossUsd * (1 - (totalFeesBps || 0) / 1e4);
18 | }
19 |
20 | function secondsFromLatency(latencySec: number) {
21 | return Math.max(1e-3, latencySec);
22 | }
23 |
24 | export function evaluateArb(inputs: ArbInputs): ArbResult {
25 | const latSec = Math.max(0, inputs.latency.latencySec);
26 | const edgeEffBps = decayEdge(inputs.edgeBps, latSec, inputs.latency.edgeDecayBpsPerSec);
27 | const secs = secondsFromLatency(latSec);
28 |
29 | // start with proposed size; later line search adjusts
30 | const size0 = Math.max(0, inputs.notionalUsd);
31 |
32 | const slipBps = effectiveSlipBps(inputs.slippage, size0);
33 | const grossUsd = (edgeEffBps / 1e4) * size0; // edge-based PnL
34 | const feeUsd = (inputs.fees.totalFeesBps / 1e4) * size0; // fees charged on notional
35 |
36 | const gasMean = Math.max(0, inputs.frictions.gasUsdMean);
37 | const gasVar = normalVar(inputs.frictions.gasUsdStd);
38 | const advMean = Math.max(0, inputs.frictions.adverseUsdMean);
39 | const advVar = normalVar(inputs.frictions.adverseUsdStd);
40 | const extraUsd = Math.max(0, inputs.frictions.extraUsd || 0);
41 | const mevPenaltyUsd = Math.max(0, inputs.frictions.mevPenaltyUsd || 0);
42 |
43 | const flashUsd = inputs.flashEnabled ? flashCostUsd(size0, inputs.fees) : 0;
44 | const slipCostUsd = (slipBps / 1e4) * size0;
45 |
46 | const theta = inputs.latency.theta ?? 0.15;
47 | const pS = clamp01(fillProb(inputs.latency.baseFillProb, latSec, theta));
48 | const pF0 = clamp01(inputs.failures.failBeforeFillProb);
49 | const pF1 = clamp01(inputs.failures.failBetweenLegsProb);
50 | const pFR = clamp01(inputs.failures.reorgOrMevProb);
51 | const pSum = pS + pF0 + pF1 + pFR;
52 | const pNone = pSum < 1 ? clamp01(1 - pSum) : 0;
53 |
54 | // Payoffs per state
55 | // Net payoff when success: gross edge minus fees, slippage, and frictions
56 | const payoffS = grossUsd - feeUsd - slipCostUsd - gasMean - advMean - flashUsd - extraUsd;
57 | const unwindCostUsd = slipCostUsd * 0.7; // conservative unwind approx
58 | const payoffF1 = - unwindCostUsd - gasMean - advMean;
59 | const payoffF0 = - gasMean;
60 | const payoffFR = - gasMean - mevPenaltyUsd;
61 |
62 | const EV = pS*payoffS + pF0*payoffF0 + pF1*payoffF1 + pFR*payoffFR + pNone*0;
63 | // variance approximation: mixture variance + exogenous gas/adverse variances
64 | const mean = EV;
65 | const terms = [payoffS, payoffF0, payoffF1, payoffFR, 0];
66 | const probs = [pS, pF0, pF1, pFR, pNone];
67 | let mixVar = 0;
68 | for (let i=0;i<terms.length;i++){ mixVar += probs[i] * Math.pow(terms[i] - mean, 2); }
69 | const variance = mixVar + gasVar + advVar;
70 | const lambda = Math.max(0, inputs.riskAversion ?? 0);
71 |
72 | // line search over candidate sizes; keep the size with the best risk-adjusted EV per second
73 | const capUsd = Math.max(0, inputs.capitalUsd ?? 0) || size0;
74 | const maxSize = Math.max(1, Math.min(size0, capUsd));
75 | const steps = 24;
76 | let bestEVadjPerSec = -Infinity;
77 | let bestSize = 0;
78 | let bestSnapshot: any = null;
79 |
80 | for (let s = 1; s <= steps; s++) {
81 | const size = (maxSize * s) / steps;
82 | const slip = effectiveSlipBps(inputs.slippage, size);
83 | const gross = (edgeEffBps / 1e4) * size;
84 | const fee = (inputs.fees.totalFeesBps / 1e4) * size;
85 | const slipCost = (slip / 1e4) * size;
86 | const flash = inputs.flashEnabled ? flashCostUsd(size, inputs.fees) : 0;
87 |
88 | // per-state payoffs at this size (same conservative unwind approximation as above)
89 | const paySuccess = gross - fee - slipCost - gasMean - advMean - flash - extraUsd;
90 | const payBetween = -(slipCost * 0.7) - gasMean - advMean;
91 | const payBefore = -gasMean;
92 | const payReorg = -gasMean - mevPenaltyUsd;
93 |
94 | const EV_i = pS*paySuccess + pF0*payBefore + pF1*payBetween + pFR*payReorg;
95 | const EVadj_i = EV_i - lambda * variance;
96 | const ev_per_sec_i = EVadj_i / secs;
97 | if (ev_per_sec_i > bestEVadjPerSec) {
98 | bestEVadjPerSec = ev_per_sec_i;
99 | bestSize = size;
100 | bestSnapshot = {
101 | net_usd_est: EV_i,
102 | ev_per_sec: ev_per_sec_i,
103 | size_opt_usd: size,
104 | p_success: pS,
105 | slip_bps_eff: slip,
106 | breakeven_bps: (slip + inputs.fees.totalFeesBps) + ((gasMean+advMean+flash+extraUsd)/Math.max(1e-9,size))*1e4,
107 | score: ev_per_sec_i,
108 | gas_usd: gasMean,
109 | seconds: secs,
110 | flash_fee_bps: inputs.fees.flashFeeBps,
111 | referral_bps: inputs.fees.referralBps,
112 | flash_fixed_usd: inputs.fees.flashFixedUsd,
113 | executor_fee_usd: inputs.fees.executorFeeUsd,
114 | flash_cost_usd: flash,
115 | components: {
116 | edge_eff_bps: edgeEffBps,
117 | after_router_lp_usd: gross - (inputs.fees.totalFeesBps/1e4)*size,
118 | slip_cost_usd: slipCost,
119 | }
120 | };
121 | }
122 | }
123 |
124 | return bestSnapshot as ArbResult;
125 | }
126 |
127 | // Batch evaluation function
128 | export async function evaluateBatch(inputs: ArbInputs[]): Promise<ArbResult[]> {
129 | // Process evaluations in parallel for better performance
130 | return Promise.all(inputs.map(input => Promise.resolve(evaluateArb(input))));
131 | }
132 |
--------------------------------------------------------------------------------
/src/eval/arb_math.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * Mathematical utilities for arbitrage calculations
3 | */
4 |
5 | import { env } from '../config/env.js';
6 |
7 | /**
8 | * Convert USD amount to basis points
9 | */
10 | export function usdToBps(usd: number, notionalUsd: number): number {
11 | if (notionalUsd === 0) return 0;
12 | return (usd / notionalUsd) * 10000;
13 | }
14 |
15 | /**
16 | * Convert basis points to USD amount
17 | */
18 | export function bpsToUsd(bps: number, notionalUsd: number): number {
19 | return (bps / 10000) * notionalUsd;
20 | }
21 |
22 | /**
23 | * Calculate total flash loan cost in USD
24 | */
25 | export function flashCostUsd(notionalUsd: number, cfg: {
26 | flashFeeBps: number;
27 | flashFixedUsd: number;
28 | }): number {
29 | const percentageFee = bpsToUsd(cfg.flashFeeBps, notionalUsd);
30 | return percentageFee + cfg.flashFixedUsd;
31 | }
32 |
33 | /**
34 | * Combine multiple fee components in basis points
35 | */
36 | export function combineFeesBps(
37 | routerBps: number,
38 | lpBps: number,
39 | extraBps: number = 0
40 | ): number {
41 | return routerBps + lpBps + extraBps;
42 | }
43 |
44 | /**
45 | * Calculate effective slippage for a given size
46 | */
47 | export function calculateSlippage(
48 | sizeUsd: number,
49 | liquidityUsd: number,
50 | alpha: number = env.SLIP_ALPHA,
51 | k: number = env.SLIP_K
52 | ): number {
53 | if (liquidityUsd === 0) return Infinity;
54 | return k * Math.pow(sizeUsd / liquidityUsd, alpha);
55 | }
56 |
57 | /**
58 | * Calculate fill probability with latency decay
59 | */
60 | export function calculateFillProbability(
61 | latencyMs: number,
62 | baseFillProb: number = env.BASE_FILL_PROB,
63 | theta: number = env.FILL_THETA
64 | ): number {
65 | const latencySec = latencyMs / 1000;
66 | return baseFillProb * Math.exp(-theta * latencySec);
67 | }
68 |
69 | /**
70 | * Apply edge decay based on latency
71 | */
72 | export function applyEdgeDecay(
73 | edgeBps: number,
74 | latencyMs: number,
75 | decayRate: number = env.EDGE_DECAY_BPS_PER_SEC
76 | ): number {
77 | const latencySec = latencyMs / 1000;
78 | const decay = decayRate * latencySec;
79 | return Math.max(0, edgeBps - decay);
80 | }
81 |
82 | /**
83 | * Calculate breakeven threshold in basis points
84 | */
85 | export function calculateBreakevenBps(
86 | totalFeesBps: number,
87 | fixedCostsUsd: number,
88 | notionalUsd: number
89 | ): number {
90 | const fixedCostsBps = usdToBps(fixedCostsUsd, notionalUsd);
91 | return totalFeesBps + fixedCostsBps;
92 | }
93 |
94 | /**
95 | * Calculate expected value with failure probability
96 | */
97 | export function calculateExpectedValue(
98 | profitUsd: number,
99 | pSuccess: number,
100 | failureCostUsd: number = 0
101 | ): number {
102 | return profitUsd * pSuccess - failureCostUsd * (1 - pSuccess);
103 | }
104 |
105 | /**
106 | * Apply mean-variance adjustment
107 | */
108 | export function applyMeanVarianceAdjustment(
109 | expectedValue: number,
110 | variance: number,
111 | riskAversion: number = env.RISK_AVERSION_LAMBDA
112 | ): number {
113 | return expectedValue - riskAversion * variance;
114 | }
115 |
116 | /**
117 | * Sample from normal distribution (Box-Muller transform)
118 | */
119 | export function sampleNormal(mean: number, std: number): number {
120 | const u1 = Math.random();
121 | const u2 = Math.random();
122 | const z = Math.sqrt(-2 * Math.log(u1)) * Math.cos(2 * Math.PI * u2);
123 | return mean + std * z;
124 | }
125 |
126 | /**
127 | * Calculate profit after all fees and costs
128 | */
129 | export function calculateNetProfit(
130 | grossProfitBps: number,
131 | sizeUsd: number,
132 | fees: {
133 | totalFeesBps: number;
134 | flashFeeBps: number;
135 | flashFixedUsd: number;
136 | executorFeeUsd: number;
137 | gasUsd: number;
138 | }
139 | ): number {
140 | const grossProfitUsd = bpsToUsd(grossProfitBps, sizeUsd);
141 | const tradingFeesUsd = bpsToUsd(fees.totalFeesBps, sizeUsd);
142 | const flashFeesUsd = flashCostUsd(sizeUsd, {
143 | flashFeeBps: fees.flashFeeBps,
144 | flashFixedUsd: fees.flashFixedUsd,
145 | });
146 |
147 | return grossProfitUsd - tradingFeesUsd - flashFeesUsd - fees.executorFeeUsd - fees.gasUsd;
148 | }
149 |
150 | /**
151 | * Check if trade meets minimum profit threshold
152 | */
153 | export function meetsMinimumProfit(
154 | netProfitUsd: number,
155 | minThresholdUsd: number = 1
156 | ): boolean {
157 | return netProfitUsd >= minThresholdUsd;
158 | }
159 |
160 | /**
161 | * Calculate optimal size for linear impact model
162 | */
163 | export function calculateOptimalSizeLinear(
164 | edgeBps: number,
165 | totalCostBps: number,
166 | impactCoefficient: number
167 | ): number {
168 | if (impactCoefficient === 0) return Infinity;
169 | return (edgeBps - totalCostBps) / (2 * impactCoefficient);
170 | }
171 |
172 | /**
173 | * Estimate variance of profit
174 | */
175 | export function estimateProfitVariance(
176 | profitUsd: number,
177 | priceVolatility: number,
178 | executionUncertainty: number = 0.1
179 | ): number {
180 | const priceVar = Math.pow(profitUsd * priceVolatility, 2);
181 | const execVar = Math.pow(profitUsd * executionUncertainty, 2);
182 | const gasVar = Math.pow(env.GAS_USD_STD, 2);
183 | const adverseVar = Math.pow(env.ADVERSE_USD_STD, 2);
184 |
185 | return priceVar + execVar + gasVar + adverseVar;
186 | }
187 |
188 | /**
189 | * Calculate Sharpe ratio for a trade
190 | */
191 | export function calculateSharpeRatio(
192 | expectedReturn: number,
193 | variance: number,
194 | riskFreeRate: number = 0
195 | ): number {
196 | const std = Math.sqrt(variance);
197 | if (std === 0) return 0;
198 | return (expectedReturn - riskFreeRate) / std;
199 | }
200 |
201 | /**
202 | * Convert Wei to USD given a price
203 | */
204 | export function weiToUsd(wei: bigint, decimals: number, priceUsd: number): number {
205 |   const divisor = BigInt(10) ** BigInt(decimals); // exponentiate as BigInt so large `decimals` don't lose float precision
206 | const tokens = Number(wei) / Number(divisor);
207 | return tokens * priceUsd;
208 | }
209 |
210 | /**
211 | * Convert USD to Wei given a price
212 | */
213 | export function usdToWei(usd: number, decimals: number, priceUsd: number): bigint {
214 | const tokens = usd / priceUsd;
215 |   const multiplier = BigInt(10) ** BigInt(decimals); // exponentiate as BigInt so large `decimals` don't lose float precision
216 | return BigInt(Math.floor(tokens * Number(multiplier)));
217 | }
218 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Hyperliquid Arbitrage Bot (MVP)
2 |
3 | Monorepo for a cross-DEX arbitrage bot targeting PRJX and HyperSwap on Hyperliquid EVM.
4 |
5 | ## Structure
6 |
7 | - `rust-engine/` — Latency-sensitive off-chain arbitrage engine (Rust)
8 | - `python-backend/` — FastAPI service (under `python-backend/app/`) with endpoints for health, strategies, Goldsky integration, and a dry-run trade executor
9 | - `web/` — Lightweight dashboard (HTML/JS/CSS)
10 | - `contracts/` — Core arbitrage smart contract (Solidity)
11 | - `config/` — Environment examples and local settings
12 |
13 | ### HyperLend Flash-Loan Executor
14 |
15 | - Contract: `contracts/ArbitrageExecutor.sol` (Solc ^0.8.24)
16 | - Receives a flash loan from `HYPERLEND_POOL` and performs two generic router swaps via opaque calldata.
17 | - Safety: `Ownable2Step`, `Pausable`, `ReentrancyGuard`, and `SafeERC20`-style helpers.
18 | - Callbacks supported (provider decides which is used):
19 | - `onFlashLoan(address initiator, address asset, uint256 amount, uint256 fee, bytes params)`
20 | - `executeOperation(address asset, uint256 amount, uint256 fee, address initiator, bytes params)`
21 | - Parameters format (ABI tuple) decoded in callback:
22 | - `(address buyRouter,address buySpender,bytes buyCalldata,address sellRouter,address sellSpender,bytes sellCalldata,address tokenBorrowed,address tokenIntermediate,address profitToken,uint256 minProfit)`
23 | - Emits `FlashArbExecuted(asset, amount, fee, profitToken, profit)` upon success.
24 |
25 | - Example script: `scripts/flashloan.ts`
26 | - Encodes FlashParams and calls `initiateFlashArb(asset, amount, params, referralCode)` on the executor.
27 | - ENV: `RPC_URL`, `OWNER_PK`, `EXECUTOR`, `ASSET`, `AMOUNT`.
28 | - You must fill in router addresses and calldata externally.
29 |
30 | > NOTE: Replace `IHyperLendPoolSimple`/`IHyperLendPoolMultiAsset` and callback return conventions with the official HyperLend ABI before production.
31 |
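For orientation, here is a minimal sketch of how the `FlashParams` tuple above can be encoded and submitted, in the spirit of `scripts/flashloan.ts`. It assumes ethers v6; every address and calldata value below is a placeholder you must supply, and the `uint16 referralCode` type (and a zero referral code) is an assumption to verify against the deployed executor ABI.

```ts
// Hedged sketch only: mirrors the FlashParams layout documented above. Assumes ethers v6.
import { AbiCoder, Contract, JsonRpcProvider, Wallet } from "ethers";

async function main() {
  const provider = new JsonRpcProvider(process.env.RPC_URL);
  const wallet = new Wallet(process.env.OWNER_PK!, provider);

  // Placeholders: fill in real router/spender addresses and pre-built swap calldata for both legs.
  const BUY_ROUTER = "0x0000000000000000000000000000000000000000";
  const BUY_SPENDER = BUY_ROUTER;
  const SELL_ROUTER = "0x0000000000000000000000000000000000000000";
  const SELL_SPENDER = SELL_ROUTER;
  const buyCalldata = "0x";
  const sellCalldata = "0x";
  const INTERMEDIATE_TOKEN = "0x0000000000000000000000000000000000000000";
  const minProfitWei = 0n;

  // ABI tuple in the order documented for the executor callback.
  const params = AbiCoder.defaultAbiCoder().encode(
    [
      "address", "address", "bytes",   // buyRouter, buySpender, buyCalldata
      "address", "address", "bytes",   // sellRouter, sellSpender, sellCalldata
      "address", "address", "address", // tokenBorrowed, tokenIntermediate, profitToken
      "uint256",                       // minProfit
    ],
    [
      BUY_ROUTER, BUY_SPENDER, buyCalldata,
      SELL_ROUTER, SELL_SPENDER, sellCalldata,
      process.env.ASSET!, INTERMEDIATE_TOKEN, process.env.ASSET!,
      minProfitWei,
    ],
  );

  // Minimal ABI fragment for the entrypoint; the uint16 referralCode type is an assumption.
  const executor = new Contract(
    process.env.EXECUTOR!,
    ["function initiateFlashArb(address asset, uint256 amount, bytes params, uint16 referralCode)"],
    wallet,
  );
  const tx = await executor.initiateFlashArb(process.env.ASSET!, BigInt(process.env.AMOUNT!), params, 0);
  console.log("submitted:", tx.hash);
}

main().catch(console.error);
```
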
32 | ### Evaluator: Flash-Loan Costs
33 |
34 | - File: `python-backend/app/analytics/evaluator.py`
35 | - Net calculation uses `expected_net_usd(...)` and now includes flash-loan costs via `extra_usd`.
36 | - New params (can be provided per-request or via ENV defaults):
37 | - `flash_fee_bps` (basis points on notional)
38 | - `referral_bps` (optional basis points on notional)
39 | - `flash_fixed_usd` (fixed overhead per flash)
40 | - `executor_fee_usd` (service cost per trade)
41 | - Output includes diagnostic fields: `flash_fee_bps`, `referral_bps`, `flash_fixed_usd`, `executor_fee_usd`, `flash_cost_usd`.
42 |
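For intuition, a small TypeScript sketch of how these parameters combine into a per-trade flash cost, mirroring `flashCostUsd` in `src/eval/model.ts`; the numbers are illustrative only.

```ts
// Illustrative only: flash-loan cost for a given notional, mirroring the TS evaluator's composition.
function flashCostUsd(
  notionalUsd: number,
  fees: { flashFeeBps: number; referralBps: number; flashFixedUsd: number; executorFeeUsd: number },
): number {
  const variableBps = (fees.flashFeeBps + fees.referralBps) / 10_000;
  return notionalUsd * variableBps + fees.flashFixedUsd + fees.executorFeeUsd;
}

// $10,000 notional at 5 bps flash fee + 1 bps referral + $0.10 fixed + $0.25 executor
// = 10000 * 0.0006 + 0.10 + 0.25 = $6.35
console.log(flashCostUsd(10_000, { flashFeeBps: 5, referralBps: 1, flashFixedUsd: 0.10, executorFeeUsd: 0.25 }));
```
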
43 | ### ENV Defaults
44 |
45 | Set in `.env` or your runtime environment. The backend will merge these into evaluator params if the request omits them:
46 |
47 | ```
48 | FLASH_FEE_BPS=0
49 | REFERRAL_BPS=0
50 | FLASH_FIXED_USD=0
51 | EXECUTOR_FEE_USD=0
52 | ```
53 |
54 | HyperLend addresses (placeholders):
55 |
56 | ```
57 | HYPERLEND_POOL=
58 | ARB_EXECUTOR_ADDRESS=
59 | ```
60 |
61 | ### Workflow
62 |
63 | 1) Build router calldata off-chain for both legs (buy then sell).
64 | 2) Encode `FlashParams` and call `initiateFlashArb` on `ArbitrageExecutor`.
65 | 3) Use evaluator endpoints to estimate profitability including flash-loan costs:
66 |    - `POST /api/eval/batch` with params including the flash-loan fields (see the request sketch below).
67 | - Or set ENV defaults listed above.
68 |
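A minimal request sketch for step 3, using the lenient batch schema accepted by `src/api/routes/eval.ts` (legacy snake_case item fields plus `defaults`). The base URL and port are assumptions for a locally running TS API server; adjust to your deployment.

```ts
// Hedged sketch: assumes the TypeScript API (src/api) is running locally on port 3000.
const res = await fetch("http://localhost:3000/api/eval/batch", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    items: [
      // Legacy/loose shape accepted by the batch route; snake_case fields are mapped server-side.
      { edge_bps: 24, notional_usd: 10_000, fees_bps: 10, gas_usd: 0.5, seconds: 1.2, liquidity_ref_usd: 500_000 },
    ],
    defaults: {
      flashFeeBps: 5,
      referralBps: 0,
      flashFixedUsd: 0.1,
      executorFeeUsd: 0.25,
      varCvar: true,   // also return VaR/CVaR from the Monte Carlo path
      mcSamples: 2000,
    },
  }),
});
const { items } = await res.json();
console.log(items[0]); // net_usd_est, ev_per_sec, size_opt_usd, p_success, ... plus var95/cvar95
```
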
69 | ## Quickstart
70 |
71 | 1. Python backend
72 |
73 | ```powershell
74 | # Windows PowerShell
75 | python -m venv .venv
76 | . .venv\Scripts\Activate.ps1
77 | pip install -r python-backend\requirements.txt
78 | uvicorn app.main:app --reload --app-dir python-backend --port 9011
79 | ```
80 |
81 | 2. Rust engine (compiles; logic is stubbed)
82 |
83 | ```bash
84 | cargo run --manifest-path rust-engine/Cargo.toml
85 | ```
86 |
87 | 3. Open dashboard
88 |
89 | - Backend runs on `http://localhost:9011` (FastAPI docs at `/docs`)
90 | - Dashboard static files are in `web/` (open `web/index.html` or serve via `python -m http.server`)
91 |
92 | ## Environment
93 |
94 | Copy `config/sample.env` to `.env` and fill values.
95 |
96 | - `DEEPSEEK_API_KEY=...`
97 | - `HYPEREVM_RPC=https://api.hyperliquid-testnet.xyz/evm`
98 | - `PRJX_SUBGRAPH=https://api.goldsky.com/api/public/project_cmbbm2iwckb1b01t39xed236t/subgraphs/uniswap-v3-hyperevm-position/prod/gn`
99 | - Goldsky pool history (choose one):
100 | - GraphQL mode (recommended):
101 | - `GOLDSKY_MODE=graphql`
102 | - `GOLDSKY_GQL_URL=https://api.goldsky.com/api/public/<project_id>/subgraphs/<subgraph_name>/<version>/gn`
103 | - `GOLDSKY_API_HEADER=Authorization` (default)
104 | - `GOLDSKY_API_PREFIX=Bearer` (append a space if needed by your provider)
105 | - `GOLDSKY_API_KEY=` (if using private endpoint)
106 | - Optional overrides:
107 | - `GOLDSKY_GQL_QUERY=...` (custom GraphQL query)
108 | - `GOLDSKY_GQL_ITEMS_PATH=data.pool.reserves`
109 | - REST mode:
110 | - `GOLDSKY_MODE=rest`
111 | - `GOLDSKY_API_URL=https://...`
112 | - `GOLDSKY_POOL_RES_PATH=pools/{pool_id}/reserves`
113 |
114 | - Caching:
115 | - `GOLDSKY_CACHE_TTL_SEC=30`
116 |
117 | - Optional reference pool IDs (for UI/manual testing only):
118 | - `HYPE_USDC_POOL_ID=...`
119 | - `HYPE_uETH_POOL_ID=...`
120 | - `KHYPE_HYPE_POOL_ID=...`
121 |
122 | ## Next Steps
123 |
124 | - Finalize Goldsky pool IDs for Hyperliquid EVM and verify snapshots via `/api/goldsky/pools/{pool_id}/history`.
125 | - Wire Rust engine to PRJX GraphQL and HyperSwap SDK (ffi/bindings or RPC calls)
126 | - Implement WebSocket streaming to backend
127 | - Finalize Solidity router calls and test on testnet
128 | - Add authentication and persistent storage (SQLite/Redis)
129 |
130 | ## Useful Endpoints
131 |
132 | - Health and status
133 | - `GET /api/health` — shows service status, Goldsky mode/provider, and Goldsky cache freshness/errors.
134 |
135 | - Goldsky
136 | - `GET /api/goldsky/pools/{pool_id}/history?limit=1000` — validated pool snapshots.
137 |
138 | - Trade (executor skeleton)
139 | - `POST /api/trade/quote` — body: `{ "amount_in": 1000, "slippage_bps": 50 }`
140 | - `POST /api/trade/simulate` — body: `{ "amount_in": 1000, "slippage_bps": 50, "gas_price_wei": 20000000000, "gas_limit": 250000, "native_price_usd": 2000 }`
141 | - `POST /api/trade/execute` — body: `{ "dry_run": true }` (live execution not implemented)
142 | - `GET /api/trade/limits` — view safety limits
143 | - `POST /api/trade/limits` — set safety limits: `{ "max_amount_in": 100000, "max_gas_limit": 600000, "max_slippage_bps": 100 }`
144 |
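As a usage example, a short TypeScript sketch that calls the dry-run quote and simulate endpoints above, assuming the FastAPI backend from the Quickstart is listening on port 9011.

```ts
// Hedged sketch: exercises the dry-run trade endpoints documented above.
const base = "http://localhost:9011";

const quote = await fetch(`${base}/api/trade/quote`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ amount_in: 1000, slippage_bps: 50 }),
}).then((r) => r.json());

const sim = await fetch(`${base}/api/trade/simulate`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    amount_in: 1000,
    slippage_bps: 50,
    gas_price_wei: 20_000_000_000,
    gas_limit: 250_000,
    native_price_usd: 2000,
  }),
}).then((r) => r.json());

console.log({ quote, sim });
```
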
--------------------------------------------------------------------------------
/research/backtest_np.py:
--------------------------------------------------------------------------------
1 | """
2 | Vectorized NumPy backtester that mirrors server cost logic for fast parameter sweeps.
3 |
4 | Supports:
5 | - Simple cost model: bps + gas
6 | - HFT cost model: gas (base+tip)*gas_limit*price + latency penalty + failure prob + frictions
7 | - Optional notional_cap_usd and slip/fee bps
8 |
9 | Usage:
10 | from research.data_loader import fetch_recent, to_dataframe
11 | from research.backtest_np import vector_backtest
12 | rows = fetch_recent()
13 | df = to_dataframe(rows)
14 | metrics = vector_backtest(df, params={
15 | 'min_spread_bps': 10,
16 | 'min_liquidity_usd': 10000,
17 | 'slippage_bps': 30,
18 | 'fees_bps': 5,
19 | 'gas_multiplier': 1.0,
20 | 'max_trade_usd': 50000,
21 | # HFT optional
22 | # 'base_fee_gwei': 2.0,
23 | # 'priority_tip_gwei': 0.5,
24 | # 'gas_limit': 250000,
25 | # 'native_usd': 2.0,
26 | })
27 | """
28 | from __future__ import annotations
29 | import typing as t
30 | import numpy as np
31 | import pandas as pd
32 |
33 | # ---------------- HFT helpers ----------------
34 |
35 | def _hft_costs(
36 | gross_usd: np.ndarray,
37 | gas_base_usd: np.ndarray,
38 | notional_usd: np.ndarray,
39 | p: dict,
40 | ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
41 | """Return (gas_usd, bps_cost_usd, net_usd, eff).
42 | If HFT params present, compute advanced costs; else simple bps+gas.
43 | """
44 | # Detect HFT mode by presence of any param
45 | hft_keys = {
46 | 'base_fee_gwei','priority_tip_gwei','gas_limit','native_usd',
47 | 'lp_fees_bps','router_fees_bps','extra_usd','latency_ms','latency_bps_penalty',
48 | 'failure_prob','friction_bps'
49 | }
50 | use_hft = any(k in p for k in hft_keys)
51 |
52 | slippage_bps = float(p.get('slippage_bps', 30.0))
53 | fees_bps = float(p.get('fees_bps', 5.0))
54 |
55 | if use_hft:
56 | base_fee_gwei = float(p.get('base_fee_gwei', 0.0))
57 | tip_gwei = float(p.get('priority_tip_gwei', 0.0))
58 | gas_limit = float(p.get('gas_limit', 250_000))
59 | native_usd = float(p.get('native_usd', 1.0))
60 | lp_fees = float(p.get('lp_fees_bps', 0.0))
61 | router_fees = float(p.get('router_fees_bps', 0.0))
62 | extra_usd = float(p.get('extra_usd', 0.0))
63 | latency_ms = float(p.get('latency_ms', 0.0))
64 | lat_penalty = float(p.get('latency_bps_penalty', 0.0))
65 | fail_prob = float(p.get('failure_prob', 0.0))
66 | friction_bps = float(p.get('friction_bps', 0.0))
67 |
68 | # Gas in USD = (base+tip) * gas_limit * native_price
69 | gas_price_eth = (base_fee_gwei + tip_gwei) * 1e-9
70 | gas_usd = gas_price_eth * gas_limit * native_usd
71 | gas_usd = np.full_like(gross_usd, gas_usd, dtype=float)
72 |
73 | # bps costs
74 | total_bps = (slippage_bps + fees_bps + lp_fees + router_fees + friction_bps) / 10_000.0
75 | bps_cost_usd = notional_usd * total_bps
76 |
77 |         # latency penalty: latency_ms * latency_bps_penalty (bps) applied to notional
78 | lat_cost = (latency_ms * lat_penalty / 10_000.0) * notional_usd
79 |
80 | net_before_fail = gross_usd - gas_usd - bps_cost_usd - extra_usd - lat_cost
81 | # Expected value with failure prob (assume zero payoff on failure)
82 | net_usd = (1.0 - fail_prob) * net_before_fail
83 | eff = np.where(gas_usd > 0, net_usd / gas_usd, net_usd * 1e6)
84 | return gas_usd, bps_cost_usd, net_usd, eff
85 |
86 | # Simple path: gas multiplier + bps
87 | gas_mult = float(p.get('gas_multiplier', 1.0))
88 | gas_usd = gas_base_usd * gas_mult
89 | bps_cost = (slippage_bps + fees_bps) / 10_000.0
90 | bps_cost_usd = notional_usd * bps_cost
91 | net_usd = gross_usd - gas_usd - bps_cost_usd
92 | eff = np.where(gas_usd > 0, net_usd / gas_usd, net_usd * 1e6)
93 | return gas_usd, bps_cost_usd, net_usd, eff
94 |
95 |
96 | # ---------------- Vector backtest ----------------
97 |
98 | def vector_backtest(df: pd.DataFrame, params: dict) -> dict:
99 | """Compute metrics using vector ops. Expects columns created by data_loader.to_dataframe()."""
100 | p = dict(params or {})
101 | min_spread = float(p.get('min_spread_bps', 10.0))
102 | min_liq = float(p.get('min_liquidity_usd', 10_000.0))
103 | max_trade = float(p.get('max_trade_usd', 50_000.0))
104 | notional_cap = float(p.get('notional_cap_usd', float('inf')))
105 |
106 | # Filters
107 | mask = (df['spread_bps'] >= min_spread) & (df['liquidity_usd'] >= min_liq)
108 | sub = df.loc[mask].copy()
109 | if sub.empty:
110 | return {
111 | 'total_gross_profit': 0.0,
112 | 'total_net_profit': 0.0,
113 | 'winrate': 0.0,
114 | 'avg_profit_per_gas': 0.0,
115 | 'max_drawdown_usd': 0.0,
116 | 'sharpe_proxy': 0.0,
117 | 'count': 0,
118 | }
119 |
120 | gross = sub['gross_usd'].to_numpy(float)
121 | gas_base = sub['gas_usd'].to_numpy(float)
122 | # notional is limited by liquidity and cap
123 | notional = np.minimum(sub['notional_usd'].to_numpy(float), max_trade)
124 | if np.isfinite(notional_cap) and notional_cap > 0:
125 | notional = np.minimum(notional, notional_cap)
126 |
127 | gas_usd, bps_cost_usd, net_usd, eff = _hft_costs(gross, gas_base, notional, p)
128 |
129 | # Aggregate metrics
130 | total_gross = float(np.nansum(gross))
131 | total_net = float(np.nansum(net_usd))
132 | wins = float(np.count_nonzero(net_usd > 0))
133 | count = float(net_usd.shape[0])
134 | winrate = wins / count if count > 0 else 0.0
135 | avg_eff = float(np.nanmean(eff)) if eff.size else 0.0
136 |
137 | # Drawdown path (cum PnL over time)
138 | pnl = pd.Series(net_usd, index=sub['ts'])
139 | pnl = pnl.sort_index()
140 | equity = pnl.cumsum()
141 | roll_max = equity.cummax()
142 | dd = equity - roll_max
143 | max_dd = float(dd.min() if not dd.empty else 0.0)
144 |
145 | # Sharpe proxy: mean/std of per-trade net (no annualization)
146 | mean = float(pnl.mean() if not pnl.empty else 0.0)
147 | std = float(pnl.std(ddof=1) if pnl.size > 1 else 0.0)
148 | sharpe = (mean / std) if std > 0 else 0.0
149 |
150 | return {
151 | 'total_gross_profit': round(total_gross, 6),
152 | 'total_net_profit': round(total_net, 6),
153 | 'winrate': round(winrate, 6),
154 | 'avg_profit_per_gas': round(avg_eff, 6),
155 | 'max_drawdown_usd': round(max_dd, 6),
156 | 'sharpe_proxy': round(sharpe, 6),
157 | 'count': int(count),
158 | }
159 |
--------------------------------------------------------------------------------
/src/api/routes/eval.ts:
--------------------------------------------------------------------------------
1 | import express, { Request, Response } from "express";
2 | import { z } from "zod";
3 | import { ENV } from "../../config/env";
4 | import { evaluateArb } from "../../eval/model";
5 | import { ArbInputs, FeesConfig, MarketFrictions, LatencyExec, SlippageModel, FailureTree } from "../../eval/types";
6 | import { simulatePayouts, varCvar } from "../../eval/montecarlo";
7 | import { rateLimit } from "../middleware/rateLimit";
8 |
9 | const router = express.Router();
10 |
11 | // Schemas (lenient to preserve backward compatibility)
12 | const FeesSchema = z.object({
13 | totalFeesBps: z.number(),
14 | flashFeeBps: z.number().optional(),
15 | referralBps: z.number().optional(),
16 | executorFeeUsd: z.number().optional(),
17 | flashFixedUsd: z.number().optional(),
18 | });
19 |
20 | const FrictionsSchema = z.object({
21 | gasUsdMean: z.number(),
22 | gasUsdStd: z.number().optional(),
23 | adverseUsdMean: z.number(),
24 | adverseUsdStd: z.number().optional(),
25 | extraUsd: z.number().optional(),
26 | mevPenaltyUsd: z.number().optional(),
27 | });
28 |
29 | const LatencySchema = z.object({
30 | latencySec: z.number(),
31 | edgeDecayBpsPerSec: z.number().optional(),
32 | baseFillProb: z.number().optional(),
33 | partialFillShape: z.enum(["linear","concave","convex"]).optional(),
34 | theta: z.number().optional(),
35 | });
36 |
37 | const SlippageSchema = z.object({
38 | kind: z.enum(["amm_v2","univ3","empirical"]).default("empirical"),
39 | k: z.number().optional(),
40 | alpha: z.number().optional(),
41 | liquidityRefUsd: z.number().optional(),
42 | });
43 |
44 | const FailureSchema = z.object({
45 | failBeforeFillProb: z.number().default(0.02),
46 | failBetweenLegsProb: z.number().default(0.01),
47 | reorgOrMevProb: z.number().default(0.0),
48 | });
49 |
50 | const ArbInputSchema = z.object({
51 | edgeBps: z.number(),
52 | notionalUsd: z.number(),
53 | fees: FeesSchema,
54 | frictions: FrictionsSchema,
55 | latency: LatencySchema,
56 | slippage: SlippageSchema,
57 | failures: FailureSchema,
58 | flashEnabled: z.boolean().default(true),
59 | riskAversion: z.number().optional(),
60 | capitalUsd: z.number().optional(),
61 | });
62 |
63 | const BatchSchema = z.object({
64 | // either provide full inputs per item, or legacy arrays with params
65 | items: z.array(z.any()),
66 | // global overrides (optional)
67 | defaults: z.object({
68 | flashFeeBps: z.number().optional(),
69 | referralBps: z.number().optional(),
70 | executorFeeUsd: z.number().optional(),
71 | flashFixedUsd: z.number().optional(),
72 | edgeDecayBpsPerSec: z.number().optional(),
73 | baseFillProb: z.number().optional(),
74 | theta: z.number().optional(),
75 | slipAlpha: z.number().optional(),
76 | slipK: z.number().optional(),
77 | gasUsdStd: z.number().optional(),
78 | adverseUsdStd: z.number().optional(),
79 | mevPenaltyUsd: z.number().optional(),
80 | riskAversion: z.number().optional(),
81 | varCvar: z.boolean().optional(),
82 | mcSamples: z.number().optional(),
83 | }).partial().optional(),
84 | });
85 |
86 | function withEnvFees(fees: Partial<FeesConfig>): FeesConfig {
87 | return {
88 | totalFeesBps: fees.totalFeesBps ?? 0,
89 | flashFeeBps: fees.flashFeeBps ?? ENV.FLASH_FEE_BPS,
90 | referralBps: fees.referralBps ?? ENV.REFERRAL_BPS,
91 | executorFeeUsd: fees.executorFeeUsd ?? ENV.EXECUTOR_FEE_USD,
92 | flashFixedUsd: fees.flashFixedUsd ?? ENV.FLASH_FIXED_USD,
93 | };
94 | }
95 |
96 | function mapAnyToInputs(x: any, defaults: any): ArbInputs {
97 | // If already conforms, use it
98 | const maybe = ArbInputSchema.safeParse(x);
99 | if (maybe.success) return maybe.data as ArbInputs;
100 |
101 | // Legacy/loose mapping
102 | const fees: FeesConfig = withEnvFees({
103 | totalFeesBps: Number(x.totalFeesBps ?? x.fees_bps ?? 0),
104 | flashFeeBps: defaults.flashFeeBps,
105 | referralBps: defaults.referralBps,
106 | executorFeeUsd: defaults.executorFeeUsd,
107 | flashFixedUsd: defaults.flashFixedUsd,
108 | });
109 | const frictions: MarketFrictions = {
110 | gasUsdMean: Number(x.gas_usd ?? x.gasUsdMean ?? 0),
111 | gasUsdStd: defaults.gasUsdStd ?? ENV.GAS_USD_STD,
112 | adverseUsdMean: Number(x.adverse_usd ?? x.adverseUsdMean ?? 0),
113 | adverseUsdStd: defaults.adverseUsdStd ?? ENV.ADVERSE_USD_STD,
114 | extraUsd: Number(x.extra_usd ?? 0),
115 | mevPenaltyUsd: defaults.mevPenaltyUsd ?? ENV.MEV_PENALTY_USD,
116 | };
117 | const latency: LatencyExec = {
118 | latencySec: Number(x.seconds ?? x.latencySec ?? 1),
119 | edgeDecayBpsPerSec: defaults.edgeDecayBpsPerSec ?? ENV.EDGE_DECAY_BPS_PER_SEC,
120 | baseFillProb: defaults.baseFillProb ?? ENV.BASE_FILL_PROB,
121 | theta: defaults.theta ?? ENV.FILL_THETA,
122 | };
123 | const slippage: SlippageModel = {
124 | kind: "empirical",
125 | k: defaults.slipK ?? ENV.SLIP_K,
126 | alpha: defaults.slipAlpha ?? ENV.SLIP_ALPHA,
127 | liquidityRefUsd: Number(x.liquidity_ref_usd ?? x.liquidityRefUsd ?? 1_000_000),
128 | };
129 | const failures: FailureTree = {
130 | failBeforeFillProb: Number(x.fail_before_prob ?? 0.02),
131 | failBetweenLegsProb: Number(x.fail_between_prob ?? 0.01),
132 | reorgOrMevProb: Number(x.reorg_mev_prob ?? 0.0),
133 | };
134 | const flashEnabled = x.flashEnabled !== undefined ? Boolean(x.flashEnabled) : true;
135 | const riskAversion = defaults.riskAversion ?? ENV.RISK_AVERSION_LAMBDA;
136 |
137 | const inputs: ArbInputs = {
138 | edgeBps: Number(x.edge_bps ?? x.edgeBps ?? 0),
139 | notionalUsd: Number(x.notional_usd ?? x.notionalUsd ?? 0),
140 | fees, frictions, latency, slippage, failures, flashEnabled,
141 | riskAversion,
142 | capitalUsd: Number(x.capital_usd ?? x.capitalUsd ?? 0) || undefined,
143 | };
144 | return inputs;
145 | }
146 |
147 | router.post("/batch", rateLimit({ capacity: 10, refillPerMs: 500 }), (req: Request, res: Response) => {
148 | try {
149 | const parsed = BatchSchema.parse({ items: req.body?.items ?? [], defaults: req.body?.defaults ?? {} });
150 | const defaults = parsed.defaults || {};
151 | const inputs: ArbInputs[] = parsed.items.map((x: any) => mapAnyToInputs(x, defaults));
152 | const out = inputs.map((inp) => {
153 | const base = evaluateArb(inp);
154 | if (defaults.varCvar) {
155 | const samples = Math.max(100, Number(defaults.mcSamples ?? 1500));
156 | const draws = simulatePayouts(inp, samples);
157 | const { var: VaR, cvar: CVaR } = varCvar(draws, 0.95);
158 | return { ...base, var95: VaR, cvar95: CVaR };
159 | }
160 | return base;
161 | });
162 | res.json({ items: out });
163 | } catch (e: any) {
164 | res.status(400).json({ error: e?.message || String(e) });
165 | }
166 | });
167 |
168 | export default router;
169 |
--------------------------------------------------------------------------------
/python-backend/app/data/etl_goldsky.py:
--------------------------------------------------------------------------------
1 | import os, httpx, pandas as pd, asyncio
2 | from typing import Dict, Any, List
3 |
4 | # Base URL and auth
5 | GOLDSKY_API_URL = os.getenv("GOLDSKY_API_URL", "").strip()
6 | GOLDSKY_API_KEY = os.getenv("GOLDSKY_API_KEY", "").strip()
7 | GOLDSKY_MODE = os.getenv("GOLDSKY_MODE", "rest").strip().lower() # 'rest' or 'graphql'
8 |
9 | # Header customization to support different providers (e.g., Authorization Bearer vs X-API-Key)
10 | API_HEADER_NAME = os.getenv("GOLDSKY_API_HEADER", "Authorization").strip() # e.g., "Authorization" or "X-API-Key"
11 | API_HEADER_PREFIX = os.getenv("GOLDSKY_API_PREFIX", "Bearer ") # prefix before key; empty string allowed
12 |
13 | # Path template for pool reserves; must contain {pool_id}
14 | POOL_RES_PATH = os.getenv("GOLDSKY_POOL_RES_PATH", "pools/{pool_id}/reserves")
15 |
16 | # GraphQL-specific settings
17 | GQL_URL = os.getenv("GOLDSKY_GQL_URL", GOLDSKY_API_URL)
18 | # Default query assumes a schema with pool(id) { reserves(first:, orderBy:, orderDirection:) { ... } }
19 | GQL_QUERY = os.getenv(
20 | "GOLDSKY_GQL_QUERY",
21 | (
22 | "query ReserveData($poolId: ID!, $limit: Int!) {\n"
23 | " pool(id: $poolId) {\n"
24 | " reserves(first: $limit, orderBy: timestamp, orderDirection: desc) {\n"
25 | " timestamp\n"
26 | " reserve0\n"
27 | " reserve1\n"
28 | " }\n"
29 | " }\n"
30 | "}"
31 | ),
32 | )
33 | # Dot-path to items in GraphQL response, default: data.pool.reserves
34 | GQL_ITEMS_PATH = os.getenv("GOLDSKY_GQL_ITEMS_PATH", "data.pool.reserves")
35 |
36 | async def fetch_json(path: str, params: Dict[str, Any] | None = None) -> Any:
37 | if not GOLDSKY_API_URL:
38 | raise RuntimeError("GOLDSKY_API_URL not set")
39 | headers: Dict[str, str] = {}
40 | if GOLDSKY_API_KEY:
41 | # Compose header according to configured scheme
42 | headers[API_HEADER_NAME] = f"{API_HEADER_PREFIX}{GOLDSKY_API_KEY}" if API_HEADER_PREFIX is not None else GOLDSKY_API_KEY
43 | url = f"{GOLDSKY_API_URL.rstrip('/')}/{path.lstrip('/')}"
44 | # Retry with exponential backoff on network/5xx
45 | attempts = 3
46 | delay = 0.5
47 | last_err: Exception | None = None
48 | async with httpx.AsyncClient(timeout=20.0) as c:
49 | for i in range(attempts):
50 | try:
51 | r = await c.get(url, params=params or {}, headers=headers)
52 | r.raise_for_status()
53 | return r.json()
54 | except Exception as e:
55 | last_err = e
56 | # 4xx likely won't succeed on retry; only retry on 5xx/transport
57 | try:
58 | status = getattr(e, 'response', None).status_code if hasattr(e, 'response') and e.response else None
59 | except Exception:
60 | status = None
61 | if status is not None and 400 <= int(status) < 500:
62 | break
63 | await asyncio.sleep(delay)
64 | delay *= 2
65 | raise RuntimeError(f"Goldsky REST fetch failed url={url} params={params} err={last_err}")
66 |
67 | async def get_pool_history(pool_id: str, limit: int = 1000) -> pd.DataFrame:
68 | def _extract_core_fields(items: List[Dict[str, Any]]) -> pd.DataFrame:
69 | # Normalize to required keys: timestamp, reserve0, reserve1
70 | out: List[Dict[str, Any]] = []
71 | for it in items or []:
72 | try:
73 | ts = it.get("timestamp")
74 | r0 = it.get("reserve0")
75 | r1 = it.get("reserve1")
76 | if ts is None and "blockTimestamp" in it:
77 | ts = it.get("blockTimestamp")
78 | # Coerce types if needed
79 | if isinstance(ts, str) and ts.isdigit():
80 | ts = int(ts)
81 | out.append({"timestamp": ts, "reserve0": r0, "reserve1": r1})
82 | except Exception:
83 | # Best-effort extraction; skip malformed items
84 | continue
85 | # If extraction produced at least one meaningful row, use it; otherwise return original
86 | valid = [o for o in out if o.get("timestamp") is not None]
87 | return pd.DataFrame(valid) if valid else pd.DataFrame(items)
88 | if GOLDSKY_MODE == "graphql":
89 | # GraphQL POST to GQL_URL
90 | if not GQL_URL:
91 | raise RuntimeError("GOLDSKY_GQL_URL (or GOLDSKY_API_URL) not set for GraphQL mode")
92 | headers: Dict[str, str] = {}
93 | if GOLDSKY_API_KEY:
94 | headers[API_HEADER_NAME] = f"{API_HEADER_PREFIX}{GOLDSKY_API_KEY}" if API_HEADER_PREFIX is not None else GOLDSKY_API_KEY
95 | payload = {"query": GQL_QUERY, "variables": {"poolId": pool_id, "limit": int(limit)}}
96 | # Retry with exponential backoff for GraphQL POST
97 | attempts = 3
98 | delay = 0.5
99 | last_err: Exception | None = None
100 | async with httpx.AsyncClient(timeout=20.0) as c:
101 | for i in range(attempts):
102 | try:
103 | r = await c.post(GQL_URL, json=payload, headers=headers)
104 | r.raise_for_status()
105 | data = r.json()
106 | # GraphQL-level errors array
107 | if isinstance(data, dict) and data.get('errors'):
108 | raise RuntimeError(f"GraphQL errors: {data.get('errors')}")
109 | break
110 | except Exception as e:
111 | last_err = e
112 | try:
113 | status = getattr(e, 'response', None).status_code if hasattr(e, 'response') and e.response else None
114 | except Exception:
115 | status = None
116 | if status is not None and 400 <= int(status) < 500:
117 | # Auth/schema errors won't be fixed by retry
118 | raise
119 | await asyncio.sleep(delay)
120 | delay *= 2
121 | else:
122 | raise RuntimeError(f"Goldsky GraphQL fetch failed url={GQL_URL} err={last_err}")
123 | # Traverse dot-path to get items
124 | node: Any = data
125 | for part in GQL_ITEMS_PATH.split('.'):
126 | if part:
127 | node = node.get(part) if isinstance(node, dict) else None
128 | if node is None:
129 | break
130 | items: List[Dict[str, Any]] = node if isinstance(node, list) else []
131 | return _extract_core_fields(items)
132 | else:
133 | # REST mode
134 | path = POOL_RES_PATH.format(pool_id=pool_id)
135 | data = await fetch_json(path, {"limit": limit})
136 | items = data.get("items", data) if isinstance(data, dict) else data
137 | return _extract_core_fields(items)
138 |
--------------------------------------------------------------------------------
/src/ml/finbloom.ts:
--------------------------------------------------------------------------------
1 | /**
2 | * FinBloom ML adapter for market context and risk analysis
3 | */
4 |
5 | import axios, { AxiosInstance } from 'axios';
6 | import { MarketSnapshot } from '../types/market.js';
7 | import { FinBloomContext } from '../types/ml.js';
8 | import { env } from '../config/env.js';
9 |
10 | export class FinBloomAdapter {
11 | private client: AxiosInstance;
12 | private readonly MAX_RETRIES = 3;
13 | private readonly TIMEOUT_MS = 5000;
14 |
15 | constructor() {
16 | this.client = axios.create({
17 | baseURL: env.MODEL_FINBLOOM_ENDPOINT,
18 | headers: {
19 | 'Authorization': `Bearer ${env.MODEL_FINBLOOM_KEY}`,
20 | 'Content-Type': 'application/json',
21 | },
22 | timeout: this.TIMEOUT_MS,
23 | });
24 | }
25 |
26 | /**
27 | * Analyze market context and generate risk assessment
28 | */
29 |   async summarizeContext(snapshot: MarketSnapshot): Promise<FinBloomContext> {
30 | const prompt = this.buildPrompt(snapshot);
31 |
32 | try {
33 | const response = await this.callModel(prompt);
34 | return this.parseResponse(response, snapshot);
35 | } catch (error) {
36 | console.error('FinBloom analysis failed:', error);
37 | // Return conservative defaults on failure
38 | return this.getDefaultContext(snapshot);
39 | }
40 | }
41 |
42 | /**
43 | * Build structured prompt for FinBloom
44 | */
45 | private buildPrompt(snapshot: MarketSnapshot): string {
46 | const dataAge = Date.now() - snapshot.timestamp;
47 | const isStale = dataAge > 3000;
48 |
49 | const quotes = snapshot.quotes.map(q => ({
50 | dex: q.dex,
51 | price: q.price.toFixed(4),
52 | depth: q.depthUsd.toFixed(0),
53 | feeBps: q.feeBps,
54 | age: Date.now() - q.ts,
55 | }));
56 |
57 | const prompt = `You are FinBloom, a financial market analyzer. Analyze the following market data and classify the regime, identify risk flags, and provide a brief narrative.
58 |
59 | MARKET DATA:
60 | - Quotes: ${JSON.stringify(quotes, null, 2)}
61 | - Reference Price: ${snapshot.refPriceUsd?.toFixed(2) || 'N/A'}
62 | - Volatility (24h): ${snapshot.volatility?.toFixed(2) || 'N/A'}%
63 | - Funding Rate: ${snapshot.funding?.toFixed(4) || 'N/A'}
64 | - WS Latency: ${snapshot.wsLatencyMs || 'N/A'}ms
65 | - Data Age: ${dataAge}ms${isStale ? ' (STALE)' : ''}
66 |
67 | TASK:
68 | 1. Classify regime as one of: calm, volatile, event, illiquid
69 | 2. Identify risk flags that should affect arbitrage aggressiveness
70 | 3. Provide a 2-3 sentence narrative summary
71 | 4. Suggest sensitivity adjustment in basis points (0-100)
72 |
73 | Return ONLY valid JSON matching this structure:
74 | {
75 | "regime": "calm|volatile|event|illiquid",
76 | "riskFlags": ["flag1", "flag2"],
77 | "narrative": "Brief market assessment...",
78 | "sensitivityBps": 0,
79 | "confidence": 0.0
80 | }`;
81 |
82 | return prompt;
83 | }
84 |
85 | /**
86 | * Call FinBloom model API
87 | */
88 |   private async callModel(prompt: string): Promise<any> {
89 | let lastError: any;
90 |
91 | for (let attempt = 1; attempt <= this.MAX_RETRIES; attempt++) {
92 | try {
93 | const response = await this.client.post('/chat/completions', {
94 | model: 'finbloom-latest',
95 | messages: [
96 | {
97 | role: 'system',
98 | content: 'You are FinBloom, a precise financial market analyzer. Always respond with valid JSON only.',
99 | },
100 | {
101 | role: 'user',
102 | content: prompt,
103 | },
104 | ],
105 | temperature: 0.3,
106 | max_tokens: 500,
107 | response_format: { type: 'json_object' },
108 | });
109 |
110 | if (response.data?.choices?.[0]?.message?.content) {
111 | return JSON.parse(response.data.choices[0].message.content);
112 | }
113 | } catch (error) {
114 | lastError = error;
115 | console.warn(`FinBloom attempt ${attempt} failed:`, error);
116 |
117 | if (attempt < this.MAX_RETRIES) {
118 | await new Promise(resolve => setTimeout(resolve, 1000 * attempt));
119 | }
120 | }
121 | }
122 |
123 |     throw lastError ?? new Error('FinBloom returned no parsable content');
124 | }
125 |
126 | /**
127 | * Parse and validate model response
128 | */
129 | private parseResponse(response: any, snapshot: MarketSnapshot): FinBloomContext {
130 | // Validate regime
131 | const validRegimes = ['calm', 'volatile', 'event', 'illiquid'];
132 | const regime = validRegimes.includes(response.regime)
133 | ? response.regime
134 | : this.inferRegime(snapshot);
135 |
136 | // Validate risk flags
137 | const riskFlags = Array.isArray(response.riskFlags)
138 | ? response.riskFlags.filter((f: any) => typeof f === 'string')
139 | : [];
140 |
141 | // Add data staleness flag if needed
142 | const dataAge = Date.now() - snapshot.timestamp;
143 | if (dataAge > 3000 && !riskFlags.includes('stale_data')) {
144 | riskFlags.push('stale_data');
145 | }
146 |
147 | // Add high latency flag
148 | if (snapshot.wsLatencyMs && snapshot.wsLatencyMs > 100) {
149 | riskFlags.push('high_latency');
150 | }
151 |
152 | return {
153 | regime: regime as FinBloomContext['regime'],
154 | riskFlags,
155 | narrative: response.narrative || 'Market analysis unavailable',
156 | sensitivityBps: Math.min(100, Math.max(0, response.sensitivityBps || 0)),
157 | confidence: Math.min(1, Math.max(0, response.confidence || 0.5)),
158 | timestamp: Date.now(),
159 | };
160 | }
161 |
162 | /**
163 | * Infer regime from market data
164 | */
165 | private inferRegime(snapshot: MarketSnapshot): FinBloomContext['regime'] {
166 | // High volatility check
167 | if (snapshot.volatility && snapshot.volatility > 5) {
168 | return 'volatile';
169 | }
170 |
171 | // Liquidity check
172 | const totalDepth = snapshot.quotes.reduce((sum, q) => sum + q.depthUsd, 0);
173 | if (totalDepth < 10000) {
174 | return 'illiquid';
175 | }
176 |
177 | // Wide spreads indicate event or illiquid
178 | const prices = snapshot.quotes.map(q => q.price);
179 | const spread = (Math.max(...prices) - Math.min(...prices)) / Math.min(...prices);
180 | if (spread > 0.01) {
181 | return 'event';
182 | }
183 |
184 | return 'calm';
185 | }
186 |
187 | /**
188 | * Get conservative default context
189 | */
190 | private getDefaultContext(snapshot: MarketSnapshot): FinBloomContext {
191 | const regime = this.inferRegime(snapshot);
192 | const riskFlags: string[] = ['model_unavailable'];
193 |
194 | // Add data quality flags
195 | const dataAge = Date.now() - snapshot.timestamp;
196 | if (dataAge > 3000) {
197 | riskFlags.push('stale_data');
198 | }
199 |
200 | if (!snapshot.wsLatencyMs || snapshot.wsLatencyMs > 100) {
201 | riskFlags.push('high_latency');
202 | }
203 |
204 | return {
205 | regime,
206 | riskFlags,
207 | narrative: 'FinBloom unavailable, using conservative defaults based on market metrics',
208 | sensitivityBps: regime === 'calm' ? 10 : 50,
209 | confidence: 0.3,
210 | timestamp: Date.now(),
211 | };
212 | }
213 | }
214 |
215 | // Export singleton instance
216 | export const finBloomAdapter = new FinBloomAdapter();
217 |
--------------------------------------------------------------------------------
/PROGRESS_REPORT.md:
--------------------------------------------------------------------------------
1 | # Hyperliquid Arbitrage Bot - CTO Progress Report
2 | **Date:** 2025-01-10
3 | **Overall Completion:** 68%
4 |
5 | ## Executive Summary
6 | The Hyperliquid arbitrage bot infrastructure has achieved production-grade evaluation modeling with sophisticated execution risk analysis. Core TypeScript evaluation service is operational with UniV3 math, observability, and testing infrastructure in place. Critical gaps remain in live execution, Hyperliquid-specific integration, and operational tooling.
7 |
8 | ---
9 |
10 | ## Component Status by Priority
11 |
12 | ### 🔴 **CRITICAL PATH** (Must Complete for Production)
13 |
14 | #### 1. **Hyperliquid Integration** - 15% Complete
15 | - **Status:** Major gap - no Hyperliquid-specific components implemented
16 | - **Required:**
17 | - [ ] Hyperliquid WebSocket client for real-time orderbook/trades
18 | - [ ] Hyperliquid REST API integration for order placement
19 | - [ ] Account management and position tracking
20 | - [ ] Hyperliquid-specific slippage models
21 | - [ ] Cross-margin and funding rate calculations
22 | - **Effort:** 2-3 weeks
23 |
24 | #### 2. **Live Execution Pipeline** - 25% Complete
25 | - **Status:** Solidity contracts drafted but not deployed/tested
26 | - **Completed:**
27 | - ✅ HyperLend flash loan executor contract (uncompiled)
28 | - ✅ Basic two-leg swap structure
29 | - **Required:**
30 | - [ ] Contract compilation and deployment scripts
31 | - [ ] Contract unit tests with Hardhat/Foundry
32 | - [ ] Calldata generation for live swaps
33 | - [ ] Transaction manager with retry logic
34 | - [ ] Gas optimization and MEV protection
35 | - **Effort:** 1-2 weeks
36 |
37 | #### 3. **Signal Generation & Opportunity Detection** - 20% Complete
38 | - **Status:** No live signal generation implemented
39 | - **Required:**
40 | - [ ] Multi-venue price aggregator
41 | - [ ] Real-time arbitrage opportunity scanner
42 | - [ ] Signal validation and filtering
43 | - [ ] Rate limiting and API management
44 | - [ ] Historical opportunity tracking
45 | - **Effort:** 1-2 weeks
46 |
47 | ---
48 |
49 | ### 🟡 **OPERATIONAL REQUIREMENTS** (Needed for Scale)
50 |
51 | #### 4. **Risk Management & Safety** - 40% Complete
52 | - **Completed:**
53 | - ✅ Risk-adjusted EV calculation with VaR/CVaR
54 | - ✅ Failure probability modeling
55 | - ✅ Optimal sizing with capital constraints
56 | - **Required:**
57 | - [ ] Live P&L tracking and circuit breakers
58 | - [ ] Position limits and exposure monitoring
59 | - [ ] Drawdown protection
60 | - [ ] Emergency shutdown procedures
61 | - **Effort:** 1 week
62 |
63 | #### 5. **Data Infrastructure** - 30% Complete
64 | - **Completed:**
65 | - ✅ Basic on-chain fetching scaffolding (ethers)
66 | - ✅ Environment-based configuration
67 | - **Required:**
68 | - [ ] Time-series database for historical data
69 | - [ ] Real-time data pipeline
70 | - [ ] Caching layer for pool states
71 | - [ ] Data quality monitoring
72 | - **Effort:** 1 week
73 |
74 | #### 6. **Monitoring & Alerting** - 60% Complete
75 | - **Completed:**
76 | - ✅ Prometheus metrics integration
77 | - ✅ Structured JSON logging
78 | - ✅ Request tracing with IDs
79 | - **Required:**
80 | - [ ] Grafana dashboards
81 | - [ ] PagerDuty/alert integration
82 | - [ ] Performance metrics tracking
83 | - [ ] Error rate monitoring
84 | - **Effort:** 3-4 days
85 |
86 | ---
87 |
88 | ### 🟢 **ADVANCED FEATURES** (Completed Core Components)
89 |
90 | #### 7. **Arbitrage Evaluation Model** - 95% Complete ✅
91 | - **Completed:**
92 | - ✅ Production-grade TypeScript evaluation service
93 | - ✅ Stochastic execution risk modeling
94 | - ✅ Non-linear slippage with UniV3 tick simulation
95 | - ✅ Latency decay and fill probability models
96 | - ✅ Flash loan cost breakdown
97 | - ✅ Mean-variance optimization
98 | - ✅ Monte Carlo simulation capability
99 | - **Remaining:**
100 | - [ ] Calibration scripts for model parameters
101 | - **Effort:** 2 days
102 |
103 | #### 8. **UniV3 Mathematics** - 85% Complete ✅
104 | - **Completed:**
105 | - ✅ Q64.96 math helpers
106 | - ✅ Non-crossing swap simulator
107 | - ✅ Tick-walking simulator with liquidity updates
108 | - ✅ Slippage calculation vs mid-price
109 | - **Remaining:**
110 | - [ ] TickLens integration for initialized ticks
111 | - [ ] Multi-pool routing optimization
112 | - **Effort:** 3 days
113 |
114 | #### 9. **Testing Infrastructure** - 80% Complete ✅
115 | - **Completed:**
116 | - ✅ Vitest setup with coverage
117 | - ✅ Unit tests for core models
118 | - ✅ Integration tests with Supertest
119 | - ✅ Smoke test scripts
120 | - **Remaining:**
121 | - [ ] End-to-end tests with mock Hyperliquid
122 | - [ ] Load testing
123 | - [ ] Chaos engineering tests
124 | - **Effort:** 3 days
125 |
126 | #### 10. **Development Tooling** - 75% Complete ✅
127 | - **Completed:**
128 | - ✅ TypeScript with hot reload
129 | - ✅ Python/TS service integration
130 | - ✅ Environment-driven configuration
131 | - ✅ Docker-ready structure
132 | - **Remaining:**
133 | - [ ] CI/CD pipeline (GitHub Actions prepared)
134 | - [ ] Deployment scripts
135 | - [ ] Kubernetes manifests
136 | - **Effort:** 2 days
137 |
138 | ---
139 |
140 | ## Technical Debt & Optimizations
141 |
142 | ### Medium Priority
143 | - **Token Metadata Service** (stub exists, needs implementation)
144 | - **Price Feed Aggregation** (design complete, not implemented)
145 | - **Path Finding Algorithm** (skeleton ready, needs multi-hop)
146 | - **MEV Protection** (basic design, needs private mempool integration)
147 |
148 | ### Low Priority
149 | - **UI Dashboard** (basic HTML exists, needs React upgrade)
150 | - **Rust Engine** (directory exists, not implemented)
151 | - **Historical Backtesting** (framework needed)
152 |
153 | ---
154 |
155 | ## Recommended Sprint Plan (Next 2 Weeks)
156 |
157 | ### **Week 1: Core Execution**
158 | 1. **Days 1-3:** Hyperliquid WebSocket + REST integration
159 | 2. **Days 4-5:** Signal generation and opportunity detection
160 | 3. **Weekend:** Contract compilation, testing, deployment
161 |
162 | ### **Week 2: Production Readiness**
163 | 1. **Days 1-2:** Transaction manager and live execution
164 | 2. **Days 3-4:** Risk management and safety systems
165 | 3. **Day 5:** Monitoring dashboards and alerts
166 | 4. **Weekend:** End-to-end testing and soft launch
167 |
168 | ---
169 |
170 | ## Resource Requirements
171 |
172 | ### Immediate Needs
173 | - **RPC Endpoints:** Production Ethereum/L2 nodes
174 | - **Hyperliquid API Keys:** Trading credentials
175 | - **Infrastructure:**
176 | - Application server (4 vCPU, 8GB RAM)
177 | - PostgreSQL/TimescaleDB instance
178 | - Redis cache
179 | - Monitoring stack (Prometheus/Grafana)
180 |
181 | ### Team Recommendations
182 | - **1 Backend Engineer:** Focus on Hyperliquid integration
183 | - **1 Smart Contract Engineer:** Deploy and audit contracts
184 | - **1 DevOps/SRE:** Production infrastructure and monitoring
185 |
186 | ---
187 |
188 | ## Risk Assessment
189 |
190 | ### High Risk Items
191 | 1. **No Hyperliquid integration** - Complete blocker for production
192 | 2. **Untested contracts** - Could lose funds if bugs exist
193 | 3. **No circuit breakers** - Runaway losses possible
194 |
195 | ### Mitigation Strategy
196 | 1. Start with small position sizes
197 | 2. Implement hard stop-loss limits
198 | 3. Run paper trading for 1 week minimum
199 | 4. Get smart contract audit before mainnet
200 |
201 | ---
202 |
203 | ## Conclusion
204 |
205 | The project has excellent foundational infrastructure with sophisticated risk modeling and evaluation capabilities. However, critical integration work remains for Hyperliquid connectivity and live execution. With focused effort on the critical path items, the bot could be production-ready in 2-3 weeks.
206 |
207 | **Recommended Action:** Prioritize Hyperliquid integration immediately while parallel-tracking smart contract deployment.
208 |
--------------------------------------------------------------------------------