├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── _tests ├── test_smoke_basic.py └── test_yf_demo.py ├── docs ├── INDICATORS.md ├── USAGE.md └── technical_indicator.png ├── examples ├── example_basic.py ├── example_indicators.py ├── hedge_fund_example.py ├── run_basic.py ├── run_channels.py └── run_from_csv.py ├── indicator_plots ├── ADOSC_plot.png ├── AD_plot.png ├── ATR_plot.png ├── BB_Middle_plot.png ├── CCI_plot.png ├── DEMA_plot.png ├── EMA_plot.png ├── ElderRay_Bull_plot.png ├── KAMA_plot.png ├── MACD_plot.png ├── MFI_plot.png ├── MomentumIndex_plot.png ├── OBV_plot.png ├── ROC_plot.png ├── RSI_plot.png ├── RVI_plot.png ├── SMA_plot.png ├── STOCH_K_plot.png ├── VWAP_plot.png └── WILLR_plot.png ├── notebooks ├── 01_basic_indicators.ipynb ├── 02_channels_and_bands.ipynb └── 03_streaming_demo.ipynb ├── pyproject.toml ├── quantjourney_ti ├── __init__.py ├── _decorators.py ├── _errors.py ├── _indicator_kernels.py ├── _legacy_ │ └── technical_indicators.py ├── _performance.py ├── _risk_metrics.py ├── _streaming.py ├── _utils.py ├── indicators.py ├── kernels │ ├── __init__.py │ ├── momentum_numba.py │ ├── trend_numba.py │ ├── volatility_numba.py │ └── volume_numba.py └── technical_indicators.py ├── requirements.txt ├── run_demo.bat ├── run_demo.sh ├── tests ├── __init__.py ├── _yf.py ├── conftest.py ├── test_all_indicators.py ├── test_all_indicators_comprehensive.py ├── test_basic.py ├── test_channels_invariants.py ├── test_decorators.py ├── test_demo.py ├── test_fallbacks.py ├── test_hedge_fund_features.py ├── test_indicators.py ├── test_integration_yf.py ├── test_invariants.py ├── test_kernels_attach.py ├── test_streaming_equivalence.py ├── test_technical_indicators.py ├── test_utils.py ├── test_utils_crossovers.py └── test_yf_integration_multi.py └── uv.lock /.gitignore: -------------------------------------------------------------------------------- 1 | # Python-specific 2 | __pycache__/ 3 | *.pyc 4 | *.pyo 5 | *.pyd 6 | build/ 7 | dist/ 8 | *.egg-info/ 9 | .pytest_cache/ 10 | 11 | # Jupyter Notebook checkpoints 12 | .ipynb_checkpoints/ 13 | # Virtual environment 14 | venv/ 15 | .env 16 | .env.local 17 | .env.production 18 | .env.development 19 | .venv/ 20 | # Logs 21 | *.log 22 | *.out 23 | *.err 24 | # Coverage reports 25 | .coverage 26 | *.cover 27 | *.gcov 28 | *.lcov 29 | 30 | # Configuration files 31 | *.cfg 32 | *.ini 33 | *.json 34 | *.yaml 35 | *.yml 36 | 37 | # Documentation 38 | *.rst 39 | 40 | # OS-specific 41 | .DS_Store 42 | Thumbs.db 43 | 44 | # IDE-specific 45 | .idea/ 46 | .vscode/ 47 | .cursorrules 48 | 49 | # Testing and temporary files 50 | *.csv 51 | *.pickle 52 | *.h5 53 | *.temp 54 | 55 | # Adding 56 | !examples/indicator_plots/*.png 57 | 58 | # Error 59 | error/ 60 | 61 | 62 | HEDGE_FUND_ASSESSMENT.md 63 | HEDGE_FUND_FEATURES.md 64 | IMPROVEMENTS_SUMMARY.md 65 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to **QuantJourney Technical Indicators** will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## [Unreleased] 8 | ### Added 9 | - `numba_fallback` decorator in `_decorators.py` for robust Numba error handling. 
10 | - Fallbacks to pandas or NaN-filled outputs for all Numba-based indicator methods (`SMA`, `EMA`, `RSI`, `MACD`, `BB`, `ATR`, `STOCH`, `ADX`, `ICHIMOKU`, `KELTNER`, `MFI`, `TRIX`, `CCI`, `ROC`, `WILLR`, `DEMA`, `KAMA`, `DONCHIAN`, `AROON`, `AO`, `ULTIMATE_OSCILLATOR`, `CMO`, `DPO`, `MASS_INDEX`, `VWAP`, `SUPERTREND`, `PVO`, `HISTORICAL_VOLATILITY`, `CHAIKIN_VOLATILITY`, `LINEAR_REGRESSION_CHANNEL`, `AD`, `ALMA`, `KDJ`, `HEIKEN_ASHI`, `BETA`, `DI`, `ADOSC`, `VOLUME_INDICATORS`, `HULL_MA`, `PIVOT_POINTS`, `RAINBOW`). 11 | - Comprehensive logging for fallback events and errors in `indicators.py`. 12 | - Enhanced input validation in `_utils.py` for edge cases (empty data, NaNs, invalid columns). 13 | - Unit tests for Numba and fallback paths in `tests/test_indicators.py` (assumed). 14 | 15 | ### Changed 16 | - Moved `numba_fallback` decorator from `indicators.py` to `_decorators.py` for modularity. 17 | - Fixed Numba compilation errors in `_indicator_kernels.py` for `ALMA`, `BB`, `CMO`, `COPPOCK`, `HISTORICAL_VOLATILITY`, `HULL_MA`, `MACD`, `RAINBOW`, `ROC`, `RSI`, `SMA`. 18 | - Updated `_validate_and_get_prices` to prioritize `adj_close` and handle edge cases. 19 | - Adjusted `RAINBOW` method to handle 2D array output from `_calculate_rainbow_numba`, creating a DataFrame with SMA columns. 20 | - Improved edge-case handling in all Numba kernels to minimize fallback triggers. 21 | 22 | ### Fixed 23 | - Resolved `_cffi_backend` import issue by ensuring proper virtual environment setup (assumed user action). 24 | - Fixed column naming mismatches (`close` vs. `adj_close`) in input validation and tests. 25 | - Corrected dimensionality issues in `RAINBOW` output. 26 | 27 | ### Maintenance 28 | - indicators.py: removed stray `#"""` and an unrelated pasted DEMA comment block; eliminated false positive “unterminated docstring”. 29 | - indicators.py: `validate_market_data` now correctly returns the fixed DataFrame and uses simple forward-filling for small gaps (`limit=3`). 30 | - indicators.py: added L1 caching to `SMA` via `@cached_indicator`. 31 | - indicators.py: auto-normalize OHLCV DataFrames so methods that require `'close'` also work when only `'adj_close'` is present. 32 | - indicators.py: improved queue logging setup to avoid duplicate console handlers when enabling queue mode. 33 | - indicators.py: improved cache key in `calculate_multiple_indicators` to use content-based hashing (values + index) instead of `to_string()`. 34 | - _streaming.py: ensure per-symbol buffers respect `max_buffer_size`, fixing buffer length invariant tests. 35 | - _risk_metrics.py: better detection of price vs. return series (constant price series no longer produce non-zero max drawdown). 36 | - _performance.py: `MemoryManager.get_memory_usage` excludes the index from per-column stats to match expectations. 37 | - _utils.py: harmonized index validation error message to match tests. 38 | 39 | ## [0.2.0] - 2025-01-01 40 | ### Added 41 | - Initial release of `quantjourney_ti` with Numba-optimized technical indicators. 42 | - Support for 50+ indicators including `SMA`, `EMA`, `RSI`, `MACD`, etc. 43 | - `TechnicalIndicators` class with singleton `_TI_INSTANCE` for performance. 44 | - Basic input validation and plotting utilities in `_utils.py`. 45 | - Numba kernels in `_indicator_kernels.py` for performance-critical calculations. 46 | - Helper decorators (`timer`) in `_decorators.py`. 47 | 48 | ### Notes 49 | - This is the initial public release under the MIT License. 
50 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2025 Jakub Polec
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.md
3 | include pyproject.toml
4 | recursive-include docs *.png
5 | recursive-include quantjourney_ti *.py
6 | recursive-include examples *.py
7 | recursive-include tests *.py
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # QuantJourney Technical Indicators
2 | 
3 | **A high-performance Python library for calculating technical indicators, optimized with Numba for speed and designed for financial data analysis. This project is part of the Quantitative Infrastructure initiative by [QuantJourney](https://quantjourney.substack.com), providing robust tools for traders and researchers.**
4 | 
5 | **License**: MIT License - see [LICENSE](LICENSE) for details.
6 | **Author**: Jakub Polec ([jakub@quantjourney.pro](mailto:jakub@quantjourney.pro))
7 | **Repository**: [github.com/QuantJourneyOrg/qj_technical_indicators](https://github.com/QuantJourneyOrg/qj_technical_indicators)
8 | 
9 | ## Overview
10 | 
11 | The QuantJourney Technical Indicators library offers a comprehensive set of technical indicators for analyzing financial time series data. Key features include:
12 | - **Numba-Optimized Calculations**: Fast, JIT-compiled functions for performance-critical computations.
13 | - **Flexible API**: Supports both standalone functions and a `TechnicalIndicators` class for object-oriented usage (see the sketch below).
14 | - **Robust Error Handling**: Validates inputs and handles edge cases like NaNs and empty data.
15 | - **Visualization**: Generates individual plots for indicators, saved as PNG files in an `indicator_plots` directory.
16 | - **Integration**: Works seamlessly with `pandas` DataFrames and `yfinance` for data fetching.
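A minimal, self-contained sketch of the class-based API on synthetic data (no network access or optional extras required; the calls mirror the examples in `docs/USAGE.md`):

```python
import numpy as np
import pandas as pd
from quantjourney_ti import TechnicalIndicators

# Synthetic daily close prices
idx = pd.date_range("2024-01-01", periods=200, freq="D")
close = pd.Series(np.cumsum(np.random.normal(0, 1, 200)) + 100, index=idx, name="close")

ti = TechnicalIndicators()
sma = ti.SMA(close, 20)  # returns a pandas Series; NaN during the warmup window
rsi = ti.RSI(close, 14)
print(sma.tail(), rsi.tail(), sep="\n")
```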
17 | 18 | ### Extras and Lazy Loading 19 | 20 | - Optional extras keep the core lightweight: 21 | - `pip install .[yf]` to enable yfinance-based examples/tests 22 | - `pip install .[plot]` to enable plotting with matplotlib 23 | 24 | - Lazy JIT compile: kernels compile on first use and are cached to disk. You can opt-in to eager compile for lower first-call latency: 25 | 26 | ```python 27 | from quantjourney_ti import TechnicalIndicators 28 | ti = TechnicalIndicators(warmup=True) # pre-compiles a common subset of kernels 29 | ``` 30 | 31 | - Logging: queue logging is opt-in to avoid starting background threads on import: 32 | 33 | ```python 34 | from quantjourney_ti import start_logging_queue 35 | start_logging_queue() # enable QueueHandler + background consumer thread 36 | ``` 37 | 38 | The library is ideal for backtesting trading strategies, real-time analysis, and research, with a focus on simplicity and extensibility. 39 | 40 | ### Recent Improvements 41 | 42 | - DataFrame inputs with only `adj_close` are now auto-normalized for indicators that require a `close` column. 43 | - `SMA` gains a lightweight cache via `@cached_indicator` to speed repeat calls on unchanged inputs. 44 | - Streaming buffers now respect the constructor’s `max_buffer_size` argument. 45 | - Risk metrics correctly detect constant price series as prices (max drawdown = 0). 46 | - Validation utilities and logging were streamlined to improve robustness in mixed environments. 47 | 48 | ### Caching 49 | 50 | Some indicators support lightweight caching to avoid repeated computation on identical inputs. 51 | 52 | - The `@cached_indicator(ttl_seconds=3600)` decorator enables memoization with a time‑to‑live. 53 | - Inputs are hashed by content (values + index) to detect duplicates safely. 54 | - Example: `SMA` uses caching out of the box. 55 | 56 | Cache control utilities: 57 | 58 | ```python 59 | from quantjourney_ti import get_cache_stats, clear_indicator_cache 60 | 61 | # View cache statistics 62 | print(get_cache_stats()) 63 | 64 | # Clear all cached entries 65 | clear_indicator_cache() 66 | ``` 67 | 68 | To add caching to your own wrappers, apply `@cached_indicator` above your method. Choose an appropriate TTL for your workload. 
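For instance, a hedged sketch of a custom cached wrapper (the `cached_indicator` import path below is an assumption — adjust it to wherever the decorator is exposed in your installed version):

```python
import pandas as pd
from quantjourney_ti import cached_indicator  # assumed export; may live in a private module

@cached_indicator(ttl_seconds=600)  # memoize results for 10 minutes
def smoothed_close(close: pd.Series, period: int = 50) -> pd.Series:
    """Plain pandas rolling mean; repeated calls on identical inputs return the cached Series."""
    return close.rolling(period).mean()
```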
69 | 
70 | ## Project Structure
71 | 
72 | The repository is organized as follows:
73 | 
74 | ```
75 | qj_technical_indicators/
76 | ├── quantjourney_ti/           # Package source
77 | │   ├── __init__.py            # Package initialization and imports
78 | │   ├── _decorators.py         # Decorators for timing and fallback mechanisms
79 | │   ├── _errors.py             # Custom error classes for input validation
80 | │   ├── _indicator_kernels.py  # Numba-optimized functions for indicator calculations
81 | │   ├── _legacy_/              # Legacy code (not actively maintained)
82 | │   ├── _performance.py        # Caching, performance stats, and memory management
83 | │   ├── _risk_metrics.py       # Risk and drawdown metrics
84 | │   ├── _streaming.py          # Streaming (tick-by-tick) indicator updates
85 | │   ├── _utils.py              # Validation, plotting, and memory utilities
86 | │   ├── indicators.py          # Main API class (TechnicalIndicators) with public methods
87 | │   ├── kernels/               # Grouped Numba kernels (momentum, trend, volatility, volume)
88 | │   └── technical_indicators.py
89 | ├── docs/                      # Documentation
90 | │   ├── INDICATORS.md          # Explanation of each indicator
91 | │   └── USAGE.md               # Compact usage guide
92 | ├── examples/                  # Example scripts demonstrating usage
93 | │   ├── example_basic.py       # Basic indicator calculations
94 | │   ├── example_indicators.py  # Advanced usage with multiple indicators and plotting
95 | │   └── run_basic.py           # CLI examples (also run_channels.py, run_from_csv.py)
96 | ├── notebooks/                 # Interactive notebooks
97 | ├── tests/                     # Unit and integration tests
98 | ├── README.md                  # Project documentation (this file)
99 | ├── LICENSE                    # License details
100 | ├── pyproject.toml             # Build and packaging configuration
101 | └── requirements.txt           # Runtime dependencies
102 | ```
103 | 
104 | **Note**: A `quantjourney_ti.egg-info/` directory is generated during package installation (e.g., `pip install -e .`). It can be safely removed when not using editable mode and should be listed in `.gitignore` to keep it out of version control.
105 | 
106 | ## Installation
107 | 
108 | 1. Clone the repository:
109 | ```bash
110 | git clone https://github.com/QuantJourneyOrg/qj_technical_indicators.git
111 | cd qj_technical_indicators
112 | ```
113 | 
114 | 2. Install dependencies:
115 | ```bash
116 | pip install -r requirements.txt
117 | # optional extras
118 | pip install .[yf]
119 | pip install .[plot]
120 | ```
121 | 
122 | 3. Install the package in editable mode:
123 | ```bash
124 | pip install -e .
125 | ```
126 | 
127 | **Requirements**:
128 | - Python 3.11–3.14
129 | - Core: `numpy`, `pandas`, `numba`, `scipy`; optional extras: `yfinance` (`[yf]`), `matplotlib` (`[plot]`)
130 | 
131 | ## Usage
132 | 
133 | The library provides a `TechnicalIndicators` class for calculating indicators and saving plots. See `docs/USAGE.md` for a compact guide and `notebooks/` for interactive examples.
Example:
134 | 
135 | ```python
136 | from quantjourney_ti import TechnicalIndicators
137 | from quantjourney_ti._utils import plot_indicators
138 | import pandas as pd
139 | import yfinance as yf
140 | import numpy as np
141 | 
142 | # Fetch data (requires extra 'yf')
143 | df = yf.download("AAPL", start="2024-01-01", end="2025-02-01")
144 | if isinstance(df.columns, pd.MultiIndex):
145 |     df.columns = df.columns.get_level_values(0).str.lower().str.replace(' ', '_')
146 | else:
147 |     df.columns = df.columns.str.lower().str.replace(' ', '_')
148 | df["volume"] = df["volume"].replace(0, np.nan).ffill()
149 | 
150 | ti = TechnicalIndicators()
151 | ema = ti.EMA(df["close"], 20)
152 | rsi = ti.RSI(df["close"], 14)
153 | macd = ti.MACD(df["close"], 12, 26, 9)
154 | ```
155 | 
156 | Command-line examples:
157 | 
158 | ```bash
159 | python examples/run_basic.py --ticker AAPL --period 6mo
160 | python examples/run_channels.py --ticker AAPL --period 6mo
161 | python examples/run_from_csv.py --csv path/to/ohlcv.csv --sep ,
162 | ```
163 | 
164 | **Notebooks**:
165 | - `notebooks/01_basic_indicators.ipynb`
166 | - `notebooks/02_channels_and_bands.ipynb`
167 | - `notebooks/03_streaming_demo.ipynb`
168 | 
169 | ## 📊 Example Plot
170 | ![Technical Indicator Example](docs/technical_indicator.png)
171 | 
172 | ## Supported Indicators
173 | 
174 | The library supports 39 indicators (54 series):
175 | - **Single-Series Indicators** (21):
176 |   - SMA (Simple Moving Average)
177 |   - EMA (Exponential Moving Average)
178 |   - RSI (Relative Strength Index)
179 |   - ATR (Average True Range)
180 |   - MFI (Money Flow Index)
181 |   - TRIX
182 |   - CCI (Commodity Channel Index)
183 |   - ROC (Rate of Change)
184 |   - WILLR (Williams %R)
185 |   - DEMA (Double Exponential Moving Average)
186 |   - KAMA (Kaufman Adaptive Moving Average)
187 |   - AO (Awesome Oscillator)
188 |   - ULTIMATE_OSCILLATOR
189 |   - CMO (Chande Momentum Oscillator)
190 |   - DPO (Detrended Price Oscillator)
191 |   - MASS_INDEX
192 |   - VWAP (Volume Weighted Average Price)
193 |   - AD (Accumulation/Distribution Line)
194 |   - HULL_MA (Hull Moving Average)
195 |   - OBV (On-Balance Volume)
196 |   - RVI (Relative Vigor Index)
197 | - **Multi-Series Indicators** (18):
198 |   - MACD (MACD, Signal, Histogram)
199 |   - BB (Bollinger Bands: BB_Upper, BB_Middle, BB_Lower)
200 |   - STOCH (Stochastic Oscillator: K, D)
201 |   - ADX (Average Directional Index: ADX, +DI, -DI)
202 |   - ICHIMOKU (Tenkan-sen, Kijun-sen, Senkou Span A, Senkou Span B, Chikou Span)
203 |   - KELTNER (Keltner Channels: KC_Upper, KC_Middle, KC_Lower)
204 |   - DONCHIAN (Donchian Channels: DC_Upper, DC_Middle, DC_Lower)
205 |   - AROON (AROON_UP, AROON_DOWN, AROON_OSC)
206 |   - VOLUME_INDICATORS (Volume_SMA, Force_Index, VPT)
207 |   - PIVOT_POINTS (PP, R1, R2, S1, S2)
208 |   - RAINBOW (9 SMAs for periods 2-10)
209 |   - BETA
210 |   - DI (Directional Indicator: +DI, -DI)
211 |   - ADOSC (Chaikin A/D Oscillator)
212 |   - HEIKEN_ASHI (HA_Open, HA_High, HA_Low, HA_Close)
213 |   - BENFORD_LAW (Observed, Expected)
214 |   - MOMENTUM_INDEX (MomentumIndex, NegativeIndex)
215 |   - ELDER_RAY (BullPower, BearPower)
216 | 
217 | See `indicators.py` for the full list and parameters.
218 | 
219 | ## Development
220 | 
221 | To contribute:
222 | 1. Fork the repository and create a branch.
223 | 2. Add new indicators in `_indicator_kernels.py` with Numba optimization (see the kernel/wrapper sketch below).
224 | 3. Define public methods in `indicators.py`.
225 | 4. Update tests in `tests/`.
226 | 5. Submit a pull request.
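The kernel/wrapper split in steps 2–3 looks roughly like the sketch below. Everything here is illustrative — `_my_indicator_numba` and `MY_INDICATOR` are hypothetical names, not the library's actual internals:

```python
import numpy as np
import pandas as pd
from numba import njit

@njit(cache=True)
def _my_indicator_numba(values: np.ndarray, period: int) -> np.ndarray:
    """Rolling-mean kernel: pure NumPy in, pure NumPy out, NaN until a full window exists."""
    out = np.full(values.shape[0], np.nan)
    acc = 0.0
    for i in range(values.shape[0]):
        acc += values[i]
        if i >= period:
            acc -= values[i - period]  # drop the value leaving the window
        if i >= period - 1:
            out[i] = acc / period
    return out

def MY_INDICATOR(close: pd.Series, period: int = 14) -> pd.Series:
    """Public wrapper: convert to float64, call the kernel, re-attach the index."""
    arr = close.to_numpy(dtype=np.float64)
    return pd.Series(_my_indicator_numba(arr, period), index=close.index,
                     name=f"MY_INDICATOR_{period}")
```

Keeping the kernel free of pandas objects is what lets Numba compile it; the thin wrapper owns validation and Series construction.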
227 | 228 | **Testing**: 229 | ```bash 230 | # Core tests (no network): 231 | pytest -m "not slow" # skips network-dependent tests 232 | 233 | # Full suite including yfinance examples (requires network + optional extras): 234 | pytest 235 | ``` 236 | 237 | **Cleaning**: 238 | Remove generated files: 239 | ```bash 240 | rm -rf quantjourney_ti.egg-info dist build 241 | ``` 242 | 243 | ## Future Work 244 | 245 | - Add more indicators (e.g., PPO, Ichimoku Cloud). 246 | - Enhance plotting with customizable layouts. 247 | - Optimize Numba functions for additional edge cases. 248 | - Support real-time data feeds. 249 | 250 | ## Contact 251 | 252 | For issues or feedback, contact Jakub Polec at [jakub@quantjourney.pro](mailto:jakub@quantjourney.pro) or open an issue on GitHub. 253 | -------------------------------------------------------------------------------- /_tests/test_smoke_basic.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | from quantjourney_ti import TechnicalIndicators 5 | 6 | 7 | def _make_series(n=100, seed=1): 8 | rng = np.random.default_rng(seed) 9 | idx = pd.date_range("2024-01-01", periods=n, freq="D") 10 | base = np.cumsum(rng.normal(0, 1, n)) + 100 11 | return pd.Series(base, index=idx, name="close") 12 | 13 | 14 | def test_basic_sma_rsi_shapes(): 15 | s = _make_series() 16 | ti = TechnicalIndicators() 17 | 18 | sma = ti.SMA(s, 20) 19 | rsi = ti.RSI(s, 14) 20 | 21 | assert len(sma) == len(s) 22 | assert len(rsi) == len(s) 23 | assert sma.head(19).isna().all() 24 | assert rsi.isna().sum() >= 10 # warmup head should have NaNs 25 | 26 | -------------------------------------------------------------------------------- /_tests/test_yf_demo.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | @pytest.mark.slow 5 | def test_yf_demo_plot_optional(): 6 | try: 7 | import yfinance as yf # type: ignore 8 | except Exception: 9 | pytest.skip("yfinance not installed; install extra 'yf' to run") 10 | 11 | try: 12 | import matplotlib.pyplot as plt # noqa: F401 13 | except Exception: 14 | pytest.skip("matplotlib not installed; install extra 'plot' to run") 15 | 16 | from quantjourney_ti import TechnicalIndicators 17 | 18 | ti = TechnicalIndicators() 19 | df = yf.download("AAPL", period="3mo", progress=False) 20 | if df.empty: 21 | pytest.skip("no data returned from yfinance") 22 | 23 | df = df.rename(columns={ 24 | "Close": "close", 25 | "Open": "open", 26 | "High": "high", 27 | "Low": "low", 28 | "Volume": "volume", 29 | }) 30 | ind = { 31 | "SMA_20": ti.SMA(df["close"], 20), 32 | "RSI_14": ti.RSI(df["close"], 14), 33 | } 34 | # Plot (optional; test ensures call doesn't raise) 35 | ti.plot_indicators(df, ind, title="AAPL demo", overlay=False) 36 | 37 | -------------------------------------------------------------------------------- /docs/USAGE.md: -------------------------------------------------------------------------------- 1 | # Usage Guide 2 | 3 | ## Installation 4 | 5 | Base installation: 6 | 7 | ``` 8 | pip install . 
9 | ``` 10 | 11 | Optional extras: 12 | 13 | ``` 14 | pip install .[yf] # yfinance integration 15 | pip install .[plot] # matplotlib plotting 16 | ``` 17 | 18 | ## Basics 19 | 20 | ```python 21 | import pandas as pd, numpy as np 22 | from quantjourney_ti import TechnicalIndicators 23 | 24 | idx = pd.date_range('2024-01-01', periods=200, freq='D') 25 | close = pd.Series(np.cumsum(np.random.normal(0,1,200)) + 100, index=idx) 26 | ti = TechnicalIndicators() # or TechnicalIndicators(warmup=True) 27 | 28 | sma = ti.SMA(close, 20) 29 | ema = ti.EMA(close, 20) 30 | rsi = ti.RSI(close, 14) 31 | macd = ti.MACD(close, 12, 26, 9) 32 | ``` 33 | 34 | ## Channels and Bands 35 | 36 | ```python 37 | high = close + np.abs(np.random.normal(0.5,0.2,200)) 38 | low = close - np.abs(np.random.normal(0.5,0.2,200)) 39 | df = pd.DataFrame({'high':high,'low':low,'close':close}) 40 | 41 | bb = ti.BB(close, 20, 2.0) 42 | kc = ti.KELTNER(df[['high','low','close']], 20, 10, 2.0) 43 | dc = ti.DONCHIAN(df[['high','low']], 20) 44 | ``` 45 | 46 | ## Streaming 47 | 48 | ```python 49 | from quantjourney_ti._streaming import StreamingIndicators 50 | 51 | si = StreamingIndicators(max_buffer_size=1000) 52 | for ts, val in close.items(): 53 | si.update_tick('SYM', ts, close=float(val)) 54 | 55 | last_sma = si.states['SYM'].last_values['sma_20'] 56 | ``` 57 | 58 | ## Logging Queue (opt-in) 59 | 60 | ```python 61 | from quantjourney_ti import start_logging_queue 62 | start_logging_queue() 63 | ``` 64 | 65 | -------------------------------------------------------------------------------- /docs/technical_indicator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/docs/technical_indicator.png -------------------------------------------------------------------------------- /examples/example_basic.py: -------------------------------------------------------------------------------- 1 | """ 2 | Technical Indicators Demo Script 3 | ================================ 4 | 5 | This script demonstrates the usage of the Technical Indicators Library by fetching AAPL data from 6 | yfinance and calculating 20 top-used technical indicators, saving individual plots for each. 7 | It serves as an example of how to use the library, which is available at: 8 | https://github.com/QuantJourneyOrg/qj_technical_indicators 9 | 10 | License: MIT License - see LICENSE.md for details. 11 | 12 | For questions or feedback, contact Jakub at jakub@quantjourney.pro. 
13 | 
14 | Last Updated: June 23, 2025
15 | """
16 | 
17 | import numpy as np
18 | import pandas as pd
19 | import yfinance as yf
20 | from quantjourney_ti import TechnicalIndicators
21 | from quantjourney_ti._utils import plot_indicators
22 | import os
23 | 
24 | def fetch_data(ticker="AAPL", start="2024-01-01", end="2025-02-01"):
25 |     """Fetch data from yfinance."""
26 |     df = yf.download(ticker, start=start, end=end, progress=False, auto_adjust=False)
27 |     if df.empty:
28 |         raise ValueError("Empty DataFrame")
29 |     if isinstance(df.columns, pd.MultiIndex):  # flatten yfinance MultiIndex
30 |         df.columns = df.columns.get_level_values(0)
31 |     df.columns = df.columns.str.lower().str.replace(' ', '_')  # normalize names for flat columns too
32 |     df.index.name = df.index.name.lower() if df.index.name else 'date'
33 |     df["volume"] = df["volume"].replace(0, np.nan).ffill()  # Fix FutureWarning
34 |     return df
35 | 
36 | def calculate_indicators(df):
37 |     """Calculate 20 top-used technical indicators."""
38 |     ti = TechnicalIndicators()
39 |     indicators = [
40 |         ("SMA", lambda: ti.SMA(df["close"], period=14)),
41 |         ("EMA", lambda: ti.EMA(df["close"], period=14)),
42 |         ("RSI", lambda: ti.RSI(df["close"], period=14)),
43 |         ("WILLR", lambda: ti.WILLR(df[["high", "low", "close"]], period=14)),
44 |         ("MFI", lambda: ti.MFI(df[["high", "low", "close", "volume"]], period=14)),
45 |         ("MomentumIndex", lambda: ti.MOMENTUM_INDEX(df["close"], period=14)["MomentumIndex"]),
46 |         ("RVI", lambda: ti.RVI(df[["open", "high", "low", "close"]], period=14)),
47 |         ("AD", lambda: ti.AD(df[["high", "low", "close", "volume"]])),
48 |         ("ADOSC", lambda: ti.ADOSC(df[["high", "low", "close", "volume"]], fast_period=3, slow_period=10)),
49 |         ("ElderRay_Bull", lambda: ti.ELDER_RAY(df[["high", "low", "close"]], period=14)["BullPower"]),
50 |         ("MACD", lambda: ti.MACD(df["close"], fast_period=12, slow_period=26, signal_period=9)["MACD"]),
51 |         ("BB_Middle", lambda: ti.BB(df["close"], period=20, num_std=2.0)["BB_Middle"]),
52 |         ("ATR", lambda: ti.ATR(df[["high", "low", "close"]], period=14)),
53 |         ("STOCH_K", lambda: ti.STOCH(df[["high", "low", "close"]], k_period=14, d_period=3)["K"]),
54 |         ("CCI", lambda: ti.CCI(df[["high", "low", "close"]], period=20)),
55 |         ("ROC", lambda: ti.ROC(df["close"], period=12)),
56 |         ("OBV", lambda: ti.OBV(df[["close", "volume"]])),
57 |         ("VWAP", lambda: ti.VWAP(df[["high", "low", "close", "volume"]], period=14)),
58 |         ("DEMA", lambda: ti.DEMA(df["close"], period=14)),
59 |         ("KAMA", lambda: ti.KAMA(df["close"], er_period=10)),
60 |     ]
61 |     results = {}
62 |     for name, func in indicators:
63 |         print(f"Calculating {name}...")
64 |         try:
65 |             result = func()
66 |             results[name] = result
67 |             print(f"{name} sample (last 5):\n{result.tail(5)}\n")
68 |         except Exception as e:
69 |             print(f"Failed to calculate {name}: {str(e)}")
70 |     return results
71 | 
72 | def plot_results(df, indicators):
73 |     """Save individual plots for each indicator."""
74 |     print("Saving indicator plots...")
75 |     try:
76 |         os.makedirs("indicator_plots", exist_ok=True)
77 |         for name, result in indicators.items():
78 |             if isinstance(result, pd.Series):
79 |                 plot_indicators_dict = {name: result}
80 |             elif isinstance(result, pd.DataFrame):
81 |                 plot_indicators_dict = {name: result.iloc[:, 0]}
82 |             else:
83 |                 print(f"Skipping {name}: not a Series or DataFrame")
84 |                 continue
85 |             plot_indicators(
86 |                 df,
87 |                 plot_indicators_dict,
88 |                 title=f"{name} Indicator",
89 |                 price_col="close",
90 |                 save_path=f"indicator_plots/{name}_plot.png"
91 |             )
92 |             print(f"Saved plot for {name} to indicator_plots/{name}_plot.png")
93
| except Exception as e: 94 | print(f"Plotting failed: {str(e)}") 95 | 96 | def main(): 97 | """Main function to run the demo.""" 98 | try: 99 | df = fetch_data() 100 | indicators = calculate_indicators(df) 101 | plot_results(df, indicators) 102 | print("Demo completed successfully") 103 | for name, result in indicators.items(): 104 | try: 105 | if isinstance(result, pd.Series): 106 | last_val = result.dropna() 107 | if not last_val.empty: 108 | print(f"Last {name}: {last_val.iloc[-1]:.2f}") 109 | else: 110 | print(f"{name} has only NaN values.") 111 | elif isinstance(result, pd.DataFrame): 112 | last_row = result.dropna() 113 | if not last_row.empty: 114 | print(f"Last {name}:\n{last_row.iloc[-1]}") 115 | else: 116 | print(f"{name} DataFrame is empty after dropping NaNs.") 117 | except Exception as e: 118 | print(f"Error reporting {name}: {e}") 119 | except Exception as e: 120 | print(f"Demo failed: {str(e)}") 121 | raise 122 | 123 | if __name__ == "__main__": 124 | main() -------------------------------------------------------------------------------- /examples/example_indicators.py: -------------------------------------------------------------------------------- 1 | """ 2 | QuantJourney Technical-Indicators Example Script 3 | ============================================= 4 | 5 | This script demonstrates the usage of the Technical Indicators Library by fetching AAPL data from 6 | yfinance and calculating various technical indicators with timing measurements. It serves as an 7 | example of how to use the library, which is available at: 8 | https://github.com/QuantJourneyOrg/qj_technical_indicators 9 | 10 | License: MIT License - see LICENSE.md for details. 11 | """ 12 | 13 | import logging 14 | import os 15 | import numpy as np 16 | import pandas as pd 17 | import yfinance as yf 18 | 19 | from quantjourney_ti import TechnicalIndicators, validate_data 20 | from quantjourney_ti._decorators import timer 21 | from quantjourney_ti._utils import plot_indicators 22 | # ------------------------------------------------------------------------------ 23 | # Logging setup (clean and conflict-free) 24 | # ------------------------------------------------------------------------------ 25 | logger = logging.getLogger(__name__) 26 | logger.setLevel(logging.INFO) 27 | logger.handlers = [] # Clear old handlers 28 | 29 | handler = logging.StreamHandler() 30 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 31 | handler.setFormatter(formatter) 32 | logger.addHandler(handler) 33 | 34 | # ------------------------------------------------------------------------------ 35 | # Data fetching 36 | # ------------------------------------------------------------------------------ 37 | 38 | @timer 39 | def fetch_data(ticker="AAPL", start="2024-01-01", end="2025-02-01"): 40 | logger.info(f"Fetching {ticker} data from yfinance...") 41 | try: 42 | df = yf.download(ticker, start=start, end=end, progress=False, auto_adjust=False) 43 | except Exception as e: 44 | logger.error(f"Failed to fetch data: {str(e)}") 45 | raise 46 | 47 | if df.empty: 48 | logger.error("No data returned from yfinance") 49 | raise ValueError("Empty DataFrame") 50 | 51 | print("=== RAW df.columns ===") 52 | print(df.columns) 53 | print("=== RAW df.head() ===") 54 | print(df.head()) 55 | 56 | # Flatten yfinance MultiIndex 57 | if isinstance(df.columns, pd.MultiIndex): 58 | df.columns = df.columns.get_level_values(0).str.lower().str.replace(' ', '_') 59 | else: 60 | df.columns = df.columns.str.lower().str.replace(' ', 
'_') 61 | 62 | df = df.rename(columns={"adj close": "adj_close"}) 63 | 64 | # Pre-clean volume for MFI 65 | if "volume" in df.columns: 66 | df["volume"] = df["volume"].replace(0, np.nan).ffill() 67 | 68 | logger.info(f"DataFrame columns: {df.columns.tolist()}") 69 | 70 | required_columns = ["open", "high", "low", "close", "adj_close", "volume"] 71 | missing = [col for col in required_columns if col not in df.columns] 72 | if missing: 73 | logger.error(f"Missing required columns: {missing}") 74 | raise ValueError(f"Missing columns: {missing}") 75 | 76 | try: 77 | validate_data(df, required_columns) 78 | except Exception as e: 79 | logger.error(f"Data validation failed: {str(e)}") 80 | raise 81 | 82 | return df 83 | 84 | # ------------------------------------------------------------------------------ 85 | # Indicator calculation 86 | # ------------------------------------------------------------------------------ 87 | 88 | @timer 89 | def calculate_indicators(df): 90 | ti = TechnicalIndicators() 91 | indicators = [ 92 | ("SMA", lambda: ti.SMA(df["close"], period=14)), 93 | ("EMA", lambda: ti.EMA(df["close"], period=14)), 94 | ("RSI", lambda: ti.RSI(df["close"], period=14)), 95 | ("WILLR", lambda: ti.WILLR(df[["high", "low", "close"]], period=14)), 96 | ("MFI", lambda: ti.MFI(df[["high", "low", "close", "volume"]], period=14)), 97 | ("ElderRay", lambda: ti.ELDER_RAY(df[["high", "low", "close"]], period=14)), 98 | ] 99 | 100 | results = {} 101 | for name, func in indicators: 102 | logger.info(f"Calculating {name}...") 103 | try: 104 | result = func() 105 | results[name] = result 106 | logger.info(f"{name} sample (last 5):\n{result.tail(5)}\n") 107 | except Exception as e: 108 | logger.error(f"Failed to calculate {name}: {str(e)}") 109 | return results 110 | 111 | # ------------------------------------------------------------------------------ 112 | # Plotting 113 | # ------------------------------------------------------------------------------ 114 | 115 | @timer 116 | def plot_results(df, indicators): 117 | logger.info("Plotting indicators...") 118 | try: 119 | plot_indicators(df, indicators, price_col="close") 120 | except Exception as e: 121 | logger.error(f"Plotting failed: {str(e)}") 122 | 123 | 124 | # ------------------------------------------------------------------------------ 125 | # Main 126 | # ------------------------------------------------------------------------------ 127 | 128 | def main(): 129 | try: 130 | df = fetch_data() 131 | print(f"DataFrame shape: {df.shape}") 132 | print(f"DataFrame tail: \n{df.tail(15)}") 133 | indicators = calculate_indicators(df) 134 | plot_results(df, indicators) 135 | 136 | logger.info("Demo completed successfully") 137 | for name, result in indicators.items(): 138 | try: 139 | if isinstance(result, pd.Series): 140 | last_val = result.dropna() 141 | if not last_val.empty: 142 | logger.info(f"Last {name}: {last_val.iloc[-1]:.2f}") 143 | else: 144 | logger.warning(f"{name} has only NaN values.") 145 | elif isinstance(result, pd.DataFrame): 146 | last_row = result.dropna() 147 | if not last_row.empty: 148 | logger.info(f"Last {name}:\n{last_row.iloc[-1]}") 149 | else: 150 | logger.warning(f"{name} DataFrame is empty after dropping NaNs.") 151 | except Exception as e: 152 | logger.error(f"Error reporting {name}: {e}") 153 | except Exception as e: 154 | logger.error(f"Demo failed: {str(e)}") 155 | raise 156 | 157 | if __name__ == "__main__": 158 | main() 159 | -------------------------------------------------------------------------------- 
/examples/hedge_fund_example.py: -------------------------------------------------------------------------------- 1 | """ 2 | Hedge Fund Production Example 3 | ============================ 4 | 5 | This example demonstrates the enhanced features for hedge fund production use: 6 | - Risk metrics calculation 7 | - Streaming data processing 8 | - Performance optimization 9 | - Multi-asset batch processing 10 | - Memory management 11 | 12 | Author: Jakub Polec 13 | License: MIT 14 | """ 15 | 16 | import numpy as np 17 | import pandas as pd 18 | import yfinance as yf 19 | from datetime import datetime, timedelta 20 | import time 21 | 22 | # Import the enhanced library 23 | import quantjourney_ti as qti 24 | from quantjourney_ti import ( 25 | TechnicalIndicators, 26 | StreamingIndicators, 27 | calculate_risk_metrics, 28 | get_performance_stats, 29 | get_cache_stats, 30 | MemoryManager, 31 | BatchProcessor 32 | ) 33 | 34 | def fetch_portfolio_data(symbols=['AAPL', 'GOOGL', 'MSFT', 'TSLA', 'NVDA'], period='2y'): 35 | """Fetch data for multiple symbols.""" 36 | print(f"Fetching data for {len(symbols)} symbols...") 37 | data_dict = {} 38 | 39 | for symbol in symbols: 40 | try: 41 | df = yf.download(symbol, period=period, progress=False) 42 | if not df.empty: 43 | # Clean column names 44 | df.columns = df.columns.str.lower().str.replace(' ', '_') 45 | # Handle volume zeros 46 | df['volume'] = df['volume'].replace(0, np.nan).ffill() 47 | data_dict[symbol] = df 48 | print(f"✓ {symbol}: {len(df)} rows") 49 | else: 50 | print(f"✗ {symbol}: No data") 51 | except Exception as e: 52 | print(f"✗ {symbol}: Error - {e}") 53 | 54 | return data_dict 55 | 56 | def demonstrate_risk_metrics(): 57 | """Demonstrate comprehensive risk metrics calculation.""" 58 | print("\n" + "="*60) 59 | print("RISK METRICS DEMONSTRATION") 60 | print("="*60) 61 | 62 | # Fetch sample data 63 | aapl = yf.download('AAPL', period='2y', progress=False) 64 | spy = yf.download('SPY', period='2y', progress=False) # Benchmark 65 | 66 | # Clean data 67 | aapl.columns = aapl.columns.str.lower().str.replace(' ', '_') 68 | spy.columns = spy.columns.str.lower().str.replace(' ', '_') 69 | 70 | # Calculate comprehensive risk metrics 71 | print("Calculating risk metrics for AAPL vs SPY benchmark...") 72 | 73 | ti = TechnicalIndicators() 74 | risk_metrics = ti.RISK_METRICS( 75 | data=aapl['close'], 76 | benchmark=spy['close'], 77 | risk_free_rate=0.05, # 5% risk-free rate 78 | periods_per_year=252, 79 | confidence_level=0.05 80 | ) 81 | 82 | print("\nRisk Metrics Results:") 83 | print("-" * 40) 84 | for metric, value in risk_metrics.items(): 85 | if isinstance(value, (int, float)): 86 | if 'ratio' in metric.lower() or 'return' in metric.lower(): 87 | print(f"{metric:.<25} {value:>8.3f}") 88 | elif 'drawdown' in metric.lower() or 'var' in metric.lower(): 89 | print(f"{metric:.<25} {value:>8.2%}") 90 | else: 91 | print(f"{metric:.<25} {value:>8.4f}") 92 | else: 93 | print(f"{metric:.<25} {str(value):>15}") 94 | 95 | def demonstrate_streaming_indicators(): 96 | """Demonstrate real-time streaming indicators.""" 97 | print("\n" + "="*60) 98 | print("STREAMING INDICATORS DEMONSTRATION") 99 | print("="*60) 100 | 101 | # Get historical data for simulation 102 | df = yf.download('AAPL', period='5d', interval='1m', progress=False) 103 | df.columns = df.columns.str.lower().str.replace(' ', '_') 104 | 105 | # Create streaming setup 106 | streaming_indicators, data_feed = qti.create_streaming_setup(df, symbol='AAPL') 107 | 108 | # Set up callback to track updates 
109 | results = [] 110 | 111 | def indicator_callback(symbol, value, timestamp): 112 | results.append({ 113 | 'timestamp': timestamp, 114 | 'symbol': symbol, 115 | 'value': value 116 | }) 117 | 118 | # Register callbacks for key indicators 119 | streaming_indicators.register_callback('AAPL_sma_20', indicator_callback) 120 | streaming_indicators.register_callback('AAPL_rsi_14', indicator_callback) 121 | 122 | print("Starting streaming simulation (processing 100 ticks)...") 123 | 124 | # Simulate streaming data 125 | for i, (timestamp, row) in enumerate(df.head(100).iterrows()): 126 | indicators = streaming_indicators.update_tick( 127 | symbol='AAPL', 128 | timestamp=timestamp, 129 | open_price=row['open'], 130 | high=row['high'], 131 | low=row['low'], 132 | close=row['close'], 133 | volume=row['volume'] 134 | ) 135 | 136 | if i % 20 == 0: # Print every 20th update 137 | print(f"Tick {i:3d}: SMA={indicators.get('sma_20', np.nan):7.2f}, " 138 | f"RSI={indicators.get('rsi_14', np.nan):6.2f}, " 139 | f"MACD={indicators.get('macd', np.nan):7.4f}") 140 | 141 | print(f"\nStreaming complete. Processed {len(results)} indicator updates.") 142 | 143 | # Get final values 144 | final_values = streaming_indicators.get_current_values('AAPL') 145 | print("\nFinal Indicator Values:") 146 | for indicator, value in final_values.items(): 147 | if not np.isnan(value): 148 | print(f" {indicator}: {value:.4f}") 149 | 150 | def demonstrate_batch_processing(): 151 | """Demonstrate efficient multi-asset processing.""" 152 | print("\n" + "="*60) 153 | print("BATCH PROCESSING DEMONSTRATION") 154 | print("="*60) 155 | 156 | # Fetch portfolio data 157 | symbols = ['AAPL', 'GOOGL', 'MSFT', 'TSLA', 'NVDA'] 158 | data_dict = fetch_portfolio_data(symbols, period='1y') 159 | 160 | if not data_dict: 161 | print("No data available for batch processing demo") 162 | return 163 | 164 | ti = TechnicalIndicators() 165 | 166 | # Demonstrate batch RSI calculation 167 | print(f"\nCalculating RSI for {len(data_dict)} symbols...") 168 | start_time = time.time() 169 | 170 | rsi_results = ti.batch_calculate( 171 | data_dict=data_dict, 172 | indicator_name='RSI', 173 | period=14 174 | ) 175 | 176 | batch_time = time.time() - start_time 177 | print(f"Batch processing completed in {batch_time:.2f} seconds") 178 | 179 | # Show results 180 | print("\nRSI Results (last 5 values):") 181 | for symbol, rsi_series in rsi_results.items(): 182 | if rsi_series is not None: 183 | last_values = rsi_series.dropna().tail(5) 184 | print(f"{symbol}: {last_values.iloc[-1]:.2f} (avg: {last_values.mean():.2f})") 185 | 186 | def demonstrate_performance_optimization(): 187 | """Demonstrate performance monitoring and caching.""" 188 | print("\n" + "="*60) 189 | print("PERFORMANCE OPTIMIZATION DEMONSTRATION") 190 | print("="*60) 191 | 192 | # Get sample data 193 | df = yf.download('AAPL', period='2y', progress=False) 194 | df.columns = df.columns.str.lower().str.replace(' ', '_') 195 | 196 | ti = TechnicalIndicators() 197 | 198 | # First calculation (no cache) 199 | print("First SMA calculation (no cache)...") 200 | start_time = time.time() 201 | sma1 = ti.SMA(df['close'], period=20) 202 | first_time = time.time() - start_time 203 | 204 | # Second calculation (should use cache) 205 | print("Second SMA calculation (with cache)...") 206 | start_time = time.time() 207 | sma2 = ti.SMA(df['close'], period=20) 208 | second_time = time.time() - start_time 209 | 210 | print(f"First calculation: {first_time:.4f} seconds") 211 | print(f"Second calculation: {second_time:.4f} 
seconds") 212 | print(f"Speedup: {first_time/second_time:.1f}x") 213 | 214 | # Show cache statistics 215 | cache_stats = qti.get_cache_stats() 216 | print(f"\nCache Statistics:") 217 | print(f" Hit Rate: {cache_stats['hit_rate']:.1%}") 218 | print(f" Cache Size: {cache_stats['size']}/{cache_stats['max_size']}") 219 | 220 | # Show performance statistics 221 | perf_stats = qti.get_performance_stats('SMA') 222 | if perf_stats: 223 | print(f"\nPerformance Statistics for SMA:") 224 | print(f" Average execution time: {perf_stats['avg_execution_time']:.4f}s") 225 | print(f" Cache hit rate: {perf_stats['cache_hit_rate']:.1%}") 226 | print(f" Total calls: {perf_stats['count']}") 227 | 228 | def demonstrate_memory_management(): 229 | """Demonstrate memory optimization features.""" 230 | print("\n" + "="*60) 231 | print("MEMORY MANAGEMENT DEMONSTRATION") 232 | print("="*60) 233 | 234 | # Create large dataset 235 | print("Creating large dataset for memory optimization demo...") 236 | dates = pd.date_range('2020-01-01', '2024-01-01', freq='1min') 237 | large_df = pd.DataFrame({ 238 | 'open': np.random.randn(len(dates)).cumsum() + 100, 239 | 'high': np.random.randn(len(dates)).cumsum() + 102, 240 | 'low': np.random.randn(len(dates)).cumsum() + 98, 241 | 'close': np.random.randn(len(dates)).cumsum() + 100, 242 | 'volume': np.random.randint(1000, 10000, len(dates)) 243 | }, index=dates) 244 | 245 | # Show original memory usage 246 | original_memory = MemoryManager.get_memory_usage(large_df) 247 | print(f"Original memory usage: {original_memory['total_mb']:.2f} MB") 248 | 249 | # Optimize memory 250 | optimized_df = MemoryManager.optimize_dataframe(large_df, aggressive=False) 251 | optimized_memory = MemoryManager.get_memory_usage(optimized_df) 252 | 253 | print(f"Optimized memory usage: {optimized_memory['total_mb']:.2f} MB") 254 | print(f"Memory savings: {(1 - optimized_memory['total_mb']/original_memory['total_mb']):.1%}") 255 | 256 | # Show system resources 257 | resources = qti.get_system_resources() 258 | print(f"\nSystem Resources:") 259 | print(f" CPU Usage: {resources['cpu_percent']:.1f}%") 260 | print(f" Memory Usage: {resources['memory_percent']:.1f}%") 261 | print(f" Available Memory: {resources['memory_available_gb']:.1f} GB") 262 | print(f" Process Memory: {resources['process_memory_mb']:.1f} MB") 263 | 264 | def demonstrate_market_data_validation(): 265 | """Demonstrate enhanced market data validation.""" 266 | print("\n" + "="*60) 267 | print("MARKET DATA VALIDATION DEMONSTRATION") 268 | print("="*60) 269 | 270 | # Create problematic market data 271 | dates = pd.date_range('2024-01-01', '2024-01-10', freq='D') 272 | problematic_df = pd.DataFrame({ 273 | 'open': [100, 101, np.nan, 103, 104, 105, np.nan, 107, 108, 109], 274 | 'high': [102, 103, 104, 105, 106, 107, 108, 109, 110, 111], 275 | 'low': [99, 100, 101, 102, 103, 104, 105, 106, 107, 108], 276 | 'close': [101, 102, np.nan, 104, 105, 106, np.nan, 108, 109, 110], 277 | 'volume': [1000, 0, 1200, 0, 1400, 1500, 1600, 0, 1800, 1900] # Zero volumes 278 | }, index=dates) 279 | 280 | print("Original data issues:") 281 | print(f" NaN values: {problematic_df.isnull().sum().sum()}") 282 | print(f" Zero volumes: {(problematic_df['volume'] == 0).sum()}") 283 | 284 | ti = TechnicalIndicators() 285 | 286 | # Validate and fix data 287 | fixed_df = ti.validate_market_data( 288 | problematic_df, 289 | allow_gaps=True, 290 | fix_common_issues=True 291 | ) 292 | 293 | print("\nAfter validation and fixing:") 294 | print(f" NaN values: 
{fixed_df.isnull().sum().sum()}") 295 | print(f" Zero volumes: {(fixed_df['volume'] == 0).sum()}") 296 | print("✓ Data validation and fixing completed successfully") 297 | 298 | def main(): 299 | """Run all demonstrations.""" 300 | print("QUANTJOURNEY TECHNICAL INDICATORS") 301 | print("Hedge Fund Production Features Demo") 302 | print("=" * 60) 303 | 304 | try: 305 | # Run all demonstrations 306 | demonstrate_risk_metrics() 307 | demonstrate_streaming_indicators() 308 | demonstrate_batch_processing() 309 | demonstrate_performance_optimization() 310 | demonstrate_memory_management() 311 | demonstrate_market_data_validation() 312 | 313 | print("\n" + "="*60) 314 | print("ALL DEMONSTRATIONS COMPLETED SUCCESSFULLY!") 315 | print("="*60) 316 | 317 | # Final performance summary 318 | print("\nFinal Performance Summary:") 319 | cache_stats = qti.get_cache_stats() 320 | print(f" Total cache hits: {cache_stats['hit_count']}") 321 | print(f" Overall hit rate: {cache_stats['hit_rate']:.1%}") 322 | 323 | # Clear cache for clean exit 324 | qti.clear_indicator_cache() 325 | print(" Cache cleared for clean exit") 326 | 327 | except Exception as e: 328 | print(f"\nError during demonstration: {e}") 329 | import traceback 330 | traceback.print_exc() 331 | 332 | if __name__ == "__main__": 333 | main() -------------------------------------------------------------------------------- /examples/run_basic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Run basic indicators for a ticker (requires extra 'yf'). 4 | 5 | Usage: 6 | python examples/run_basic.py --ticker AAPL --period 6mo 7 | python examples/run_basic.py --ticker MSFT --start 2024-01-01 --end 2024-06-30 8 | """ 9 | 10 | import argparse 11 | import sys 12 | from typing import Optional 13 | 14 | import pandas as pd 15 | 16 | try: 17 | import yfinance as yf # type: ignore 18 | except Exception: 19 | print("yfinance not installed. 
Install with: pip install .[yf]", file=sys.stderr) 20 | sys.exit(1) 21 | 22 | from quantjourney_ti import TechnicalIndicators 23 | 24 | 25 | def fetch_yf(ticker: str, start: Optional[str], end: Optional[str], period: Optional[str]) -> pd.DataFrame: 26 | if period: 27 | df = yf.download(ticker, period=period, progress=False) 28 | else: 29 | df = yf.download(ticker, start=start, end=end, progress=False) 30 | if df.empty: 31 | raise SystemExit("No data returned from yfinance") 32 | # Normalize columns 33 | cols = {"Open": "open", "High": "high", "Low": "low", "Close": "close", "Adj Close": "adj_close", "Volume": "volume"} 34 | df = df.rename(columns=cols) 35 | return df 36 | 37 | 38 | def main(): 39 | ap = argparse.ArgumentParser(description="Compute basic indicators for a ticker") 40 | ap.add_argument("--ticker", required=True) 41 | ap.add_argument("--start", default=None) 42 | ap.add_argument("--end", default=None) 43 | ap.add_argument("--period", default="6mo", help="yfinance period, e.g., 1mo, 6mo, 1y") 44 | args = ap.parse_args() 45 | 46 | df = fetch_yf(args.ticker, args.start, args.end, args.period) 47 | ti = TechnicalIndicators() 48 | 49 | out = {} 50 | out["SMA_20"] = ti.SMA(df["close"], 20) 51 | out["EMA_20"] = ti.EMA(df["close"], 20) 52 | out["RSI_14"] = ti.RSI(df["close"], 14) 53 | macd = ti.MACD(df["close"], 12, 26, 9) 54 | 55 | print(f"=== {args.ticker} ===") 56 | print("SMA_20 tail:\n", out["SMA_20"].tail()) 57 | print("EMA_20 tail:\n", out["EMA_20"].tail()) 58 | print("RSI_14 tail:\n", out["RSI_14"].tail()) 59 | print("MACD tail:\n", macd.tail()) 60 | 61 | 62 | if __name__ == "__main__": 63 | main() 64 | 65 | -------------------------------------------------------------------------------- /examples/run_channels.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Compute channels/bands for a ticker (BB, Keltner, Donchian). 4 | Requires extra 'yf'. 5 | 6 | Usage: 7 | python examples/run_channels.py --ticker AAPL --period 6mo 8 | """ 9 | 10 | import argparse 11 | import sys 12 | import pandas as pd 13 | 14 | try: 15 | import yfinance as yf # type: ignore 16 | except Exception: 17 | print("yfinance not installed. 
Install with: pip install .[yf]", file=sys.stderr) 18 | sys.exit(1) 19 | 20 | from quantjourney_ti import TechnicalIndicators 21 | 22 | 23 | def normalize(df: pd.DataFrame) -> pd.DataFrame: 24 | cols = {"Open": "open", "High": "high", "Low": "low", "Close": "close", "Adj Close": "adj_close", "Volume": "volume"} 25 | return df.rename(columns=cols) 26 | 27 | 28 | def main(): 29 | ap = argparse.ArgumentParser(description="Compute bands/channels") 30 | ap.add_argument("--ticker", required=True) 31 | ap.add_argument("--period", default="6mo") 32 | args = ap.parse_args() 33 | 34 | df = yf.download(args.ticker, period=args.period, progress=False) 35 | if df.empty: 36 | raise SystemExit("No data returned from yfinance") 37 | df = normalize(df) 38 | ti = TechnicalIndicators() 39 | 40 | bb = ti.BB(df["close"], 20, 2.0) 41 | kc = ti.KELTNER(df[["high", "low", "close"]], 20, 10, 2.0) 42 | dc = ti.DONCHIAN(df[["high", "low"]], 20) 43 | 44 | print("BB tail:\n", bb.tail()) 45 | print("Keltner tail:\n", kc.tail()) 46 | print("Donchian tail:\n", dc.tail()) 47 | 48 | 49 | if __name__ == "__main__": 50 | main() 51 | 52 | -------------------------------------------------------------------------------- /examples/run_from_csv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Load OHLCV data from CSV and compute selected indicators. 4 | CSV must contain columns: open, high, low, close, volume (case-insensitive). 5 | 6 | Usage: 7 | python examples/run_from_csv.py --csv path/to/data.csv --sep , 8 | """ 9 | 10 | import argparse 11 | import sys 12 | import pandas as pd 13 | 14 | from quantjourney_ti import TechnicalIndicators 15 | 16 | 17 | def load_csv(path: str, sep: str) -> pd.DataFrame: 18 | df = pd.read_csv(path, sep=sep) 19 | # normalize column names 20 | df.columns = [c.strip().lower() for c in df.columns] 21 | required = {"open", "high", "low", "close"} 22 | if not required.issubset(set(df.columns)): 23 | raise SystemExit(f"CSV must include columns: {required}") 24 | # parse date if present 25 | for c in ("date", "datetime", "timestamp"): 26 | if c in df.columns: 27 | df[c] = pd.to_datetime(df[c]) 28 | df = df.set_index(c) 29 | break 30 | return df 31 | 32 | 33 | def main(): 34 | ap = argparse.ArgumentParser(description="Compute indicators from CSV") 35 | ap.add_argument("--csv", required=True, help="Path to CSV file") 36 | ap.add_argument("--sep", default=",", help="CSV separator") 37 | args = ap.parse_args() 38 | 39 | df = load_csv(args.csv, args.sep) 40 | ti = TechnicalIndicators() 41 | 42 | sma = ti.SMA(df["close"], 20) 43 | ema = ti.EMA(df["close"], 20) 44 | atr = ti.ATR(df[["high", "low", "close"]], 14) 45 | print("SMA_20 tail:\n", sma.tail()) 46 | print("EMA_20 tail:\n", ema.tail()) 47 | print("ATR_14 tail:\n", atr.tail() if isinstance(atr, pd.Series) else atr.iloc[:, 0].tail()) 48 | 49 | 50 | if __name__ == "__main__": 51 | main() 52 | 53 | -------------------------------------------------------------------------------- /indicator_plots/ADOSC_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/ADOSC_plot.png -------------------------------------------------------------------------------- /indicator_plots/AD_plot.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/AD_plot.png -------------------------------------------------------------------------------- /indicator_plots/ATR_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/ATR_plot.png -------------------------------------------------------------------------------- /indicator_plots/BB_Middle_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/BB_Middle_plot.png -------------------------------------------------------------------------------- /indicator_plots/CCI_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/CCI_plot.png -------------------------------------------------------------------------------- /indicator_plots/DEMA_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/DEMA_plot.png -------------------------------------------------------------------------------- /indicator_plots/EMA_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/EMA_plot.png -------------------------------------------------------------------------------- /indicator_plots/ElderRay_Bull_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/ElderRay_Bull_plot.png -------------------------------------------------------------------------------- /indicator_plots/KAMA_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/KAMA_plot.png -------------------------------------------------------------------------------- /indicator_plots/MACD_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/MACD_plot.png -------------------------------------------------------------------------------- /indicator_plots/MFI_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/MFI_plot.png -------------------------------------------------------------------------------- /indicator_plots/MomentumIndex_plot.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/MomentumIndex_plot.png -------------------------------------------------------------------------------- /indicator_plots/OBV_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/OBV_plot.png -------------------------------------------------------------------------------- /indicator_plots/ROC_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/ROC_plot.png -------------------------------------------------------------------------------- /indicator_plots/RSI_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/RSI_plot.png -------------------------------------------------------------------------------- /indicator_plots/RVI_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/RVI_plot.png -------------------------------------------------------------------------------- /indicator_plots/SMA_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/SMA_plot.png -------------------------------------------------------------------------------- /indicator_plots/STOCH_K_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/STOCH_K_plot.png -------------------------------------------------------------------------------- /indicator_plots/VWAP_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/VWAP_plot.png -------------------------------------------------------------------------------- /indicator_plots/WILLR_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/QuantJourneyOrg/qj_technical_indicators/2b944b9e281d4166a34392e2bc25677cf1a36f6b/indicator_plots/WILLR_plot.png -------------------------------------------------------------------------------- /notebooks/01_basic_indicators.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Basic Indicators Demo\n", 8 | "This notebook demonstrates simple usage of the TechnicalIndicators API." 
9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": null, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "import numpy as np\n", 18 | "import pandas as pd\n", 19 | "from quantjourney_ti import TechnicalIndicators\n", 20 | "\n", 21 | "# Synthetic close series\n", 22 | "idx = pd.date_range('2024-01-01', periods=200, freq='D')\n", 23 | "close = pd.Series(np.cumsum(np.random.normal(0,1,200)) + 100, index=idx)\n", 24 | "\n", 25 | "ti = TechnicalIndicators()\n", 26 | "sma20 = ti.SMA(close, 20)\n", 27 | "ema20 = ti.EMA(close, 20)\n", 28 | "rsi14 = ti.RSI(close, 14)\n", 29 | "macd = ti.MACD(close, 12, 26, 9)\n", 30 | "sma20.tail(), ema20.tail(), rsi14.tail(), macd.tail()" 31 | ] 32 | } 33 | ], 34 | "metadata": { 35 | "kernelspec": { 36 | "display_name": "Python 3", 37 | "language": "python", 38 | "name": "python3" 39 | }, 40 | "language_info": { 41 | "name": "python", 42 | "version": "3.x" 43 | } 44 | }, 45 | "nbformat": 4, 46 | "nbformat_minor": 5 47 | } 48 | -------------------------------------------------------------------------------- /notebooks/02_channels_and_bands.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Channels and Bands\n", 8 | "Bollinger Bands, Keltner Channels, and Donchian Channels" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": null, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "import numpy as np, pandas as pd\n", 18 | "from quantjourney_ti import TechnicalIndicators\n", 19 | "\n", 20 | "n=250\n", 21 | "idx = pd.date_range('2024-01-01', periods=n, freq='D')\n", 22 | "close = np.cumsum(np.random.normal(0,1,n)) + 100\n", 23 | "high = close + np.abs(np.random.normal(0.5,0.2,n))\n", 24 | "low = close - np.abs(np.random.normal(0.5,0.2,n))\n", 25 | "df = pd.DataFrame({'high':high,'low':low,'close':close}, index=idx)\n", 26 | "\n", 27 | "ti = TechnicalIndicators()\n", 28 | "bb = ti.BB(df['close'], 20, 2.0)\n", 29 | "kc = ti.KELTNER(df[['high','low','close']], 20, 10, 2.0)\n", 30 | "dc = ti.DONCHIAN(df[['high','low']], 20)\n", 31 | "bb.tail(), kc.tail(), dc.tail()" 32 | ] 33 | } 34 | ], 35 | "metadata": { 36 | "kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"}, 37 | "language_info": {"name": "python", "version": "3.x"} 38 | }, 39 | "nbformat": 4, 40 | "nbformat_minor": 5 41 | } 42 | -------------------------------------------------------------------------------- /notebooks/03_streaming_demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | {"cell_type": "markdown", "metadata": {}, "source": ["# Streaming Indicators Demo"]}, 4 | {"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ 5 | "import numpy as np, pandas as pd\n", 6 | "from quantjourney_ti import TechnicalIndicators\n", 7 | "from quantjourney_ti._streaming import StreamingIndicators\n", 8 | "\n", 9 | "ti = TechnicalIndicators()\n", 10 | "si = StreamingIndicators(max_buffer_size=1000)\n", 11 | "\n", 12 | "idx = pd.date_range('2024-01-01', periods=200, freq='D')\n", 13 | "series = pd.Series(np.cumsum(np.random.normal(0,1,200)) + 100, index=idx)\n", 14 | "for ts, val in series.items():\n", 15 | " si.update_tick('SYM', ts, close=float(val))\n", 16 | "\n", 17 | "batch_sma = ti.SMA(series, 20).iloc[-1]\n", 18 | "stream_sma = si.states['SYM'].last_values['sma_20']\n", 19 | "batch_sma, 
stream_sma" 20 | ]} 21 | ], 22 | "metadata": {"kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"}, "language_info": {"name": "python", "version": "3.x"}}, 23 | "nbformat": 4, 24 | "nbformat_minor": 5 25 | } 26 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools<75", "wheel", "build"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "quantjourney-ti" 7 | version = "0.3.1" 8 | description = "Comprehensive technical indicators for financial data." 9 | readme = "README.md" 10 | requires-python = ">=3.11,<3.15" 11 | authors = [{ name = "Jakub Polec", email = "jakub@quantjourney.pro" }] 12 | license = { file = "LICENSE" } # OK with setuptools<75 13 | keywords = ["financial", "technical analysis", "indicators", "quant", "numba"] 14 | classifiers = [ 15 | "License :: OSI Approved :: MIT License", 16 | "Development Status :: 3 - Alpha", 17 | "Intended Audience :: Developers", 18 | "Intended Audience :: Financial and Insurance Industry", 19 | "Programming Language :: Python :: 3 :: Only", 20 | "Programming Language :: Python :: 3.11", 21 | "Programming Language :: Python :: 3.12", 22 | "Programming Language :: Python :: 3.13", 23 | "Operating System :: OS Independent", 24 | "Topic :: Office/Business :: Financial :: Investment", 25 | "Topic :: Software Development :: Libraries :: Python Modules", 26 | ] 27 | dependencies = [ 28 | "numpy>=1.26.0", 29 | "pandas>=2.0.0,<3.0", 30 | "numba>=0.59.0", 31 | "scipy>=1.11.0,<2.0", 32 | ] 33 | 34 | [project.urls] 35 | Homepage = "https://quantjourney.substack.com" 36 | Repository = "https://github.com/QuantJourneyOrg/qj_technical_indicators" 37 | "Bug Tracker" = "https://github.com/QuantJourneyOrg/qj_technical_indicators/issues" 38 | 39 | [tool.setuptools] # ← correct table 40 | license-files = [] # ← disables emitting License-File header 41 | 42 | [tool.setuptools.packages.find] # ← keep package discovery here only 43 | where = ["."] 44 | include = ["quantjourney_ti*"] 45 | 46 | [tool.pytest.ini_options] 47 | asyncio_default_fixture_loop_scope = "function" 48 | addopts = "--strict-markers --import-mode=importlib -v" 49 | pythonpath = ["."] 50 | testpaths = ["tests", "_tests"] 51 | python_files = ["test_*.py"] 52 | markers = ["slow: marks tests as slow (deselect with '-m \"not slow\"')"] 53 | 54 | [tool.black] 55 | line-length = 88 56 | target-version = ['py311'] 57 | 58 | [tool.isort] 59 | profile = "black" 60 | line_length = 88 61 | 62 | [tool.mypy] 63 | python_version = "3.11" 64 | strict = true 65 | ignore_missing_imports = true 66 | -------------------------------------------------------------------------------- /quantjourney_ti/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | QuantJourney Technical-Indicators 3 | ================================= 4 | Convenience import layer for the **quantjourney_ti** package. It re-exports: 5 | 6 | • `TechnicalIndicators` - High-level class offering 60+ indicator methods. 7 | • `timer` - Decorator for benchmarking function execution time. 8 | • `numba_fallback` - Decorator for Numba fallback to pandas/numpy. 9 | • Selected helper functions from `_utils` for validation, analysis, and plotting. 
10 | 11 | Most projects need only:: 12 | 13 | import quantjourney_ti as ti 14 | df['sma'] = ti.sma(df['close'], 20) 15 | 16 | Under the hood, `sma`, `ema`, etc., are thin wrappers around a shared 17 | `TechnicalIndicators` singleton to avoid re-compiling Numba kernels. 18 | 19 | Example usage 20 | ------------- 21 | Flat helpers (quick one-liners):: 22 | 23 | import quantjourney_ti as ti 24 | df["sma"] = ti.sma(df["close"], 20) 25 | 26 | Full flexibility via the class:: 27 | 28 | from quantjourney_ti import TechnicalIndicators 29 | ti = TechnicalIndicators() 30 | df["atr"] = ti.ATR(ohlc_df, 14) 31 | 32 | Power-user shortcut (shared singleton):: 33 | 34 | import quantjourney_ti.indicators as ind 35 | ind._TI_INSTANCE.ATR(ohlc_df, 14) # Same object, no extra compile 36 | 37 | Notes: 38 | - Compatible with pandas 2.x (uses `pandas.api.types.is_any_real_numeric_dtype`). 39 | - Requires `numpy`, `pandas`, and `numba`. `matplotlib` is optional for plotting. 40 | - Logging uses thread-safe `QueueHandler`. Ensure a queue processing thread is active. 41 | 42 | Author: Jakub Polec 43 | License: MIT 44 | """ 45 | 46 | from __future__ import annotations 47 | 48 | from .indicators import TechnicalIndicators, start_logging_queue 49 | from ._decorators import timer, numba_fallback 50 | from ._utils import ( 51 | validate_data, 52 | validate_and_get_prices, 53 | validate_window, 54 | detect_divergence, 55 | detect_crossovers, 56 | plot_indicators, 57 | optimize_memory, 58 | ) 59 | from ._risk_metrics import calculate_risk_metrics 60 | from ._streaming import StreamingIndicators, StreamingDataFeed, create_streaming_setup 61 | from ._performance import ( 62 | cached_indicator, 63 | profile_performance, 64 | clear_indicator_cache, 65 | get_cache_stats, 66 | get_performance_stats, 67 | get_system_resources, 68 | MemoryManager, 69 | BatchProcessor 70 | ) 71 | 72 | # Initialize shared singleton 73 | _TI_INSTANCE = TechnicalIndicators() 74 | 75 | # Public exports 76 | __all__ = [ 77 | # Core classes 78 | "TechnicalIndicators", 79 | "start_logging_queue", 80 | "StreamingIndicators", 81 | "StreamingDataFeed", 82 | "MemoryManager", 83 | "BatchProcessor", 84 | 85 | # Decorators 86 | "timer", 87 | "numba_fallback", 88 | "cached_indicator", 89 | "profile_performance", 90 | 91 | # Validation and utilities 92 | "validate_data", 93 | "validate_and_get_prices", 94 | "validate_window", 95 | "detect_divergence", 96 | "detect_crossovers", 97 | "plot_indicators", 98 | "optimize_memory", 99 | 100 | # Risk and performance 101 | "calculate_risk_metrics", 102 | "create_streaming_setup", 103 | "clear_indicator_cache", 104 | "get_cache_stats", 105 | "get_performance_stats", 106 | "get_system_resources", 107 | 108 | # Metadata 109 | "__author__", 110 | "__email__", 111 | "__url__", 112 | "__version__", 113 | ] 114 | 115 | # Metadata 116 | __author__ = "Jakub Polec" 117 | __email__ = "jakub@quantjourney.pro" 118 | __url__ = "https://quantjourney.substack.com" 119 | __version__ = "0.3.1" 120 | 121 | # Dynamic wrappers for TechnicalIndicators methods 122 | for _name in dir(TechnicalIndicators): 123 | if _name.startswith("_") or not callable(getattr(TechnicalIndicators, _name)): 124 | continue 125 | globals()[_name.lower()] = getattr(_TI_INSTANCE, _name) 126 | __all__.append(_name.lower()) 127 | 128 | # Dynamic wrappers for utility functions 129 | # Use globals() to access already imported functions 130 | _util_functions = [ 131 | "validate_data", 132 | "validate_and_get_prices", 133 | "validate_window", 134 | "detect_divergence", 135 | 
"detect_crossovers", 136 | "plot_indicators", 137 | "optimize_memory", 138 | ] 139 | for _name in _util_functions: 140 | globals()[_name] = globals()[_name] 141 | __all__.append(_name) 142 | -------------------------------------------------------------------------------- /quantjourney_ti/_decorators.py: -------------------------------------------------------------------------------- 1 | """ 2 | QuantJourney Technical-Indicators - Decorators 3 | ============================================== 4 | 5 | Light-weight decorator utilities used throughout the QuantJourney 6 | Technical-Indicators codebase. They are kept in a dedicated module to avoid 7 | importing heavy numerical packages (NumPy, Numba, Pandas) just for helper 8 | decorators. 9 | 10 | Currently provided: 11 | - timer 12 | Measures a function's wall-clock run-time and emits a log entry at the INFO 13 | level. Useful for benchmarking indicator kernels or spotting slow data pipelines. 14 | - numba_fallback 15 | Handles Numba errors by falling back to a specified function (e.g., Pandas 16 | implementation), logging the fallback event as JSON via IndicatorCalculationError. 17 | Ensures robustness in Numba-optimized indicator methods. 18 | 19 | Example: 20 | from quantjourney_ti.decorators import timer, numba_fallback 21 | import logging 22 | logging.basicConfig(level=logging.INFO) 23 | 24 | @timer 25 | def slow_add(a, b): 26 | import time; time.sleep(0.5); return a + b 27 | 28 | slow_add(2, 3) 29 | # INFO:quantjourney_ti.decorators:Finished slow_add in 0.5001 seconds 30 | # 5 31 | 32 | def pandas_fallback(self, data): 33 | return data.mean() 34 | 35 | @numba_fallback(pandas_fallback) 36 | def numba_mean(self, data): 37 | raise ValueError("Numba failed") 38 | 39 | class Example: 40 | def mean(self, data): return numba_mean(self, data) 41 | 42 | Example().mean([1, 2, 3]) 43 | # WARNING:quantjourney_ti.decorators:{"type": "IndicatorCalculationError", ...} 44 | # 2.0 45 | 46 | Author: Jakub Polec 47 | License: MIT 48 | """ 49 | 50 | import os 51 | import time 52 | import logging 53 | from functools import wraps 54 | from typing import Callable, TypeVar 55 | 56 | from ._errors import IndicatorCalculationError 57 | 58 | T = TypeVar("T") 59 | 60 | logger = logging.getLogger(__name__) 61 | log_level = os.getenv("QUANTJOURNEY_LOG_LEVEL", "INFO").upper() 62 | logger.setLevel(getattr(logging, log_level, logging.INFO)) 63 | 64 | if not logger.handlers: 65 | handler = logging.StreamHandler() 66 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 67 | handler.setFormatter(formatter) 68 | logger.addHandler(handler) 69 | 70 | 71 | def timer(func: Callable[..., T]) -> Callable[..., T]: 72 | """ 73 | Decorator to measure and log the execution time of a function. 74 | Logs the function name and execution time in seconds at the INFO level. 75 | 76 | Args: 77 | func: Function to decorate. 78 | 79 | Returns: 80 | Callable: Wrapped function that logs execution time and returns original result. 81 | """ 82 | @wraps(func) 83 | def wrapper(*args, **kwargs) -> T: 84 | start_time = time.perf_counter() 85 | result = func(*args, **kwargs) 86 | end_time = time.perf_counter() 87 | run_time = end_time - start_time 88 | logger.info(f"Finished {func.__name__} in {run_time:.4f} seconds") 89 | return result 90 | return wrapper 91 | 92 | 93 | def numba_fallback(fallback_fn: Callable[..., T]) -> Callable[..., Callable[..., T]]: 94 | """ 95 | Decorator to handle Numba errors with a fallback function. 
96 | Attempts the Numba-optimized calculation, falling back to the provided function 97 | if a Numba-related error occurs. Logs the error as JSON using IndicatorCalculationError 98 | at the WARNING level for fallbacks or ERROR level for re-raised exceptions. 99 | 100 | Args: 101 | fallback_fn: Function to call if Numba fails, with the same signature as the decorated method. 102 | 103 | Returns: 104 | Callable: Decorated method that attempts Numba calculation and falls back if needed. 105 | """ 106 | def decorator(method: Callable[..., T]) -> Callable[..., T]: 107 | @wraps(method) 108 | def wrapper(*args, **kwargs) -> T: 109 | try: 110 | return method(*args, **kwargs) 111 | except (ValueError, TypeError) as e: 112 | error = IndicatorCalculationError( 113 | indicator=method.__name__, 114 | message=str(e), 115 | context={"args": str(args[1:]), "kwargs": kwargs} 116 | ) 117 | logger.warning(error.to_json()) 118 | return fallback_fn(*args, **kwargs) 119 | except Exception as e: 120 | error = IndicatorCalculationError( 121 | indicator=method.__name__, 122 | message=str(e), 123 | context={"args": str(args[1:]), "kwargs": kwargs} 124 | ) 125 | logger.error(error.to_json()) 126 | raise 127 | return wrapper 128 | return decorator 129 | -------------------------------------------------------------------------------- /quantjourney_ti/_errors.py: -------------------------------------------------------------------------------- 1 | """ 2 | QuantJourney Technical-Indicators - Errors 3 | ========================================= 4 | Custom exceptions that provide rich context when indicator calculations or input validations fail. 5 | 6 | Author: Jakub Polec 7 | License: MIT 8 | """ 9 | from typing import Dict, Any, Optional 10 | import json 11 | from datetime import datetime 12 | 13 | 14 | def _serialize_context(context: Dict[str, Any]) -> Dict[str, Any]: 15 | """ 16 | Convert non-serializable objects in context to strings for JSON serialization. 17 | 18 | Args: 19 | context: Dictionary containing context data. 20 | 21 | Returns: 22 | Dictionary with all values converted to JSON-serializable types. 23 | """ 24 | serialized = {} 25 | for key, value in context.items(): 26 | try: 27 | json.dumps(value) 28 | serialized[key] = value 29 | except TypeError: 30 | serialized[key] = str(value) 31 | return serialized 32 | 33 | 34 | class IndicatorCalculationError(Exception): 35 | """ 36 | Custom exception for errors during indicator calculations. 37 | 38 | Attributes: 39 | indicator: Name of the indicator that failed. 40 | message: Error message describing the failure. 41 | context: Additional context (e.g., symbol, parameters). 42 | """ 43 | def __init__(self, indicator: str, message: str, context: Optional[Dict[str, Any]] = None): 44 | """ 45 | Initialize the exception. 46 | 47 | Args: 48 | indicator: Name of the indicator (e.g., 'SMA', 'RSI'). 49 | message: Description of the error. 50 | context: Optional dictionary with additional context (e.g., {'symbol': 'AAPL', 'period': 20}). 51 | """ 52 | self.indicator = indicator 53 | self.message = message 54 | self.context = context or {} 55 | super().__init__(f"Indicator '{indicator}' failed: {message}") 56 | 57 | def to_json(self) -> Dict[str, Any]: 58 | """ 59 | Convert the error to a JSON-serializable dictionary. 60 | 61 | Returns: 62 | Dictionary with indicator, message, context, and timestamp. 
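Example (doctest-style illustration; the payload mirrors the dictionary built below)::

    >>> err = IndicatorCalculationError("SMA", "window longer than series", {"period": 500})
    >>> payload = err.to_json()
    >>> payload["type"], payload["indicator"]
    ('IndicatorCalculationError', 'SMA')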
63 | """ 64 | return { 65 | "type": "IndicatorCalculationError", 66 | "indicator": self.indicator, 67 | "message": self.message, 68 | "context": _serialize_context(self.context), 69 | "timestamp": datetime.now().isoformat() 70 | } 71 | 72 | 73 | class InvalidInputError(Exception): 74 | """ 75 | Custom exception for invalid input data during indicator calculations or validations. 76 | 77 | Attributes: 78 | message: Error message describing the invalid input. 79 | context: Additional context (e.g., column names, data shape). 80 | """ 81 | def __init__(self, message: str, context: Optional[Dict[str, Any]] = None): 82 | """ 83 | Initialize the exception. 84 | 85 | Args: 86 | message: Description of the invalid input. 87 | context: Optional dictionary with additional context (e.g., {'missing_columns': ['high', 'low']}). 88 | """ 89 | self.message = message 90 | self.context = context or {} 91 | super().__init__(message) 92 | 93 | def to_json(self) -> Dict[str, Any]: 94 | """ 95 | Convert the error to a JSON-serializable dictionary. 96 | 97 | Returns: 98 | Dictionary with error type, message, context, and timestamp. 99 | """ 100 | return { 101 | "type": "InvalidInputError", 102 | "message": self.message, 103 | "context": _serialize_context(self.context), 104 | "timestamp": datetime.now().isoformat() 105 | } -------------------------------------------------------------------------------- /quantjourney_ti/_performance.py: -------------------------------------------------------------------------------- 1 | """ 2 | QuantJourney Technical-Indicators - Performance Optimizations 3 | ============================================================ 4 | Caching, memory management, and performance utilities for hedge fund production use. 5 | 6 | Author: Jakub Polec 7 | License: MIT 8 | """ 9 | 10 | from __future__ import annotations 11 | 12 | import numpy as np 13 | import pandas as pd 14 | from typing import Dict, Any, Optional, Callable, Tuple, Union 15 | from functools import lru_cache, wraps 16 | import hashlib 17 | import pickle 18 | import threading 19 | import time 20 | import psutil 21 | import gc 22 | from dataclasses import dataclass 23 | from collections import defaultdict 24 | 25 | # Performance monitoring 26 | @dataclass 27 | class PerformanceMetrics: 28 | """Container for performance metrics.""" 29 | function_name: str 30 | execution_time: float 31 | memory_usage_mb: float 32 | cache_hit: bool 33 | input_size: int 34 | timestamp: float 35 | 36 | class PerformanceMonitor: 37 | """Monitor and track performance metrics.""" 38 | 39 | def __init__(self, max_history: int = 1000): 40 | self.metrics_history = [] 41 | self.max_history = max_history 42 | self.lock = threading.Lock() 43 | 44 | def record_metric(self, metric: PerformanceMetrics): 45 | """Record a performance metric.""" 46 | with self.lock: 47 | self.metrics_history.append(metric) 48 | if len(self.metrics_history) > self.max_history: 49 | self.metrics_history.pop(0) 50 | 51 | def get_stats(self, function_name: str = None) -> Dict[str, Any]: 52 | """Get performance statistics.""" 53 | with self.lock: 54 | if function_name: 55 | metrics = [m for m in self.metrics_history if m.function_name == function_name] 56 | else: 57 | metrics = self.metrics_history 58 | 59 | if not metrics: 60 | return {} 61 | 62 | execution_times = [m.execution_time for m in metrics] 63 | memory_usage = [m.memory_usage_mb for m in metrics] 64 | cache_hits = sum(1 for m in metrics if m.cache_hit) 65 | 66 | return { 67 | 'count': len(metrics), 68 | 'avg_execution_time': 
np.mean(execution_times), 69 | 'max_execution_time': np.max(execution_times), 70 | 'min_execution_time': np.min(execution_times), 71 | 'avg_memory_usage_mb': np.mean(memory_usage), 72 | 'cache_hit_rate': cache_hits / len(metrics) if metrics else 0, 73 | 'total_cache_hits': cache_hits 74 | } 75 | 76 | # Global performance monitor 77 | _performance_monitor = PerformanceMonitor() 78 | 79 | def get_performance_stats(function_name: str = None) -> Dict[str, Any]: 80 | """Get performance statistics for monitoring.""" 81 | return _performance_monitor.get_stats(function_name) 82 | 83 | # Advanced caching system 84 | class IndicatorCache: 85 | """Advanced caching system for technical indicators.""" 86 | 87 | def __init__(self, max_size: int = 1000, ttl_seconds: int = 3600): 88 | self.cache = {} 89 | self.access_times = {} 90 | self.max_size = max_size 91 | self.ttl_seconds = ttl_seconds 92 | self.lock = threading.RLock() 93 | self.hit_count = 0 94 | self.miss_count = 0 95 | 96 | def _generate_key(self, func_name: str, args: tuple, kwargs: dict) -> str: 97 | """Generate cache key from function arguments.""" 98 | # Convert pandas objects to hashable representations 99 | hashable_args = [] 100 | for arg in args: 101 | if isinstance(arg, (pd.Series, pd.DataFrame)): 102 | # Use shape, dtypes, and hash of first/last few values 103 | if len(arg) > 10: 104 | sample = pd.concat([arg.head(5), arg.tail(5)]) 105 | else: 106 | sample = arg 107 | if isinstance(arg, pd.Series): 108 | dtypes_repr = str(arg.dtype) 109 | else: 110 | # DataFrame: tuple of dtype strings in column order 111 | dtypes_repr = tuple(str(dt) for dt in arg.dtypes.values) 112 | try: 113 | # Prefer content-based stable hash 114 | if isinstance(sample, pd.Series): 115 | hv = pd.util.hash_pandas_object(sample, index=True).values 116 | content_hash = int(hashlib.md5(hv.tobytes()).hexdigest(), 16) 117 | else: 118 | # DataFrame: combine column-wise hashes 119 | col_hashes = [] 120 | for col in sample.columns: 121 | hv = pd.util.hash_pandas_object(sample[col], index=True).values 122 | col_hashes.append(hashlib.md5(hv.tobytes()).digest()) 123 | content_hash = int(hashlib.md5(b"".join(col_hashes)).hexdigest(), 16) 124 | except Exception: 125 | # Fallback to plain value sampling 126 | content_hash = hash(tuple(sample.values.flatten())) 127 | hashable_args.append((arg.shape, dtypes_repr, content_hash)) 128 | else: 129 | hashable_args.append(arg) 130 | 131 | # Create hash from function name, args, and kwargs 132 | key_data = (func_name, tuple(hashable_args), tuple(sorted(kwargs.items()))) 133 | key_str = str(key_data) 134 | return hashlib.md5(key_str.encode()).hexdigest() 135 | 136 | def get(self, key: str) -> Optional[Any]: 137 | """Get value from cache.""" 138 | with self.lock: 139 | if key not in self.cache: 140 | self.miss_count += 1 141 | return None 142 | 143 | # Check TTL 144 | if time.time() - self.access_times[key] > self.ttl_seconds: 145 | del self.cache[key] 146 | del self.access_times[key] 147 | self.miss_count += 1 148 | return None 149 | 150 | self.access_times[key] = time.time() 151 | self.hit_count += 1 152 | return self.cache[key] 153 | 154 | def set(self, key: str, value: Any): 155 | """Set value in cache.""" 156 | with self.lock: 157 | # Evict oldest entries if cache is full 158 | if len(self.cache) >= self.max_size: 159 | oldest_key = min(self.access_times.keys(), key=lambda k: self.access_times[k]) 160 | del self.cache[oldest_key] 161 | del self.access_times[oldest_key] 162 | 163 | self.cache[key] = value 164 | self.access_times[key] 
= time.time() 165 | 166 | def clear(self): 167 | """Clear cache.""" 168 | with self.lock: 169 | self.cache.clear() 170 | self.access_times.clear() 171 | self.hit_count = 0 172 | self.miss_count = 0 173 | 174 | def get_stats(self) -> Dict[str, Any]: 175 | """Get cache statistics.""" 176 | with self.lock: 177 | total_requests = self.hit_count + self.miss_count 178 | return { 179 | 'size': len(self.cache), 180 | 'max_size': self.max_size, 181 | 'hit_count': self.hit_count, 182 | 'miss_count': self.miss_count, 183 | 'hit_rate': self.hit_count / total_requests if total_requests > 0 else 0, 184 | 'ttl_seconds': self.ttl_seconds 185 | } 186 | 187 | # Global cache instance 188 | _indicator_cache = IndicatorCache() 189 | 190 | def cached_indicator(ttl_seconds: int = 3600): 191 | """ 192 | Decorator for caching indicator calculations. 193 | 194 | Args: 195 | ttl_seconds: Time to live for cached results 196 | """ 197 | def decorator(func: Callable) -> Callable: 198 | @wraps(func) 199 | def wrapper(*args, **kwargs): 200 | # Generate cache key 201 | cache_key = _indicator_cache._generate_key(func.__name__, args, kwargs) 202 | 203 | # Try to get from cache 204 | cached_result = _indicator_cache.get(cache_key) 205 | if cached_result is not None: 206 | return cached_result 207 | 208 | # Calculate and cache result 209 | start_time = time.time() 210 | process = psutil.Process() 211 | memory_before = process.memory_info().rss / 1024 / 1024 # MB 212 | 213 | result = func(*args, **kwargs) 214 | 215 | execution_time = time.time() - start_time 216 | memory_after = process.memory_info().rss / 1024 / 1024 # MB 217 | memory_usage = memory_after - memory_before 218 | 219 | # Determine input size 220 | input_size = 0 221 | for arg in args: 222 | if isinstance(arg, (pd.Series, pd.DataFrame)): 223 | input_size = max(input_size, len(arg)) 224 | 225 | # Record performance metrics 226 | metric = PerformanceMetrics( 227 | function_name=func.__name__, 228 | execution_time=execution_time, 229 | memory_usage_mb=memory_usage, 230 | cache_hit=False, 231 | input_size=input_size, 232 | timestamp=time.time() 233 | ) 234 | _performance_monitor.record_metric(metric) 235 | 236 | # Cache result 237 | _indicator_cache.set(cache_key, result) 238 | 239 | return result 240 | 241 | return wrapper 242 | return decorator 243 | 244 | def clear_indicator_cache(): 245 | """Clear the indicator cache.""" 246 | _indicator_cache.clear() 247 | 248 | def get_cache_stats() -> Dict[str, Any]: 249 | """Get cache statistics.""" 250 | return _indicator_cache.get_stats() 251 | 252 | # Memory optimization utilities 253 | class MemoryManager: 254 | """Memory management utilities for large datasets.""" 255 | 256 | @staticmethod 257 | def optimize_dataframe(df: pd.DataFrame, aggressive: bool = False) -> pd.DataFrame: 258 | """ 259 | Optimize DataFrame memory usage. 
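Object columns where fewer than half the values are unique become 'category'; integer and float columns are downcast to the smallest dtype whose range still covers the observed min/max (float16 is attempted only when `aggressive=True`).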
260 | 261 | Args: 262 | df: Input DataFrame 263 | aggressive: Use more aggressive optimization (may lose precision) 264 | """ 265 | optimized = df.copy() 266 | 267 | for col in optimized.columns: 268 | col_type = optimized[col].dtype 269 | 270 | if col_type == 'object': 271 | # Try to convert to category if many repeats 272 | if optimized[col].nunique() / len(optimized) < 0.5: 273 | optimized[col] = optimized[col].astype('category') 274 | 275 | elif 'int' in str(col_type): 276 | c_min = optimized[col].min() 277 | c_max = optimized[col].max() 278 | 279 | if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max: 280 | optimized[col] = optimized[col].astype(np.int8) 281 | elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max: 282 | optimized[col] = optimized[col].astype(np.int16) 283 | elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max: 284 | optimized[col] = optimized[col].astype(np.int32) 285 | 286 | elif 'float' in str(col_type): 287 | c_min = optimized[col].min() 288 | c_max = optimized[col].max() 289 | 290 | if aggressive: 291 | # More aggressive - use float16 if possible 292 | if (c_min > np.finfo(np.float16).min and 293 | c_max < np.finfo(np.float16).max): 294 | optimized[col] = optimized[col].astype(np.float16) 295 | elif (c_min > np.finfo(np.float32).min and 296 | c_max < np.finfo(np.float32).max): 297 | optimized[col] = optimized[col].astype(np.float32) 298 | else: 299 | # Conservative - only use float32 300 | if (c_min > np.finfo(np.float32).min and 301 | c_max < np.finfo(np.float32).max): 302 | optimized[col] = optimized[col].astype(np.float32) 303 | 304 | return optimized 305 | 306 | @staticmethod 307 | def get_memory_usage(obj) -> Dict[str, float]: 308 | """Get memory usage statistics for an object.""" 309 | if isinstance(obj, pd.DataFrame): 310 | # Exclude index from per-column stats to match test expectations 311 | per_col = obj.memory_usage(index=False, deep=True) 312 | return { 313 | 'total_mb': per_col.sum() / 1024 / 1024, 314 | 'per_column_mb': (per_col / 1024 / 1024).to_dict() 315 | } 316 | elif isinstance(obj, pd.Series): 317 | memory_usage = obj.memory_usage(deep=True) 318 | return { 319 | 'total_mb': memory_usage / 1024 / 1024 320 | } 321 | else: 322 | import sys 323 | return { 324 | 'total_mb': sys.getsizeof(obj) / 1024 / 1024 325 | } 326 | 327 | @staticmethod 328 | def force_garbage_collection(): 329 | """Force garbage collection and return memory freed.""" 330 | import gc 331 | before = psutil.Process().memory_info().rss / 1024 / 1024 332 | gc.collect() 333 | after = psutil.Process().memory_info().rss / 1024 / 1024 334 | return before - after 335 | 336 | # Batch processing utilities 337 | class BatchProcessor: 338 | """Process multiple symbols/datasets efficiently.""" 339 | 340 | def __init__(self, batch_size: int = 100, n_workers: int = None): 341 | self.batch_size = batch_size 342 | self.n_workers = n_workers or min(32, (psutil.cpu_count() or 1) + 4) 343 | 344 | def process_symbols( 345 | self, 346 | data_dict: Dict[str, pd.DataFrame], 347 | indicator_func: Callable, 348 | **kwargs 349 | ) -> Dict[str, Any]: 350 | """ 351 | Process indicators for multiple symbols efficiently. 
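Symbols are split into batches of `batch_size` and dispatched to a thread pool of `n_workers`; symbols whose calculation raises are dropped from the result, and garbage collection is forced between batches to bound memory growth.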
352 | 353 | Args: 354 | data_dict: Dictionary of symbol -> DataFrame 355 | indicator_func: Function to calculate indicator 356 | **kwargs: Arguments for indicator function 357 | 358 | Returns: 359 | Dictionary of symbol -> indicator results 360 | """ 361 | from concurrent.futures import ThreadPoolExecutor, as_completed 362 | 363 | results = {} 364 | 365 | def process_single(symbol_data_pair): 366 | symbol, data = symbol_data_pair 367 | try: 368 | result = indicator_func(data, **kwargs) 369 | return symbol, result 370 | except Exception as e: 371 | return symbol, None 372 | 373 | # Process in batches to manage memory 374 | symbols = list(data_dict.keys()) 375 | 376 | with ThreadPoolExecutor(max_workers=self.n_workers) as executor: 377 | for i in range(0, len(symbols), self.batch_size): 378 | batch_symbols = symbols[i:i + self.batch_size] 379 | batch_data = [(s, data_dict[s]) for s in batch_symbols] 380 | 381 | # Submit batch 382 | futures = { 383 | executor.submit(process_single, item): item[0] 384 | for item in batch_data 385 | } 386 | 387 | # Collect results 388 | for future in as_completed(futures): 389 | symbol, result = future.result() 390 | if result is not None: 391 | results[symbol] = result 392 | 393 | # Force garbage collection between batches 394 | if i + self.batch_size < len(symbols): 395 | MemoryManager.force_garbage_collection() 396 | 397 | return results 398 | 399 | # Performance profiling decorator 400 | def profile_performance(include_memory: bool = True): 401 | """ 402 | Decorator to profile function performance. 403 | 404 | Args: 405 | include_memory: Whether to include memory profiling 406 | """ 407 | def decorator(func: Callable) -> Callable: 408 | @wraps(func) 409 | def wrapper(*args, **kwargs): 410 | start_time = time.time() 411 | 412 | if include_memory: 413 | process = psutil.Process() 414 | memory_before = process.memory_info().rss / 1024 / 1024 415 | 416 | result = func(*args, **kwargs) 417 | 418 | execution_time = time.time() - start_time 419 | 420 | if include_memory: 421 | memory_after = process.memory_info().rss / 1024 / 1024 422 | memory_usage = memory_after - memory_before 423 | else: 424 | memory_usage = 0 425 | 426 | # Determine input size 427 | input_size = 0 428 | for arg in args: 429 | if isinstance(arg, (pd.Series, pd.DataFrame)): 430 | input_size = max(input_size, len(arg)) 431 | 432 | # Record metrics 433 | metric = PerformanceMetrics( 434 | function_name=func.__name__, 435 | execution_time=execution_time, 436 | memory_usage_mb=memory_usage, 437 | cache_hit=False, 438 | input_size=input_size, 439 | timestamp=time.time() 440 | ) 441 | _performance_monitor.record_metric(metric) 442 | 443 | return result 444 | 445 | return wrapper 446 | return decorator 447 | 448 | # System resource monitoring 449 | def get_system_resources() -> Dict[str, Any]: 450 | """Get current system resource usage.""" 451 | process = psutil.Process() 452 | 453 | return { 454 | 'cpu_percent': psutil.cpu_percent(interval=1), 455 | 'memory_percent': psutil.virtual_memory().percent, 456 | 'memory_available_gb': psutil.virtual_memory().available / 1024 / 1024 / 1024, 457 | 'process_memory_mb': process.memory_info().rss / 1024 / 1024, 458 | 'process_cpu_percent': process.cpu_percent(), 459 | 'disk_usage_percent': psutil.disk_usage('/').percent, 460 | 'load_average': psutil.getloadavg() if hasattr(psutil, 'getloadavg') else None 461 | } 462 | 463 | __all__ = [ 464 | 'PerformanceMetrics', 465 | 'PerformanceMonitor', 466 | 'IndicatorCache', 467 | 'MemoryManager', 468 | 'BatchProcessor', 
469 | 'cached_indicator', 470 | 'profile_performance', 471 | 'clear_indicator_cache', 472 | 'get_cache_stats', 473 | 'get_performance_stats', 474 | 'get_system_resources' 475 | ] 476 | -------------------------------------------------------------------------------- /quantjourney_ti/_risk_metrics.py: -------------------------------------------------------------------------------- 1 | """ 2 | QuantJourney Technical-Indicators - Risk Metrics 3 | =============================================== 4 | Hedge fund specific risk and performance metrics with Numba optimization. 5 | 6 | Author: Jakub Polec 7 | License: MIT 8 | """ 9 | 10 | from __future__ import annotations 11 | 12 | import numpy as np 13 | import pandas as pd 14 | from numba import njit 15 | from typing import Union, Tuple, Optional 16 | import warnings 17 | 18 | @njit(parallel=False, fastmath=True) 19 | def _calculate_returns_numba(prices: np.ndarray) -> np.ndarray: 20 | """Calculate returns from price series.""" 21 | returns = np.full(len(prices), np.nan, dtype=np.float64) 22 | for i in range(1, len(prices)): 23 | if not np.isnan(prices[i]) and not np.isnan(prices[i-1]) and prices[i-1] != 0: 24 | returns[i] = (prices[i] - prices[i-1]) / prices[i-1] 25 | return returns 26 | 27 | @njit(parallel=False, fastmath=True) 28 | def _calculate_sharpe_ratio_numba(returns: np.ndarray, risk_free_rate: float = 0.0, periods_per_year: int = 252) -> float: 29 | """Calculate annualized Sharpe ratio.""" 30 | valid_returns = returns[~np.isnan(returns)] 31 | if len(valid_returns) < 2: 32 | return np.nan 33 | 34 | mean_return = np.mean(valid_returns) 35 | std_return = np.std(valid_returns) 36 | 37 | if std_return == 0: 38 | return np.nan 39 | 40 | excess_return = mean_return - (risk_free_rate / periods_per_year) 41 | sharpe = (excess_return * np.sqrt(periods_per_year)) / std_return 42 | return sharpe 43 | 44 | @njit(parallel=False, fastmath=True) 45 | def _calculate_sortino_ratio_numba(returns: np.ndarray, target_return: float = 0.0, periods_per_year: int = 252) -> float: 46 | """Calculate annualized Sortino ratio.""" 47 | valid_returns = returns[~np.isnan(returns)] 48 | if len(valid_returns) < 2: 49 | return np.nan 50 | 51 | mean_return = np.mean(valid_returns) 52 | downside_returns = valid_returns[valid_returns < target_return] 53 | 54 | if len(downside_returns) == 0: 55 | return np.inf if mean_return > target_return else np.nan 56 | 57 | downside_deviation = np.sqrt(np.mean((downside_returns - target_return) ** 2)) 58 | 59 | if downside_deviation == 0: 60 | return np.nan 61 | 62 | excess_return = mean_return - (target_return / periods_per_year) 63 | sortino = (excess_return * np.sqrt(periods_per_year)) / downside_deviation 64 | return sortino 65 | 66 | @njit(parallel=False, fastmath=True) 67 | def _calculate_max_drawdown_numba(prices: np.ndarray) -> Tuple[float, int, int]: 68 | """Calculate maximum drawdown and its start/end indices.""" 69 | if len(prices) == 0: 70 | return np.nan, -1, -1 71 | 72 | max_dd = 0.0 73 | peak = prices[0] 74 | peak_idx = 0 75 | start_idx = 0 76 | end_idx = 0 77 | temp_start = 0 78 | 79 | for i in range(1, len(prices)): 80 | if np.isnan(prices[i]): 81 | continue 82 | 83 | if prices[i] > peak: 84 | peak = prices[i] 85 | peak_idx = i 86 | temp_start = i 87 | else: 88 | drawdown = (peak - prices[i]) / peak 89 | if drawdown > max_dd: 90 | max_dd = drawdown 91 | start_idx = temp_start 92 | end_idx = i 93 | 94 | return max_dd, start_idx, end_idx 95 | 96 | @njit(parallel=False, fastmath=True) 97 | def _calculate_var_numba(returns: 
np.ndarray, confidence: float = 0.05) -> float: 98 | """Calculate Value at Risk using historical simulation.""" 99 | valid_returns = returns[~np.isnan(returns)] 100 | if len(valid_returns) == 0: 101 | return np.nan 102 | 103 | # Sort returns in ascending order 104 | sorted_returns = np.sort(valid_returns) 105 | index = int(confidence * len(sorted_returns)) 106 | 107 | if index >= len(sorted_returns): 108 | return sorted_returns[-1] 109 | 110 | return sorted_returns[index] 111 | 112 | @njit(parallel=False, fastmath=True) 113 | def _calculate_cvar_numba(returns: np.ndarray, confidence: float = 0.05) -> float: 114 | """Calculate Conditional Value at Risk (Expected Shortfall).""" 115 | valid_returns = returns[~np.isnan(returns)] 116 | if len(valid_returns) == 0: 117 | return np.nan 118 | 119 | sorted_returns = np.sort(valid_returns) 120 | index = int(confidence * len(sorted_returns)) 121 | 122 | if index == 0: 123 | return sorted_returns[0] 124 | 125 | return np.mean(sorted_returns[:index]) 126 | 127 | @njit(parallel=False, fastmath=True) 128 | def _calculate_calmar_ratio_numba(returns: np.ndarray, periods_per_year: int = 252) -> float: 129 | """Calculate Calmar ratio (annualized return / max drawdown).""" 130 | valid_returns = returns[~np.isnan(returns)] 131 | if len(valid_returns) < 2: 132 | return np.nan 133 | 134 | # Calculate annualized return 135 | mean_return = np.mean(valid_returns) * periods_per_year 136 | 137 | # Calculate cumulative returns to get prices 138 | cum_returns = np.ones(len(valid_returns) + 1) 139 | for i in range(len(valid_returns)): 140 | cum_returns[i + 1] = cum_returns[i] * (1 + valid_returns[i]) 141 | 142 | # Calculate max drawdown 143 | max_dd = 0.0 144 | peak = cum_returns[0] 145 | 146 | for i in range(1, len(cum_returns)): 147 | if cum_returns[i] > peak: 148 | peak = cum_returns[i] 149 | else: 150 | drawdown = (peak - cum_returns[i]) / peak 151 | if drawdown > max_dd: 152 | max_dd = drawdown 153 | 154 | if max_dd == 0: 155 | return np.inf if mean_return > 0 else np.nan 156 | 157 | return mean_return / max_dd 158 | 159 | @njit(parallel=False, fastmath=True) 160 | def _calculate_omega_ratio_numba(returns: np.ndarray, threshold: float = 0.0) -> float: 161 | """Calculate Omega ratio.""" 162 | valid_returns = returns[~np.isnan(returns)] 163 | if len(valid_returns) == 0: 164 | return np.nan 165 | 166 | gains = 0.0 167 | losses = 0.0 168 | 169 | for ret in valid_returns: 170 | if ret > threshold: 171 | gains += (ret - threshold) 172 | else: 173 | losses += (threshold - ret) 174 | 175 | if losses == 0: 176 | return np.inf if gains > 0 else 1.0 177 | 178 | return gains / losses 179 | 180 | @njit(parallel=False, fastmath=True) 181 | def _calculate_information_ratio_numba(portfolio_returns: np.ndarray, benchmark_returns: np.ndarray) -> float: 182 | """Calculate Information Ratio.""" 183 | if len(portfolio_returns) != len(benchmark_returns): 184 | return np.nan 185 | 186 | active_returns = portfolio_returns - benchmark_returns 187 | valid_active = active_returns[~np.isnan(active_returns)] 188 | 189 | if len(valid_active) < 2: 190 | return np.nan 191 | 192 | mean_active = np.mean(valid_active) 193 | std_active = np.std(valid_active) 194 | 195 | if std_active == 0: 196 | return np.nan 197 | 198 | return mean_active / std_active 199 | 200 | @njit(parallel=False, fastmath=True) 201 | def _calculate_treynor_ratio_numba(returns: np.ndarray, market_returns: np.ndarray, risk_free_rate: float = 0.0, periods_per_year: int = 252) -> float: 202 | """Calculate Treynor ratio.""" 203 | if 
len(returns) != len(market_returns): 204 | return np.nan 205 | 206 | # Calculate beta 207 | valid_mask = ~(np.isnan(returns) | np.isnan(market_returns)) 208 | valid_returns = returns[valid_mask] 209 | valid_market = market_returns[valid_mask] 210 | 211 | if len(valid_returns) < 2: 212 | return np.nan 213 | 214 | # Calculate covariance and variance 215 | mean_ret = np.mean(valid_returns) 216 | mean_mkt = np.mean(valid_market) 217 | 218 | covariance = np.mean((valid_returns - mean_ret) * (valid_market - mean_mkt)) 219 | market_variance = np.mean((valid_market - mean_mkt) ** 2) 220 | 221 | if market_variance == 0: 222 | return np.nan 223 | 224 | beta = covariance / market_variance 225 | 226 | if beta == 0: 227 | return np.nan 228 | 229 | # Calculate annualized excess return 230 | excess_return = (mean_ret - risk_free_rate / periods_per_year) * periods_per_year 231 | 232 | return excess_return / beta 233 | 234 | def calculate_risk_metrics( 235 | data: Union[pd.Series, pd.DataFrame], 236 | benchmark: Optional[pd.Series] = None, 237 | risk_free_rate: float = 0.02, 238 | periods_per_year: int = 252, 239 | confidence_level: float = 0.05 240 | ) -> pd.Series: 241 | """ 242 | Calculate comprehensive risk metrics for a return series or price series. 243 | 244 | Args: 245 | data: Price or return series 246 | benchmark: Benchmark series for relative metrics 247 | risk_free_rate: Annual risk-free rate (default: 2%) 248 | periods_per_year: Trading periods per year (default: 252) 249 | confidence_level: Confidence level for VaR/CVaR (default: 5%) 250 | 251 | Returns: 252 | Series with calculated risk metrics 253 | """ 254 | if isinstance(data, pd.DataFrame): 255 | if 'close' in data.columns: 256 | prices = data['close'] 257 | elif 'adj_close' in data.columns: 258 | prices = data['adj_close'] 259 | else: 260 | raise ValueError("DataFrame must contain 'close' or 'adj_close' column") 261 | else: 262 | prices = data 263 | 264 | # Determine if data is prices or returns using magnitude heuristic 265 | # Heuristic: if values are small in magnitude (typical returns), treat as returns; 266 | # otherwise treat as prices. 
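# For example, an equity price path such as [101.3, 100.9, 102.1] fails the
# check below (abs_max > 2.0) and is treated as prices, whereas a return
# series such as [0.004, -0.012, 0.007] passes it. The 2.0 / 0.5 cutoffs are
# heuristics and can misclassify unusual inputs (e.g. sub-$2 penny stocks),
# so pass returns directly when the distinction matters.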
267 | series = prices.dropna() 268 | abs_max = float(series.abs().max()) if not series.empty else 0.0 269 | abs_median = float(series.abs().median()) if not series.empty else 0.0 270 | looks_like_returns = (abs_max <= 2.0 and abs_median <= 0.5) 271 | 272 | if looks_like_returns: 273 | # Treat as returns 274 | returns = series 275 | is_prices = False 276 | prices = (1 + returns).cumprod() 277 | else: 278 | # Treat as prices 279 | returns = series.pct_change().dropna() 280 | is_prices = True 281 | 282 | returns_np = returns.values.astype(np.float64) 283 | prices_np = prices.values.astype(np.float64) 284 | 285 | metrics = {} 286 | 287 | # Basic statistics 288 | metrics['Total Return'] = (prices.iloc[-1] / prices.iloc[0] - 1) if is_prices else returns.sum() 289 | metrics['Annualized Return'] = (1 + metrics['Total Return']) ** (periods_per_year / len(returns)) - 1 290 | metrics['Volatility'] = returns.std() * np.sqrt(periods_per_year) 291 | metrics['Skewness'] = returns.skew() 292 | metrics['Kurtosis'] = returns.kurtosis() 293 | 294 | # Risk-adjusted returns 295 | metrics['Sharpe Ratio'] = _calculate_sharpe_ratio_numba(returns_np, risk_free_rate, periods_per_year) 296 | metrics['Sortino Ratio'] = _calculate_sortino_ratio_numba(returns_np, 0.0, periods_per_year) 297 | metrics['Calmar Ratio'] = _calculate_calmar_ratio_numba(returns_np, periods_per_year) 298 | 299 | # Drawdown metrics 300 | max_dd, start_idx, end_idx = _calculate_max_drawdown_numba(prices_np) 301 | metrics['Max Drawdown'] = max_dd 302 | metrics['Max Drawdown Start'] = prices.index[start_idx] if start_idx >= 0 else None 303 | metrics['Max Drawdown End'] = prices.index[end_idx] if end_idx >= 0 else None 304 | 305 | # Risk metrics 306 | metrics['VaR (5%)'] = _calculate_var_numba(returns_np, confidence_level) 307 | metrics['CVaR (5%)'] = _calculate_cvar_numba(returns_np, confidence_level) 308 | metrics['Omega Ratio'] = _calculate_omega_ratio_numba(returns_np, 0.0) 309 | 310 | # Benchmark relative metrics 311 | if benchmark is not None: 312 | if isinstance(benchmark, pd.Series): 313 | bench_returns = benchmark.pct_change().dropna() if benchmark.min() > 0 else benchmark.dropna() 314 | # Align indices 315 | aligned_returns, aligned_bench = returns.align(bench_returns, join='inner') 316 | 317 | if len(aligned_returns) > 1: 318 | metrics['Information Ratio'] = _calculate_information_ratio_numba( 319 | aligned_returns.values, aligned_bench.values 320 | ) 321 | metrics['Treynor Ratio'] = _calculate_treynor_ratio_numba( 322 | aligned_returns.values, aligned_bench.values, risk_free_rate, periods_per_year 323 | ) 324 | 325 | # Beta calculation 326 | covariance = np.cov(aligned_returns.values, aligned_bench.values)[0, 1] 327 | benchmark_variance = np.var(aligned_bench.values) 328 | metrics['Beta'] = covariance / benchmark_variance if benchmark_variance != 0 else np.nan 329 | 330 | return pd.Series(metrics) 331 | 332 | __all__ = [ 333 | 'calculate_risk_metrics', 334 | '_calculate_sharpe_ratio_numba', 335 | '_calculate_sortino_ratio_numba', 336 | '_calculate_max_drawdown_numba', 337 | '_calculate_var_numba', 338 | '_calculate_cvar_numba', 339 | '_calculate_calmar_ratio_numba', 340 | '_calculate_omega_ratio_numba', 341 | '_calculate_information_ratio_numba', 342 | '_calculate_treynor_ratio_numba' 343 | ] 344 | -------------------------------------------------------------------------------- /quantjourney_ti/_streaming.py: -------------------------------------------------------------------------------- 1 | """ 2 | QuantJourney Technical-Indicators - 
Streaming Interface 3 | ====================================================== 4 | Real-time streaming data processing for technical indicators with incremental updates. 5 | 6 | Author: Jakub Polec 7 | License: MIT 8 | """ 9 | 10 | from __future__ import annotations 11 | 12 | import numpy as np 13 | import pandas as pd 14 | from typing import Dict, Any, Optional, Union, Callable 15 | from collections import deque 16 | import threading 17 | import time 18 | from dataclasses import dataclass, field 19 | from functools import lru_cache 20 | 21 | from ._indicator_kernels import * 22 | from ._utils import validate_data 23 | from ._errors import InvalidInputError 24 | 25 | @dataclass 26 | class StreamingState: 27 | """State container for streaming indicators.""" 28 | buffer: deque = field(default_factory=lambda: deque(maxlen=1000)) 29 | last_values: Dict[str, float] = field(default_factory=dict) 30 | periods: Dict[str, int] = field(default_factory=dict) 31 | ema_multipliers: Dict[str, float] = field(default_factory=dict) 32 | sma_sums: Dict[str, float] = field(default_factory=dict) 33 | rsi_gains: deque = field(default_factory=lambda: deque(maxlen=100)) 34 | rsi_losses: deque = field(default_factory=lambda: deque(maxlen=100)) 35 | macd_fast_ema: float = 0.0 36 | macd_slow_ema: float = 0.0 37 | macd_signal_ema: float = 0.0 38 | atr_values: deque = field(default_factory=lambda: deque(maxlen=100)) 39 | bb_values: deque = field(default_factory=lambda: deque(maxlen=100)) 40 | initialized: bool = False 41 | tick_count: int = 0 42 | 43 | class StreamingIndicators: 44 | """ 45 | High-performance streaming technical indicators with incremental updates. 46 | 47 | Designed for real-time data feeds where full recalculation would be too slow. 48 | Maintains internal state and updates indicators incrementally as new data arrives. 49 | """ 50 | 51 | def __init__(self, max_buffer_size: int = 1000): 52 | """ 53 | Initialize streaming indicators. 54 | 55 | Args: 56 | max_buffer_size: Maximum number of historical values to keep in memory 57 | """ 58 | self.max_buffer_size = max_buffer_size 59 | self.states: Dict[str, StreamingState] = {} 60 | self.lock = threading.RLock() 61 | self.callbacks: Dict[str, Callable] = {} 62 | 63 | def register_callback(self, indicator: str, callback: Callable[[str, float, pd.Timestamp], None]): 64 | """Register callback for indicator updates.""" 65 | self.callbacks[indicator] = callback 66 | 67 | def _get_or_create_state(self, symbol: str) -> StreamingState: 68 | """Get or create streaming state for a symbol.""" 69 | if symbol not in self.states: 70 | st = StreamingState() 71 | # Ensure buffer respects instance max_buffer_size 72 | from collections import deque as _deque 73 | st.buffer = _deque(maxlen=self.max_buffer_size) 74 | self.states[symbol] = st 75 | return self.states[symbol] 76 | 77 | def _notify_callback(self, symbol: str, indicator: str, value: float, timestamp: pd.Timestamp): 78 | """Notify registered callbacks of indicator updates.""" 79 | callback_key = f"{symbol}_{indicator}" 80 | if callback_key in self.callbacks: 81 | try: 82 | self.callbacks[callback_key](symbol, value, timestamp) 83 | except Exception as e: 84 | # Log error but don't break the stream 85 | pass 86 | 87 | def update_tick( 88 | self, 89 | symbol: str, 90 | timestamp: pd.Timestamp, 91 | open_price: float = None, 92 | high: float = None, 93 | low: float = None, 94 | close: float = None, 95 | volume: float = None 96 | ) -> Dict[str, float]: 97 | """ 98 | Update all indicators with new tick data. 
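Only indicators whose inputs are present are updated: `close` drives SMA(20), EMA(20), RSI(14) and MACD(12, 26, 9), while ATR(14) and Bollinger Bands (20, 2.0) additionally require `high` and `low`. Result keys follow the 'sma_20' / 'rsi_14' / 'bb_upper' naming used below.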
99 | 100 | Args: 101 | symbol: Symbol identifier 102 | timestamp: Timestamp of the tick 103 | open_price: Open price 104 | high: High price 105 | low: Low price 106 | close: Close price 107 | volume: Volume 108 | 109 | Returns: 110 | Dictionary of updated indicator values 111 | """ 112 | with self.lock: 113 | state = self._get_or_create_state(symbol) 114 | 115 | # Add to buffer 116 | tick_data = { 117 | 'timestamp': timestamp, 118 | 'open': open_price, 119 | 'high': high, 120 | 'low': low, 121 | 'close': close, 122 | 'volume': volume 123 | } 124 | state.buffer.append(tick_data) 125 | state.tick_count += 1 126 | 127 | results = {} 128 | 129 | if close is not None: 130 | # Update SMA 131 | if 'sma_20' not in state.periods: 132 | state.periods['sma_20'] = 20 133 | state.sma_sums['sma_20'] = 0.0 134 | 135 | results['sma_20'] = self._update_sma(state, close, 'sma_20', 20) 136 | 137 | # Update EMA 138 | results['ema_20'] = self._update_ema(state, close, 'ema_20', 20) 139 | 140 | # Update RSI 141 | results['rsi_14'] = self._update_rsi(state, close, 14) 142 | 143 | # Update MACD 144 | macd_results = self._update_macd(state, close, 12, 26, 9) 145 | results.update(macd_results) 146 | 147 | if high is not None and low is not None and close is not None: 148 | # Update ATR 149 | results['atr_14'] = self._update_atr(state, high, low, close, 14) 150 | 151 | # Update Bollinger Bands 152 | bb_results = self._update_bollinger_bands(state, close, 20, 2.0) 153 | results.update(bb_results) 154 | 155 | # Notify callbacks 156 | for indicator, value in results.items(): 157 | if not np.isnan(value): 158 | self._notify_callback(symbol, indicator, value, timestamp) 159 | 160 | return results 161 | 162 | def _update_sma(self, state: StreamingState, price: float, key: str, period: int) -> float: 163 | """Update Simple Moving Average incrementally.""" 164 | if key not in state.last_values: 165 | state.last_values[key] = np.nan 166 | state.sma_sums[key] = 0.0 167 | 168 | # Get recent prices 169 | recent_prices = [tick['close'] for tick in list(state.buffer)[-period:] if tick['close'] is not None] 170 | 171 | if len(recent_prices) < period: 172 | return np.nan 173 | 174 | # Calculate SMA 175 | sma_value = sum(recent_prices) / period 176 | state.last_values[key] = sma_value 177 | return sma_value 178 | 179 | def _update_ema(self, state: StreamingState, price: float, key: str, period: int) -> float: 180 | """Update Exponential Moving Average incrementally.""" 181 | multiplier_key = f"{key}_mult" 182 | 183 | if multiplier_key not in state.ema_multipliers: 184 | state.ema_multipliers[multiplier_key] = 2.0 / (period + 1) 185 | 186 | if key not in state.last_values or np.isnan(state.last_values[key]): 187 | state.last_values[key] = price 188 | return price 189 | 190 | multiplier = state.ema_multipliers[multiplier_key] 191 | ema_value = (price - state.last_values[key]) * multiplier + state.last_values[key] 192 | state.last_values[key] = ema_value 193 | return ema_value 194 | 195 | def _update_rsi(self, state: StreamingState, price: float, period: int = 14) -> float: 196 | """Update RSI incrementally.""" 197 | if len(state.buffer) < 2: 198 | return np.nan 199 | 200 | # Get previous price 201 | prev_tick = list(state.buffer)[-2] 202 | prev_price = prev_tick['close'] 203 | 204 | if prev_price is None: 205 | return np.nan 206 | 207 | # Calculate price change 208 | change = price - prev_price 209 | gain = max(change, 0) 210 | loss = max(-change, 0) 211 | 212 | # Add to gain/loss buffers 213 | state.rsi_gains.append(gain) 214 | 
state.rsi_losses.append(loss) 215 | 216 | # Need at least period values 217 | if len(state.rsi_gains) < period: 218 | return np.nan 219 | 220 | # Calculate average gain and loss 221 | recent_gains = list(state.rsi_gains)[-period:] 222 | recent_losses = list(state.rsi_losses)[-period:] 223 | 224 | avg_gain = sum(recent_gains) / period 225 | avg_loss = sum(recent_losses) / period 226 | 227 | if avg_loss == 0: 228 | return 100.0 229 | 230 | rs = avg_gain / avg_loss 231 | rsi = 100 - (100 / (1 + rs)) 232 | 233 | return rsi 234 | 235 | def _update_macd(self, state: StreamingState, price: float, fast: int = 12, slow: int = 26, signal: int = 9) -> Dict[str, float]: 236 | """Update MACD incrementally.""" 237 | fast_mult = 2.0 / (fast + 1) 238 | slow_mult = 2.0 / (slow + 1) 239 | signal_mult = 2.0 / (signal + 1) 240 | 241 | # Initialize if needed 242 | if state.macd_fast_ema == 0: 243 | state.macd_fast_ema = price 244 | state.macd_slow_ema = price 245 | return {'macd': 0.0, 'macd_signal': 0.0, 'macd_histogram': 0.0} 246 | 247 | # Update EMAs 248 | state.macd_fast_ema = (price - state.macd_fast_ema) * fast_mult + state.macd_fast_ema 249 | state.macd_slow_ema = (price - state.macd_slow_ema) * slow_mult + state.macd_slow_ema 250 | 251 | # Calculate MACD line 252 | macd_line = state.macd_fast_ema - state.macd_slow_ema 253 | 254 | # Update signal line 255 | if state.macd_signal_ema == 0: 256 | state.macd_signal_ema = macd_line 257 | else: 258 | state.macd_signal_ema = (macd_line - state.macd_signal_ema) * signal_mult + state.macd_signal_ema 259 | 260 | # Calculate histogram 261 | histogram = macd_line - state.macd_signal_ema 262 | 263 | return { 264 | 'macd': macd_line, 265 | 'macd_signal': state.macd_signal_ema, 266 | 'macd_histogram': histogram 267 | } 268 | 269 | def _update_atr(self, state: StreamingState, high: float, low: float, close: float, period: int = 14) -> float: 270 | """Update ATR incrementally.""" 271 | if len(state.buffer) < 2: 272 | return np.nan 273 | 274 | # Get previous close 275 | prev_tick = list(state.buffer)[-2] 276 | prev_close = prev_tick['close'] 277 | 278 | if prev_close is None: 279 | return np.nan 280 | 281 | # Calculate True Range 282 | tr = max( 283 | high - low, 284 | abs(high - prev_close), 285 | abs(low - prev_close) 286 | ) 287 | 288 | # Add to ATR buffer 289 | state.atr_values.append(tr) 290 | 291 | # Need at least period values 292 | if len(state.atr_values) < period: 293 | return np.nan 294 | 295 | # Calculate ATR as average of recent TR values 296 | recent_tr = list(state.atr_values)[-period:] 297 | atr_value = sum(recent_tr) / period 298 | 299 | return atr_value 300 | 301 | def _update_bollinger_bands(self, state: StreamingState, price: float, period: int = 20, std_dev: float = 2.0) -> Dict[str, float]: 302 | """Update Bollinger Bands incrementally.""" 303 | # Add price to buffer 304 | state.bb_values.append(price) 305 | 306 | # Need at least period values 307 | if len(state.bb_values) < period: 308 | return {'bb_upper': np.nan, 'bb_middle': np.nan, 'bb_lower': np.nan} 309 | 310 | # Get recent prices 311 | recent_prices = list(state.bb_values)[-period:] 312 | 313 | # Calculate middle band (SMA) 314 | middle = sum(recent_prices) / period 315 | 316 | # Calculate standard deviation 317 | variance = sum((p - middle) ** 2 for p in recent_prices) / period 318 | std = variance ** 0.5 319 | 320 | # Calculate bands 321 | upper = middle + (std_dev * std) 322 | lower = middle - (std_dev * std) 323 | 324 | return { 325 | 'bb_upper': upper, 326 | 'bb_middle': middle, 327 | 
'bb_lower': lower
328 |         }
329 | 
330 |     def get_current_values(self, symbol: str) -> Dict[str, float]:
331 |         """Get current indicator values for a symbol."""
332 |         with self.lock:
333 |             if symbol not in self.states:
334 |                 return {}
335 | 
336 |             state = self.states[symbol]
337 |             return state.last_values.copy()
338 | 
339 |     def reset_symbol(self, symbol: str):
340 |         """Reset all indicators for a symbol."""
341 |         with self.lock:
342 |             if symbol in self.states:
343 |                 del self.states[symbol]
344 | 
345 |     def get_buffer_data(self, symbol: str, limit: int = None) -> pd.DataFrame:
346 |         """Get historical buffer data as DataFrame."""
347 |         with self.lock:
348 |             if symbol not in self.states:
349 |                 return pd.DataFrame()
350 | 
351 |             state = self.states[symbol]
352 |             buffer_list = list(state.buffer)
353 | 
354 |             if limit:
355 |                 buffer_list = buffer_list[-limit:]
356 | 
357 |             if not buffer_list:
358 |                 return pd.DataFrame()
359 | 
360 |             df = pd.DataFrame(buffer_list)
361 |             df.set_index('timestamp', inplace=True)
362 |             return df
363 | 
364 | class StreamingDataFeed:
365 |     """
366 |     Mock streaming data feed for testing and development.
367 |     """
368 | 
369 |     def __init__(self, data: pd.DataFrame, speed_multiplier: float = 1.0):
370 |         """
371 |         Initialize streaming data feed.
372 | 
373 |         Args:
374 |             data: Historical data to stream
375 |             speed_multiplier: Speed multiplier (1.0 = real-time, 10.0 = 10x faster)
376 |         """
377 |         self.data = data.copy()
378 |         self.speed_multiplier = speed_multiplier
379 |         self.current_index = 0
380 |         self.is_running = False
381 |         self.subscribers = []
382 |         self.thread = None
383 | 
384 |     def subscribe(self, callback: Callable):
385 |         """Subscribe to data feed updates."""
386 |         self.subscribers.append(callback)
387 | 
388 |     def start(self):
389 |         """Start streaming data."""
390 |         if self.is_running:
391 |             return
392 | 
393 |         self.is_running = True
394 |         self.thread = threading.Thread(target=self._stream_data, daemon=True)
395 |         self.thread.start()
396 | 
397 |     def stop(self):
398 |         """Stop streaming data."""
399 |         self.is_running = False
400 |         if self.thread:
401 |             self.thread.join()
402 | 
403 |     def _stream_data(self):
404 |         """Internal method to stream data."""
405 |         while self.is_running and self.current_index < len(self.data):
406 |             row = self.data.iloc[self.current_index]
407 |             timestamp = self.data.index[self.current_index]
408 | 
409 |             # Notify subscribers
410 |             for callback in self.subscribers:
411 |                 try:
412 |                     callback(timestamp, row)
413 |                 except Exception:
414 |                     # Swallow errors so one failing subscriber cannot stop the feed
415 |                     pass
416 | 
417 |             self.current_index += 1
418 | 
419 |             # Sleep based on speed multiplier
420 |             if self.speed_multiplier > 0:
421 |                 time.sleep(1.0 / self.speed_multiplier)
422 | 
423 | # Utility functions for streaming setup
424 | def create_streaming_setup(historical_data: pd.DataFrame, symbol: str = "DEFAULT") -> tuple[StreamingIndicators, StreamingDataFeed]:
425 |     """
426 |     Create a complete streaming setup with indicators and data feed.
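    Once started, the feed replays `historical_data` row by row on a
    background thread and forwards each row to
    `StreamingIndicators.update_tick` for the given symbol.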
427 | 428 | Args: 429 | historical_data: Historical OHLCV data 430 | symbol: Symbol identifier 431 | 432 | Returns: 433 | Tuple of (StreamingIndicators, StreamingDataFeed) 434 | """ 435 | indicators = StreamingIndicators() 436 | 437 | def data_callback(timestamp, row): 438 | indicators.update_tick( 439 | symbol=symbol, 440 | timestamp=timestamp, 441 | open_price=row.get('open'), 442 | high=row.get('high'), 443 | low=row.get('low'), 444 | close=row.get('close'), 445 | volume=row.get('volume') 446 | ) 447 | 448 | feed = StreamingDataFeed(historical_data) 449 | feed.subscribe(data_callback) 450 | 451 | return indicators, feed 452 | 453 | __all__ = [ 454 | 'StreamingIndicators', 455 | 'StreamingDataFeed', 456 | 'StreamingState', 457 | 'create_streaming_setup' 458 | ] 459 | -------------------------------------------------------------------------------- /quantjourney_ti/kernels/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Kernels package 3 | --------------- 4 | 5 | Domain-organised re-exports of Numba-accelerated kernels. 6 | This module exports all _calculate_*_numba functions from the domain modules. 7 | """ 8 | 9 | from .trend_numba import * # noqa: F401,F403 10 | from .momentum_numba import * # noqa: F401,F403 11 | from .volatility_numba import * # noqa: F401,F403 12 | from .volume_numba import * # noqa: F401,F403 13 | 14 | __all__ = [ 15 | name 16 | for name in globals().keys() 17 | if name.startswith("_calculate_") and name.endswith("_numba") 18 | ] 19 | -------------------------------------------------------------------------------- /quantjourney_ti/kernels/trend_numba.py: -------------------------------------------------------------------------------- 1 | """ 2 | Trend kernels 3 | ------------- 4 | 5 | Numba-accelerated implementations for trend-oriented indicators. 6 | Functions return float64 arrays aligned to the input length and emit NaNs 7 | for warm-up regions and invalid slices. All functions are pure and 8 | side-effect free. 
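
Illustrative smoke check (internal API, subject to change):

    >>> import numpy as np
    >>> from quantjourney_ti.kernels.trend_numba import _calculate_sma_numba
    >>> _calculate_sma_numba(np.array([1.0, 2.0, 3.0, 4.0]), 2)
    array([nan, 1.5, 2.5, 3.5])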
9 | """ 10 | import numpy as np 11 | from typing import Tuple 12 | from .._indicator_kernels import njit as njit # cached njit 13 | 14 | 15 | @njit(parallel=False, fastmath=True) 16 | def _calculate_sma_numba(prices: np.ndarray, window: int) -> np.ndarray: 17 | """Optimized Simple Moving Average calculation.""" 18 | n = len(prices) 19 | if n == 0 or window <= 0 or window > n: 20 | return np.full(n, np.nan, dtype=np.float64) 21 | sma = np.full(n, np.nan, dtype=np.float64) 22 | for i in range(window - 1, n): 23 | if np.any(np.isnan(prices[i - window + 1 : i + 1])): 24 | continue 25 | sum_prices = 0.0 26 | count = 0 27 | for j in range(i - window + 1, i + 1): 28 | sum_prices += prices[j] 29 | count += 1 30 | sma[i] = sum_prices / count 31 | return sma 32 | 33 | 34 | @njit(parallel=False, fastmath=True) 35 | def _calculate_ema_numba(prices: np.ndarray, window: int) -> np.ndarray: 36 | """Optimized Exponential Moving Average calculation.""" 37 | n = len(prices) 38 | if n == 0 or window <= 0 or window > n: 39 | return np.full(n, np.nan, dtype=np.float64) 40 | ema = np.full(n, np.nan, dtype=np.float64) 41 | multiplier = 2.0 / (window + 1) 42 | sum_prices = 0.0 43 | count = 0 44 | for i in range(window): 45 | if not np.isnan(prices[i]): 46 | sum_prices += prices[i] 47 | count += 1 48 | if count > 0: 49 | ema[window - 1] = sum_prices / count 50 | for i in range(window, n): 51 | if not np.isnan(prices[i]) and not np.isnan(ema[i - 1]): 52 | ema[i] = (prices[i] - ema[i - 1]) * multiplier + ema[i - 1] 53 | return ema 54 | 55 | 56 | @njit(parallel=False, fastmath=True) 57 | def _calculate_dema_numba(close: np.ndarray, period: int) -> np.ndarray: 58 | """Double EMA: DEMA = 2*EMA1 - EMA2. 59 | 60 | Args: 61 | close: price array 62 | period: EMA window 63 | Returns: 64 | Float64 array of DEMA values with NaNs for warm-up. 65 | """ 66 | n = len(close) 67 | if n == 0 or period <= 0 or period > n: 68 | return np.full(n, np.nan, dtype=np.float64) 69 | alpha = 2.0 / (period + 1) 70 | ema1 = np.full(n, np.nan, dtype=np.float64) 71 | ema2 = np.full(n, np.nan, dtype=np.float64) 72 | if not np.isnan(close[0]): 73 | ema1[0] = close[0] 74 | ema2[0] = close[0] 75 | for i in range(1, n): 76 | if not np.isnan(close[i]) and not np.isnan(ema1[i - 1]): 77 | ema1[i] = (close[i] - ema1[i - 1]) * alpha + ema1[i - 1] 78 | if not np.isnan(ema1[i]) and not np.isnan(ema2[i - 1]): 79 | ema2[i] = (ema1[i] - ema2[i - 1]) * alpha + ema2[i - 1] 80 | out = np.full(n, np.nan, dtype=np.float64) 81 | for i in range(n): 82 | if not np.isnan(ema1[i]) and not np.isnan(ema2[i]): 83 | out[i] = 2 * ema1[i] - ema2[i] 84 | return out 85 | 86 | 87 | @njit(parallel=False, fastmath=True) 88 | def _calculate_hull_ma_numba(close: np.ndarray, period: int) -> np.ndarray: 89 | """Hull Moving Average (HMA) using WMA cascade. 90 | 91 | Returns a smoother, faster-reacting moving average. 
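
    Computed as HMA(p) = WMA(2*WMA(close, p//2) - WMA(close, p), int(sqrt(p))).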
92 | """ 93 | n = len(close) 94 | if n == 0 or period <= 0 or period > n: 95 | return np.full(n, np.nan, dtype=np.float64) 96 | hma = np.full(n, np.nan, dtype=np.float64) 97 | # WMA helper 98 | def wma(arr: np.ndarray, p: int) -> np.ndarray: 99 | out = np.full(n, np.nan, dtype=np.float64) 100 | if p <= 0 or p > n: 101 | return out 102 | weight_sum = p * (p + 1) / 2.0 103 | for i in range(p - 1, n): 104 | s = 0.0 105 | w = 1 106 | valid = True 107 | for j in range(i - p + 1, i + 1): 108 | if np.isnan(arr[j]): 109 | valid = False 110 | break 111 | s += arr[j] * w 112 | w += 1 113 | if valid: 114 | out[i] = s / weight_sum 115 | return out 116 | half = period // 2 117 | sqrt_p = int(np.sqrt(period)) if period > 0 else 0 118 | wma_half = wma(close, half) 119 | wma_full = wma(close, period) 120 | diff = np.full(n, np.nan, dtype=np.float64) 121 | for i in range(n): 122 | if not np.isnan(wma_half[i]) and not np.isnan(wma_full[i]): 123 | diff[i] = 2 * wma_half[i] - wma_full[i] 124 | hma = wma(diff, sqrt_p) 125 | return hma 126 | 127 | 128 | @njit(parallel=False, fastmath=True) 129 | def _calculate_alma_numba( 130 | close: np.ndarray, window: int, sigma: float = 6.0, offset: float = 0.85 131 | ) -> np.ndarray: 132 | """Arnaud Legoux Moving Average (ALMA) with Gaussian weights.""" 133 | n = len(close) 134 | if n == 0 or window <= 0 or window > n: 135 | return np.full(n, np.nan, dtype=np.float64) 136 | alma = np.full(n, np.nan, dtype=np.float64) 137 | m = offset * (window - 1) 138 | s = window / sigma 139 | weights = np.zeros(window, dtype=np.float64) 140 | wsum = 0.0 141 | for i in range(window): 142 | weights[i] = np.exp(-((i - m) ** 2) / (2 * s * s)) 143 | wsum += weights[i] 144 | if wsum == 0: 145 | return alma 146 | for i in range(window): 147 | weights[i] /= wsum 148 | for i in range(window - 1, n): 149 | if np.any(np.isnan(close[i - window + 1 : i + 1])): 150 | continue 151 | acc = 0.0 152 | for j in range(window): 153 | acc += close[i - window + 1 + j] * weights[j] 154 | alma[i] = acc 155 | return alma 156 | @njit(parallel=False, fastmath=True) 157 | def _calculate_kama_numba( 158 | close: np.ndarray, er_period: int = 10, fast_period: int = 2, slow_period: int = 30 159 | ) -> np.ndarray: 160 | n = len(close) 161 | if n == 0 or er_period <= 0 or fast_period <= 0 or slow_period <= 0 or er_period > n: 162 | return np.full(n, np.nan, dtype=np.float64) 163 | er = np.zeros(n, dtype=np.float64) 164 | kama = np.full(n, np.nan, dtype=np.float64) 165 | # Direction and volatility 166 | for i in range(er_period, n): 167 | if np.isnan(close[i]) or np.isnan(close[i - er_period]): 168 | continue 169 | direction = abs(close[i] - close[i - er_period]) 170 | vol = 0.0 171 | valid = True 172 | for j in range(i - er_period + 1, i + 1): 173 | if np.isnan(close[j]) or np.isnan(close[j - 1]): 174 | valid = False 175 | break 176 | vol += abs(close[j] - close[j - 1]) 177 | if not valid or vol == 0: 178 | er[i] = 0.0 179 | else: 180 | er[i] = direction / vol 181 | fast_alpha = 2.0 / (fast_period + 1) 182 | slow_alpha = 2.0 / (slow_period + 1) 183 | # Initialize KAMA 184 | if not np.isnan(close[er_period]): 185 | kama[er_period] = close[er_period] 186 | for i in range(er_period + 1, n): 187 | sc = (er[i] * (fast_alpha - slow_alpha) + slow_alpha) 188 | sc = sc * sc 189 | if not np.isnan(kama[i - 1]) and not np.isnan(close[i]): 190 | kama[i] = kama[i - 1] + sc * (close[i] - kama[i - 1]) 191 | return kama 192 | 193 | 194 | # Ichimoku Cloud 195 | @njit(parallel=False, fastmath=True) 196 | def _calculate_ichimoku_numba( 197 
| high: np.ndarray, 198 | low: np.ndarray, 199 | close: np.ndarray, 200 | tenkan_period: int, 201 | kijun_period: int, 202 | senkou_span_b_period: int, 203 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: 204 | n = len(close) 205 | if ( 206 | n == 0 207 | or len(high) != n 208 | or len(low) != n 209 | or tenkan_period <= 0 210 | or kijun_period <= 0 211 | or senkou_span_b_period <= 0 212 | ): 213 | empty = np.full(n, np.nan, dtype=np.float64) 214 | return empty, empty, empty, empty 215 | tenkan_sen = np.full(n, np.nan, dtype=np.float64) 216 | kijun_sen = np.full(n, np.nan, dtype=np.float64) 217 | senkou_span_a = np.full(n, np.nan, dtype=np.float64) 218 | senkou_span_b = np.full(n, np.nan, dtype=np.float64) 219 | # Tenkan 220 | for i in range(tenkan_period - 1, n): 221 | if ( 222 | np.any(np.isnan(high[i - tenkan_period + 1 : i + 1])) 223 | or np.any(np.isnan(low[i - tenkan_period + 1 : i + 1])) 224 | ): 225 | continue 226 | tenkan_high = np.max(high[i - tenkan_period + 1 : i + 1]) 227 | tenkan_low = np.min(low[i - tenkan_period + 1 : i + 1]) 228 | tenkan_sen[i] = (tenkan_high + tenkan_low) / 2 229 | # Kijun 230 | for i in range(kijun_period - 1, n): 231 | if ( 232 | np.any(np.isnan(high[i - kijun_period + 1 : i + 1])) 233 | or np.any(np.isnan(low[i - kijun_period + 1 : i + 1])) 234 | ): 235 | continue 236 | kijun_high = np.max(high[i - kijun_period + 1 : i + 1]) 237 | kijun_low = np.min(low[i - kijun_period + 1 : i + 1]) 238 | kijun_sen[i] = (kijun_high + kijun_low) / 2 239 | # Senkou A and B 240 | for i in range(kijun_period - 1, n): 241 | if not np.isnan(tenkan_sen[i]) and not np.isnan(kijun_sen[i]): 242 | senkou_span_a[i] = (tenkan_sen[i] + kijun_sen[i]) / 2 243 | for i in range(senkou_span_b_period - 1, n): 244 | if ( 245 | np.any(np.isnan(high[i - senkou_span_b_period + 1 : i + 1])) 246 | or np.any(np.isnan(low[i - senkou_span_b_period + 1 : i + 1])) 247 | ): 248 | continue 249 | senkou_high = np.max(high[i - senkou_span_b_period + 1 : i + 1]) 250 | senkou_low = np.min(low[i - senkou_span_b_period + 1 : i + 1]) 251 | senkou_span_b[i] = (senkou_high + senkou_low) / 2 252 | return tenkan_sen, kijun_sen, senkou_span_a, senkou_span_b 253 | 254 | 255 | @njit(parallel=False, fastmath=True) 256 | def _calculate_heiken_ashi_numba( 257 | open_: np.ndarray, high: np.ndarray, low: np.ndarray, close: np.ndarray 258 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: 259 | """Heiken Ashi candle components.""" 260 | n = len(close) 261 | if n == 0 or len(open_) != n or len(high) != n or len(low) != n: 262 | empty = np.full(n, np.nan, dtype=np.float64) 263 | return empty, empty, empty, empty 264 | ha_close = np.full(n, np.nan, dtype=np.float64) 265 | ha_open = np.full(n, np.nan, dtype=np.float64) 266 | ha_high = np.full(n, np.nan, dtype=np.float64) 267 | ha_low = np.full(n, np.nan, dtype=np.float64) 268 | for i in range(n): 269 | if not np.isnan(open_[i]) and not np.isnan(high[i]) and not np.isnan(low[i]) and not np.isnan(close[i]): 270 | ha_close[i] = (open_[i] + high[i] + low[i] + close[i]) / 4.0 271 | if not np.isnan(open_[0]) and not np.isnan(close[0]): 272 | ha_open[0] = (open_[0] + close[0]) / 2.0 273 | for i in range(1, n): 274 | if not np.isnan(ha_open[i - 1]) and not np.isnan(ha_close[i - 1]): 275 | ha_open[i] = (ha_open[i - 1] + ha_close[i - 1]) / 2.0 276 | if not np.isnan(high[i]) and not np.isnan(ha_open[i]) and not np.isnan(ha_close[i]): 277 | ha_high[i] = max(high[i], ha_open[i], ha_close[i]) 278 | if not np.isnan(low[i]) and not np.isnan(ha_open[i]) and not 
np.isnan(ha_close[i]):
279 |             ha_low[i] = min(low[i], ha_open[i], ha_close[i])
280 |     return ha_open, ha_high, ha_low, ha_close
281 | 
282 | 
283 | @njit(parallel=False, fastmath=True)
284 | def _calculate_pivot_points_numba(
285 |     high: np.ndarray, low: np.ndarray, close: np.ndarray
286 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
287 |     """Classic pivot points: PP, R1, R2, S1, S2."""
288 |     n = len(close)
289 |     if n == 0 or len(high) != n or len(low) != n:
290 |         empty = np.full(n, np.nan, dtype=np.float64)
291 |         return empty, empty, empty, empty, empty
292 |     pp = np.full(n, np.nan, dtype=np.float64)
293 |     r1 = np.full(n, np.nan, dtype=np.float64)
294 |     r2 = np.full(n, np.nan, dtype=np.float64)
295 |     s1 = np.full(n, np.nan, dtype=np.float64)
296 |     s2 = np.full(n, np.nan, dtype=np.float64)
297 |     for i in range(1, n):
298 |         if np.isnan(high[i - 1]) or np.isnan(low[i - 1]) or np.isnan(close[i - 1]):
299 |             continue
300 |         pp[i] = (high[i - 1] + low[i - 1] + close[i - 1]) / 3.0
301 |         r1[i] = 2 * pp[i] - low[i - 1]
302 |         r2[i] = pp[i] + (high[i - 1] - low[i - 1])
303 |         s1[i] = 2 * pp[i] - high[i - 1]
304 |         s2[i] = pp[i] - (high[i - 1] - low[i - 1])
305 |     return pp, r1, r2, s1, s2
306 | 
307 | 
308 | @njit(parallel=False, fastmath=True)
309 | def _calculate_rainbow_numba(prices: np.ndarray, periods: np.ndarray) -> np.ndarray:
310 |     """Rainbow MA: stack multiple SMA lines for provided periods.
311 | 
312 |     Returns a 2D array of shape (len(periods), len(prices)).
313 |     """
314 |     n = len(prices)
315 |     k = len(periods)
316 |     out = np.full((k, n), np.nan, dtype=np.float64)
317 |     for p_idx in range(k):
318 |         p = int(periods[p_idx])
319 |         if p <= 0 or p > n:
320 |             continue
321 |         for i in range(p - 1, n):
322 |             if np.any(np.isnan(prices[i - p + 1 : i + 1])):
323 |                 continue
324 |             s = 0.0
325 |             cnt = 0
326 |             for j in range(i - p + 1, i + 1):
327 |                 s += prices[j]
328 |                 cnt += 1
329 |             out[p_idx, i] = s / cnt
330 |     return out
331 | 
332 | 
333 | # Re-export remaining trend kernels from the monolith until fully migrated.
334 | # _calculate_rainbow_numba already has a local implementation above and is
335 | # intentionally not re-imported here, which would shadow it.
336 | from .._indicator_kernels import _calculate_linear_regression_channel_numba  # noqa: E402
337 | 
338 | __all__ = [
339 |     "_calculate_sma_numba",
340 |     "_calculate_ema_numba",
341 |     "_calculate_dema_numba",
342 |     "_calculate_kama_numba",
343 |     "_calculate_ichimoku_numba",
344 |     "_calculate_heiken_ashi_numba",
345 |     "_calculate_pivot_points_numba",
346 |     "_calculate_rainbow_numba",
347 |     "_calculate_hull_ma_numba",
348 |     "_calculate_alma_numba",
349 |     "_calculate_linear_regression_channel_numba",
350 | ]
351 | 
--------------------------------------------------------------------------------
/quantjourney_ti/kernels/volatility_numba.py:
--------------------------------------------------------------------------------
1 | """
2 | Volatility kernels
3 | """
4 | import numpy as np
5 | from typing import Tuple
6 | from .._indicator_kernels import njit as njit
7 | 
8 | 
9 | @njit(parallel=False, fastmath=True)
10 | def _calculate_atr_numba(
11 |     high: np.ndarray, low: np.ndarray, close: np.ndarray, window: int
12 | ) -> np.ndarray:
13 |     n = len(high)
14 |     if n == 0 or len(low) != n or len(close) != n or window <= 0 or window > n:
15 |         return np.full(n, np.nan, dtype=np.float64)
16 |     tr = np.full(n, np.nan, dtype=np.float64)
17 |     atr = np.full(n, np.nan, dtype=np.float64)
18 |     for i in range(1, n):
19 |         if np.isnan(high[i]) or np.isnan(low[i]) or np.isnan(close[i - 1]):
20 |             continue
21 |         tr[i] = max(high[i] - low[i],
abs(high[i] - close[i - 1]), abs(low[i] - close[i - 1])) 22 | if window < n: 23 | initial_sum = 0.0 24 | count = 0 25 | for i in range(1, window + 1): 26 | if not np.isnan(tr[i]): 27 | initial_sum += tr[i] 28 | count += 1 29 | if count > 0: 30 | atr[window] = initial_sum / count 31 | for i in range(window + 1, n): 32 | if not np.isnan(tr[i]) and not np.isnan(atr[i - 1]): 33 | atr[i] = (atr[i - 1] * (window - 1) + tr[i]) / window 34 | return atr 35 | 36 | 37 | @njit(parallel=False, fastmath=True) 38 | def _calculate_bollinger_bands_numba( 39 | prices: np.ndarray, window: int, num_std: float 40 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 41 | n = len(prices) 42 | if n == 0 or window <= 0 or window > n: 43 | empty = np.full(n, np.nan, dtype=np.float64) 44 | return empty, empty, empty 45 | middle_band = np.full(n, np.nan, dtype=np.float64) 46 | upper_band = np.full(n, np.nan, dtype=np.float64) 47 | lower_band = np.full(n, np.nan, dtype=np.float64) 48 | for i in range(window - 1, n): 49 | if np.any(np.isnan(prices[i - window + 1 : i + 1])): 50 | continue 51 | sum_prices = 0.0 52 | sum_sq = 0.0 53 | count = 0 54 | for j in range(i - window + 1, i + 1): 55 | sum_prices += prices[j] 56 | sum_sq += prices[j] * prices[j] 57 | count += 1 58 | mean = sum_prices / count 59 | variance = (sum_sq / count) - (mean * mean) 60 | std = np.sqrt(variance) if variance > 0 else 0.0 61 | middle_band[i] = mean 62 | upper_band[i] = mean + (std * num_std) 63 | lower_band[i] = mean - (std * num_std) 64 | return upper_band, middle_band, lower_band 65 | 66 | 67 | @njit(parallel=False, fastmath=True) 68 | def _calculate_keltner_channels_numba( 69 | high: np.ndarray, 70 | low: np.ndarray, 71 | close: np.ndarray, 72 | ema_period: int, 73 | atr_period: int, 74 | multiplier: float, 75 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 76 | n = len(close) 77 | if n == 0 or len(high) != n or len(low) != n or ema_period <= 0 or atr_period <= 0 or max(ema_period, atr_period) > n: 78 | empty = np.full(n, np.nan, dtype=np.float64) 79 | return empty, empty, empty 80 | ema = np.full(n, np.nan, dtype=np.float64) 81 | multiplier_ema = 2.0 / (ema_period + 1) 82 | if n > 0 and not np.isnan(close[0]): 83 | ema[0] = close[0] 84 | for i in range(1, n): 85 | if not np.isnan(close[i]) and not np.isnan(ema[i - 1]): 86 | ema[i] = (close[i] - ema[i - 1]) * multiplier_ema + ema[i - 1] 87 | tr = np.full(n, np.nan, dtype=np.float64) 88 | atr = np.full(n, np.nan, dtype=np.float64) 89 | for i in range(1, n): 90 | if np.isnan(high[i]) or np.isnan(low[i]) or np.isnan(close[i - 1]): 91 | continue 92 | hl = high[i] - low[i] 93 | hc = abs(high[i] - close[i - 1]) 94 | lc = abs(low[i] - close[i - 1]) 95 | tr[i] = max(hl, hc, lc) 96 | if atr_period < n: 97 | sum_tr = 0.0 98 | count = 0 99 | for i in range(1, atr_period + 1): 100 | if not np.isnan(tr[i]): 101 | sum_tr += tr[i] 102 | count += 1 103 | if count > 0: 104 | atr[atr_period] = sum_tr / count 105 | for i in range(atr_period + 1, n): 106 | if not np.isnan(tr[i]) and not np.isnan(atr[i - 1]): 107 | atr[i] = (atr[i - 1] * (atr_period - 1) + tr[i]) / atr_period 108 | upper = np.full(n, np.nan, dtype=np.float64) 109 | lower = np.full(n, np.nan, dtype=np.float64) 110 | for i in range(max(ema_period, atr_period) - 1, n): 111 | if not np.isnan(ema[i]) and not np.isnan(atr[i]): 112 | upper[i] = ema[i] + (multiplier * atr[i]) 113 | lower[i] = ema[i] - (multiplier * atr[i]) 114 | return upper, ema, lower 115 | 116 | 117 | @njit(parallel=False, fastmath=True) 118 | def _calculate_donchian_channels_numba( 
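    # Donchian channels: rolling max(high), rolling min(low), and their
    # midpoint over the trailing `period` bars; NaN until the window fills.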
119 | high: np.ndarray, low: np.ndarray, period: int 120 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 121 | n = len(high) 122 | if n == 0 or len(low) != n or period <= 0 or period > n: 123 | empty = np.full(n, np.nan, dtype=np.float64) 124 | return empty, empty, empty 125 | upper = np.full(n, np.nan, dtype=np.float64) 126 | lower = np.full(n, np.nan, dtype=np.float64) 127 | middle = np.full(n, np.nan, dtype=np.float64) 128 | for i in range(period - 1, n): 129 | if np.any(np.isnan(high[i - period + 1 : i + 1])) or np.any(np.isnan(low[i - period + 1 : i + 1])): 130 | continue 131 | upper[i] = np.max(high[i - period + 1 : i + 1]) 132 | lower[i] = np.min(low[i - period + 1 : i + 1]) 133 | middle[i] = (upper[i] + lower[i]) / 2 134 | return upper, middle, lower 135 | 136 | 137 | @njit(parallel=False, fastmath=True) 138 | def _calculate_supertrend_numba( 139 | high: np.ndarray, low: np.ndarray, close: np.ndarray, period: int, multiplier: float 140 | ) -> Tuple[np.ndarray, np.ndarray]: 141 | """Supertrend line and direction (+1 uptrend, -1 downtrend).""" 142 | n = len(close) 143 | if n == 0 or len(high) != n or len(low) != n or period <= 0 or period > n: 144 | empty = np.full(n, np.nan, dtype=np.float64) 145 | return empty, empty 146 | tr = np.full(n, np.nan, dtype=np.float64) 147 | atr = np.full(n, np.nan, dtype=np.float64) 148 | for i in range(1, n): 149 | if np.isnan(high[i]) or np.isnan(low[i]) or np.isnan(close[i - 1]): 150 | continue 151 | tr[i] = max(high[i] - low[i], abs(high[i] - close[i - 1]), abs(low[i] - close[i - 1])) 152 | if period < n: 153 | s = 0.0 154 | cnt = 0 155 | for i in range(1, period + 1): 156 | if not np.isnan(tr[i]): 157 | s += tr[i] 158 | cnt += 1 159 | if cnt > 0: 160 | atr[period] = s / cnt 161 | for i in range(period + 1, n): 162 | if not np.isnan(tr[i]) and not np.isnan(atr[i - 1]): 163 | atr[i] = (atr[i - 1] * (period - 1) + tr[i]) / period 164 | upper = np.full(n, np.nan, dtype=np.float64) 165 | lower = np.full(n, np.nan, dtype=np.float64) 166 | st = np.full(n, np.nan, dtype=np.float64) 167 | direction = np.full(n, np.nan, dtype=np.float64) 168 | for i in range(period, n): 169 | if np.isnan(high[i]) or np.isnan(low[i]) or np.isnan(close[i]) or np.isnan(atr[i]): 170 | continue 171 | mid = (high[i] + low[i]) / 2.0 172 | upper[i] = mid + multiplier * atr[i] 173 | lower[i] = mid - multiplier * atr[i] 174 | if i == period: 175 | st[i] = lower[i] 176 | direction[i] = 1 177 | else: 178 | if direction[i - 1] == 1: 179 | if not np.isnan(upper[i - 1]): 180 | upper[i] = min(upper[i], upper[i - 1]) 181 | if close[i] > st[i - 1]: 182 | st[i] = lower[i] 183 | direction[i] = 1 184 | else: 185 | st[i] = upper[i] 186 | direction[i] = -1 187 | else: 188 | if not np.isnan(lower[i - 1]): 189 | lower[i] = max(lower[i], lower[i - 1]) 190 | if close[i] < st[i - 1]: 191 | st[i] = upper[i] 192 | direction[i] = -1 193 | else: 194 | st[i] = lower[i] 195 | direction[i] = 1 196 | return st, direction 197 | # Re-export remaining volatility kernels until migrated 198 | @njit(parallel=False, fastmath=True) 199 | def _calculate_chaikin_volatility_numba( 200 | high: np.ndarray, low: np.ndarray, ema_period: int = 10, roc_period: int = 10 201 | ) -> np.ndarray: 202 | n = len(high) 203 | if n == 0 or len(low) != n or ema_period <= 0 or roc_period <= 0 or ema_period + roc_period > n: 204 | return np.full(n, np.nan, dtype=np.float64) 205 | hl_range = np.full(n, np.nan, dtype=np.float64) 206 | for i in range(n): 207 | if not np.isnan(high[i]) and not np.isnan(low[i]): 208 | hl_range[i] = high[i] - 
low[i] 209 | ema = np.full(n, np.nan, dtype=np.float64) 210 | multiplier = 2.0 / (ema_period + 1) 211 | if n > 0 and not np.isnan(hl_range[0]): 212 | ema[0] = hl_range[0] 213 | for i in range(1, n): 214 | if not np.isnan(hl_range[i]) and not np.isnan(ema[i - 1]): 215 | ema[i] = (hl_range[i] - ema[i - 1]) * multiplier + ema[i - 1] 216 | cv = np.full(n, np.nan, dtype=np.float64) 217 | for i in range(roc_period, n): 218 | if not np.isnan(ema[i]) and not np.isnan(ema[i - roc_period]) and ema[i - roc_period] != 0: 219 | cv[i] = ((ema[i] - ema[i - roc_period]) / ema[i - roc_period]) * 100 220 | return cv 221 | 222 | 223 | @njit(parallel=False, fastmath=True) 224 | def _calculate_historical_volatility_numba( 225 | close: np.ndarray, period: int = 20, trading_days: int = 252 226 | ) -> np.ndarray: 227 | n = len(close) 228 | if n <= 1 or period <= 0 or period > n: 229 | return np.full(n, np.nan, dtype=np.float64) 230 | returns = np.full(n - 1, np.nan, dtype=np.float64) 231 | for i in range(1, n): 232 | if not np.isnan(close[i]) and not np.isnan(close[i - 1]) and close[i - 1] != 0: 233 | returns[i - 1] = np.log(close[i] / close[i - 1]) 234 | hv = np.full(n, np.nan, dtype=np.float64) 235 | for i in range(period, n): 236 | if np.any(np.isnan(returns[i - period : i])): 237 | continue 238 | std = np.std(returns[i - period : i]) 239 | hv[i] = std * np.sqrt(trading_days) * 100 240 | return hv 241 | 242 | 243 | @njit(parallel=False, fastmath=True) 244 | def _calculate_linear_regression_channel_numba( 245 | close: np.ndarray, period: int, deviations: float = 2.0 246 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 247 | n = len(close) 248 | if n == 0 or period <= 2 or period > n: 249 | empty = np.full(n, np.nan, dtype=np.float64) 250 | return empty, empty, empty 251 | middle = np.full(n, np.nan, dtype=np.float64) 252 | upper = np.full(n, np.nan, dtype=np.float64) 253 | lower = np.full(n, np.nan, dtype=np.float64) 254 | for i in range(period - 1, n): 255 | y = close[i - period + 1 : i + 1] 256 | if np.any(np.isnan(y)): 257 | continue 258 | x = np.arange(period, dtype=np.float64) 259 | x_mean = np.mean(x) 260 | y_mean = np.mean(y) 261 | num = 0.0 262 | den = 0.0 263 | for j in range(period): 264 | num += (x[j] - x_mean) * (y[j] - y_mean) 265 | den += (x[j] - x_mean) * (x[j] - x_mean) 266 | if den == 0: 267 | continue 268 | slope = num / den 269 | intercept = y_mean - slope * x_mean 270 | predict = slope * (period - 1) + intercept 271 | middle[i] = predict 272 | # compute std error 273 | std_err_sum = 0.0 274 | for j in range(period): 275 | y_pred = slope * x[j] + intercept 276 | std_err_sum += (y[j] - y_pred) * (y[j] - y_pred) 277 | std_err = np.sqrt(std_err_sum / (period - 2)) if period > 2 else 0.0 278 | upper[i] = predict + deviations * std_err 279 | lower[i] = predict - deviations * std_err 280 | return upper, middle, lower 281 | 282 | 283 | @njit(parallel=False, fastmath=True) 284 | def _calculate_choppiness_index_numba( 285 | high: np.ndarray, low: np.ndarray, close: np.ndarray, period: int 286 | ) -> np.ndarray: 287 | n = len(high) 288 | if n == 0 or len(low) != n or len(close) != n or period <= 0 or period > n: 289 | return np.full(n, np.nan, dtype=np.float64) 290 | tr = np.full(n, np.nan, dtype=np.float64) 291 | for i in range(1, n): 292 | if np.isnan(high[i]) or np.isnan(low[i]) or np.isnan(close[i - 1]): 293 | continue 294 | hl = high[i] - low[i] 295 | hc = abs(high[i] - close[i - 1]) 296 | lc = abs(low[i] - close[i - 1]) 297 | tr[i] = max(hl, hc, lc) 298 | ci = np.full(n, np.nan, 
dtype=np.float64) 299 | for i in range(period, n): 300 | if ( 301 | np.any(np.isnan(tr[i - period + 1 : i + 1])) 302 | or np.any(np.isnan(high[i - period + 1 : i + 1])) 303 | or np.any(np.isnan(low[i - period + 1 : i + 1])) 304 | ): 305 | continue 306 | sum_tr = 0.0 307 | for j in range(i - period + 1, i + 1): 308 | sum_tr += tr[j] 309 | highest_high = np.max(high[i - period + 1 : i + 1]) 310 | lowest_low = np.min(low[i - period + 1 : i + 1]) 311 | if highest_high != lowest_low and sum_tr > 0: 312 | ci[i] = 100 * np.log10(sum_tr / (highest_high - lowest_low)) / np.log10(period) 313 | return ci 314 | 315 | 316 | @njit(parallel=False, fastmath=True) 317 | def _calculate_mass_index_numba( 318 | high: np.ndarray, low: np.ndarray, ema_period: int = 9, sum_period: int = 25 319 | ) -> np.ndarray: 320 | """Mass Index based on EMA(high-low) and EMA(EMA(high-low)).""" 321 | n = len(high) 322 | if n == 0 or len(low) != n or ema_period <= 0 or sum_period <= 0 or ema_period + sum_period > n: 323 | return np.full(n, np.nan, dtype=np.float64) 324 | diff = np.full(n, np.nan, dtype=np.float64) 325 | for i in range(n): 326 | if not np.isnan(high[i]) and not np.isnan(low[i]): 327 | diff[i] = high[i] - low[i] 328 | ema1 = np.full(n, np.nan, dtype=np.float64) 329 | ema2 = np.full(n, np.nan, dtype=np.float64) 330 | a = 2.0 / (ema_period + 1) 331 | if n > 0 and not np.isnan(diff[0]): 332 | ema1[0] = diff[0] 333 | ema2[0] = diff[0] 334 | for i in range(1, n): 335 | if not np.isnan(diff[i]) and not np.isnan(ema1[i - 1]): 336 | ema1[i] = (diff[i] - ema1[i - 1]) * a + ema1[i - 1] 337 | if not np.isnan(ema1[i]) and not np.isnan(ema2[i - 1]): 338 | ema2[i] = (ema1[i] - ema2[i - 1]) * a + ema2[i - 1] 339 | ratio = np.full(n, np.nan, dtype=np.float64) 340 | for i in range(n): 341 | if not np.isnan(ema1[i]) and not np.isnan(ema2[i]) and ema2[i] != 0: 342 | ratio[i] = ema1[i] / ema2[i] 343 | mi = np.full(n, np.nan, dtype=np.float64) 344 | for i in range(sum_period - 1, n): 345 | if np.any(np.isnan(ratio[i - sum_period + 1 : i + 1])): 346 | continue 347 | s = 0.0 348 | for j in range(i - sum_period + 1, i + 1): 349 | s += ratio[j] 350 | mi[i] = s 351 | return mi 352 | 353 | __all__ = [ 354 | "_calculate_atr_numba", 355 | "_calculate_bollinger_bands_numba", 356 | "_calculate_keltner_channels_numba", 357 | "_calculate_donchian_channels_numba", 358 | "_calculate_supertrend_numba", 359 | "_calculate_chaikin_volatility_numba", 360 | "_calculate_historical_volatility_numba", 361 | "_calculate_linear_regression_channel_numba", 362 | "_calculate_choppiness_index_numba", 363 | "_calculate_mass_index_numba", 364 | ] 365 | -------------------------------------------------------------------------------- /quantjourney_ti/kernels/volume_numba.py: -------------------------------------------------------------------------------- 1 | """ 2 | Volume kernels 3 | """ 4 | import numpy as np 5 | from typing import Tuple 6 | from .._indicator_kernels import njit as njit 7 | 8 | 9 | @njit(parallel=False, fastmath=True) 10 | def _calculate_obv_numba(close: np.ndarray, volume: np.ndarray) -> np.ndarray: 11 | n = len(close) 12 | if n == 0 or len(volume) != n: 13 | return np.full(n, np.nan, dtype=np.float64) 14 | obv = np.zeros(n, dtype=np.float64) 15 | for i in range(1, n): 16 | if np.isnan(close[i]) or np.isnan(close[i - 1]) or np.isnan(volume[i]): 17 | obv[i] = obv[i - 1] 18 | elif close[i] > close[i - 1]: 19 | obv[i] = obv[i - 1] + volume[i] 20 | elif close[i] < close[i - 1]: 21 | obv[i] = obv[i - 1] - volume[i] 22 | else: 23 | obv[i] = obv[i - 1] 
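    # NaN ticks and unchanged closes both carry the previous OBV total forward.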
24 | return obv 25 | 26 | 27 | @njit(parallel=False, fastmath=True) 28 | def _calculate_ad_numba( 29 | high: np.ndarray, low: np.ndarray, close: np.ndarray, volume: np.ndarray 30 | ) -> np.ndarray: 31 | n = len(close) 32 | if n == 0 or len(high) != n or len(low) != n or len(volume) != n: 33 | return np.empty(0, dtype=np.float64) 34 | ad = np.zeros(n, dtype=np.float64) 35 | for i in range(n): 36 | if np.isnan(high[i]) or np.isnan(low[i]) or np.isnan(close[i]) or np.isnan(volume[i]): 37 | ad[i] = ad[i - 1] if i > 0 else 0.0 38 | continue 39 | if high[i] == low[i]: 40 | money_flow_vol = 0.0 41 | else: 42 | clv = ((close[i] - low[i]) - (high[i] - close[i])) / (high[i] - low[i]) 43 | money_flow_vol = clv * volume[i] 44 | ad[i] = money_flow_vol if i == 0 else ad[i - 1] + money_flow_vol 45 | return ad 46 | 47 | 48 | @njit(parallel=False, fastmath=True) 49 | def _calculate_mfi_numba( 50 | high: np.ndarray, 51 | low: np.ndarray, 52 | close: np.ndarray, 53 | volume: np.ndarray, 54 | period: int, 55 | ) -> np.ndarray: 56 | n = len(close) 57 | if n == 0 or len(high) != n or len(low) != n or len(volume) != n or period <= 0 or period > n: 58 | return np.full(n, np.nan, dtype=np.float64) 59 | typical_price = np.full(n, np.nan, dtype=np.float64) 60 | money_flow = np.full(n, np.nan, dtype=np.float64) 61 | positive_flow = np.zeros(n, dtype=np.float64) 62 | negative_flow = np.zeros(n, dtype=np.float64) 63 | for i in range(n): 64 | if not np.isnan(high[i]) and not np.isnan(low[i]) and not np.isnan(close[i]): 65 | typical_price[i] = (high[i] + low[i] + close[i]) / 3 66 | for i in range(n): 67 | if not np.isnan(typical_price[i]) and not np.isnan(volume[i]): 68 | money_flow[i] = typical_price[i] * volume[i] 69 | for i in range(1, n): 70 | if not np.isnan(typical_price[i]) and not np.isnan(typical_price[i - 1]) and not np.isnan(money_flow[i]): 71 | if typical_price[i] > typical_price[i - 1]: 72 | positive_flow[i] = money_flow[i] 73 | elif typical_price[i] < typical_price[i - 1]: 74 | negative_flow[i] = money_flow[i] 75 | mfi = np.full(n, np.nan, dtype=np.float64) 76 | for i in range(period, n): 77 | positive_sum = 0.0 78 | negative_sum = 0.0 79 | for j in range(i - period + 1, i + 1): 80 | positive_sum += positive_flow[j] 81 | negative_sum += negative_flow[j] 82 | if positive_sum == 0 and negative_sum == 0: 83 | mfi[i] = 50.0 84 | elif negative_sum == 0: 85 | mfi[i] = 100.0 86 | else: 87 | money_ratio = positive_sum / negative_sum 88 | mfi[i] = 100 - (100 / (1 + money_ratio)) 89 | return mfi 90 | 91 | 92 | @njit(parallel=False, fastmath=True) 93 | def _calculate_vwap_numba( 94 | high: np.ndarray, 95 | low: np.ndarray, 96 | close: np.ndarray, 97 | volume: np.ndarray, 98 | period: int 99 | ) -> np.ndarray: 100 | n = len(close) 101 | if n == 0 or len(high) != n or len(low) != n or len(volume) != n or period <= 0 or period > n: 102 | return np.full(n, np.nan, dtype=np.float64) 103 | vwap = np.full(n, np.nan, dtype=np.float64) 104 | for i in range(period - 1, n): 105 | if ( 106 | np.any(np.isnan(high[i - period + 1 : i + 1])) 107 | or np.any(np.isnan(low[i - period + 1 : i + 1])) 108 | or np.any(np.isnan(close[i - period + 1 : i + 1])) 109 | or np.any(np.isnan(volume[i - period + 1 : i + 1])) 110 | ): 111 | continue 112 | sum_price_vol = 0.0 113 | sum_vol = 0.0 114 | count = 0 115 | for j in range(i - period + 1, i + 1): 116 | sum_price_vol += ((high[j] + low[j] + close[j]) / 3.0) * volume[j] 117 | sum_vol += volume[j] 118 | count += 1 119 | if sum_vol != 0: 120 | vwap[i] = sum_price_vol / sum_vol 121 | return vwap 
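
# Hand-checked example for the rolling VWAP above (illustrative values only):
# with high=[10, 12], low=[8, 10], close=[9, 11], volume=[100, 300] and
# period=2, the typical prices are 9.0 and 11.0, so
# vwap[1] = (9.0*100 + 11.0*300) / 400 = 10.5, while vwap[0] stays NaN (warm-up).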
122 | 123 | 124 | @njit(parallel=False, fastmath=True) 125 | def _calculate_typical_price_numba(high: np.ndarray, low: np.ndarray, close: np.ndarray) -> np.ndarray: 126 | n = len(close) 127 | out = np.full(n, np.nan, dtype=np.float64) 128 | for i in range(n): 129 | if not np.isnan(high[i]) and not np.isnan(low[i]) and not np.isnan(close[i]): 130 | out[i] = (high[i] + low[i] + close[i]) / 3.0 131 | return out 132 | 133 | 134 | @njit(parallel=False, fastmath=True) 135 | def _calculate_weighted_close_numba(high: np.ndarray, low: np.ndarray, close: np.ndarray) -> np.ndarray: 136 | n = len(close) 137 | out = np.full(n, np.nan, dtype=np.float64) 138 | for i in range(n): 139 | if not np.isnan(high[i]) and not np.isnan(low[i]) and not np.isnan(close[i]): 140 | out[i] = (high[i] + low[i] + 2 * close[i]) / 4.0 141 | return out 142 | 143 | 144 | @njit(parallel=False, fastmath=True) 145 | def _calculate_volume_indicators_numba( 146 | close: np.ndarray, volume: np.ndarray, period: int 147 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 148 | n = len(close) 149 | if n == 0 or len(volume) != n or period <= 0 or period > n: 150 | empty = np.full(n, np.nan, dtype=np.float64) 151 | return empty, empty, empty 152 | vol_sma = np.full(n, np.nan, dtype=np.float64) 153 | force_index = np.full(n, np.nan, dtype=np.float64) 154 | vpt = np.full(n, np.nan, dtype=np.float64) 155 | for i in range(period - 1, n): 156 | if np.any(np.isnan(volume[i - period + 1 : i + 1])): 157 | continue 158 | sum_vol = 0.0 159 | count = 0 160 | for j in range(i - period + 1, i + 1): 161 | sum_vol += volume[j] 162 | count += 1 163 | if count > 0: 164 | vol_sma[i] = sum_vol / count 165 | for i in range(1, n): 166 | if not np.isnan(close[i]) and not np.isnan(close[i - 1]) and not np.isnan(volume[i]): 167 | force_index[i] = (close[i] - close[i - 1]) * volume[i] 168 | for i in range(1, n): 169 | if not np.isnan(close[i]) and not np.isnan(close[i - 1]) and not np.isnan(volume[i]) and close[i - 1] != 0: 170 | vpt[i] = (vpt[i - 1] if i > 1 and not np.isnan(vpt[i - 1]) else 0) + volume[i] * (close[i] - close[i - 1]) / close[i - 1] 171 | return vol_sma, force_index, vpt 172 | 173 | __all__ = [ 174 | "_calculate_obv_numba", 175 | "_calculate_ad_numba", 176 | "_calculate_mfi_numba", 177 | "_calculate_vwap_numba", 178 | "_calculate_volume_indicators_numba", 179 | "_calculate_typical_price_numba", 180 | "_calculate_weighted_close_numba", 181 | ] 182 | 183 | 184 | @njit(parallel=False, fastmath=True) 185 | def _calculate_adosc_numba( 186 | high: np.ndarray, low: np.ndarray, close: np.ndarray, volume: np.ndarray, fast_period: int = 3, slow_period: int = 10 187 | ) -> np.ndarray: 188 | n = len(close) 189 | if n == 0 or len(high) != n or len(low) != n or len(volume) != n or fast_period <= 0 or slow_period <= 0: 190 | return np.full(n, np.nan, dtype=np.float64) 191 | ad = np.zeros(n, dtype=np.float64) 192 | for i in range(n): 193 | if np.isnan(high[i]) or np.isnan(low[i]) or np.isnan(close[i]) or np.isnan(volume[i]): 194 | ad[i] = ad[i - 1] if i > 0 else 0.0 195 | continue 196 | if high[i] == low[i]: 197 | money_flow_vol = 0.0 198 | else: 199 | clv = ((close[i] - low[i]) - (high[i] - close[i])) / (high[i] - low[i]) 200 | money_flow_vol = clv * volume[i] 201 | ad[i] = money_flow_vol if i == 0 else ad[i - 1] + money_flow_vol 202 | fast_ema = np.full(n, np.nan, dtype=np.float64) 203 | slow_ema = np.full(n, np.nan, dtype=np.float64) 204 | if n > 0: 205 | fast_ema[0] = ad[0] 206 | slow_ema[0] = ad[0] 207 | fm = 2.0 / (fast_period + 1) 208 | sm = 2.0 / 
(slow_period + 1) 209 | for i in range(1, n): 210 | if not np.isnan(ad[i]) and not np.isnan(fast_ema[i - 1]): 211 | fast_ema[i] = (ad[i] - fast_ema[i - 1]) * fm + fast_ema[i - 1] 212 | if not np.isnan(ad[i]) and not np.isnan(slow_ema[i - 1]): 213 | slow_ema[i] = (ad[i] - slow_ema[i - 1]) * sm + slow_ema[i - 1] 214 | out = np.full(n, np.nan, dtype=np.float64) 215 | for i in range(n): 216 | if not np.isnan(fast_ema[i]) and not np.isnan(slow_ema[i]): 217 | out[i] = fast_ema[i] - slow_ema[i] 218 | return out 219 | 220 | 221 | @njit(parallel=False, fastmath=True) 222 | def _calculate_pvo_numba(volume: np.ndarray, short_period: int = 12, long_period: int = 26) -> Tuple[np.ndarray, np.ndarray]: 223 | n = len(volume) 224 | if n == 0 or short_period <= 0 or long_period <= 0 or max(short_period, long_period) > n: 225 | return np.full(n, np.nan, dtype=np.float64), np.full(n, np.nan, dtype=np.float64) 226 | se = np.full(n, np.nan, dtype=np.float64) 227 | le = np.full(n, np.nan, dtype=np.float64) 228 | sm = 2.0 / (short_period + 1) 229 | lm = 2.0 / (long_period + 1) 230 | if n > 0 and not np.isnan(volume[0]): 231 | se[0] = volume[0] 232 | le[0] = volume[0] 233 | for i in range(1, n): 234 | if not np.isnan(volume[i]) and not np.isnan(se[i - 1]): 235 | se[i] = (volume[i] - se[i - 1]) * sm + se[i - 1] 236 | if not np.isnan(volume[i]) and not np.isnan(le[i - 1]): 237 | le[i] = (volume[i] - le[i - 1]) * lm + le[i - 1] 238 | pvo = np.full(n, np.nan, dtype=np.float64) 239 | for i in range(n): 240 | if not np.isnan(se[i]) and not np.isnan(le[i]) and le[i] != 0: 241 | pvo[i] = 100 * (se[i] - le[i]) / le[i] 242 | # Signal EMA (9) 243 | signal = np.full(n, np.nan, dtype=np.float64) 244 | sigm = 2.0 / (9 + 1) 245 | if n > 0 and not np.isnan(pvo[0]): 246 | signal[0] = pvo[0] 247 | for i in range(1, n): 248 | if not np.isnan(pvo[i]) and not np.isnan(signal[i - 1]): 249 | signal[i] = (pvo[i] - signal[i - 1]) * sigm + signal[i - 1] 250 | return pvo, signal 251 | 252 | 253 | __all__ += ["_calculate_adosc_numba", "_calculate_pvo_numba"] 254 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.26.0 # Or just 'numpy' to let uv pick the newest compatible 2 | pandas<3.0,>=2.0.0 3 | numba>=0.59.0 4 | pytest 5 | yfinance 6 | matplotlib>=3.5.0 -------------------------------------------------------------------------------- /run_demo.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | echo Setting up environment for QuantJourney Technical Indicators... 3 | 4 | :: Step 0: Check if uv is installed 5 | where uv >nul 2>nul 6 | if errorlevel 1 ( 7 | echo uv is not installed. 8 | echo Installing uv using pipx... 9 | 10 | where pipx >nul 2>nul 11 | if errorlevel 1 ( 12 | echo pipx not found. Installing pipx first... 13 | python -m pip install --user pipx 14 | python -m pipx ensurepath 15 | set PATH=%USERPROFILE%\.local\bin;%PATH% 16 | ) 17 | 18 | pipx install uv 19 | ) else ( 20 | echo uv is already installed. 21 | ) 22 | 23 | :: Step 1: Create virtual environment 24 | echo Creating virtual environment... 25 | uv venv --python 3.11 26 | 27 | :: Step 2: Activate virtual environment 28 | call .venv\Scripts\activate.bat 29 | echo Activated virtualenv with Python: 30 | python --version 31 | 32 | :: Step 3: Install project with dev dependencies 33 | echo Installing project with dev dependencies... 
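:: -e installs the package in editable mode: changes in the working tree take
:: effect without reinstalling.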
34 | uv pip install -e ".[dev]"
35 | 
36 | :: Step 4: Run the example script
37 | echo Running the indicators demo...
38 | python examples\example_indicators.py
39 | 
--------------------------------------------------------------------------------
/run_demo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | echo "Setting up environment for QuantJourney Technical Indicators..."
4 | 
5 | # Step 0: Check for uv and install if not available
6 | if ! command -v uv &> /dev/null; then
7 |     echo "uv is not installed. Installing with pipx..."
8 |     if ! command -v pipx &> /dev/null; then
9 |         echo "pipx not found. Installing pipx..."
10 |         python3 -m pip install --user pipx
11 |         python3 -m pipx ensurepath
12 |         export PATH="$HOME/.local/bin:$PATH"
13 |     fi
14 |     pipx install uv
15 | else
16 |     echo "uv is already installed: $(uv --version)"
17 | fi
18 | 
19 | # Step 1: Create virtual environment with Python 3.11
20 | echo "Creating virtual environment..."
21 | uv venv --python 3.11
22 | 
23 | # Step 2: Activate virtual environment
24 | source .venv/bin/activate
25 | echo "Activated virtualenv with Python: $(python --version)"
26 | 
27 | # Step 3: Install project with dev dependencies
28 | echo "Installing project with dev dependencies..."
29 | uv pip install -e ".[dev]"
30 | 
31 | # Step 4: Run the example script
32 | echo "Running the indicators demo..."
33 | python examples/example_indicators.py
34 | 
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests for quantjourney_ti package.
3 | """
4 | 
--------------------------------------------------------------------------------
/tests/_yf.py:
--------------------------------------------------------------------------------
1 | """Helper to fetch data via yfinance for integration tests only."""
2 | 
3 | from __future__ import annotations
4 | 
5 | import logging
6 | from functools import lru_cache
7 | from typing import Literal, Optional
8 | 
9 | import pandas as pd
10 | 
11 | logger = logging.getLogger(__name__)
12 | _YF_ERR = "yfinance is required; install with `pip install yfinance`"
13 | 
14 | 
15 | @lru_cache(maxsize=32)
16 | def get_ohlcv(
17 |     ticker: str,
18 |     *,
19 |     start: Optional[str] = None,
20 |     end: Optional[str] = None,
21 |     period: str = "1y",
22 |     interval: Literal[
23 |         "1m",
24 |         "2m",
25 |         "5m",
26 |         "15m",
27 |         "30m",
28 |         "60m",
29 |         "90m",
30 |         "1h",
31 |         "1d",
32 |         "5d",
33 |         "1wk",
34 |         "1mo",
35 |         "3mo",
36 |     ] = "1d",
37 |     auto_adjust: bool = True,
38 | ) -> pd.DataFrame:
39 |     try:
40 |         import yfinance as yf
41 |     except ImportError as exc:
42 |         raise ImportError(_YF_ERR) from exc
43 |     logger.info("Downloading %s %s (interval=%s)", ticker, period, interval)
44 |     try:
45 |         df = yf.download(
46 |             ticker,
47 |             start=start,
48 |             end=end,
49 |             period=period,
50 |             interval=interval,
51 |             auto_adjust=auto_adjust,
52 |             progress=False,
53 |         )
54 |         if df.empty:
55 |             raise ValueError(f"No data for {ticker}")
56 |         # Ensure consistent column names
57 |         if isinstance(df.columns, pd.MultiIndex):
58 |             df.columns = df.columns.get_level_values(0)
59 |         return df.rename(columns=str.lower)
60 |     except Exception as exc:
61 |         raise RuntimeError(f"Failed to fetch data for {ticker}: {exc}") from exc
62 | 
63 | 
64 | __all__ = ["get_ohlcv"]
65 | 
--------------------------------------------------------------------------------
/tests/conftest.py:
-------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | def pytest_ignore_collect(collection_path: Path): 4 | """Exclude the comprehensive script-like file from test collection.""" 5 | return collection_path.name == "test_all_indicators_comprehensive.py" 6 | -------------------------------------------------------------------------------- /tests/test_all_indicators.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import os 3 | import pprint 4 | import time 5 | 6 | import pandas as pd 7 | import pytest 8 | import numpy as np 9 | 10 | if os.getenv("CI") or os.getenv("SKIP_ALL_INDICATORS"): 11 | pytest.skip("Skipping full indicator run on CI", allow_module_level=True) 12 | 13 | try: 14 | from tests._yf import get_ohlcv 15 | import quantjourney_ti as qti 16 | from quantjourney_ti._decorators import timer 17 | except Exception as exc: 18 | pytest.skip(f"Prerequisites missing: {exc}", allow_module_level=True) 19 | 20 | TICKER = "AAPL" 21 | 22 | def _feed_arg(method, df): 23 | """Return appropriate first argument (Series or DataFrame).""" 24 | sig = inspect.signature(method) 25 | first = next(iter(sig.parameters.values())) 26 | if first.annotation is pd.Series or first.annotation is inspect._empty: 27 | return pd.Series(df["adj_close"].values, index=df.index) 28 | return df 29 | 30 | @pytest.fixture 31 | def mock_ohlcv(): 32 | data = { 33 | "open": [100, 101, 102] * 21, # 63 rows 34 | "high": [102, 103, 104] * 21, 35 | "low": [99, 100, 101] * 21, 36 | "adj_close": [101, 102, 103] * 21, 37 | "volume": [1000, 1100, 1200] * 21, 38 | } 39 | return pd.DataFrame(data, index=pd.date_range("2025-01-01", periods=63)) 40 | 41 | def test_run_all_indicators(capsys, mock_ohlcv): 42 | """Execute every indicator once with defaults.""" 43 | df = mock_ohlcv 44 | ti = qti.TechnicalIndicators() 45 | results, errors, timings = {}, {}, {} 46 | skip_indicators = { 47 | "BETA", "calculate_multiple_indicators", "plot_indicators", "BENFORD_LAW", 48 | } 49 | for name, meth in inspect.getmembers(ti, predicate=inspect.ismethod): 50 | if name.startswith("_") or name in skip_indicators: 51 | continue 52 | try: 53 | arg = _feed_arg(meth, df) 54 | @timer 55 | def _run(): 56 | return meth(arg) 57 | start = time.perf_counter() 58 | out = _run() 59 | timings[name] = time.perf_counter() - start 60 | val = out.dropna().iloc[-1] if isinstance(out, (pd.Series, pd.DataFrame)) else out 61 | results[name] = val 62 | except Exception as exc: 63 | errors[name] = str(exc) 64 | print("\n=== Indicator outputs (last non-NaN value) ===") 65 | pprint.pprint(results, compact=True) 66 | print("\n=== Execution times (seconds) ===") 67 | pprint.pprint(timings, compact=True) 68 | if errors: 69 | print("\n=== Errors ===") 70 | pprint.pprint(errors, compact=True) 71 | total = len(results) + len(errors) 72 | assert len(results) / total >= 0.5, f"Too many indicator failures: {len(errors)}/{total}" -------------------------------------------------------------------------------- /tests/test_basic.py: -------------------------------------------------------------------------------- 1 | """ 2 | Comprehensive tests for quantjourney_ti package. 
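Indicator-specific checks are guarded with hasattr/skip so the suite degrades
gracefully while the public API is still settling.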
3 | """ 4 | 5 | import pytest 6 | import numpy as np 7 | import pandas as pd 8 | import quantjourney_ti as qti 9 | 10 | 11 | class TestPackageBasics: 12 | """Test basic package functionality.""" 13 | 14 | def test_import(self): 15 | """Test that the package imports correctly.""" 16 | assert hasattr(qti, "__version__") 17 | assert qti.__version__ == "0.3.1" 18 | assert hasattr(qti, "__author__") 19 | assert qti.__author__ == "Jakub Polec" 20 | 21 | def test_package_structure(self): 22 | """Test that the package has the expected structure.""" 23 | # Test that technical indicators module is accessible 24 | assert ( 25 | hasattr(qti, "TechnicalIndicators") or True 26 | ) # Adjust based on your actual class name 27 | 28 | 29 | class TestTechnicalIndicators: 30 | """Test technical indicator calculations.""" 31 | 32 | @pytest.fixture 33 | def sample_data(self): 34 | """Create sample price data for testing.""" 35 | np.random.seed(42) # For reproducible tests 36 | dates = pd.date_range("2023-01-01", periods=100, freq="D") 37 | prices = pd.Series(100 + np.cumsum(np.random.randn(100) * 0.5), index=dates) 38 | return prices 39 | 40 | def test_indicators_exist(self, sample_data): 41 | """Test that main indicator functions exist.""" 42 | # This test will need to be updated based on your actual indicator functions 43 | # Common indicators that should exist: 44 | indicators_to_test = [ 45 | "sma", 46 | "ema", 47 | "rsi", 48 | "macd", 49 | "bollinger_bands", 50 | "atr", 51 | "stochastic", 52 | "williams_r", 53 | ] 54 | 55 | for indicator in indicators_to_test: 56 | if hasattr(qti, indicator): 57 | assert callable(getattr(qti, indicator)) 58 | 59 | def test_basic_calculation(self, sample_data): 60 | """Test that indicators can be calculated without errors.""" 61 | try: 62 | # Test SMA if it exists 63 | if hasattr(qti, "sma"): 64 | result = qti.sma(sample_data, window=20) 65 | assert isinstance(result, pd.Series) 66 | assert len(result) <= len(sample_data) 67 | 68 | # Test EMA if it exists 69 | if hasattr(qti, "ema"): 70 | result = qti.ema(sample_data, window=20) 71 | assert isinstance(result, pd.Series) 72 | assert len(result) <= len(sample_data) 73 | 74 | except Exception as e: 75 | pytest.skip(f"Indicator functions not yet implemented: {e}") 76 | 77 | 78 | if __name__ == "__main__": 79 | pytest.main([__file__]) 80 | -------------------------------------------------------------------------------- /tests/test_channels_invariants.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | from quantjourney_ti import TechnicalIndicators 5 | 6 | 7 | def _ohlc(n=250, seed=11): 8 | rng = np.random.default_rng(seed) 9 | idx = pd.date_range("2024-01-01", periods=n, freq="D") 10 | close = np.cumsum(rng.normal(0, 1, n)) + 100 11 | high = close + np.abs(rng.normal(0.5, 0.2, n)) 12 | low = close - np.abs(rng.normal(0.5, 0.2, n)) 13 | open_ = close + rng.normal(0, 0.1, n) 14 | vol = np.abs(rng.normal(1e6, 2e5, n)) 15 | return pd.DataFrame({"open": open_, "high": high, "low": low, "close": close, "volume": vol}, index=idx) 16 | 17 | 18 | def test_keltner_upper_lower_positions(): 19 | df = _ohlc() 20 | ti = TechnicalIndicators() 21 | kc = ti.KELTNER(df[["high", "low", "close"]], 20, 10, 2.0) 22 | up = kc.iloc[:, 0] 23 | mid = kc.iloc[:, 1] 24 | lo = kc.iloc[:, 2] 25 | mask = ~(up.isna() | mid.isna() | lo.isna()) 26 | assert (up[mask] >= mid[mask]).all() 27 | assert (mid[mask] >= lo[mask]).all() 28 | 29 | 30 | def 
test_donchian_upper_lower_positions(): 31 | df = _ohlc() 32 | ti = TechnicalIndicators() 33 | dc = ti.DONCHIAN(df[["high", "low"]], 20) 34 | up = dc.iloc[:, 0] 35 | mid = dc.iloc[:, 1] 36 | lo = dc.iloc[:, 2] 37 | mask = ~(up.isna() | mid.isna() | lo.isna()) 38 | assert (up[mask] >= mid[mask]).all() 39 | assert (mid[mask] >= lo[mask]).all() 40 | 41 | -------------------------------------------------------------------------------- /tests/test_decorators.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logging 3 | from quantjourney_ti._decorators import timer, numba_fallback 4 | from quantjourney_ti._errors import IndicatorCalculationError 5 | 6 | def test_timer(caplog): 7 | caplog.set_level(logging.INFO) 8 | @timer 9 | def slow_func(): 10 | import time 11 | time.sleep(0.1) 12 | return 42 13 | assert slow_func() == 42 14 | assert "Finished slow_func" in caplog.text 15 | 16 | def test_numba_fallback(caplog): 17 | caplog.set_level(logging.WARNING) 18 | def pandas_fallback(self, data): 19 | return sum(data) 20 | @numba_fallback(pandas_fallback) 21 | def numba_sum(self, data): 22 | raise ValueError("Numba failed") 23 | class TestClass: 24 | def sum(self, data): 25 | return numba_sum(self, data) 26 | result = TestClass().sum([1, 2, 3]) 27 | assert result == 6 28 | assert "Numba failed" in caplog.text 29 | assert "IndicatorCalculationError" in caplog.text -------------------------------------------------------------------------------- /tests/test_demo.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pandas as pd 3 | import numpy as np 4 | from unittest.mock import patch 5 | from quantjourney_ti import TechnicalIndicators, willr, sma 6 | 7 | @pytest.fixture 8 | def mock_yfinance_data(): 9 | return pd.DataFrame({ 10 | "open": [100, 101, 102, 103, 104], 11 | "high": [101, 102, 103, 104, 105], 12 | "low": [99, 100, 101, 102, 103], 13 | "close": [100, 101, 102, 103, 104], 14 | "adj_close": [100, 101, 102, 103, 104], 15 | "volume": [1000, 1100, 1200, 1300, 1400] 16 | }, index=pd.date_range("2025-01-01", periods=5)) 17 | 18 | def test_demo_willr(monkeypatch, mock_yfinance_data): 19 | def mock_download(*args, **kwargs): 20 | return mock_yfinance_data 21 | monkeypatch.setattr("yfinance.download", mock_download) 22 | ti = TechnicalIndicators() 23 | result = ti.WILLR(mock_yfinance_data[["high", "low", "close"]], period=3) 24 | expected = pd.Series([np.nan, np.nan, -25.0, -25.0, -25.0], index=mock_yfinance_data.index, name="WILLR_3") 25 | pd.testing.assert_series_equal(result, expected, rtol=1e-4, atol=1e-4) 26 | 27 | def test_demo_sma(monkeypatch, mock_yfinance_data): 28 | def mock_download(*args, **kwargs): 29 | return mock_yfinance_data 30 | monkeypatch.setattr("yfinance.download", mock_download) 31 | result = sma(mock_yfinance_data["close"], period=3) # Changed window to period 32 | expected = pd.Series([np.nan, np.nan, 101.0, 102.0, 103.0], index=mock_yfinance_data.index, name="SMA_3") 33 | pd.testing.assert_series_equal(result, expected, rtol=1e-4, atol=1e-4) -------------------------------------------------------------------------------- /tests/test_fallbacks.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | 4 | from quantjourney_ti import TechnicalIndicators 5 | 6 | 7 | def test_ema_fallback_on_kernel_error(monkeypatch): 8 | # Prepare data 9 | idx = pd.date_range("2024-01-01", periods=50, 
freq="D") 10 | s = pd.Series(np.linspace(100, 110, 50), index=idx, name="close") 11 | ti = TechnicalIndicators() 12 | 13 | # Monkeypatch the kernel to raise ValueError to trigger fallback 14 | def boom(*args, **kwargs): 15 | raise ValueError("numba failed") 16 | 17 | monkeypatch.setattr(TechnicalIndicators, "_calculate_ema_numba", staticmethod(boom)) 18 | 19 | # Call EMA — decorator should fall back to pandas ewm 20 | out = ti.EMA(s, 10) 21 | ref = s.ewm(span=10, adjust=False).mean().rename("EMA_10") 22 | 23 | # Compare last values (allow tiny numeric differences) 24 | assert abs(out.iloc[-1] - ref.iloc[-1]) < 1e-9 25 | 26 | -------------------------------------------------------------------------------- /tests/test_hedge_fund_features.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for hedge fund specific features. 3 | """ 4 | 5 | import pytest 6 | import numpy as np 7 | import pandas as pd 8 | from datetime import datetime, timedelta 9 | import time 10 | 11 | import quantjourney_ti as qti 12 | from quantjourney_ti import ( 13 | TechnicalIndicators, 14 | StreamingIndicators, 15 | calculate_risk_metrics, 16 | MemoryManager, 17 | BatchProcessor, 18 | get_cache_stats, 19 | clear_indicator_cache 20 | ) 21 | 22 | @pytest.fixture 23 | def sample_price_data(): 24 | """Create sample price data for testing.""" 25 | np.random.seed(42) 26 | dates = pd.date_range('2023-01-01', periods=252, freq='D') 27 | 28 | # Generate realistic price data 29 | returns = np.random.normal(0.001, 0.02, 252) # Daily returns 30 | prices = 100 * (1 + returns).cumprod() 31 | 32 | df = pd.DataFrame({ 33 | 'open': prices * (1 + np.random.normal(0, 0.001, 252)), 34 | 'high': prices * (1 + np.abs(np.random.normal(0, 0.005, 252))), 35 | 'low': prices * (1 - np.abs(np.random.normal(0, 0.005, 252))), 36 | 'close': prices, 37 | 'volume': np.random.randint(100000, 1000000, 252) 38 | }, index=dates) 39 | 40 | return df 41 | 42 | @pytest.fixture 43 | def benchmark_data(): 44 | """Create benchmark data for testing.""" 45 | np.random.seed(123) 46 | dates = pd.date_range('2023-01-01', periods=252, freq='D') 47 | 48 | # Generate benchmark returns (slightly lower volatility) 49 | returns = np.random.normal(0.0008, 0.015, 252) 50 | prices = 100 * (1 + returns).cumprod() 51 | 52 | return pd.Series(prices, index=dates) 53 | 54 | class TestRiskMetrics: 55 | """Test risk metrics calculations.""" 56 | 57 | def test_risk_metrics_basic(self, sample_price_data): 58 | """Test basic risk metrics calculation.""" 59 | ti = TechnicalIndicators() 60 | 61 | risk_metrics = ti.RISK_METRICS(sample_price_data['close']) 62 | 63 | # Check that all expected metrics are present 64 | expected_metrics = [ 65 | 'Total Return', 'Annualized Return', 'Volatility', 66 | 'Sharpe Ratio', 'Sortino Ratio', 'Max Drawdown', 67 | 'VaR (5%)', 'CVaR (5%)', 'Calmar Ratio' 68 | ] 69 | 70 | for metric in expected_metrics: 71 | assert metric in risk_metrics.index 72 | assert not pd.isna(risk_metrics[metric]) 73 | 74 | def test_risk_metrics_with_benchmark(self, sample_price_data, benchmark_data): 75 | """Test risk metrics with benchmark.""" 76 | ti = TechnicalIndicators() 77 | 78 | risk_metrics = ti.RISK_METRICS( 79 | sample_price_data['close'], 80 | benchmark=benchmark_data 81 | ) 82 | 83 | # Check benchmark-specific metrics 84 | benchmark_metrics = ['Information Ratio', 'Treynor Ratio', 'Beta'] 85 | 86 | for metric in benchmark_metrics: 87 | assert metric in risk_metrics.index 88 | assert not pd.isna(risk_metrics[metric]) 89 
| 90 | def test_risk_metrics_edge_cases(self): 91 | """Test risk metrics with edge cases.""" 92 | ti = TechnicalIndicators() 93 | 94 | # Test with constant prices (zero volatility) 95 | constant_prices = pd.Series([100] * 100, 96 | index=pd.date_range('2023-01-01', periods=100)) 97 | 98 | risk_metrics = ti.RISK_METRICS(constant_prices) 99 | 100 | # Sharpe ratio should be NaN or inf for zero volatility 101 | assert pd.isna(risk_metrics['Sharpe Ratio']) or np.isinf(risk_metrics['Sharpe Ratio']) 102 | assert risk_metrics['Volatility'] == 0 103 | assert risk_metrics['Max Drawdown'] == 0 104 | 105 | class TestStreamingIndicators: 106 | """Test streaming indicators functionality.""" 107 | 108 | def test_streaming_basic(self, sample_price_data): 109 | """Test basic streaming functionality.""" 110 | streaming = StreamingIndicators() 111 | 112 | # Process first few rows 113 | results = [] 114 | for i, (timestamp, row) in enumerate(sample_price_data.head(50).iterrows()): 115 | indicators = streaming.update_tick( 116 | symbol='TEST', 117 | timestamp=timestamp, 118 | close=row['close'], 119 | high=row['high'], 120 | low=row['low'], 121 | volume=row['volume'] 122 | ) 123 | results.append(indicators) 124 | 125 | if i >= 20: # After enough data points 126 | assert 'sma_20' in indicators 127 | assert not np.isnan(indicators['sma_20']) 128 | 129 | # Check final values 130 | final_values = streaming.get_current_values('TEST') 131 | assert len(final_values) > 0 132 | 133 | def test_streaming_callbacks(self, sample_price_data): 134 | """Test streaming callbacks.""" 135 | streaming = StreamingIndicators() 136 | callback_results = [] 137 | 138 | def test_callback(symbol, value, timestamp): 139 | callback_results.append((symbol, value, timestamp)) 140 | 141 | streaming.register_callback('TEST_sma_20', test_callback) 142 | 143 | # Process data 144 | for timestamp, row in sample_price_data.head(30).iterrows(): 145 | streaming.update_tick( 146 | symbol='TEST', 147 | timestamp=timestamp, 148 | close=row['close'], 149 | high=row['high'], 150 | low=row['low'] 151 | ) 152 | 153 | # Should have received callbacks after SMA period 154 | assert len(callback_results) > 0 155 | 156 | def test_streaming_buffer(self, sample_price_data): 157 | """Test streaming buffer functionality.""" 158 | streaming = StreamingIndicators(max_buffer_size=100) 159 | 160 | # Process more data than buffer size 161 | for timestamp, row in sample_price_data.iterrows(): 162 | streaming.update_tick( 163 | symbol='TEST', 164 | timestamp=timestamp, 165 | close=row['close'] 166 | ) 167 | 168 | # Get buffer data 169 | buffer_df = streaming.get_buffer_data('TEST') 170 | 171 | # Should not exceed max buffer size 172 | assert len(buffer_df) <= 100 173 | assert not buffer_df.empty 174 | 175 | class TestPerformanceOptimization: 176 | """Test performance optimization features.""" 177 | 178 | def test_caching(self, sample_price_data): 179 | """Test indicator caching.""" 180 | clear_indicator_cache() # Start fresh 181 | 182 | ti = TechnicalIndicators() 183 | 184 | # First calculation 185 | start_time = time.time() 186 | sma1 = ti.SMA(sample_price_data['close'], period=20) 187 | first_time = time.time() - start_time 188 | 189 | # Second calculation (should be faster due to caching) 190 | start_time = time.time() 191 | sma2 = ti.SMA(sample_price_data['close'], period=20) 192 | second_time = time.time() - start_time 193 | 194 | # Results should be identical 195 | pd.testing.assert_series_equal(sma1, sma2) 196 | 197 | # Second should be faster (though this might be 
flaky in fast systems) 198 | # Just check that caching is working 199 | cache_stats = get_cache_stats() 200 | assert cache_stats['hit_count'] > 0 201 | 202 | def test_batch_processing(self, sample_price_data): 203 | """Test batch processing functionality.""" 204 | ti = TechnicalIndicators() 205 | 206 | # Create multiple symbol datasets 207 | data_dict = { 208 | 'SYMBOL1': sample_price_data, 209 | 'SYMBOL2': sample_price_data * 1.1, # Slightly different data 210 | 'SYMBOL3': sample_price_data * 0.9 211 | } 212 | 213 | # Batch calculate RSI 214 | results = ti.batch_calculate( 215 | data_dict=data_dict, 216 | indicator_name='RSI', 217 | period=14 218 | ) 219 | 220 | assert len(results) == 3 221 | for symbol, rsi_series in results.items(): 222 | assert isinstance(rsi_series, pd.Series) 223 | assert len(rsi_series) == len(sample_price_data) 224 | assert rsi_series.name == 'RSI_14' 225 | 226 | class TestMemoryManagement: 227 | """Test memory management features.""" 228 | 229 | def test_dataframe_optimization(self, sample_price_data): 230 | """Test DataFrame memory optimization.""" 231 | # Create DataFrame with inefficient dtypes 232 | inefficient_df = sample_price_data.copy() 233 | inefficient_df = inefficient_df.astype(np.float64) # Force float64 234 | 235 | original_memory = MemoryManager.get_memory_usage(inefficient_df) 236 | 237 | # Optimize 238 | optimized_df = MemoryManager.optimize_dataframe(inefficient_df) 239 | optimized_memory = MemoryManager.get_memory_usage(optimized_df) 240 | 241 | # Should use less memory (or at least not more) 242 | assert optimized_memory['total_mb'] <= original_memory['total_mb'] 243 | 244 | # Data should be preserved 245 | pd.testing.assert_frame_equal( 246 | inefficient_df.astype(optimized_df.dtypes), 247 | optimized_df, 248 | check_dtype=False 249 | ) 250 | 251 | def test_memory_usage_calculation(self, sample_price_data): 252 | """Test memory usage calculation.""" 253 | memory_stats = MemoryManager.get_memory_usage(sample_price_data) 254 | 255 | assert 'total_mb' in memory_stats 256 | assert 'per_column_mb' in memory_stats 257 | assert memory_stats['total_mb'] > 0 258 | assert len(memory_stats['per_column_mb']) == len(sample_price_data.columns) 259 | 260 | class TestDataValidation: 261 | """Test enhanced data validation.""" 262 | 263 | def test_market_data_validation(self): 264 | """Test market data validation with gaps.""" 265 | # Create data with gaps (missing weekends) 266 | business_days = pd.bdate_range('2023-01-01', '2023-01-31') 267 | df = pd.DataFrame({ 268 | 'close': np.random.randn(len(business_days)).cumsum() + 100 269 | }, index=business_days) 270 | 271 | ti = TechnicalIndicators() 272 | 273 | # Should not raise error with allow_gaps=True 274 | validated_df = ti.validate_market_data(df, allow_gaps=True) 275 | assert len(validated_df) == len(df) 276 | 277 | def test_data_fixing(self): 278 | """Test automatic data issue fixing.""" 279 | # Create problematic data 280 | dates = pd.date_range('2023-01-01', periods=10, freq='D') 281 | problematic_df = pd.DataFrame({ 282 | 'open': [100, 101, np.nan, 103, 104, 105, np.nan, 107, 108, 109], 283 | 'close': [101, 102, np.nan, 104, 105, 106, np.nan, 108, 109, 110], 284 | 'volume': [1000, 0, 1200, 0, 1400, 1500, 1600, 0, 1800, 1900] 285 | }, index=dates) 286 | 287 | ti = TechnicalIndicators() 288 | 289 | # Fix issues 290 | fixed_df = ti.validate_market_data( 291 | problematic_df, 292 | allow_gaps=True, 293 | fix_common_issues=True 294 | ) 295 | 296 | # Should have fewer NaNs and no zero volumes 297 | assert 
fixed_df.isnull().sum().sum() <= problematic_df.isnull().sum().sum() 298 | assert (fixed_df['volume'] == 0).sum() == 0 299 | 300 | class TestIntegration: 301 | """Integration tests for hedge fund features.""" 302 | 303 | def test_complete_workflow(self, sample_price_data, benchmark_data): 304 | """Test complete hedge fund workflow.""" 305 | ti = TechnicalIndicators() 306 | 307 | # 1. Validate and fix data 308 | clean_data = ti.validate_market_data( 309 | sample_price_data, 310 | allow_gaps=True, 311 | fix_common_issues=True 312 | ) 313 | 314 | # 2. Calculate technical indicators 315 | sma = ti.SMA(clean_data['close'], period=20) 316 | rsi = ti.RSI(clean_data['close'], period=14) 317 | 318 | # 3. Calculate risk metrics 319 | risk_metrics = ti.RISK_METRICS( 320 | clean_data['close'], 321 | benchmark=benchmark_data 322 | ) 323 | 324 | # 4. Test streaming 325 | streaming = ti.create_streaming_indicators() 326 | 327 | for timestamp, row in clean_data.head(50).iterrows(): 328 | streaming.update_tick( 329 | symbol='TEST', 330 | timestamp=timestamp, 331 | close=row['close'], 332 | high=row['high'], 333 | low=row['low'] 334 | ) 335 | 336 | # All should complete without errors 337 | assert len(sma.dropna()) > 0 338 | assert len(rsi.dropna()) > 0 339 | assert len(risk_metrics) > 0 340 | assert len(streaming.get_current_values('TEST')) > 0 341 | 342 | def test_error_handling(self): 343 | """Test error handling in edge cases.""" 344 | ti = TechnicalIndicators() 345 | 346 | # Empty data 347 | empty_df = pd.DataFrame() 348 | with pytest.raises(Exception): 349 | ti.SMA(empty_df, period=20) 350 | 351 | # Invalid data types 352 | invalid_data = pd.Series(['a', 'b', 'c']) 353 | with pytest.raises(Exception): 354 | ti.RSI(invalid_data, period=14) 355 | 356 | # Insufficient data 357 | short_data = pd.Series([1, 2], index=pd.date_range('2023-01-01', periods=2)) 358 | result = ti.SMA(short_data, period=20) 359 | assert result.isna().all() # Should return all NaN 360 | 361 | if __name__ == "__main__": 362 | pytest.main([__file__, "-v"]) -------------------------------------------------------------------------------- /tests/test_indicators.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pandas as pd 3 | import numpy as np 4 | 5 | from quantjourney_ti import TechnicalIndicators 6 | from quantjourney_ti._errors import IndicatorCalculationError, InvalidInputError 7 | 8 | @pytest.fixture 9 | def ti(): 10 | return TechnicalIndicators() 11 | 12 | @pytest.fixture 13 | def sample_data(): 14 | return pd.DataFrame({ 15 | "open": [100, 101, 102, 103, 104], 16 | "high": [101, 102, 103, 104, 105], 17 | "low": [99, 100, 101, 102, 103], 18 | "close": [100, 101, 102, 103, 104], 19 | "volume": [1000, 1100, 1200, 1300, 1400] 20 | }, index=pd.date_range("2025-01-01", periods=5)) 21 | 22 | def test_willr(ti, sample_data): 23 | result = ti.WILLR(sample_data, period=3) 24 | expected = pd.Series([np.nan, np.nan, -25.0, -25.0, -25.0], index=sample_data.index, name="WILLR_3") 25 | pd.testing.assert_series_equal(result, expected, rtol=1e-4, atol=1e-4) 26 | 27 | def test_mfi(ti, sample_data): 28 | result = ti.MFI(sample_data, period=3) 29 | # Add expected values based on manual calculation 30 | assert result.name == "MFI_3" 31 | assert len(result) == len(sample_data) 32 | 33 | def test_invalid_index(ti, sample_data): 34 | data_invalid = sample_data.copy() 35 | data_invalid.index = ["a", "b", "c", "d", "e"] 36 | with pytest.raises(InvalidInputError, match="Index must be 
datetime or numeric"): 37 | ti.SMA(data_invalid["close"], period=3) 38 | 39 | def test_sma(ti, sample_data): 40 | result = ti.SMA(sample_data["close"], period=3) 41 | expected = pd.Series([np.nan, np.nan, 101.0, 102.0, 103.0], index=sample_data.index, name="SMA_3") 42 | pd.testing.assert_series_equal(result, expected, rtol=1e-4, atol=1e-4) 43 | 44 | def test_sma_empty(ti): 45 | empty_series = pd.Series([], index=pd.DatetimeIndex([])) 46 | with pytest.raises(InvalidInputError, match="Input data is empty"): 47 | ti.SMA(empty_series, period=3) 48 | 49 | def test_willr_empty(ti): 50 | empty_df = pd.DataFrame({"high": [], "low": [], "close": []}, index=pd.DatetimeIndex([])) 51 | with pytest.raises(InvalidInputError, match="Input data is empty"): 52 | ti.WILLR(empty_df, period=3) 53 | 54 | def test_willr_non_numeric(ti, sample_data): 55 | invalid_data = sample_data.copy() 56 | invalid_data["close"] = ["a", "b", "c", "d", "e"] 57 | with pytest.raises(InvalidInputError, match="numeric data"): 58 | ti.WILLR(invalid_data, period=3) -------------------------------------------------------------------------------- /tests/test_integration_yf.py: -------------------------------------------------------------------------------- 1 | """Integration test fetching real data via yfinance. 2 | 3 | Skipped automatically if network/yfinance unavailable. 4 | """ 5 | 6 | import pytest 7 | import pandas as pd 8 | 9 | try: 10 | import quantjourney_ti as qti 11 | from tests._yf import get_ohlcv 12 | except Exception as exc: # pragma: no cover 13 | pytest.skip(f"quantjourney_ti import failed: {exc}", allow_module_level=True) 14 | 15 | TICKER = "AAPL" 16 | 17 | 18 | @pytest.fixture(scope="module") 19 | def price_df(): 20 | try: 21 | df = get_ohlcv(TICKER, period="3mo", interval="1d") 22 | if isinstance(df.columns, pd.MultiIndex): 23 | df.columns = df.columns.get_level_values(0) 24 | df = df.rename(columns=str.lower) 25 | except Exception as exc: # pragma: no cover 26 | pytest.skip(f"yfinance unavailable or network error: {exc}") 27 | return df 28 | 29 | 30 | def test_flat_helper(price_df): 31 | close = price_df["close"] 32 | sma = qti.sma(close, 20) 33 | assert isinstance(sma, pd.Series) 34 | assert len(sma) == len(close) 35 | 36 | 37 | def test_class_instance(price_df): 38 | ti = qti.TechnicalIndicators() 39 | rsi = ti.RSI(price_df["close"], 14) 40 | assert isinstance(rsi, pd.Series) 41 | assert rsi.isna().sum() >= 14 # initial NaNs 42 | 43 | 44 | def test_singleton_shortcut(price_df): 45 | import quantjourney_ti.indicators as ind # noqa 46 | 47 | atr_series = ind._TI_INSTANCE.ATR(price_df, 14) # uses high/low/close 48 | assert isinstance(atr_series, pd.Series) 49 | assert len(atr_series) == len(price_df) 50 | -------------------------------------------------------------------------------- /tests/test_invariants.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | from quantjourney_ti import TechnicalIndicators 5 | 6 | 7 | def _make_ohlcv(n: int = 300, seed: int = 42) -> pd.DataFrame: 8 | rng = np.random.default_rng(seed) 9 | idx = pd.date_range("2023-01-01", periods=n, freq="D") 10 | base = np.cumsum(rng.normal(0, 1, n)) + 100 11 | close = base + rng.normal(0, 0.2, n) 12 | high = close + np.abs(rng.normal(0.5, 0.2, n)) 13 | low = close - np.abs(rng.normal(0.5, 0.2, n)) 14 | open_ = close + rng.normal(0, 0.1, n) 15 | volume = np.abs(rng.normal(1e6, 2e5, n)).astype(np.int64) 16 | return pd.DataFrame({"open": open_, "high": high, "low": 
low, "close": close, "volume": volume}, index=idx) 17 | 18 | 19 | def test_bb_bands_ordering(): 20 | df = _make_ohlcv() 21 | ti = TechnicalIndicators() 22 | bb = ti.BB(df["close"], 20, 2.0) 23 | up = bb["BB_Upper"] 24 | mid = bb["BB_Middle"] 25 | lo = bb["BB_Lower"] 26 | mask = ~(up.isna() | mid.isna() | lo.isna()) 27 | assert (up[mask] >= mid[mask]).all() 28 | assert (mid[mask] >= lo[mask]).all() 29 | 30 | 31 | def test_atr_nonnegative(): 32 | df = _make_ohlcv() 33 | ti = TechnicalIndicators() 34 | atr = ti.ATR(df[["high", "low", "close"]], 14) 35 | if isinstance(atr, pd.DataFrame): 36 | series = atr.iloc[:, 0] 37 | else: 38 | series = atr 39 | series = series.dropna() 40 | assert (series >= 0).all() 41 | 42 | 43 | def test_vwap_within_high_low_window(): 44 | df = _make_ohlcv() 45 | ti = TechnicalIndicators() 46 | vwap = ti.VWAP(df[["high", "low", "close", "volume"]], 14) 47 | series = vwap.iloc[:, 0] if isinstance(vwap, pd.DataFrame) else vwap 48 | # Rolling bounds 49 | roll_high = df["high"].rolling(14).max() 50 | roll_low = df["low"].rolling(14).min() 51 | mask = ~(series.isna() | roll_high.isna() | roll_low.isna()) 52 | assert (series[mask] <= roll_high[mask] + 1e-9).all() 53 | assert (series[mask] >= roll_low[mask] - 1e-9).all() 54 | 55 | 56 | def test_macd_matches_ema_difference(): 57 | df = _make_ohlcv() 58 | s = df["close"] 59 | ti = TechnicalIndicators() 60 | macd = ti.MACD(s, 12, 26, 9)["MACD"].dropna() 61 | # Reference EMAs using pandas 62 | ema_fast = s.ewm(span=12, adjust=False).mean() 63 | ema_slow = s.ewm(span=26, adjust=False).mean() 64 | ref = (ema_fast - ema_slow).dropna() 65 | aligned = macd.index.intersection(ref.index) 66 | diff = (macd.loc[aligned] - ref.loc[aligned]).abs() 67 | # Allow small numerical differences 68 | assert diff.max() < 1e-6 69 | -------------------------------------------------------------------------------- /tests/test_kernels_attach.py: -------------------------------------------------------------------------------- 1 | from quantjourney_ti import TechnicalIndicators 2 | 3 | 4 | def test_kernels_attached_to_class(): 5 | # The class should have static methods attached for kernels 6 | assert hasattr(TechnicalIndicators, "_calculate_sma_numba") 7 | assert hasattr(TechnicalIndicators, "_calculate_macd_numba") 8 | assert hasattr(TechnicalIndicators, "_calculate_atr_numba") 9 | 10 | ti = TechnicalIndicators() 11 | # Methods should be callable via instance too 12 | assert callable(ti._calculate_sma_numba) 13 | assert callable(ti._calculate_macd_numba) 14 | assert callable(ti._calculate_atr_numba) 15 | 16 | -------------------------------------------------------------------------------- /tests/test_streaming_equivalence.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | from quantjourney_ti import TechnicalIndicators, StreamingIndicators 5 | 6 | 7 | def _series(n=200, seed=3): 8 | rng = np.random.default_rng(seed) 9 | idx = pd.date_range("2024-01-01", periods=n, freq="D") 10 | s = pd.Series(np.cumsum(rng.normal(0, 1, n)) + 100, index=idx) 11 | return s 12 | 13 | 14 | def test_streaming_sma_matches_batch_last_value(): 15 | s = _series() 16 | ti = TechnicalIndicators() 17 | si = StreamingIndicators(max_buffer_size=1000) 18 | symbol = "SYM" 19 | 20 | for ts, val in s.items(): 21 | si.update_tick(symbol, ts, close=float(val)) 22 | 23 | # batch SMA 24 | batch = ti.SMA(s, 20) 25 | # streaming SMA last value 26 | last = si.states[symbol].last_values.get("sma_20", np.nan) 
27 | 28 | assert np.isfinite(last) 29 | assert abs(last - batch.iloc[-1]) < 1e-9 30 | 31 | 32 | def test_streaming_ema_matches_batch_last_value(): 33 | s = _series() 34 | ti = TechnicalIndicators() 35 | si = StreamingIndicators(max_buffer_size=1000) 36 | symbol = "SYM" 37 | 38 | for ts, val in s.items(): 39 | si.update_tick(symbol, ts, close=float(val)) 40 | 41 | batch = ti.EMA(s, 20) 42 | last = si.states[symbol].last_values.get("ema_20", np.nan) 43 | assert np.isfinite(last) 44 | assert abs(last - batch.iloc[-1]) < 1e-6 45 | 46 | -------------------------------------------------------------------------------- /tests/test_technical_indicators.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for quantjourney_ti technical indicators. 3 | """ 4 | 5 | import pytest 6 | import numpy as np 7 | import pandas as pd 8 | import quantjourney_ti as qti 9 | 10 | 11 | class TestBasicFunctionality: 12 | """Test basic package functionality.""" 13 | 14 | def test_package_imports(self): 15 | """Test that the package imports correctly.""" 16 | assert hasattr(qti, '__version__') 17 | assert hasattr(qti, '__author__') 18 | assert qti.__author__ == "Jakub Polec" 19 | 20 | def test_sample_data_creation(self): 21 | """Test creation of sample data for testing.""" 22 | # Create sample price data 23 | dates = pd.date_range('2023-01-01', periods=100, freq='D') 24 | prices = pd.Series( 25 | 100 + np.cumsum(np.random.randn(100) * 0.5), 26 | index=dates 27 | ) 28 | assert len(prices) == 100 29 | assert isinstance(prices, pd.Series) 30 | 31 | 32 | class TestTechnicalIndicators: 33 | """Test technical indicator calculations.""" 34 | 35 | @pytest.fixture 36 | def sample_data(self): 37 | """Create sample price data for testing.""" 38 | np.random.seed(42) # For reproducible tests 39 | dates = pd.date_range('2023-01-01', periods=100, freq='D') 40 | prices = pd.Series( 41 | 100 + np.cumsum(np.random.randn(100) * 0.5), 42 | index=dates 43 | ) 44 | return prices 45 | 46 | def test_indicators_exist(self, sample_data): 47 | """Test that main indicator functions exist.""" 48 | # This test will need to be updated based on your actual indicator functions 49 | # Common indicators that should exist: 50 | indicators_to_test = [ 51 | 'sma', 'ema', 'rsi', 'macd', 'bollinger_bands', 52 | 'atr', 'stochastic', 'williams_r' 53 | ] 54 | 55 | for indicator in indicators_to_test: 56 | if hasattr(qti, indicator): 57 | assert callable(getattr(qti, indicator)) 58 | 59 | def test_basic_calculation(self, sample_data): 60 | """Test that indicators can be calculated without errors.""" 61 | try: 62 | # Test SMA if it exists 63 | if hasattr(qti, 'sma'): 64 | result = qti.sma(sample_data, window=20) 65 | assert isinstance(result, pd.Series) 66 | assert len(result) <= len(sample_data) 67 | 68 | # Test EMA if it exists 69 | if hasattr(qti, 'ema'): 70 | result = qti.ema(sample_data, window=20) 71 | assert isinstance(result, pd.Series) 72 | assert len(result) <= len(sample_data) 73 | 74 | except Exception as e: 75 | pytest.skip(f"Indicator functions not yet implemented: {e}") 76 | 77 | 78 | if __name__ == "__main__": 79 | pytest.main([__file__]) 80 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pandas as pd 3 | import numpy as np 4 | from quantjourney_ti._utils import ( 5 | validate_data, validate_and_get_prices, validate_window, 6 | 
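# Private helpers under test. detect_crossovers gets dedicated off-by-one
# checks in test_utils_crossovers.py; detect_divergence is imported here
# but not directly exercised in this module.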
detect_divergence, detect_crossovers, optimize_memory 7 | ) 8 | from quantjourney_ti._errors import InvalidInputError 9 | 10 | @pytest.fixture 11 | def sample_data(): 12 | return pd.DataFrame( 13 | { 14 | "close": [100, 101, 102, 103, 104], 15 | "high": [101, 102, 103, 104, 105], 16 | "low": [99, 100, 101, 102, 103] 17 | }, 18 | index=pd.date_range("2025-01-01", periods=5) 19 | ) 20 | 21 | def test_validate_data(sample_data): 22 | assert validate_data(sample_data, ["close", "high", "low"]) is True 23 | with pytest.raises(InvalidInputError, match="Missing required columns"): 24 | validate_data(sample_data, ["open"]) 25 | with pytest.raises(InvalidInputError, match="Index must be datetime or numeric"): 26 | invalid_data = sample_data.copy() 27 | invalid_data.index = ["a", "b", "c", "d", "e"] 28 | validate_data(invalid_data) 29 | 30 | def test_validate_and_get_prices(sample_data): 31 | prices = validate_and_get_prices(sample_data) 32 | assert prices.name == "close" 33 | assert len(prices) == 5 34 | with pytest.raises(InvalidInputError, match="numeric data"): 35 | invalid_data = sample_data.copy() 36 | invalid_data["close"] = ["a", "b", "c", "d", "e"] 37 | validate_and_get_prices(invalid_data) 38 | 39 | def test_validate_window(): 40 | assert validate_window(data_length=10, window=5) is True 41 | with pytest.raises(InvalidInputError, match="Window size must be at least"): 42 | validate_window(data_length=10, window=1) 43 | with pytest.raises(InvalidInputError, match="Window size.*must be less"): 44 | validate_window(data_length=5, window=5) 45 | 46 | def test_optimize_memory(): 47 | df = pd.DataFrame({"int_col": [1, 2, 3], "float_col": [1.0, 2.0, 3.0]}) 48 | optimized = optimize_memory(df) 49 | assert optimized["int_col"].dtype == np.int32 50 | assert optimized["float_col"].dtype == np.float32 -------------------------------------------------------------------------------- /tests/test_utils_crossovers.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | 4 | from quantjourney_ti._utils import detect_crossovers 5 | 6 | 7 | def test_detect_crossovers_no_off_by_one(): 8 | # Construct two series where s1 crosses above s2 at index 3 exactly 9 | idx = pd.RangeIndex(0, 7) 10 | s2 = pd.Series([0, 0, 0, 0, 0, 0, 0], index=idx, dtype=float) 11 | s1 = pd.Series([-2, -1, 0, 1, 2, 1, 0], index=idx, dtype=float) 12 | 13 | crosses = detect_crossovers(s1, s2) 14 | # Expect bullish at index 3 only 15 | assert crosses.loc[3, "bullish"] == 1 16 | assert crosses["bullish"].sum() == 1 17 | assert crosses["bearish"].sum() == 0 18 | 19 | # Now invert to force a bearish cross at index 3 20 | crosses2 = detect_crossovers(s2, s1) 21 | assert crosses2.loc[3, "bearish"] == 1 22 | assert crosses2["bearish"].sum() == 1 23 | assert crosses2["bullish"].sum() == 0 24 | 25 | -------------------------------------------------------------------------------- /tests/test_yf_integration_multi.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | 4 | 5 | tickers = ["AAPL", "MSFT", "SPY"] 6 | 7 | 8 | @pytest.mark.slow 9 | def test_yf_multi_basic_indicators(): 10 | try: 11 | import yfinance as yf # type: ignore 12 | except Exception: 13 | pytest.skip("yfinance not installed; install extra 'yf' to run") 14 | 15 | from quantjourney_ti import TechnicalIndicators 16 | 17 | ti = TechnicalIndicators() 18 | 19 | for t in tickers: 20 | df = yf.download(t, period="6mo", progress=False) 21 | if 
df.empty: 22 | continue 23 | # Normalize column names 24 | df = df.rename(columns={ 25 | "Close": "close", 26 | "Open": "open", 27 | "High": "high", 28 | "Low": "low", 29 | "Volume": "volume", 30 | }) 31 | # Sanity calculations 32 | sma = ti.SMA(df["close"], 20) 33 | ema = ti.EMA(df["close"], 20) 34 | macd = ti.MACD(df["close"], 12, 26, 9) 35 | atr = ti.ATR(df[["high", "low", "close"]], 14) 36 | 37 | assert len(sma) == len(df) 38 | assert len(ema) == len(df) 39 | assert set(["MACD", "Signal", "Histogram"]).issubset(set(macd.columns)) 40 | if hasattr(atr, "__len__"): 41 | assert len(atr) == len(df) 42 | 43 | --------------------------------------------------------------------------------