├── .env.sample
├── .gitignore
├── CLAUDE.md
├── Dockerfile
├── LICENSE
├── README.md
├── docker-compose.yml
├── docs
│   ├── api-integration.md
│   ├── architecture.md
│   ├── dashboard.md
│   ├── img
│   │   └── dashboard-screenshot.png
│   ├── installation.md
│   ├── trading-modes.md
│   └── troubleshooting.md
├── force_clear_eth.sh
├── force_trading_disabled.sh
├── frontend
│   ├── Dockerfile
│   ├── README.md
│   ├── app.py
│   ├── requirements.txt
│   ├── static
│   │   ├── css
│   │   │   └── styles.css
│   │   ├── img
│   │   │   ├── dashboard-screenshot.png
│   │   │   └── favicon.ico
│   │   └── js
│   │       └── dashboard.js
│   └── templates
│       └── index.html
├── requirements.txt
├── restart.sh
├── run_tests.sh
├── scripts
│   └── init-ollama.sh
├── setup.py
├── src
│   ├── __init__.py
│   ├── ai_decision
│   │   ├── __init__.py
│   │   ├── ollama_client.py
│   │   └── service.py
│   ├── config
│   │   ├── __init__.py
│   │   └── settings.py
│   ├── data_retrieval
│   │   ├── __init__.py
│   │   ├── service.py
│   │   └── taapi_client.py
│   ├── main.py
│   ├── trade_execution
│   │   ├── __init__.py
│   │   ├── alpaca_client.py
│   │   └── service.py
│   └── utils
│       ├── __init__.py
│       ├── force_disabled.py
│       ├── logger.py
│       ├── models.py
│       └── redis_client.py
├── test_alpaca.py
└── tests
    ├── __init__.py
    ├── test_ai_decision.py
    └── test_rsi_client.py
/.env.sample:
--------------------------------------------------------------------------------
1 | # API Keys
2 | TAAPI_API_KEY=your_taapi_api_key
3 |
4 | # Alpaca API Keys - Paper Trading
5 | ALPACA_API_KEY=your_paper_trading_key
6 | ALPACA_API_SECRET=your_paper_trading_secret
7 |
8 | # Environment variables for alpaca-py library
9 | APCA_API_KEY_ID=your_paper_trading_key
10 | APCA_API_SECRET_KEY=your_paper_trading_secret
11 | APCA_API_BASE_URL=https://paper-api.alpaca.markets
12 |
13 | # Trading Configuration
14 | SYMBOLS=BTC/USD,ETH/USD
15 | TRADE_PERCENTAGE=2.0 # Percentage of portfolio to trade (used if TRADE_USE_FIXED=false)
16 | TRADE_FIXED_AMOUNT=10.0 # Fixed amount in USD for each trade (used if TRADE_USE_FIXED=true)
17 | TRADE_USE_FIXED=false # Set to 'true' to use fixed amount or 'false' to use percentage
18 | RSI_PERIOD=14
19 |
20 | # Adjust this based on your TAAPI tier (see README.md):
21 | # Free tier (1 req/15s): 300 seconds recommended for 2 symbols
22 | # Basic tier (5 req/15s): 60 seconds recommended for 2 symbols
23 | # Pro tier (30 req/15s): 10 seconds recommended for 2 symbols
24 | # Expert tier (75 req/15s): 5 seconds recommended for 2 symbols
25 | POLL_INTERVAL=300 # in seconds - set for TAAPI free tier with 2 symbols
26 |
27 | ALPACA_DEBUG_MODE=false # Enable debug mode to simulate trades without API calls
28 |
29 | # Ollama Configuration
30 | OLLAMA_MODEL=llama3.2:1b
31 | OLLAMA_HOST=http://ollama:11434
32 |
33 | # Redis Configuration
34 | REDIS_HOST=redis
35 | REDIS_PORT=6379
36 | REDIS_DB=0
37 |
38 | # Frontend Configuration
39 | FRONTEND_PORT=9753
40 | FRONTEND_HOST=0.0.0.0
41 |
42 | # Logging Configuration
43 | LOG_LEVEL=INFO
44 | LOG_TO_CONSOLE=true
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Environments
2 | .env
3 | .env.*
4 | !.env.sample
5 | .venv
6 | env/
7 | venv/
8 | ENV/
9 | env.bak/
10 | venv.bak/
11 |
12 | # Python cache files
13 | __pycache__/
14 | *.py[cod]
15 | *$py.class
16 | .pytest_cache/
17 |
18 | # Distribution / packaging
19 | .Python
20 | build/
21 | develop-eggs/
22 | dist/
23 | downloads/
24 | eggs/
25 | .eggs/
26 | lib/
27 | lib64/
28 | parts/
29 | sdist/
30 | var/
31 | wheels/
32 | *.egg-info/
33 | .installed.cfg
34 | *.egg
35 |
36 | # Log files
37 | *.log
38 |
39 | # IDE files
40 | .idea/
41 | .vscode/
42 | *.swp
43 | *.swo
44 |
45 | # OS files
46 | .DS_Store
47 | Thumbs.db
48 |
49 | # Docker files
50 | dump.rdb
51 | redis_data/
52 | ollama_data/
53 |
54 | # Jupyter Notebook
55 | .ipynb_checkpoints
56 |
57 | # Local directories
58 | /data/
59 | /volumes/
--------------------------------------------------------------------------------
/CLAUDE.md:
--------------------------------------------------------------------------------
1 | # TraderMagic Development Guide
2 |
3 | ## Commands
4 | - Run all tests: `python -m unittest discover -s tests`
5 | - Run single test: `python -m unittest tests.test_ai_decision`
6 | - Run specific test method: `python -m unittest tests.test_ai_decision.TestAIDecisionService.test_analyze_rsi_buy`
7 | - Start services: `docker compose up -d`
8 | - Restart all services: `./restart.sh`
9 |
10 | ## Code Style
11 | - **Imports**: Standard library first, followed by third-party, then local modules
12 | - **Type Annotations**: Use typing module; all function parameters and returns should be typed
13 | - **Exception Handling**: Use specific exceptions and proper logging; avoid bare except blocks
14 | - **Naming**: snake_case for variables/functions, PascalCase for classes, UPPER_CASE for constants
15 | - **Docstrings**: Include for all public functions and classes; specify parameters and return types
16 | - **Async Code**: Properly await async operations; use asyncio.run for top-level execution
17 | - **Dependency Imports**: Import within functions when handling circular dependencies
18 | - **Error Handling**: Log errors with appropriate log levels; provide context in error messages
19 |
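A short, hypothetical example (not actual project code) that follows these conventions:

```python
# Standard library imports first, then third-party, then local modules
import asyncio
import logging
from typing import Optional

RSI_OVERSOLD_THRESHOLD = 30.0  # constants in UPPER_CASE

logger = logging.getLogger(__name__)


class SignalEvaluator:
    """Evaluates an RSI reading against a configurable oversold threshold."""

    def __init__(self, threshold: float = RSI_OVERSOLD_THRESHOLD) -> None:
        self.threshold = threshold

    async def evaluate(self, symbol: str, rsi: Optional[float]) -> str:
        """Return "buy" when RSI is below the threshold, otherwise "hold".

        Args:
            symbol: Trading pair, e.g. "BTC/USDT".
            rsi: Latest RSI value, or None if unavailable.

        Returns:
            The decision as a lowercase string.
        """
        if rsi is None:
            logger.warning("No RSI value available for %s", symbol)
            return "hold"
        return "buy" if rsi < self.threshold else "hold"


if __name__ == "__main__":
    decision = asyncio.run(SignalEvaluator().evaluate("BTC/USDT", 27.5))
    print(decision)  # buy
```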
20 | ## Architecture Patterns
21 | - Service-based architecture with clear separation of concerns
22 | - Redis for inter-service communication
23 | - Config-driven with environment variables via dotenv
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.11-slim
2 |
3 | WORKDIR /app
4 |
5 | COPY requirements.txt setup.py ./
6 | RUN pip install --no-cache-dir -r requirements.txt
7 |
8 | COPY . .
9 |
10 | # Install the package in development mode
11 | RUN pip install -e .
12 |
13 | CMD ["python", "src/main.py"]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | # TraderMagic 🧙‍♂️💹
4 |
5 | An AI-powered automated trading system built with Python, running entirely on your local machine. The system uses Ollama for AI decision-making, TAAPI.io for technical indicators, and Alpaca for trade execution.
6 |
7 | ## ✨ Features
8 |
9 | - 🧠 **AI-powered trading decisions** using locally-run LLM models via Ollama
10 | - 📊 **Real-time dashboards** with trade status and history
11 | - 🛑 **Trading on/off toggle** for complete user control
12 | - 📈 **RSI-based technical analysis** for market insights
13 | - 🕒 **Market hours visualization** showing pre-market, regular hours, after-hours, and closed sessions
14 | - 💰 **Flexible trade sizing** with portfolio percentage or fixed amounts
15 | - 🔒 **Paper trading mode** for risk-free testing
16 | - 🔄 **Redis-powered communication** between services
17 | - 🐳 **Docker-based deployment** for easy setup
18 |
19 | ## 🏗️ System Architecture
20 |
21 | TraderMagic consists of four main components:
22 |
23 | 1. **Data Retrieval Service** 📡 - Polls TAAPI.io for RSI data
24 | 2. **AI Decision Engine** 🧠 - Analyzes data using Ollama LLM to make trading decisions
25 | 3. **Trade Execution Service** 💹 - Interfaces with Alpaca to execute trades
26 | 4. **Web Dashboard** 🖥️ - Real-time monitoring interface
27 |
28 | All components are containerized using Docker and communicate through Redis.
29 |
30 | ## 🚀 Quick Start
31 |
32 | ```bash
33 | # Clone the repository
34 | git clone https://github.com/rawveg/trader-magic.git
35 | cd trader-magic
36 |
37 | # Configure your environment
38 | cp .env.sample .env
39 | # Edit .env with your API keys
40 |
41 | # Start the application
42 | docker compose up -d
43 |
44 | # Access the dashboard
45 | # Open http://localhost:9753 in your browser
46 | ```
47 |
48 | ## 📚 Documentation
49 |
50 | For detailed documentation on all aspects of TraderMagic, check out these guides:
51 |
52 | - [📋 Installation Guide](docs/installation.md) - Step-by-step setup instructions
53 | - [🏗️ Architecture Overview](docs/architecture.md) - System design and components
54 | - [🔌 API Integrations](docs/api-integration.md) - Configuring external APIs
55 | - [🚦 Trading Modes](docs/trading-modes.md) - Paper/live trading and debug modes
56 | - [📊 Dashboard Features](docs/dashboard.md) - Using the web interface
57 | - [🔍 Troubleshooting Guide](docs/troubleshooting.md) - Solving common issues
58 |
59 | ## ⚠️ Disclaimer
60 |
61 | This trading system is provided for educational and research purposes only. The authors and contributors are not responsible for any financial losses incurred through the use of this software. Always do your own research and consider consulting a financial advisor before making investment decisions.
62 |
63 | ## 🙏 Acknowledgments
64 |
65 | TraderMagic is inspired by the work of [Mike Russell and the Creator Magic Community](http://www.creatormagic.ai). Mike's innovations in AI-powered creative tools have been pivotal in the development of this project.
66 |
67 | ## 📜 License
68 |
69 | **Important:** As of March 15, 2025, TraderMagic is licensed under AGPL-3.0. Any forks or derivations must comply with AGPL unless a commercial license is obtained.
70 |
71 | TraderMagic is licensed under the GNU Affero General Public License (AGPL). See the [LICENSE](LICENSE) file for details.
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 |   data_retrieval:
3 |     build:
4 |       context: .
5 |       dockerfile: Dockerfile
6 |     image: trader-magic/data-retrieval
7 |     container_name: data_retrieval
8 |     volumes:
9 |       - ./:/app
10 |     command: python -m src.data_retrieval.service
11 |     env_file:
12 |       - .env
13 |     environment:
14 |       - SERVICE_NAME=data_retrieval
15 |       - PYTHONPATH=/app
16 |     depends_on:
17 |       - redis
18 |     networks:
19 |       - trader_network
20 |     restart: unless-stopped
21 |     logging:
22 |       driver: "json-file"
23 |       options:
24 |         max-size: "10m"
25 |         max-file: "3"
26 |
27 |   ai_decision:
28 |     build:
29 |       context: .
30 |       dockerfile: Dockerfile
31 |     image: trader-magic/ai-decision
32 |     container_name: ai_decision
33 |     volumes:
34 |       - ./:/app
35 |     command: python -m src.ai_decision.service
36 |     env_file:
37 |       - .env
38 |     environment:
39 |       - SERVICE_NAME=ai_decision
40 |       - PYTHONPATH=/app
41 |     depends_on:
42 |       - ollama
43 |       - ollama_init
44 |       - redis
45 |     networks:
46 |       - trader_network
47 |     restart: unless-stopped
48 |     logging:
49 |       driver: "json-file"
50 |       options:
51 |         max-size: "10m"
52 |         max-file: "3"
53 |
54 |   trade_execution:
55 |     build:
56 |       context: .
57 |       dockerfile: Dockerfile
58 |     image: trader-magic/trade-execution
59 |     container_name: trade_execution
60 |     volumes:
61 |       - ./:/app
62 |     command: python -m src.trade_execution.service
63 |     env_file:
64 |       - .env
65 |     environment:
66 |       - SERVICE_NAME=trade_execution
67 |       - PYTHONPATH=/app
68 |     depends_on:
69 |       - redis
70 |     networks:
71 |       - trader_network
72 |     restart: unless-stopped
73 |     logging:
74 |       driver: "json-file"
75 |       options:
76 |         max-size: "10m"
77 |         max-file: "3"
78 |
79 |   frontend:
80 |     build:
81 |       context: ./frontend
82 |       dockerfile: Dockerfile
83 |     image: trader-magic/frontend
84 |     container_name: frontend
85 |     ports:
86 |       - "${FRONTEND_PORT:-9753}:${FRONTEND_PORT:-9753}"
87 |     volumes:
88 |       - ./frontend:/app/frontend
89 |       - ./.env:/app/.env
90 |     env_file:
91 |       - .env
92 |     environment:
93 |       - SERVICE_NAME=frontend
94 |       - PYTHONPATH=/app
95 |     # Use explicit command to run the app
96 |     command: python /app/frontend/app.py
97 |     depends_on:
98 |       - redis
99 |     networks:
100 |       - trader_network
101 |     restart: unless-stopped
102 |     logging:
103 |       driver: "json-file"
104 |       options:
105 |         max-size: "10m"
106 |         max-file: "3"
107 |
108 |   ollama:
109 |     image: ollama/ollama:latest
110 |     container_name: ollama
111 |     volumes:
112 |       - ollama_data:/root/.ollama
113 |     ports:
114 |       - "11434:11434"
115 |     networks:
116 |       - trader_network
117 |     restart: unless-stopped
118 |     # Simpler healthcheck with curl preinstalled in the container
119 |     healthcheck:
120 |       test: ["CMD", "curl", "-f", "http://localhost:11434/"]
121 |       interval: 30s
122 |       timeout: 10s
123 |       retries: 3
124 |       start_period: 40s
125 |     logging:
126 |       driver: "json-file"
127 |       options:
128 |         max-size: "10m"
129 |         max-file: "3"
130 |
131 |   ollama_init:
132 |     image: curlimages/curl:latest
133 |     container_name: ollama_init
134 |     volumes:
135 |       - ./scripts:/scripts
136 |     command: sh /scripts/init-ollama.sh
137 |     environment:
138 |       - OLLAMA_MODEL=${OLLAMA_MODEL:-llama3.2:latest}
139 |     depends_on:
140 |       - ollama
141 |     networks:
142 |       - trader_network
143 |     restart: "no"
144 |
145 |   redis:
146 |     image: redis:alpine
147 |     container_name: redis
148 |     ports:
149 |       - "6379:6379"
150 |     volumes:
151 |       - redis_data:/data
152 |     command: redis-server --notify-keyspace-events KEA
153 |     networks:
154 |       - trader_network
155 |     restart: unless-stopped
156 |     logging:
157 |       driver: "json-file"
158 |       options:
159 |         max-size: "10m"
160 |         max-file: "3"
161 |
162 | networks:
163 |   trader_network:
164 |     driver: bridge
165 |
166 | volumes:
167 |   ollama_data:
168 |   redis_data:
--------------------------------------------------------------------------------
/docs/api-integration.md:
--------------------------------------------------------------------------------
1 | # 🔌 API Integrations
2 |
3 | TraderMagic connects with several external APIs to provide its functionality. This document explains how to configure and optimize these integrations.
4 |
5 | ## 📊 TAAPI.io for Technical Indicators
6 |
7 | TraderMagic uses TAAPI.io to retrieve RSI (Relative Strength Index) data, which serves as the primary technical indicator for trading decisions.
8 |
9 | ### 🔑 API Key Configuration
10 |
11 | Add your TAAPI.io API key to the `.env` file:
12 |
13 | ```
14 | TAAPI_API_KEY=your_taapi_key_here
15 | ```
16 |
17 | ### ⏱️ Rate Limits
18 |
19 | TAAPI.io has different rate limits based on your subscription tier:
20 |
21 | | Tier | Rate Limit | Recommended Poll Interval |
22 | |--------|------------------------|---------------------------|
23 | | Free | 1 request / 15 seconds | 300 seconds (5 minutes) |
24 | | Basic | 5 requests / 15 seconds| 60 seconds (1 minute) |
25 | | Pro | 30 requests / 15 seconds | 10 seconds |
26 | | Expert | 75 requests / 15 seconds | 5 seconds |
27 |
28 | Configure the polling interval in your `.env` file:
29 |
30 | ```
31 | POLL_INTERVAL=300 # Adjust based on your subscription tier
32 | ```
33 |
34 | For multiple symbols, the system automatically calculates spacing between requests to stay within rate limits.
35 |
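As an illustration, a minimal polling loop might look like the following. The endpoint and query parameters follow TAAPI.io's public RSI endpoint; the spacing logic is illustrative and not TraderMagic's actual implementation:

```python
import os
import time

import requests

TAAPI_API_KEY = os.environ["TAAPI_API_KEY"]
SYMBOLS = os.environ.get("SYMBOLS", "BTC/USDT,ETH/USDT").split(",")
POLL_INTERVAL = int(os.environ.get("POLL_INTERVAL", "300"))


def fetch_rsi(symbol: str, interval: str = "1h") -> float:
    """Fetch the latest RSI value for one symbol from TAAPI.io."""
    response = requests.get(
        "https://api.taapi.io/rsi",
        params={
            "secret": TAAPI_API_KEY,
            "exchange": "binance",
            "symbol": symbol,
            "interval": interval,
        },
        timeout=10,
    )
    response.raise_for_status()
    return response.json()["value"]


# Spread requests evenly across the poll interval, never faster than
# one request per 15 seconds (the free-tier limit).
spacing = max(POLL_INTERVAL / len(SYMBOLS), 15)
for symbol in SYMBOLS:
    print(symbol, fetch_rsi(symbol))
    time.sleep(spacing)
```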
36 | ### 🪙 Supported Symbols
37 |
38 | When using the free tier of TAAPI.io, you are limited to the following symbols from Binance:
39 |
40 | ```
41 | BTC/USDT, ETH/USDT, XRP/USDT, LTC/USDT, XMR/USDT
42 | ```
43 |
44 | Update your `.env` file to use only supported symbols:
45 |
46 | ```
47 | SYMBOLS=BTC/USDT,ETH/USDT
48 | ```
49 |
50 | ## 💹 Alpaca for Trade Execution
51 |
52 | TraderMagic uses Alpaca for executing trades, offering both paper trading and live trading capabilities.
53 |
54 | ### 🔑 API Key Configuration
55 |
56 | Add your Alpaca API credentials to the `.env` file:
57 |
58 | ```
59 | ALPACA_API_KEY=your_alpaca_key_here
60 | ALPACA_API_SECRET=your_alpaca_secret_here
61 | ```
62 |
63 | ### 📝 Paper Trading Mode
64 |
65 | For testing without real money, enable paper trading:
66 |
67 | ```
68 | ALPACA_PAPER_TRADING=true
69 | ```
70 |
71 | This uses Alpaca's paper trading API which simulates real trading without using actual funds.
72 |
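To confirm your paper-trading keys work outside TraderMagic, a minimal sketch with the `alpaca-py` SDK looks roughly like this (the symbol and notional amount are placeholders, not values the system uses):

```python
from alpaca.trading.client import TradingClient
from alpaca.trading.enums import OrderSide, TimeInForce
from alpaca.trading.requests import MarketOrderRequest

# paper=True routes all requests to https://paper-api.alpaca.markets
client = TradingClient("your_paper_trading_key", "your_paper_trading_secret", paper=True)

account = client.get_account()
print("Buying power:", account.buying_power)

# Dollar-amount (notional) market order against the paper account
order = client.submit_order(
    MarketOrderRequest(
        symbol="BTC/USD",
        notional=10,
        side=OrderSide.BUY,
        time_in_force=TimeInForce.GTC,
    )
)
print("Submitted paper order:", order.id)
```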
73 | ### 🔰 Pattern Day Trading Rules
74 |
75 | For stock trading in the US, Pattern Day Trading (PDT) rules apply to accounts under $25,000. The system can enforce these rules:
76 |
77 | ```
78 | ALPACA_ENFORCE_PDT_RULES=true # Prevent more than 3 day trades in 5 business days
79 | ```
80 |
81 | Note: PDT rules don't apply to crypto trading or paper trading accounts, so the system automatically bypasses the rules in these cases.
82 |
83 | ## 🧠 Ollama for AI Decision-Making
84 |
85 | TraderMagic uses Ollama to run an LLM (Large Language Model) locally for making trading decisions.
86 |
87 | ### 🤖 Model Configuration
88 |
89 | Configure the model to use:
90 |
91 | ```
92 | OLLAMA_MODEL=llama3.2:latest # Default LLM model
93 | OLLAMA_HOST=http://ollama:11434 # When using Docker
94 | ```
95 |
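To verify the configured model responds, you can call Ollama's HTTP `generate` endpoint directly. The prompt below is purely illustrative and not the prompt TraderMagic sends:

```python
import requests

OLLAMA_HOST = "http://localhost:11434"   # use http://ollama:11434 from inside Docker
OLLAMA_MODEL = "llama3.2:latest"

response = requests.post(
    f"{OLLAMA_HOST}/api/generate",
    json={
        "model": OLLAMA_MODEL,
        "prompt": "The 14-period RSI for BTC/USDT is 27. Answer with one word: buy, sell, or hold.",
        "stream": False,
    },
    timeout=120,
)
response.raise_for_status()
print(response.json()["response"].strip())
```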
96 | ### 🔄 Using Local Ollama
97 |
98 | If you already have Ollama installed on your host machine:
99 |
100 | 1. Download the required model manually:
101 | ```bash
102 | ollama pull llama3.2:latest # or your chosen model
103 | ```
104 |
105 | 2. Update the host in your `.env` file:
106 | ```
107 | OLLAMA_HOST=http://localhost:11434
108 | ```
109 |
110 | 3. Update `docker-compose.yml` to remove the Ollama service dependency.
111 |
112 | ### 🧩 Model Alternatives
113 |
114 | Ollama supports multiple models. Some alternatives you might consider:
115 |
116 | ```
117 | OLLAMA_MODEL=llama3:latest # Smaller footprint
118 | OLLAMA_MODEL=mistral:latest # Alternative architecture
119 | ```
120 |
121 | Larger models generally provide better analysis but require more resources.
--------------------------------------------------------------------------------
/docs/architecture.md:
--------------------------------------------------------------------------------
1 | # 🏗️ Architecture
2 |
3 | TraderMagic is built with a modern, service-based architecture that separates concerns and provides flexibility.
4 |
5 | ## 🧩 Components
6 |
7 | The system consists of four main services that work together:
8 |
9 | ### 📡 Data Retrieval Service
10 |
11 | **Purpose**: Fetches technical indicators from external APIs
12 | **Key Features**:
13 | - Polls TAAPI.io for RSI (Relative Strength Index) data
14 | - Rate-limiting to respect API constraints
15 | - Caches results in Redis for other services
16 |
17 | **Technology**:
18 | - Python async HTTP client
19 | - TAAPI.io API integration
20 | - Configurable polling intervals
21 |
22 | ### 🧠 AI Decision Engine
23 |
24 | **Purpose**: Analyzes market data and makes trading decisions
25 | **Key Features**:
26 | - Uses locally-run Ollama LLM to analyze RSI data
27 | - Determines whether to buy, sell, or hold based on analysis
28 | - Explains reasoning behind each decision
29 |
30 | **Technology**:
31 | - Ollama integration for local LLM inference
32 | - Prompt engineering for financial analysis
33 | - Redis for receiving data and publishing decisions
34 |
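As a rough illustration of the kind of prompt involved (this is not the project's actual prompt):

```python
# Hypothetical prompt template for turning an RSI reading into a decision request
PROMPT_TEMPLATE = (
    "You are a cautious trading assistant. The 14-period RSI for {symbol} is {rsi:.1f}. "
    "An RSI below 30 is commonly read as oversold and above 70 as overbought. "
    "Respond with exactly one word (buy, sell, or hold) followed by one sentence of reasoning."
)

print(PROMPT_TEMPLATE.format(symbol="BTC/USDT", rsi=27.4))
```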
35 | ### 💹 Trade Execution Service
36 |
37 | **Purpose**: Executes trades based on AI decisions
38 | **Key Features**:
39 | - Connects to Alpaca for trade execution
40 | - Implements trade amount calculation
41 | - Handles order placement and tracking
42 | - Respects trading enabled/disabled flag
43 |
44 | **Technology**:
45 | - Alpaca API integration
46 | - Portfolio management logic
47 | - Trading safeguards and limits
48 |
49 | ### 🖥️ Web Dashboard
50 |
51 | **Purpose**: User interface for monitoring and control
52 | **Key Features**:
53 | - Real-time trade monitoring
54 | - Trading control interface
55 | - Trade mode configuration
56 | - Theme customization
57 |
58 | **Technology**:
59 | - Flask for backend API
60 | - Socket.IO for real-time updates
61 | - Responsive frontend design
62 |
63 | ## 🔄 Data Flow
64 |
65 | 1. **Data Retrieval Service** polls TAAPI.io for RSI data
66 | 2. Data is stored in Redis with symbol-specific keys
67 | 3. **AI Decision Engine** consumes RSI data and generates trade signals
68 | 4. Signals are stored in Redis with corresponding keys
69 | 5. **Trade Execution Service** processes signals and executes trades
70 | 6. Trade results are stored in Redis
71 | 7. **Web Dashboard** displays all data from Redis in real-time
72 |
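A condensed sketch of this flow using the `redis` Python client (the `signal:<symbol>` key matches the one cleared by `force_clear_eth.sh`; the `rsi:<symbol>` key is illustrative):

```python
import json

import redis

r = redis.Redis(host="localhost", port=6379, db=0, decode_responses=True)

# Data Retrieval Service: cache the latest RSI reading for a symbol
r.set("rsi:BTC/USDT", json.dumps({"value": 27.4, "timestamp": "2025-03-15T10:00:00Z"}))

# AI Decision Engine: read the RSI data and publish a trade signal
rsi = json.loads(r.get("rsi:BTC/USDT"))
signal = {"symbol": "BTC/USDT", "decision": "buy" if rsi["value"] < 30 else "hold"}
r.set("signal:BTC/USDT", json.dumps(signal))

# Trade Execution Service / Web Dashboard: consume the signal
print(json.loads(r.get("signal:BTC/USDT")))
```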
73 | ## 📦 Containerization
74 |
75 | All components are containerized using Docker:
76 |
77 | ```
78 | ┌─────────────────┐     ┌─────────────────┐     ┌─────────────────┐
79 | │ Data Retrieval  │     │   AI Decision   │     │ Trade Execution │
80 | │    Service      │     │     Engine      │     │    Service      │
81 | └────────┬────────┘     └────────┬────────┘     └────────┬────────┘
82 |          │                       │                       │
83 |          │                       ▼                       │
84 |          │              ┌─────────────────┐              │
85 |          └─────────────►│      Redis      │◄─────────────┘
86 |                         │                 │
87 |                         └────────┬────────┘
88 |                                  │
89 |                                  ▼
90 |                         ┌─────────────────┐
91 |                         │  Web Dashboard  │
92 |                         │                 │
93 |                         └─────────────────┘
94 | ```
95 |
96 | Each service can be scaled independently and communicates via Redis.
--------------------------------------------------------------------------------
/docs/dashboard.md:
--------------------------------------------------------------------------------
1 | # 📊 Dashboard Features
2 |
3 | The TraderMagic dashboard provides a real-time view of your trading activities with an intuitive, responsive interface.
4 |
5 | 
6 |
7 | ## 📈 Key Features
8 |
9 | - **Current RSI values** for each symbol
10 | - **Latest AI trading decisions** (Buy, Sell, Hold)
11 | - **Trade execution status** with detailed results
12 | - **Recent activity history** log
13 | - **Trading toggle** button to control when trades are executed
14 | - **Market status visualization** showing regular, pre-market, after-hours, and closed market sessions
15 | - **Price charts** with session-specific markers
16 | - **Automatic updates** every 15 seconds
17 | - **Manual refresh** button for on-demand updates
18 |
19 | ## 🔄 Real-Time Data
20 |
21 | Both automatic and manual refreshes retrieve data from the Redis cache rather than calling external APIs directly, so refreshing the dashboard adds no load to rate-limited services.
22 |
23 | ## 💰 Account Summary
24 |
25 | The account summary section provides key financial metrics:
26 |
27 | - **Portfolio Value**: Total value of your account
28 | - **Cash Balance**: Available cash in your account
29 | - **Buying Power**: Available funds for trading
30 | - **Daily Change**: Today's portfolio change (color-coded):
31 | - Green: Positive change (+)
32 | - Red: Negative change (-)
33 | - Shows both dollar amount and percentage
34 |
35 | ## 🌙 Theme Options
36 |
37 | The dashboard supports light and dark modes:
38 |
39 | - **Auto**: Follows your system preference
40 | - **Light**: Classic light theme for daytime use
41 | - **Dark**: Reduced eye strain for nighttime use
42 |
43 | The theme selector ensures all options remain clearly visible regardless of the current theme.
44 |
45 | ## 🔧 Trade Settings Control
46 |
47 | Easily modify trading parameters directly from the dashboard:
48 |
49 | 1. **Trade Mode Selection**:
50 | - Portfolio Percentage: Trade a percentage of your account
51 | - Fixed Amount: Trade a specific dollar amount
52 |
53 | 2. **Amount Configuration**:
54 | - Adjust percentage (0.1% to 100%)
55 | - Set fixed dollar amount ($1 minimum)
56 |
57 | 3. **Trading Control**:
58 | - Start/Stop button to enable or disable trading
59 | - Prevents accidental trades until you're ready
60 |
61 | ## 📱 Responsive Design
62 |
63 | The dashboard is designed to work on all devices:
64 | - Desktop: Full-featured trading dashboard
65 | - Tablet: Optimized layout for medium screens
66 | - Mobile: Compact view for monitoring on the go
67 |
68 | ## 📊 Price Charts with Market Context
69 |
70 | Price charts provide valuable context about market conditions:
71 |
72 | ### 🕒 Market Session Indicators
73 |
74 | Different marker styles show when a price candle was recorded:
75 | - **Regular Market Hours**: Small circular points
76 | - **Pre-Market**: Triangle markers
77 | - **After-Hours**: Rotated square markers
78 | - **Closed Market**: X markers
79 |
80 | ### 📈 Session-Aware Visualization
81 |
82 | This visual distinction helps you:
83 | - Identify price movements during different market sessions
84 | - Recognize patterns unique to pre-market or after-hours trading
85 | - Distinguish between high-volume regular hours and thinner extended hours
86 | - Factor market conditions into trading decisions
87 |
88 | ### 🔄 Automatic Timezone Adjustment
89 |
90 | The system automatically determines the appropriate market session based on Eastern Time (ET), the standard for US stock markets. This provides context about potential liquidity and volatility during different trading sessions.
91 |
92 | ## 🧪 Debug Interface
93 |
94 | For testing purposes, a special debug page is available at `http://localhost:9753/debug`:
95 |
96 | - Direct trade execution buttons
97 | - Current configuration display
98 | - Test trade functionality
99 |
100 | This debug interface is invaluable for verifying that your trade settings are working correctly without waiting for trade signals.
--------------------------------------------------------------------------------
/docs/img/dashboard-screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rawveg/trader-magic/6390b1030c3ac50634d1f72ffadfccb0e862127a/docs/img/dashboard-screenshot.png
--------------------------------------------------------------------------------
/docs/installation.md:
--------------------------------------------------------------------------------
1 | # 🚀 Installation Guide
2 |
3 | Follow these steps to get TraderMagic up and running on your system.
4 |
5 | ## 📋 Prerequisites
6 |
7 | Before getting started, make sure you have:
8 |
9 | - **Docker** with Docker Compose support
10 | - **~4GB Disk Space** (for Docker images and Ollama models)
11 |
12 | ### 🔑 Required External Accounts
13 |
14 | TraderMagic requires accounts with these external services:
15 |
16 | #### 📊 TAAPI.io
17 | - **Purpose**: Provides technical indicators (RSI values) for trading decisions
18 | - **Pricing**: Offers free tier with limited usage, and paid tiers for more features
19 | - **Signup**: [Create TAAPI.io Account](https://taapi.io/signup)
20 | - **Documentation**: [TAAPI API Docs](https://taapi.io/documentation/)
21 | - **API Key**: After signup, generate an API key from your dashboard
22 |
23 | #### 💹 Alpaca Markets
24 | - **Purpose**: Executes trades based on system signals
25 | - **Features**: Offers paper trading for safe testing without real money
26 | - **Signup**: [Create Alpaca Account](https://app.alpaca.markets/signup)
27 | - **Documentation**: [Alpaca API Docs](https://alpaca.markets/docs/)
28 | - **API Keys**: After signup, generate API key and secret from your dashboard settings
29 |
30 | > **Note**: The free tier of TAAPI.io has limitations on which symbols you can use and how frequently you can poll data. Consider upgrading if you need more symbols or faster updates.
31 |
32 | ## 🔧 Step 1: Clone the Repository
33 |
34 | ```bash
35 | git clone https://github.com/rawveg/trader-magic.git
36 | cd trader-magic
37 | ```
38 |
39 | ## ⚙️ Step 2: Configure Environment Variables
40 |
41 | Copy the example environment file:
42 |
43 | ```bash
44 | cp .env.sample .env
45 | ```
46 |
47 | Then edit the `.env` file with your favorite text editor and fill in your API keys:
48 |
49 | ```bash
50 | # Essential API Keys
51 | TAAPI_API_KEY=your_taapi_api_key_here
52 | ALPACA_API_KEY=your_alpaca_api_key_here
53 | ALPACA_API_SECRET=your_alpaca_api_secret_here
54 |
55 | # Trading Configuration (defaults shown)
56 | SYMBOLS=BTC/USD,ETH/USD # Can also add stocks like AAPL,TSLA,MSFT
57 | TRADE_PERCENTAGE=2.0
58 | TRADE_FIXED_AMOUNT=10.0
59 | TRADE_USE_FIXED=false
60 |
61 | # Safety Settings (recommended for starting)
62 | ALPACA_PAPER_TRADING=true
63 | ALPACA_DEBUG_MODE=true
64 | ```
65 |
66 | ## 🏗️ Step 3: Start the Services
67 |
68 | Launch the application using Docker Compose:
69 |
70 | ```bash
71 | docker compose up -d
72 | ```
73 |
74 | This command starts all the required services in detached mode:
75 | - Redis database
76 | - Ollama AI model server
77 | - Data retrieval service
78 | - AI decision engine
79 | - Trade execution service
80 | - Web dashboard
81 |
82 | ## 🖥️ Step 4: Access the Dashboard
83 |
84 | Once all services are running, access the web dashboard:
85 |
86 | ```
87 | http://localhost:9753
88 | ```
89 |
90 | The first startup might take a few minutes as the system:
91 | 1. Downloads necessary Docker images
92 | 2. Downloads Ollama models
93 | 3. Initializes connections to external APIs
94 |
95 | ## 🔄 Restarting After Configuration Changes
96 |
97 | After making changes to the `.env` file, use the provided restart script:
98 |
99 | ```bash
100 | chmod +x restart.sh # Make it executable (first time only)
101 | ./restart.sh # Restart all services with new config
102 | ```
103 |
104 | ## 🔍 Verifying Installation
105 |
106 | Check that all services are running:
107 |
108 | ```bash
109 | docker compose ps
110 | ```
111 |
112 | You should see the following services in the "Up" state:
113 | - redis
114 | - ollama
115 | - frontend
116 | - data_retrieval
117 | - ai_decision
118 | - trade_execution
119 |
120 | ## 🔧 Troubleshooting
121 |
122 | If you encounter issues:
123 |
124 | 1. Check the logs for each service:
125 | ```bash
126 | docker compose logs frontend
127 | docker compose logs data_retrieval
128 | docker compose logs ai_decision
129 | docker compose logs trade_execution
130 | ```
131 |
132 | 2. Verify API keys are correctly set in your `.env` file
133 |
134 | 3. Make sure all required ports are available on your system:
135 | - 9753 (Frontend)
136 | - 6379 (Redis)
137 | - 11434 (Ollama)
138 |
139 | ## 📈 Supported Symbol Formats
140 |
141 | TraderMagic supports both cryptocurrency and stock symbols:
142 |
143 | ### Cryptocurrency Symbols
144 | Use the standard format with a slash:
145 | ```
146 | BTC/USDT, ETH/USDT, XRP/USDT, etc.
147 | ```
148 |
149 | ### Stock Symbols
150 | Use the simple ticker format:
151 | ```
152 | AAPL, TSLA, MSFT, NVDA, etc.
153 | ```
154 |
155 | > **Note**: The system automatically converts stock tickers to the proper format for API requests. For example, `TSLA` is converted to `TSLA/USD` when querying TAAPI.io.
156 |
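Conceptually, that conversion amounts to something like the following (an illustrative sketch, not the project's actual code):

```python
def normalize_symbol(symbol: str) -> str:
    """Return a slash-formatted pair, mapping bare stock tickers to <TICKER>/USD."""
    symbol = symbol.strip().upper()
    return symbol if "/" in symbol else f"{symbol}/USD"


print(normalize_symbol("TSLA"))      # TSLA/USD
print(normalize_symbol("BTC/USDT"))  # BTC/USDT (unchanged)
```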
157 | ### Free Tier Limitations
158 | If you're using the free tier of TAAPI.io, you're limited to specific cryptocurrency pairs only:
159 | - BTC/USDT
160 | - ETH/USDT
161 | - XRP/USDT
162 | - LTC/USDT
163 | - XMR/USDT
164 |
165 | For full symbol support, consider upgrading to a paid tier.
--------------------------------------------------------------------------------
/docs/trading-modes.md:
--------------------------------------------------------------------------------
1 | # 🚦 Trading Modes
2 |
3 | TraderMagic has several configuration options that control how trades are executed and displayed.
4 |
5 | ## 🔄 Paper vs. Live Trading
6 |
7 | TraderMagic can operate in two primary trading environments:
8 |
9 | ### 📝 Paper Trading Mode
10 |
11 | Paper trading uses Alpaca's sandbox environment instead of their live trading API. It makes real API calls but uses simulated money.
12 |
13 | ```
14 | ALPACA_PAPER_TRADING=true # Use Alpaca's paper trading API (recommended)
15 | ALPACA_PAPER_TRADING=false # Use Alpaca's live trading API (real money!)
16 | ```
17 |
18 | ### 📊 Debug Mode
19 |
20 | Debug mode is completely local and doesn't make any API calls at all - even to Alpaca's paper trading environment:
21 |
22 | ```
23 | ALPACA_DEBUG_MODE=true # Simulate trades locally with no API calls
24 | ALPACA_DEBUG_MODE=false # Make actual API calls (to paper or live API based on setting above)
25 | ```
26 |
27 | When debug mode is enabled:
28 | 1. **NO API calls** are made to Alpaca (even if paper trading is enabled)
29 | 2. A prominent purple banner appears at the top of the dashboard saying "DEBUG MODE"
30 | 3. A "Debug Mode (No API Calls)" badge is shown in the system info section
31 | 4. All trades will show an order ID starting with "sim-"
32 |
33 | **⚠️ IMPORTANT**: These two settings are independent. You can have any combination:
34 |
35 | | Paper Trading | Debug Mode | Behavior |
36 | |---------------|------------|----------|
37 | | true | true | No API calls, completely simulated trades |
38 | | true | false | Makes API calls to Alpaca's paper trading environment |
39 | | false | true | No API calls, completely simulated trades |
40 | | false | false | Makes API calls to Alpaca's live trading environment (real money!) |
41 |
42 | For development and testing, we recommend:
43 | ```
44 | ALPACA_PAPER_TRADING=true # Use sandbox environment
45 | ALPACA_DEBUG_MODE=true # No API calls made
46 | ```
47 |
48 | When you're ready to test with actual API calls:
49 | ```
50 | ALPACA_PAPER_TRADING=true # Use sandbox environment
51 | ALPACA_DEBUG_MODE=false # Make real API calls to paper trading
52 | ```
53 |
54 | ## ⚙️ Trading Control
55 |
56 | TraderMagic includes a safety feature that prevents automatic trading until explicitly enabled by the user.
57 |
58 | By default, trading is disabled when the system starts for safety. This means:
59 | - The system will collect RSI data and generate trading signals
60 | - The AI will make decisions on what actions to take
61 | - But no actual trades will be executed until trading is enabled through the UI
62 |
63 | To enable trading:
64 | 1. Click the green "Start Trading" button on the dashboard
65 | 2. The button will turn red and change to "Stop Trading" when active
66 | 3. Click again at any time to immediately disable trading
67 |
68 | This safety feature gives you complete control over when the system can execute trades, allowing you to monitor signal quality before committing to automatic trading. Trading is always disabled when the application is restarted.
69 |
70 | ## 💰 Trade Amount Settings
71 |
72 | TraderMagic supports two modes for determining trade sizes:
73 |
74 | ### 📊 Portfolio Percentage (Default)
75 |
76 | By default, the system trades a percentage of your portfolio value (default: 2%). This means:
77 | - The amount traded scales with your account size
78 | - As your portfolio grows, so do your trade sizes
79 | - Trade amounts automatically adjust based on asset price changes
80 |
81 | To configure percentage-based trading:
82 | ```
83 | TRADE_PERCENTAGE=2.0 # Percentage of portfolio to trade
84 | TRADE_USE_FIXED=false # Use percentage mode
85 | ```
86 |
87 | ### 💵 Fixed Amount Trading
88 |
89 | Alternatively, you can configure the system to use a consistent dollar amount for each trade:
90 | - Each trade will use exactly the specified amount (e.g., always trade $10)
91 | - The system calculates the appropriate quantity based on the current price
92 | - This is useful for consistency in testing and for limiting exposure
93 |
94 | To configure fixed amount trading:
95 | ```
96 | TRADE_FIXED_AMOUNT=10.0 # Fixed amount in USD for each trade
97 | TRADE_USE_FIXED=true # Use fixed amount mode
98 | ```
99 |
100 | You can change between these modes directly in the dashboard without modifying the .env file.
101 |
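The two modes reduce to a simple calculation. Here is a sketch using the setting names above; the quantity conversion at the end is illustrative:

```python
def trade_notional(portfolio_value: float,
                   trade_percentage: float = 2.0,
                   trade_fixed_amount: float = 10.0,
                   trade_use_fixed: bool = False) -> float:
    """Return the dollar amount to trade under the configured sizing mode."""
    if trade_use_fixed:
        return trade_fixed_amount
    return portfolio_value * (trade_percentage / 100.0)


# Example with a $5,000 portfolio
notional = trade_notional(5_000)                     # 100.0 (2% of portfolio)
fixed = trade_notional(5_000, trade_use_fixed=True)  # 10.0 (fixed amount)

# Converting a dollar amount to a quantity at the current asset price
price = 65_000.0  # e.g. BTC/USD
print(round(notional / price, 6), round(fixed / price, 6))
```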
102 | ## 🕒 Market Hours Visualization
103 |
104 | TraderMagic visualizes different market trading sessions for stocks. The system automatically detects and displays the current market status based on the Eastern Time (ET) zone:
105 |
106 | ### 📉 Market Status Types
107 |
108 | The system tracks four distinct market states:
109 |
110 | | Status | Description | Visual Indicator | Time (ET) |
111 | |--------|-------------|------------------|-----------|
112 | | **Open** | Regular market hours | Circle markers | 9:30 AM - 4:00 PM |
113 | | **Pre-Market** | Early trading session | Triangle markers | 4:00 AM - 9:30 AM |
114 | | **After-Hours** | Extended trading after close | Square markers | 4:00 PM - 8:00 PM |
115 | | **Closed** | Market closed (overnight/weekends) | X markers | 8:00 PM - 4:00 AM / Weekends |
116 |
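The session boundaries in the table above can be expressed as a small classifier. This sketch is illustrative only and does not account for market holidays:

```python
from datetime import datetime, time
from zoneinfo import ZoneInfo

ET = ZoneInfo("America/New_York")


def market_session(now: datetime | None = None) -> str:
    """Classify the current US equity session from Eastern Time (holidays not handled)."""
    et_now = now.astimezone(ET) if now else datetime.now(tz=ET)
    if et_now.weekday() >= 5:  # Saturday or Sunday
        return "closed"
    t = et_now.time()
    if time(4, 0) <= t < time(9, 30):
        return "pre-market"
    if time(9, 30) <= t < time(16, 0):
        return "open"
    if time(16, 0) <= t < time(20, 0):
        return "after-hours"
    return "closed"


print(market_session())
```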
117 | ### 📊 Chart Visualization
118 |
119 | Price charts automatically display different point styles based on when the candle data was recorded:
120 |
121 | - **Regular Hours**: Standard circular points (smaller size)
122 | - **Pre-Market**: Triangle markers (larger size to highlight early trading)
123 | - **After-Hours**: Rotated square markers
124 | - **Closed Market**: X-shaped markers for data points when markets are closed
125 |
126 | This visual differentiation helps you:
127 | - Identify which trading session influenced a price movement
128 | - Recognize patterns specific to pre-market or after-hours trading
129 | - Understand price data in context of market hours
130 | - Factor market session into your trading decisions
131 |
132 | Cryptocurrency pairs (like BTC/USDT) are always shown with "Open" market status since they trade 24/7.
--------------------------------------------------------------------------------
/docs/troubleshooting.md:
--------------------------------------------------------------------------------
1 | # 🔍 Troubleshooting Guide
2 |
3 | This guide helps you diagnose and fix common issues with TraderMagic.
4 |
5 | ## 🛑 Common Issues
6 |
7 | ### 📊 No Trades Appearing in UI
8 |
9 | **Symptoms**:
10 | - Trade signals are being generated but no trades show up in the dashboard
11 | - You see "No recent trades" in the trade info section
12 |
13 | **Possible Causes & Solutions**:
14 |
15 | 1. **Trading is disabled**
16 | - Look for the "Trading Disabled" badge in the footer
17 | - Click the "Start Trading" button to enable trading
18 |
19 | 2. **Debug mode is affecting behavior**
20 | - Check if `ALPACA_DEBUG_MODE=true` in your `.env` file
21 | - Set to `false` to allow real API calls
22 |
23 | 3. **Fixed amount is too low**
24 | - If using fixed amount mode, ensure the amount is sufficient
25 | - Minimum trade amount is often $1 or higher
26 |
27 | 4. **API credentials issue**
28 | - Check your Alpaca API keys in the `.env` file
29 | - Verify account status on Alpaca's dashboard
30 |
31 | **Diagnostic Steps**:
32 | 1. Visit the debug dashboard at `http://localhost:9753/debug`
33 | 2. Check the current trading settings displayed
34 | 3. Try executing a manual test trade to verify functionality
35 |
36 | ### 🐢 Slow Updates or Timeouts
37 |
38 | **Symptoms**:
39 | - Dashboard shows stale data
40 | - Log shows timeout errors or connection issues
41 |
42 | **Possible Causes & Solutions**:
43 |
44 | 1. **TAAPI.io rate limits**
45 | - Increase the `POLL_INTERVAL` in your `.env` file
46 | - Consider upgrading your TAAPI.io subscription tier
47 |
48 | 2. **Network connectivity issues**
49 | - Check your internet connection
50 | - Verify that API services are reachable from your network
51 |
52 | 3. **Resource constraints**
53 | - Ensure your system has sufficient RAM and CPU
54 | - Consider scaling down to fewer symbols or simpler models
55 |
56 | ### ⚠️ API Rate Limit Errors
57 |
58 | **Symptoms**:
59 | - Logs show 429 errors from TAAPI.io
60 | - No price updates for extended periods
61 |
62 | **Solutions**:
63 | 1. Increase poll interval in `.env`:
64 | ```
65 | POLL_INTERVAL=300 # 5 minutes
66 | ```
67 |
68 | 2. Reduce number of symbols monitored:
69 | ```
70 | SYMBOLS=BTC/USD # Just one symbol
71 | ```
72 |
73 | 3. Verify you're using supported symbols on your tier
74 |
75 | ### 🔌 Redis Connection Issues
76 |
77 | **Symptoms**:
78 | - Web UI not updating
79 | - Errors containing "Redis connection"
80 |
81 | **Solutions**:
82 | 1. Check the Redis logs:
83 | ```bash
84 | docker compose logs redis
85 | ```
86 |
87 | 2. Verify Redis is running:
88 | ```bash
89 | docker compose ps redis
90 | ```
91 |
92 | 3. Try restarting just the Redis container:
93 | ```bash
94 | docker compose restart redis
95 | ```
96 |
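Because docker-compose publishes Redis on port 6379, you can also probe it from a quick Python shell on the host. The `trading_enabled` and `signal:*` keys match the ones used by the helper scripts; other keys may vary:

```python
import redis

r = redis.Redis(host="localhost", port=6379, db=0, decode_responses=True)
print(r.ping())                  # True if Redis is reachable
print(r.get("trading_enabled"))  # toggle written by force_trading_disabled.sh
print(r.keys("signal:*"))        # trade signals, e.g. signal:BTC/USDT
```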
97 | ### 🤖 Ollama Model Issues
98 |
99 | **Symptoms**:
100 | - No AI decisions being made
101 | - Errors mentioning model not found
102 |
103 | **Solutions**:
104 | 1. Check if the model is downloaded:
105 | ```bash
106 | docker compose exec ollama ollama list
107 | ```
108 |
109 | 2. Check the Ollama logs:
110 | ```bash
111 | docker compose logs ollama
112 | ```
113 |
114 | 3. Try a different model:
115 | ```
116 | OLLAMA_MODEL=llama3:latest # Change in .env file
117 | ```
118 |
119 | 4. Restart the Ollama service:
120 | ```bash
121 | docker compose restart ollama
122 | ```
123 |
124 | ## 📋 Log Analysis
125 |
126 | When troubleshooting, check the logs for specific services:
127 |
128 | ```bash
129 | # Check all logs together
130 | docker compose logs -f
131 |
132 | # Check specific service logs
133 | docker compose logs -f data_retrieval
134 | docker compose logs -f ai_decision
135 | docker compose logs -f trade_execution
136 | docker compose logs -f frontend
137 | ```
138 |
139 | ## 🔄 Reset Procedure
140 |
141 | If you need a complete reset:
142 |
143 | 1. Stop all services:
144 | ```bash
145 | docker compose down
146 | ```
147 |
148 | 2. Remove Redis volumes:
149 | ```bash
150 | docker volume rm tradermagic_redis_data
151 | ```
152 |
153 | 3. Start fresh:
154 | ```bash
155 | docker compose up -d
156 | ```
157 |
158 | ## 🆘 Getting Help
159 |
160 | If you've tried the troubleshooting steps above and still need help:
161 |
162 | 1. Check the GitHub issues: https://github.com/rawveg/trader-magic/issues
163 | 2. Submit a new issue with:
164 | - Detailed description of your problem
165 | - Relevant logs (with sensitive information removed)
166 | - Your configuration settings (without API keys)
167 | - Steps you've already taken to troubleshoot
--------------------------------------------------------------------------------
/force_clear_eth.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo "Clearing ETH/USDT trade data from Redis..."
3 | docker compose exec -T redis redis-cli del 'trade_result:ETH/USDT'
4 | docker compose exec -T redis redis-cli del 'signal:ETH/USDT'
5 | echo "Done. Refresh your browser to see the changes."
6 |
--------------------------------------------------------------------------------
/force_trading_disabled.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Force trading to be disabled at startup
3 | redis-cli set trading_enabled false
4 | echo "Forced trading to DISABLED state for safety"
5 |
--------------------------------------------------------------------------------
/frontend/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.11-slim
2 |
3 | WORKDIR /app/frontend
4 |
5 | # Copy only the frontend requirements first
6 | COPY requirements.txt .
7 | RUN pip install --no-cache-dir -r requirements.txt
8 |
9 | # Copy the frontend code
10 | COPY . .
11 |
12 | # Set PYTHONPATH to include parent directory
13 | ENV PYTHONPATH=/app
14 |
15 | CMD ["python", "app.py"]
--------------------------------------------------------------------------------
/frontend/README.md:
--------------------------------------------------------------------------------
1 | # TraderMagic Web Dashboard
2 |
3 | This is the web dashboard for the TraderMagic system. It provides a real-time interface to monitor trading activities.
4 |
5 | ## Features
6 |
7 | - Real-time updates via WebSocket
8 | - Responsive design that works on desktop and mobile
9 | - Activity history tracking
10 | - Symbol-specific cards with trading details
11 | - Clear visualization of buy/sell/hold decisions
12 |
13 | ## Technologies Used
14 |
15 | - Flask for the web server
16 | - Socket.IO for real-time updates
17 | - Redis for data storage and pub/sub
18 | - Modern CSS with flexbox and grid layouts
19 | - Vanilla JavaScript (no frameworks needed)
20 |
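A stripped-down sketch of how these pieces fit together (event names and Redis keys here are illustrative; see `app.py` for the real wiring):

```python
import redis
from flask import Flask, render_template
from flask_socketio import SocketIO

app = Flask(__name__)
socketio = SocketIO(app, cors_allowed_origins="*")
r = redis.Redis(host="redis", port=6379, decode_responses=True)


@app.route("/")
def index():
    return render_template("index.html")


@socketio.on("connect")
def on_connect():
    # Push the latest cached signal to a newly connected dashboard client
    socketio.emit("update", {"signal": r.get("signal:BTC/USDT")})


if __name__ == "__main__":
    socketio.run(app, host="0.0.0.0", port=9753)
```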
21 | ## Development
22 |
23 | To develop the frontend standalone:
24 |
25 | 1. Create a virtual environment:
26 | ```
27 | python -m venv venv
28 | source venv/bin/activate # On Windows: venv\Scripts\activate
29 | ```
30 |
31 | 2. Install dependencies:
32 | ```
33 | pip install -r requirements.txt
34 | ```
35 |
36 | 3. Run the Flask development server:
37 | ```
38 | python app.py
39 | ```
40 |
41 | 4. Access the dashboard at http://localhost:9753
42 |
43 | ## Docker
44 |
45 | In production, the frontend is containerized and runs as part of the Docker setup described in the main README.
46 |
47 | ## Structure
48 |
49 | - `app.py` - Main Flask application
50 | - `templates/` - HTML templates
51 | - `static/css/` - CSS stylesheets
52 | - `static/js/` - JavaScript files
53 | - `static/img/` - Images and favicon
--------------------------------------------------------------------------------
/frontend/requirements.txt:
--------------------------------------------------------------------------------
1 | flask==2.3.3
2 | python-dotenv==1.0.0
3 | redis==5.0.1
4 | pydantic>=1.9.0,<2.0.0 # Use a version compatible with alpaca-py
5 | loguru==0.7.2
6 | flask-socketio==5.3.6
--------------------------------------------------------------------------------
/frontend/static/css/styles.css:
--------------------------------------------------------------------------------
1 | /* Light theme (default) */
2 | :root {
3 | --primary: #2563eb;
4 | --primary-dark: #1e40af;
5 | --primary-light: #3b82f6;
6 | --secondary: #14b8a6;
7 | --background: #f1f5f9;
8 | --card-bg: #ffffff;
9 | --text: #1e293b;
10 | --text-light: #64748b;
11 | --success: #10b981;
12 | --warning: #f59e0b;
13 | --danger: #ef4444;
14 | --border: #cbd5e1;
15 | --header-height: 60px;
16 | --footer-height: 110px;
17 | --shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1), 0 2px 4px -1px rgba(0, 0, 0, 0.06);
18 | --radius: 8px;
19 | --modal-overlay: rgba(0, 0, 0, 0.5);
20 | }
21 |
22 | /* Dark theme */
23 | [data-theme="dark"] {
24 | --primary: #3b82f6;
25 | --primary-dark: #2563eb;
26 | --primary-light: #60a5fa;
27 | --secondary: #14b8a6;
28 | --background: #1e293b;
29 | --card-bg: #0f172a;
30 | --text: #e2e8f0;
31 | --text-light: #94a3b8;
32 | --success: #10b981;
33 | --warning: #f59e0b;
34 | --danger: #ef4444;
35 | --border: #334155;
36 | --shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.2), 0 2px 4px -1px rgba(0, 0, 0, 0.1);
37 | --modal-overlay: rgba(0, 0, 0, 0.7);
38 | }
39 |
40 | * {
41 | margin: 0;
42 | padding: 0;
43 | box-sizing: border-box;
44 | }
45 |
46 | html, body {
47 | min-height: 100%;
48 | /* Remove overflow: hidden to allow scrolling */
49 | }
50 |
51 | body {
52 | font-family: 'Inter', sans-serif;
53 | background-color: var(--background);
54 | color: var(--text);
55 | /* Keep flex display but allow content to flow naturally */
56 | display: flex;
57 | flex-direction: column;
58 | }
59 |
60 | header {
61 | background-color: var(--card-bg);
62 | box-shadow: var(--shadow);
63 | height: var(--header-height);
64 | flex-shrink: 0;
65 | z-index: 100;
66 | display: flex;
67 | align-items: center;
68 | }
69 |
70 | .header-container {
71 | max-width: 1280px;
72 | margin: 0 auto;
73 | padding: 0 1rem;
74 | height: 100%;
75 | display: flex;
76 | justify-content: space-between;
77 | align-items: center;
78 | width: 100%;
79 | }
80 |
81 | .logo {
82 | display: flex;
83 | align-items: center;
84 | gap: 0.75rem;
85 | flex-shrink: 0;
86 | }
87 |
88 | .logo i {
89 | font-size: 1.75rem;
90 | color: var(--primary);
91 | }
92 |
93 | .logo h1 {
94 | font-size: 1.5rem;
95 | font-weight: 600;
96 | color: var(--primary);
97 | }
98 |
99 | .header-actions {
100 | display: flex;
101 | align-items: center;
102 | gap: 1rem;
103 | justify-content: flex-end;
104 | flex-shrink: 0;
105 | }
106 |
107 | #docs-btn {
108 | background-color: var(--primary);
109 | color: white;
110 | border: none;
111 | border-radius: var(--radius);
112 | padding: 0.5rem 1rem;
113 | font-size: 0.875rem;
114 | font-weight: 500;
115 | cursor: pointer;
116 | display: flex;
117 | align-items: center;
118 | gap: 0.5rem;
119 | transition: background-color 0.2s;
120 | }
121 |
122 | .status-indicators {
123 | display: flex;
124 | align-items: center;
125 | justify-content: center;
126 | gap: 1.5rem;
127 | flex: 1;
128 | }
129 |
130 | .status-indicator {
131 | display: flex;
132 | align-items: center;
133 | font-size: 1rem;
134 | font-weight: 500;
135 | }
136 |
137 | .status-indicator.connected {
138 | color: var(--success);
139 | }
140 |
141 | .status-indicator.disconnected {
142 | color: var(--danger);
143 | }
144 |
145 | .status-indicator.downloading {
146 | color: var(--warning);
147 | }
148 |
149 | .status-indicator.ready {
150 | color: var(--success);
151 | }
152 |
153 | .status-indicator.error {
154 | color: var(--danger);
155 | }
156 |
157 | .status-indicator.initializing {
158 | color: var(--text-light);
159 | }
160 |
161 | main {
162 | padding: 1rem 1rem;
163 | flex-grow: 1;
164 | /* Remove fixed height calculation to allow content to expand naturally */
165 | min-height: 400px; /* Set a minimum height for the main content */
166 | }
167 |
168 | .dashboard-container {
169 | max-width: 1280px;
170 | margin: 0 auto;
171 | }
172 |
173 | .dashboard-header {
174 | display: flex;
175 | justify-content: space-between;
176 | align-items: center;
177 | margin-bottom: 1.5rem;
178 | }
179 |
180 | .dashboard-header h2 {
181 | font-size: 1.5rem;
182 | font-weight: 600;
183 | }
184 |
185 | .controls {
186 | display: flex;
187 | gap: 0.75rem;
188 | align-items: center;
189 | flex-wrap: wrap;
190 | }
191 |
192 | .trade-mode-controls {
193 | display: flex;
194 | align-items: center;
195 | flex-wrap: wrap;
196 | gap: 10px;
197 | background-color: var(--card-bg);
198 | border-radius: 6px;
199 | padding: 8px 12px;
200 | box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
201 | }
202 |
203 | .trade-amount-controls {
204 | display: flex;
205 | align-items: center;
206 | gap: 5px;
207 | }
208 |
209 | .trading-select {
210 | background-color: var(--background);
211 | border: 1px solid var(--border);
212 | border-radius: 4px;
213 | padding: 5px 8px;
214 | font-size: 14px;
215 | color: var(--text);
216 | }
217 |
218 | .trade-input {
219 | background-color: var(--background);
220 | border: 1px solid var(--border);
221 | border-radius: 4px;
222 | padding: 5px 8px;
223 | width: 70px;
224 | font-size: 14px;
225 | color: var(--text);
226 | }
227 |
228 | .btn {
229 | background-color: var(--primary);
230 | color: white;
231 | border: none;
232 | border-radius: var(--radius);
233 | padding: 0.5rem 1rem;
234 | font-size: 0.875rem;
235 | font-weight: 500;
236 | cursor: pointer;
237 | display: flex;
238 | align-items: center;
239 | gap: 0.5rem;
240 | transition: background-color 0.2s;
241 | }
242 |
243 | .btn:hover {
244 | background-color: var(--primary-dark);
245 | }
246 |
247 | .symbols-container {
248 | display: grid;
249 | grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
250 | gap: 1rem;
251 | margin-bottom: 1.5rem;
252 | }
253 |
254 | /* Account Summary Styles */
255 | .account-summary-container {
256 | background-color: var(--card-bg);
257 | border-radius: var(--radius);
258 | padding: 1.25rem;
259 | box-shadow: var(--shadow);
260 | margin-bottom: 1.5rem;
261 | }
262 |
263 | .account-summary-header {
264 | display: flex;
265 | justify-content: space-between;
266 | align-items: center;
267 | margin-bottom: 1rem;
268 | }
269 |
270 | .account-summary-header h3 {
271 | font-size: 1.25rem;
272 | font-weight: 600;
273 | color: var(--text);
274 | }
275 |
276 | .account-type-badge {
277 | background-color: var(--primary);
278 | color: white;
279 | padding: 0.25rem 0.75rem;
280 | border-radius: 1rem;
281 | font-size: 0.75rem;
282 | font-weight: 500;
283 | }
284 |
285 | .account-type-badge.paper {
286 | background-color: var(--primary);
287 | }
288 |
289 | .account-type-badge.live {
290 | background-color: var(--danger);
291 | }
292 |
293 | .account-metrics {
294 | display: flex;
295 | flex-wrap: wrap;
296 | gap: 1.5rem;
297 | }
298 |
299 | .account-actions {
300 | display: flex;
301 | justify-content: flex-end;
302 | gap: 1rem;
303 | margin-top: 1rem;
304 | padding-top: 1rem;
305 | border-top: 1px solid var(--border);
306 | }
307 |
308 | .warning-message {
309 | background-color: rgba(239, 68, 68, 0.1);
310 | border-radius: var(--radius);
311 | padding: 1.25rem;
312 | margin-bottom: 1.5rem;
313 | }
314 |
315 | .warning-message i {
316 | color: var(--danger);
317 | font-size: 1.5rem;
318 | margin-bottom: 1rem;
319 | }
320 |
321 | .warning-message ul {
322 | margin: 1rem 0;
323 | padding-left: 1.5rem;
324 | }
325 |
326 | .warning-message li {
327 | margin-bottom: 0.5rem;
328 | color: var(--text);
329 | }
330 |
331 | .modal-actions {
332 | display: flex;
333 | justify-content: flex-end;
334 | gap: 1rem;
335 | margin-top: 1.5rem;
336 | }
337 |
338 | .account-metric {
339 | flex: 1;
340 | min-width: 150px;
341 | background-color: var(--background);
342 | border-radius: var(--radius);
343 | padding: 0.75rem 1rem;
344 | }
345 |
346 | /* Color coding for positive and negative values in account metrics */
347 | .account-metric .metric-value.positive {
348 | color: var(--success);
349 | }
350 |
351 | .account-metric .metric-value.negative {
352 | color: var(--danger);
353 | }
354 |
355 | .positions-summary {
356 | background-color: var(--background);
357 | border-radius: var(--radius);
358 | padding: 0.75rem;
359 | }
360 |
361 | .positions-header {
362 | display: flex;
363 | justify-content: space-between;
364 | align-items: center;
365 | padding: 0.5rem;
366 | margin-bottom: 0.5rem;
367 | font-weight: 500;
368 | }
369 |
370 | .positions-table-container {
371 | overflow-x: auto;
372 | }
373 |
374 | .positions-table {
375 | width: 100%;
376 | border-collapse: collapse;
377 | font-size: 0.875rem;
378 | }
379 |
380 | .positions-table th,
381 | .positions-table td {
382 | padding: 0.5rem 0.75rem;
383 | text-align: right;
384 | }
385 |
386 | .positions-table th:first-child,
387 | .positions-table td:first-child {
388 | text-align: left;
389 | }
390 |
391 | .positions-table th {
392 | font-weight: 500;
393 | color: var(--text-light);
394 | }
395 |
396 | .positions-table tr:nth-child(even) {
397 | background-color: rgba(0, 0, 0, 0.03);
398 | }
399 |
400 | [data-theme="dark"] .positions-table tr:nth-child(even) {
401 | background-color: rgba(255, 255, 255, 0.03);
402 | }
403 |
404 | .positions-table .positive {
405 | color: var(--success);
406 | }
407 |
408 | .positions-table .negative {
409 | color: var(--danger);
410 | }
411 |
412 | .no-positions {
413 | text-align: center !important;
414 | color: var(--text-light);
415 | padding: 1rem !important;
416 | }
417 |
418 | /* Position Info in Symbol Cards */
419 | .position-info {
420 | margin-top: 0;
421 | margin-bottom: 0;
422 | padding: 0.5rem;
423 | background-color: rgba(0, 0, 0, 0.03);
424 | border-radius: 0 0 var(--radius) var(--radius);
425 | display: block !important; /* Force display */
426 | border-top: 1px solid rgba(0, 0, 0, 0.05);
427 | }
428 |
429 | [data-theme="dark"] .position-info {
430 | background-color: rgba(255, 255, 255, 0.03);
431 | border-top: 1px solid rgba(255, 255, 255, 0.05);
432 | }
433 |
434 | /* Remove the header styles since we're not using it anymore */
435 |
436 | .position-metrics-vertical {
437 | display: flex;
438 | flex-direction: column;
439 | gap: 0.25rem; /* Reduce gap between rows */
440 | }
441 |
442 | .position-metric-row {
443 | display: flex;
444 | justify-content: space-between;
445 | align-items: center;
446 | }
447 |
448 | .position-metric-row .metric-title {
449 | font-size: 0.75rem; /* Slightly smaller font */
450 | color: var(--text-light);
451 | }
452 |
453 | .position-metric-row .metric-value {
454 | font-size: 0.85rem; /* Slightly smaller font */
455 | font-weight: 500;
456 | }
457 |
458 | .position-metric-row .metric-value.positive {
459 | color: var(--success);
460 | }
461 |
462 | .position-metric-row .metric-value.negative {
463 | color: var(--danger);
464 | }
465 |
466 | /* Symbol Card Styles */
467 | .symbol-card {
468 | background-color: var(--card-bg);
469 | border-radius: var(--radius);
470 | padding: 1rem;
471 | box-shadow: var(--shadow);
472 | transition: transform 0.2s, box-shadow 0.2s;
473 | max-height: 450px;
474 | display: flex;
475 | flex-direction: column;
476 | }
477 |
478 | .symbol-card:hover {
479 | transform: translateY(-2px);
480 | box-shadow: 0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05);
481 | }
482 |
483 | .symbol-header {
484 | display: flex;
485 | justify-content: space-between;
486 | align-items: center;
487 | margin-bottom: 1rem;
488 | }
489 |
490 | .symbol-header h3 {
491 | font-size: 1.125rem;
492 | font-weight: 600;
493 | }
494 |
495 | .last-updated {
496 | font-size: 0.7rem;
497 | color: var(--text-light);
498 | }
499 |
500 | .metrics {
501 | display: grid;
502 | grid-template-columns: repeat(3, 1fr);
503 | gap: 0.875rem;
504 | margin-bottom: 1.25rem;
505 | }
506 |
507 | .metric {
508 | text-align: center;
509 | }
510 |
511 | .metric-title {
512 | font-size: 0.7rem;
513 | color: var(--text-light);
514 | margin-bottom: 0.4rem;
515 | }
516 |
517 | .metric-value {
518 | font-size: 1.125rem;
519 | font-weight: 600;
520 | }
521 |
522 | .metric-value.decision {
523 | padding: 0.2rem 0.45rem;
524 | border-radius: var(--radius);
525 | font-size: 0.9rem;
526 | }
527 |
528 | .decision.buy {
529 | background-color: #dcfce7;
530 | color: #166534;
531 | }
532 |
533 | .decision.sell {
534 | background-color: #fee2e2;
535 | color: #991b1b;
536 | }
537 |
538 | .decision.hold {
539 | background-color: #fef9c3;
540 | color: #854d0e;
541 | }
542 |
543 | .price-chart-container {
544 | height: 130px;
545 | margin: 0.75rem 0;
546 | padding: 0.25rem;
547 | background-color: rgba(0, 0, 0, 0.03);
548 | border-radius: var(--radius);
549 | border: 1px solid var(--border);
550 | flex-shrink: 0;
551 | }
552 |
553 | .price-chart {
554 | width: 100%;
555 | height: 100%;
556 | min-height: 120px; /* Ensure canvas has minimum height */
557 | display: block;
558 | }
559 |
560 | .trade-info {
561 | padding-top: 0.6rem;
562 | border-top: 1px solid var(--border);
563 | margin-top: auto;
564 | }
565 |
566 | .trade-details {
567 | font-size: 0.875rem;
568 | color: var(--text-light);
569 | padding: 0.5rem;
570 | border-radius: var(--radius);
571 | }
572 |
573 | .trade-executed {
574 | background-color: #dcfce7;
575 | color: #166534;
576 | }
577 |
578 | /* Special case for SELL transactions to match decision indicator colors */
579 | .trade-executed.sell-transaction {
580 | background-color: #fee2e2;
581 | color: #991b1b;
582 | }
583 |
584 | .trade-failed {
585 | background-color: #fee2e2;
586 | color: #991b1b;
587 | }
588 |
589 | .trade-skipped {
590 | background-color: #fef9c3;
591 | color: #854d0e;
592 | }
593 |
594 | /* Test buttons - removed as requested */
595 |
596 | .history-section {
597 | background-color: var(--card-bg);
598 | border-radius: var(--radius);
599 | padding: 1.25rem;
600 | box-shadow: var(--shadow);
601 | margin-bottom: 1.25rem;
602 | }
603 |
604 | .history-section h3 {
605 | margin-bottom: 0.75rem;
606 | font-size: 1.125rem;
607 | font-weight: 600;
608 | }
609 |
610 | .activity-log {
611 | max-height: 300px; /* Increased height for more visible entries */
612 | overflow-y: auto;
613 | font-size: 0.8rem;
614 | /* Add padding for scrollbar to ensure content doesn't touch scrollbar */
615 | padding-right: 4px;
616 | }
617 |
618 | .activity-item {
619 | padding: 0.6rem 0;
620 | border-bottom: 1px solid var(--border);
621 | display: flex;
622 | align-items: center;
623 | gap: 0.5rem;
624 | /* Add padding to ensure spacing when scrollbar appears */
625 | padding-right: 8px;
626 | flex-wrap: nowrap;
627 | }
628 |
629 | .activity-item:last-child {
630 | border-bottom: none;
631 | }
632 |
633 | .activity-item.buy i {
634 | color: var(--success);
635 | flex-shrink: 0;
636 | }
637 |
638 | .activity-item.sell i {
639 | color: var(--danger);
640 | flex-shrink: 0;
641 | }
642 |
643 | .activity-item.hold i {
644 | color: var(--warning);
645 | flex-shrink: 0;
646 | }
647 |
648 | .activity-details {
649 | /* Allow details to shrink if needed */
650 | flex: 1;
651 | min-width: 0;
652 | white-space: nowrap;
653 | overflow: hidden;
654 | text-overflow: ellipsis;
655 | }
656 |
657 | .activity-timestamp {
658 | font-size: 0.75rem;
659 | color: var(--text-light);
660 | margin-left: auto;
661 | flex-shrink: 0; /* Prevent timestamp from shrinking */
662 | min-width: 120px; /* Ensure space for timestamp */
663 | text-align: right;
664 | }
665 |
666 | .empty-log {
667 | color: var(--text-light);
668 | font-style: italic;
669 | text-align: center;
670 | padding: 2rem 0;
671 | }
672 |
673 | .help-tip {
674 | margin-top: 1rem;
675 | padding: 0.75rem;
676 | background-color: rgba(59, 130, 246, 0.1);
677 | border-radius: var(--radius);
678 | color: var(--text);
679 | font-size: 0.9rem;
680 | border-left: 3px solid var(--primary);
681 | }
682 |
683 | .trading-status-tip {
684 | margin: 0 0 1.5rem 0;
685 | background-color: rgba(59, 130, 246, 0.15);
686 | border-left: 3px solid var(--primary);
687 | font-weight: 500;
688 | border-radius: var(--radius);
689 | padding: 0.9rem 1rem;
690 | box-shadow: var(--shadow);
691 | transition: background-color 0.5s, border-left-color 0.5s;
692 | }
693 |
694 | /* Trading enabled/disabled status classes */
695 | .trading-enabled {
696 | background-color: rgba(16, 185, 129, 0.15);
697 | border-left-color: #10b981;
698 | }
699 |
700 | .trading-enabled strong {
701 | color: #10b981;
702 | }
703 |
704 | .trading-disabled {
705 | background-color: rgba(239, 68, 68, 0.15);
706 | border-left-color: #ef4444;
707 | }
708 |
709 | .trading-disabled strong {
710 | color: #ef4444;
711 | }
712 |
713 | /* Dark mode adjustments */
714 | [data-theme="dark"] .trading-enabled strong {
715 | color: #34d399;
716 | }
717 |
718 | [data-theme="dark"] .trading-disabled strong {
719 | color: #f87171;
720 | }
721 |
722 | footer {
723 | background-color: var(--card-bg);
724 | /* Keep height but remove positioning constraints */
725 | min-height: var(--footer-height);
726 | display: flex;
727 | align-items: center;
728 | justify-content: center;
729 | box-shadow: 0 -1px 3px rgba(0, 0, 0, 0.05);
730 | width: 100%;
731 | z-index: 10;
732 | margin-top: 20px; /* Add space between content and footer */
733 | padding: 20px 0;
734 | }
735 |
736 | .footer-container {
737 | max-width: 1280px;
738 | margin: 0 auto;
739 | padding: 0 1rem;
740 | font-size: 0.8rem;
741 | color: var(--text-light);
742 | width: 100%;
743 | display: flex;
744 | flex-direction: column;
745 | align-items: center;
746 | justify-content: center;
747 | gap: 12px;
748 | height: 100%;
749 | }
750 |
751 | .footer-links {
752 | display: flex;
753 | gap: 1.25rem;
754 | justify-content: center;
755 | margin: 6px 0 8px 0;
756 | }
757 |
758 | .footer-link, .license-link {
759 | color: var(--primary);
760 | text-decoration: none;
761 | display: flex;
762 | align-items: center;
763 | gap: 0.25rem;
764 | transition: color 0.2s;
765 | font-weight: 500;
766 | font-size: 0.75rem;
767 | }
768 |
769 | .footer-link:hover, .license-link:hover {
770 | color: var(--primary-dark);
771 | }
772 |
773 | .license-link {
774 | display: inline;
775 | }
776 |
777 | .footer-link i {
778 | font-size: 0.9rem;
779 | }
780 |
781 | .system-info {
782 | display: flex;
783 | gap: 0.75rem;
784 | justify-content: center;
785 | flex-wrap: wrap;
786 | margin-bottom: 15px;
787 | }
788 |
789 | .badge {
790 | display: inline-block;
791 | padding: 0.15rem 0.4rem;
792 | border-radius: 9999px;
793 | font-size: 0.7rem;
794 | font-weight: 500;
795 | }
796 |
797 | .badge-blue {
798 | background-color: #dbeafe;
799 | color: #1e40af;
800 | }
801 |
802 | .badge-red {
803 | background-color: #fee2e2;
804 | color: #991b1b;
805 | }
806 |
807 | .badge-yellow {
808 | background-color: #fef9c3;
809 | color: #854d0e;
810 | }
811 |
812 | .badge-gray {
813 | background-color: #f3f4f6;
814 | color: #4b5563;
815 | }
816 |
817 | .badge-purple {
818 | background-color: #e9d5ff;
819 | color: #6b21a8;
820 | }
821 |
822 | .badge-green {
823 | background-color: #dcfce7;
824 | color: #166534;
825 | }
826 |
827 | .trading-toggle-container {
828 | margin-right: 1rem;
829 | }
830 |
831 | .btn-large {
832 | padding: 0.75rem 1.25rem;
833 | font-size: 1.1rem;
834 | font-weight: 600;
835 | }
836 |
837 | .btn-success {
838 | background-color: #4CAF50;
839 | color: white;
840 | }
841 |
842 | .btn-danger {
843 | background-color: #ef4444;
844 | color: white;
845 | }
846 |
847 | /* Animation for toasts */
848 | @keyframes fadeIn {
849 | from { opacity: 0; transform: translateY(-20px); }
850 | to { opacity: 1; transform: translateY(0); }
851 | }
852 |
853 | @keyframes fadeOut {
854 | from { opacity: 1; transform: translateY(0); }
855 | to { opacity: 0; transform: translateY(-20px); }
856 | }
857 |
858 | .simulation-banner {
859 | background-color: #8b5cf6;
860 | color: white;
861 | text-align: center;
862 | padding: 8px 0;
863 | font-weight: bold;
864 | position: fixed;
865 | top: 0;
866 | left: 0;
867 | width: 100%;
868 | z-index: 1000;
869 | box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
870 | font-size: 1.1rem;
871 | text-shadow: 0px 1px 2px rgba(0, 0, 0, 0.2);
872 | animation: pulse-attention 2s infinite;
873 | }
874 |
875 | @keyframes pulse-attention {
876 | 0% { background-color: #8b5cf6; }
877 | 50% { background-color: #7c3aed; }
878 | 100% { background-color: #8b5cf6; }
879 | }
880 |
881 | @media (max-width: 768px) {
882 | .metrics {
883 | grid-template-columns: 1fr 1fr;
884 | }
885 |
886 | .metric:last-child {
887 | grid-column: span 2;
888 | }
889 | }
890 |
891 | /* Documentation modal */
892 | .modal {
893 | display: none; /* Hidden by default */
894 | position: fixed;
895 | z-index: 1000;
896 | left: 0;
897 | top: 0;
898 | width: 100%;
899 | height: 100%;
900 | background-color: var(--modal-overlay);
901 | overflow: auto;
902 | }
903 |
904 | /* When modal is shown */
905 | .modal.show {
906 | display: flex;
907 | align-items: flex-start;
908 | justify-content: center;
909 | }
910 |
911 | /* Theme Selector Styles */
912 | .theme-selector {
913 | position: relative;
914 | display: inline-block;
915 | }
916 |
917 | .theme-button {
918 | background-color: transparent;
919 | color: var(--text);
920 | border: 1px solid var(--border);
921 | border-radius: var(--radius);
922 | padding: 0.35rem 0.7rem;
923 | cursor: pointer;
924 | display: flex;
925 | align-items: center;
926 | gap: 0.5rem;
927 | font-size: 0.875rem;
928 | }
929 |
930 | .theme-button:hover {
931 | background-color: rgba(203, 213, 225, 0.1);
932 | }
933 |
934 | .theme-options {
935 | display: none;
936 | position: absolute;
937 | right: 0;
938 | top: 100%;
939 | margin-top: 0.5rem;
940 | background-color: var(--card-bg);
941 | border: 1px solid var(--border);
942 | border-radius: var(--radius);
943 | box-shadow: var(--shadow);
944 | z-index: 10;
945 | min-width: 120px;
946 | }
947 |
948 | .theme-options.show {
949 | display: block;
950 | }
951 |
952 | .theme-option {
953 | padding: 0.5rem 1rem;
954 | cursor: pointer;
955 | color: var(--text);
956 | font-size: 0.875rem;
957 | display: flex;
958 | align-items: center;
959 | gap: 0.5rem;
960 | transition: background-color 0.2s;
961 | }
962 |
963 | .theme-option:hover {
964 | background-color: rgba(203, 213, 225, 0.1);
965 | }
966 |
967 | /* Ensure the Dark theme option is visible in both light and dark modes */
968 | .theme-option[data-theme="dark"] {
969 | color: #1e293b;
970 | }
971 |
972 | /* Make dark theme option text visible in dark mode */
973 | [data-theme="dark"] .theme-option[data-theme="dark"] {
974 | color: #ffffff;
975 | }
976 |
977 | .theme-option.active {
978 | background-color: rgba(59, 130, 246, 0.1);
979 | color: var(--primary);
980 | font-weight: 500;
981 | }
982 |
983 | .modal-content {
984 | background-color: var(--card-bg);
985 | margin: 40px auto;
986 | padding: 24px;
987 | border-radius: var(--radius);
988 | box-shadow: var(--shadow);
989 | max-width: 800px;
990 | width: 90%;
991 | max-height: 80vh;
992 | overflow-y: auto;
993 | position: relative;
994 | height: fit-content; /* Adjust height to fit content */
995 | }
996 |
997 | /* Liquidation modal specific styles */
998 | #liquidation-modal .modal-content {
999 | max-width: 400px;
1000 | padding: 12px;
1001 | margin: 15% auto; /* Center vertically at 15% from top instead of fixed 40px */
1002 | position: relative;
1003 | height: auto; /* Allow height to adjust to content */
1004 | display: flex;
1005 | flex-direction: column;
1006 | min-height: 0; /* Prevent extra space */
1007 | }
1008 |
1009 | #liquidation-modal .modal-body {
1010 | padding-bottom: 0; /* Remove padding at the bottom */
1011 | }
1012 |
1013 | #liquidation-modal .modal-header {
1014 | margin-bottom: 0.75rem;
1015 | padding-bottom: 0.5rem;
1016 | }
1017 |
1018 | #liquidation-modal .modal-header h2 {
1019 | font-size: 1.25rem;
1020 | }
1021 |
1022 | #liquidation-modal .warning-message {
1023 | background-color: rgba(220, 38, 38, 0.1);
1024 | border-left: 3px solid var(--danger);
1025 | padding: 0.75rem;
1026 | border-radius: var(--radius);
1027 | margin-bottom: 0.75rem;
1028 | }
1029 |
1030 | #liquidation-modal .warning-row {
1031 | display: flex;
1032 | align-items: flex-start;
1033 | }
1034 |
1035 | #liquidation-modal .warning-icon {
1036 | flex-shrink: 0;
1037 | margin-right: 0.5rem;
1038 | padding-top: 2px;
1039 | }
1040 |
1041 | #liquidation-modal .warning-content {
1042 | flex-grow: 1;
1043 | }
1044 |
1045 | #liquidation-modal .warning-title {
1046 | margin-bottom: 0.5rem;
1047 | line-height: 1.2;
1048 | }
1049 |
1050 | #liquidation-modal .warning-message i {
1051 | color: var(--danger);
1052 | font-size: 1.1rem;
1053 | }
1054 |
1055 |
1056 |
1057 | #liquidation-modal .warning-message ul {
1058 | margin: 0.5rem 0;
1059 | padding-left: 1.5rem;
1060 | }
1061 |
1062 | #liquidation-modal .warning-message li {
1063 | margin-bottom: 0.25rem;
1064 | }
1065 |
1066 | #liquidation-modal .modal-actions {
1067 | display: flex;
1068 | justify-content: flex-end;
1069 | gap: 0.5rem;
1070 | margin-top: 0.75rem;
1071 | margin-bottom: 0; /* Ensure no extra space at bottom */
1072 | }
1073 |
1074 | .modal-header {
1075 | display: flex;
1076 | justify-content: space-between;
1077 | align-items: center;
1078 | margin-bottom: 1.5rem;
1079 | padding-bottom: 1rem;
1080 | border-bottom: 1px solid var(--border);
1081 | }
1082 |
1083 | .modal-header h2 {
1084 | font-size: 1.5rem;
1085 | font-weight: 600;
1086 | color: var(--primary);
1087 | }
1088 |
1089 | .close-modal {
1090 | background: none;
1091 | border: none;
1092 | font-size: 1.5rem;
1093 | cursor: pointer;
1094 | color: var(--text-light);
1095 | }
1096 |
1097 | .close-modal:hover {
1098 | color: var(--text);
1099 | }
1100 |
1101 | .modal-body h3 {
1102 | font-size: 1.2rem;
1103 | font-weight: 600;
1104 | margin: 1.5rem 0 1rem;
1105 | color: var(--primary);
1106 | }
1107 |
1108 | .modal-body h3:first-child {
1109 | margin-top: 0;
1110 | }
1111 |
1112 | .modal-body p {
1113 | margin-bottom: 1rem;
1114 | line-height: 1.6;
1115 | }
1116 |
1117 | .modal-body ul, .modal-body ol {
1118 | margin-bottom: 1rem;
1119 | padding-left: 1.5rem;
1120 | }
1121 |
1122 | .modal-body li {
1123 | margin-bottom: 0.5rem;
1124 | line-height: 1.6;
1125 | }
1126 |
1127 | .modal-body code {
1128 | background-color: rgba(203, 213, 225, 0.2);
1129 | padding: 0.2rem 0.4rem;
1130 | border-radius: 0.25rem;
1131 | font-family: monospace;
1132 | font-size: 0.9em;
1133 | color: var(--text);
1134 | border: 1px solid var(--border);
1135 | }
1136 |
1137 | .modal-body .note {
1138 | background-color: rgba(245, 158, 11, 0.1);
1139 | border-left: 3px solid var(--warning);
1140 | padding: 0.75rem;
1141 | border-radius: var(--radius);
1142 | margin: 1rem 0;
1143 | font-size: 0.9em;
1144 | }
1145 |
1146 | .service-accounts {
1147 | display: grid;
1148 | grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
1149 | gap: 1rem;
1150 | margin: 1rem 0;
1151 | }
1152 |
1153 | .service-account {
1154 | background-color: rgba(59, 130, 246, 0.05);
1155 | border: 1px solid var(--border);
1156 | border-radius: var(--radius);
1157 | padding: 1rem;
1158 | transition: transform 0.2s, box-shadow 0.2s;
1159 | }
1160 |
1161 | .service-account:hover {
1162 | transform: translateY(-2px);
1163 | box-shadow: 0 4px 6px -1px rgba(0, 0, 0, 0.1);
1164 | }
1165 |
1166 | .service-account h4 {
1167 | margin-top: 0;
1168 | font-size: 1.1rem;
1169 | margin-bottom: 0.75rem;
1170 | color: var(--primary);
1171 | }
1172 |
1173 | .service-account p {
1174 | font-size: 0.9rem;
1175 | margin-bottom: 1rem;
1176 | }
1177 |
1178 | .account-link {
1179 | display: inline-block;
1180 | background-color: var(--primary);
1181 | color: white;
1182 | padding: 0.4rem 0.75rem;
1183 | border-radius: var(--radius);
1184 | text-decoration: none;
1185 | font-size: 0.85rem;
1186 | font-weight: 500;
1187 | transition: background-color 0.2s;
1188 | }
1189 |
1190 | .account-link:hover {
1191 | background-color: var(--primary-dark);
1192 | }
1193 |
1194 | .modal-body .code-command {
1195 | background-color: var(--card-bg);
1196 | color: var(--primary);
1197 | border: 1px solid var(--border);
1198 | padding: 0.3rem 0.5rem;
1199 | border-radius: 0.25rem;
1200 | font-family: monospace;
1201 | font-weight: 500;
1202 | font-size: 0.9em;
1203 | }
1204 |
1205 | .modal-body table {
1206 | width: 100%;
1207 | border-collapse: collapse;
1208 | margin-bottom: 1.5rem;
1209 | }
1210 |
1211 | .modal-body th, .modal-body td {
1212 | border: 1px solid var(--border);
1213 | padding: 0.75rem;
1214 | text-align: left;
1215 | }
1216 |
1217 | .modal-body th {
1218 | background-color: rgba(203, 213, 225, 0.1);
1219 | font-weight: 600;
1220 | }
1221 |
1222 | .modal-body .btn {
1223 | display: inline-block;
1224 | margin-top: 1rem;
1225 | }
1226 |
1227 | @media (max-width: 768px) {
1228 | .header-container {
1229 | flex-direction: column;
1230 | gap: 0.75rem;
1231 | padding: 0.75rem 1rem;
1232 | justify-content: center;
1233 | }
1234 |
1235 | header {
1236 | height: auto;
1237 | min-height: var(--header-height);
1238 | padding: 8px 0;
1239 | }
1240 |
1241 | .dashboard-header {
1242 | flex-direction: column;
1243 | align-items: flex-start;
1244 | gap: 1rem;
1245 | }
1246 |
1247 | .symbols-container {
1248 | grid-template-columns: 1fr;
1249 | }
1250 |
1251 | .modal-content {
1252 | width: 95%;
1253 | margin: 20px auto;
1254 | padding: 16px;
1255 | }
1256 | }
--------------------------------------------------------------------------------
/frontend/static/img/dashboard-screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rawveg/trader-magic/6390b1030c3ac50634d1f72ffadfccb0e862127a/frontend/static/img/dashboard-screenshot.png
--------------------------------------------------------------------------------
/frontend/static/img/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rawveg/trader-magic/6390b1030c3ac50634d1f72ffadfccb0e862127a/frontend/static/img/favicon.ico
--------------------------------------------------------------------------------
/frontend/templates/index.html:
--------------------------------------------------------------------------------
Dashboard template (visible text and template logic):

TraderMagic Dashboard

{% if debug_mode %}
  DEBUG MODE - All trades are simulated locally with NO API CALLS (even in paper trading mode)
{% endif %}

Account summary panel:
  Portfolio Value: $0.00
  Cash Balance: $0.00
  Buying Power: $0.00
  Daily Change: $0.00
  [Liquidate All Positions]

{% for symbol in symbols %}
  Symbol card:
    RSI Value: --
    Decision: --
    Status: --
    No recent trades
    Quantity: 0
    Value: $0.00
    P/L: $0.00
{% endfor %}

Recent Activity
  No recent activity

Liquidation confirmation modal:
  Warning: This action will:
    - Immediately sell all open positions
    - Stop all active trading processes
    - This action cannot be undone
  Are you sure you want to proceed?
  [Cancel]  [Yes, Liquidate All]

Documentation modal:

  Overview
  TraderMagic is an AI-powered automated trading system that uses RSI technical indicators to make trading decisions. The system consists of four main components:
    - Data Retrieval Service: Fetches RSI data from TAAPI.io
    - AI Decision Engine: Uses Ollama LLM to analyze RSI data and decide whether to buy, sell, or hold
    - Trade Execution Service: Connects to Alpaca for executing trades based on AI decisions
    - Web Dashboard: This interface for monitoring trading activity in real-time

  Required Accounts
  To use TraderMagic, you'll need accounts with these services:
    - TAAPI.io: Provides technical analysis data (RSI values). Their free tier allows limited usage, but paid tiers offer more frequent updates and additional symbols. (Sign up for TAAPI)
    - Alpaca: Executes trades based on system signals. Offers paper trading (simulated) for testing without using real money. (Sign up for Alpaca)
  Note: After creating accounts, you'll need to add your API keys to the .env file to connect the system to these services.

  Dashboard Updates
  This dashboard updates in two ways:
    - Automatic updates: The dashboard refreshes every 15 seconds
    - Manual refresh: Use the "Refresh" button for immediate updates
  Both update methods only retrieve data from Redis cache, not directly from external APIs. This ensures no additional load on rate-limited services.

  Account Summary
  The account summary section provides key financial metrics:
    - Portfolio Value: Total value of your account
    - Cash Balance: Available cash in your account
    - Buying Power: Available funds for trading
    - Daily Change: Today's portfolio change, color-coded green for positive and red for negative changes

  Theme Options
  The dashboard supports three theme options, accessible via the theme selector in the top-right corner:
    - Auto: Automatically matches your system's theme preference
    - Light: Classic light theme for daytime use
    - Dark: Reduced eye strain for nighttime use

  TAAPI.io Rate Limits
  The system is designed to respect TAAPI.io's rate limits based on your subscription tier:

    Tier   | Rate Limit                | Recommended Poll Interval
    Free   | 1 request / 15 seconds    | 300 seconds (5 minutes)
    Basic  | 5 requests / 15 seconds   | 60 seconds (1 minute)
    Pro    | 30 requests / 15 seconds  | 10 seconds
    Expert | 75 requests / 15 seconds  | 5 seconds

  During initial startup, you may see 429 rate limit errors in the logs - this is expected while the services initialize and space out their requests.

  Trading Information
  The system trades a small percentage of your portfolio (default: 2%) and includes safeguards:
    - Balance checking ensures trades are only executed when sufficient funds are available
    - Minimum order size enforcement prevents very small trades
    - Paper trading mode is enabled for safe testing (indicated by the blue "Paper Trading" badge)

  Symbol Formats
  The system supports two types of symbols:
    - Cryptocurrencies: Use format like "BTC/USDT", "ETH/USDT", etc.
    - Stocks: Use simple ticker symbols like "AAPL", "TSLA", etc. - these will automatically be converted to the right format
  Note: Free TAAPI accounts are limited to specific symbols only. If you're using the free tier, you may need to upgrade for full symbol support.

  Debug Mode
  When running in debug mode (indicated by a yellow banner at the top), you can:
    - Test Trading: Execute simulated trades directly from the dashboard
    - Safe Testing: All trades are simulated locally with no API calls
    - Instant Feedback: Get immediate responses about trade decisions
  Debug mode is perfect for testing trading strategies and UI functionality without affecting your account.

  Troubleshooting
  If you encounter issues:
    - Check Docker logs: docker compose logs -f
    - Ensure your API keys are correctly configured in the .env file
    - Verify that all services are running: docker compose ps
    - Restart the system using the restart script: ./restart.sh
  For more detailed information, refer to the README.md file in the project repository.
--------------------------------------------------------------------------------
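The dashboard notes above state that both refresh paths read only from the Redis cache. The sketch below illustrates that read path outside the frontend; it is an assumption-laden example rather than project code: it presumes the compose stack is running, that the host/port match the defaults in src/config/settings.py, and that the rsi:/price: keys hold plain JSON strings (the set_json format of the utils redis_client is not shown in this section).

# Illustrative sketch: read the cached values the dashboard renders straight from Redis.
# Host, port and db mirror the defaults in src/config/settings.py; the assumption that
# set_json() stores plain JSON strings is mine, not taken from the project's redis_client.
import json

import redis

r = redis.Redis(host="redis", port=6379, db=0, decode_responses=True)

for symbol in ("BTC/USD", "ETH/USD"):
    raw_rsi = r.get(f"rsi:{symbol}")      # written by the data retrieval service
    raw_price = r.get(f"price:{symbol}")  # written alongside the RSI value
    rsi = json.loads(raw_rsi) if raw_rsi else None
    price = json.loads(raw_price) if raw_price else None
    print(symbol,
          rsi["value"] if rsi else "no RSI cached",
          price["close"] if price else "no price cached")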
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.31.0
2 | alpaca-py==0.8.2
3 | python-dotenv==1.0.0
4 | redis==5.0.1
5 | pydantic>=1.9.0,<2.0.0 # Use a version compatible with alpaca-py
6 | loguru==0.7.2
7 | httpx==0.25.2
8 | tenacity==8.2.3
9 | pytz==2025.1
--------------------------------------------------------------------------------
/restart.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # restart.sh - Utility script to restart all TraderMagic services
3 | #
4 | # This script:
5 | # 1. Stops all containers
6 | # 2. Rebuilds frontend and redis first (dependencies)
7 | # 3. Waits for redis to be fully ready
8 | # 4. Starts all remaining services
9 | #
10 | # Usage: ./restart.sh
11 | # Author: Claude AI
12 |
13 | echo "=================================================="
14 | echo " TraderMagic - Service Restart Utility "
15 | echo "=================================================="
16 | echo "Stopping and rebuilding TraderMagic containers..."
17 | docker compose down
18 |
19 | echo "Building and starting core dependencies first..."
20 | docker compose up -d --build frontend redis
21 |
22 | echo "Waiting for Redis to initialize (3 seconds)..."
23 | sleep 3
24 |
25 | echo "Forcing trading to disabled state for safety..."
26 | docker compose exec redis redis-cli set trading_enabled false
27 |
28 | echo "Starting remaining trading services..."
29 | docker compose up -d
30 |
31 | echo "=================================================="
32 | echo "All services are now running!"
33 | echo "Web UI is available at: http://localhost:9753"
34 | echo ""
35 | echo "Use 'docker compose logs -f' to follow the logs"
36 | echo "=================================================="
--------------------------------------------------------------------------------
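restart.sh forces the kill switch off by writing trading_enabled straight into Redis with redis-cli. A hedged Python equivalent is sketched below for hosts without redis-cli; it assumes the Redis port is published on localhost and mirrors the string comparison the AI decision service performs later in this listing.

# Sketch only: flip the same trading_enabled flag that restart.sh sets via redis-cli.
# Assumes Redis is reachable on localhost:6379; adjust host/port for your setup.
import redis

r = redis.Redis(host="localhost", port=6379, db=0, decode_responses=True)

r.set("trading_enabled", "false")   # same safety default the restart script enforces

# The AI decision service treats anything other than the string "true" as disabled,
# and a missing key also falls back to disabled.
enabled = r.get("trading_enabled") == "true"
print("trading enabled:", enabled)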
/run_tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Create virtual environment if it doesn't exist
4 | if [ ! -d "venv" ]; then
5 | echo "Creating virtual environment..."
6 | python -m venv venv
7 | fi
8 |
9 | # Activate virtual environment
10 | source venv/bin/activate
11 |
12 | # Install requirements
13 | echo "Installing requirements..."
14 | pip install -r requirements.txt
15 |
16 | # Run tests
17 | echo "Running tests..."
18 | python -m unittest discover -s tests
19 |
20 | # Deactivate virtual environment
21 | deactivate
--------------------------------------------------------------------------------
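run_tests.sh relies on unittest discovery over the tests/ directory. Below is a minimal, hypothetical test module of the shape that discovery would pick up; it exercises the buy/sell/hold extraction pattern used in src/ai_decision/service.py and is illustrative only, not a copy of the repository's own tests.

# tests/test_decision_regex.py (hypothetical example, not the repo's actual test file)
import re
import unittest

# Same pattern the AI decision service uses to pull a verdict out of the LLM response.
DECISION_RE = re.compile(r'\b(buy|sell|hold)\b')

class TestDecisionRegex(unittest.TestCase):
    def test_extracts_single_word_answer(self):
        self.assertEqual(DECISION_RE.search("buy").group(1), "buy")

    def test_extracts_decision_from_noisy_response(self):
        response = "Based on the RSI of 24.3 I would HOLD for now."
        self.assertEqual(DECISION_RE.search(response.lower()).group(1), "hold")

    def test_returns_none_when_no_decision_present(self):
        self.assertIsNone(DECISION_RE.search("no clear answer"))

if __name__ == "__main__":
    unittest.main()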
/scripts/init-ollama.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Script to wait for Ollama server and pull the model
3 |
4 | # Default to llama3.2:latest if not set
5 | MODEL="${OLLAMA_MODEL:-llama3.2:latest}"
6 | echo "Will pull model: $MODEL"
7 |
8 | # Wait for Ollama to be ready
9 | echo "Waiting for Ollama server to be ready..."
10 | for i in $(seq 1 30); do
11 | if curl -s http://ollama:11434/api/version > /dev/null 2>&1; then
12 | echo "Ollama server is ready!"
13 | echo "Pulling model: $MODEL"
14 | curl -X POST http://ollama:11434/api/pull -d "{\"name\":\"$MODEL\"}"
15 | echo "Model pull initiated. This may take some time to complete."
16 | exit 0
17 | fi
18 | echo "Waiting for Ollama server (attempt $i/30)..."
19 | sleep 5
20 | done
21 |
22 | # If we get here, we timed out
23 | echo "Timed out waiting for Ollama server"
24 | exit 1
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | # Centralize version number
4 | VERSION = "1.0.0"
5 |
6 | setup(
7 | name="trader-magic",
8 | version=VERSION,
9 | packages=find_packages(),
10 | install_requires=[
11 | # Requirements are already in requirements.txt
12 | ],
13 | )
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
1 | # Main package initialization
--------------------------------------------------------------------------------
/src/ai_decision/__init__.py:
--------------------------------------------------------------------------------
1 | from src.config import config
2 |
3 | # Use the real client
4 | from .ollama_client import ollama_client
5 | from .service import ai_decision_service
6 |
7 | __all__ = ["ollama_client", "ai_decision_service"]
--------------------------------------------------------------------------------
/src/ai_decision/ollama_client.py:
--------------------------------------------------------------------------------
1 | import json
2 | import httpx
3 | import time
4 | import threading
5 | from typing import Dict, Any, Optional
6 | from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
7 |
8 | from src.config import config
9 | from src.utils import get_logger
10 | from src.utils.redis_client import redis_client
11 |
12 | logger = get_logger("ollama_client")
13 |
14 | class OllamaClient:
15 | def __init__(self):
16 | self.host = config.ollama.host
17 | self.model = config.ollama.model
18 | self.api_url = f"{self.host}/api/generate"
19 | self.model_ready = False
20 |
21 | # Set initial status in Redis
22 | self._update_model_status("initializing", "Connecting to Ollama server...")
23 |
24 | # Start a background thread to check connection and model status
25 | threading.Thread(target=self._monitor_model_status, daemon=True).start()
26 |
27 | def _update_model_status(self, status: str, message: str):
28 | """Update the Ollama model status in Redis"""
29 | status_data = {
30 | "status": status, # 'initializing', 'downloading', 'ready', 'error'
31 | "message": message,
32 | "timestamp": time.time(),
33 | "model": self.model
34 | }
35 | redis_client.set_json("ollama:status", status_data)
36 | logger.info(f"Ollama status: {status} - {message}")
37 |
38 | def _monitor_model_status(self):
39 | """Background thread to monitor Ollama server and model status"""
40 | retry_count = 0
41 | max_retries = 30 # Try for 5 minutes (10 second intervals)
42 |
43 | while retry_count < max_retries:
44 | try:
45 | # First check if server is available
46 | response = httpx.get(f"{self.host}/api/tags", timeout=5.0)
47 | response.raise_for_status()
48 |
49 | # Check if our model is available
50 | available_models = [model["name"] for model in response.json().get("models", [])]
51 |
52 | if not available_models:
53 | self._update_model_status("downloading", "Waiting for Ollama models to load...")
54 | elif self.model not in available_models:
55 | self._update_model_status("downloading", f"Model {self.model} is downloading...")
56 | # Try to initiate pull if not already in progress
57 | try:
58 | self._pull_model(background=True)
59 |                     except Exception:
60 | pass # Ignore errors, might already be pulling
61 | else:
62 | # Model is ready
63 | self._update_model_status("ready", f"Model {self.model} is ready")
64 | self.model_ready = True
65 | return
66 |
67 | except Exception as e:
68 |                 self._update_model_status("error", "Waiting for Ollama server to be available...")
69 | logger.warning(f"Ollama server not ready: {e}")
70 |
71 | # Wait before checking again
72 | time.sleep(10)
73 | retry_count += 1
74 |
75 | # If we got here, we exceeded retry attempts
76 | self._update_model_status("error", "Failed to connect to Ollama after multiple attempts")
77 | logger.error("Failed to establish connection with Ollama after maximum retry attempts")
78 |
79 | def _test_connection(self):
80 | """Test connection to Ollama server and pull model if needed"""
81 | try:
82 | # First check connection
83 | response = httpx.get(f"{self.host}/api/tags")
84 | response.raise_for_status()
85 | available_models = [model["name"] for model in response.json().get("models", [])]
86 |
87 | if not available_models:
88 | logger.warning("No models found on Ollama server, pulling required model")
89 | self._update_model_status("downloading", "No models found, pulling required model...")
90 | self._pull_model()
91 | elif self.model not in available_models:
92 | logger.warning(f"Selected model '{self.model}' not found. Pulling model...")
93 | self._update_model_status("downloading", f"Model {self.model} not found, downloading...")
94 | self._pull_model()
95 | else:
96 | logger.info(f"Model {self.model} is already available")
97 | self._update_model_status("ready", f"Model {self.model} is ready")
98 | self.model_ready = True
99 |
100 | logger.info(f"Connected to Ollama server at {self.host}")
101 |
102 | except httpx.HTTPError as e:
103 | logger.error(f"Failed to connect to Ollama server: {e}")
104 | self._update_model_status("error", f"Failed to connect to Ollama server: {str(e)}")
105 | raise ConnectionError(f"Failed to connect to Ollama server at {self.host}")
106 |
107 | def _pull_model(self, background=False):
108 | """Pull the required model from Ollama server"""
109 | try:
110 | logger.info(f"Pulling model {self.model}...")
111 |
112 | # Use the Ollama API to pull the model
113 | pull_url = f"{self.host}/api/pull"
114 | payload = {"name": self.model}
115 |
116 | # Send the request (this will take time for large models)
117 | timeout = None if background else 600.0 # No timeout for background pull
118 | response = httpx.post(pull_url, json=payload, timeout=timeout)
119 | response.raise_for_status()
120 |
121 | logger.info(f"Successfully pulled model {self.model}")
122 | if not background:
123 | self._update_model_status("ready", f"Model {self.model} is ready")
124 | self.model_ready = True
125 |
126 | except httpx.HTTPError as e:
127 | logger.error(f"Failed to pull model {self.model}: {e}")
128 | self._update_model_status("error", f"Failed to pull model: {str(e)}")
129 | raise
130 |
131 | @retry(
132 | stop=stop_after_attempt(3),
133 | wait=wait_exponential(multiplier=1, min=2, max=10),
134 | retry=retry_if_exception_type((httpx.HTTPError, ConnectionError)),
135 | reraise=True
136 | )
137 | async def generate(self, prompt: str, system_prompt: Optional[str] = None) -> str:
138 | """
139 | Generate text using Ollama
140 |
141 | Args:
142 | prompt: The user prompt
143 | system_prompt: Optional system instructions
144 |
145 | Returns:
146 | Generated text response
147 | """
148 | if not self.model_ready:
149 | status_data = redis_client.get_json("ollama:status") or {}
150 | status = status_data.get("status", "unknown")
151 | message = status_data.get("message", "Model not ready")
152 | logger.warning(f"Attempted to generate text while model not ready. Status: {status}, Message: {message}")
153 | return f"Model not ready: {message}"
154 |
155 | payload = {
156 | "model": self.model,
157 | "prompt": prompt,
158 | "stream": False,
159 | "options": {
160 | "temperature": 0.1, # Low temperature for more deterministic responses
161 | "num_predict": 500 # Limit response length
162 | }
163 | }
164 |
165 | if system_prompt:
166 | payload["system"] = system_prompt
167 |
168 | try:
169 | async with httpx.AsyncClient() as client:
170 | response = await client.post(
171 | self.api_url,
172 | json=payload,
173 | timeout=30.0
174 | )
175 | response.raise_for_status()
176 | result = response.json()
177 |
178 | # Extract response text
179 | if "response" in result:
180 | return result["response"]
181 | else:
182 | logger.error(f"Unexpected response format from Ollama: {result}")
183 | return ""
184 |
185 | except httpx.HTTPError as e:
186 | logger.error(f"Error calling Ollama API: {e}")
187 | self._update_model_status("error", f"Error calling Ollama API: {str(e)}")
188 | raise
189 |
190 | ollama_client = OllamaClient()
--------------------------------------------------------------------------------
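generate() is an async method normally driven from the AI decision service's event loop. The usage sketch below shows the direct call pattern; it assumes the Ollama and Redis containers from the compose stack are reachable, since importing the module constructs the singleton and starts the status-monitor thread, and the prompt text is only an example.

# Usage sketch: call the Ollama client directly, the way the decision service does internally.
# Assumes the Ollama and Redis containers are up and the configured model has been pulled.
import asyncio

from src.ai_decision.ollama_client import ollama_client

async def main() -> None:
    answer = await ollama_client.generate(
        prompt="The current RSI for BTC/USD is 27.50. Should I buy, sell, or hold?",
        system_prompt="Respond with a single word: buy, sell, or hold.",
    )
    print(answer)

asyncio.run(main())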
/src/ai_decision/service.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 | import time
4 | import threading
5 | from typing import Optional, Dict
6 | from datetime import datetime
7 |
8 | from src.utils import get_logger, RSIData, TradeSignal, TradingDecision
9 | from src.utils.redis_client import redis_client
10 | from src.ai_decision.ollama_client import ollama_client
11 | from src.config.settings import config
12 |
13 | logger = get_logger("ai_decision_service")
14 |
15 | # Import version from setup.py
16 | import sys
17 | sys.path.insert(0, '..')
18 | from setup import VERSION
19 |
20 | if len(sys.argv) > 1 and sys.argv[1] == '--license':
21 | print(f"TraderMagic v{VERSION} - Licensed under AGPL-3.0")
22 | sys.exit(0)
23 |
24 | # System prompt for the LLM
25 | SYSTEM_PROMPT = """
26 | You are a world expert at stock and cryptocurrency trading.
27 | Your task is to analyze RSI (Relative Strength Index) data and make trading decisions.
28 | You should respond ONLY with one of these three decisions: "buy", "sell", or "hold".
29 | DO NOT include any explanations, analysis, or additional text in your response.
30 | Just respond with a single word: "buy", "sell", or "hold".
31 |
32 | Guidelines for RSI trading:
33 | - RSI below 30 typically indicates oversold conditions (potential buy)
34 | - RSI above 70 typically indicates overbought conditions (potential sell)
35 | - RSI between 30-70 is generally neutral (potential hold)
36 | - Consider recent trends when making decisions
37 | """
38 |
39 | class AIDecisionService:
40 | def __init__(self):
41 | self.loop = asyncio.new_event_loop()
42 | asyncio.set_event_loop(self.loop)
43 |
44 | async def analyze_rsi(self, rsi_data: RSIData) -> Optional[TradeSignal]:
45 | """
46 | Analyze RSI data and return a trade signal
47 |
48 | Args:
49 | rsi_data: RSI data for analysis
50 |
51 | Returns:
52 | TradeSignal object with the decision
53 | """
54 | # Check Redis first for latest trading_enabled status, default to False for safety
55 | trading_enabled_data = redis_client.get("trading_enabled")
56 | trading_enabled = trading_enabled_data == "true" if trading_enabled_data is not None else False
57 |
58 | if not trading_enabled:
59 | logger.info(f"Trading is disabled. Skipping signal generation for {rsi_data.symbol}")
60 | return None
61 |
62 | prompt = f"The current Relative Strength Index (RSI) for {rsi_data.symbol} is {rsi_data.value:.2f}. Based on this information alone, should I buy, sell, or hold?"
63 |
64 | try:
65 | response = await ollama_client.generate(prompt, SYSTEM_PROMPT)
66 | logger.info(f"LLM response for {rsi_data.symbol}: {response}")
67 |
68 | # Extract the decision using regex to find 'buy', 'sell', or 'hold'
69 | match = re.search(r'\b(buy|sell|hold)\b', response.lower())
70 | if match:
71 | decision_str = match.group(1)
72 |
73 | # Map the decision string to TradingDecision enum
74 | decision_map = {
75 | "buy": TradingDecision.BUY,
76 | "sell": TradingDecision.SELL,
77 | "hold": TradingDecision.HOLD
78 | }
79 |
80 | decision = decision_map.get(decision_str, TradingDecision.HOLD)
81 |
82 | # Create the trade signal
83 | signal = TradeSignal(
84 | symbol=rsi_data.symbol,
85 | decision=decision,
86 | rsi_value=rsi_data.value
87 | )
88 |
89 | # Store the signal in Redis
90 | redis_key = f"signal:{rsi_data.symbol}"
91 | redis_client.set_json(redis_key, signal.dict())
92 |
93 | logger.info(f"Generated trade signal for {rsi_data.symbol}: {decision.value}")
94 | return signal
95 | else:
96 | logger.error(f"Could not extract a clear decision from LLM response: {response}")
97 | return None
98 |
99 | except Exception as e:
100 | logger.error(f"Error analyzing RSI data: {e}")
101 | return None
102 |
103 | def get_decision(self, rsi_data: RSIData) -> Optional[TradeSignal]:
104 | """
105 | Get a trading decision based on RSI data
106 |
107 | Args:
108 | rsi_data: RSI data for analysis
109 |
110 | Returns:
111 | TradeSignal object or None if analysis fails
112 | """
113 | try:
114 | return self.loop.run_until_complete(self.analyze_rsi(rsi_data))
115 | except Exception as e:
116 | logger.error(f"Error in get_decision: {e}")
117 | return None
118 |
119 |
120 | def get_latest_signal(self, symbol: str) -> Optional[TradeSignal]:
121 | """
122 | Get the latest trade signal for a symbol from Redis
123 |
124 | Args:
125 | symbol: Trading pair symbol
126 |
127 | Returns:
128 | TradeSignal object or None if not found
129 | """
130 | redis_key = f"signal:{symbol}"
131 | data = redis_client.get_json(redis_key)
132 | if data:
133 | return TradeSignal(**data)
134 | return None
135 |
136 | # Singleton instance
137 | ai_decision_service = AIDecisionService()
138 |
139 | # Add this for module execution
140 | if __name__ == "__main__":
141 | import time
142 | logger.info("Starting AI Decision Service as standalone")
143 |
144 | # Keep the main thread alive
145 | try:
146 | while True:
147 | time.sleep(1)
148 | except KeyboardInterrupt:
149 | logger.info("Shutting down AI Decision Service")
--------------------------------------------------------------------------------
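After analyze_rsi() stores a signal under signal:<symbol>, any component can read it back through get_latest_signal(). A short usage sketch follows; it assumes the project's Python environment with Redis reachable and uses BTC/USD purely as an example symbol.

# Usage sketch: read back the most recent trade signal the decision service stored in Redis.
from src.ai_decision import ai_decision_service

signal = ai_decision_service.get_latest_signal("BTC/USD")
if signal:
    print(f"{signal.symbol}: {signal.decision.value} (RSI {signal.rsi_value:.2f})")
else:
    print("No signal stored yet for BTC/USD")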
/src/config/__init__.py:
--------------------------------------------------------------------------------
1 | from .settings import config
2 |
3 | __all__ = ["config"]
--------------------------------------------------------------------------------
/src/config/settings.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import List
3 | from pydantic import BaseModel, Field, validator
4 | from dotenv import load_dotenv
5 |
6 | # Load environment variables
7 | load_dotenv()
8 |
9 | class TaapiConfig(BaseModel):
10 | api_key: str = Field(default_factory=lambda: os.getenv("TAAPI_API_KEY", ""))
11 | rsi_period: int = Field(default_factory=lambda: int(os.getenv("RSI_PERIOD", "14")))
12 | price_history_interval: str = Field(default_factory=lambda: os.getenv("PRICE_HISTORY_INTERVAL", "5m"))
13 | price_history_limit: int = Field(default_factory=lambda: int(os.getenv("PRICE_HISTORY_LIMIT", "20")))
14 |
15 | class AlpacaConfig(BaseModel):
16 | # API credentials
17 | api_key: str = Field(default_factory=lambda: os.getenv("ALPACA_API_KEY", ""))
18 | api_secret: str = Field(default_factory=lambda: os.getenv("ALPACA_API_SECRET", ""))
19 | base_url: str = Field(default_factory=lambda: os.getenv("APCA_API_BASE_URL", "https://paper-api.alpaca.markets"))
20 |
21 | class OllamaConfig(BaseModel):
22 | model: str = Field(default_factory=lambda: os.getenv("OLLAMA_MODEL", "llama3"))
23 | host: str = Field(default_factory=lambda: os.getenv("OLLAMA_HOST", "http://ollama:11434"))
24 |
25 | class RedisConfig(BaseModel):
26 | host: str = Field(default_factory=lambda: os.getenv("REDIS_HOST", "redis"))
27 | port: int = Field(default_factory=lambda: int(os.getenv("REDIS_PORT", "6379")))
28 | db: int = Field(default_factory=lambda: int(os.getenv("REDIS_DB", "0")))
29 |
30 | class TradingConfig(BaseModel):
31 | symbols: List[str] = Field(default_factory=lambda: os.getenv("SYMBOLS", "BTC/USD").split(","))
32 | trade_percentage: float = Field(default_factory=lambda: float(os.getenv("TRADE_PERCENTAGE", "2.0")))
33 | trade_fixed_amount: float = Field(default_factory=lambda: float(os.getenv("TRADE_FIXED_AMOUNT", "10.0")))
34 | use_fixed_amount: bool = Field(default_factory=lambda: os.getenv("TRADE_USE_FIXED", "false").lower() == "true")
35 | # ALWAYS default to False for safety
36 | trading_enabled: bool = Field(default=False)
37 | poll_interval: int = Field(default_factory=lambda: int(os.getenv("POLL_INTERVAL", "120")))
38 |
39 | # Use validator instead of field_validator for pydantic v1
40 | @validator("trade_percentage")
41 | def validate_trade_percentage(cls, value):
42 | if value <= 0 or value > 100:
43 | raise ValueError("Trade percentage must be between 0 and 100")
44 | return value
45 |
46 | @validator("trade_fixed_amount")
47 | def validate_trade_fixed_amount(cls, value):
48 | if value < 1.0:
49 | raise ValueError("Fixed trade amount must be at least $1.00")
50 | return value
51 |
52 | class AppConfig(BaseModel):
53 | taapi: TaapiConfig = TaapiConfig()
54 | alpaca: AlpacaConfig = AlpacaConfig()
55 | ollama: OllamaConfig = OllamaConfig()
56 | redis: RedisConfig = RedisConfig()
57 | trading: TradingConfig = TradingConfig()
58 |
59 | config = AppConfig()
--------------------------------------------------------------------------------
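Every field resolves its default from the environment while the settings module is imported, so overrides have to be in place before src.config is imported. The sketch below shows that ordering with hypothetical override values.

# Usage sketch: overrides must be exported before src.config is imported, because the
# nested config models read os.getenv() while the settings module is being imported.
import os

os.environ["SYMBOLS"] = "BTC/USD,AAPL"   # hypothetical override values
os.environ["POLL_INTERVAL"] = "60"

from src.config import config

print(config.trading.symbols)          # ['BTC/USD', 'AAPL']
print(config.trading.poll_interval)    # 60
print(config.redis.host, config.redis.port, config.redis.db)
print(config.trading.trading_enabled)  # always False at startup, by design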
/src/data_retrieval/__init__.py:
--------------------------------------------------------------------------------
1 | from src.config import config
2 |
3 | # Use the real client only
4 | from .taapi_client import taapi_client
5 | from .service import data_retrieval_service
6 |
7 | __all__ = ["taapi_client", "data_retrieval_service"]
--------------------------------------------------------------------------------
/src/data_retrieval/service.py:
--------------------------------------------------------------------------------
1 | import time
2 | import uuid
3 | import threading
4 | from typing import List, Dict, Any, Optional
5 | from datetime import datetime
6 |
7 | from src.config import config
8 | from src.utils import get_logger, redis_client, RSIData, PriceCandle, PriceHistory, MarketStatus
9 | from src.data_retrieval.taapi_client import taapi_client
10 |
11 | logger = get_logger("data_retrieval_service")
12 |
13 | # Import version from setup.py
14 | import sys
15 | sys.path.insert(0, '..')
16 | from setup import VERSION
17 |
18 | if len(sys.argv) > 1 and sys.argv[1] == '--license':
19 | print(f"TraderMagic v{VERSION} - Licensed under AGPL-3.0")
20 | sys.exit(0)
21 |
22 | class DataRetrievalService:
23 | def __init__(self):
24 | self.symbols = config.trading.symbols
25 | self.poll_interval = config.trading.poll_interval
26 | self.price_history_interval = config.taapi.price_history_interval
27 | self.price_history_limit = config.taapi.price_history_limit
28 | self.should_run = True
29 | self.thread = None
30 |
31 | def start(self):
32 | """
33 | Start the data retrieval service in a separate thread
34 | """
35 | if self.thread and self.thread.is_alive():
36 | logger.warning("Data retrieval service is already running")
37 | return
38 |
39 | # Force update price history for all symbols on startup
40 | logger.info("Initializing price history for all symbols on startup")
41 | for symbol in self.symbols:
42 | try:
43 | logger.info(f"Fetching initial price history for {symbol}")
44 | self.update_price_history(symbol)
45 | time.sleep(3) # Add delay to respect rate limits
46 | except Exception as e:
47 | logger.error(f"Error fetching initial price history for {symbol}: {e}")
48 |
49 | self.should_run = True
50 | self.thread = threading.Thread(target=self._run_service)
51 | self.thread.daemon = True
52 | self.thread.start()
53 | logger.info("Data retrieval service started")
54 |
55 | def stop(self):
56 | """
57 | Stop the data retrieval service
58 | """
59 | self.should_run = False
60 | if self.thread and self.thread.is_alive():
61 | self.thread.join(timeout=10)
62 | logger.info("Data retrieval service stopped")
63 |
64 | def _run_service(self):
65 | """
66 | Main service loop - fetches RSI data at regular intervals
67 | """
68 | while self.should_run:
69 | try:
70 | # For TAAPI's free tier (1 request per 15 seconds), process all symbols
71 | # at consistent intervals to respect rate limits
72 | symbols_count = len(self.symbols)
73 | if symbols_count > 0:
74 | # Fixed 16 second interval between API calls for TAAPI free tier
75 | # (slightly more than the 15-second minimum to be safe)
76 | time_between_symbols = 16
77 | logger.info(f"Processing {symbols_count} symbols with {time_between_symbols} seconds between each")
78 |
79 | # Process all symbols in sequence
80 | for i, symbol in enumerate(self.symbols):
81 | start_time = time.time()
82 | logger.info(f"Fetching data for {symbol}")
83 |
84 | try:
85 | # Get RSI data for the symbol
86 | rsi_data = taapi_client.get_rsi(symbol)
87 | if rsi_data:
88 | # Store in Redis
89 | redis_key = f"rsi:{symbol}"
90 | # Set longer TTL to ensure data is still available for main loop processing
91 | redis_client.set_json(redis_key, rsi_data.dict(), ttl=self.poll_interval * 5)
92 | logger.info(f"Stored RSI data for {symbol}: {rsi_data.value}")
93 |
94 | # Get AI decision immediately after getting the data
95 | from src.ai_decision import ai_decision_service
96 | trade_signal = ai_decision_service.get_decision(rsi_data)
97 | if trade_signal:
98 | logger.info(f"Generated trade signal for {symbol}: {trade_signal.decision.value}")
99 |
100 |                             # A non-hold signal was generated; execution is deferred to the main loop
101 | if trade_signal.decision.value != "hold":
102 | try:
103 | # NOTE: To avoid potential validation errors,
104 | # we'll use the main module signal-driven approach for trade execution
105 | # rather than executing trades directly from the data retrieval service
106 |
107 | # Simply log that we found a trading opportunity
108 | logger.info(f"Trading opportunity detected for {symbol}: {trade_signal.decision.value}")
109 | logger.info(f"Signal stored in Redis, will be processed by main loop")
110 | except Exception as trade_error:
111 | logger.error(f"Error executing trade from data service: {trade_error}")
112 |
113 | # Get price data for the symbol
114 | # Add a small delay to respect TAAPI rate limits
115 | time.sleep(2)
116 | price_data = taapi_client.get_price(symbol)
117 | if price_data:
118 | # Store in Redis
119 | redis_key = f"price:{symbol}"
120 | redis_client.set_json(redis_key, price_data, ttl=self.poll_interval * 5)
121 | logger.info(f"Stored price data for {symbol}: {price_data['close']}")
122 |
123 | # Get historical price data for all symbols but stagger them to respect rate limits
124 |                     # Stagger history refreshes: symbol i updates only on cycles where cycle % len(symbols) == i
125 | current_cycle = int(time.time()) // self.poll_interval
126 | if current_cycle % len(self.symbols) == i:
127 | time.sleep(2) # Add delay to respect rate limits
128 | logger.info(f"Fetching price history for {symbol} on cycle {current_cycle}")
129 | self.update_price_history(symbol)
130 |
131 | # Calculate remaining time to wait based on elapsed time
132 | elapsed = time.time() - start_time
133 | wait_time = max(0, time_between_symbols - elapsed)
134 |
135 | # If this isn't the last symbol, wait before the next one
136 | if i < symbols_count - 1 and wait_time > 0:
137 | logger.debug(f"Waiting {wait_time:.1f} seconds before next symbol...")
138 | time.sleep(wait_time)
139 |
140 | except Exception as symbol_error:
141 | logger.error(f"Error processing symbol {symbol}: {symbol_error}")
142 | # Even on error, respect the rate limit timing
143 | elapsed = time.time() - start_time
144 | wait_time = max(0, time_between_symbols - elapsed)
145 | if i < symbols_count - 1 and wait_time > 0:
146 | time.sleep(wait_time)
147 |
148 | # After processing all symbols, wait until the next poll interval
149 | remaining_time = max(5, self.poll_interval - (time_between_symbols * symbols_count))
150 | logger.info(f"Completed processing all symbols. Next poll cycle in {remaining_time:.1f} seconds")
151 | time.sleep(remaining_time)
152 | except Exception as e:
153 | logger.error(f"Error in data retrieval service: {e}")
154 | time.sleep(10) # Sleep briefly before retrying
155 |
156 | def get_latest_rsi(self, symbol: str) -> Optional[RSIData]:
157 | """
158 | Get the latest RSI data for a symbol from Redis
159 | """
160 | redis_key = f"rsi:{symbol}"
161 | data = redis_client.get_json(redis_key)
162 | if data:
163 | # Convert dict back to RSIData
164 | return RSIData(**data)
165 | else:
166 | logger.warning(f"No RSI data found for {symbol}")
167 | return None
168 |
169 | def update_price_history(self, symbol: str) -> Optional[PriceHistory]:
170 | """
171 | Fetch and store historical price data for a symbol
172 | """
173 | interval = self.price_history_interval
174 | limit = self.price_history_limit
175 |
176 | # Fetch historical price data
177 | price_history_data = taapi_client.get_price_history(symbol, interval, limit)
178 | if not price_history_data:
179 | logger.warning(f"Failed to get historical price data for {symbol}")
180 | return None
181 |
182 | # Log the raw data format to help diagnose issues
183 | logger.debug(f"Raw price history data for {symbol}: {len(price_history_data)} candles")
184 | if price_history_data and len(price_history_data) > 0:
185 | logger.debug(f"Sample candle: {price_history_data[0]}")
186 |
187 | # Convert to PriceCandle objects
188 | candles = []
189 | try:
190 | for candle in price_history_data:
191 | # Ensure we have all required fields
192 | required_fields = ['open', 'high', 'low', 'close', 'volume', 'timestamp']
193 | for field in required_fields:
194 | if field not in candle:
195 | logger.error(f"Missing required field '{field}' in candle data: {candle}")
196 | continue
197 |
198 | # Create candle object with proper type conversion
199 | try:
200 | # Get market status for this candle (important for stocks)
201 | market_status = None
202 |
203 | # Only check market status for stocks (not crypto)
204 | if not taapi_client.crypto_pattern.match(symbol):
205 | # For historical data, we need to check if this candle's timestamp falls within market hours
206 | # Get the candle's timestamp in Eastern time
207 | candle_time = datetime.fromtimestamp(candle['timestamp'], taapi_client.eastern_tz)
208 | candle_time_of_day = candle_time.time()
209 | candle_weekday = candle_time.weekday()
210 |
211 | # Weekend
212 | if candle_weekday >= 5:
213 | market_status = MarketStatus.CLOSED
214 | # Regular market hours
215 | elif taapi_client.market_open_time <= candle_time_of_day <= taapi_client.market_close_time:
216 | market_status = MarketStatus.OPEN
217 | # Pre-market
218 | elif taapi_client.pre_market_open_time <= candle_time_of_day < taapi_client.market_open_time:
219 | market_status = MarketStatus.PRE_MARKET
220 | # After-hours
221 | elif taapi_client.market_close_time < candle_time_of_day <= taapi_client.after_hours_close_time:
222 | market_status = MarketStatus.AFTER_HOURS
223 | # Closed
224 | else:
225 | market_status = MarketStatus.CLOSED
226 | else:
227 | # Crypto markets are always open
228 | market_status = MarketStatus.OPEN
229 |
230 | price_candle = PriceCandle(
231 | symbol=symbol,
232 | open=float(candle['open']),
233 | high=float(candle['high']),
234 | low=float(candle['low']),
235 | close=float(candle['close']),
236 | volume=float(candle['volume']),
237 | timestamp=datetime.fromtimestamp(candle['timestamp']),
238 | market_status=market_status
239 | )
240 | candles.append(price_candle)
241 | except (ValueError, TypeError) as e:
242 | logger.error(f"Error converting candle data types for {symbol}: {e}, candle: {candle}")
243 | except (KeyError, ValueError, TypeError) as e:
244 | logger.error(f"Error processing candle data for {symbol}: {e}")
245 | return None
246 |
247 | if not candles:
248 | logger.error(f"No valid candles processed for {symbol}")
249 | return None
250 |
251 | # Add market status summary to log output
252 | if not taapi_client.crypto_pattern.match(symbol):
253 | status_counts = {}
254 | for candle in candles:
255 | status = candle.market_status.value
256 | status_counts[status] = status_counts.get(status, 0) + 1
257 | logger.info(f"Market status summary for {symbol}: {status_counts}")
258 |
259 | # Create PriceHistory object
260 | price_history = PriceHistory(
261 | symbol=symbol,
262 | interval=interval,
263 | candles=candles
264 | )
265 |
266 | # Store in Redis with a much longer TTL to prevent charts disappearing
267 | redis_key = f"price_history:{symbol}"
268 | serialized_data = price_history.dict()
269 | # Use 24 hours instead of 10x poll interval to maintain persistent chart data
270 | redis_client.set_json(redis_key, serialized_data, ttl=86400)
271 | logger.info(f"Stored price history for {symbol}: {len(candles)} candles with 24-hour TTL")
272 |
273 | # Verify data was stored correctly
274 | try:
275 | verification = redis_client.get_json(redis_key)
276 | if verification and 'candles' in verification:
277 | logger.info(f"Verification: Redis has {len(verification['candles'])} candles for {symbol}")
278 | else:
279 | logger.error(f"Failed to verify price history data in Redis for {symbol}")
280 | except Exception as e:
281 | logger.error(f"Error verifying Redis data for {symbol}: {e}")
282 |
283 | return price_history
284 |
285 | def get_latest_price_history(self, symbol: str) -> Optional[PriceHistory]:
286 | """
287 | Get the latest price history data for a symbol from Redis
288 | """
289 | redis_key = f"price_history:{symbol}"
290 | data = redis_client.get_json(redis_key)
291 | if data:
292 | # Convert dict back to PriceHistory
293 | return PriceHistory(**data)
294 | else:
295 | logger.warning(f"No price history found for {symbol}")
296 | return None
297 |
298 | # Singleton instance
299 | data_retrieval_service = DataRetrievalService()
300 |
301 | # Add this for module execution
302 | if __name__ == "__main__":
303 | logger.info("Starting Data Retrieval Service as standalone")
304 | data_retrieval_service.start()
305 |
306 | # Keep the main thread alive
307 | try:
308 | while True:
309 | time.sleep(1)
310 | except KeyboardInterrupt:
311 | logger.info("Shutting down Data Retrieval Service")
312 | data_retrieval_service.stop()
--------------------------------------------------------------------------------
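
The 24-hour TTL above means the frontend can rebuild its charts from Redis long after a poll cycle completes. A minimal sketch of reading that cache back through the service singleton (illustrative only; it assumes the stack is running, Redis is reachable via the configured REDIS_HOST/REDIS_PORT, and at least one poll has stored data for the symbol):

    from src.data_retrieval import data_retrieval_service

    # Returns a PriceHistory model, or None if nothing has been cached yet
    history = data_retrieval_service.get_latest_price_history("BTC/USD")
    if history:
        print(f"{history.symbol} ({history.interval}): {len(history.candles)} candles")
        print(f"latest close: {history.candles[-1].close}")
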
/src/data_retrieval/taapi_client.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import re
3 | import pytz
4 | from typing import Dict, Any, Optional, List, Tuple
5 | from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
6 | from datetime import datetime, time
7 |
8 | from src.config import config
9 | from src.utils import get_logger, RSIData, MarketStatus
10 |
11 | logger = get_logger("taapi_client")
12 |
13 | class TaapiClient:
14 | def __init__(self):
15 | self.api_key = config.taapi.api_key
16 | self.base_url = "https://api.taapi.io"
17 | self.rsi_period = config.taapi.rsi_period
18 |
19 | # Stock symbols need to be in specific format for TAAPI
20 | self.crypto_exchange = "binance"
21 | self.stock_exchange = "NYSE"
22 |
23 | # Define patterns to identify symbol types
24 | self.crypto_pattern = re.compile(r'[A-Z0-9]+/[A-Z0-9]+') # Pattern for crypto pairs like BTC/USDT
25 | self.stock_pattern = re.compile(r'^[A-Z]{1,5}$') # Pattern for stock tickers like AAPL, TSLA
26 |
27 | # US Eastern timezone for market hours
28 | self.eastern_tz = pytz.timezone('US/Eastern')
29 |
30 | # Regular market hours (9:30 AM - 4:00 PM Eastern)
31 | self.market_open_time = time(9, 30)
32 | self.market_close_time = time(16, 0)
33 |
34 | # Extended hours
35 | self.pre_market_open_time = time(4, 0) # 4:00 AM Eastern
36 | self.after_hours_close_time = time(20, 0) # 8:00 PM Eastern
37 |
38 | if not self.api_key:
39 | logger.error("TAAPI API key is not set")
40 | raise ValueError("TAAPI API key is not set")
41 |
42 |         logger.debug(f"Using TAAPI API key: {self.api_key[:4]}...{self.api_key[-4:]}")
43 |
44 | def _get_market_status(self, symbol: str) -> MarketStatus:
45 | """
46 | Determine the current market status for a stock symbol
47 |
48 | Args:
49 | symbol: Stock symbol to check
50 |
51 | Returns:
52 | MarketStatus enum indicating current trading status
53 | """
54 | # Cryptocurrencies trade 24/7
55 | if self.crypto_pattern.match(symbol):
56 | return MarketStatus.OPEN
57 |
58 | # For stocks, check current time in Eastern timezone
59 | now = datetime.now(self.eastern_tz)
60 | current_time = now.time()
61 | current_weekday = now.weekday()
62 |
63 | # Check if it's a weekend (5=Saturday, 6=Sunday)
64 | if current_weekday >= 5:
65 | return MarketStatus.CLOSED
66 |
67 | # Check if within regular market hours (9:30 AM - 4:00 PM Eastern)
68 | if self.market_open_time <= current_time <= self.market_close_time:
69 | return MarketStatus.OPEN
70 |
71 | # Check if it's pre-market (4:00 AM - 9:30 AM Eastern)
72 | elif self.pre_market_open_time <= current_time < self.market_open_time:
73 | return MarketStatus.PRE_MARKET
74 |
75 | # Check if it's after-hours (4:00 PM - 8:00 PM Eastern)
76 | elif self.market_close_time < current_time <= self.after_hours_close_time:
77 | return MarketStatus.AFTER_HOURS
78 |
79 | # Outside of all trading hours
80 | else:
81 | return MarketStatus.CLOSED
82 |
83 | def _normalize_symbol(self, symbol: str) -> Tuple[str, str]:
84 | """
85 | Normalize symbol for TAAPI API and determine the correct exchange.
86 |
87 | Returns:
88 | Tuple containing (normalized_symbol, exchange)
89 | """
90 | # If it already has a slash, it's likely a crypto pair
91 | if self.crypto_pattern.match(symbol):
92 | logger.debug(f"Symbol {symbol} recognized as cryptocurrency pair")
93 | return symbol, self.crypto_exchange
94 |
95 | # If it matches our stock pattern, format it for stocks
96 | elif self.stock_pattern.match(symbol):
97 | # For stocks, TAAPI expects them in format SYMBOL/USD
98 | # Pro tier supports stock symbols, while free tier is limited to specific crypto pairs
99 | normalized = f"{symbol}/USD"
100 | logger.debug(f"Symbol {symbol} converted to stock format: {normalized}")
101 | return normalized, self.stock_exchange
102 |
103 | # Default case - just pass through and use crypto exchange
104 | logger.warning(f"Symbol {symbol} doesn't match known patterns, treating as crypto")
105 | return symbol, self.crypto_exchange
106 |
107 | @retry(
108 | stop=stop_after_attempt(5), # Increased from 3 to 5 attempts
109 | wait=wait_exponential(multiplier=1, min=2, max=15), # Longer max wait time
110 | retry=retry_if_exception_type((requests.RequestException, ConnectionError, requests.Timeout)),
111 | reraise=True
112 | )
113 | def get_rsi(self, symbol: str, interval: str = "1m") -> Optional[RSIData]:
114 | """
115 | Fetch RSI data for a given symbol from TAAPI.io
116 |
117 | Args:
118 | symbol: Trading pair (e.g., BTC/USDT) or stock symbol (e.g., TSLA)
119 | interval: Time interval (e.g., 1m, 5m, 15m, 1h, 4h, 1d)
120 |
121 | Returns:
122 | RSIData object or None if request fails
123 | """
124 | # Normalize the symbol and get appropriate exchange
125 | normalized_symbol, exchange = self._normalize_symbol(symbol)
126 |
127 | # Build parameters for the request
128 | params = {
129 | "secret": self.api_key,
130 | "exchange": exchange,
131 | "symbol": normalized_symbol,
132 | "interval": interval,
133 | "period": self.rsi_period
134 | }
135 |
136 | logger.info(f"Making TAAPI request for {symbol} (normalized to {normalized_symbol} on {exchange})")
137 | logger.debug(f"Request params: {params}")
138 |
139 | try:
140 | # Set appropriate timeout based on symbol type (longer for stocks)
141 | timeout = 60 if exchange == self.stock_exchange else 30
142 | logger.debug(f"Using timeout of {timeout}s for {symbol} ({exchange})")
143 | response = requests.get(f"{self.base_url}/rsi", params=params, timeout=timeout)
144 |
145 | # Handle error responses before raising for status
146 | if response.status_code != 200:
147 | try:
148 | error_data = response.json() if response.text else {"errors": ["Unknown error"]}
149 | except ValueError: # Includes JSONDecodeError
150 | error_data = {"errors": [f"Invalid JSON response: {response.text}"]}
151 |
152 | if "errors" in error_data:
153 | error_message = "; ".join(error_data["errors"])
154 | if "Free plans only permits" in error_message:
155 | logger.error(f"Free plan limitation for {symbol}: {error_message}")
156 | logger.error("Make sure you're using one of the allowed symbols: [BTC/USDT,ETH/USDT,XRP/USDT,LTC/USDT,XMR/USDT]")
157 | else:
158 | logger.error(f"API error for {symbol}: {error_message}")
159 | return None
160 |
161 | response.raise_for_status()
162 |
163 | # Check if the response is empty
164 | if not response.text:
165 | logger.error(f"Empty response from TAAPI for {symbol}")
166 | return None
167 |
168 | try:
169 | data = response.json()
170 | except ValueError as e:
171 | logger.error(f"Invalid JSON response from TAAPI for {symbol}: {e}")
172 | logger.error(f"Response text: '{response.text}'")
173 | return None
174 |
175 | if "value" in data:
176 | logger.info(f"Received RSI data for {symbol}: {data['value']}")
177 | return RSIData(
178 | symbol=symbol, # Keep original symbol in the data
179 | value=float(data["value"]),
180 | timestamp=datetime.now()
181 | )
182 | else:
183 | logger.error(f"Invalid response from TAAPI for {symbol}: {data}")
184 | return None
185 |
186 | except requests.Timeout as e:
187 | if exchange == self.stock_exchange:
188 | logger.error(f"Timeout fetching RSI data for stock symbol {symbol}: {e}")
189 | logger.warning(f"Stock symbols may require TAAPI Pro tier. Consider removing {symbol} if using free tier.")
190 | else:
191 | logger.error(f"Timeout fetching RSI data for {symbol}: {e}")
192 | # Don't raise the exception, just return None to avoid stopping the whole process
193 | return None
194 | except requests.RequestException as e:
195 | logger.error(f"Error fetching RSI data for {symbol}: {e}")
196 | # Don't raise the exception, just return None to avoid stopping the whole process
197 | return None
198 |
199 | def get_price(self, symbol: str, interval: str = "1m") -> Optional[Dict[str, Any]]:
200 | """
201 | Fetch current price data for a given symbol from TAAPI.io
202 |
203 | Args:
204 | symbol: Trading pair (e.g., BTC/USDT) or stock symbol (e.g., TSLA)
205 | interval: Time interval (e.g., 1m, 5m, 15m, 1h, 4h, 1d)
206 |
207 | Returns:
208 | Price data or None if request fails
209 | """
210 | # Normalize the symbol and get appropriate exchange
211 | normalized_symbol, exchange = self._normalize_symbol(symbol)
212 |
213 | params = {
214 | "secret": self.api_key,
215 | "exchange": exchange,
216 | "symbol": normalized_symbol,
217 | "interval": interval
218 | }
219 |
220 | logger.debug(f"Making TAAPI price request for {symbol} (normalized to {normalized_symbol})")
221 |
222 | try:
223 | # Set appropriate timeout based on symbol type (longer for stocks)
224 | timeout = 60 if exchange == self.stock_exchange else 30
225 | logger.debug(f"Using timeout of {timeout}s for price request ({symbol})")
226 | response = requests.get(f"{self.base_url}/candle", params=params, timeout=timeout)
227 |
228 | if response.status_code != 200:
229 | try:
230 | error_data = response.json() if response.text else {"errors": ["Unknown error"]}
231 | except ValueError: # Includes JSONDecodeError
232 | error_data = {"errors": [f"Invalid JSON response: {response.text}"]}
233 |
234 | if "errors" in error_data:
235 | error_message = "; ".join(error_data["errors"])
236 | logger.error(f"API error for {symbol} price data: {error_message}")
237 | return None
238 |
239 | response.raise_for_status()
240 |
241 | # Check if the response is empty
242 | if not response.text:
243 | logger.error(f"Empty response from TAAPI for {symbol} price data")
244 | return None
245 |
246 | try:
247 | data = response.json()
248 | except ValueError as e:
249 | logger.error(f"Invalid JSON response from TAAPI for {symbol} price data: {e}")
250 | logger.error(f"Response text: '{response.text}'")
251 | return None
252 |
253 |             logger.info(f"Received price data for {symbol}: {data.get('close')}")
254 | return data
255 |
256 | except requests.Timeout as e:
257 | if exchange == self.stock_exchange:
258 | logger.error(f"Timeout fetching price data for stock symbol {symbol}: {e}")
259 | logger.warning(f"Stock symbols may require TAAPI Pro tier. Consider removing {symbol} if using free tier.")
260 | else:
261 | logger.error(f"Timeout fetching price data for {symbol}: {e}")
262 | return None
263 | except requests.RequestException as e:
264 | logger.error(f"Error fetching price data for {symbol}: {e}")
265 | return None
266 |
267 | def get_price_history(self, symbol: str, interval: str = "5m", limit: int = 20) -> Optional[List[Dict[str, Any]]]:
268 | """
269 | Fetch historical price data for a given symbol from TAAPI.io
270 |
271 | Args:
272 | symbol: Trading pair (e.g., BTC/USDT) or stock symbol (e.g., TSLA)
273 | interval: Time interval (e.g., 1m, 5m, 15m, 1h, 4h, 1d)
274 | limit: Number of candles to retrieve
275 |
276 | Returns:
277 | List of historical price data or None if request fails
278 | """
279 | # Normalize the symbol and get appropriate exchange
280 | normalized_symbol, exchange = self._normalize_symbol(symbol)
281 |
282 | params = {
283 | "secret": self.api_key,
284 | "exchange": exchange,
285 | "symbol": normalized_symbol,
286 | "interval": interval,
287 | "limit": limit
288 | }
289 |
290 | logger.debug(f"Making TAAPI historical price request for {symbol} (normalized to {normalized_symbol})")
291 |
292 | try:
293 | # Set appropriate timeout based on symbol type (longer for stocks)
294 | timeout = 60 if exchange == self.stock_exchange else 30
295 | logger.debug(f"Using timeout of {timeout}s for historical price request ({symbol})")
296 | response = requests.get(f"{self.base_url}/candles", params=params, timeout=timeout)
297 |
298 | if response.status_code != 200:
299 | try:
300 | error_data = response.json() if response.text else {"errors": ["Unknown error"]}
301 | except ValueError: # Includes JSONDecodeError
302 | error_data = {"errors": [f"Invalid JSON response: {response.text}"]}
303 |
304 | if "errors" in error_data:
305 | error_message = "; ".join(error_data["errors"])
306 | logger.error(f"API error for {symbol} historical price data: {error_message}")
307 | return None
308 |
309 | response.raise_for_status()
310 |
311 | # Check if the response is empty
312 | if not response.text:
313 | logger.error(f"Empty response from TAAPI for {symbol} historical price data")
314 | return None
315 |
316 | try:
317 | data = response.json()
318 | except ValueError as e:
319 | logger.error(f"Invalid JSON response from TAAPI for {symbol} historical price data: {e}")
320 | logger.error(f"Response text: '{response.text}'")
321 | return None
322 |
323 | logger.info(f"Received historical price data for {symbol}: {len(data)} candles")
324 | return data
325 |
326 | except requests.Timeout as e:
327 | if exchange == self.stock_exchange:
328 | logger.error(f"Timeout fetching historical price data for stock symbol {symbol}: {e}")
329 | logger.warning(f"Stock symbols may require TAAPI Pro tier. Consider removing {symbol} if using free tier.")
330 | else:
331 | logger.error(f"Timeout fetching historical price data for {symbol}: {e}")
332 | return None
333 | except requests.RequestException as e:
334 | logger.error(f"Error fetching historical price data for {symbol}: {e}")
335 | return None
336 |
337 | taapi_client = TaapiClient()
--------------------------------------------------------------------------------
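
A quick way to exercise the client above from a Python shell (a sketch, assuming TAAPI_API_KEY is set in the environment and the symbol is allowed on your TAAPI tier; on the free tier that means pairs such as BTC/USDT):

    from src.data_retrieval.taapi_client import taapi_client

    # One RSI reading; the client returns None on rate limits, bad keys, or API errors
    rsi = taapi_client.get_rsi("BTC/USDT", interval="1m")
    if rsi is None:
        print("No RSI data (rate limit, unsupported symbol, or API error)")
    else:
        print(f"{rsi.symbol} RSI({taapi_client.rsi_period}) = {rsi.value:.2f} at {rsi.timestamp}")
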
/src/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import signal
4 | import threading
5 | import sys
6 | import uuid
7 | from datetime import datetime
8 |
9 | from src.config import config
10 | from src.utils import get_logger
11 | from src.data_retrieval import data_retrieval_service
12 | from src.ai_decision import ai_decision_service
13 |
14 | # Import trade execution service components directly to avoid circular imports
15 | from src.trade_execution.service import TradeExecutionService
16 | trade_execution_service = TradeExecutionService()
17 |
18 | logger = get_logger("main")
19 |
20 | # Import version from setup.py
21 | import sys
22 | sys.path.insert(0, '..')
23 | from setup import VERSION
24 |
25 | if len(sys.argv) > 1 and sys.argv[1] == '--license':
26 | print(f"TraderMagic v{VERSION} - Licensed under AGPL-3.0")
27 | sys.exit(0)
28 |
29 | # Global flag to control the main loop
30 | running = True
31 |
32 | def signal_handler(sig, frame):
33 | """
34 | Handle termination signals to gracefully shutdown the application
35 | """
36 | global running
37 | logger.info("Received termination signal, shutting down...")
38 | running = False
39 |
40 | # Register signal handlers
41 | signal.signal(signal.SIGINT, signal_handler)
42 | signal.signal(signal.SIGTERM, signal_handler)
43 |
44 | def create_logs_directory():
45 | """
46 | Create logs directory if it doesn't exist
47 | """
48 | if not os.path.exists("logs"):
49 | os.makedirs("logs")
50 | logger.info("Created logs directory")
51 |
52 | def setup_services():
53 | """
54 | Initialize and start all services
55 | """
56 | logger.info("Starting services...")
57 |
58 | # IMPORTANT: ALWAYS initialize trading to disabled on startup for safety
59 | from src.utils import force_trading_disabled
60 |     success = force_trading_disabled()
61 |     logger.info(f"Trading initialized to DISABLED for safety (Redis write ok: {success})")
62 |
63 | # Double-check the value was set correctly
64 | from src.utils import redis_client
65 | trading_enabled = redis_client.client.get("trading_enabled")
66 | logger.info(f"VERIFY: 'trading_enabled' is currently set to '{trading_enabled}' in Redis")
67 |
68 | # Start the data retrieval service
69 | data_retrieval_service.start()
70 | logger.info("Data retrieval service started")
71 |
72 | # Log key components for debugging
73 | import json
74 |     logger.info(f"Active trading symbols: {config.trading.symbols}")
75 |     logger.info(f"Trade percentage: {config.trading.trade_percentage}")
76 |     logger.info(f"Poll interval: {config.trading.poll_interval}")
77 |
78 | # Check if signals are already in Redis
79 | for symbol in config.trading.symbols:
80 | signal_data = redis_client.get_json(f"signal:{symbol}")
81 | if signal_data:
82 | logger.info(f"Found existing signal for {symbol}: {json.dumps(signal_data)}")
83 | else:
84 | logger.info(f"No existing signal for {symbol}")
85 |
86 | result_data = redis_client.get_json(f"trade_result:{symbol}")
87 | if result_data:
88 | logger.info(f"Found existing trade result for {symbol}: {json.dumps(result_data)}")
89 | else:
90 | logger.info(f"No existing trade result for {symbol}")
91 |
92 | def main_loop():
93 | """
94 | Main application loop that coordinates the three components
95 | """
96 | global running
97 |
98 | logger.info(f"Starting main loop with poll interval: {config.trading.poll_interval} seconds")
99 | logger.info(f"Trading symbols: {config.trading.symbols}")
100 |
101 | # Check if ALPACA_DEBUG_MODE is enabled
102 | import os
103 |     debug_mode = os.getenv("ALPACA_DEBUG_MODE", "false").lower() == "true"
104 | logger.info(f"ALPACA_DEBUG_MODE: {debug_mode}")
105 |
106 | # Wait initial time for the data retrieval service to get first data
107 | initial_wait = min(30, config.trading.poll_interval)
108 | logger.info(f"Waiting {initial_wait} seconds for initial data collection...")
109 | time.sleep(initial_wait)
110 |
111 | # Import for direct redis access
112 | from src.utils import redis_client
113 | import json
114 |
115 | # Initialize execution count for tracking
116 | execution_count = 0
117 |
118 | while running:
119 | try:
120 | # Log iteration start
121 | execution_count += 1
122 | logger.info(f"Main loop iteration #{execution_count} starting")
123 |
124 | # Get all signals at once to avoid decision delays
125 | all_signals = {}
126 | for symbol in config.trading.symbols:
127 | # Get the latest trade signal (already calculated by data_retrieval service)
128 | signal = ai_decision_service.get_latest_signal(symbol)
129 | if signal:
130 | all_signals[symbol] = signal
131 | logger.info(f"Retrieved signal for {symbol}: {signal.decision.value}")
132 | else:
133 | logger.warning(f"No signal found for {symbol}")
134 | # Check if RSI data is available
135 | rsi_data = data_retrieval_service.get_latest_rsi(symbol)
136 | if rsi_data:
137 | logger.info(f"RSI data available for {symbol}: {rsi_data.value}, generating signal")
138 | # Try to generate a signal
139 | signal = ai_decision_service.get_decision(rsi_data)
140 | if signal:
141 | all_signals[symbol] = signal
142 | logger.info(f"Generated new signal for {symbol}: {signal.decision.value}")
143 |
144 | # Log how many signals we found
145 | logger.info(f"Found {len(all_signals)} signals for execution")
146 |
147 | # Execute trades for all signals that aren't "hold"
148 | for symbol, signal in all_signals.items():
149 | try:
150 | if signal.decision.value != "hold":
151 | logger.info(f"Executing trade for {symbol} with decision: {signal.decision.value}")
152 |
153 |                         # In debug mode, fabricate a successful result instead of calling Alpaca
154 | if debug_mode:
155 | # Force a successful trade result in debug mode
156 | from src.utils import TradeResult
157 | import uuid
158 | from datetime import datetime
159 |
160 | mock_price = 45000.0 if "BTC" in symbol else 3000.0
161 | mock_quantity = 0.001 if "BTC" in symbol else 0.01
162 |
163 | # Create a successful trade result
164 | result = TradeResult(
165 | symbol=symbol,
166 | decision=signal.decision,
167 | order_id=f"debug-{uuid.uuid4()}",
168 | quantity=mock_quantity,
169 | price=mock_price,
170 | status="executed",
171 | error=None,
172 | timestamp=datetime.now()
173 | )
174 |
175 | logger.info(f"DEBUG MODE: Created mock trade result for {symbol}")
176 | else:
177 | # Normal execution
178 | result = trade_execution_service.execute_trade(signal)
179 |
180 | # Handle the result
181 | if result:
182 | logger.info(f"Trade result for {symbol}: {result.status}")
183 | if result.status == "executed":
184 | logger.info(f"Order executed for {symbol}: {result.quantity} @ ~${result.price:.2f}")
185 |
186 | # Debug the result that's being saved in Redis
187 | logger.info(f"Trade result object: {result}")
188 |
189 | # Always save the result to Redis with a long TTL
190 | redis_key = f"trade_result:{symbol}"
191 | success = redis_client.set_json(redis_key, result.dict(), ttl=86400) # 24 hour TTL
192 | logger.info(f"Saved trade result to Redis key: {redis_key} (success: {success})")
193 |
194 | # Check if it was saved properly
195 | saved_data = redis_client.get_json(redis_key)
196 | if saved_data:
197 | logger.info(f"Verified Redis data for {redis_key}: {json.dumps(saved_data)}")
198 | else:
199 | logger.error(f"Failed to verify Redis data for {redis_key}!")
200 | else:
201 | logger.warning(f"Trade not executed. Status: {result.status}, Error: {result.error}")
202 | else:
203 | logger.error(f"No result returned from trade execution for {symbol}")
204 | else:
205 | logger.info(f"Decision is to hold for {symbol}, no trade executed")
206 | except Exception as trade_error:
207 | logger.error(f"Error executing trade for {symbol}: {trade_error}")
208 | import traceback
209 | logger.error(f"Traceback: {traceback.format_exc()}")
210 |
211 | # Sleep less than the poll interval to check for new signals sooner
212 | # This ensures we catch new signals shortly after they're generated
213 | sleep_time = min(60, max(30, config.trading.poll_interval // 2))
214 | logger.info(f"Main loop iteration #{execution_count} completed. Sleeping for {sleep_time} seconds")
215 | time.sleep(sleep_time)
216 |
217 | except Exception as e:
218 | logger.error(f"Error in main loop: {e}")
219 | import traceback
220 | logger.error(f"Traceback: {traceback.format_exc()}")
221 | time.sleep(5) # Brief pause before continuing
222 |
223 | def shutdown():
224 | """
225 | Perform cleanup and shutdown operations
226 | """
227 | logger.info("Shutting down...")
228 |
229 | # Stop the data retrieval service
230 | data_retrieval_service.stop()
231 |
232 | logger.info("Shutdown complete")
233 |
234 | if __name__ == "__main__":
235 | # Make sure the logs directory exists
236 | create_logs_directory()
237 |
238 | # Print startup banner
239 | logger.info("=" * 80)
240 | logger.info(f"TraderMagic starting at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
241 | logger.info(f"Configured to trade: {', '.join(config.trading.symbols)}")
242 | logger.info(f"Using Ollama model: {config.ollama.model}")
243 | logger.info(f"Alpaca paper trading: {config.alpaca.paper_trading}")
244 | logger.info("=" * 80)
245 |
246 | try:
247 | # Set up and start services
248 | setup_services()
249 |
250 | # Run the main loop
251 | main_loop()
252 | except Exception as e:
253 | logger.error(f"Unhandled exception: {e}")
254 | finally:
255 | # Ensure proper shutdown
256 | shutdown()
--------------------------------------------------------------------------------
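
The main loop sleeps for min(60, max(30, poll_interval // 2)) seconds between iterations, so signal checks are clamped to a 30-60 second cadence regardless of POLL_INTERVAL. A small illustration of that clamp (loop_sleep is a hypothetical helper, not part of the codebase):

    def loop_sleep(poll_interval: int) -> int:
        # Same clamp used in main_loop(): never below 30s, never above 60s
        return min(60, max(30, poll_interval // 2))

    for p in (10, 60, 300, 600):
        print(p, "->", loop_sleep(p))   # 10 -> 30, 60 -> 30, 300 -> 60, 600 -> 60
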
/src/trade_execution/__init__.py:
--------------------------------------------------------------------------------
1 | # Import only core dependencies
2 | from src.config import config
3 |
4 | # Empty __init__.py to avoid circular imports
5 | # The components will be imported directly where needed
6 |
7 | __all__ = []
--------------------------------------------------------------------------------
/src/trade_execution/service.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Optional
2 | import time
3 | import uuid
4 | import threading
5 | from datetime import datetime
6 |
7 | from src.utils import get_logger, TradeSignal, TradeResult, redis_client
8 |
9 | logger = get_logger("trade_execution_service")
10 |
11 | # Import version from setup.py
12 | import sys
13 | sys.path.insert(0, '..')
14 | from setup import VERSION
15 |
16 | if len(sys.argv) > 1 and sys.argv[1] == '--license':
17 | print(f"TraderMagic v{VERSION} - Licensed under AGPL-3.0")
18 | sys.exit(0)
19 |
20 | # Import alpaca_client inside the class methods to avoid circular imports
21 |
22 | class TradeExecutionService:
23 | def __init__(self):
24 | self.last_execution_time: Dict[str, float] = {}
25 | self.min_execution_interval = 300 # Minimum seconds between trades for the same symbol
26 |
27 | # SAFETY: Force trading to disabled state at startup
28 | # This is a critical safety feature
29 | redis_client.client.set("trading_enabled", "false")
30 | logger.warning("SAFETY: Trading initialized to DISABLED in trade execution service")
31 |
32 | def execute_trade(self, signal: TradeSignal) -> Optional[TradeResult]:
33 | """
34 | Execute a trade based on a signal if enough time has passed since last execution
35 |
36 | Args:
37 | signal: Trade signal
38 |
39 | Returns:
40 | TradeResult or None if trade was skipped due to time constraints
41 | """
42 | # Import alpaca_client here to avoid circular imports
43 | from src.trade_execution.alpaca_client import alpaca_client
44 |
45 | # Check if we recently executed a trade for this symbol
46 | current_time = time.time()
47 | last_time = self.last_execution_time.get(signal.symbol, 0)
48 | time_since_last_trade = current_time - last_time
49 |
50 | if time_since_last_trade < self.min_execution_interval:
51 | logger.info(f"Skipping trade for {signal.symbol} - too soon after last trade ({time_since_last_trade:.1f}s < {self.min_execution_interval}s)")
52 | return None
53 |
54 | # Execute the trade
55 | logger.info(f"Calling alpaca_client.execute_trade for {signal.symbol}")
56 | result = alpaca_client.execute_trade(signal)
57 | logger.info(f"Trade execution result: {result}")
58 |
59 | # Update last execution time
60 | if result and result.status == "executed":
61 | self.last_execution_time[signal.symbol] = current_time
62 | logger.info(f"Updated last execution time for {signal.symbol}")
63 |
64 | # Store the result in Redis with explicit TTL
65 | if result:
66 | redis_key = f"trade_result:{signal.symbol}"
67 | success = redis_client.set_json(redis_key, result.dict(), ttl=3600)
68 | logger.info(f"Saved trade result to Redis key {redis_key}: {success}")
69 |
70 | return result
71 |
72 | def get_latest_result(self, symbol: str) -> Optional[TradeResult]:
73 | """
74 | Get the latest trade result for a symbol from Redis
75 |
76 | Args:
77 | symbol: Asset symbol
78 |
79 | Returns:
80 | TradeResult or None if not found
81 | """
82 | redis_key = f"trade_result:{symbol}"
83 | data = redis_client.get_json(redis_key)
84 | if data:
85 | return TradeResult(**data)
86 | return None
87 |
88 | # Listen for account info requests and settings updates via Redis
89 | def start_listeners():
90 | """Start background threads to listen for various Redis requests"""
91 | import json
92 | import threading
93 | from dotenv import load_dotenv
94 |
95 | def settings_listener_thread():
96 | from src.utils import redis_client
97 | logger.info("Starting settings listener thread")
98 | pubsub = redis_client.get_pubsub()
99 | pubsub.subscribe('settings:update')
100 |
101 | for message in pubsub.listen():
102 | try:
103 | if message['type'] == 'message':
104 | logger.info(f"Received settings update: {message['data']}")
105 | # Reload environment variables
106 | load_dotenv(override=True)
107 | logger.info("Environment variables reloaded from .env file")
108 |
109 | # Get updated values for logging
110 | from src.config import config
111 | logger.info(f"Updated settings: use_fixed_amount={config.trading.use_fixed_amount}, " +
112 | f"fixed_amount={config.trading.trade_fixed_amount}, " +
113 | f"percentage={config.trading.trade_percentage}")
114 | except Exception as e:
115 | logger.error(f"Error processing settings update: {e}")
116 |
117 | def account_info_listener_thread():
118 | from src.utils import redis_client
119 | from src.trade_execution.alpaca_client import alpaca_client
120 | import json
121 |
122 | logger.info("Starting account info listener thread")
123 | pubsub = redis_client.get_pubsub()
124 | pubsub.subscribe('trade_execution_requests')
125 |
126 | for message in pubsub.listen():
127 | try:
128 | if message['type'] == 'message':
129 | data = json.loads(message['data'])
130 | logger.info(f"Received trade execution request: {data}")
131 |
132 | # Handle account info requests
133 | if data.get('type') == 'account_info_request':
134 | request_id = data.get('request_id')
135 | if request_id:
136 | try:
137 | # Get account summary from Alpaca
138 | account_summary = alpaca_client.get_account_summary()
139 |
140 | # Store the response in Redis with the request ID
141 | response_key = f'account_info_response:{request_id}'
142 | redis_client.set_json(response_key, account_summary, ttl=60) # 1 minute TTL
143 | logger.info(f"Sent account info response for request {request_id}")
144 | except Exception as e:
145 | logger.error(f"Error getting account info: {e}")
146 | # Store error response
147 | response_key = f'account_info_response:{request_id}'
148 | error_response = {
149 | 'error': str(e),
150 | 'status': 'error'
151 | }
152 | redis_client.set_json(response_key, error_response, ttl=60)
153 | except Exception as e:
154 | logger.error(f"Error processing trade execution request: {e}")
155 |
156 | # Start settings listener in a daemon thread
157 | settings_thread = threading.Thread(target=settings_listener_thread, daemon=True)
158 | settings_thread.start()
159 | logger.info("Settings listener thread started")
160 |
161 | # Start account info listener in a daemon thread
162 | account_thread = threading.Thread(target=account_info_listener_thread, daemon=True)
163 | account_thread.start()
164 | logger.info("Account info listener thread started")
165 |
166 | return settings_thread, account_thread
167 |
168 | # Entry point for running as a standalone module
169 | def run_standalone():
170 | import time
171 | import json
172 | import uuid
173 | logger.info("Starting Trade Execution Service as standalone")
174 |
175 | # Create the service for standalone use
176 | service = TradeExecutionService()
177 |
178 | # Start all listeners
179 | settings_thread, account_thread = start_listeners()
180 |
181 | # Keep the main thread alive and actively check for signals
182 | try:
183 | logger.info("Starting active poll for trade signals")
184 | poll_interval = 5 # seconds
185 | running = True
186 | iteration = 0
187 |
188 | while running:
189 | try:
190 | iteration += 1
191 | if iteration % 4 == 0: # Log less frequently
192 | logger.info(f"Polling for trade signals (iteration {iteration})")
193 |
194 | # Get all signal keys from Redis
195 | from src.utils import redis_client
196 | from src.utils import TradeSignal, TradingDecision
197 | from src.config import config
198 |
199 | # Process each trading symbol
200 | for symbol in config.trading.symbols:
201 | signal_key = f"signal:{symbol}"
202 | try:
203 | # Get the signal data
204 | signal_data = redis_client.get_json(signal_key)
205 | if signal_data:
206 | if iteration % 4 == 0: # Log less frequently
207 | logger.info(f"Found signal for {symbol}: {json.dumps(signal_data)}")
208 |
209 | # Create a trade signal object
210 | try:
211 | trade_signal = TradeSignal(**signal_data)
212 |                                 should_execute = False  # initialise so HOLD signals skip the execution branch below
213 | # Only execute if this is a BUY or SELL (not HOLD)
214 | if trade_signal.decision.value != "hold":
215 | # Check if trading is enabled from Redis (not config)
216 | trading_enabled_redis = redis_client.client.get("trading_enabled")
217 | trading_enabled = trading_enabled_redis == "true" if trading_enabled_redis is not None else False
218 | if not trading_enabled:
219 | logger.info(f"Trading is disabled. Not executing {trade_signal.decision.value} for {symbol}")
220 |
221 | # SIMPLE SERVICE APPROACH:
222 | # Create standard service messages that look like any other trade result
223 | from src.utils import TradeResult
224 | import uuid
225 |
226 | # Create a standard skipped trade result
227 | result = TradeResult(
228 | symbol=symbol,
229 | decision=trade_signal.decision,
230 | order_id=f"skipped-disabled-{uuid.uuid4()}",
231 | quantity=None,
232 | price=None,
233 | status="skipped",
234 | error="Trading is currently disabled",
235 | timestamp=datetime.now()
236 | )
237 |
238 | # Save to Redis - UI will process this like any other message
239 | redis_key = f"trade_result:{symbol}"
240 | success = redis_client.set_json(redis_key, result.dict(), ttl=3600)
241 | logger.info(f"Created standard disabled service message for {symbol}")
242 |
243 | # Push an immediate keyspace notification to refresh UI
244 | redis_client.client.publish('__keyspace@0__:' + redis_key, 'set')
245 | logger.info(f"Sent keyspace notification for immediate refresh: {redis_key}")
246 | continue
247 |
248 | # Find most recent result
249 | result_key = f"trade_result:{symbol}"
250 | recent_result = redis_client.get_json(result_key)
251 |
252 | # Only execute if we don't have a recent result or the signal is newer
253 | should_execute = True
254 | if recent_result and 'timestamp' in recent_result and 'timestamp' in signal_data:
255 | signal_time = signal_data['timestamp']
256 | result_time = recent_result['timestamp']
257 | # If signal is older than result, don't execute again
258 | if signal_time <= result_time:
259 | should_execute = False
260 | if iteration % 20 == 0: # Log very infrequently
261 | logger.info(f"Signal for {symbol} is not newer than last result, skipping")
262 |
263 | if should_execute:
264 |                                     # Default to simulated trades unless ALPACA_DEBUG_MODE is set explicitly
265 |                                     import os
266 |                                     os.environ.setdefault("ALPACA_DEBUG_MODE", "true")
267 |
268 | logger.info(f"Executing trade for {symbol}: {trade_signal.decision.value}")
269 | # Import alpaca_client here to avoid circular imports
270 | from src.trade_execution.alpaca_client import alpaca_client
271 | result = alpaca_client.execute_trade(trade_signal)
272 |
273 | if result:
274 | logger.info(f"Trade result for {symbol}: {result.status}")
275 |
276 | # Ensure order_id is always a string and all required fields are present
277 | if not result:
278 | logger.warning(f"No result returned from trade execution for {symbol}")
279 | continue
280 |
281 | if result.order_id is None:
282 | result.order_id = f"unknown-{uuid.uuid4()}"
283 |
284 | # Ensure all fields are valid
285 | if not isinstance(result.order_id, str):
286 | result.order_id = str(result.order_id)
287 |
288 | # Save to Redis
289 | redis_key = f"trade_result:{symbol}"
290 | success = redis_client.set_json(redis_key, result.dict(), ttl=86400)
291 | logger.info(f"Saved trade result to Redis key: {redis_key}: {success}")
292 | except Exception as signal_error:
293 | logger.error(f"Error processing signal for {symbol}: {signal_error}")
294 | except Exception as e:
295 | logger.error(f"Error checking signal for {symbol}: {e}")
296 |
297 | # Sleep between polling cycles
298 | time.sleep(poll_interval)
299 |
300 | except Exception as e:
301 | logger.error(f"Error in polling loop: {e}")
302 | time.sleep(poll_interval)
303 | except KeyboardInterrupt:
304 | logger.info("Shutting down Trade Execution Service")
305 |
306 | # Add this for module execution
307 | if __name__ == "__main__":
308 | run_standalone()
--------------------------------------------------------------------------------
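
The account-info listener above replies to requests published on the trade_execution_requests channel and writes the answer under account_info_response:<request_id> with a 60-second TTL. A sketch of how a client (such as the dashboard) can use that round trip, assuming the trade execution service and its listener threads are running:

    import json, time, uuid
    from src.utils import redis_client

    request_id = str(uuid.uuid4())
    redis_client.publish(
        "trade_execution_requests",
        json.dumps({"type": "account_info_request", "request_id": request_id}),
    )

    # Poll briefly for the reply; it expires after 60 seconds
    for _ in range(10):
        reply = redis_client.get_json(f"account_info_response:{request_id}")
        if reply:
            print(reply)
            break
        time.sleep(0.5)
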
/src/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .logger import get_logger
2 | from .redis_client import redis_client
3 | from .models import RSIData, TradeSignal, TradeResult, TradingDecision, PriceCandle, PriceHistory, MarketStatus
4 | from .force_disabled import force_trading_disabled
5 |
6 | __all__ = [
7 | "get_logger",
8 | "redis_client",
9 | "RSIData",
10 | "TradeSignal",
11 | "TradeResult",
12 | "TradingDecision",
13 | "PriceCandle",
14 | "PriceHistory",
15 | "MarketStatus",
16 | "force_trading_disabled"
17 | ]
--------------------------------------------------------------------------------
/src/utils/force_disabled.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import time
3 | import redis
4 | import os
5 |
6 | # Import version from setup.py
7 | import sys
8 | sys.path.insert(0, '..')
9 | from setup import VERSION
10 |
11 | if len(sys.argv) > 1 and sys.argv[1] == '--license':
12 | print(f"TraderMagic v{VERSION} - Licensed under AGPL-3.0")
13 | sys.exit(0)
14 |
15 | def force_trading_disabled():
16 | """Force trading to be disabled at startup regardless of config settings"""
17 | print("SAFETY: Forcing trading to DISABLED state")
18 |
19 | # Connect to Redis
20 | redis_host = os.getenv('REDIS_HOST', 'redis')
21 | redis_port = int(os.getenv('REDIS_PORT', 6379))
22 | redis_client = redis.Redis(host=redis_host, port=redis_port, db=0, decode_responses=True)
23 |
24 | # Set trading_enabled to false
25 | for attempt in range(5):
26 | try:
27 | redis_client.set("trading_enabled", "false")
28 | print("Successfully set trading_enabled to false in Redis")
29 | return True
30 | except Exception as e:
31 | print(f"Attempt {attempt+1} failed: {e}")
32 | time.sleep(2)
33 |
34 | print("CRITICAL: Failed to disable trading in Redis after multiple attempts")
35 | return False
36 |
37 | if __name__ == "__main__":
38 | force_trading_disabled()
39 |
--------------------------------------------------------------------------------
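
Because every service forces trading_enabled to "false" at startup, trades only happen once that Redis key is switched to the string "true" (normally done from the dashboard). A sketch of toggling it manually for testing, using the same connection settings as the script above:

    import os
    import redis

    r = redis.Redis(host=os.getenv("REDIS_HOST", "redis"),
                    port=int(os.getenv("REDIS_PORT", 6379)),
                    db=0, decode_responses=True)

    # Trading stays off until this key is explicitly the string "true"
    r.set("trading_enabled", "true")
    print("trading_enabled =", r.get("trading_enabled"))
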
/src/utils/logger.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | from loguru import logger
4 |
5 | # Configure loguru logger
6 | log_level = os.getenv("LOG_LEVEL", "INFO")
7 | log_to_console = os.getenv("LOG_TO_CONSOLE", "true").lower() == "true"
8 | service_name = os.getenv("SERVICE_NAME", "unknown")
9 |
10 | # Format for console output (colorized, detailed)
11 | console_format = "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[name]}:{function}:{line} - {message}"
12 |
13 | # Format for docker logs (structured, syslog-compatible)
14 | syslog_format = "{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {extra[name]} | {message}"
15 |
16 | # Remove default logger
17 | logger.remove()
18 |
19 | # Add console logger if enabled
20 | if log_to_console:
21 | logger.add(
22 | sys.stderr,
23 | format=console_format,
24 | level=log_level,
25 | colorize=True,
26 | backtrace=True,
27 | diagnose=True,
28 | )
29 |
30 | # Add structured logger to stderr for Docker to capture
31 | logger.add(
32 | sys.stdout,
33 | format=syslog_format,
34 | level=log_level,
35 | backtrace=False,
36 | diagnose=False,
37 | )
38 |
39 | def get_logger(name):
40 | """
41 | Returns a logger instance with the given name, prefixed with the service name.
42 | """
43 | prefixed_name = f"{service_name}.{name}" if service_name != "unknown" else name
44 | return logger.bind(name=prefixed_name)
--------------------------------------------------------------------------------
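
Usage is the same in every service: bind a component name once and log through it (a sketch; the "example" name is arbitrary):

    from src.utils import get_logger

    logger = get_logger("example")   # appears as "<SERVICE_NAME>.example" in the output
    logger.info("service started")
    logger.debug("only visible when LOG_LEVEL=DEBUG")
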
/src/utils/models.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import Optional, List
3 | from pydantic import BaseModel, Field
4 | from datetime import datetime
5 |
6 | class TradingDecision(str, Enum):
7 | BUY = "buy"
8 | SELL = "sell"
9 | HOLD = "hold"
10 |
11 | class MarketStatus(str, Enum):
12 | OPEN = "open"
13 | CLOSED = "closed"
14 | PRE_MARKET = "pre_market"
15 | AFTER_HOURS = "after_hours"
16 |
17 | class RSIData(BaseModel):
18 | symbol: str
19 | value: float
20 | timestamp: datetime = Field(default_factory=datetime.now)
21 |
22 | class PriceCandle(BaseModel):
23 | symbol: str
24 | open: float
25 | high: float
26 | low: float
27 | close: float
28 | volume: float
29 | timestamp: datetime
30 | market_status: Optional[MarketStatus] = MarketStatus.OPEN
31 |
32 | class PriceHistory(BaseModel):
33 | symbol: str
34 | interval: str
35 | candles: List[PriceCandle]
36 | timestamp: datetime = Field(default_factory=datetime.now)
37 |
38 | class TradeSignal(BaseModel):
39 | symbol: str
40 | decision: TradingDecision
41 | confidence: Optional[float] = None
42 | rsi_value: float
43 | timestamp: datetime = Field(default_factory=datetime.now)
44 |
45 | class TradeResult(BaseModel):
46 | symbol: str
47 | decision: TradingDecision
48 | order_id: str # Non-optional to enforce consistent order IDs
49 | quantity: Optional[float] = None
50 | price: Optional[float] = None
51 | status: str = "unknown" # Default value to prevent validation errors
52 | error: Optional[str] = None
53 | timestamp: datetime = Field(default_factory=datetime.now)
54 |
55 | class Config:
56 | # Add extra validation to ensure order_id is always a string
57 | validate_assignment = True
58 | # Allow for extra fields that might be provided
59 | extra = "ignore"
--------------------------------------------------------------------------------
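
Because the models are pydantic and TradingDecision is a str Enum, they round-trip cleanly through the .dict() calls used by the services. A small sketch with illustrative values:

    from src.utils.models import TradeSignal, TradingDecision

    signal = TradeSignal(symbol="ETH/USD", decision=TradingDecision.BUY, rsi_value=27.3)
    print(signal.decision.value)   # "buy" -- the string the services compare against
    print(signal.dict())           # timestamp defaults to datetime.now()

    # A dict read back from Redis re-validates into the same model
    restored = TradeSignal(**signal.dict())
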
/src/utils/redis_client.py:
--------------------------------------------------------------------------------
1 | import json
2 | import redis
3 | from typing import Any, Dict, Optional
4 | from datetime import datetime
5 | from src.config import config
6 | from src.utils.logger import get_logger
7 |
8 | logger = get_logger("redis_client")
9 |
10 | class DateTimeEncoder(json.JSONEncoder):
11 | """Custom JSON encoder that handles datetime objects"""
12 | def default(self, obj):
13 | if isinstance(obj, datetime):
14 | return obj.isoformat()
15 | return super().default(obj)
16 |
17 | class RedisClient:
18 | def __init__(self):
19 | self.client = redis.Redis(
20 | host=config.redis.host,
21 | port=config.redis.port,
22 | db=config.redis.db,
23 | decode_responses=True
24 | )
25 | try:
26 | self.client.ping()
27 | logger.info("Connected to Redis")
28 |
29 | # Enable keyspace notifications for all events
30 | try:
31 | self.client.config_set('notify-keyspace-events', 'KEA')
32 | logger.info("Redis keyspace notifications enabled")
33 | except Exception as e:
34 | logger.warning(f"Failed to enable Redis keyspace notifications: {e}")
35 |
36 | except redis.ConnectionError:
37 | logger.error("Failed to connect to Redis")
38 | raise
39 |
40 | def scan_iter(self, match=None):
41 | """Return an iterator of keys matching the given pattern"""
42 | try:
43 | for key in self.client.scan_iter(match=match):
44 | yield key
45 | except Exception as e:
46 | logger.error(f"Error scanning Redis keys: {e}")
47 |             return  # end the generator; callers simply receive no more keys
48 |
49 | def get(self, key: str) -> Optional[str]:
50 | """
51 | Get a string value from Redis
52 | """
53 | try:
54 | return self.client.get(key)
55 | except Exception as e:
56 | logger.error(f"Error getting key {key} from Redis: {e}")
57 | return None
58 |
59 | def set(self, key: str, value: str, ttl: Optional[int] = None) -> bool:
60 | """
61 | Set a string value in Redis
62 | """
63 | try:
64 | result = self.client.set(key, value)
65 | if ttl:
66 | self.client.expire(key, ttl)
67 | return result
68 | except Exception as e:
69 | logger.error(f"Error setting key {key} in Redis: {e}")
70 | return False
71 |
72 | def set_json(self, key: str, data: Dict[str, Any], ttl: Optional[int] = None) -> bool:
73 | """
74 | Store JSON data in Redis
75 | """
76 | try:
77 | # Use the custom encoder to handle datetime objects
78 | serialized = json.dumps(data, cls=DateTimeEncoder)
79 | result = self.client.set(key, serialized)
80 | if ttl:
81 | self.client.expire(key, ttl)
82 | return result
83 | except Exception as e:
84 | logger.error(f"Error storing data in Redis: {e}")
85 | return False
86 |
87 | def get_json(self, key: str) -> Optional[Dict[str, Any]]:
88 | """
89 | Retrieve JSON data from Redis
90 | """
91 | try:
92 | data = self.client.get(key)
93 | if data:
94 | return json.loads(data)
95 | return None
96 | except Exception as e:
97 | logger.error(f"Error retrieving data from Redis: {e}")
98 | return None
99 |
100 | def delete(self, key: str) -> bool:
101 | """
102 | Delete a key from Redis
103 | """
104 | try:
105 | return bool(self.client.delete(key))
106 | except Exception as e:
107 | logger.error(f"Error deleting key from Redis: {e}")
108 | return False
109 |
110 | def get_pubsub(self):
111 | """
112 | Get a Redis PubSub object for subscribing to channels
113 | """
114 | try:
115 | return self.client.pubsub()
116 | except Exception as e:
117 | logger.error(f"Error creating Redis PubSub object: {e}")
118 | raise
119 |
120 | def publish(self, channel: str, message: str) -> int:
121 | """
122 | Publish a message to a Redis channel
123 | """
124 | try:
125 | return self.client.publish(channel, message)
126 | except Exception as e:
127 | logger.error(f"Error publishing to Redis channel {channel}: {e}")
128 | return 0
129 |
130 | # Singleton instance
131 | redis_client = RedisClient()
--------------------------------------------------------------------------------
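
Note that set_json serialises datetime values to ISO-8601 strings via DateTimeEncoder, and get_json returns them as plain strings; the pydantic models re-parse them when rebuilt. A short round-trip sketch (the key name is arbitrary):

    from datetime import datetime
    from src.utils import redis_client

    payload = {"symbol": "BTC/USD", "timestamp": datetime.now()}
    redis_client.set_json("example:payload", payload, ttl=60)

    # The timestamp comes back as an ISO string, not a datetime object
    print(redis_client.get_json("example:payload"))
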
/test_alpaca.py:
--------------------------------------------------------------------------------
1 | """
2 | Test script to diagnose Alpaca API connection issues.
3 | """
4 | import os
5 | import requests
6 | from dotenv import load_dotenv
7 |
8 | # Load environment variables
9 | load_dotenv()
10 |
11 | # Print environment variables (mask sensitive parts)
12 | api_key = os.getenv("APCA_API_KEY_ID", "")
13 | api_secret = os.getenv("APCA_API_SECRET_KEY", "")
14 | print(f"APCA_API_KEY_ID: {api_key[:4]}...{api_key[-4:] if len(api_key) > 8 else ''}")
15 | print(f"APCA_API_SECRET_KEY: {api_secret[:4]}...{api_secret[-4:] if len(api_secret) > 8 else ''}")
16 |
17 | # Try direct REST API call
18 | base_url = "https://paper-api.alpaca.markets"
19 | endpoint = f"{base_url}/v2/account"
20 | headers = {
21 | "APCA-API-KEY-ID": api_key,
22 | "APCA-API-SECRET-KEY": api_secret
23 | }
24 |
25 | print(f"\nTesting direct REST API call to: {endpoint}")
26 | try:
27 | response = requests.get(endpoint, headers=headers)
28 | if response.status_code == 200:
29 | account = response.json()
30 | print(f"SUCCESS! Account ID: {account.get('id')}")
31 | print(f"Account status: {account.get('status')}")
32 | print(f"Buying power: ${float(account.get('buying_power', 0)):.2f}")
33 | else:
34 | print(f"ERROR! Status code: {response.status_code}")
35 | print(f"Response: {response.text}")
36 | except Exception as e:
37 | print(f"Exception: {e}")
38 |
39 | # Test with alternative API URL
40 | base_url = "https://api.alpaca.markets"
41 | endpoint = f"{base_url}/v2/account"
42 | print(f"\nTesting with alternative URL: {endpoint}")
43 | try:
44 | response = requests.get(endpoint, headers=headers)
45 | if response.status_code == 200:
46 | account = response.json()
47 | print(f"SUCCESS! Account ID: {account.get('id')}")
48 | print(f"Account status: {account.get('status')}")
49 | print(f"Buying power: ${float(account.get('buying_power', 0)):.2f}")
50 | else:
51 | print(f"ERROR! Status code: {response.status_code}")
52 | print(f"Response: {response.text}")
53 | except Exception as e:
54 | print(f"Exception: {e}")
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Test package initialization
--------------------------------------------------------------------------------
/tests/test_ai_decision.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import asyncio
3 | from unittest.mock import patch, MagicMock, AsyncMock
4 | import os
5 | import sys
6 | from datetime import datetime
7 |
8 | # Add the src directory to the path so we can import our modules
9 | sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
10 |
11 | from src.ai_decision.service import AIDecisionService
12 | from src.utils.models import RSIData, TradeSignal, TradingDecision
13 |
14 |
15 | class TestAIDecisionService(unittest.TestCase):
16 |
17 | def setUp(self):
18 | # Create a test instance of the service
19 | self.service = AIDecisionService()
20 |
21 | # Create a test RSI data object
22 | self.rsi_data = RSIData(
23 | symbol="BTC/USD",
24 | value=25.5, # Low RSI value that should trigger a buy
25 | timestamp=datetime.now()
26 | )
27 |
28 |     @patch('src.ai_decision.ollama_client.ollama_client.generate', new_callable=AsyncMock)
29 |     def test_analyze_rsi_buy(self, mock_generate):
30 |         # Mock the async ollama_client.generate method; AsyncMock resolves the
31 |         # awaited call inside the event loop created by asyncio.run()
32 |         mock_generate.return_value = "Based on the RSI value of 25.5, I recommend to buy."
33 |
34 | # Call the method
35 | result = asyncio.run(self.service.analyze_rsi(self.rsi_data))
36 |
37 | # Verify the response
38 | self.assertIsInstance(result, TradeSignal)
39 | self.assertEqual(result.symbol, "BTC/USD")
40 | self.assertEqual(result.decision, TradingDecision.BUY)
41 | self.assertEqual(result.rsi_value, 25.5)
42 |
43 | # Verify ollama_client.generate was called with the correct arguments
44 | mock_generate.assert_called_once()
45 |
46 |     @patch('src.ai_decision.ollama_client.ollama_client.generate', new_callable=AsyncMock)
47 |     def test_analyze_rsi_sell(self, mock_generate):
48 |         # Mock the async ollama_client.generate method to return a sell
49 |         # decision when awaited
50 |         mock_generate.return_value = "sell"
51 |
52 | # Set RSI value to indicate overbought condition
53 | self.rsi_data.value = 75.5
54 |
55 | # Call the method
56 | result = asyncio.run(self.service.analyze_rsi(self.rsi_data))
57 |
58 | # Verify the response
59 | self.assertEqual(result.decision, TradingDecision.SELL)
60 |
61 |     @patch('src.ai_decision.ollama_client.ollama_client.generate', new_callable=AsyncMock)
62 |     def test_analyze_rsi_hold(self, mock_generate):
63 |         # Mock the async ollama_client.generate method to return a hold
64 |         # decision when awaited
65 |         mock_generate.return_value = "Based on the RSI value, I would hold."
66 |
67 | # Set RSI value to neutral range
68 | self.rsi_data.value = 50.0
69 |
70 | # Call the method
71 | result = asyncio.run(self.service.analyze_rsi(self.rsi_data))
72 |
73 | # Verify the response
74 | self.assertEqual(result.decision, TradingDecision.HOLD)
75 |
76 | @patch('src.ai_decision.ollama_client.ollama_client.generate')
77 | def test_analyze_rsi_error(self, mock_generate):
78 | # Mock the ollama_client.generate method to raise an exception
79 | mock_generate.side_effect = Exception("API Error")
80 |
81 | # Call the method
82 | result = asyncio.run(self.service.analyze_rsi(self.rsi_data))
83 |
84 | # Verify the response is None
85 | self.assertIsNone(result)
86 |
87 | if __name__ == '__main__':
88 | unittest.main()
--------------------------------------------------------------------------------
/tests/test_rsi_client.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from unittest.mock import patch, MagicMock
3 | import json
4 | import os
5 | import sys
6 |
7 | # Add the src directory to the path so we can import our modules
8 | sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
9 |
10 | from src.data_retrieval.taapi_client import TaapiClient
11 | from src.utils.models import RSIData
12 |
13 | class TestTaapiClient(unittest.TestCase):
14 |
15 | @patch('requests.get')
16 | def test_get_rsi_success(self, mock_get):
17 | # Mock the response
18 |         mock_response = MagicMock(status_code=200, text='{"value": 42.5}')
19 | mock_response.json.return_value = {"value": 42.5}
20 | mock_response.raise_for_status.return_value = None
21 | mock_get.return_value = mock_response
22 |
23 | # Create client with test API key
24 | client = TaapiClient()
25 | client.api_key = "test_api_key"
26 |
27 | # Call the method
28 | result = client.get_rsi("BTC/USD")
29 |
30 | # Verify the response
31 | self.assertIsInstance(result, RSIData)
32 | self.assertEqual(result.symbol, "BTC/USD")
33 | self.assertEqual(result.value, 42.5)
34 |
35 | # Verify the API was called with correct parameters
36 | mock_get.assert_called_once()
37 | args, kwargs = mock_get.call_args
38 | self.assertEqual(args[0], "https://api.taapi.io/rsi")
39 | self.assertEqual(kwargs['params']['symbol'], "BTC/USD")
40 | self.assertEqual(kwargs['params']['secret'], "test_api_key")
41 |
42 | @patch('requests.get')
43 | def test_get_rsi_error(self, mock_get):
44 | # Mock the response to raise an exception
45 | mock_get.side_effect = Exception("API Error")
46 |
47 | # Create client with test API key
48 | client = TaapiClient()
49 | client.api_key = "test_api_key"
50 |
51 | # Call the method and expect an exception
52 | with self.assertRaises(Exception):
53 | client.get_rsi("BTC/USD")
54 |
55 | if __name__ == '__main__':
56 | unittest.main()
--------------------------------------------------------------------------------