├── cli ├── __init__.py ├── models.py ├── static │ └── welcome.txt └── utils.py ├── requirements_vercel.txt ├── assets ├── risk.png ├── analyst.png ├── schema.png ├── trader.png ├── wechat.png ├── Dashboard.png ├── dark_index.png ├── researcher.png ├── cli │ ├── cli_init.png │ ├── cli_news.png │ ├── cli_technical.png │ └── cli_transaction.png ├── light_index.png └── TauricResearch.png ├── trigger-config.yaml ├── requirements.txt ├── tradingagents ├── graph │ ├── __init__.py │ ├── signal_processing.py │ ├── propagation.py │ ├── conditional_logic.py │ ├── reflection.py │ ├── setup.py │ └── trading_graph.py ├── default_config.py ├── dataflows │ ├── config.py │ ├── utils.py │ ├── finnhub_utils.py │ ├── __init__.py │ ├── stockstats_utils.py │ ├── googlenews_utils.py │ ├── reddit_utils.py │ └── yfin_utils.py └── agents │ ├── __init__.py │ ├── trader │ └── trader.py │ ├── managers │ ├── research_manager.py │ └── risk_manager.py │ ├── utils │ ├── agent_states.py │ └── memory.py │ ├── researchers │ ├── bull_researcher.py │ └── bear_researcher.py │ ├── analysts │ ├── social_media_analyst.py │ ├── news_analyst.py │ ├── fundamentals_analyst.py │ └── market_analyst.py │ └── risk_mgmt │ ├── neutral_debator.py │ ├── conservative_debator.py │ └── aggresive_debator.py ├── requirements_web.txt ├── .vercelignore ├── .dockerignore ├── main.py ├── Dockerfile ├── run_web.py ├── cloudbuild.yaml ├── api └── index.py ├── .gitignore ├── VERCEL_TEST.md ├── CRYPTO_MODIFICATIONS.md ├── VERCEL_DEPLOYMENT.md ├── test_crypto.py ├── web_app_vercel.py ├── simple_web.py ├── templates └── simple_index.html ├── LICENSE └── README.md /cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements_vercel.txt: -------------------------------------------------------------------------------- 1 | Flask==3.0.0 2 | requests==2.31.0 
-------------------------------------------------------------------------------- /assets/risk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/risk.png -------------------------------------------------------------------------------- /assets/analyst.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/analyst.png -------------------------------------------------------------------------------- /assets/schema.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/schema.png -------------------------------------------------------------------------------- /assets/trader.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/trader.png -------------------------------------------------------------------------------- /assets/wechat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/wechat.png -------------------------------------------------------------------------------- /assets/Dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/Dashboard.png -------------------------------------------------------------------------------- /assets/dark_index.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/dark_index.png -------------------------------------------------------------------------------- /assets/researcher.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/researcher.png -------------------------------------------------------------------------------- /assets/cli/cli_init.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/cli/cli_init.png -------------------------------------------------------------------------------- /assets/cli/cli_news.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/cli/cli_news.png -------------------------------------------------------------------------------- /assets/light_index.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/light_index.png -------------------------------------------------------------------------------- /assets/TauricResearch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/TauricResearch.png -------------------------------------------------------------------------------- /assets/cli/cli_technical.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/cli/cli_technical.png -------------------------------------------------------------------------------- /assets/cli/cli_transaction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0x0funky/TradingAgents-crypto/HEAD/assets/cli/cli_transaction.png -------------------------------------------------------------------------------- /trigger-config.yaml: 
-------------------------------------------------------------------------------- 1 | name: github-tradingagents-crypto-trigger 2 | description: "Auto-deploy TradingAgents Crypto on push to main" 3 | github: 4 | owner: 0x0funky 5 | name: TradingAgents-crypto 6 | push: 7 | branch: "^main$" 8 | filename: cloudbuild.yaml 9 | -------------------------------------------------------------------------------- /cli/models.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import List, Optional, Dict 3 | from pydantic import BaseModel 4 | 5 | 6 | class AnalystType(str, Enum): 7 | MARKET = "market" 8 | SOCIAL = "social" 9 | NEWS = "news" 10 | FUNDAMENTALS = "fundamentals" 11 | -------------------------------------------------------------------------------- /cli/static/welcome.txt: -------------------------------------------------------------------------------- 1 | 2 | ______ ___ ___ __ 3 | /_ __/________ _____/ (_)___ ____ _/ | ____ ____ ____ / /______ 4 | / / / ___/ __ `/ __ / / __ \/ __ `/ /| |/ __ `/ _ \/ __ \/ __/ ___/ 5 | / / / / / /_/ / /_/ / / / / / /_/ / ___ / /_/ / __/ / / / /_(__ ) 6 | /_/ /_/ \__,_/\__,_/_/_/ /_/\__, /_/ |_\__, /\___/_/ /_/\__/____/ 7 | /____/ /____/ 8 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | typing-extensions 2 | langchain-openai 3 | langchain-experimental 4 | pandas 5 | yfinance 6 | praw 7 | feedparser 8 | stockstats 9 | eodhd 10 | langgraph 11 | chromadb 12 | setuptools 13 | backtrader 14 | akshare 15 | tushare 16 | finnhub-python 17 | parsel 18 | requests 19 | tqdm 20 | pytz 21 | redis 22 | chainlit 23 | rich 24 | questionary 25 | langchain_anthropic 26 | langchain-google-genai 27 | Flask==2.3.3 28 | Flask-SocketIO==5.3.6 29 | python-socketio==5.8.0 30 | python-engineio==4.7.1 31 | 
-------------------------------------------------------------------------------- /tradingagents/graph/__init__.py: -------------------------------------------------------------------------------- 1 | # TradingAgents/graph/__init__.py 2 | 3 | from .trading_graph import TradingAgentsGraph 4 | from .conditional_logic import ConditionalLogic 5 | from .setup import GraphSetup 6 | from .propagation import Propagator 7 | from .reflection import Reflector 8 | from .signal_processing import SignalProcessor 9 | 10 | __all__ = [ 11 | "TradingAgentsGraph", 12 | "ConditionalLogic", 13 | "GraphSetup", 14 | "Propagator", 15 | "Reflector", 16 | "SignalProcessor", 17 | ] 18 | -------------------------------------------------------------------------------- /requirements_web.txt: -------------------------------------------------------------------------------- 1 | # Flask Web Framework 2 | Flask==2.3.3 3 | Flask-SocketIO==5.3.6 4 | python-socketio==5.8.0 5 | python-engineio==4.7.1 6 | 7 | # LangChain and LLM Dependencies 8 | langchain-openai 9 | langchain-experimental 10 | langchain_anthropic 11 | langchain-google-genai 12 | langgraph 13 | 14 | # Data Analysis and Finance 15 | pandas 16 | yfinance 17 | stockstats 18 | pytz 19 | 20 | # Vector Database 21 | chromadb 22 | 23 | # Web Scraping and Data Sources 24 | requests 25 | feedparser 26 | praw 27 | parsel 28 | tqdm 29 | finnhub-python 30 | 31 | # Crypto-specific data sources 32 | eodhd 33 | 34 | # Additional utilities 35 | typing-extensions 36 | setuptools 37 | rich -------------------------------------------------------------------------------- /tradingagents/default_config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | DEFAULT_CONFIG = { 4 | "project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), 5 | "results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"), 6 | "data_dir": os.getenv("TRADINGAGENTS_DATA_DIR", "./data"), 7 | "data_cache_dir": 
os.path.join( 8 | os.path.abspath(os.path.join(os.path.dirname(__file__), ".")), 9 | "dataflows/data_cache", 10 | ), 11 | # LLM settings 12 | "llm_provider": "openai", 13 | "deep_think_llm": "o4-mini", 14 | "quick_think_llm": "gpt-4o-mini", 15 | "backend_url": "https://api.openai.com/v1", 16 | # Debate and discussion settings 17 | "max_debate_rounds": 1, 18 | "max_risk_discuss_rounds": 1, 19 | "max_recur_limit": 100, 20 | # Tool settings 21 | "online_tools": True, 22 | } 23 | -------------------------------------------------------------------------------- /.vercelignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | env/ 8 | venv/ 9 | ENV/ 10 | env.bak/ 11 | venv.bak/ 12 | 13 | # Results and cache 14 | results/ 15 | eval_results/ 16 | data_cache/ 17 | *.log 18 | 19 | # IDE 20 | .vscode/ 21 | .idea/ 22 | *.swp 23 | *.swo 24 | 25 | # OS 26 | .DS_Store 27 | Thumbs.db 28 | 29 | # Local development 30 | .env 31 | .env.local 32 | .env.development.local 33 | .env.test.local 34 | .env.production.local 35 | 36 | # Dependencies (use requirements_vercel.txt instead) 37 | requirements.txt 38 | requirements_web.txt 39 | 40 | # Documentation 41 | WEB_INTERFACE_README.md 42 | WEB_UPDATES_SUMMARY.md 43 | CRYPTO_MODIFICATIONS.md 44 | 45 | # CLI application 46 | cli/ 47 | main.py 48 | test_crypto.py 49 | 50 | # Original web app with SocketIO 51 | web_app.py 52 | simple_web.py 53 | run_web.py -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Git 2 | .git 3 | .gitignore 4 | 5 | # Documentation 6 | *.md 7 | README.md 8 | docs/ 9 | 10 | # Python 11 | __pycache__/ 12 | *.py[cod] 13 | *$py.class 14 | *.so 15 | .Python 16 | env/ 17 | build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 
26 | sdist/ 27 | var/ 28 | wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | 33 | # Virtual environments 34 | venv/ 35 | ENV/ 36 | env/ 37 | .venv/ 38 | 39 | # IDE 40 | .vscode/ 41 | .idea/ 42 | *.swp 43 | *.swo 44 | *~ 45 | 46 | # OS 47 | .DS_Store 48 | Thumbs.db 49 | 50 | # Logs 51 | *.log 52 | logs/*.log 53 | 54 | # Testing 55 | .pytest_cache/ 56 | .coverage 57 | htmlcov/ 58 | .tox/ 59 | 60 | # CLI assets (not needed for web deployment) 61 | assets/cli/ 62 | cli/ 63 | 64 | # Development files 65 | requirements_full.txt 66 | 67 | # Cache directories 68 | .cache/ 69 | node_modules/ -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | from tradingagents.graph.trading_graph import TradingAgentsGraph 2 | from tradingagents.default_config import DEFAULT_CONFIG 3 | 4 | # Create a custom config 5 | # config = DEFAULT_CONFIG.copy() 6 | # config["llm_provider"] = "google" # Use a different model 7 | # config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend 8 | # config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model 9 | # config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model 10 | # config["max_debate_rounds"] = 1 # Increase debate rounds 11 | # config["online_tools"] = True # Increase debate rounds 12 | 13 | # Initialize with custom config 14 | ta = TradingAgentsGraph(debug=True) 15 | 16 | # forward propagate with cryptocurrency 17 | _, decision = ta.propagate("BTC", "2024-05-10") 18 | print(decision) 19 | 20 | # Memorize mistakes and reflect 21 | # ta.reflect_and_remember(1000) # parameter is the position returns 22 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the official Python runtime as the base image 2 | FROM 
python:3.11-slim 3 | 4 | # Set environment variables 5 | ENV PYTHONDONTWRITEBYTECODE=1 6 | ENV PYTHONUNBUFFERED=1 7 | 8 | # Set work directory 9 | WORKDIR /app 10 | 11 | # Install system dependencies 12 | RUN apt-get update && apt-get install -y \ 13 | build-essential \ 14 | curl \ 15 | software-properties-common \ 16 | git \ 17 | && rm -rf /var/lib/apt/lists/* 18 | 19 | # Copy requirements first to leverage Docker cache 20 | COPY requirements_web.txt . 21 | RUN pip install --no-cache-dir -r requirements_web.txt 22 | 23 | # Copy project files 24 | COPY . . 25 | 26 | # Create necessary directories 27 | RUN mkdir -p logs data results 28 | 29 | # Expose port 30 | EXPOSE 8080 31 | 32 | # Health check 33 | HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ 34 | CMD curl -f http://localhost:8080/health || exit 1 35 | 36 | # Run the application 37 | CMD ["python", "web_app.py"] -------------------------------------------------------------------------------- /tradingagents/dataflows/config.py: -------------------------------------------------------------------------------- 1 | import tradingagents.default_config as default_config 2 | from typing import Dict, Optional 3 | 4 | # Use default config but allow it to be overridden 5 | _config: Optional[Dict] = None 6 | DATA_DIR: Optional[str] = None 7 | 8 | 9 | def initialize_config(): 10 | """Initialize the configuration with default values.""" 11 | global _config, DATA_DIR 12 | if _config is None: 13 | _config = default_config.DEFAULT_CONFIG.copy() 14 | DATA_DIR = _config["data_dir"] 15 | 16 | 17 | def set_config(config: Dict): 18 | """Update the configuration with custom values.""" 19 | global _config, DATA_DIR 20 | if _config is None: 21 | _config = default_config.DEFAULT_CONFIG.copy() 22 | _config.update(config) 23 | DATA_DIR = _config["data_dir"] 24 | 25 | 26 | def get_config() -> Dict: 27 | """Get the current configuration.""" 28 | if _config is None: 29 | initialize_config() 30 | return 
_config.copy() 31 | 32 | 33 | # Initialize with default config 34 | initialize_config() 35 | -------------------------------------------------------------------------------- /tradingagents/dataflows/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import pandas as pd 4 | from datetime import date, timedelta, datetime 5 | from typing import Annotated 6 | 7 | SavePathType = Annotated[str, "File path to save data. If None, data is not saved."] 8 | 9 | def save_output(data: pd.DataFrame, tag: str, save_path: SavePathType = None) -> None: 10 | if save_path: 11 | data.to_csv(save_path) 12 | print(f"{tag} saved to {save_path}") 13 | 14 | 15 | def get_current_date(): 16 | return date.today().strftime("%Y-%m-%d") 17 | 18 | 19 | def decorate_all_methods(decorator): 20 | def class_decorator(cls): 21 | for attr_name, attr_value in cls.__dict__.items(): 22 | if callable(attr_value): 23 | setattr(cls, attr_name, decorator(attr_value)) 24 | return cls 25 | 26 | return class_decorator 27 | 28 | 29 | def get_next_weekday(date): 30 | 31 | if not isinstance(date, datetime): 32 | date = datetime.strptime(date, "%Y-%m-%d") 33 | 34 | if date.weekday() >= 5: 35 | days_to_add = 7 - date.weekday() 36 | next_weekday = date + timedelta(days=days_to_add) 37 | return next_weekday 38 | else: 39 | return date 40 | -------------------------------------------------------------------------------- /run_web.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | TradingAgents Web Application Launcher 4 | """ 5 | 6 | import sys 7 | import os 8 | from pathlib import Path 9 | 10 | # Add current directory to Python path 11 | sys.path.insert(0, str(Path(__file__).parent)) 12 | 13 | try: 14 | from web_app import app, socketio 15 | print("✅ Successfully imported TradingAgents web application") 16 | except ImportError as e: 17 | print(f"❌ Failed to import required 
modules: {e}") 18 | print("Please make sure all dependencies are installed:") 19 | print("pip install -r requirements_web.txt") 20 | sys.exit(1) 21 | 22 | if __name__ == '__main__': 23 | print("🚀 Starting TradingAgents Web Application...") 24 | print("📊 Dashboard will be available at: http://localhost:5000") 25 | print("🔄 Real-time analysis updates via WebSocket") 26 | print("📱 Responsive design for desktop and mobile") 27 | print("=" * 50) 28 | 29 | try: 30 | socketio.run(app, debug=False, host='0.0.0.0', port=5000) 31 | except KeyboardInterrupt: 32 | print("\n👋 TradingAgents Web Application stopped by user") 33 | except Exception as e: 34 | print(f"❌ Error starting web application: {e}") 35 | sys.exit(1) -------------------------------------------------------------------------------- /tradingagents/graph/signal_processing.py: -------------------------------------------------------------------------------- 1 | # TradingAgents/graph/signal_processing.py 2 | 3 | from langchain_openai import ChatOpenAI 4 | 5 | 6 | class SignalProcessor: 7 | """Processes trading signals to extract actionable decisions.""" 8 | 9 | def __init__(self, quick_thinking_llm: ChatOpenAI): 10 | """Initialize with an LLM for processing.""" 11 | self.quick_thinking_llm = quick_thinking_llm 12 | 13 | def process_signal(self, full_signal: str) -> str: 14 | """ 15 | Process a full trading signal to extract the core decision. 16 | 17 | Args: 18 | full_signal: Complete trading signal text 19 | 20 | Returns: 21 | Extracted decision (BUY, SELL, or HOLD) 22 | """ 23 | messages = [ 24 | ( 25 | "system", 26 | "You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SELL, BUY, or HOLD. 
Provide only the extracted decision (SELL, BUY, or HOLD) as your output, without adding any additional text or information.", 27 | ), 28 | ("human", full_signal), 29 | ] 30 | 31 | return self.quick_thinking_llm.invoke(messages).content 32 | -------------------------------------------------------------------------------- /cloudbuild.yaml: -------------------------------------------------------------------------------- 1 | steps: 2 | # Build the container image 3 | - name: 'gcr.io/cloud-builders/docker' 4 | args: [ 5 | 'build', 6 | '-t', 'gcr.io/$PROJECT_ID/tradingagents-crypto:$BUILD_ID', 7 | '-t', 'gcr.io/$PROJECT_ID/tradingagents-crypto:latest', 8 | '.' 9 | ] 10 | 11 | # Push the container image to Container Registry 12 | - name: 'gcr.io/cloud-builders/docker' 13 | args: ['push', 'gcr.io/$PROJECT_ID/tradingagents-crypto:$BUILD_ID'] 14 | 15 | - name: 'gcr.io/cloud-builders/docker' 16 | args: ['push', 'gcr.io/$PROJECT_ID/tradingagents-crypto:latest'] 17 | 18 | # Deploy container image to Cloud Run 19 | - name: 'gcr.io/google.com/cloudsdktool/cloud-sdk' 20 | entrypoint: gcloud 21 | args: [ 22 | 'run', 'deploy', 'tradingagents-crypto', 23 | '--image', 'gcr.io/$PROJECT_ID/tradingagents-crypto:latest', 24 | '--region', 'us-central1', 25 | '--platform', 'managed', 26 | '--allow-unauthenticated', 27 | '--memory', '2Gi', 28 | '--cpu', '2', 29 | '--timeout', '3600', 30 | '--concurrency', '10', 31 | '--port', '8080', 32 | '--set-env-vars', 'ENVIRONMENT=production' 33 | ] 34 | 35 | images: 36 | - 'gcr.io/$PROJECT_ID/tradingagents-crypto:$BUILD_ID' 37 | - 'gcr.io/$PROJECT_ID/tradingagents-crypto:latest' 38 | 39 | timeout: '1200s' 40 | 41 | options: 42 | machineType: 'E2_HIGHCPU_8' 43 | diskSizeGb: '100' -------------------------------------------------------------------------------- /tradingagents/dataflows/finnhub_utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | 5 | def get_data_in_range(ticker, 
start_date, end_date, data_type, data_dir, period=None): 6 | """ 7 | Gets finnhub data saved and processed on disk. 8 | Args: 9 | start_date (str): Start date in YYYY-MM-DD format. 10 | end_date (str): End date in YYYY-MM-DD format. 11 | data_type (str): Type of data from finnhub to fetch. Can be insider_trans, SEC_filings, news_data, insider_senti, or fin_as_reported. 12 | data_dir (str): Directory where the data is saved. 13 | period (str): Default to none, if there is a period specified, should be annual or quarterly. 14 | """ 15 | 16 | if period: 17 | data_path = os.path.join( 18 | data_dir, 19 | "finnhub_data", 20 | data_type, 21 | f"{ticker}_{period}_data_formatted.json", 22 | ) 23 | else: 24 | data_path = os.path.join( 25 | data_dir, "finnhub_data", data_type, f"{ticker}_data_formatted.json" 26 | ) 27 | 28 | data = open(data_path, "r") 29 | data = json.load(data) 30 | 31 | # filter keys (date, str in format YYYY-MM-DD) by the date range (str, str in format YYYY-MM-DD) 32 | filtered_data = {} 33 | for key, value in data.items(): 34 | if start_date <= key <= end_date and len(value) > 0: 35 | filtered_data[key] = value 36 | return filtered_data 37 | -------------------------------------------------------------------------------- /tradingagents/dataflows/__init__.py: -------------------------------------------------------------------------------- 1 | from .finnhub_utils import get_data_in_range 2 | from .googlenews_utils import getNewsData 3 | from .yfin_utils import YFinanceUtils 4 | from .reddit_utils import fetch_top_from_category 5 | from .stockstats_utils import StockstatsUtils 6 | from .yfin_utils import YFinanceUtils 7 | 8 | from .interface import ( 9 | # News and sentiment functions 10 | get_finnhub_news, 11 | get_finnhub_company_insider_sentiment, 12 | get_finnhub_company_insider_transactions, 13 | get_google_news, 14 | get_reddit_global_news, 15 | get_reddit_company_news, 16 | # Financial statements functions 17 | get_simfin_balance_sheet, 18 | 
get_simfin_cashflow, 19 | get_simfin_income_statements, 20 | # Technical analysis functions 21 | get_stock_stats_indicators_window, 22 | get_stockstats_indicator, 23 | # Market data functions 24 | get_YFin_data_window, 25 | get_YFin_data, 26 | ) 27 | 28 | __all__ = [ 29 | # News and sentiment functions 30 | "get_finnhub_news", 31 | "get_finnhub_company_insider_sentiment", 32 | "get_finnhub_company_insider_transactions", 33 | "get_google_news", 34 | "get_reddit_global_news", 35 | "get_reddit_company_news", 36 | # Financial statements functions 37 | "get_simfin_balance_sheet", 38 | "get_simfin_cashflow", 39 | "get_simfin_income_statements", 40 | # Technical analysis functions 41 | "get_stock_stats_indicators_window", 42 | "get_stockstats_indicator", 43 | # Market data functions 44 | "get_YFin_data_window", 45 | "get_YFin_data", 46 | ] 47 | -------------------------------------------------------------------------------- /tradingagents/agents/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils.agent_utils import Toolkit, create_msg_delete 2 | from .utils.agent_states import AgentState, InvestDebateState, RiskDebateState 3 | from .utils.memory import FinancialSituationMemory 4 | 5 | from .analysts.fundamentals_analyst import create_fundamentals_analyst 6 | from .analysts.market_analyst import create_market_analyst 7 | from .analysts.news_analyst import create_news_analyst 8 | from .analysts.social_media_analyst import create_social_media_analyst 9 | 10 | from .researchers.bear_researcher import create_bear_researcher 11 | from .researchers.bull_researcher import create_bull_researcher 12 | 13 | from .risk_mgmt.aggresive_debator import create_risky_debator 14 | from .risk_mgmt.conservative_debator import create_safe_debator 15 | from .risk_mgmt.neutral_debator import create_neutral_debator 16 | 17 | from .managers.research_manager import create_research_manager 18 | from .managers.risk_manager import 
create_risk_manager 19 | 20 | from .trader.trader import create_trader 21 | 22 | __all__ = [ 23 | "FinancialSituationMemory", 24 | "Toolkit", 25 | "AgentState", 26 | "create_msg_delete", 27 | "InvestDebateState", 28 | "RiskDebateState", 29 | "create_bear_researcher", 30 | "create_bull_researcher", 31 | "create_research_manager", 32 | "create_fundamentals_analyst", 33 | "create_market_analyst", 34 | "create_neutral_debator", 35 | "create_news_analyst", 36 | "create_risky_debator", 37 | "create_risk_manager", 38 | "create_safe_debator", 39 | "create_social_media_analyst", 40 | "create_trader", 41 | ] 42 | -------------------------------------------------------------------------------- /api/index.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, jsonify 2 | import sys 3 | import os 4 | 5 | # Add the project root to Python path 6 | project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 7 | sys.path.insert(0, project_root) 8 | 9 | app = Flask(__name__) 10 | app.config['SECRET_KEY'] = 'tradingagents_secret_key' 11 | app.config['DEBUG'] = False 12 | 13 | @app.route('/') 14 | def index(): 15 | return jsonify({ 16 | 'message': 'Trading Agents Crypto - Vercel Demo', 17 | 'status': 'running', 18 | 'environment': 'vercel', 19 | 'version': '1.0.0-test' 20 | }) 21 | 22 | @app.route('/health') 23 | def health(): 24 | return jsonify({ 25 | 'status': 'healthy', 26 | 'message': 'Application is running successfully on Vercel' 27 | }) 28 | 29 | @app.route('/api/info') 30 | def api_info(): 31 | return jsonify({ 32 | 'name': 'Trading Agents Crypto', 33 | 'environment': 'Vercel Serverless', 34 | 'mode': 'Demo/Test', 35 | 'limitations': [ 36 | 'No real-time analysis (SocketIO not supported)', 37 | 'Limited execution time (5 minutes)', 38 | 'No persistent storage', 39 | 'Simplified functionality' 40 | ] 41 | }) 42 | 43 | # Error handlers 44 | @app.errorhandler(404) 45 | def not_found(error): 46 | return 
jsonify({'error': 'Not found'}), 404 47 | 48 | @app.errorhandler(500) 49 | def internal_error(error): 50 | return jsonify({'error': 'Internal server error'}), 500 51 | 52 | # This is what Vercel expects 53 | def handler(event, context): 54 | return app(event, context) 55 | 56 | # Export the app 57 | vercel_app = app 58 | 59 | if __name__ == '__main__': 60 | app.run(debug=True) -------------------------------------------------------------------------------- /tradingagents/graph/propagation.py: -------------------------------------------------------------------------------- 1 | # TradingAgents/graph/propagation.py 2 | 3 | from typing import Dict, Any 4 | from tradingagents.agents.utils.agent_states import ( 5 | AgentState, 6 | InvestDebateState, 7 | RiskDebateState, 8 | ) 9 | 10 | 11 | class Propagator: 12 | """Handles state initialization and propagation through the graph.""" 13 | 14 | def __init__(self, max_recur_limit=100): 15 | """Initialize with configuration parameters.""" 16 | self.max_recur_limit = max_recur_limit 17 | 18 | def create_initial_state( 19 | self, company_name: str, trade_date: str 20 | ) -> Dict[str, Any]: 21 | """Create the initial state for the agent graph.""" 22 | return { 23 | "messages": [("human", company_name)], 24 | "company_of_interest": company_name, 25 | "trade_date": str(trade_date), 26 | "investment_debate_state": InvestDebateState( 27 | {"history": "", "current_response": "", "count": 0} 28 | ), 29 | "risk_debate_state": RiskDebateState( 30 | { 31 | "history": "", 32 | "current_risky_response": "", 33 | "current_safe_response": "", 34 | "current_neutral_response": "", 35 | "count": 0, 36 | } 37 | ), 38 | "market_report": "", 39 | "fundamentals_report": "", 40 | "sentiment_report": "", 41 | "news_report": "", 42 | } 43 | 44 | def get_graph_args(self) -> Dict[str, Any]: 45 | """Get arguments for the graph invocation.""" 46 | return { 47 | "stream_mode": "values", 48 | "config": {"recursion_limit": self.max_recur_limit}, 49 | } 50 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.so 6 | .Python 7 | build/ 8 | develop-eggs/ 9 | dist/ 10 | downloads/ 11 | eggs/ 12 | .eggs/ 13 | lib/ 14 | lib64/ 15 | parts/ 16 | sdist/ 17 | var/ 18 | wheels/ 19 | share/python-wheels/ 20 | *.egg-info/ 21 | .installed.cfg 22 | *.egg 23 | MANIFEST 24 | 25 | # PyInstaller 26 | # Usually these files are written by a python script from a template 27 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 28 | *.manifest 29 | *.spec 30 | 31 | # Installer logs 32 | pip-log.txt 33 | pip-delete-this-directory.txt 34 | 35 | # Unit test / coverage reports 36 | htmlcov/ 37 | .tox/ 38 | .nox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *.cover 45 | *.py,cover 46 | .hypothesis/ 47 | .pytest_cache/ 48 | cover/ 49 | 50 | # Virtual environments 51 | env/ 52 | venv/ 53 | ENV/ 54 | env.bak/ 55 | venv.bak/ 56 | 57 | # Environment variables 58 | .env 59 | .env.local 60 | .env.development.local 61 | .env.test.local 62 | .env.production.local 63 | 64 | # IDEs and editors 65 | .vscode/ 66 | .idea/ 67 | *.swp 68 | *.swo 69 | *~ 70 | .DS_Store 71 | Thumbs.db 72 | 73 | # Jupyter Notebook 74 | .ipynb_checkpoints 75 | 76 | # Data files 77 | *.csv 78 | *.json 79 | *.xlsx 80 | *.xls 81 | *.parquet 82 | *.pickle 83 | *.pkl 84 | 85 | # Logs 86 | *.log 87 | logs/ 88 | 89 | # Temporary files 90 | *.tmp 91 | *.temp 92 | .tmp/ 93 | .temp/ 94 | 95 | # Node.js (if you add any frontend build tools) 96 | node_modules/ 97 | npm-debug.log* 98 | yarn-debug.log* 99 | yarn-error.log* 100 | 101 | # API Keys and sensitive data 102 | api_keys.txt 103 | secrets.txt 104 | config.json 105 | 106 | # Model outputs and results 107 | outputs/ 108 | results/ 109 | eval_results/ 110 | eval_data/ 111 | src/ 112 | 113 
| # Web app specific 114 | sessions/ 115 | *.session 116 | uploads/ 117 | 118 | # System files 119 | .DS_Store 120 | .DS_Store? 121 | ._* 122 | .Spotlight-V100 123 | .Trashes 124 | ehthumbs.db 125 | Thumbs.db 126 | -------------------------------------------------------------------------------- /VERCEL_TEST.md: -------------------------------------------------------------------------------- 1 | # Vercel 部署測試指南 2 | 3 | ## 🚀 簡化版本說明 4 | 5 | 由於原版應用過於複雜(包含大量依賴項和SocketIO),我們創建了一個簡化的測試版本來驗證Vercel部署是否正常工作。 6 | 7 | ## 📋 當前配置 8 | 9 | ### 文件結構 10 | ``` 11 | ├── api/index.py # 簡化的Flask應用 12 | ├── vercel.json # Vercel配置 13 | ├── requirements_vercel.txt # 最小依賴項(只有Flask) 14 | └── .vercelignore # 忽略不必要的文件 15 | ``` 16 | 17 | ### 依賴項 18 | - `Flask==3.0.0` - Web框架 19 | - `requests==2.31.0` - HTTP請求庫 20 | 21 | ## 🧪 測試端點 22 | 23 | 部署成功後,你可以測試以下端點: 24 | 25 | ### 1. 主頁 26 | ``` 27 | GET https://your-app.vercel.app/ 28 | ``` 29 | **期望回應**: 30 | ```json 31 | { 32 | "message": "Trading Agents Crypto - Vercel Demo", 33 | "status": "running", 34 | "environment": "vercel", 35 | "version": "1.0.0-test" 36 | } 37 | ``` 38 | 39 | ### 2. 健康檢查 40 | ``` 41 | GET https://your-app.vercel.app/health 42 | ``` 43 | **期望回應**: 44 | ```json 45 | { 46 | "status": "healthy", 47 | "message": "Application is running successfully on Vercel" 48 | } 49 | ``` 50 | 51 | ### 3. 應用信息 52 | ``` 53 | GET https://your-app.vercel.app/api/info 54 | ``` 55 | **期望回應**: 56 | ```json 57 | { 58 | "name": "Trading Agents Crypto", 59 | "environment": "Vercel Serverless", 60 | "mode": "Demo/Test", 61 | "limitations": [ 62 | "No real-time analysis (SocketIO not supported)", 63 | "Limited execution time (5 minutes)", 64 | "No persistent storage", 65 | "Simplified functionality" 66 | ] 67 | } 68 | ``` 69 | 70 | ## ✅ 成功標準 71 | 72 | 如果所有三個端點都返回正確的JSON回應,那麼Vercel部署就是成功的! 73 | 74 | ## 🔄 下一步 75 | 76 | 一旦基本部署工作正常,我們可以: 77 | 78 | 1. **逐步添加功能** - 慢慢加回必要的依賴項 79 | 2. **添加模板支持** - 重新添加HTML模板 80 | 3. **實現簡化的分析** - 添加基本的crypto分析功能 81 | 4. 
def create_trader(llm, memory):
    """Build the trader agent node.

    Args:
        llm: Chat model exposing ``invoke(messages)`` that returns an object
            with a ``content`` attribute.
        memory: Store exposing ``get_memories(situation, n_matches)`` whose
            records carry a ``"recommendation"`` key.

    Returns:
        A callable (``name`` pre-bound to ``"Trader"``) that reads the four
        analyst reports plus the investment plan from the graph state and
        emits a concrete BUY/HOLD/SELL proposal.
    """

    def trader_node(state, name):
        company_name = state["company_of_interest"]
        investment_plan = state["investment_plan"]
        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # The concatenated reports serve as the retrieval key for past lessons.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        if past_memories:
            past_memory_str = "".join(
                rec["recommendation"] + "\n\n" for rec in past_memories
            )
        else:
            past_memory_str = "No past memories found."

        context = {
            "role": "user",
            "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.",
        }

        messages = [
            {
                "role": "system",
                # Original prompt misspelled "situations" as "situatiosn"; fixed.
                "content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here is some reflections from similar situations you traded in and the lessons learned: {past_memory_str}""",
            },
            context,
        ]

        result = llm.invoke(messages)

        return {
            "messages": [result],
            "trader_investment_plan": result.content,
            "sender": name,
        }

    return functools.partial(trader_node, name="Trader")
def create_research_manager(llm, memory):
    """Build the research-manager node that judges the bull/bear debate.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` that returns an object
            with a ``content`` attribute.
        memory: Store exposing ``get_memories(situation, n_matches)`` whose
            records carry a ``"recommendation"`` key.

    Returns:
        A node callable that records the judge's decision in the debate state
        and emits the investment plan for the trader.
    """

    def research_manager_node(state) -> dict:
        history = state["investment_debate_state"].get("history", "")
        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        investment_debate_state = state["investment_debate_state"]

        # The concatenated reports serve as the retrieval key for past lessons.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        if past_memories:
            past_memory_str = "".join(
                rec["recommendation"] + "\n\n" for rec in past_memories
            )
        else:
            # Consistent with the trader node: make an empty memory store
            # explicit instead of interpolating an empty string into the prompt.
            past_memory_str = "No past memories found."

        prompt = f"""As the portfolio manager and debate facilitator, your role is to critically evaluate this round of debate and make a definitive decision: align with the bear analyst, the bull analyst, or choose Hold only if it is strongly justified based on the arguments presented.

Summarize the key points from both sides concisely, focusing on the most compelling evidence or reasoning. Your recommendation—Buy, Sell, or Hold—must be clear and actionable. Avoid defaulting to Hold simply because both sides have valid points; commit to a stance grounded in the debate's strongest arguments.

Additionally, develop a detailed investment plan for the trader. This should include:

Your Recommendation: A decisive stance supported by the most convincing arguments.
Rationale: An explanation of why these arguments lead to your conclusion.
Strategic Actions: Concrete steps for implementing the recommendation.
Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting.

Here are your past reflections on mistakes:
\"{past_memory_str}\"

Here is the debate:
Debate History:
{history}"""
        response = llm.invoke(prompt)

        new_investment_debate_state = {
            "judge_decision": response.content,
            "history": investment_debate_state.get("history", ""),
            "bear_history": investment_debate_state.get("bear_history", ""),
            "bull_history": investment_debate_state.get("bull_history", ""),
            "current_response": response.content,
            "count": investment_debate_state["count"],
        }

        return {
            "investment_debate_state": new_investment_debate_state,
            "investment_plan": response.content,
        }

    return research_manager_node
class ConditionalLogic:
    """Routing predicates that decide which node the trading graph visits next."""

    def __init__(self, max_debate_rounds=1, max_risk_discuss_rounds=1):
        """Initialize with configuration parameters.

        Args:
            max_debate_rounds: Full bull/bear exchanges (2 agents per round).
            max_risk_discuss_rounds: Full risk-debate rotations (3 agents per round).
        """
        self.max_debate_rounds = max_debate_rounds
        self.max_risk_discuss_rounds = max_risk_discuss_rounds

    def _route_analyst(self, state: "AgentState", tools_node: str, clear_node: str):
        # All four analysts follow the same pattern: keep routing to the tools
        # node while the last message still requests tool calls, otherwise
        # route to the message-clearing node.
        last_message = state["messages"][-1]
        if last_message.tool_calls:
            return tools_node
        return clear_node

    def should_continue_market(self, state: "AgentState"):
        """Determine if market analysis should continue."""
        return self._route_analyst(state, "tools_market", "Msg Clear Market")

    def should_continue_social(self, state: "AgentState"):
        """Determine if social media analysis should continue."""
        return self._route_analyst(state, "tools_social", "Msg Clear Social")

    def should_continue_news(self, state: "AgentState"):
        """Determine if news analysis should continue."""
        return self._route_analyst(state, "tools_news", "Msg Clear News")

    def should_continue_fundamentals(self, state: "AgentState"):
        """Determine if fundamentals analysis should continue."""
        return self._route_analyst(state, "tools_fundamentals", "Msg Clear Fundamentals")

    def should_continue_debate(self, state: "AgentState") -> str:
        """Determine if the bull/bear debate should continue.

        Two agents alternate, so one full round equals 2 turns.
        """
        if state["investment_debate_state"]["count"] >= 2 * self.max_debate_rounds:
            return "Research Manager"
        if state["investment_debate_state"]["current_response"].startswith("Bull"):
            return "Bear Researcher"
        return "Bull Researcher"

    def should_continue_risk_analysis(self, state: "AgentState") -> str:
        """Determine if the risk debate should continue.

        Three agents rotate (Risky -> Safe -> Neutral), so one full round
        equals 3 turns.
        """
        if state["risk_debate_state"]["count"] >= 3 * self.max_risk_discuss_rounds:
            return "Risk Judge"
        if state["risk_debate_state"]["latest_speaker"].startswith("Risky"):
            return "Safe Analyst"
        if state["risk_debate_state"]["latest_speaker"].startswith("Safe"):
            return "Neutral Analyst"
        return "Risky Analyst"
class StockstatsUtils:
    """Helpers for computing technical indicators via the ``stockstats`` wrapper."""

    @staticmethod
    def get_stock_stats(
        symbol: Annotated[str, "ticker symbol for the company"],
        indicator: Annotated[
            str, "quantitative indicators based off of the stock data for the company"
        ],
        curr_date: Annotated[
            str, "curr date for retrieving stock price data, YYYY-mm-dd"
        ],
        data_dir: Annotated[
            str,
            "directory where the stock data is stored.",
        ],
        online: Annotated[
            bool,
            "whether to use online tools to fetch data or offline tools. If True, will use online tools.",
        ] = False,
    ):
        """Return the value of *indicator* for *symbol* on *curr_date*.

        Offline mode reads a pre-fetched Yahoo Finance CSV from *data_dir*;
        online mode downloads (and caches) up to 15 years of history via
        yfinance. Returns the indicator value for the requested day, or an
        "N/A" string when *curr_date* is not a trading day.

        Raises:
            Exception: offline mode only, when the expected CSV is missing.
        """
        if not online:
            try:
                data = pd.read_csv(
                    os.path.join(
                        data_dir,
                        f"{symbol}-YFin-data-2015-01-01-2025-03-25.csv",
                    )
                )
                df = wrap(data)
            except FileNotFoundError as err:
                # Keep the historical message/type for existing callers, but
                # chain the cause so the traceback shows the missing path.
                raise Exception(
                    "Stockstats fail: Yahoo Finance data not fetched yet!"
                ) from err
        else:
            today_date = pd.Timestamp.today()
            curr_date = pd.to_datetime(curr_date)

            # Cache window: the last 15 years up to today. NOTE(review): the
            # filename embeds today's date, so the cache is re-downloaded at
            # most once per day — presumably intentional; confirm.
            start_date = (today_date - pd.DateOffset(years=15)).strftime("%Y-%m-%d")
            end_date = today_date.strftime("%Y-%m-%d")

            config = get_config()
            os.makedirs(config["data_cache_dir"], exist_ok=True)

            data_file = os.path.join(
                config["data_cache_dir"],
                f"{symbol}-YFin-data-{start_date}-{end_date}.csv",
            )

            if os.path.exists(data_file):
                data = pd.read_csv(data_file)
                data["Date"] = pd.to_datetime(data["Date"])
            else:
                data = yf.download(
                    symbol,
                    start=start_date,
                    end=end_date,
                    multi_level_index=False,
                    progress=False,
                    auto_adjust=True,
                )
                data = data.reset_index()
                data.to_csv(data_file, index=False)

            df = wrap(data)
            # Normalize dates to strings so the startswith match below works
            # identically in both branches.
            df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")
            curr_date = curr_date.strftime("%Y-%m-%d")

        df[indicator]  # accessing the column triggers stockstats to compute it
        matching_rows = df[df["Date"].str.startswith(curr_date)]

        if not matching_rows.empty:
            return matching_rows[indicator].values[0]
        return "N/A: Not a trading day (weekend or holiday)"
class InvestDebateState(TypedDict):
    """State shared by the researcher team's bull/bear investment debate."""

    bull_history: Annotated[
        str, "Bullish Conversation history"
    ]  # Bullish conversation history
    bear_history: Annotated[
        str, "Bearish Conversation history"
    ]  # Bearish conversation history (original comment was copy-pasted as "Bullish")
    history: Annotated[str, "Conversation history"]  # Full conversation history
    current_response: Annotated[str, "Latest response"]  # Last response
    judge_decision: Annotated[str, "Final judge decision"]  # Research manager's verdict
    count: Annotated[int, "Length of the current conversation"]  # Conversation length
def create_bull_researcher(llm, memory):
    """Build the bull researcher node for the bull/bear investment debate.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` that returns an object
            with a ``content`` attribute.
        memory: Store exposing ``get_memories(situation, n_matches)`` whose
            records carry a ``"recommendation"`` key.

    Returns:
        A node callable that appends one bull argument to the debate state.
    """

    def bull_node(state) -> dict:
        investment_debate_state = state["investment_debate_state"]
        history = investment_debate_state.get("history", "")
        bull_history = investment_debate_state.get("bull_history", "")

        current_response = investment_debate_state.get("current_response", "")
        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        # The concatenated reports serve as the retrieval key for past lessons.
        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        if past_memories:
            past_memory_str = "".join(
                rec["recommendation"] + "\n\n" for rec in past_memories
            )
        else:
            # Consistent with the trader node: make an empty memory store
            # explicit instead of interpolating an empty string into the prompt.
            past_memory_str = "No past memories found."

        prompt = f"""You are a Bull Analyst advocating for investing in the stock. Your task is to build a strong, evidence-based case emphasizing growth potential, competitive advantages, and positive market indicators. Leverage the provided research and data to address concerns and counter bearish arguments effectively.

Key points to focus on:
- Growth Potential: Highlight the company's market opportunities, revenue projections, and scalability.
- Competitive Advantages: Emphasize factors like unique products, strong branding, or dominant market positioning.
- Positive Indicators: Use financial health, industry trends, and recent positive news as evidence.
- Bear Counterpoints: Critically analyze the bear argument with specific data and sound reasoning, addressing concerns thoroughly and showing why the bull perspective holds stronger merit.
- Engagement: Present your argument in a conversational style, engaging directly with the bear analyst's points and debating effectively rather than just listing data.

Resources available:
Market research report: {market_research_report}
Social media sentiment report: {sentiment_report}
Latest world affairs news: {news_report}
Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bear argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bull argument, refute the bear's concerns, and engage in a dynamic debate that demonstrates the strengths of the bull position. You must also address reflections and learn from lessons and mistakes you made in the past.
"""

        response = llm.invoke(prompt)

        argument = f"Bull Analyst: {response.content}"

        new_investment_debate_state = {
            "history": history + "\n" + argument,
            "bull_history": bull_history + "\n" + argument,
            "bear_history": investment_debate_state.get("bear_history", ""),
            "current_response": argument,
            "count": investment_debate_state["count"] + 1,
        }

        return {"investment_debate_state": new_investment_debate_state}

    return bull_node
def create_social_media_analyst(llm, toolkit):
    """Build the social-media/sentiment analyst node.

    Args:
        llm: Chat model supporting ``bind_tools``.
        toolkit: Tool provider; ``toolkit.config["online_tools"]`` selects
            between the OpenAI-backed news tool and the Reddit tool.

    Returns:
        A node callable that produces the ``sentiment_report`` once the
        model stops requesting tool calls.
    """

    def social_media_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        if toolkit.config["online_tools"]:
            tools = [toolkit.get_stock_news_openai]
        else:
            tools = [
                toolkit.get_reddit_stock_info,
            ]

        # BUG FIX: the original wrapped this in parentheses with a trailing
        # comma, producing a 1-tuple. ChatPromptTemplate.partial then
        # interpolated the tuple's repr (with quotes and parens) into the
        # system prompt instead of the plain text. Also fixes "Makrdown".
        system_message = (
            "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The current company we want to analyze is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)

        result = chain.invoke(state["messages"])

        # Only emit the report once the model answers without tool calls.
        report = ""
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "sentiment_report": report,
        }

    return social_media_analyst_node
def create_risk_manager(llm, memory):
    """Build the risk-manager (judge) node for the risk debate.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` that returns an object
            with a ``content`` attribute.
        memory: Store exposing ``get_memories(situation, n_matches)`` whose
            records carry a ``"recommendation"`` key.

    Returns:
        A node callable that records the judge's verdict and emits the
        ``final_trade_decision``.
    """

    def risk_manager_node(state) -> dict:

        company_name = state["company_of_interest"]

        history = state["risk_debate_state"]["history"]
        risk_debate_state = state["risk_debate_state"]
        market_research_report = state["market_report"]
        news_report = state["news_report"]
        # BUG FIX: the original read state["news_report"] here, so the memory
        # retrieval key duplicated the news report and omitted fundamentals.
        fundamentals_report = state["fundamentals_report"]
        sentiment_report = state["sentiment_report"]
        trader_plan = state["investment_plan"]

        curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
        past_memories = memory.get_memories(curr_situation, n_matches=2)

        if past_memories:
            past_memory_str = "".join(
                rec["recommendation"] + "\n\n" for rec in past_memories
            )
        else:
            # Consistent with the trader node: make an empty memory store
            # explicit instead of interpolating an empty string into the prompt.
            past_memory_str = "No past memories found."

        prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Risky, Neutral, and Safe/Conservative—and determine the best course of action for the trader. Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness.

Guidelines for Decision-Making:
1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context.
2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate.
3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights.
4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/SELL/HOLD call that loses money.

Deliverables:
- A clear and actionable recommendation: Buy, Sell, or Hold.
- Detailed reasoning anchored in the debate and past reflections.

---

**Analysts Debate History:**
{history}

---

Focus on actionable insights and continuous improvement. Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes."""

        response = llm.invoke(prompt)

        new_risk_debate_state = {
            "judge_decision": response.content,
            "history": risk_debate_state["history"],
            "risky_history": risk_debate_state["risky_history"],
            "safe_history": risk_debate_state["safe_history"],
            "neutral_history": risk_debate_state["neutral_history"],
            "latest_speaker": "Judge",
            "current_risky_response": risk_debate_state["current_risky_response"],
            "current_safe_response": risk_debate_state["current_safe_response"],
            "current_neutral_response": risk_debate_state["current_neutral_response"],
            "count": risk_debate_state["count"],
        }

        return {
            "risk_debate_state": new_risk_debate_state,
            "final_trade_decision": response.content,
        }

    return risk_manager_node
def create_bear_researcher(llm, memory):
    """Build the bear researcher node for the bull/bear investment debate.

    The returned node drafts one bearish argument from the four analyst
    reports, the debate transcript so far, and recalled lessons, then
    appends it to the shared debate state.
    """

    def bear_node(state) -> dict:
        debate = state["investment_debate_state"]
        transcript = debate.get("history", "")
        own_transcript = debate.get("bear_history", "")
        bull_last = debate.get("current_response", "")

        market_report = state["market_report"]
        sentiment = state["sentiment_report"]
        news = state["news_report"]
        fundamentals = state["fundamentals_report"]

        # Retrieval key for past lessons: the four reports joined together.
        situation = "\n\n".join([market_report, sentiment, news, fundamentals])
        lessons = "".join(
            rec["recommendation"] + "\n\n"
            for rec in memory.get_memories(situation, n_matches=2)
        )

        prompt = f"""You are a Bear Analyst making the case against investing in the stock. Your goal is to present a well-reasoned argument emphasizing risks, challenges, and negative indicators. Leverage the provided research and data to highlight potential downsides and counter bullish arguments effectively.

Key points to focus on:

- Risks and Challenges: Highlight factors like market saturation, financial instability, or macroeconomic threats that could hinder the stock's performance.
- Competitive Weaknesses: Emphasize vulnerabilities such as weaker market positioning, declining innovation, or threats from competitors.
- Negative Indicators: Use evidence from financial data, market trends, or recent adverse news to support your position.
- Bull Counterpoints: Critically analyze the bull argument with specific data and sound reasoning, exposing weaknesses or over-optimistic assumptions.
- Engagement: Present your argument in a conversational style, directly engaging with the bull analyst's points and debating effectively rather than simply listing facts.

Resources available:

Market research report: {market_report}
Social media sentiment report: {sentiment}
Latest world affairs news: {news}
Company fundamentals report: {fundamentals}
Conversation history of the debate: {transcript}
Last bull argument: {bull_last}
Reflections from similar situations and lessons learned: {lessons}
Use this information to deliver a compelling bear argument, refute the bull's claims, and engage in a dynamic debate that demonstrates the risks and weaknesses of investing in the stock. You must also address reflections and learn from lessons and mistakes you made in the past.
"""

        reply = llm.invoke(prompt)
        turn = f"Bear Analyst: {reply.content}"

        return {
            "investment_debate_state": {
                "history": transcript + "\n" + turn,
                "bear_history": own_transcript + "\n" + turn,
                "bull_history": debate.get("bull_history", ""),
                "current_response": turn,
                "count": debate["count"] + 1,
            }
        }

    return bear_node
def create_neutral_debator(llm):
    """Build the neutral risk-analyst node for the three-way risk debate.

    Args:
        llm: Chat model exposing ``invoke(prompt)`` that returns an object
            with a ``content`` attribute.

    Returns:
        A node callable that appends one balanced argument to the risk
        debate state and marks "Neutral" as the latest speaker.
    """

    def neutral_node(state) -> dict:
        risk_debate_state = state["risk_debate_state"]
        history = risk_debate_state.get("history", "")
        neutral_history = risk_debate_state.get("neutral_history", "")

        current_risky_response = risk_debate_state.get("current_risky_response", "")
        current_safe_response = risk_debate_state.get("current_safe_response", "")

        market_research_report = state["market_report"]
        sentiment_report = state["sentiment_report"]
        news_report = state["news_report"]
        fundamentals_report = state["fundamentals_report"]

        trader_decision = state["trader_investment_plan"]

        # Prompt typos fixed vs. the original: "strategies.Here" (missing
        # space) and "halluncinate" -> "hallucinate".
        prompt = f"""As the Neutral Risk Analyst, your role is to provide a balanced perspective, weighing both the potential benefits and risks of the trader's decision or plan. You prioritize a well-rounded approach, evaluating the upsides and downsides while factoring in broader market trends, potential economic shifts, and diversification strategies. Here is the trader's decision:

{trader_decision}

Your task is to challenge both the Risky and Safe Analysts, pointing out where each perspective may be overly optimistic or overly cautious. Use insights from the following data sources to support a moderate, sustainable strategy to adjust the trader's decision:

Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the safe analyst: {current_safe_response}. If there are no responses from the other viewpoints, do not hallucinate and just present your point.

Engage actively by analyzing both sides critically, addressing weaknesses in the risky and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting."""

        response = llm.invoke(prompt)

        argument = f"Neutral Analyst: {response.content}"

        new_risk_debate_state = {
            "history": history + "\n" + argument,
            "risky_history": risk_debate_state.get("risky_history", ""),
            "safe_history": risk_debate_state.get("safe_history", ""),
            "neutral_history": neutral_history + "\n" + argument,
            "latest_speaker": "Neutral",
            "current_risky_response": risk_debate_state.get(
                "current_risky_response", ""
            ),
            "current_safe_response": risk_debate_state.get("current_safe_response", ""),
            "current_neutral_response": argument,
            "count": risk_debate_state["count"] + 1,
        }

        return {"risk_debate_state": new_risk_debate_state}

    return neutral_node
def create_safe_debator(llm):
    """Build the conservative ("safe") risk-analyst node.

    The returned node drafts one risk-averse rebuttal to the risky and
    neutral analysts and appends it to the shared risk-debate state,
    marking "Safe" as the latest speaker.
    """

    def safe_node(state) -> dict:
        debate = state["risk_debate_state"]

        transcript = debate.get("history", "")
        own_transcript = debate.get("safe_history", "")
        last_risky = debate.get("current_risky_response", "")
        last_neutral = debate.get("current_neutral_response", "")

        research_summary = state["market_report"]
        sentiment_summary = state["sentiment_report"]
        news_summary = state["news_report"]
        fundamentals_summary = state["fundamentals_report"]
        plan = state["trader_investment_plan"]

        prompt = f"""As the Safe/Conservative Risk Analyst, your primary objective is to protect assets, minimize volatility, and ensure steady, reliable growth. You prioritize stability, security, and risk mitigation, carefully assessing potential losses, economic downturns, and market volatility. When evaluating the trader's decision or plan, critically examine high-risk elements, pointing out where the decision may expose the firm to undue risk and where more cautious alternatives could secure long-term gains. Here is the trader's decision:

{plan}

Your task is to actively counter the arguments of the Risky and Neutral Analysts, highlighting where their views may overlook potential threats or fail to prioritize sustainability. Respond directly to their points, drawing from the following data sources to build a convincing case for a low-risk approach adjustment to the trader's decision:

Market Research Report: {research_summary}
Social Media Sentiment Report: {sentiment_summary}
Latest World Affairs Report: {news_summary}
Company Fundamentals Report: {fundamentals_summary}
Here is the current conversation history: {transcript} Here is the last response from the risky analyst: {last_risky} Here is the last response from the neutral analyst: {last_neutral}. If there are no responses from the other viewpoints, do not halluncinate and just present your point.

Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting."""

        reply = llm.invoke(prompt)
        turn = f"Safe Analyst: {reply.content}"

        # Rebuild the state dict explicitly so only the documented keys survive.
        updated = {
            "history": transcript + "\n" + turn,
            "risky_history": debate.get("risky_history", ""),
            "safe_history": own_transcript + "\n" + turn,
            "neutral_history": debate.get("neutral_history", ""),
            "latest_speaker": "Safe",
            "current_risky_response": last_risky,
            "current_safe_response": turn,
            "current_neutral_response": last_neutral,
            "count": debate["count"] + 1,
        }

        return {"risk_debate_state": updated}

    return safe_node
Output conversationally as if you are speaking without any special formatting.""" 35 | 36 | response = llm.invoke(prompt) 37 | 38 | argument = f"Safe Analyst: {response.content}" 39 | 40 | new_risk_debate_state = { 41 | "history": history + "\n" + argument, 42 | "risky_history": risk_debate_state.get("risky_history", ""), 43 | "safe_history": safe_history + "\n" + argument, 44 | "neutral_history": risk_debate_state.get("neutral_history", ""), 45 | "latest_speaker": "Safe", 46 | "current_risky_response": risk_debate_state.get( 47 | "current_risky_response", "" 48 | ), 49 | "current_safe_response": argument, 50 | "current_neutral_response": risk_debate_state.get( 51 | "current_neutral_response", "" 52 | ), 53 | "count": risk_debate_state["count"] + 1, 54 | } 55 | 56 | return {"risk_debate_state": new_risk_debate_state} 57 | 58 | return safe_node 59 | -------------------------------------------------------------------------------- /tradingagents/agents/risk_mgmt/aggresive_debator.py: -------------------------------------------------------------------------------- 1 | import time 2 | import json 3 | 4 | 5 | def create_risky_debator(llm): 6 | def risky_node(state) -> dict: 7 | risk_debate_state = state["risk_debate_state"] 8 | history = risk_debate_state.get("history", "") 9 | risky_history = risk_debate_state.get("risky_history", "") 10 | 11 | current_safe_response = risk_debate_state.get("current_safe_response", "") 12 | current_neutral_response = risk_debate_state.get("current_neutral_response", "") 13 | 14 | market_research_report = state["market_report"] 15 | sentiment_report = state["sentiment_report"] 16 | news_report = state["news_report"] 17 | fundamentals_report = state["fundamentals_report"] 18 | 19 | trader_decision = state["trader_investment_plan"] 20 | 21 | prompt = f"""As the Risky Risk Analyst, your role is to actively champion high-reward, high-risk opportunities, emphasizing bold strategies and competitive advantages. 
When evaluating the trader's decision or plan, focus intently on the potential upside, growth potential, and innovative benefits—even when these come with elevated risk. Use the provided market data and sentiment analysis to strengthen your arguments and challenge the opposing views. Specifically, respond directly to each point made by the conservative and neutral analysts, countering with data-driven rebuttals and persuasive reasoning. Highlight where their caution might miss critical opportunities or where their assumptions may be overly conservative. Here is the trader's decision: 22 | 23 | {trader_decision} 24 | 25 | Your task is to create a compelling case for the trader's decision by questioning and critiquing the conservative and neutral stances to demonstrate why your high-reward perspective offers the best path forward. Incorporate insights from the following sources into your arguments: 26 | 27 | Market Research Report: {market_research_report} 28 | Social Media Sentiment Report: {sentiment_report} 29 | Latest World Affairs Report: {news_report} 30 | Company Fundamentals Report: {fundamentals_report} 31 | Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_safe_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point. 32 | 33 | Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. 
Output conversationally as if you are speaking without any special formatting.""" 34 | 35 | response = llm.invoke(prompt) 36 | 37 | argument = f"Risky Analyst: {response.content}" 38 | 39 | new_risk_debate_state = { 40 | "history": history + "\n" + argument, 41 | "risky_history": risky_history + "\n" + argument, 42 | "safe_history": risk_debate_state.get("safe_history", ""), 43 | "neutral_history": risk_debate_state.get("neutral_history", ""), 44 | "latest_speaker": "Risky", 45 | "current_risky_response": argument, 46 | "current_safe_response": risk_debate_state.get("current_safe_response", ""), 47 | "current_neutral_response": risk_debate_state.get( 48 | "current_neutral_response", "" 49 | ), 50 | "count": risk_debate_state["count"] + 1, 51 | } 52 | 53 | return {"risk_debate_state": new_risk_debate_state} 54 | 55 | return risky_node 56 | -------------------------------------------------------------------------------- /tradingagents/dataflows/googlenews_utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | from bs4 import BeautifulSoup 4 | from datetime import datetime 5 | import time 6 | import random 7 | from tenacity import ( 8 | retry, 9 | stop_after_attempt, 10 | wait_exponential, 11 | retry_if_exception_type, 12 | retry_if_result, 13 | ) 14 | 15 | 16 | def is_rate_limited(response): 17 | """Check if the response indicates rate limiting (status code 429)""" 18 | return response.status_code == 429 19 | 20 | 21 | @retry( 22 | retry=(retry_if_result(is_rate_limited)), 23 | wait=wait_exponential(multiplier=1, min=4, max=60), 24 | stop=stop_after_attempt(5), 25 | ) 26 | def make_request(url, headers): 27 | """Make a request with retry logic for rate limiting""" 28 | # Random delay before each request to avoid detection 29 | time.sleep(random.uniform(2, 6)) 30 | response = requests.get(url, headers=headers) 31 | return response 32 | 33 | 34 | def getNewsData(query, start_date, end_date): 35 
| """ 36 | Scrape Google News search results for a given query and date range. 37 | query: str - search query 38 | start_date: str - start date in the format yyyy-mm-dd or mm/dd/yyyy 39 | end_date: str - end date in the format yyyy-mm-dd or mm/dd/yyyy 40 | """ 41 | if "-" in start_date: 42 | start_date = datetime.strptime(start_date, "%Y-%m-%d") 43 | start_date = start_date.strftime("%m/%d/%Y") 44 | if "-" in end_date: 45 | end_date = datetime.strptime(end_date, "%Y-%m-%d") 46 | end_date = end_date.strftime("%m/%d/%Y") 47 | 48 | headers = { 49 | "User-Agent": ( 50 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " 51 | "AppleWebKit/537.36 (KHTML, like Gecko) " 52 | "Chrome/101.0.4951.54 Safari/537.36" 53 | ) 54 | } 55 | 56 | news_results = [] 57 | page = 0 58 | while True: 59 | offset = page * 10 60 | url = ( 61 | f"https://www.google.com/search?q={query}" 62 | f"&tbs=cdr:1,cd_min:{start_date},cd_max:{end_date}" 63 | f"&tbm=nws&start={offset}" 64 | ) 65 | 66 | try: 67 | response = make_request(url, headers) 68 | soup = BeautifulSoup(response.content, "html.parser") 69 | results_on_page = soup.select("div.SoaBEf") 70 | 71 | if not results_on_page: 72 | break # No more results found 73 | 74 | for el in results_on_page: 75 | try: 76 | link = el.find("a")["href"] 77 | title = el.select_one("div.MBeuO").get_text() 78 | snippet = el.select_one(".GI74Re").get_text() 79 | date = el.select_one(".LfVVr").get_text() 80 | source = el.select_one(".NUnG9d span").get_text() 81 | news_results.append( 82 | { 83 | "link": link, 84 | "title": title, 85 | "snippet": snippet, 86 | "date": date, 87 | "source": source, 88 | } 89 | ) 90 | except Exception as e: 91 | print(f"Error processing result: {e}") 92 | # If one of the fields is not found, skip this result 93 | continue 94 | 95 | # Update the progress bar with the current count of results scraped 96 | 97 | # Check for the "Next" link (pagination) 98 | next_link = soup.find("a", id="pnnext") 99 | if not next_link: 100 | break 101 | 102 | 
page += 1 103 | 104 | except Exception as e: 105 | print(f"Failed after multiple retries: {e}") 106 | break 107 | 108 | return news_results 109 | -------------------------------------------------------------------------------- /CRYPTO_MODIFICATIONS.md: -------------------------------------------------------------------------------- 1 | # 🚀 TradingAgents Crypto Modification 2 | 3 | 這個項目已經從原本的美股交易系統成功改造為支持加密貨幣交易的系統! 4 | 5 | ## 📋 修改摘要 6 | 7 | ### 🔄 主要變更 8 | 9 | 1. **新增加密貨幣數據源** 10 | - 使用 CoinGecko API 替代傳統股票數據源 11 | - 支持實時加密貨幣價格、市場數據、新聞和技術分析 12 | 13 | 2. **智能符號檢測** 14 | - 自動檢測輸入符號是加密貨幣還是股票 15 | - 根據符號類型使用相應的數據源和分析方法 16 | 17 | 3. **加密貨幣專用分析師** 18 | - 基本面分析師:分析市值、供應量、代幣經濟學 19 | - 技術分析師:針對加密貨幣市場的技術分析 20 | - 新聞分析師:關注加密貨幣相關新聞和趨勢 21 | 22 | ## 🆕 新增文件 23 | 24 | ### `tradingagents/dataflows/coingecko_utils.py` 25 | 加密貨幣數據獲取工具,包含: 26 | - `get_crypto_price_data()` - 獲取歷史價格數據 27 | - `get_crypto_market_data()` - 獲取市場數據 28 | - `get_crypto_news()` - 獲取加密貨幣新聞 29 | - `get_crypto_technical_indicators()` - 技術分析指標 30 | 31 | ### `test_crypto.py` 32 | 測試腳本,用於驗證加密貨幣功能是否正常工作 33 | 34 | ## 🔧 修改的文件 35 | 36 | ### `tradingagents/dataflows/interface.py` 37 | - 添加加密貨幣主要接口函數 38 | - 集成新的CoinGecko工具 39 | 40 | ### 分析師文件更新 41 | - `fundamentals_analyst.py` - 添加加密貨幣基本面分析 42 | - `market_analyst.py` - 添加加密貨幣技術分析 43 | - `news_analyst.py` - 添加加密貨幣新聞分析 44 | 45 | ### `main.py` 46 | - 將測試符號從 "NVDA" 改為 "BTC" 47 | 48 | ## 🎯 支持的加密貨幣 49 | 50 | 系統支持所有在CoinGecko上列出的加密貨幣,包括但不限於: 51 | 52 | **主要加密貨幣:** 53 | - BTC (Bitcoin) 54 | - ETH (Ethereum) 55 | - ADA (Cardano) 56 | - SOL (Solana) 57 | - DOT (Polkadot) 58 | 59 | **DeFi代幣:** 60 | - UNI (Uniswap) 61 | - AAVE (Aave) 62 | - LINK (Chainlink) 63 | 64 | **熱門代幣:** 65 | - DOGE (Dogecoin) 66 | - SHIB (Shiba Inu) 67 | - MATIC (Polygon) 68 | 69 | ## 🚀 如何使用 70 | 71 | ### 1. 基本測試 72 | ```bash 73 | # 測試加密貨幣功能 74 | python test_crypto.py 75 | ``` 76 | 77 | ### 2. 運行完整交易系統 78 | ```bash 79 | # 使用BTC進行交易分析 80 | python main.py 81 | ``` 82 | 83 | ### 3. 
自定義加密貨幣 84 | 修改 `main.py` 中的符號: 85 | ```python 86 | # 改為你想分析的加密貨幣 87 | _, decision = ta.propagate("ETH", "2024-05-10") # 以太坊 88 | _, decision = ta.propagate("ADA", "2024-05-10") # 卡爾達諾 89 | ``` 90 | 91 | ## 🔑 API 設置 92 | 93 | ### CoinGecko API(可選) 94 | ```bash 95 | # 免費版本已足夠使用,設置API密鑰可以提高請求限制 96 | export COINGECKO_API_KEY=your_coingecko_api_key 97 | ``` 98 | 99 | ### OpenAI API(必需) 100 | ```bash 101 | # 用於LLM代理 102 | export OPENAI_API_KEY=your_openai_api_key 103 | ``` 104 | 105 | ## ⚡ 功能特點 106 | 107 | ### 🔍 智能檢測 108 | - 自動識別輸入符號類型(加密貨幣 vs 股票) 109 | - 無縫切換數據源和分析方法 110 | 111 | ### 📊 加密貨幣專用分析 112 | - **基本面分析**:市值排名、供應量分析、代幣經濟學 113 | - **技術分析**:價格趨勢、成交量分析、支撐阻力位 114 | - **新聞分析**:監管發展、機構採用、技術更新 115 | 116 | ### 🌐 實時數據 117 | - 24/7加密貨幣市場數據 118 | - 實時價格和成交量 119 | - 全球市場概覽 120 | 121 | ## 🔄 向後兼容 122 | 123 | 系統仍然支持原有的股票交易功能: 124 | - 使用股票符號(如AAPL、NVDA)時自動使用股票數據源 125 | - 保持原有的所有股票分析功能 126 | 127 | ## 📈 使用示例 128 | 129 | ### 分析比特幣 130 | ```python 131 | from tradingagents.graph.trading_graph import TradingAgentsGraph 132 | from tradingagents.default_config import DEFAULT_CONFIG 133 | 134 | ta = TradingAgentsGraph(debug=True, config=DEFAULT_CONFIG.copy()) 135 | _, decision = ta.propagate("BTC", "2024-05-10") 136 | print(decision) 137 | ``` 138 | 139 | ### 分析以太坊 140 | ```python 141 | _, decision = ta.propagate("ETH", "2024-05-10") 142 | ``` 143 | 144 | ## 🛠️ 技術架構 145 | 146 | ### 數據流 147 | ``` 148 | 加密貨幣符號 → 符號檢測 → CoinGecko API → 加密貨幣分析師 → 交易決策 149 | 股票符號 → 符號檢測 → 傳統API → 股票分析師 → 交易決策 150 | ``` 151 | 152 | ### 分析師架構 153 | - **基本面分析師**:分析市值、供應量、代幣經濟學 154 | - **技術分析師**:價格趨勢、技術指標、市場動量 155 | - **新聞分析師**:監管新聞、市場趨勢、機構動態 156 | - **研究團隊**:多空辯論和風險評估 157 | - **交易代理**:最終交易決策 158 | 159 | ## 🎉 優勢 160 | 161 | 1. **統一平台**:同時支持股票和加密貨幣交易分析 162 | 2. **專業分析**:針對加密貨幣市場特點的專門分析 163 | 3. **免費數據**:使用CoinGecko免費API,成本低廉 164 | 4. **實時更新**:24/7市場數據支持 165 | 5. 
**易於擴展**:模塊化設計,易於添加新功能 166 | 167 | ## 🔍 調試和監控 168 | 169 | ### 啟用調試模式 170 | ```python 171 | ta = TradingAgentsGraph(debug=True, config=config) 172 | ``` 173 | 174 | ### 監控API調用 175 | - CoinGecko API調用會顯示請求詳情 176 | - 錯誤處理和重試機制 177 | 178 | ## 📝 注意事項 179 | 180 | 1. **免費API限制**:CoinGecko免費版有請求限制,建議合理使用 181 | 2. **網絡依賴**:需要穩定的網絡連接獲取實時數據 182 | 3. **風險提示**:這是研究工具,不構成投資建議 183 | 184 | ## 🤝 貢獻 185 | 186 | 歡迎提交問題和改進建議! 187 | 188 | --- 189 | 190 | **享受加密貨幣交易分析的新體驗!** 🚀💰 -------------------------------------------------------------------------------- /VERCEL_DEPLOYMENT.md: -------------------------------------------------------------------------------- 1 | # Vercel 部署指南 2 | 3 | 這個指南將幫助你將 Trading Agents Crypto 部署到 Vercel 平台。 4 | 5 | ## ⚠️ 重要限制 6 | 7 | 由於 Vercel 是 serverless 平台,以下功能會受到限制: 8 | 9 | 1. **實時通信**: SocketIO 功能不可用,無法提供實時更新 10 | 2. **執行時間**: 最大執行時間 300 秒(5分鐘) 11 | 3. **記憶體**: 有限的 RAM 和存儲空間 12 | 4. **狀態持久化**: 無法在請求間保持狀態 13 | 14 | ## 📋 部署前準備 15 | 16 | ### 1. 確保文件結構 17 | ``` 18 | TradingAgents-main/ 19 | ├── api/ 20 | │ └── index.py # Vercel 入口點 21 | ├── templates/ # HTML 模板 22 | ├── assets/ # 靜態資源 23 | ├── tradingagents/ # 主要代碼 24 | ├── vercel.json # Vercel 配置 25 | ├── web_app_vercel.py # 簡化版應用 26 | ├── requirements_vercel.txt # 依賴項 27 | └── VERCEL_DEPLOYMENT.md # 此文件 28 | ``` 29 | 30 | ### 2. 環境變量設置 31 | 32 | 在 Vercel 項目設置中添加以下環境變量: 33 | 34 | ``` 35 | PYTHONPATH=. 36 | FLASK_ENV=production 37 | ``` 38 | 39 | ## 🚀 部署步驟 40 | 41 | ### 方法 1: 通過 Vercel CLI 42 | 43 | 1. **安裝 Vercel CLI** 44 | ```bash 45 | npm install -g vercel 46 | ``` 47 | 48 | 2. **登錄 Vercel** 49 | ```bash 50 | vercel login 51 | ``` 52 | 53 | 3. **部署項目** 54 | ```bash 55 | vercel --prod 56 | ``` 57 | 58 | ### 方法 2: 通過 GitHub 集成 59 | 60 | 1. **推送代碼到 GitHub** 61 | ```bash 62 | git add . 63 | git commit -m "Add Vercel deployment configuration" 64 | git push origin main 65 | ``` 66 | 67 | 2. 
**連接到 Vercel** 68 | - 訪問 [Vercel Dashboard](https://vercel.com/dashboard) 69 | - 點擊 "New Project" 70 | - 選擇你的 GitHub 倉庫 71 | - 配置構建設置(通常自動檢測) 72 | 73 | 3. **部署** 74 | - Vercel 會自動開始部署 75 | - 等待部署完成 76 | 77 | ## ⚙️ 配置說明 78 | 79 | ### vercel.json 配置 80 | ```json 81 | { 82 | "version": 2, 83 | "builds": [ 84 | { 85 | "src": "api/index.py", 86 | "use": "@vercel/python", 87 | "config": { 88 | "maxLambdaSize": "50mb" 89 | } 90 | }, 91 | { 92 | "src": "templates/**", 93 | "use": "@vercel/static" 94 | }, 95 | { 96 | "src": "assets/**", 97 | "use": "@vercel/static" 98 | } 99 | ], 100 | "routes": [ 101 | { 102 | "src": "/assets/(.*)", 103 | "dest": "/assets/$1" 104 | }, 105 | { 106 | "src": "/(.*)", 107 | "dest": "/api/index.py" 108 | } 109 | ], 110 | "env": { 111 | "PYTHONPATH": "." 112 | }, 113 | "functions": { 114 | "api/index.py": { 115 | "maxDuration": 300 116 | } 117 | } 118 | } 119 | ``` 120 | 121 | ### 功能差異 122 | 123 | | 功能 | 本地版本 | Vercel 版本 | 124 | |------|----------|-------------| 125 | | 實時更新 | ✅ SocketIO | ❌ 不支持 | 126 | | 長時間分析 | ✅ 背景執行 | ❌ 5分鐘限制 | 127 | | 多用戶並發 | ✅ 支持 | ⚠️ 有限支持 | 128 | | 狀態持久化 | ✅ 記憶體中 | ❌ 請求間不保持 | 129 | 130 | ## 🛠️ 故障排除 131 | 132 | ### 常見問題 133 | 134 | 1. **依賴項過大** 135 | ``` 136 | Error: Lambda size exceeds limit 137 | ``` 138 | **解決方案**: 減少 `requirements_vercel.txt` 中的依賴項 139 | 140 | 2. **執行超時** 141 | ``` 142 | Error: Function execution timed out 143 | ``` 144 | **解決方案**: 優化代碼或使用更快的 LLM 模型 145 | 146 | 3. **模板找不到** 147 | ``` 148 | TemplateNotFound: index.html 149 | ``` 150 | **解決方案**: 確保 `templates/` 文件夾已正確上傳 151 | 152 | ### 調試技巧 153 | 154 | 1. **查看日誌** 155 | ```bash 156 | vercel logs 157 | ``` 158 | 159 | 2. **本地測試** 160 | ```bash 161 | vercel dev 162 | ``` 163 | 164 | ## 🔧 優化建議 165 | 166 | ### 性能優化 167 | 168 | 1. **減少依賴項** 169 | - 只安裝必要的包 170 | - 考慮使用輕量級替代品 171 | 172 | 2. **優化分析流程** 173 | - 減少 LLM 調用次數 174 | - 使用更快的模型進行初步分析 175 | 176 | 3. 
**緩存策略** 177 | - 實現簡單的結果緩存 178 | - 避免重複計算 179 | 180 | ### 替代方案 181 | 182 | 如果 Vercel 限制太多,考慮以下替代方案: 183 | 184 | 1. **Railway**: 支持長時間運行的應用 185 | 2. **Render**: 提供更多的執行時間 186 | 3. **Heroku**: 傳統的 PaaS 平台 187 | 4. **DigitalOcean App Platform**: 靈活的部署選項 188 | 189 | ## 📞 獲取幫助 190 | 191 | 如果遇到部署問題: 192 | 193 | 1. 檢查 [Vercel 文檔](https://vercel.com/docs) 194 | 2. 查看項目的 GitHub Issues 195 | 3. 聯繫項目維護者 196 | 197 | ## 🎉 部署成功 198 | 199 | 部署成功後,你的應用將可以通過 Vercel 提供的 URL 訪問。記住,這是一個簡化版本,主要用於演示和輕量級使用。 200 | 201 | 對於生產環境和完整功能,建議使用支持長時間運行的平台部署原始版本。 -------------------------------------------------------------------------------- /test_crypto.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Test script for crypto trading functionality 4 | """ 5 | 6 | import sys 7 | import os 8 | 9 | # Add the project root to Python path 10 | sys.path.insert(0, os.path.abspath('.')) 11 | 12 | from tradingagents.dataflows.coingecko_utils import ( 13 | get_crypto_market_data, 14 | get_crypto_price_data, 15 | get_crypto_news, 16 | get_crypto_technical_indicators 17 | ) 18 | from datetime import datetime, timedelta 19 | 20 | def test_crypto_functions(): 21 | """Test the basic crypto data functions""" 22 | print("🚀 Testing Crypto Trading Functions...") 23 | print("=" * 50) 24 | 25 | # Test crypto symbol 26 | crypto_symbol = "BTC" 27 | curr_date = "2024-12-01" 28 | 29 | print(f"\n📊 Testing Market Data for {crypto_symbol}...") 30 | try: 31 | market_data = get_crypto_market_data(crypto_symbol) 32 | print("✅ Market data retrieved successfully!") 33 | print(market_data[:500] + "..." 
if len(market_data) > 500 else market_data) 34 | except Exception as e: 35 | print(f"❌ Error getting market data: {e}") 36 | 37 | print(f"\n📈 Testing Price History for {crypto_symbol}...") 38 | try: 39 | start_date = "2024-11-01" 40 | price_data = get_crypto_price_data(crypto_symbol, start_date, curr_date) 41 | print("✅ Price data retrieved successfully!") 42 | print(price_data[:500] + "..." if len(price_data) > 500 else price_data) 43 | except Exception as e: 44 | print(f"❌ Error getting price data: {e}") 45 | 46 | print(f"\n📰 Testing News for {crypto_symbol}...") 47 | try: 48 | news_data = get_crypto_news(crypto_symbol, curr_date, 7) 49 | print("✅ News data retrieved successfully!") 50 | print(news_data[:500] + "..." if len(news_data) > 500 else news_data) 51 | except Exception as e: 52 | print(f"❌ Error getting news data: {e}") 53 | 54 | print(f"\n📊 Testing Technical Analysis for {crypto_symbol}...") 55 | try: 56 | tech_data = get_crypto_technical_indicators(crypto_symbol, curr_date, 30) 57 | print("✅ Technical analysis retrieved successfully!") 58 | print(tech_data[:500] + "..." 
if len(tech_data) > 500 else tech_data) 59 | except Exception as e: 60 | print(f"❌ Error getting technical data: {e}") 61 | 62 | def test_symbol_detection(): 63 | """Test the crypto symbol detection function""" 64 | print("\n🔍 Testing Symbol Detection...") 65 | print("=" * 50) 66 | 67 | from tradingagents.agents.analysts.fundamentals_analyst import _is_crypto_symbol 68 | 69 | # Test crypto symbols 70 | crypto_symbols = ["BTC", "ETH", "ADA", "SOL", "DOGE"] 71 | stock_symbols = ["AAPL", "NVDA", "MSFT", "TSLA", "GOOGL"] 72 | unknown_symbols = ["XYZ", "ABC", "ZZZZ"] 73 | 74 | print("Known Crypto symbols:") 75 | for symbol in crypto_symbols: 76 | result = _is_crypto_symbol(symbol) 77 | print(f" {symbol}: {result} {'✅' if result else '❌'}") 78 | 79 | print("\nKnown Stock symbols:") 80 | for symbol in stock_symbols: 81 | result = _is_crypto_symbol(symbol) 82 | print(f" {symbol}: {result} {'❌' if result else '✅'}") 83 | 84 | print("\nUnknown symbols (should default to stocks):") 85 | for symbol in unknown_symbols: 86 | result = _is_crypto_symbol(symbol) 87 | print(f" {symbol}: {result} {'❌' if result else '✅'}") 88 | 89 | if __name__ == "__main__": 90 | print("🔄 Starting Crypto Trading System Tests...") 91 | print("🔧 Testing with improved symbol detection and CoinGecko API fixes...") 92 | print() 93 | 94 | # Test symbol detection first 95 | test_symbol_detection() 96 | 97 | # Test crypto data functions 98 | test_crypto_functions() 99 | 100 | print("\n🎉 Testing completed!") 101 | print("\n📈 Key Improvements Made:") 102 | print(" ✅ Fixed symbol detection (stocks vs crypto)") 103 | print(" ✅ Added direct mapping for major cryptocurrencies") 104 | print(" ✅ Improved API rate limit handling") 105 | print(" ✅ Better error handling and fallback logic") 106 | 107 | print("\n🚀 To test the full trading system with crypto:") 108 | print(" python main.py") 109 | print("\n🔑 API Keys Setup:") 110 | print(" export COINGECKO_API_KEY=your_key # Optional, works without key") 111 | print(" 
def fetch_top_from_category(
    category: Annotated[
        str, "Category to fetch top post from. Collection of subreddits."
    ],
    date: Annotated[str, "Date to fetch top posts from."],
    max_limit: Annotated[int, "Maximum number of posts to fetch."],
    query: Annotated[str, "Optional query to search for in the subreddit."] = None,
    data_path: Annotated[
        str,
        "Path to the data folder. Default is 'reddit_data'.",
    ] = "reddit_data",
):
    """Return the top posts (by upvotes) for one category on one date.

    Reads every ``*.jsonl`` dump under ``<data_path>/<category>``, keeps only
    posts created on ``date`` (UTC day, ``YYYY-mm-dd``), optionally filters
    company categories by the company name / ticker in ``query``, and returns
    up to ``max_limit`` posts split evenly across the subreddit files.

    Raises:
        ValueError: if ``max_limit`` is smaller than the number of ``.jsonl``
            files, since the per-subreddit quota would round down to zero.
    """
    from datetime import timezone  # local import: module header only pulls datetime/timedelta

    category_dir = os.path.join(data_path, category)

    # Only .jsonl dumps count: a stray README or hidden file must not skew the
    # per-subreddit quota or the max_limit sanity check below (the old code
    # counted every directory entry).
    data_files = [f for f in os.listdir(category_dir) if f.endswith(".jsonl")]
    if not data_files:
        return []  # nothing to read; also avoids a ZeroDivisionError below

    if max_limit < len(data_files):
        raise ValueError(
            "REDDIT FETCHING ERROR: max limit is less than the number of files in the category. Will not be able to fetch any posts"
        )

    limit_per_subreddit = max_limit // len(data_files)

    all_content = []
    for data_file in data_files:
        posts_curr_subreddit = []

        with open(os.path.join(category_dir, data_file), "rb") as f:
            for line in f:
                # skip empty lines
                if not line.strip():
                    continue

                parsed_line = json.loads(line)

                # Keep only posts created on the requested (UTC) day.
                # timezone-aware replacement for the deprecated utcfromtimestamp.
                post_date = datetime.fromtimestamp(
                    parsed_line["created_utc"], tz=timezone.utc
                ).strftime("%Y-%m-%d")
                if post_date != date:
                    continue

                # For company categories, require the company name (or the raw
                # ticker) to appear in the title or body text.
                if "company" in category and query:
                    if "OR" in ticker_to_company[query]:
                        search_terms = ticker_to_company[query].split(" OR ")
                    else:
                        search_terms = [ticker_to_company[query]]
                    search_terms.append(query)

                    # re.escape: company names may contain regex metacharacters
                    # (e.g. "Snap Inc."); .get("selftext"): link posts have no body.
                    if not any(
                        re.search(re.escape(term), parsed_line["title"], re.IGNORECASE)
                        or re.search(
                            re.escape(term),
                            parsed_line.get("selftext", ""),
                            re.IGNORECASE,
                        )
                        for term in search_terms
                    ):
                        continue

                posts_curr_subreddit.append(
                    {
                        "title": parsed_line["title"],
                        "content": parsed_line.get("selftext", ""),
                        "url": parsed_line["url"],
                        "upvotes": parsed_line["ups"],
                        "posted_date": post_date,
                    }
                )

        # Most-upvoted first; each subreddit contributes its share of the quota.
        posts_curr_subreddit.sort(key=lambda p: p["upvotes"], reverse=True)
        all_content.extend(posts_curr_subreddit[:limit_per_subreddit])

    return all_content
def init_ticker(func: Callable) -> Callable:
    """Decorator to initialize yf.Ticker and pass it to the function.

    The wrapped function is declared as taking a ticker *symbol* (str), but at
    call time this decorator constructs the corresponding ``yf.Ticker`` and
    passes that object in as the first argument instead.
    """

    @wraps(func)
    def wrapper(symbol: Annotated[str, "ticker symbol"], *args, **kwargs) -> Any:
        ticker = yf.Ticker(symbol)
        return func(ticker, *args, **kwargs)

    return wrapper


@decorate_all_methods(init_ticker)
class YFinanceUtils:
    # Namespace of yfinance helpers. `decorate_all_methods(init_ticker)` wraps
    # every method so callers pass a plain symbol string while the method body
    # actually receives a ready-made yf.Ticker object -- hence the
    # `ticker = symbol` rebinding at the top of each method. These are
    # deliberately not instance methods (no `self`).

    def get_stock_data(
        symbol: Annotated[str, "ticker symbol"],
        start_date: Annotated[
            str, "start date for retrieving stock price data, YYYY-mm-dd"
        ],
        end_date: Annotated[
            str, "end date for retrieving stock price data, YYYY-mm-dd"
        ],
        save_path: SavePathType = None,
    ) -> DataFrame:
        """retrieve stock price data for designated ticker symbol"""
        ticker = symbol  # actually a yf.Ticker -- see init_ticker
        # add one day to the end_date so that the data range is inclusive
        end_date = pd.to_datetime(end_date) + pd.DateOffset(days=1)
        end_date = end_date.strftime("%Y-%m-%d")
        stock_data = ticker.history(start=start_date, end=end_date)
        # NOTE: save_path is currently unused; the save_output call is disabled.
        # save_output(stock_data, f"Stock data for {ticker.ticker}", save_path)
        return stock_data

    def get_stock_info(
        symbol: Annotated[str, "ticker symbol"],
    ) -> dict:
        """Fetches and returns latest stock information."""
        ticker = symbol
        stock_info = ticker.info
        return stock_info

    def get_company_info(
        symbol: Annotated[str, "ticker symbol"],
        save_path: Optional[str] = None,
    ) -> DataFrame:
        """Fetches and returns company information as a DataFrame."""
        ticker = symbol
        info = ticker.info
        # Fall back to "N/A" for fields yfinance does not provide for this ticker.
        company_info = {
            "Company Name": info.get("shortName", "N/A"),
            "Industry": info.get("industry", "N/A"),
            "Sector": info.get("sector", "N/A"),
            "Country": info.get("country", "N/A"),
            "Website": info.get("website", "N/A"),
        }
        company_info_df = DataFrame([company_info])
        if save_path:
            company_info_df.to_csv(save_path)
            print(f"Company info for {ticker.ticker} saved to {save_path}")
        return company_info_df

    def get_stock_dividends(
        symbol: Annotated[str, "ticker symbol"],
        save_path: Optional[str] = None,
    ) -> DataFrame:
        """Fetches and returns the latest dividends data as a DataFrame."""
        ticker = symbol
        dividends = ticker.dividends
        if save_path:
            dividends.to_csv(save_path)
            print(f"Dividends for {ticker.ticker} saved to {save_path}")
        return dividends

    def get_income_stmt(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
        """Fetches and returns the latest income statement of the company as a DataFrame."""
        ticker = symbol
        income_stmt = ticker.financials
        return income_stmt

    def get_balance_sheet(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
        """Fetches and returns the latest balance sheet of the company as a DataFrame."""
        ticker = symbol
        balance_sheet = ticker.balance_sheet
        return balance_sheet

    def get_cash_flow(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
        """Fetches and returns the latest cash flow statement of the company as a DataFrame."""
        ticker = symbol
        cash_flow = ticker.cashflow
        return cash_flow

    def get_analyst_recommendations(symbol: Annotated[str, "ticker symbol"]) -> tuple:
        """Fetches the latest analyst recommendations and returns the most common recommendation and its count."""
        ticker = symbol
        # NOTE(review): yfinance may return None (not an empty frame) for
        # tickers without analyst coverage; `.empty` would then raise -- confirm.
        recommendations = ticker.recommendations
        if recommendations.empty:
            return None, 0  # No recommendations available

        # Assuming 'period' column exists and needs to be excluded
        # (assumes row 0 is the most recent period -- TODO confirm).
        row_0 = recommendations.iloc[0, 1:]  # Exclude 'period' column if necessary

        # Find the maximum voting result
        max_votes = row_0.max()
        majority_voting_result = row_0[row_0 == max_votes].index.tolist()

        return majority_voting_result[0], max_votes
class FinancialSituationMemory:
    """Vector-store memory mapping past financial situations to advice.

    Situations are embedded through an OpenAI-compatible embeddings endpoint
    and stored in an in-process Chroma collection so similar situations can be
    recalled later via nearest-neighbour search.
    """

    def __init__(self, name, config):
        # Ollama's local endpoint serves a different embedding model than the
        # hosted OpenAI API, so pick the model by backend URL.
        if config["backend_url"] == "http://localhost:11434/v1":
            self.embedding = "nomic-embed-text"
        else:
            self.embedding = "text-embedding-3-small"
        self.client = OpenAI(
            base_url=config["backend_url"],
            api_key=config["api_key"]
        )
        self.chroma_client = chromadb.Client(Settings(allow_reset=True))

        # Make collection name unique per session to avoid conflicts
        session_id = config.get('session_id', 'default')
        unique_name = f"{name}_{session_id}"

        # Check if collection already exists, if so delete it and create new one
        try:
            existing_collections = [col.name for col in self.chroma_client.list_collections()]
            if unique_name in existing_collections:
                self.chroma_client.delete_collection(name=unique_name)
        except Exception as e:
            # If there's any issue checking/deleting, just continue
            pass

        # Create the collection (now guaranteed to be fresh and unique)
        self.situation_collection = self.chroma_client.create_collection(name=unique_name)

    def get_embedding(self, text):
        """Get OpenAI embedding for a text"""

        response = self.client.embeddings.create(
            model=self.embedding, input=text
        )
        return response.data[0].embedding

    def add_situations(self, situations_and_advice):
        """Add financial situations and their corresponding advice. Parameter is a list of tuples (situation, rec)"""

        situations = []
        advice = []
        ids = []
        embeddings = []

        # Offset ids by the current collection size so that repeated calls on
        # the same collection keep producing unique string ids.
        offset = self.situation_collection.count()

        for i, (situation, recommendation) in enumerate(situations_and_advice):
            situations.append(situation)
            advice.append(recommendation)
            ids.append(str(offset + i))
            embeddings.append(self.get_embedding(situation))

        self.situation_collection.add(
            documents=situations,
            metadatas=[{"recommendation": rec} for rec in advice],
            embeddings=embeddings,
            ids=ids,
        )

    def get_memories(self, current_situation, n_matches=1):
        """Find matching recommendations using OpenAI embeddings"""
        query_embedding = self.get_embedding(current_situation)

        results = self.situation_collection.query(
            query_embeddings=[query_embedding],
            n_results=n_matches,
            include=["metadatas", "documents", "distances"],
        )

        matched_results = []
        # Chroma returns parallel, per-query lists; index [0] selects the
        # single query issued above.
        for i in range(len(results["documents"][0])):
            matched_results.append(
                {
                    "matched_situation": results["documents"][0][i],
                    "recommendation": results["metadatas"][0][i]["recommendation"],
                    # NOTE(review): assumes the collection's default distance
                    # metric makes `1 - distance` a meaningful similarity -- confirm.
                    "similarity_score": 1 - results["distances"][0][i],
                }
            )

        return matched_results
class Reflector:
    """Reviews past trading decisions with an LLM and writes lessons to memory."""

    def __init__(self, quick_thinking_llm: ChatOpenAI):
        """Initialize the reflector with an LLM."""
        self.quick_thinking_llm = quick_thinking_llm
        self.reflection_system_prompt = self._get_reflection_prompt()

    def _get_reflection_prompt(self) -> str:
        """Get the system prompt for reflection."""
        return """
You are an expert financial analyst tasked with reviewing trading decisions/analysis and providing a comprehensive, step-by-step analysis. 
Your goal is to deliver detailed insights into investment decisions and highlight opportunities for improvement, adhering strictly to the following guidelines:

1. Reasoning:
   - For each trading decision, determine whether it was correct or incorrect. A correct decision results in an increase in returns, while an incorrect decision does the opposite.
   - Analyze the contributing factors to each success or mistake. Consider:
     - Market intelligence.
     - Technical indicators.
     - Technical signals.
     - Price movement analysis.
     - Overall market data analysis
     - News analysis.
     - Social media and sentiment analysis.
     - Fundamental data analysis.
   - Weight the importance of each factor in the decision-making process.

2. Improvement:
   - For any incorrect decisions, propose revisions to maximize returns.
   - Provide a detailed list of corrective actions or improvements, including specific recommendations (e.g., changing a decision from HOLD to BUY on a particular date).

3. Summary:
   - Summarize the lessons learned from the successes and mistakes.
   - Highlight how these lessons can be adapted for future trading scenarios and draw connections between similar situations to apply the knowledge gained.

4. Query:
   - Extract key insights from the summary into a concise sentence of no more than 1000 tokens.
   - Ensure the condensed sentence captures the essence of the lessons and reasoning for easy reference.

Adhere strictly to these instructions, and ensure your output is detailed, accurate, and actionable. You will also be given objective descriptions of the market from a price movements, technical indicator, news, and sentiment perspective to provide more context for your analysis.
"""

    def _extract_current_situation(self, current_state: Dict[str, Any]) -> str:
        """Concatenate the four analyst reports into one situation description."""
        reports = (
            current_state["market_report"],
            current_state["sentiment_report"],
            current_state["news_report"],
            current_state["fundamentals_report"],
        )
        return "\n\n".join(reports)

    def _reflect_on_component(
        self, component_type: str, report: str, situation: str, returns_losses
    ) -> str:
        """Generate reflection for a component."""
        human_prompt = (
            f"Returns: {returns_losses}\n\n"
            f"Analysis/Decision: {report}\n\n"
            f"Objective Market Reports for Reference: {situation}"
        )
        messages = [("system", self.reflection_system_prompt), ("human", human_prompt)]
        return self.quick_thinking_llm.invoke(messages).content

    def _reflect_and_remember(
        self, label, report, current_state, returns_losses, memory
    ):
        # Shared flow for every reflect_* entry point: build the situation,
        # run the reflection LLM, then persist the (situation, lesson) pair.
        situation = self._extract_current_situation(current_state)
        result = self._reflect_on_component(label, report, situation, returns_losses)
        memory.add_situations([(situation, result)])

    def reflect_bull_researcher(self, current_state, returns_losses, bull_memory):
        """Reflect on bull researcher's analysis and update memory."""
        self._reflect_and_remember(
            "BULL",
            current_state["investment_debate_state"]["bull_history"],
            current_state,
            returns_losses,
            bull_memory,
        )

    def reflect_bear_researcher(self, current_state, returns_losses, bear_memory):
        """Reflect on bear researcher's analysis and update memory."""
        self._reflect_and_remember(
            "BEAR",
            current_state["investment_debate_state"]["bear_history"],
            current_state,
            returns_losses,
            bear_memory,
        )

    def reflect_trader(self, current_state, returns_losses, trader_memory):
        """Reflect on trader's decision and update memory."""
        self._reflect_and_remember(
            "TRADER",
            current_state["trader_investment_plan"],
            current_state,
            returns_losses,
            trader_memory,
        )

    def reflect_invest_judge(self, current_state, returns_losses, invest_judge_memory):
        """Reflect on investment judge's decision and update memory."""
        self._reflect_and_remember(
            "INVEST JUDGE",
            current_state["investment_debate_state"]["judge_decision"],
            current_state,
            returns_losses,
            invest_judge_memory,
        )

    def reflect_risk_manager(self, current_state, returns_losses, risk_manager_memory):
        """Reflect on risk manager's decision and update memory."""
        self._reflect_and_remember(
            "RISK JUDGE",
            current_state["risk_debate_state"]["judge_decision"],
            current_state,
            returns_losses,
            risk_manager_memory,
        )
current_state, returns_losses, trader_memory): 94 | """Reflect on trader's decision and update memory.""" 95 | situation = self._extract_current_situation(current_state) 96 | trader_decision = current_state["trader_investment_plan"] 97 | 98 | result = self._reflect_on_component( 99 | "TRADER", trader_decision, situation, returns_losses 100 | ) 101 | trader_memory.add_situations([(situation, result)]) 102 | 103 | def reflect_invest_judge(self, current_state, returns_losses, invest_judge_memory): 104 | """Reflect on investment judge's decision and update memory.""" 105 | situation = self._extract_current_situation(current_state) 106 | judge_decision = current_state["investment_debate_state"]["judge_decision"] 107 | 108 | result = self._reflect_on_component( 109 | "INVEST JUDGE", judge_decision, situation, returns_losses 110 | ) 111 | invest_judge_memory.add_situations([(situation, result)]) 112 | 113 | def reflect_risk_manager(self, current_state, returns_losses, risk_manager_memory): 114 | """Reflect on risk manager's decision and update memory.""" 115 | situation = self._extract_current_situation(current_state) 116 | judge_decision = current_state["risk_debate_state"]["judge_decision"] 117 | 118 | result = self._reflect_on_component( 119 | "RISK JUDGE", judge_decision, situation, returns_losses 120 | ) 121 | risk_manager_memory.add_situations([(situation, result)]) 122 | -------------------------------------------------------------------------------- /tradingagents/agents/analysts/news_analyst.py: -------------------------------------------------------------------------------- 1 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder 2 | import time 3 | import json 4 | 5 | 6 | def _is_crypto_symbol(symbol: str) -> bool: 7 | """ 8 | Detect if a symbol is likely a cryptocurrency 9 | Uses a whitelist approach for known crypto symbols and excludes known stock patterns 10 | """ 11 | # Known crypto symbols (most common ones) 12 | crypto_symbols = { 
13 | 'BTC', 'ETH', 'ADA', 'SOL', 'DOT', 'AVAX', 'MATIC', 'LINK', 'UNI', 'AAVE', 14 | 'XRP', 'LTC', 'BCH', 'EOS', 'TRX', 'XLM', 'VET', 'ALGO', 'ATOM', 'LUNA', 15 | 'NEAR', 'FTM', 'CRO', 'SAND', 'MANA', 'AXS', 'GALA', 'ENJ', 'CHZ', 'BAT', 16 | 'ZEC', 'DASH', 'XMR', 'DOGE', 'SHIB', 'PEPE', 'FLOKI', 'BNB', 'USDT', 'USDC', 17 | 'TON', 'ICP', 'HBAR', 'THETA', 'FIL', 'ETC', 'MKR', 'APT', 'LDO', 'OP', 18 | 'IMX', 'GRT', 'RUNE', 'FLOW', 'EGLD', 'XTZ', 'MINA', 'ROSE', 'KAVA' 19 | } 20 | 21 | # Known stock symbols (to avoid false positives) 22 | stock_symbols = { 23 | 'AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA', 'NVDA', 'META', 'NFLX', 'DIS', 'AMD', 24 | 'INTC', 'CRM', 'ORCL', 'ADBE', 'CSCO', 'PEP', 'KO', 'WMT', 'JNJ', 'PFE', 25 | 'V', 'MA', 'HD', 'UNH', 'BAC', 'XOM', 'CVX', 'LLY', 'ABBV', 'COST', 26 | 'AVGO', 'TMO', 'ACN', 'DHR', 'TXN', 'LOW', 'QCOM', 'HON', 'UPS', 'MDT' 27 | } 28 | 29 | symbol_upper = symbol.upper() 30 | 31 | # If it's a known stock symbol, it's definitely not crypto 32 | if symbol_upper in stock_symbols: 33 | return False 34 | 35 | # If it's a known crypto symbol, it's definitely crypto 36 | if symbol_upper in crypto_symbols: 37 | return True 38 | 39 | # For unknown symbols, be conservative and assume it's a stock 40 | # unless it has typical crypto characteristics 41 | if len(symbol) >= 5: # Most stocks are 4+ characters 42 | return False 43 | 44 | # Short symbols (2-4 chars) could be crypto if they don't look like stocks 45 | if len(symbol) <= 4 and symbol.isalnum() and not any(c in symbol for c in ['.', '-', '_']): 46 | # Additional heuristic: crypto symbols often have certain patterns 47 | return True 48 | 49 | return False 50 | 51 | 52 | def create_news_analyst(llm, toolkit): 53 | def news_analyst_node(state): 54 | current_date = state["trade_date"] 55 | ticker = state["company_of_interest"] 56 | 57 | # Check if we're dealing with crypto or stocks 58 | is_crypto = _is_crypto_symbol(ticker) 59 | 60 | if is_crypto: 61 | # Use crypto-specific tools 62 | 
def create_news_analyst(llm, toolkit):
    """Build a LangGraph node that writes the news report for one ticker.

    Picks crypto- or stock-oriented news tools based on ``_is_crypto_symbol``,
    binds them to ``llm``, and returns a node function that emits the raw LLM
    message plus a ``news_report`` string (populated only when the model
    answered without requesting further tool calls).
    """
    def news_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        # Check if we're dealing with crypto or stocks
        is_crypto = _is_crypto_symbol(ticker)

        if is_crypto:
            # Use crypto-specific tools
            tools = [toolkit.get_crypto_news_analysis, toolkit.get_google_news]

            system_message = (
                "You are a cryptocurrency news researcher tasked with analyzing recent news and trends over the past week that affect cryptocurrency markets. Please write a comprehensive report of the current state of the crypto world and broader macroeconomic factors that are relevant for cryptocurrency trading. "
                "Focus on crypto-specific news including: regulatory developments, institutional adoption, technology updates, market sentiment, DeFi trends, NFT markets, blockchain developments, and major crypto exchange news. "
                "Also consider traditional macroeconomic factors that impact crypto markets such as inflation, monetary policy, global economic uncertainty, and traditional market trends. "
                "Do not simply state the trends are mixed, provide detailed and fine-grained analysis and insights that may help crypto traders make decisions."
                + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
            )
        else:
            # Use stock-specific tools (original functionality)
            if toolkit.config["online_tools"]:
                tools = [toolkit.get_global_news_openai, toolkit.get_google_news]
            else:
                # Offline mode: cached/local data sources only.
                tools = [
                    toolkit.get_finnhub_news,
                    toolkit.get_reddit_news,
                    toolkit.get_google_news,
                ]

            system_message = (
                "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Look at news from EODHD, and finnhub to be comprehensive. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
                + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
            )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. We are looking at the company {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the static template slots; only the conversation messages
        # vary per invocation.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        report = ""

        # Only treat the answer as the final report when the model did not
        # request more tool calls this turn.
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "news_report": report,
        }

    return news_analyst_node
| Detect if a symbol is likely a cryptocurrency 9 | Uses a whitelist approach for known crypto symbols and excludes known stock patterns 10 | """ 11 | # Known crypto symbols (most common ones) 12 | crypto_symbols = { 13 | 'BTC', 'ETH', 'ADA', 'SOL', 'DOT', 'AVAX', 'MATIC', 'LINK', 'UNI', 'AAVE', 14 | 'XRP', 'LTC', 'BCH', 'EOS', 'TRX', 'XLM', 'VET', 'ALGO', 'ATOM', 'LUNA', 15 | 'NEAR', 'FTM', 'CRO', 'SAND', 'MANA', 'AXS', 'GALA', 'ENJ', 'CHZ', 'BAT', 16 | 'ZEC', 'DASH', 'XMR', 'DOGE', 'SHIB', 'PEPE', 'FLOKI', 'BNB', 'USDT', 'USDC', 17 | 'TON', 'ICP', 'HBAR', 'THETA', 'FIL', 'ETC', 'MKR', 'APT', 'LDO', 'OP', 18 | 'IMX', 'GRT', 'RUNE', 'FLOW', 'EGLD', 'XTZ', 'MINA', 'ROSE', 'KAVA' 19 | } 20 | 21 | # Known stock symbols (to avoid false positives) 22 | stock_symbols = { 23 | 'AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA', 'NVDA', 'META', 'NFLX', 'DIS', 'AMD', 24 | 'INTC', 'CRM', 'ORCL', 'ADBE', 'CSCO', 'PEP', 'KO', 'WMT', 'JNJ', 'PFE', 25 | 'V', 'MA', 'HD', 'UNH', 'BAC', 'XOM', 'CVX', 'LLY', 'ABBV', 'COST', 26 | 'AVGO', 'TMO', 'ACN', 'DHR', 'TXN', 'LOW', 'QCOM', 'HON', 'UPS', 'MDT' 27 | } 28 | 29 | symbol_upper = symbol.upper() 30 | 31 | # If it's a known stock symbol, it's definitely not crypto 32 | if symbol_upper in stock_symbols: 33 | return False 34 | 35 | # If it's a known crypto symbol, it's definitely crypto 36 | if symbol_upper in crypto_symbols: 37 | return True 38 | 39 | # For unknown symbols, be conservative and assume it's a stock 40 | # unless it has typical crypto characteristics 41 | if len(symbol) >= 5: # Most stocks are 4+ characters 42 | return False 43 | 44 | # Short symbols (2-4 chars) could be crypto if they don't look like stocks 45 | if len(symbol) <= 4 and symbol.isalnum() and not any(c in symbol for c in ['.', '-', '_']): 46 | # Additional heuristic: crypto symbols often have certain patterns 47 | return True 48 | 49 | return False 50 | 51 | 52 | def create_fundamentals_analyst(llm, toolkit): 53 | def fundamentals_analyst_node(state): 54 | 
def create_fundamentals_analyst(llm, toolkit):
    """Build a LangGraph node that writes the fundamentals report for one ticker.

    Picks crypto- or stock-oriented fundamentals tools based on
    ``_is_crypto_symbol``, binds them to ``llm``, and returns a node function
    that emits the raw LLM message plus a ``fundamentals_report`` string
    (populated only when the model answered without requesting tool calls).
    """
    def fundamentals_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        # Check if we're dealing with crypto or stocks
        is_crypto = _is_crypto_symbol(ticker)

        if is_crypto:
            # Use crypto-specific tools
            tools = [toolkit.get_crypto_fundamentals_analysis, toolkit.get_crypto_market_analysis]

            # BUG FIX: both branches previously ended the parenthesized string
            # with a trailing comma, which made system_message a 1-tuple and
            # injected the tuple's repr into the rendered prompt.
            system_message = (
                "You are a cryptocurrency fundamental analyst tasked with analyzing fundamental information about a cryptocurrency. Please write a comprehensive report of the cryptocurrency's fundamental information such as market capitalization, supply mechanics, token economics, network metrics, adoption indicators, and market positioning to gain a full view of the cryptocurrency's fundamental value proposition to inform traders. "
                "Focus on crypto-specific metrics like: market cap rank, circulating vs total supply, trading volume patterns, network activity, developer ecosystem, regulatory environment, community strength, and technology fundamentals. "
                "Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and fine-grained analysis and insights that may help crypto traders make decisions."
                + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."
            )
        else:
            # Use stock-specific tools (original functionality)
            if toolkit.config["online_tools"]:
                tools = [toolkit.get_fundamentals_openai]
            else:
                # Offline mode: cached/local data sources only.
                tools = [
                    toolkit.get_finnhub_company_insider_sentiment,
                    toolkit.get_finnhub_company_insider_transactions,
                    toolkit.get_simfin_balance_sheet,
                    toolkit.get_simfin_cashflow,
                    toolkit.get_simfin_income_stmt,
                ]

            system_message = (
                "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
                + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."
            )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the static template slots; only the conversation messages
        # vary per invocation.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)

        result = chain.invoke(state["messages"])

        report = ""

        # Only treat the answer as the final report when the model did not
        # request more tool calls this turn.
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "fundamentals_report": report,
        }

    return fundamentals_analyst_node
"final_trade_decision": None, 38 | } 39 | self.current_step = "waiting" 40 | self.progress = 0 41 | 42 | def add_message(self, message_type, content): 43 | timestamp = datetime.datetime.now().strftime("%H:%M:%S") 44 | message = {"timestamp": timestamp, "type": message_type, "content": content} 45 | self.messages.append(message) 46 | 47 | def update_agent_status(self, agent, status): 48 | self.agent_status[agent] = status 49 | 50 | def update_report_section(self, section_name, content): 51 | if section_name in self.report_sections: 52 | self.report_sections[section_name] = content 53 | 54 | def update_progress(self, progress, step): 55 | self.progress = progress 56 | self.current_step = step 57 | 58 | @app.route('/') 59 | def index(): 60 | try: 61 | return render_template('index.html') 62 | except Exception as e: 63 | return jsonify({ 64 | 'error': 'Template not found', 65 | 'details': str(e), 66 | 'message': 'Please ensure templates folder is properly deployed' 67 | }) 68 | 69 | @app.route('/analysis') 70 | def analysis_page(): 71 | try: 72 | return render_template('analysis.html') 73 | except Exception as e: 74 | return jsonify({ 75 | 'error': 'Template not found', 76 | 'details': str(e), 77 | 'message': 'Please ensure templates folder is properly deployed' 78 | }) 79 | 80 | @app.route('/api/start_analysis', methods=['POST']) 81 | def start_analysis(): 82 | try: 83 | data = request.json 84 | session_id = data.get('session_id', str(int(time.time()))) 85 | 86 | # Create a simplified response for Vercel environment 87 | buffer = SimpleMessageBuffer(session_id) 88 | buffer.add_message("System", f"Analysis request received for {data.get('ticker', 'Unknown')}...") 89 | buffer.add_message("System", "Note: This is a demo version running on Vercel with limited functionality.") 90 | buffer.add_message("System", "For full analysis capabilities, please run the application locally.") 91 | 92 | # Simulate some progress 93 | buffer.update_progress(10, "Initializing...") 94 | 
buffer.update_agent_status("Market Analyst", "in_progress") 95 | buffer.update_progress(30, "Market analysis...") 96 | buffer.update_agent_status("Market Analyst", "completed") 97 | buffer.update_agent_status("Social Analyst", "in_progress") 98 | buffer.update_progress(50, "Social sentiment analysis...") 99 | buffer.update_agent_status("Social Analyst", "completed") 100 | buffer.update_progress(100, "Demo completed") 101 | 102 | # Add demo reports 103 | buffer.update_report_section("market_report", "## Demo Market Analysis\n\nThis is a demonstration version running on Vercel. For real analysis, please deploy locally or use a persistent server environment.") 104 | buffer.update_report_section("final_trade_decision", "## Demo Decision\n\n**HOLD** - This is a demo response. Real trading analysis requires full system deployment.") 105 | 106 | analysis_sessions[session_id] = { 107 | 'config': data, 108 | 'buffer': buffer, 109 | 'status': 'demo_completed' 110 | } 111 | 112 | return jsonify({ 113 | 'session_id': session_id, 114 | 'status': 'demo_completed', 115 | 'message': 'Demo analysis completed. For full functionality, please run locally.' 
116 | }) 117 | 118 | except Exception as e: 119 | return jsonify({ 120 | 'error': str(e), 121 | 'message': 'Failed to process analysis request' 122 | }), 500 123 | 124 | @app.route('/api/session//status', methods=['GET']) 125 | def get_session_status(session_id): 126 | """Get the current status of an analysis session""" 127 | try: 128 | if session_id in analysis_sessions: 129 | session = analysis_sessions[session_id] 130 | return jsonify({ 131 | 'session_id': session_id, 132 | 'status': session['status'], 133 | 'messages': session['buffer'].messages, 134 | 'agent_status': session['buffer'].agent_status, 135 | 'report_sections': session['buffer'].report_sections, 136 | 'progress': session['buffer'].progress, 137 | 'current_step': session['buffer'].current_step 138 | }) 139 | else: 140 | return jsonify({'error': 'Session not found'}), 404 141 | except Exception as e: 142 | return jsonify({'error': str(e)}), 500 143 | 144 | @app.route('/health') 145 | def health_check(): 146 | return jsonify({ 147 | 'status': 'healthy', 148 | 'timestamp': datetime.datetime.now().isoformat(), 149 | 'environment': 'vercel', 150 | 'mode': 'demo' 151 | }) 152 | 153 | @app.route('/api/info') 154 | def api_info(): 155 | return jsonify({ 156 | 'name': 'Trading Agents Crypto - Vercel Demo', 157 | 'version': '1.0.0-vercel', 158 | 'description': 'Simplified demo version for Vercel deployment', 159 | 'limitations': [ 160 | 'No real-time analysis', 161 | 'No persistent storage', 162 | 'Demo responses only', 163 | 'Limited to 5-minute execution time' 164 | ], 165 | 'recommendation': 'For full functionality, deploy to a persistent server environment' 166 | }) 167 | 168 | # Error handlers 169 | @app.errorhandler(404) 170 | def not_found(error): 171 | return jsonify({'error': 'Not found', 'message': 'The requested resource was not found'}), 404 172 | 173 | @app.errorhandler(500) 174 | def internal_error(error): 175 | return jsonify({'error': 'Internal server error', 'message': 'An unexpected error 
if __name__ == '__main__':
    app.run(debug=True)

# TradingAgents/graph/setup.py

from typing import Dict, Any
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph, START
from langgraph.prebuilt import ToolNode

from tradingagents.agents import *
from tradingagents.agents.utils.agent_states import AgentState
from tradingagents.agents.utils.agent_utils import Toolkit

from .conditional_logic import ConditionalLogic


class GraphSetup:
    """Handles the setup and configuration of the agent graph."""

    def __init__(
        self,
        quick_thinking_llm: ChatOpenAI,
        deep_thinking_llm: ChatOpenAI,
        toolkit: Toolkit,
        tool_nodes: Dict[str, ToolNode],
        bull_memory,
        bear_memory,
        trader_memory,
        invest_judge_memory,
        risk_manager_memory,
        conditional_logic: ConditionalLogic,
    ):
        """Initialize with required components."""
        self.quick_thinking_llm = quick_thinking_llm
        self.deep_thinking_llm = deep_thinking_llm
        self.toolkit = toolkit
        self.tool_nodes = tool_nodes
        self.bull_memory = bull_memory
        self.bear_memory = bear_memory
        self.trader_memory = trader_memory
        self.invest_judge_memory = invest_judge_memory
        self.risk_manager_memory = risk_manager_memory
        self.conditional_logic = conditional_logic

    # BUG FIX: the default used to be a mutable list; a tuple avoids the
    # shared-mutable-default pitfall and is fully compatible since the
    # sequence is only read (len/index/iterate/`in`) below.
    def setup_graph(
        self, selected_analysts=("market", "social", "news", "fundamentals")
    ):
        """Set up and compile the agent workflow graph.

        Args:
            selected_analysts: analyst types to include, in execution order.
                Options are:
                - "market": Market analyst
                - "social": Social media analyst
                - "news": News analyst
                - "fundamentals": Fundamentals analyst

        Returns:
            The compiled LangGraph workflow.

        Raises:
            ValueError: if no analysts are selected.
        """
        if len(selected_analysts) == 0:
            raise ValueError("Trading Agents Graph Setup Error: no analysts selected!")

        # Create analyst nodes plus, per analyst, a message-clear node and
        # the matching pre-built tool node.
        analyst_nodes = {}
        delete_nodes = {}
        tool_nodes = {}

        if "market" in selected_analysts:
            analyst_nodes["market"] = create_market_analyst(
                self.quick_thinking_llm, self.toolkit
            )
            delete_nodes["market"] = create_msg_delete()
            tool_nodes["market"] = self.tool_nodes["market"]

        if "social" in selected_analysts:
            analyst_nodes["social"] = create_social_media_analyst(
                self.quick_thinking_llm, self.toolkit
            )
            delete_nodes["social"] = create_msg_delete()
            tool_nodes["social"] = self.tool_nodes["social"]

        if "news" in selected_analysts:
            analyst_nodes["news"] = create_news_analyst(
                self.quick_thinking_llm, self.toolkit
            )
            delete_nodes["news"] = create_msg_delete()
            tool_nodes["news"] = self.tool_nodes["news"]

        if "fundamentals" in selected_analysts:
            analyst_nodes["fundamentals"] = create_fundamentals_analyst(
                self.quick_thinking_llm, self.toolkit
            )
            delete_nodes["fundamentals"] = create_msg_delete()
            tool_nodes["fundamentals"] = self.tool_nodes["fundamentals"]

        # Create researcher and manager nodes.
        bull_researcher_node = create_bull_researcher(
            self.quick_thinking_llm, self.bull_memory
        )
        bear_researcher_node = create_bear_researcher(
            self.quick_thinking_llm, self.bear_memory
        )
        research_manager_node = create_research_manager(
            self.deep_thinking_llm, self.invest_judge_memory
        )
        trader_node = create_trader(self.quick_thinking_llm, self.trader_memory)

        # Create risk analysis nodes.
        risky_analyst = create_risky_debator(self.quick_thinking_llm)
        neutral_analyst = create_neutral_debator(self.quick_thinking_llm)
        safe_analyst = create_safe_debator(self.quick_thinking_llm)
        risk_manager_node = create_risk_manager(
            self.deep_thinking_llm, self.risk_manager_memory
        )

        # Create workflow.
        workflow = StateGraph(AgentState)

        # Add analyst nodes (and their clear/tool companions) to the graph.
        for analyst_type, node in analyst_nodes.items():
            workflow.add_node(f"{analyst_type.capitalize()} Analyst", node)
            workflow.add_node(
                f"Msg Clear {analyst_type.capitalize()}", delete_nodes[analyst_type]
            )
            workflow.add_node(f"tools_{analyst_type}", tool_nodes[analyst_type])

        # Add other nodes.
        workflow.add_node("Bull Researcher", bull_researcher_node)
        workflow.add_node("Bear Researcher", bear_researcher_node)
        workflow.add_node("Research Manager", research_manager_node)
        workflow.add_node("Trader", trader_node)
        workflow.add_node("Risky Analyst", risky_analyst)
        workflow.add_node("Neutral Analyst", neutral_analyst)
        workflow.add_node("Safe Analyst", safe_analyst)
        workflow.add_node("Risk Judge", risk_manager_node)

        # Define edges — start with the first analyst.
        first_analyst = selected_analysts[0]
        workflow.add_edge(START, f"{first_analyst.capitalize()} Analyst")

        # Connect analysts in sequence: each analyst loops through its tool
        # node until its conditional says to move on, then clears messages.
        for i, analyst_type in enumerate(selected_analysts):
            current_analyst = f"{analyst_type.capitalize()} Analyst"
            current_tools = f"tools_{analyst_type}"
            current_clear = f"Msg Clear {analyst_type.capitalize()}"

            workflow.add_conditional_edges(
                current_analyst,
                getattr(self.conditional_logic, f"should_continue_{analyst_type}"),
                [current_tools, current_clear],
            )
            workflow.add_edge(current_tools, current_analyst)

            # Connect to next analyst, or to Bull Researcher after the last one.
            if i < len(selected_analysts) - 1:
                next_analyst = f"{selected_analysts[i + 1].capitalize()} Analyst"
                workflow.add_edge(current_clear, next_analyst)
            else:
                workflow.add_edge(current_clear, "Bull Researcher")

        # Bull/bear debate loop, adjudicated by the Research Manager.
        workflow.add_conditional_edges(
            "Bull Researcher",
            self.conditional_logic.should_continue_debate,
            {
                "Bear Researcher": "Bear Researcher",
                "Research Manager": "Research Manager",
            },
        )
        workflow.add_conditional_edges(
            "Bear Researcher",
            self.conditional_logic.should_continue_debate,
            {
                "Bull Researcher": "Bull Researcher",
                "Research Manager": "Research Manager",
            },
        )
        workflow.add_edge("Research Manager", "Trader")
        workflow.add_edge("Trader", "Risky Analyst")

        # Risk debate cycle: Risky -> Safe -> Neutral -> Risky, each able to
        # hand off to the Risk Judge.
        workflow.add_conditional_edges(
            "Risky Analyst",
            self.conditional_logic.should_continue_risk_analysis,
            {
                "Safe Analyst": "Safe Analyst",
                "Risk Judge": "Risk Judge",
            },
        )
        workflow.add_conditional_edges(
            "Safe Analyst",
            self.conditional_logic.should_continue_risk_analysis,
            {
                "Neutral Analyst": "Neutral Analyst",
                "Risk Judge": "Risk Judge",
            },
        )
        workflow.add_conditional_edges(
            "Neutral Analyst",
            self.conditional_logic.should_continue_risk_analysis,
            {
                "Risky Analyst": "Risky Analyst",
                "Risk Judge": "Risk Judge",
            },
        )

        workflow.add_edge("Risk Judge", END)

        # Compile and return.
        return workflow.compile()


# simple_web.py

from flask import Flask, render_template, request, jsonify, redirect, url_for, session
import datetime
import json
import threading
import time
from pathlib import Path
from typing import Dict, List, Optional
import uuid

from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from cli.models import AnalystType

app = Flask(__name__)
app.config['SECRET_KEY'] = 'tradingagents_secret_key'

# Global storage for analysis sessions
analysis_sessions = {}


class SimpleMessageBuffer:
    """Per-session accumulator for messages, agent statuses and report sections."""

    def __init__(self, session_id):
        self.session_id = session_id
        self.messages = []  # chronological log of {timestamp, type, content} dicts
        # Every agent starts "pending" and is flipped as chunks stream in.
        self.agent_status = {
            "Market Analyst": "pending",
            "Social Analyst": "pending",
            "News Analyst": "pending",
            "Fundamentals Analyst": "pending",
            "Bull Researcher": "pending",
            "Bear Researcher": "pending",
            "Research Manager": "pending",
            "Trader": "pending",
            "Risky Analyst": "pending",
            "Neutral Analyst": "pending",
            "Safe Analyst": "pending",
            "Portfolio Manager": "pending",
        }
        self.report_sections = {
            "market_report": None,
            "sentiment_report": None,
            "news_report": None,
            "fundamentals_report": None,
            "investment_plan": None,
            "trader_investment_plan": None,
            "final_trade_decision": None,
        }
        self.current_step = "waiting"  # human-readable phase label
        self.progress = 0  # percent complete, 0-100
        self.status = "pending"

    def add_message(self, message_type, content):
        """Append a timestamped message to the session log."""
        timestamp = datetime.datetime.now().strftime("%H:%M:%S")
        message = {"timestamp": timestamp, "type": message_type, "content": content}
        self.messages.append(message)

    def update_agent_status(self, agent, status):
        """Set the display status for an agent."""
        self.agent_status[agent] = status

    def update_report_section(self, section_name, content):
        """Store report text for a known section; unknown names are ignored."""
        if section_name in self.report_sections:
            self.report_sections[section_name] = content

    def update_progress(self, progress, step):
        """Update the completion percentage and the current step label."""
        self.progress = progress
        self.current_step = step


@app.route('/')
def index():
    return render_template('simple_index.html')


# BUG FIX: the route previously read '/analysis/' with no <session_id> URL
# variable, so Flask could never supply the session_id argument to the view.
@app.route('/analysis/<session_id>')
def analysis_page(session_id):
    """Render the live-analysis page for an existing session."""
    if session_id not in analysis_sessions:
        return redirect(url_for('index'))
    return render_template('simple_analysis.html', session_id=session_id)


@app.route('/api/start_analysis', methods=['POST'])
def start_analysis():
    """Create a session and kick off the analysis in a background thread."""
    data = request.json
    session_id = str(uuid.uuid4())

    # Store analysis configuration.
    analysis_sessions[session_id] = {
        'config': data,
        'buffer': SimpleMessageBuffer(session_id),
        'status': 'running'
    }

    # Start analysis in background.
    thread = threading.Thread(
        target=run_analysis_background,
        args=(session_id, data)
    )
    thread.daemon = True
    thread.start()

    return jsonify({'session_id': session_id, 'status': 'started'})


# BUG FIX: restored the missing <session_id> URL variable (was '/api/status/').
@app.route('/api/status/<session_id>')
def get_status(session_id):
    """Return the latest messages, statuses and reports for a session."""
    if session_id not in analysis_sessions:
        return jsonify({'error': 'Session not found'}), 404

    buffer = analysis_sessions[session_id]['buffer']
    return jsonify({
        'messages': buffer.messages[-10:],  # Last 10 messages
        'agent_status': buffer.agent_status,
        'report_sections': buffer.report_sections,
        'progress': buffer.progress,
        'current_step': buffer.current_step,
        'status': analysis_sessions[session_id]['status']
    })


def run_analysis_background(session_id: str, config: Dict):
    """Run the trading analysis in background thread"""
    # BUG FIX: resolve the buffer before the try-block; previously a failure
    # before the first try-line would raise NameError on `buffer` inside the
    # except handler.
    buffer = analysis_sessions[session_id]['buffer']
    try:
        # Build the effective configuration from the user's selections first,
        # and hand it to the graph. (Previously `updated_config` was computed
        # but never used — the graph always ran with DEFAULT_CONFIG.)
        updated_config = DEFAULT_CONFIG.copy()
        updated_config.update({
            'llm_provider': config['llm_provider'],
            'backend_url': config['backend_url'],
            'shallow_thinker': config['shallow_thinker'],
            'deep_thinker': config['deep_thinker'],
            'research_depth': config['research_depth']
        })

        # NOTE(review): assumes TradingAgentsGraph accepts the config as its
        # first positional argument, as the original DEFAULT_CONFIG call did.
        graph = TradingAgentsGraph(updated_config)

        # Create initial state.
        init_state = graph.propagator.create_initial_state(
            config['ticker'],
            config['analysis_date']
        )

        buffer.add_message("System", f"Starting analysis for {config['ticker']} on {config['analysis_date']}")
        buffer.update_progress(10, "Initializing analysis...")

        # Get graph args.
        args = graph.propagator.get_graph_args()

        # Stream the analysis; progress is a rough estimate from step counts.
        step_count = 0
        total_steps = len(config['analysts']) * 2 + 5

        for chunk in graph.graph.stream(init_state, **args):
            step_count += 1
            progress = min(90, (step_count / total_steps) * 80 + 10)

            if len(chunk.get("messages", [])) > 0:
                last_message = chunk["messages"][-1]

                if hasattr(last_message, "content"):
                    content = str(last_message.content)
                    if len(content) > 500:
                        content = content[:500] + "..."
                    buffer.add_message("Analysis", content)

            # Update agent statuses and reports as sections appear in chunks.
            if "market_report" in chunk and chunk["market_report"]:
                buffer.update_report_section("market_report", chunk["market_report"])
                buffer.update_agent_status("Market Analyst", "completed")
                buffer.update_progress(progress, "Market analysis completed")

            if "sentiment_report" in chunk and chunk["sentiment_report"]:
                buffer.update_report_section("sentiment_report", chunk["sentiment_report"])
                buffer.update_agent_status("Social Analyst", "completed")
                buffer.update_progress(progress, "Social sentiment analysis completed")

            if "news_report" in chunk and chunk["news_report"]:
                buffer.update_report_section("news_report", chunk["news_report"])
                buffer.update_agent_status("News Analyst", "completed")
                buffer.update_progress(progress, "News analysis completed")

            if "fundamentals_report" in chunk and chunk["fundamentals_report"]:
                buffer.update_report_section("fundamentals_report", chunk["fundamentals_report"])
                buffer.update_agent_status("Fundamentals Analyst", "completed")
                buffer.update_progress(progress, "Fundamentals analysis completed")

            if "investment_debate_state" in chunk and chunk["investment_debate_state"]:
                debate_state = chunk["investment_debate_state"]

                if "judge_decision" in debate_state and debate_state["judge_decision"]:
                    buffer.update_report_section("investment_plan", debate_state["judge_decision"])
                    buffer.update_agent_status("Research Manager", "completed")
                    buffer.update_progress(progress, "Research team decision completed")

            if "trader_investment_plan" in chunk and chunk["trader_investment_plan"]:
                buffer.update_report_section("trader_investment_plan", chunk["trader_investment_plan"])
                buffer.update_agent_status("Trader", "completed")
                buffer.update_progress(progress, "Trading plan completed")

            if "final_trade_decision" in chunk and chunk["final_trade_decision"]:
                buffer.update_report_section("final_trade_decision", chunk["final_trade_decision"])
                buffer.update_agent_status("Portfolio Manager", "completed")
                buffer.update_progress(100, "Analysis completed!")

        buffer.update_progress(100, "Analysis completed successfully!")
        analysis_sessions[session_id]['status'] = 'completed'

    except Exception as e:
        buffer.add_message("Error", f"Analysis failed: {str(e)}")
        buffer.update_progress(0, "Analysis failed")
        analysis_sessions[session_id]['status'] = 'failed'


if __name__ == '__main__':
    # Create templates directory if it doesn't exist.
    Path('templates').mkdir(exist_ok=True)

    app.run(debug=True, host='0.0.0.0', port=5000)

# tradingagents/agents/analysts/market_analyst.py
import time
import json

# `_is_crypto_symbol` is exported alongside the factory so callers and tests
# can reuse the symbol-detection logic directly.
__all__ = ["create_market_analyst", "_is_crypto_symbol"]


def _is_crypto_symbol(symbol: str) -> bool:
    """Heuristically decide whether *symbol* names a cryptocurrency.

    Strategy: a whitelist of well-known crypto tickers and a blacklist of
    well-known stock tickers (checked case-insensitively); unknown symbols are
    conservatively assumed to be stocks unless they are short (<= 4 chars)
    and purely alphanumeric.
    """
    # Known crypto symbols (most common ones).
    crypto_symbols = {
        'BTC', 'ETH', 'ADA', 'SOL', 'DOT', 'AVAX', 'MATIC', 'LINK', 'UNI', 'AAVE',
        'XRP', 'LTC', 'BCH', 'EOS', 'TRX', 'XLM', 'VET', 'ALGO', 'ATOM', 'LUNA',
        'NEAR', 'FTM', 'CRO', 'SAND', 'MANA', 'AXS', 'GALA', 'ENJ', 'CHZ', 'BAT',
        'ZEC', 'DASH', 'XMR', 'DOGE', 'SHIB', 'PEPE', 'FLOKI', 'BNB', 'USDT', 'USDC',
        'TON', 'ICP', 'HBAR', 'THETA', 'FIL', 'ETC', 'MKR', 'APT', 'LDO', 'OP',
        'IMX', 'GRT', 'RUNE', 'FLOW', 'EGLD', 'XTZ', 'MINA', 'ROSE', 'KAVA'
    }

    # Known stock symbols (to avoid false positives).
    stock_symbols = {
        'AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA', 'NVDA', 'META', 'NFLX', 'DIS', 'AMD',
        'INTC', 'CRM', 'ORCL', 'ADBE', 'CSCO', 'PEP', 'KO', 'WMT', 'JNJ', 'PFE',
        'V', 'MA', 'HD', 'UNH', 'BAC', 'XOM', 'CVX', 'LLY', 'ABBV', 'COST',
        'AVGO', 'TMO', 'ACN', 'DHR', 'TXN', 'LOW', 'QCOM', 'HON', 'UPS', 'MDT'
    }

    symbol_upper = symbol.upper()

    # If it's a known stock symbol, it's definitely not crypto.
    if symbol_upper in stock_symbols:
        return False

    # If it's a known crypto symbol, it's definitely crypto.
    if symbol_upper in crypto_symbols:
        return True

    # Unknown symbols: be conservative and assume stock unless the symbol is
    # short. Most stock tickers handled here are 5+ characters.
    if len(symbol) >= 5:
        return False

    # Short (<= 4 char) purely-alphanumeric symbols are treated as crypto.
    # Note: `isalnum()` already rejects '.', '-' and '_', so the original
    # extra membership check for those characters was dead code; and the
    # `len(symbol) <= 4` re-check was guaranteed by the early return above.
    return symbol.isalnum()


def create_market_analyst(llm, toolkit):
    """Build a LangGraph node closure that performs technical market analysis.

    Picks crypto-specific or stock-specific tools and prompts depending on
    the ticker in the incoming state. Returns the node function.
    """
    # Imported lazily so this module (and the pure helper `_is_crypto_symbol`)
    # can be imported without LangChain installed; the names are captured by
    # the closure below.
    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

    def market_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]
        company_name = state["company_of_interest"]

        # Check if we're dealing with crypto or stocks.
        is_crypto = _is_crypto_symbol(ticker)

        if is_crypto:
            # Use crypto-specific tools.
            tools = [toolkit.get_crypto_price_history, toolkit.get_crypto_technical_analysis]

            system_message = (
                """You are a cryptocurrency technical analyst tasked with analyzing crypto markets. Your role is to provide comprehensive technical analysis for cryptocurrency trading. Focus on crypto-specific patterns and indicators that are most relevant for digital assets.

Key areas to analyze for cryptocurrency:
- Price action and trend analysis
- Volume patterns and market liquidity
- Support and resistance levels
- Market volatility and risk assessment
- Momentum indicators and their reliability in crypto markets
- Market sentiment and psychological levels

Please write a very detailed and nuanced report of the trends you observe in the cryptocurrency market. Analyze both short-term and long-term trends. Do not simply state the trends are mixed, provide detailed and fine-grained analysis and insights that may help crypto traders make decisions. Consider the unique characteristics of cryptocurrency markets such as 24/7 trading, higher volatility, and sentiment-driven movements."""
                + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
            )
        else:
            # Use stock-specific tools (original functionality).
            if toolkit.config["online_tools"]:
                tools = [
                    toolkit.get_YFin_data_online,
                    toolkit.get_stockstats_indicators_report_online,
                ]
            else:
                tools = [
                    toolkit.get_YFin_data,
                    toolkit.get_stockstats_indicators_report,
                ]

            system_message = (
                """You are a trading assistant tasked with analyzing financial markets. Your role is to select the **most relevant indicators** for a given market condition or trading strategy from the following list. The goal is to choose up to **8 indicators** that provide complementary insights without redundancy. Categories and each category's indicators are:

Moving Averages:
- close_50_sma: 50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.
- close_200_sma: 200 SMA: A long-term trend benchmark. Usage: Confirm overall market trend and identify golden/death cross setups. Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries.
- close_10_ema: 10 EMA: A responsive short-term average. Usage: Capture quick shifts in momentum and potential entry points. Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals.

MACD Related:
- macd: MACD: Computes momentum via differences of EMAs. Usage: Look for crossovers and divergence as signals of trend changes. Tips: Confirm with other indicators in low-volatility or sideways markets.
- macds: MACD Signal: An EMA smoothing of the MACD line. Usage: Use crossovers with the MACD line to trigger trades. Tips: Should be part of a broader strategy to avoid false positives.
- macdh: MACD Histogram: Shows the gap between the MACD line and its signal. Usage: Visualize momentum strength and spot divergence early. Tips: Can be volatile; complement with additional filters in fast-moving markets.

Momentum Indicators:
- rsi: RSI: Measures momentum to flag overbought/oversold conditions. Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis.

Volatility Indicators:
- boll: Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. Usage: Acts as a dynamic benchmark for price movement. Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals.
- boll_ub: Bollinger Upper Band: Typically 2 standard deviations above the middle line. Usage: Signals potential overbought conditions and breakout zones. Tips: Confirm signals with other tools; prices may ride the band in strong trends.
- boll_lb: Bollinger Lower Band: Typically 2 standard deviations below the middle line. Usage: Indicates potential oversold conditions. Tips: Use additional analysis to avoid false reversal signals.
- atr: ATR: Averages true range to measure volatility. Usage: Set stop-loss levels and adjust position sizes based on current market volatility. Tips: It's a reactive measure, so use it as part of a broader risk management strategy.

Volume-Based Indicators:
- vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses.

- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_YFin_data first to retrieve the CSV that is needed to generate indicators. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."""
                + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
            )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)

        result = chain.invoke(state["messages"])

        # Only a tool-call-free response is a finished report.
        report = ""
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "market_report": report,
        }

    return market_analyst_node
# cli/utils.py

import questionary
from typing import List, Optional, Tuple, Dict

from cli.models import AnalystType

# NOTE(review): `console` is referenced by the prompt helpers below but is not
# defined or imported anywhere in this module's visible code — presumably a
# rich `Console` instance provided elsewhere; confirm, otherwise the
# cancellation paths below raise NameError.

# Display-label / analyst-type pairs, in the order presented to the user.
ANALYST_ORDER = [
    ("Market Analyst", AnalystType.MARKET),
    ("Social Media Analyst", AnalystType.SOCIAL),
    ("News Analyst", AnalystType.NEWS),
    ("Fundamentals Analyst", AnalystType.FUNDAMENTALS),
]


def get_ticker() -> str:
    """Prompt the user to enter a ticker symbol."""

    def _non_empty(text: str):
        # questionary validators return True for valid input, or an error string.
        return len(text.strip()) > 0 or "Please enter a valid ticker symbol."

    answer = questionary.text(
        "Enter the ticker symbol to analyze:",
        validate=_non_empty,
        style=questionary.Style(
            [
                ("text", "fg:green"),
                ("highlighted", "noinherit"),
            ]
        ),
    ).ask()

    if not answer:
        console.print("\n[red]No ticker symbol provided. Exiting...[/red]")
        exit(1)

    return answer.strip().upper()


def get_analysis_date() -> str:
    """Prompt the user to enter a date in YYYY-MM-DD format."""
    import re
    from datetime import datetime

    def validate_date(date_str: str) -> bool:
        # Shape check first, then a real calendar check.
        if not re.match(r"^\d{4}-\d{2}-\d{2}$", date_str):
            return False
        try:
            datetime.strptime(date_str, "%Y-%m-%d")
        except ValueError:
            return False
        return True

    answer = questionary.text(
        "Enter the analysis date (YYYY-MM-DD):",
        validate=lambda x: validate_date(x.strip())
        or "Please enter a valid date in YYYY-MM-DD format.",
        style=questionary.Style(
            [
                ("text", "fg:green"),
                ("highlighted", "noinherit"),
            ]
        ),
    ).ask()

    if not answer:
        console.print("\n[red]No date provided. Exiting...[/red]")
        exit(1)

    return answer.strip()


def select_analysts() -> List[AnalystType]:
    """Select analysts using an interactive checkbox."""
    options = [
        questionary.Choice(display, value=value) for display, value in ANALYST_ORDER
    ]

    picked = questionary.checkbox(
        "Select Your [Analysts Team]:",
        choices=options,
        instruction="\n- Press Space to select/unselect analysts\n- Press 'a' to select/unselect all\n- Press Enter when done",
        validate=lambda x: len(x) > 0 or "You must select at least one analyst.",
        style=questionary.Style(
            [
                ("checkbox-selected", "fg:green"),
                ("selected", "fg:green noinherit"),
                ("highlighted", "noinherit"),
                ("pointer", "noinherit"),
            ]
        ),
    ).ask()

    if not picked:
        console.print("\n[red]No analysts selected. Exiting...[/red]")
        exit(1)

    return picked


def select_research_depth() -> int:
    """Select research depth using an interactive selection."""
    # Each option maps a human-readable description to a debate-round count.
    DEPTH_OPTIONS = [
        ("Shallow - Quick research, few debate and strategy discussion rounds", 1),
        ("Medium - Middle ground, moderate debate rounds and strategy discussion", 3),
        ("Deep - Comprehensive research, in depth debate and strategy discussion", 5),
    ]

    depth = questionary.select(
        "Select Your [Research Depth]:",
        choices=[
            questionary.Choice(display, value=value) for display, value in DEPTH_OPTIONS
        ],
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=questionary.Style(
            [
                ("selected", "fg:yellow noinherit"),
                ("highlighted", "fg:yellow noinherit"),
                ("pointer", "fg:yellow noinherit"),
            ]
        ),
    ).ask()

    if depth is None:
        console.print("\n[red]No research depth selected. Exiting...[/red]")
        exit(1)

    return depth
Exiting...[/red]") 120 | exit(1) 121 | 122 | return choice 123 | 124 | 125 | def select_shallow_thinking_agent(provider) -> str: 126 | """Select shallow thinking llm engine using an interactive selection.""" 127 | 128 | # Define shallow thinking llm engine options with their corresponding model names 129 | SHALLOW_AGENT_OPTIONS = { 130 | "openai": [ 131 | ("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"), 132 | ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"), 133 | ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"), 134 | ("GPT-4o - Standard model with solid capabilities", "gpt-4o"), 135 | ], 136 | "anthropic": [ 137 | ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"), 138 | ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"), 139 | ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"), 140 | ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"), 141 | ], 142 | "google": [ 143 | ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"), 144 | ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"), 145 | ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"), 146 | ], 147 | "openrouter": [ 148 | ("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"), 149 | ("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"), 150 | ("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"), 151 | ], 152 | "ollama": [ 153 | ("llama3.1 local", "llama3.1"), 154 | ("llama3.2 local", "llama3.2"), 155 | ] 156 | } 157 | 158 | choice = questionary.select( 159 | "Select Your [Quick-Thinking LLM Engine]:", 160 | 
choices=[ 161 | questionary.Choice(display, value=value) 162 | for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()] 163 | ], 164 | instruction="\n- Use arrow keys to navigate\n- Press Enter to select", 165 | style=questionary.Style( 166 | [ 167 | ("selected", "fg:magenta noinherit"), 168 | ("highlighted", "fg:magenta noinherit"), 169 | ("pointer", "fg:magenta noinherit"), 170 | ] 171 | ), 172 | ).ask() 173 | 174 | if choice is None: 175 | console.print( 176 | "\n[red]No shallow thinking llm engine selected. Exiting...[/red]" 177 | ) 178 | exit(1) 179 | 180 | return choice 181 | 182 | 183 | def select_deep_thinking_agent(provider) -> str: 184 | """Select deep thinking llm engine using an interactive selection.""" 185 | 186 | # Define deep thinking llm engine options with their corresponding model names 187 | DEEP_AGENT_OPTIONS = { 188 | "openai": [ 189 | ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"), 190 | ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"), 191 | ("GPT-4o - Standard model with solid capabilities", "gpt-4o"), 192 | ("o4-mini - Specialized reasoning model (compact)", "o4-mini"), 193 | ("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"), 194 | ("o3 - Full advanced reasoning model", "o3"), 195 | ("o1 - Premier reasoning and problem-solving model", "o1"), 196 | ], 197 | "anthropic": [ 198 | ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"), 199 | ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"), 200 | ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"), 201 | ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"), 202 | ("Claude Opus 4 - Most powerful Anthropic model", " claude-opus-4-0"), 203 | ], 204 | "google": [ 205 | ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"), 206 | ("Gemini 2.0 
Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"), 207 | ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"), 208 | ("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"), 209 | ], 210 | "openrouter": [ 211 | ("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"), 212 | ("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"), 213 | ], 214 | "ollama": [ 215 | ("llama3.1 local", "llama3.1"), 216 | ("qwen3", "qwen3"), 217 | ] 218 | } 219 | 220 | choice = questionary.select( 221 | "Select Your [Deep-Thinking LLM Engine]:", 222 | choices=[ 223 | questionary.Choice(display, value=value) 224 | for display, value in DEEP_AGENT_OPTIONS[provider.lower()] 225 | ], 226 | instruction="\n- Use arrow keys to navigate\n- Press Enter to select", 227 | style=questionary.Style( 228 | [ 229 | ("selected", "fg:magenta noinherit"), 230 | ("highlighted", "fg:magenta noinherit"), 231 | ("pointer", "fg:magenta noinherit"), 232 | ] 233 | ), 234 | ).ask() 235 | 236 | if choice is None: 237 | console.print("\n[red]No deep thinking llm engine selected. 
Exiting...[/red]") 238 | exit(1) 239 | 240 | return choice 241 | 242 | def select_llm_provider() -> tuple[str, str]: 243 | """Select the OpenAI api url using interactive selection.""" 244 | # Define OpenAI api options with their corresponding endpoints 245 | BASE_URLS = [ 246 | ("OpenAI", "https://api.openai.com/v1"), 247 | ("Anthropic", "https://api.anthropic.com/"), 248 | ("Google", "https://generativelanguage.googleapis.com/v1"), 249 | ("Openrouter", "https://openrouter.ai/api/v1"), 250 | ("Ollama", "http://localhost:11434/v1"), 251 | ] 252 | 253 | choice = questionary.select( 254 | "Select your LLM Provider:", 255 | choices=[ 256 | questionary.Choice(display, value=(display, value)) 257 | for display, value in BASE_URLS 258 | ], 259 | instruction="\n- Use arrow keys to navigate\n- Press Enter to select", 260 | style=questionary.Style( 261 | [ 262 | ("selected", "fg:magenta noinherit"), 263 | ("highlighted", "fg:magenta noinherit"), 264 | ("pointer", "fg:magenta noinherit"), 265 | ] 266 | ), 267 | ).ask() 268 | 269 | if choice is None: 270 | console.print("\n[red]no OpenAI backend selected. Exiting...[/red]") 271 | exit(1) 272 | 273 | display_name, url = choice 274 | print(f"You selected: {display_name}\tURL: {url}") 275 | 276 | return display_name, url 277 | -------------------------------------------------------------------------------- /templates/simple_index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | TradingAgents - Multi-Agent LLM Trading Framework 7 | 8 | 9 | 109 | 110 | 111 |
112 |
113 |
114 |

TradingAgents

115 |

Multi-Agent LLM Financial Trading Framework

116 |
117 | 118 |
119 |
120 | 121 |
122 |
123 |
1
124 |

Ticker Symbol

125 |
126 | 128 |
129 | 130 | 131 |
132 |
133 |
2
134 |

Analysis Date

135 |
136 | 137 |
138 | 139 | 140 |
141 |
142 |
3
143 |

Analyst Team

144 |
145 |
146 |
147 | 148 |
Market Analyst
149 |
150 |
151 | 152 |
Social Analyst
153 |
154 |
155 | 156 |
News Analyst
157 |
158 |
159 | 160 |
Fundamentals Analyst
161 |
162 |
163 |
164 | 165 | 166 |
167 |
168 |
4
169 |

Research Depth

170 |
171 | 176 |
177 | 178 |
179 |
180 |
5
181 |

LLM Provider

182 |
183 | 187 | 189 |
190 | 191 |
192 |
193 |
6
194 |

Thinking Agents

195 |
196 |
197 |
198 | 201 |
202 |
203 | 206 |
207 |
208 |
209 | 210 | 213 |
214 |
215 |
216 |
217 | 218 | 269 | 270 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Trading Agents Crypto 🚀 2 | 3 | > **Multi-Agent LLM Cryptocurrency Trading Framework with Web Interface** 4 | 5 | A powerful cryptocurrency trading analysis framework powered by multiple specialized AI agents working together to provide comprehensive market insights. This is an enhanced version of [TauricResearch/TradingAgents](https://github.com/TauricResearch/TradingAgents) with **comprehensive cryptocurrency support**, specialized crypto agents, real-time data integration, and a modern responsive web interface. 6 | 7 | > 📸 **Check out the [screenshots](#-screenshots)** to see the beautiful light/dark mode interface and real-time dashboard in action! 
8 | 9 | ## ✨ Features 10 | 11 | ### 🌐 **Modern Web Interface** 12 | - **Dark/Light Mode**: Eye-friendly themes with smooth transitions *(see screenshots below)* 13 | - **Real-time Dashboard**: Live updates of agent status and analysis progress 14 | - **Interactive Forms**: Easy configuration of analysis parameters 15 | - **Responsive Design**: Works seamlessly on desktop and mobile devices 16 | - **Professional UI**: Clean, modern design optimized for crypto trading workflows 17 | 18 | ### 🤖 **Multi-Agent Architecture** 19 | - **Analyst Team**: Market, Social, News, and Fundamentals analysis specialists 20 | - **Research Team**: Bull and Bear researchers with debate-driven insights 21 | - **Trading Team**: Strategy formulation and risk assessment 22 | - **Portfolio Management**: Final decision making and trade execution 23 | 24 | ### 💰 **Cryptocurrency Focus** 25 | - **Native Crypto Support**: Built-in support for popular cryptocurrencies (BTC, ETH, ADA, etc.) 26 | - **Crypto Market Data**: Real-time price feeds, volume analysis, and market cap tracking 27 | - **Social Sentiment**: Reddit, Twitter, and crypto community sentiment analysis 28 | - **News Integration**: Crypto-specific news sources and market impact assessment 29 | - **Technical Analysis**: Crypto-adapted indicators (RSI, MACD, Bollinger Bands for volatile markets) 30 | - **On-chain Metrics**: Integration with blockchain data sources 31 | - **DeFi Analytics**: Decentralized finance protocol analysis and yield farming insights 32 | 33 | ### 🧠 **LLM Integration** 34 | - **Multiple Providers**: Support for OpenAI, Anthropic, and Google models 35 | - **Latest Models**: Including GPT-4, Claude Sonnet/Opus, and Gemini 2.0/2.5 36 | - **Quick & Deep Thinking**: Separate models for fast responses and complex analysis 37 | - **API Key Management**: Secure API key input and management 38 | 39 | ## 🆕 **What's New for Crypto Support** 40 | 41 | This fork adds comprehensive cryptocurrency trading capabilities to the 
original TradingAgents framework: 42 | 43 | ### **Enhanced Data Sources** 44 | - **CoinGecko API**: Real-time crypto prices, market caps, and volume data 45 | - **CryptoCompare**: Historical data and social metrics 46 | - **Crypto News APIs**: Specialized crypto news aggregation 47 | - **Social Media Integration**: Reddit sentiment analysis for crypto communities 48 | - **On-chain Data**: Blockchain transaction metrics and wallet analysis 49 | 50 | ### **Crypto-Specific Agents** 51 | - **DeFi Analyst**: Specialized in decentralized finance protocols and yield analysis 52 | - **Tokenomics Analyst**: Token supply, distribution, and economic model analysis 53 | - **Chain Analyst**: Cross-chain analysis and bridge monitoring 54 | - **NFT Market Analyst**: Non-fungible token market trends and analysis 55 | 56 | ### **Market-Adapted Features** 57 | - **24/7 Trading**: Crypto markets never close - agents adapted for continuous operation 58 | - **Volatility Management**: Enhanced risk assessment for crypto's high volatility 59 | - **Multi-Exchange Support**: Analysis across different crypto exchanges 60 | - **Stablecoin Integration**: USDT, USDC, DAI analysis for portfolio stability 61 | - **Regulatory Monitoring**: Crypto regulation tracking and impact analysis 62 | 63 | ### **Technical Enhancements** 64 | - **WebSocket Streams**: Real-time crypto price feeds 65 | - **Portfolio Rebalancing**: Dynamic crypto portfolio management 66 | - **Yield Farming Analysis**: DeFi yield opportunity identification 67 | - **Cross-chain Analytics**: Multi-blockchain analysis capabilities 68 | 69 | ## 📸 Screenshots 70 | 71 | ### **Light Mode Interface** 72 | ![Light Mode Homepage](assets/light_index.png) 73 | *Clean and professional light mode interface with intuitive crypto analysis configuration* 74 | 75 | ### **Dark Mode Interface** 76 | ![Dark Mode Homepage](assets/dark_index.png) 77 | *Eye-friendly dark mode perfect for extended trading sessions* 78 | 79 | ### **Real-time Analysis 
Dashboard** 80 | ![Analysis Dashboard](assets/Dashboard.png) 81 | *Live dashboard showing multi-agent analysis progress with real-time updates* 82 | 83 | ## 🚀 Quick Start 84 | 85 | > **Note**: This crypto-enhanced version includes specialized agents and data sources optimized for cryptocurrency trading analysis. 86 | 87 | ### Prerequisites 88 | - Python 3.8 or higher 89 | - Node.js (for web dependencies) 90 | - API keys for your chosen LLM provider 91 | - FinnHub API key for financial data 92 | - Optional: CoinGecko API for enhanced crypto data (free tier available) 93 | 94 | ### Installation 95 | 96 | 1. **Clone the repository** 97 | ```bash 98 | git clone https://github.com/yourusername/trading-agents-crypto.git 99 | cd trading-agents-crypto 100 | ``` 101 | 102 | 2. **Create virtual environment** 103 | ```bash 104 | python -m venv venv 105 | source venv/bin/activate # On Windows: venv\Scripts\activate 106 | ``` 107 | 108 | 3. **Install dependencies** 109 | ```bash 110 | pip install -r requirements.txt 111 | ``` 112 | 113 | 4. **Set up environment variables** 114 | ```bash 115 | export FINNHUB_API_KEY=your_finnhub_api_key 116 | # Note: LLM API keys are entered via the web interface 117 | ``` 118 | 119 | 5. **Run the web application** 120 | ```bash 121 | python web_app.py 122 | ``` 123 | 124 | 6. **Open your browser** 125 | Navigate to `http://localhost:5000` to access the web interface 126 | 127 | ## 🎯 Usage 128 | 129 | ### Web Interface Workflow 130 | 131 | 1. **Configure Crypto Analysis** 132 | - Enter cryptocurrency symbol (e.g., BTC, ETH, ADA, MATIC, SOL) 133 | - Select analysis date 134 | - Choose specialized crypto analyst team members 135 | - Set research depth level (market conditions, DeFi analysis, etc.) 136 | - Configure LLM provider and models 137 | - Enter your API key 138 | 139 | 2. 
**Start Analysis** 140 | - Click "Start Analysis" to begin 141 | - Automatically redirected to real-time dashboard 142 | - Monitor live progress with visual agent status indicators 143 | - View agent status updates and analysis messages as they stream 144 | 145 | 3. **Review Results** 146 | - Examine detailed reports from each specialized agent 147 | - Export comprehensive analysis results (HTML format) 148 | - Switch between light/dark mode for optimal viewing 149 | - Start new analysis with different parameters 150 | 151 | ### Supported LLM Providers 152 | 153 | #### **OpenAI** 154 | - GPT-4o, GPT-4.1 series 155 | - o1, o3, o4 reasoning models 156 | - API endpoint: `https://api.openai.com/v1` 157 | 158 | #### **Anthropic** 159 | - Claude Haiku 3.5, Sonnet 3.5/3.7/4 160 | - Claude Opus 4 (Deep Thinking) 161 | - API endpoint: `https://api.anthropic.com/` 162 | 163 | #### **Google** 164 | - Gemini 2.0 Flash-Lite, 2.0 Flash, 2.5 Flash 165 | - Gemini 2.5 Pro 166 | - API endpoint: `https://generativelanguage.googleapis.com/v1` 167 | 168 | ## 🏗️ Architecture 169 | 170 | ### Agent Workflow 171 | ``` 172 | Analyst Team → Research Team → Trader → Risk Management → Portfolio Management 173 | ``` 174 | 175 | ### Web Technology Stack 176 | - **Backend**: Flask + SocketIO for real-time communication 177 | - **Frontend**: Bootstrap 5 + FontAwesome for modern UI 178 | - **Real-time**: WebSocket connections for live updates 179 | - **Theme**: Dark/Light mode with CSS variables 180 | 181 | ## 📊 Analysis Components 182 | 183 | ### **Analyst Team** 184 | - 📈 **Market Analyst**: Technical indicators and price patterns 185 | - 📱 **Social Analyst**: Social media sentiment and trends 186 | - 📰 **News Analyst**: News events and market impact 187 | - 📋 **Fundamentals Analyst**: Project fundamentals and metrics 188 | 189 | ### **Research Team** 190 | - 🐂 **Bull Researcher**: Optimistic market perspectives 191 | - 🐻 **Bear Researcher**: Risk assessment and concerns 192 | - ⚖️ **Research 
Manager**: Balanced decision coordination 193 | 194 | ### **Trading Team** 195 | - 💼 **Trader**: Strategy formulation and timing 196 | - 🛡️ **Risk Manager**: Risk assessment and mitigation 197 | - 📊 **Portfolio Manager**: Final execution decisions 198 | 199 | ## 🔧 Configuration 200 | 201 | ### Environment Variables 202 | ```bash 203 | FINNHUB_API_KEY=your_finnhub_key # Required for financial data 204 | ``` 205 | 206 | ### LLM Configuration 207 | LLM API keys and model selection are configured through the web interface for security and flexibility. 208 | 209 | ## 📝 API Documentation 210 | 211 | ### REST Endpoints 212 | - `GET /` - Main configuration page 213 | - `GET /analysis` - Analysis dashboard 214 | - `POST /api/start_analysis` - Start new analysis 215 | 216 | ### WebSocket Events 217 | - `join_session` - Join analysis session 218 | - `new_message` - Real-time message updates 219 | - `agent_status_update` - Agent status changes 220 | - `progress_update` - Analysis progress updates 221 | 222 | ## ⚠️ Disclaimer 223 | 224 | This framework is designed for research and educational purposes. Trading performance may vary based on many factors, including model selection, market conditions, and data quality. **This is not financial advice.** Always conduct your own research and consider consulting with financial professionals before making trading decisions. 225 | 226 | ## 🚧 **Next Features** 227 | 228 | We're continuously working to enhance the cryptocurrency trading capabilities. 
Here's what's coming next: 229 | 230 | ### **🔗 Enhanced Data Sources** 231 | - **Binance API Integration**: Direct integration with Binance for real-time order book data 232 | - **Coinbase Pro API**: Professional trading data and advanced metrics 233 | - **DeFiPulse Integration**: TVL (Total Value Locked) tracking across DeFi protocols 234 | - **Messari API**: Comprehensive crypto asset metrics and research data 235 | - **The Graph Protocol**: Decentralized blockchain data indexing 236 | - **Dune Analytics**: On-chain analytics and custom dashboard integration 237 | - **Alternative Data**: Social media influence metrics, GitHub activity, and developer engagement 238 | 239 | ### **🤖 Optimized Agent Prompts for Multi-Agent Collaboration** 240 | - **Inter-Agent Communication**: Enhanced protocols for agents to share insights more effectively 241 | - **Consensus Mechanisms**: Advanced voting and agreement systems between agents 242 | - **Conflict Resolution**: Automatic handling of contradictory analysis from different agents 243 | - **Context Sharing**: Improved memory and context passing between agent interactions 244 | - **Role Specialization**: More granular agent roles with specialized expertise 245 | - **Dynamic Team Formation**: Adaptive agent team composition based on market conditions 246 | - **Learning from Interactions**: Agents learn from past collaborative successes and failures 247 | 248 | ### **📊 Advanced Analytics** 249 | - **Sentiment Heat Maps**: Visual representation of market sentiment across different timeframes 250 | - **Cross-Asset Correlation**: Analysis of crypto correlations with traditional markets 251 | - **Liquidity Analysis**: Deep dive into market liquidity and slippage estimation 252 | - **MEV Analysis**: Maximum Extractable Value opportunities in DeFi 253 | - **Governance Token Analysis**: DAO proposal and voting impact assessment 254 | 255 | ### **🛠️ Platform Enhancements** 256 | - **Mobile App**: Native mobile application for iOS and 
Android 257 | - **Telegram Bot**: Real-time alerts and analysis via Telegram integration 258 | - **Portfolio Tracking**: Advanced portfolio management with P&L tracking 259 | - **Backtesting Engine**: Historical strategy testing and optimization 260 | - **Paper Trading**: Risk-free strategy testing with simulated trades 261 | 262 | ## 📄 License 263 | 264 | This project is licensed under the Apache 2.0 License - see the original [TauricResearch/TradingAgents](https://github.com/TauricResearch/TradingAgents) repository for details. 265 | 266 | ## 🙏 Acknowledgments 267 | 268 | - Original framework by [TauricResearch](https://github.com/TauricResearch) 269 | - Built with [LangGraph](https://langchain-ai.github.io/langgraph/) for agent orchestration 270 | - UI components from [Bootstrap](https://getbootstrap.com/) and [FontAwesome](https://fontawesome.com/) 271 | -------------------------------------------------------------------------------- /tradingagents/graph/trading_graph.py: -------------------------------------------------------------------------------- 1 | # TradingAgents/graph/trading_graph.py 2 | 3 | import os 4 | from pathlib import Path 5 | import json 6 | from datetime import date 7 | from typing import Dict, Any, Tuple, List, Optional 8 | 9 | from langchain_openai import ChatOpenAI 10 | from langchain_anthropic import ChatAnthropic 11 | from langchain_google_genai import ChatGoogleGenerativeAI 12 | 13 | from langgraph.prebuilt import ToolNode 14 | 15 | from tradingagents.agents import * 16 | from tradingagents.default_config import DEFAULT_CONFIG 17 | from tradingagents.agents.utils.memory import FinancialSituationMemory 18 | from tradingagents.agents.utils.agent_states import ( 19 | AgentState, 20 | InvestDebateState, 21 | RiskDebateState, 22 | ) 23 | from tradingagents.dataflows.interface import set_config 24 | 25 | from .conditional_logic import ConditionalLogic 26 | from .setup import GraphSetup 27 | from .propagation import Propagator 28 | from 
.reflection import Reflector 29 | from .signal_processing import SignalProcessor 30 | 31 | 32 | class TradingAgentsGraph: 33 | """Main class that orchestrates the trading agents framework.""" 34 | 35 | def __init__( 36 | self, 37 | selected_analysts=["market", "social", "news", "fundamentals"], 38 | debug=False, 39 | config: Dict[str, Any] = None, 40 | ): 41 | """Initialize the trading agents graph and components. 42 | 43 | Args: 44 | selected_analysts: List of analyst types to include 45 | debug: Whether to run in debug mode 46 | config: Configuration dictionary. If None, uses default config 47 | """ 48 | self.debug = debug 49 | self.config = config or DEFAULT_CONFIG 50 | 51 | # Update the interface's config 52 | set_config(self.config) 53 | 54 | # Create necessary directories 55 | os.makedirs( 56 | os.path.join(self.config["project_dir"], "dataflows/data_cache"), 57 | exist_ok=True, 58 | ) 59 | 60 | # Initialize LLMs 61 | if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter": 62 | self.deep_thinking_llm = ChatOpenAI( 63 | model=self.config["deep_think_llm"], 64 | base_url=self.config["backend_url"], 65 | api_key=self.config["api_key"] 66 | ) 67 | self.quick_thinking_llm = ChatOpenAI( 68 | model=self.config["quick_think_llm"], 69 | base_url=self.config["backend_url"], 70 | api_key=self.config["api_key"] 71 | ) 72 | elif self.config["llm_provider"].lower() == "anthropic": 73 | self.deep_thinking_llm = ChatAnthropic( 74 | model=self.config["deep_think_llm"], 75 | base_url=self.config["backend_url"], 76 | api_key=self.config["api_key"] 77 | ) 78 | self.quick_thinking_llm = ChatAnthropic( 79 | model=self.config["quick_think_llm"], 80 | base_url=self.config["backend_url"], 81 | api_key=self.config["api_key"] 82 | ) 83 | elif self.config["llm_provider"].lower() == "google": 84 | self.deep_thinking_llm = ChatGoogleGenerativeAI( 85 | model=self.config["deep_think_llm"], 86 | 
google_api_key=self.config["api_key"] 87 | ) 88 | self.quick_thinking_llm = ChatGoogleGenerativeAI( 89 | model=self.config["quick_think_llm"], 90 | google_api_key=self.config["api_key"] 91 | ) 92 | else: 93 | raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}") 94 | 95 | self.toolkit = Toolkit(config=self.config) 96 | 97 | # Initialize memories 98 | self.bull_memory = FinancialSituationMemory("bull_memory", self.config) 99 | self.bear_memory = FinancialSituationMemory("bear_memory", self.config) 100 | self.trader_memory = FinancialSituationMemory("trader_memory", self.config) 101 | self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config) 102 | self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config) 103 | 104 | # Create tool nodes 105 | self.tool_nodes = self._create_tool_nodes() 106 | 107 | # Initialize components 108 | self.conditional_logic = ConditionalLogic() 109 | self.graph_setup = GraphSetup( 110 | self.quick_thinking_llm, 111 | self.deep_thinking_llm, 112 | self.toolkit, 113 | self.tool_nodes, 114 | self.bull_memory, 115 | self.bear_memory, 116 | self.trader_memory, 117 | self.invest_judge_memory, 118 | self.risk_manager_memory, 119 | self.conditional_logic, 120 | ) 121 | 122 | self.propagator = Propagator() 123 | self.reflector = Reflector(self.quick_thinking_llm) 124 | self.signal_processor = SignalProcessor(self.quick_thinking_llm) 125 | 126 | # State tracking 127 | self.curr_state = None 128 | self.ticker = None 129 | self.log_states_dict = {} # date to full state dict 130 | 131 | # Set up the graph 132 | self.graph = self.graph_setup.setup_graph(selected_analysts) 133 | 134 | def _create_tool_nodes(self) -> Dict[str, ToolNode]: 135 | """Create tool nodes for different data sources.""" 136 | return { 137 | "market": ToolNode( 138 | [ 139 | # Stock tools (online) 140 | self.toolkit.get_YFin_data_online, 141 | self.toolkit.get_stockstats_indicators_report_online, 142 | 
# Stock tools (offline) 143 | self.toolkit.get_YFin_data, 144 | self.toolkit.get_stockstats_indicators_report, 145 | # Crypto tools 146 | self.toolkit.get_crypto_price_history, 147 | self.toolkit.get_crypto_technical_analysis, 148 | self.toolkit.get_crypto_market_analysis, 149 | ] 150 | ), 151 | "social": ToolNode( 152 | [ 153 | # Stock tools (online) 154 | self.toolkit.get_stock_news_openai, 155 | # Stock tools (offline) 156 | self.toolkit.get_reddit_stock_info, 157 | # Crypto tools 158 | self.toolkit.get_crypto_news_analysis, 159 | ] 160 | ), 161 | "news": ToolNode( 162 | [ 163 | # Stock tools (online) 164 | self.toolkit.get_global_news_openai, 165 | self.toolkit.get_google_news, 166 | # Stock tools (offline) 167 | self.toolkit.get_finnhub_news, 168 | self.toolkit.get_reddit_news, 169 | # Crypto tools 170 | self.toolkit.get_crypto_news_analysis, 171 | ] 172 | ), 173 | "fundamentals": ToolNode( 174 | [ 175 | # Stock tools (online) 176 | self.toolkit.get_fundamentals_openai, 177 | # Stock tools (offline) 178 | self.toolkit.get_finnhub_company_insider_sentiment, 179 | self.toolkit.get_finnhub_company_insider_transactions, 180 | self.toolkit.get_simfin_balance_sheet, 181 | self.toolkit.get_simfin_cashflow, 182 | self.toolkit.get_simfin_income_stmt, 183 | # Crypto tools 184 | self.toolkit.get_crypto_fundamentals_analysis, 185 | self.toolkit.get_crypto_market_analysis, 186 | ] 187 | ), 188 | } 189 | 190 | def propagate(self, company_name, trade_date): 191 | """Run the trading agents graph for a company on a specific date.""" 192 | 193 | self.ticker = company_name 194 | 195 | # Initialize state 196 | init_agent_state = self.propagator.create_initial_state( 197 | company_name, trade_date 198 | ) 199 | args = self.propagator.get_graph_args() 200 | 201 | if self.debug: 202 | # Debug mode with tracing 203 | trace = [] 204 | for chunk in self.graph.stream(init_agent_state, **args): 205 | if len(chunk["messages"]) == 0: 206 | pass 207 | else: 208 | 
chunk["messages"][-1].pretty_print() 209 | trace.append(chunk) 210 | 211 | final_state = trace[-1] 212 | else: 213 | # Standard mode without tracing 214 | final_state = self.graph.invoke(init_agent_state, **args) 215 | 216 | # Store current state for reflection 217 | self.curr_state = final_state 218 | 219 | # Log state 220 | self._log_state(trade_date, final_state) 221 | 222 | # Return decision and processed signal 223 | return final_state, self.process_signal(final_state["final_trade_decision"]) 224 | 225 | def _log_state(self, trade_date, final_state): 226 | """Log the final state to a JSON file.""" 227 | self.log_states_dict[str(trade_date)] = { 228 | "company_of_interest": final_state["company_of_interest"], 229 | "trade_date": final_state["trade_date"], 230 | "market_report": final_state["market_report"], 231 | "sentiment_report": final_state["sentiment_report"], 232 | "news_report": final_state["news_report"], 233 | "fundamentals_report": final_state["fundamentals_report"], 234 | "investment_debate_state": { 235 | "bull_history": final_state["investment_debate_state"]["bull_history"], 236 | "bear_history": final_state["investment_debate_state"]["bear_history"], 237 | "history": final_state["investment_debate_state"]["history"], 238 | "current_response": final_state["investment_debate_state"][ 239 | "current_response" 240 | ], 241 | "judge_decision": final_state["investment_debate_state"][ 242 | "judge_decision" 243 | ], 244 | }, 245 | "trader_investment_decision": final_state["trader_investment_plan"], 246 | "risk_debate_state": { 247 | "risky_history": final_state["risk_debate_state"]["risky_history"], 248 | "safe_history": final_state["risk_debate_state"]["safe_history"], 249 | "neutral_history": final_state["risk_debate_state"]["neutral_history"], 250 | "history": final_state["risk_debate_state"]["history"], 251 | "judge_decision": final_state["risk_debate_state"]["judge_decision"], 252 | }, 253 | "investment_plan": final_state["investment_plan"], 254 | 
"final_trade_decision": final_state["final_trade_decision"], 255 | } 256 | 257 | # Save to file 258 | directory = Path(f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/") 259 | directory.mkdir(parents=True, exist_ok=True) 260 | 261 | with open( 262 | f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/full_states_log_{trade_date}.json", 263 | "w", 264 | ) as f: 265 | json.dump(self.log_states_dict, f, indent=4) 266 | 267 | def reflect_and_remember(self, returns_losses): 268 | """Reflect on decisions and update memory based on returns.""" 269 | self.reflector.reflect_bull_researcher( 270 | self.curr_state, returns_losses, self.bull_memory 271 | ) 272 | self.reflector.reflect_bear_researcher( 273 | self.curr_state, returns_losses, self.bear_memory 274 | ) 275 | self.reflector.reflect_trader( 276 | self.curr_state, returns_losses, self.trader_memory 277 | ) 278 | self.reflector.reflect_invest_judge( 279 | self.curr_state, returns_losses, self.invest_judge_memory 280 | ) 281 | self.reflector.reflect_risk_manager( 282 | self.curr_state, returns_losses, self.risk_manager_memory 283 | ) 284 | 285 | def process_signal(self, full_signal): 286 | """Process a signal to extract the core decision.""" 287 | return self.signal_processor.process_signal(full_signal) 288 | --------------------------------------------------------------------------------