├── .gitignore
├── README.md
├── backend
├── .env.sample
├── AI
│ ├── AIService.py
│ ├── agents
│ │ ├── ben_graham.py
│ │ ├── bill_ackman.py
│ │ ├── cathie_wood.py
│ │ ├── charlie_munger.py
│ │ ├── fundamentals.py
│ │ ├── michael_burry.py
│ │ ├── peter_lynch.py
│ │ ├── phil_fisher.py
│ │ ├── portfolio_manager.py
│ │ ├── risk_manager.py
│ │ ├── sentiment.py
│ │ ├── stanley_druckenmiller.py
│ │ ├── technicals.py
│ │ ├── valuation.py
│ │ └── warren_buffett.py
│ ├── backtester.py
│ ├── data
│ │ ├── cache.py
│ │ └── models.py
│ ├── graph
│ │ └── state.py
│ ├── llm
│ │ └── models.py
│ ├── tools
│ │ └── api.py
│ └── utils
│ │ ├── __init__.py
│ │ ├── analysts.py
│ │ ├── display.py
│ │ ├── llm.py
│ │ ├── ollama.py
│ │ ├── progress.py
│ │ └── visualize.py
├── __init__.py
├── config.py
├── daily_data
│ ├── M1101.DCE_future_daily_20100101_20251231.csv
│ ├── M1105.DCE_future_daily_20100101_20251231.csv
│ ├── M1109.DCE_future_daily_20100101_20251231.csv
│ ├── M1201.DCE_future_daily_20100101_20251231.csv
│ ├── M1205.DCE_future_daily_20100101_20251231.csv
│ ├── M1209.DCE_future_daily_20100101_20251231.csv
│ ├── M1301.DCE_future_daily_20100101_20251231.csv
│ ├── M1305.DCE_future_daily_20100101_20251231.csv
│ ├── M1309.DCE_future_daily_20100101_20251231.csv
│ ├── M1401.DCE_future_daily_20100101_20251231.csv
│ ├── M1405.DCE_future_daily_20100101_20251231.csv
│ ├── M1409.DCE_future_daily_20100101_20251231.csv
│ ├── M1501.DCE_future_daily_20100101_20251231.csv
│ ├── M1505.DCE_future_daily_20100101_20251231.csv
│ ├── M1509.DCE_future_daily_20100101_20251231.csv
│ ├── M1601.DCE_future_daily_20100101_20251231.csv
│ ├── M1605.DCE_future_daily_20100101_20251231.csv
│ ├── M1609.DCE_future_daily_20100101_20251231.csv
│ ├── M1701.DCE_future_daily_20100101_20251231.csv
│ ├── M1705.DCE_future_daily_20100101_20251231.csv
│ ├── M1709.DCE_future_daily_20100101_20251231.csv
│ ├── M1801.DCE_future_daily_20100101_20251231.csv
│ ├── M1805.DCE_future_daily_20100101_20251231.csv
│ ├── M1809.DCE_future_daily_20100101_20251231.csv
│ ├── M1901.DCE_future_daily_20100101_20251231.csv
│ ├── M1905.DCE_future_daily_20100101_20251231.csv
│ ├── M1909.DCE_future_daily_20100101_20251231.csv
│ ├── M2001.DCE_future_daily_20100101_20251231.csv
│ ├── M2005.DCE_future_daily_20100101_20251231.csv
│ ├── M2009.DCE_future_daily_20100101_20251231.csv
│ ├── M2101.DCE_future_daily_20100101_20251231.csv
│ ├── M2105.DCE_future_daily_20100101_20251231.csv
│ ├── M2109.DCE_future_daily_20100101_20251231.csv
│ ├── M2201.DCE_future_daily_20100101_20251231.csv
│ ├── M2205.DCE_future_daily_20100101_20251231.csv
│ ├── M2209.DCE_future_daily_20100101_20251231.csv
│ ├── M2301.DCE_future_daily_20100101_20251231.csv
│ ├── M2305.DCE_future_daily_20100101_20251231.csv
│ ├── M2309.DCE_future_daily_20100101_20251231.csv
│ ├── M2401.DCE_future_daily_20100101_20251231.csv
│ ├── M2405.DCE_future_daily_20100101_20251231.csv
│ ├── M2409.DCE_future_daily_20100101_20251231.csv
│ ├── M2501.DCE_future_daily_20100101_20251231.csv
│ ├── M2505.DCE_future_daily_20100101_20251231.csv
│ ├── M2509.DCE_future_daily_20100101_20251231.csv
│ └── M2601.DCE_future_daily_20100101_20251231.csv
├── data
│ ├── 159985.SZ_fund_30min_20190101_20251231.csv
│ ├── 159985.SZ_fund_daily_20190101_20251231.csv
│ ├── 159985.SZ_fund_weekly_20190101_20251231.csv
│ ├── B2501.DCE_future_5min_20240801_20241130.csv
│ ├── M2501.DCE_future_15min_20240101_20251231.csv
│ ├── M2501.DCE_future_30min_20240101_20251231.csv
│ ├── M2501.DCE_future_5min_20240101_20251231.csv
│ ├── M2501.DCE_future_5min_20240801_20241130.csv
│ ├── M2501.DCE_future_60min_20240101_20251231.csv
│ ├── M2501.DCE_future_daily_20240101_20251231.csv
│ └── Y2501.DCE_future_5min_20240801_20241130.csv
├── init_db.py
├── main.py
├── models
│ ├── __init__.py
│ ├── account.py
│ ├── core_factor.py
│ ├── fundamental.py
│ ├── kline.py
│ ├── market_data.py
│ ├── news.py
│ ├── position.py
│ ├── signals.py
│ ├── soybean.py
│ ├── trading.py
│ └── trading_strategy.py
├── requirements.txt
├── routers
│ ├── __init__.py
│ ├── account.py
│ ├── ai.py
│ ├── arbitrage.py
│ ├── core_factor.py
│ ├── dual_ma.py
│ ├── fundamental.py
│ ├── grid.py
│ ├── holding_analysis.py
│ ├── market_data.py
│ ├── news.py
│ ├── obv_adx_ema.py
│ ├── signals.py
│ ├── soybean.py
│ ├── stockfutures.py
│ ├── support_resistance.py
│ ├── trading.py
│ ├── trading_model.py
│ └── trend_follow.py
├── services
│ ├── __init__.py
│ ├── account.py
│ ├── core_factor.py
│ ├── fundamental.py
│ ├── market_data.py
│ ├── news_service.py
│ ├── opt_service.py
│ ├── position.py
│ ├── signals.py
│ ├── soybean.py
│ ├── stockfutures.py
│ ├── support_resistance.py
│ └── trading.py
├── strategies
│ ├── dual_ma_strategy.py
│ ├── grid_strategy.py
│ ├── obv_adx_ema_strategy.py
│ ├── support_resistance_strategy.py
│ └── trend_follow_strategy.py
├── test
│ └── test.py
├── tools
│ ├── __init__.py
│ ├── data_fetcher.py
│ └── feedtrade_crawler.py
├── trading.db
└── utils
│ ├── __init__.py
│ └── logger.py
├── docs
└── 多品种套利.md
└── frontend
├── package-lock.json
├── package.json
├── postcss.config.js
├── public
├── donate.jpg
├── favicon.ico
├── images
│ └── experts
│ │ ├── ben-graham.jpg
│ │ ├── bill-ackman.jpg
│ │ ├── cathie-wood.jpg
│ │ ├── charlie-munger.jpg
│ │ ├── michael-burry.jpg
│ │ ├── peter-lynch.jpg
│ │ ├── phil-fisher.jpg
│ │ ├── stanley-druckenmiller.jpg
│ │ └── warren-buffett.jpg
├── index.html
├── logo.png
├── logo192.png
└── manifest.json
├── src
├── App.tsx
├── api
│ ├── ai.ts
│ ├── arbitrage.ts
│ ├── signals.ts
│ └── soybean.ts
├── components
│ ├── KLineChart.tsx
│ ├── SRLevels.tsx
│ ├── Signallet.css
│ ├── Signallet.tsx
│ ├── Toast.css
│ ├── Toast.tsx
│ ├── analysis
│ │ ├── CoreFactorAnalysis.tsx
│ │ └── StandardizedAnalysis.tsx
│ ├── home
│ │ ├── FuturesKLineChart.tsx
│ │ ├── InventoryInfo.tsx
│ │ └── TimeRangeSelector.tsx
│ ├── layout
│ │ ├── Footer.tsx
│ │ ├── Header.tsx
│ │ └── Layout.tsx
│ ├── market
│ │ ├── HistoricalComparison.tsx
│ │ ├── InventoryChart.tsx
│ │ ├── MarketOverview.tsx
│ │ ├── OptionData.tsx
│ │ ├── PriceChart.tsx
│ │ ├── TechnicalChart.tsx
│ │ └── TechnicalIndicators.tsx
│ └── trading
│ │ ├── ArbitrageStrategy.tsx
│ │ ├── ETFStrategy.tsx
│ │ ├── FuturesOptionsHedgeStrategy.tsx
│ │ ├── GridStrategy.tsx
│ │ ├── InterSpeciesArbitrageStrategy.tsx
│ │ ├── OBVADXEMAStrategy.tsx
│ │ ├── OptionsStrategy.tsx
│ │ ├── StrategyAdvice.tsx
│ │ ├── SupportResistanceStrategy.tsx
│ │ └── TrendFollowStrategy.tsx
├── config
│ └── api.ts
├── index.css
├── index.tsx
├── pages
│ ├── Agents.tsx
│ ├── CoreFactorAnalysisPage.tsx
│ ├── Disclaimer.tsx
│ ├── HoldingAnalysis.tsx
│ ├── Home.tsx
│ ├── MarketView.tsx
│ ├── MultiVarietyArbitrage.tsx
│ ├── NewsAnalysis.tsx
│ ├── OptionsStrategyPage.tsx
│ ├── PrivacyPolicy.tsx
│ ├── ProAnalysis.tsx
│ ├── Research.tsx
│ ├── Signals.css
│ ├── Signals.tsx
│ ├── StrategyAdvicePage.tsx
│ ├── TermsOfService.tsx
│ ├── Trading.tsx
│ └── pro
│ │ ├── SoybeanImport.tsx
│ │ └── StockFutures.tsx
└── types
│ ├── market.ts
│ └── stockFutures.ts
├── tailwind.config.js
└── tsconfig.json
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 |
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 |
104 | # SageMath parsed files
105 | *.sage.py
106 |
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 |
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # mkdocs documentation
124 | /site
125 |
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 |
131 | # Pyre type checker
132 | .pyre/
133 |
134 | # pytype static type analyzer
135 | .pytype/
136 |
137 | # Cython debug symbols
138 | cython_debug/
139 |
140 | node_modules/
141 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 豆粕品种量化交易策略平台
2 |
3 | ## 项目简介
4 |
5 | 这是一个专注于豆粕品种交易的量化交易策略平台。平台整合了豆粕ETF、豆粕期货和期权的交易分析,为投资者提供全方位的交易决策支持。
6 | 访问网站立即体验:https://www.singzquant.com/
7 |
8 | ## 核心功能
9 |
10 | - **核心观点**: 提供豆粕市场的核心交易观点和策略建议
11 | - **每日分析**: 实时更新市场动态,技术面和基本面分析
12 | - **AI量化模型**: 基于深度强化学习和多智能体的交易策略辅助系统
13 | - 功能截图
14 | 
15 | 
16 | 
17 | 
18 | 
19 |
20 | ## 技术架构
21 |
22 | ### 前端技术栈
23 | - React + TypeScript
24 | - TailwindCSS
25 | - Ant Design Pro
26 | - ECharts 图表库
27 | - WebSocket实时数据
28 |
29 | ### 后端技术栈
30 | - Python FastAPI
31 | - SQLite数据库
32 | - Tushare数据接口
33 | - PyTorch深度学习框架
34 | - 日志监控系统
35 |
36 | ## 快速开始
37 |
38 | ### 环境要求
39 | - Node.js >= 16
40 | - Python >= 3.8
41 | - pip
42 | - npm 或 yarn
43 |
44 | ### 前端部署
45 | ```bash
46 | cd frontend
47 | npm install
48 | npm start
49 | ```
50 |
51 | ### 后端部署
52 | ```bash
53 | cd backend
54 | pip install -r requirements.txt
55 | uvicorn main:app
56 | ```
57 |
58 | ### 环境变量配置
59 | 1. 在backend目录下复制`.env.sample`为`.env`
60 | 2. 配置必要的环境变量:
61 | - TUSHARE_TOKEN
62 | - DATABASE_URL
63 | - API_KEY等
64 |
65 | ## 项目结构
66 | ```
67 | singz_quant/
68 | ├── frontend/ # React前端项目
69 | │ ├── src/ # 源代码
70 | │ ├── public/ # 静态资源
71 | │ └── build/ # 构建输出
72 | ├── backend/ # FastAPI后端服务
73 | │ ├── models/ # 数据模型
74 | │ ├── routers/ # API路由
75 | │ ├── services/ # 业务逻辑
76 | │ └── utils/ # 工具函数
77 | └── docs/ # 项目文档
78 | ```
79 |
80 | ## 其他
81 | 如果你喜欢我的项目,可以给我买杯咖啡:
82 |
83 |
84 | ## 风险提示
85 |
86 | 本系统仅供学习和研究使用,不构成任何投资建议。使用本系统进行实盘交易需要自行承担风险。
87 |
88 | ## 许可证
89 |
90 | MIT License
91 |
--------------------------------------------------------------------------------
/backend/.env.sample:
--------------------------------------------------------------------------------
1 | # Tushare API配置
2 | TUSHARE_TOKEN=your_tushare_token_here
3 |
4 | # Deepseek API配置
5 | DEEPSEEK_API_KEY=your_deepseek_api_key_here
6 | OPENAI_API_KEY=your_openai_api_key_here
7 | OPENAI_BASE_URL=your_openai_base_url_here
8 |
9 | # 应用配置
10 | PROJECT_NAME=新致量化策略
11 | API_V1_STR=/api/v1
12 |
13 | # 数据库配置(如果需要)
14 | # DATABASE_URL=postgresql://user:password@localhost:5432/dbname
15 |
16 | # 日志配置
17 | LOG_LEVEL=INFO
18 | LOG_FILE_PATH=logs/app.log
19 |
20 | # 安全配置
21 | SECRET_KEY=your_secret_key_here
22 | ALGORITHM=HS256
23 | ACCESS_TOKEN_EXPIRE_MINUTES=30
--------------------------------------------------------------------------------
/backend/AI/agents/risk_manager.py:
--------------------------------------------------------------------------------
1 | from langchain_core.messages import HumanMessage
2 | from AI.graph.state import AgentState, show_agent_reasoning
3 | from AI.utils.progress import progress
4 | from AI.tools.api import get_prices, prices_to_df
5 | from loguru import logger
6 | import json
7 | from langchain_core.prompts import ChatPromptTemplate
8 | from pydantic import BaseModel
9 |
10 |
11 | ##### 风险管理代理 #####
def risk_management_agent(state: AgentState):
    """Position-sizing risk control across multiple tickers.

    For each ticker:
      1. fetch price data for the configured date range
      2. compute total portfolio value (cash + cost basis of all positions)
      3. cap any single position at 20% of total portfolio value
      4. subtract the value already held and clamp by available cash
      5. record the result in ``analyst_signals["risk_management_agent"]``

    Args:
        state: shared agent state; reads ``data.portfolio``, ``data.tickers``,
            ``data.start_date`` and ``data.end_date``.

    Returns:
        dict with ``messages`` (prior messages plus the risk report) and ``data``.
    """
    logger.info("开始风险管理代理")
    portfolio = state["data"]["portfolio"]
    data = state["data"]
    tickers = data["tickers"]
    logger.info(f"处理股票: {tickers}")

    # Per-ticker risk analysis results.
    risk_analysis = {}
    current_prices = {}  # cache prices to avoid duplicate API calls

    for ticker in tickers:
        progress.update_status("risk_management_agent", ticker, "分析价格数据")
        logger.info(f"开始分析 {ticker} 的风险因素")

        prices = get_prices(
            ticker=ticker,
            start_date=data["start_date"],
            end_date=data["end_date"],
        )
        logger.debug(f"获取到价格数据: {prices}")

        if not prices:
            # No data — skip this ticker rather than failing the whole run.
            progress.update_status("risk_management_agent", ticker, "失败:未找到价格数据")
            logger.error(f"无法获取 {ticker} 的价格数据")
            continue

        prices_df = prices_to_df(prices)
        logger.debug(f"价格数据转换为DataFrame: {prices_df}")

        progress.update_status("risk_management_agent", ticker, "计算仓位限制")
        logger.info(f"计算 {ticker} 的仓位限制")

        # Latest close is taken as the current price.
        current_price = prices_df["close"].iloc[-1]
        current_prices[ticker] = current_price
        logger.debug(f"当前价格: {current_price}")

        # NOTE(review): position values come from cost basis, not marked to
        # market with current_prices — confirm this is intentional.
        cost_basis = portfolio.get("cost_basis", {})
        current_position_value = cost_basis.get(ticker, 0)
        logger.debug(f"当前仓位价值: {current_position_value}")

        # Total portfolio value = cash + sum of all position cost bases.
        # (Was a redundant per-key .get() loop over the same dict.)
        total_portfolio_value = portfolio.get("cash", 0) + sum(cost_basis.values())
        logger.debug(f"总投资组合价值: {total_portfolio_value}")

        # Base rule: any single position is capped at 20% of the portfolio.
        position_limit = total_portfolio_value * 0.20
        logger.debug(f"单个仓位限制: {position_limit}")

        # For existing positions, subtract what is already held.
        remaining_position_limit = position_limit - current_position_value
        logger.debug(f"剩余仓位限制: {remaining_position_limit}")

        # Never size beyond available cash.
        available_cash = portfolio.get("cash", 0)
        max_position_size = min(remaining_position_limit, available_cash)
        logger.debug(f"可用现金: {available_cash}, 最大仓位大小: {max_position_size}")

        risk_analysis[ticker] = {
            "remaining_position_limit": float(max_position_size),
            "current_price": float(current_price),
            "reasoning": {
                "portfolio_value": float(total_portfolio_value),
                "current_position": float(current_position_value),
                "position_limit": float(position_limit),
                "remaining_limit": float(remaining_position_limit),
                "available_cash": float(available_cash),
            },
        }
        logger.info(f"风险分析结果: {risk_analysis[ticker]}")

        progress.update_status("risk_management_agent", ticker, "完成")
        logger.info(f"完成 {ticker} 的风险分析")

    message = HumanMessage(
        content=json.dumps(risk_analysis),
        name="risk_management_agent",
    )

    if state["metadata"]["show_reasoning"]:
        show_agent_reasoning(risk_analysis, "风险管理代理")

    # Publish the signals for downstream agents.
    state["data"]["analyst_signals"]["risk_management_agent"] = risk_analysis
    logger.info("风险管理分析完成,返回结果")

    # NOTE(review): ``messages`` uses an operator.add reducer in AgentState;
    # returning state["messages"] + [message] (unlike sentiment_agent, which
    # returns only the delta) may duplicate prior messages — verify.
    return {
        "messages": state["messages"] + [message],
        "data": data,
    }
--------------------------------------------------------------------------------
/backend/AI/agents/sentiment.py:
--------------------------------------------------------------------------------
1 | from langchain_core.messages import HumanMessage
2 | from AI.graph.state import AgentState, show_agent_reasoning
3 | from AI.utils.progress import progress
4 | import pandas as pd
5 | import numpy as np
6 | import json
7 |
8 | from AI.tools.api import get_insider_trades, get_company_news
9 |
10 |
11 | ##### Sentiment Agent #####
def sentiment_agent(state: AgentState):
    """Analyzes market sentiment and generates trading signals for multiple tickers.

    Blends two weighted sources per ticker:
      * insider trades (weight 0.3): negative transaction_shares => bearish, else bullish
      * company news  (weight 0.7): provider sentiment label => bullish/bearish/neutral

    Writes per-ticker results into ``analyst_signals["sentiment_agent"]`` and
    returns them as a message delta.
    """
    data = state.get("data", {})
    end_date = data.get("end_date")
    tickers = data.get("tickers") or []  # tolerate missing/None tickers

    # Initialize sentiment analysis for each ticker
    sentiment_analysis = {}

    for ticker in tickers:
        progress.update_status("sentiment_agent", ticker, "Fetching insider trades")

        # Get the insider trades
        insider_trades = get_insider_trades(
            ticker=ticker,
            end_date=end_date,
            limit=1000,
        )

        progress.update_status("sentiment_agent", ticker, "Analyzing trading patterns")

        # Sales (negative share counts) read as bearish, buys as bullish.
        transaction_shares = pd.Series([t.transaction_shares for t in insider_trades]).dropna()
        insider_signals = np.where(transaction_shares < 0, "bearish", "bullish").tolist()

        progress.update_status("sentiment_agent", ticker, "Fetching company news")

        # Get the company news
        company_news = get_company_news(ticker, end_date, limit=100)

        # Map provider sentiment labels to signals.
        sentiment = pd.Series([n.sentiment for n in company_news]).dropna()
        news_signals = np.where(sentiment == "negative", "bearish",
                                np.where(sentiment == "positive", "bullish", "neutral")).tolist()

        progress.update_status("sentiment_agent", ticker, "Combining signals")
        # News dominates the blend (0.7 vs 0.3 insider weight).
        insider_weight = 0.3
        news_weight = 0.7

        # Calculate weighted signal counts
        bullish_signals = (
            insider_signals.count("bullish") * insider_weight +
            news_signals.count("bullish") * news_weight
        )
        bearish_signals = (
            insider_signals.count("bearish") * insider_weight +
            news_signals.count("bearish") * news_weight
        )

        if bullish_signals > bearish_signals:
            overall_signal = "bullish"
        elif bearish_signals > bullish_signals:
            overall_signal = "bearish"
        else:
            overall_signal = "neutral"

        # Confidence = dominant side's share of all weighted signals, as a
        # percentage. Fixed: round AFTER scaling to percent — the old
        # round(p, 2) * 100 quantized to whole percents and could leave
        # float noise such as 56.99999999999999.
        total_weighted_signals = len(insider_signals) * insider_weight + len(news_signals) * news_weight
        confidence = 0  # Default confidence when there are no signals
        if total_weighted_signals > 0:
            confidence = round(max(bullish_signals, bearish_signals) / total_weighted_signals * 100, 2)
        reasoning = f"Weighted Bullish signals: {bullish_signals:.1f}, Weighted Bearish signals: {bearish_signals:.1f}"

        sentiment_analysis[ticker] = {
            "signal": overall_signal,
            "confidence": confidence,
            "reasoning": reasoning,
        }

        progress.update_status("sentiment_agent", ticker, "Done")

    # Create the sentiment message
    message = HumanMessage(
        content=json.dumps(sentiment_analysis),
        name="sentiment_agent",
    )

    # Print the reasoning if the flag is set
    if state["metadata"]["show_reasoning"]:
        show_agent_reasoning(sentiment_analysis, "Sentiment Analysis Agent")

    # Add the signal to the analyst_signals list
    state["data"]["analyst_signals"]["sentiment_agent"] = sentiment_analysis

    return {
        "messages": [message],
        "data": data,
    }
--------------------------------------------------------------------------------
/backend/AI/data/cache.py:
--------------------------------------------------------------------------------
1 | class Cache:
2 | """In-memory cache for API responses."""
3 |
4 | def __init__(self):
5 | self._prices_cache: dict[str, list[dict[str, any]]] = {}
6 | self._financial_metrics_cache: dict[str, list[dict[str, any]]] = {}
7 | self._line_items_cache: dict[str, list[dict[str, any]]] = {}
8 | self._insider_trades_cache: dict[str, list[dict[str, any]]] = {}
9 | self._company_news_cache: dict[str, list[dict[str, any]]] = {}
10 |
11 | def _merge_data(self, existing: list[dict] | None, new_data: list[dict], key_field: str) -> list[dict]:
12 | """Merge existing and new data, avoiding duplicates based on a key field."""
13 | if not existing:
14 | return new_data
15 |
16 | # Create a set of existing keys for O(1) lookup
17 | existing_keys = {item[key_field] for item in existing}
18 |
19 | # Only add items that don't exist yet
20 | merged = existing.copy()
21 | merged.extend([item for item in new_data if item[key_field] not in existing_keys])
22 | return merged
23 |
24 | def get_prices(self, ticker: str) -> list[dict[str, any]] | None:
25 | """Get cached price data if available."""
26 | return self._prices_cache.get(ticker)
27 |
28 | def set_prices(self, ticker: str, data: list[dict[str, any]]):
29 | """Append new price data to cache."""
30 | self._prices_cache[ticker] = self._merge_data(self._prices_cache.get(ticker), data, key_field="time")
31 |
32 | def get_financial_metrics(self, ticker: str) -> list[dict[str, any]]:
33 | """Get cached financial metrics if available."""
34 | return self._financial_metrics_cache.get(ticker)
35 |
36 | def set_financial_metrics(self, ticker: str, data: list[dict[str, any]]):
37 | """Append new financial metrics to cache."""
38 | self._financial_metrics_cache[ticker] = self._merge_data(self._financial_metrics_cache.get(ticker), data, key_field="report_period")
39 |
40 | def get_line_items(self, ticker: str) -> list[dict[str, any]] | None:
41 | """Get cached line items if available."""
42 | return self._line_items_cache.get(ticker)
43 |
44 | def set_line_items(self, ticker: str, data: list[dict[str, any]]):
45 | """Append new line items to cache."""
46 | self._line_items_cache[ticker] = self._merge_data(self._line_items_cache.get(ticker), data, key_field="report_period")
47 |
48 | def get_insider_trades(self, ticker: str) -> list[dict[str, any]] | None:
49 | """Get cached insider trades if available."""
50 | return self._insider_trades_cache.get(ticker)
51 |
52 | def set_insider_trades(self, ticker: str, data: list[dict[str, any]]):
53 | """Append new insider trades to cache."""
54 | self._insider_trades_cache[ticker] = self._merge_data(self._insider_trades_cache.get(ticker), data, key_field="filing_date") # Could also use transaction_date if preferred
55 |
56 | def get_company_news(self, ticker: str) -> list[dict[str, any]] | None:
57 | """Get cached company news if available."""
58 | return self._company_news_cache.get(ticker)
59 |
60 | def set_company_news(self, ticker: str, data: list[dict[str, any]]):
61 | """Append new company news to cache."""
62 | self._company_news_cache[ticker] = self._merge_data(self._company_news_cache.get(ticker), data, key_field="date")
63 |
64 |
65 | # Global cache instance
66 | _cache = Cache()
67 |
68 |
69 | def get_cache() -> Cache:
70 | """Get the global cache instance."""
71 | return _cache
72 |
--------------------------------------------------------------------------------
/backend/AI/data/models.py:
--------------------------------------------------------------------------------
from pydantic import BaseModel


class Price(BaseModel):
    """One OHLCV bar; ``time`` is the bar's timestamp string."""
    open: float
    close: float
    high: float
    low: float
    volume: int
    time: str


class PriceResponse(BaseModel):
    """Price history for a single ticker."""
    ticker: str
    prices: list[Price]


class FinancialMetrics(BaseModel):
    """Valuation, profitability, liquidity, leverage and growth ratios for one
    reporting period. Every ratio is optional (None when the data provider
    does not supply it)."""
    ticker: str
    report_period: str
    period: str
    currency: str
    market_cap: float | None
    enterprise_value: float | None
    price_to_earnings_ratio: float | None
    price_to_book_ratio: float | None
    price_to_sales_ratio: float | None
    enterprise_value_to_ebitda_ratio: float | None
    enterprise_value_to_revenue_ratio: float | None
    free_cash_flow_yield: float | None
    peg_ratio: float | None
    gross_margin: float | None
    operating_margin: float | None
    net_margin: float | None
    return_on_equity: float | None
    return_on_assets: float | None
    return_on_invested_capital: float | None
    asset_turnover: float | None
    inventory_turnover: float | None
    receivables_turnover: float | None
    days_sales_outstanding: float | None
    operating_cycle: float | None
    working_capital_turnover: float | None
    current_ratio: float | None
    quick_ratio: float | None
    cash_ratio: float | None
    operating_cash_flow_ratio: float | None
    debt_to_equity: float | None
    debt_to_assets: float | None
    interest_coverage: float | None
    revenue_growth: float | None
    earnings_growth: float | None
    book_value_growth: float | None
    earnings_per_share_growth: float | None
    free_cash_flow_growth: float | None
    operating_income_growth: float | None
    ebitda_growth: float | None
    payout_ratio: float | None
    earnings_per_share: float | None
    book_value_per_share: float | None
    free_cash_flow_per_share: float | None


class FinancialMetricsResponse(BaseModel):
    """API response wrapper for a list of financial-metrics records."""
    financial_metrics: list[FinancialMetrics]


class LineItem(BaseModel):
    """Selected financial-statement line items for one reporting period.
    Unknown provider fields are accepted via ``extra: allow``."""
    ticker: str
    report_period: str
    period: str
    currency: str
    revenue: float | None = None
    earnings_per_share: float | None = None
    net_income: float | None = None
    free_cash_flow: float | None = None
    operating_margin: float | None = None
    depreciation_and_amortization: float | None = None
    total_assets: float | None = None
    total_liabilities: float | None = None
    current_assets: float | None = None
    current_liabilities: float | None = None
    book_value_per_share: float | None = None
    dividends_and_other_cash_distributions: float | None = None
    outstanding_shares: float | None = None
    # Allow additional fields dynamically
    model_config = {"extra": "allow"}


class LineItemResponse(BaseModel):
    """API response wrapper for a line-item search."""
    search_results: list[LineItem]


class InsiderTrade(BaseModel):
    """One insider transaction filing; only ``ticker`` and ``filing_date``
    are guaranteed to be present."""
    ticker: str
    issuer: str | None
    name: str | None
    title: str | None
    is_board_director: bool | None
    transaction_date: str | None
    transaction_shares: float | None
    transaction_price_per_share: float | None
    transaction_value: float | None
    shares_owned_before_transaction: float | None
    shares_owned_after_transaction: float | None
    security_title: str | None
    filing_date: str


class InsiderTradeResponse(BaseModel):
    """API response wrapper for insider-trade records."""
    insider_trades: list[InsiderTrade]


class CompanyNews(BaseModel):
    """One news article about a ticker; ``sentiment`` is an optional
    provider-assigned label."""
    ticker: str
    title: str
    author: str
    source: str
    date: str
    url: str
    sentiment: str | None = None


class CompanyNewsResponse(BaseModel):
    """API response wrapper for company-news records."""
    news: list[CompanyNews]


class Position(BaseModel):
    """Holding in a single ticker: share count plus cash attributed to it."""
    cash: float = 0.0
    shares: int = 0
    ticker: str


class Portfolio(BaseModel):
    """Portfolio snapshot: per-ticker positions plus uninvested cash."""
    positions: dict[str, Position]  # ticker -> Position mapping
    total_cash: float = 0.0


class AnalystSignal(BaseModel):
    """A single analyst agent's verdict on one ticker."""
    signal: str | None = None
    confidence: float | None = None
    reasoning: dict | str | None = None
    max_position_size: float | None = None  # For risk management signals


class TickerAnalysis(BaseModel):
    """All agents' signals collected for one ticker."""
    ticker: str
    analyst_signals: dict[str, AnalystSignal]  # agent_name -> signal mapping


class AgentStateData(BaseModel):
    """Typed view of the shared agent-state ``data`` payload."""
    tickers: list[str]
    portfolio: Portfolio
    start_date: str
    end_date: str
    ticker_analyses: dict[str, TickerAnalysis]  # ticker -> analysis mapping


class AgentStateMetadata(BaseModel):
    """Run-level flags (e.g. ``show_reasoning``); extra keys are allowed."""
    show_reasoning: bool = False
    model_config = {"extra": "allow"}
--------------------------------------------------------------------------------
/backend/AI/graph/state.py:
--------------------------------------------------------------------------------
1 | from typing_extensions import Annotated, Sequence, TypedDict
2 |
3 | import operator
4 | from langchain_core.messages import BaseMessage
5 |
6 |
7 | import json
8 |
9 |
def merge_dicts(a: dict[str, object], b: dict[str, object]) -> dict[str, object]:
    """Shallow-merge two dicts; on key collision the value from ``b`` wins.

    Used as the LangGraph reducer for the ``data`` and ``metadata`` channels.
    (Annotation fix: the old hints used the builtin ``any`` function, which
    is not a type.)
    """
    return {**a, **b}
12 |
13 |
# Define agent state
class AgentState(TypedDict):
    """Shared LangGraph state passed between agent nodes.

    ``messages`` accumulates across nodes (operator.add reducer);
    ``data`` and ``metadata`` are shallow-merged via ``merge_dicts``
    (the later writer wins on key collisions).
    """
    messages: Annotated[Sequence[BaseMessage], operator.add]
    data: Annotated[dict[str, any], merge_dicts]
    metadata: Annotated[dict[str, any], merge_dicts]
19 |
20 |
def show_agent_reasoning(output, agent_name):
    """Pretty-print an agent's reasoning to stdout.

    Dicts and lists are first converted to a JSON-serializable form and
    dumped with indent=2. Strings are parsed as JSON when possible and
    pretty-printed, otherwise echoed verbatim. The output is framed by a
    banner carrying the agent's name.
    """
    print(f"\n{'=' * 10} {agent_name.center(28)} {'=' * 10}")

    def _to_jsonable(value):
        # Pandas Series/DataFrames expose to_dict(); prefer that first.
        if hasattr(value, "to_dict"):
            return value.to_dict()
        # Arbitrary custom objects: fall back to their attribute dict.
        if hasattr(value, "__dict__"):
            return value.__dict__
        if isinstance(value, (int, float, bool, str)):
            return value
        if isinstance(value, (list, tuple)):
            return [_to_jsonable(item) for item in value]
        if isinstance(value, dict):
            return {k: _to_jsonable(v) for k, v in value.items()}
        # Last resort: string representation.
        return str(value)

    if isinstance(output, (dict, list)):
        print(json.dumps(_to_jsonable(output), indent=2))
    else:
        try:
            print(json.dumps(json.loads(output), indent=2))
        except json.JSONDecodeError:
            # Not valid JSON — print the raw string unchanged.
            print(output)

    print("=" * 48)
52 |
--------------------------------------------------------------------------------
/backend/AI/llm/models.py:
--------------------------------------------------------------------------------
1 | import os
2 | from langchain_deepseek import ChatDeepSeek
3 | from langchain_openai import ChatOpenAI
4 | from langchain_ollama import ChatOllama
5 | from enum import Enum
6 | from pydantic import BaseModel
7 | from typing import Tuple, List, Dict, Any, Optional
8 | from config import settings
9 |
class ModelProvider(str, Enum):
    """Closed set of supported LLM backends.

    Subclasses ``str`` so members compare equal to their plain string
    values and serialize cleanly.
    """

    DEEPSEEK = "DeepSeek"  # DeepSeek hosted API
    OPENAI = "OpenAI"      # OpenAI hosted API
    OLLAMA = "Ollama"      # locally served Ollama models
15 |
16 |
17 |
class LLMModel(BaseModel):
    """Represents an LLM model configuration.

    Attributes: ``display_name`` is the label shown in the picker UI,
    ``model_name`` the provider-side model identifier, ``provider`` the
    backend that serves it.
    """
    display_name: str
    model_name: str
    provider: ModelProvider

    def to_choice_tuple(self) -> Tuple[str, str, str]:
        """Convert to format needed for questionary choices."""
        return (self.display_name, self.model_name, self.provider.value)

    def has_json_mode(self) -> bool:
        """Check if the model supports JSON mode."""
        if self.is_deepseek() or self.is_gemini():
            return False
        # Only certain Ollama models support JSON mode
        if self.is_ollama():
            return "llama3" in self.model_name or "neural-chat" in self.model_name
        return True

    def is_deepseek(self) -> bool:
        """Check if the model is a DeepSeek model."""
        return self.model_name.startswith("deepseek")

    def is_gemini(self) -> bool:
        """Check if the model is a Gemini model.

        Fixed: ``has_json_mode()`` called this method but it was never
        defined, so every non-DeepSeek model raised AttributeError.
        """
        return "gemini" in self.model_name

    def is_ollama(self) -> bool:
        """Check if the model is an Ollama model."""
        return self.provider == ModelProvider.OLLAMA
44 |
45 |
# Define available (cloud-hosted) models; these feed LLM_ORDER below.
AVAILABLE_MODELS = [
    LLMModel(
        display_name="[deepseek] deepseek-r1",
        model_name="deepseek-reasoner",
        provider=ModelProvider.DEEPSEEK
    ),
    LLMModel(
        display_name="[deepseek] deepseek-v3",
        model_name="deepseek-chat",
        provider=ModelProvider.DEEPSEEK
    ),
    LLMModel(
        display_name="[openai] gpt-4.5",
        model_name="gpt-4.5-preview",
        provider=ModelProvider.OPENAI
    ),
    LLMModel(
        display_name="[openai] gpt-4o",
        model_name="gpt-4o",
        provider=ModelProvider.OPENAI
    ),
    LLMModel(
        display_name="[openai] o1",
        model_name="o1",
        provider=ModelProvider.OPENAI
    ),
    LLMModel(
        display_name="[openai] o3-mini",
        model_name="o3-mini",
        provider=ModelProvider.OPENAI
    ),
]

# Define Ollama (locally-served) models separately
OLLAMA_MODELS = [
    LLMModel(
        display_name="[ollama] gemma3 (4B)",
        model_name="gemma3:4b",
        provider=ModelProvider.OLLAMA
    ),
    LLMModel(
        display_name="[ollama] qwen2.5 (7B)",
        model_name="qwen2.5",
        provider=ModelProvider.OLLAMA
    ),
    LLMModel(
        display_name="[ollama] llama3.1 (8B)",
        model_name="llama3.1:latest",
        provider=ModelProvider.OLLAMA
    ),
    LLMModel(
        display_name="[ollama] gemma3 (12B)",
        model_name="gemma3:12b",
        provider=ModelProvider.OLLAMA
    ),
    LLMModel(
        display_name="[ollama] mistral-small3.1 (24B)",
        model_name="mistral-small3.1",
        provider=ModelProvider.OLLAMA
    ),
    LLMModel(
        display_name="[ollama] gemma3 (27B)",
        model_name="gemma3:27b",
        provider=ModelProvider.OLLAMA
    ),
    LLMModel(
        display_name="[ollama] qwen2.5 (32B)",
        model_name="qwen2.5:32b",
        provider=ModelProvider.OLLAMA
    ),
    LLMModel(
        display_name="[ollama] llama-3.3 (70B)",
        model_name="llama3.3:70b-instruct-q4_0",
        provider=ModelProvider.OLLAMA
    ),
]

# Create LLM_ORDER in the (display, value, provider) format expected by the UI
LLM_ORDER = [model.to_choice_tuple() for model in AVAILABLE_MODELS]

# Create Ollama LLM_ORDER separately
OLLAMA_LLM_ORDER = [model.to_choice_tuple() for model in OLLAMA_MODELS]
129 |
def get_model_info(model_name: str) -> LLMModel | None:
    """Look up a model's configuration by its model_name; None if unknown."""
    for candidate in AVAILABLE_MODELS + OLLAMA_MODELS:
        if candidate.model_name == model_name:
            return candidate
    return None
134 |
def get_model(model_name: str, model_provider: ModelProvider) -> ChatOpenAI | ChatDeepSeek | ChatOllama | None:
    """Instantiate a chat-model client for the given provider.

    :param model_name: provider-specific model identifier
    :param model_provider: which backend to build a client for
    :return: a configured client, or None for an unrecognized provider
        (kept for backward compatibility with existing callers)
    :raises ValueError: when the provider's API key is not configured
    """
    if model_provider == ModelProvider.OPENAI:
        # Get and validate API key
        api_key = settings.OPENAI_API_KEY
        if not api_key:
            # Print error to console (no placeholders, so no f-string needed)
            print("API Key Error: Please make sure OPENAI_API_KEY is set in your .env file.")
            raise ValueError("OpenAI API key not found. Please make sure OPENAI_API_KEY is set in your .env file.")
        base_url = settings.OPENAI_BASE_URL
        return ChatOpenAI(model=model_name, api_key=api_key, base_url=base_url)
    elif model_provider == ModelProvider.DEEPSEEK:
        api_key = settings.DEEPSEEK_API_KEY
        if not api_key:
            print("API Key Error: Please make sure DEEPSEEK_API_KEY is set in your .env file.")
            raise ValueError("DeepSeek API key not found. Please make sure DEEPSEEK_API_KEY is set in your .env file.")
        return ChatDeepSeek(model=model_name, api_key=api_key)
    elif model_provider == ModelProvider.OLLAMA:
        # For Ollama, we use a base URL instead of an API key
        base_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
        return ChatOllama(
            model=model_name,
            base_url=base_url,
        )
    # Unknown provider: previously fell off the end implicitly; made explicit.
    return None
--------------------------------------------------------------------------------
/backend/AI/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # This file can be empty
2 |
--------------------------------------------------------------------------------
/backend/AI/utils/analysts.py:
--------------------------------------------------------------------------------
1 | """Constants and utilities related to analysts configuration."""
2 |
3 | from AI.agents.ben_graham import ben_graham_agent
4 | from AI.agents.bill_ackman import bill_ackman_agent
5 | from AI.agents.cathie_wood import cathie_wood_agent
6 | from AI.agents.charlie_munger import charlie_munger_agent
7 | from AI.agents.fundamentals import fundamentals_agent
8 | from AI.agents.michael_burry import michael_burry_agent
9 | from AI.agents.phil_fisher import phil_fisher_agent
10 | from AI.agents.peter_lynch import peter_lynch_agent
11 | from AI.agents.sentiment import sentiment_agent
12 | from AI.agents.stanley_druckenmiller import stanley_druckenmiller_agent
13 | from AI.agents.technicals import technical_analyst_agent
14 | from AI.agents.valuation import valuation_agent
15 | from AI.agents.warren_buffett import warren_buffett_agent
16 |
# Define analyst configuration - single source of truth.
# "order" controls the display ordering used to derive ANALYST_ORDER below.
ANALYST_CONFIG = {
    "ben_graham": {
        "display_name": "Ben Graham",
        "agent_func": ben_graham_agent,
        "order": 0,
    },
    "bill_ackman": {
        "display_name": "Bill Ackman",
        "agent_func": bill_ackman_agent,
        "order": 1,
    },
    "cathie_wood": {
        "display_name": "Cathie Wood",
        "agent_func": cathie_wood_agent,
        "order": 2,
    },
    "charlie_munger": {
        "display_name": "Charlie Munger",
        "agent_func": charlie_munger_agent,
        "order": 3,
    },
    "michael_burry": {
        "display_name": "Michael Burry",
        "agent_func": michael_burry_agent,
        "order": 4,
    },
    "peter_lynch": {
        "display_name": "Peter Lynch",
        "agent_func": peter_lynch_agent,
        "order": 5,
    },
    "phil_fisher": {
        "display_name": "Phil Fisher",
        "agent_func": phil_fisher_agent,
        "order": 6,
    },
    "stanley_druckenmiller": {
        "display_name": "Stanley Druckenmiller",
        "agent_func": stanley_druckenmiller_agent,
        "order": 7,
    },
    "warren_buffett": {
        "display_name": "Warren Buffett",
        "agent_func": warren_buffett_agent,
        "order": 8,
    },
    "technical_analyst": {
        "display_name": "Technical Analyst",
        "agent_func": technical_analyst_agent,
        "order": 9,
    },
    "fundamentals_analyst": {
        "display_name": "Fundamentals Analyst",
        "agent_func": fundamentals_agent,
        "order": 10,
    },
    "sentiment_analyst": {
        "display_name": "Sentiment Analyst",
        "agent_func": sentiment_agent,
        "order": 11,
    },
    "valuation_analyst": {
        "display_name": "Valuation Analyst",
        "agent_func": valuation_agent,
        "order": 12,
    },
}

# Derive ANALYST_ORDER (list of (display_name, key)) from ANALYST_CONFIG for backwards compatibility
ANALYST_ORDER = [(config["display_name"], key) for key, config in sorted(ANALYST_CONFIG.items(), key=lambda x: x[1]["order"])]
88 |
89 |
def get_analyst_nodes():
    """Get the mapping of analyst keys to their (node_name, agent_func) tuples."""
    nodes = {}
    for key, config in ANALYST_CONFIG.items():
        nodes[key] = (f"{key}_agent", config["agent_func"])
    return nodes
93 |
--------------------------------------------------------------------------------
/backend/AI/utils/llm.py:
--------------------------------------------------------------------------------
1 | """Helper functions for LLM"""
2 |
3 | import json
4 | from typing import TypeVar, Type, Optional, Any
5 | from pydantic import BaseModel
6 | from AI.utils.progress import progress
7 |
8 | T = TypeVar('T', bound=BaseModel)
9 |
def call_llm(
    prompt: Any,
    model_name: str,
    model_provider: str,
    pydantic_model: Type[T],
    agent_name: Optional[str] = None,
    max_retries: int = 3,
    default_factory = None
) -> T:
    """
    Makes an LLM call with retry logic, handling both JSON supported and non-JSON supported models.

    Args:
        prompt: The prompt to send to the LLM
        model_name: Name of the model to use
        model_provider: Provider of the model
        pydantic_model: The Pydantic model class to structure the output
        agent_name: Optional name of the agent for progress updates
        max_retries: Maximum number of retries (default: 3)
        default_factory: Optional factory function to create default response on failure

    Returns:
        An instance of the specified Pydantic model
    """
    from AI.llm.models import get_model, get_model_info

    model_info = get_model_info(model_name)
    llm = get_model(model_name, model_provider)

    # Models WITH JSON mode get structured output directly; models without it
    # return markdown whose JSON payload is extracted manually below.
    # (The previous comment stated the opposite of what this condition does.)
    if not (model_info and not model_info.has_json_mode()):
        llm = llm.with_structured_output(
            pydantic_model,
            method="json_mode",
        )

    # Call the LLM with retries
    for attempt in range(max_retries):
        try:
            result = llm.invoke(prompt)

            if model_info and not model_info.has_json_mode():
                # Non-JSON models: extract and parse the JSON manually.
                parsed_result = extract_json_from_response(result.content)
                if parsed_result:
                    return pydantic_model(**parsed_result)
                # BUGFIX: a failed extraction used to fall through silently,
                # retrying with no progress update and never honoring
                # default_factory. Raise so the except branch handles it.
                raise ValueError("Failed to extract JSON from model response")
            else:
                return result

        except Exception as e:
            if agent_name:
                progress.update_status(agent_name, None, f"Error - retry {attempt + 1}/{max_retries}")

            if attempt == max_retries - 1:
                print(f"Error in LLM call after {max_retries} attempts: {e}")
                # Use default_factory if provided, otherwise create a basic default
                if default_factory:
                    return default_factory()
                return create_default_response(pydantic_model)

    # Defensive fallback (unreachable with the retry logic above); kept
    # consistent with the except branch by honoring default_factory.
    if default_factory:
        return default_factory()
    return create_default_response(pydantic_model)
73 |
def create_default_response(model_class: Type[T]) -> T:
    """Build a safe fallback instance of *model_class* from per-type defaults."""
    defaults = {}
    for name, field in model_class.model_fields.items():
        annotation = field.annotation
        if annotation == str:
            defaults[name] = "Error in analysis, using default"
        elif annotation == float:
            defaults[name] = 0.0
        elif annotation == int:
            defaults[name] = 0
        elif hasattr(annotation, "__origin__") and annotation.__origin__ == dict:
            defaults[name] = {}
        elif hasattr(annotation, "__args__"):
            # Parameterized types such as Literal: take the first allowed value.
            defaults[name] = annotation.__args__[0]
        else:
            defaults[name] = None
    return model_class(**defaults)
94 |
def extract_json_from_response(content: str) -> Optional[dict]:
    """Pull a JSON object out of a ```json fenced block in *content*.

    Returns the parsed dict, or None when there is no complete fenced
    block or its contents are not valid JSON.
    """
    try:
        _, fence, after_fence = content.partition("```json")
        if fence:
            body, closing, _ = after_fence.partition("```")
            if closing:
                return json.loads(body.strip())
    except Exception as e:
        print(f"Error extracting JSON from response: {e}")
    return None
108 |
--------------------------------------------------------------------------------
/backend/AI/utils/progress.py:
--------------------------------------------------------------------------------
1 | from rich.console import Console
2 | from rich.live import Live
3 | from rich.table import Table
4 | from rich.style import Style
5 | from rich.text import Text
6 | from typing import Dict, Optional
7 | from datetime import datetime
8 | from loguru import logger
9 |
# Shared rich console that backs the Live progress display below.
console = Console()
11 |
12 |
class AgentProgress:
    """Tracks and renders live progress for multiple agents."""

    def __init__(self):
        # Maps agent name -> {"status": str, "ticker": str or None}.
        self.agent_status: Dict[str, Dict[str, Optional[str]]] = {}
        self.table = Table(show_header=False, box=None, padding=(0, 1))
        self.live = Live(self.table, console=console, refresh_per_second=4)
        # Guards start/stop so they are idempotent.
        self.started = False

    def start(self):
        """Start the live progress display (no-op if already running)."""
        if not self.started:
            self.live.start()
            self.started = True
            logger.info("开始显示进度")

    def stop(self):
        """Stop the live progress display (no-op if not running)."""
        if self.started:
            self.live.stop()
            self.started = False
            logger.info("停止显示进度")

    def update_status(self, agent_name: str, ticker: Optional[str] = None, status: str = ""):
        """Update one agent's ticker/status and refresh the display.

        Falsy ticker/status arguments leave the stored values untouched.
        """
        if agent_name not in self.agent_status:
            self.agent_status[agent_name] = {"status": "", "ticker": None}

        if ticker:
            self.agent_status[agent_name]["ticker"] = ticker
        if status:
            self.agent_status[agent_name]["status"] = status
            logger.info(f"代理 {agent_name} 状态更新: {status}")

        self._refresh_display()

    def _refresh_display(self):
        """Rebuild the progress table from the current agent statuses."""
        self.table.columns.clear()
        self.table.add_column(width=100)

        # Sort agents; risk management and portfolio management sink to the bottom.
        def sort_key(item):
            agent_name = item[0]
            if "risk_management" in agent_name:
                return (2, agent_name)
            elif "portfolio_management" in agent_name:
                return (3, agent_name)
            else:
                return (1, agent_name)

        for agent_name, info in sorted(self.agent_status.items(), key=sort_key):
            status = info["status"]
            ticker = info["ticker"]

            # Pick symbol/color per state: done=green, error=red, otherwise pending.
            if status.lower() == "done":
                style = Style(color="green", bold=True)
                symbol = "✓"
            elif status.lower() == "error":
                style = Style(color="red", bold=True)
                symbol = "✗"
            else:
                style = Style(color="yellow")
                symbol = "⋯"

            agent_display = agent_name.replace("_agent", "").replace("_", " ").title()
            status_text = Text()
            status_text.append(f"{symbol} ", style=style)
            status_text.append(f"{agent_display:<20}", style=Style(bold=True))

            if ticker:
                status_text.append(f"[{ticker}] ", style=Style(color="cyan"))
            status_text.append(status, style=style)

            self.table.add_row(status_text)
89 |
90 |
# Global singleton shared by all agents for progress reporting.
progress = AgentProgress()
93 |
--------------------------------------------------------------------------------
/backend/AI/utils/visualize.py:
--------------------------------------------------------------------------------
1 | from langgraph.graph.state import CompiledGraph
2 | from langchain_core.runnables.graph import MermaidDrawMethod
3 |
4 |
def save_graph_as_png(app: CompiledGraph, output_file_path: str) -> None:
    """Render the compiled graph to a PNG file via the Mermaid API.

    :param app: compiled LangGraph application to visualize
    :param output_file_path: destination path; an empty string (or None,
        which previously raised TypeError on len()) falls back to "graph.png"
    """
    png_image = app.get_graph().draw_mermaid_png(draw_method=MermaidDrawMethod.API)
    # `or` treats both "" and None as "use the default file name".
    file_path = output_file_path or "graph.png"
    with open(file_path, "wb") as f:
        f.write(png_image)
--------------------------------------------------------------------------------
/backend/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | 新致量化策略 Backend
3 | """
--------------------------------------------------------------------------------
/backend/config.py:
--------------------------------------------------------------------------------
1 | from pydantic_settings import BaseSettings
2 | from functools import lru_cache
3 |
class Settings(BaseSettings):
    """Application settings, loaded from the environment / .env file."""
    # Tushare API configuration
    TUSHARE_TOKEN: str = "你的tushare token"

    # DeepSeek API configuration
    DEEPSEEK_API_KEY: str

    OPENAI_API_KEY: str
    OPENAI_BASE_URL: str

    # Application configuration
    PROJECT_NAME: str = "新致量化策略"
    API_V1_STR: str = "/api/v1"

    # Database configuration
    DATABASE_URL: str | None = None

    # Logging configuration
    LOG_LEVEL: str = "INFO"
    LOG_FILE_PATH: str = "logs/app.log"

    # Security configuration
    SECRET_KEY: str = "your_secret_key_here"
    ALGORITHM: str = "HS256"
    ACCESS_TOKEN_EXPIRE_MINUTES: int = 30

    class Config:
        env_file = ".env"
        case_sensitive = True
33 |
@lru_cache()
def get_settings():
    """Return the process-wide Settings instance (cached after first call)."""
    return Settings()

# Module-level singleton used throughout the backend.
settings = get_settings()
39 |
# Futures trading multipliers (contract units per lot), keyed by product code.
FUTURES_MULTIPLIER = {
    # Commodity futures
    'M': 10,     # soybean meal
    'Y': 10,     # soybean oil
    'P': 10,     # palm oil
    'C': 10,     # corn
    'A': 10,     # soybean No.1
    'RB': 10,    # rebar
    'I': 100,    # iron ore
    'J': 100,    # coke
    'JM': 60,    # coking coal
    'CU': 5,     # copper
    'AL': 5,     # aluminum
    'ZN': 5,     # zinc
    'PB': 5,     # lead
    'AU': 1000,  # gold
    'AG': 15,    # silver
    'RU': 10,    # rubber
    'FU': 10,    # fuel oil
    'TA': 5,     # PTA
    'MA': 10,    # methanol
    'PP': 5,     # polypropylene
    'L': 5,      # plastics (LLDPE)
    'V': 5,      # PVC

    # Financial futures
    'IF': 300,   # CSI 300 index
    'IC': 200,   # CSI 500 index
    'IH': 300,   # SSE 50 index
    'T': 10000,  # treasury bond
    'TF': 10000, # 5-year treasury bond
}
73 |
def get_multiplier(symbol: str) -> int:
    """
    Get the trading multiplier for a futures product.
    :param symbol: futures code, e.g. 'M2401'
    :return: the multiplier, or 1 when the product is unknown
    """
    import re
    # The product code is the leading run of letters (month digits stripped).
    match = re.match(r'([A-Za-z]+)', symbol.upper())
    if match:
        return FUTURES_MULTIPLIER.get(match.group(1), 1)
    return 1
86 |
def is_futures(symbol: str) -> bool:
    """
    Tell whether a full instrument code refers to a futures contract.
    :param symbol: full code, e.g. 'futures-M2401'
    :return: True when the code carries a 'futures-' prefix (case-insensitive)
    """
    if not symbol:
        return False
    prefix, sep, _ = symbol.partition('-')
    return bool(sep) and prefix.lower() == 'futures'
--------------------------------------------------------------------------------
/backend/init_db.py:
--------------------------------------------------------------------------------
1 | from models.soybean import Base
2 | from models.trading_strategy import Base as TradingStrategyBase
3 | from config import settings
4 | from sqlalchemy import create_engine
5 |
def init_db():
    """Create all database tables for both model bases."""
    # Fall back to a local SQLite file when DATABASE_URL is not configured.
    db_engine = create_engine(settings.DATABASE_URL or "sqlite:///./trading.db")
    Base.metadata.create_all(bind=db_engine)
    TradingStrategyBase.metadata.create_all(bind=db_engine)
    print("数据库表初始化完成")
12 |
# Allow running this module directly to (re)create the schema.
if __name__ == "__main__":
    init_db()
--------------------------------------------------------------------------------
/backend/main.py:
--------------------------------------------------------------------------------
1 | from fastapi import FastAPI
2 | from fastapi.middleware.cors import CORSMiddleware
3 | import sys
4 | import os
5 |
6 | # 添加项目根目录到Python路径
7 | sys.path.append(os.path.dirname(os.path.abspath(__file__)))
8 |
9 | from routers import market_data, trading, fundamental, core_factor, arbitrage, trend_follow, dual_ma, obv_adx_ema, news, ai, signals, account, grid, support_resistance, soybean, stockfutures, holding_analysis
10 | from config import settings
11 | from utils.logger import logger
12 |
app = FastAPI(
    title=settings.PROJECT_NAME,
    openapi_url=f"{settings.API_V1_STR}/openapi.json"
)

# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],  # allow the frontend dev server
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all request headers
)
logger.info("CORS中间件配置完成")
27 |
# Register routers.
# Table of (router, URL prefix, OpenAPI tag, registration log message);
# replaces seventeen copy-pasted include_router/logger.info pairs while
# preserving the exact registration order and log output.
_ROUTER_TABLE = [
    (market_data.router, f"{settings.API_V1_STR}/market", "market", "市场数据路由注册完成"),
    (trading.router, f"{settings.API_V1_STR}/trading", "trading", "交易策略路由注册完成"),
    (fundamental.router, f"{settings.API_V1_STR}/fundamental", "fundamental", "基本面分析路由注册完成"),
    (core_factor.router, f"{settings.API_V1_STR}/core-factor", "core_factor", "核心驱动因子分析路由注册完成"),
    (arbitrage.router, f"{settings.API_V1_STR}/arbitrage", "arbitrage", "套利策略路由注册完成"),
    (trend_follow.router, f"{settings.API_V1_STR}/trend_follow", "trend_follow", "趋势跟随策略路由注册完成"),
    (dual_ma.router, f"{settings.API_V1_STR}/dual_ma", "dual_ma", "双均线策略路由注册完成"),
    (grid.router, f"{settings.API_V1_STR}/grid", "grid", "网格策略路由注册完成"),
    (obv_adx_ema.router, f"{settings.API_V1_STR}/obv_adx_ema", "obv_adx_ema", "OBV、ADX与EMA组合策略路由注册完成"),
    (support_resistance.router, f"{settings.API_V1_STR}/support_resistance", "support_resistance", "支撑阻力策略路由注册完成"),
    (news.router, f"{settings.API_V1_STR}/news", "news", "新闻分析路由注册完成"),
    (ai.router, f"{settings.API_V1_STR}/ai", "ai", "AI分析路由注册完成"),
    # signals is mounted directly under the API root (no sub-prefix).
    (signals.router, f"{settings.API_V1_STR}", "signals", "信号路由注册完成"),
    (account.router, f"{settings.API_V1_STR}/account", "account", "账户路由注册完成"),
    (soybean.router, f"{settings.API_V1_STR}/soybean", "soybean", "大豆进口分析路由注册完成"),
    (stockfutures.router, f"{settings.API_V1_STR}/stockfutures", "stockfutures", "期股联动分析路由注册完成"),
    (holding_analysis.router, f"{settings.API_V1_STR}/holding", "holding", "持仓变化分析路由注册完成"),
]

for _router, _prefix, _tag, _message in _ROUTER_TABLE:
    app.include_router(_router, prefix=_prefix, tags=[_tag])
    logger.info(_message)
147 |
@app.on_event("startup")
async def startup_event():
    """Log startup info and abort when the Tushare token is missing."""
    logger.info("应用启动")
    logger.info(f"项目名称: {settings.PROJECT_NAME}")
    logger.info(f"API版本: {settings.API_V1_STR}")

    # Check the Tushare token; market-data endpoints are unusable without it.
    if not settings.TUSHARE_TOKEN:
        logger.error("Tushare token not configured")
        sys.exit(1)
158 |
@app.on_event("shutdown")
async def shutdown_event():
    """Log application shutdown."""
    logger.info("应用关闭")
162 |
@app.get("/")
async def root():
    """Landing endpoint returning a welcome message."""
    logger.debug("收到根路径请求")
    return {"message": "Welcome to 新致量化策略 API"}
--------------------------------------------------------------------------------
/backend/models/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Data Models
3 | """
--------------------------------------------------------------------------------
/backend/models/account.py:
--------------------------------------------------------------------------------
1 | from sqlalchemy import Column, Integer, String, DateTime, Float, func
2 | from sqlalchemy.ext.declarative import declarative_base
3 | from pydantic import BaseModel
4 | from datetime import datetime
5 | from typing import Optional
6 |
7 | Base = declarative_base()
8 |
class AccountDB(Base):
    """SQLAlchemy model for a simulated trading account."""
    __tablename__ = "accounts"

    id = Column(Integer, primary_key=True, index=True)
    initial_balance = Column(Float, nullable=False, default=1000000.0)  # initial capital
    current_balance = Column(Float, nullable=False, default=1000000.0)  # current total assets
    available_balance = Column(Float, nullable=False)
    total_profit = Column(Float, nullable=False, default=0.0)
    total_commission = Column(Float, nullable=False, default=0.0)  # total commission paid
    position_cost = Column(Float, nullable=False, default=0.0)  # position cost
    position_quantity = Column(Integer, nullable=False, default=0)  # position quantity
    created_at = Column(DateTime, default=func.current_timestamp())
    updated_at = Column(DateTime, default=func.current_timestamp(), onupdate=func.current_timestamp())
22 |
class AccountBase(BaseModel):
    """Shared account fields used by the API schemas below."""
    initial_balance: float
    current_balance: float
    available_balance: float
    total_profit: float
    total_commission: float
    position_cost: float = 0.0  # position cost
    position_quantity: int = 0  # position quantity
31 |
class AccountCreate(AccountBase):
    """Payload schema for creating a new account (same fields as AccountBase)."""
    pass
34 |
class Account(AccountBase):
    """Account as returned by the API, including DB-generated fields."""
    id: int
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow construction from SQLAlchemy objects (pydantic v2).
        from_attributes = True
--------------------------------------------------------------------------------
/backend/models/core_factor.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, date
2 | from typing import Dict, Any, Optional
3 | from sqlalchemy import Column, Integer, Date, DateTime, JSON
4 | from sqlalchemy.ext.declarative import declarative_base
5 | from pydantic import BaseModel
6 |
7 | Base = declarative_base()
8 |
class CoreFactorAnalysisDB(Base):
    """SQLAlchemy model storing one core-factor analysis snapshot per date."""
    __tablename__ = "core_factor_analysis"

    id = Column(Integer, primary_key=True, index=True)
    date = Column(Date, unique=True, index=True)
    inventory_cycle = Column(JSON)
    technical_signals = Column(JSON)
    price_anchors = Column(JSON)
    news_policy = Column(JSON)
    hog_market = Column(JSON)
    # NOTE(review): datetime.utcnow is naive and deprecated in Python 3.12+;
    # switching to an aware default would change stored values — confirm first.
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
21 |
class CoreFactorAnalysis(BaseModel):
    """API schema for a daily core-factor analysis snapshot."""
    date: date
    inventory_cycle: Dict[str, Any]
    technical_signals: Dict[str, Any]
    price_anchors: Dict[str, Any]
    news_policy: Dict[str, Any]
    hog_market: Dict[str, Any]
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

    class Config:
        # Pydantic v2 spelling; `orm_mode` was the v1 name and only works
        # through a deprecation shim. Matches models/account.py.
        from_attributes = True
--------------------------------------------------------------------------------
/backend/models/fundamental.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 | from typing import List, Dict, Any, Optional
3 | from datetime import date, datetime
4 | from sqlalchemy import Column, String, Date, DateTime, JSON
5 | from sqlalchemy.ext.declarative import declarative_base
6 |
7 | Base = declarative_base()
8 |
class FundamentalAnalysisDB(Base):
    """SQLAlchemy model storing one fundamental analysis per date."""
    __tablename__ = "fundamental_analysis"

    date = Column(Date, primary_key=True)
    supply_demand = Column(JSON)
    seasonal = Column(JSON)
    weather = Column(JSON)
    crush_profit = Column(JSON)
    overall = Column(JSON)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
20 |
class FundamentalAnalysis(BaseModel):
    """Pydantic model for the fundamental-analysis API response."""
    date: date
    supply_demand: Dict[str, Any]
    seasonal: Dict[str, Any]
    weather: Dict[str, Any]
    crush_profit: Dict[str, Any]
    overall: Dict[str, Any]
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
--------------------------------------------------------------------------------
/backend/models/kline.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 | from datetime import datetime
3 | from typing import List, Optional
4 |
class KLineData(BaseModel):
    """One OHLCV bar enriched with EMA values and open interest."""
    date: datetime
    symbol: str
    open: float
    high: float
    low: float
    close: float
    volume: float
    ema5: float
    ema20: float
    open_interest: float
16 |
class SignalRequest(BaseModel):
    """Paged request for signals computed over a set of K-lines."""
    start_date: str
    end_date: str
    type: Optional[str] = None  # optional signal-type filter
    page: int = 1
    page_size: int = 10
    klines: List[KLineData]
--------------------------------------------------------------------------------
/backend/models/market_data.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel, Field
2 | from typing import Optional, List, Union
3 | from datetime import date
4 |
class FuturesData(BaseModel):
    """Daily futures quote with settlement and open-interest fields."""
    ts_code: str
    trade_date: str
    pre_close: float
    pre_settle: float
    open: float
    high: float
    low: float
    close: float
    settle: float
    change1: float
    change2: float
    vol: float
    amount: float
    oi: float
    oi_chg: float
    contract: str
    price: float
    historicalPrices: List[dict] = Field(default_factory=list)
    volume: Optional[float] = None
25 |
class ETFData(BaseModel):
    """Daily ETF bar with moving averages, ATR and trade-signal state."""
    ts_code: str
    trade_date: str
    open: float
    high: float
    low: float
    close: float
    vol: float
    amount: float
    ma5: float
    ma8: float
    atr: float
    signal: Optional[str] = None  # 'buy', 'sell', 'hold'
    stop_loss: Optional[float] = None
    take_profit: Optional[float] = None
    last_signal: Optional[str] = None  # previous valid signal
    last_signal_date: Optional[str] = None  # when the signal fired
    last_signal_price: Optional[float] = None  # price when the signal fired
    last_stop_loss: Optional[float] = None  # previous stop-loss price
    last_take_profit: Optional[float] = None  # previous take-profit price
46 |
class OptionsData(BaseModel):
    """Static descriptor of a listed option contract."""
    ts_code: str
    name: str
    underlying: str
    exchange: str
    call_put: str
    exercise_price: float
    exercise_date: date
    list_date: date
    delist_date: Optional[date]
57 |
class InventoryData(BaseModel):
    """Inventory or warehouse-receipt time-series point."""
    date: str
    value: float
    mom_change: float  # month-over-month change
    yoy_change: float  # year-over-year change
    data_type: str = Field(description="数据类型:'inventory' 或 'warehouse_receipts'")  # distinguishes inventory vs. warehouse receipts
64 |
class TechnicalIndicators(BaseModel):
    """Bundle of technical-indicator readings for one contract."""
    contract: str
    last_updated: str
    current_price: float
    price_targets: dict
    ema: dict
    macd: dict
    rsi: dict
    kdj: dict
    bollinger_bands: dict
    volume: dict
76 |
class OptionsHedgeData(BaseModel):
    """Daily options-hedging record with Greeks and running P&L."""
    ts_code: str
    trade_date: str
    futures_price: float
    options_price: float
    delta: float
    gamma: float
    theta: float
    vega: float
    hedge_ratio: float
    pl: float
    cumulative_pl: float
    signal: str
    volatility: float
    risk_exposure: Optional[float] = None
92 |
class OptionBasic(BaseModel):
    """Basic listing information for an option contract."""
    ts_code: str
    name: str
    exercise_price: float
    maturity_date: str
    call_put: str
    exchange: str = 'DCE'  # defaults to Dalian Commodity Exchange
    opt_code: Optional[str] = None  # standardized option code
    underlying_code: Optional[str] = None  # code of the underlying instrument
102 |
class OptionDaily(BaseModel):
    """Daily quote for an option contract; price fields may be missing."""
    ts_code: str
    trade_date: str
    exchange: str = 'DCE'  # defaults to Dalian Commodity Exchange
    pre_settle: Optional[float] = None
    pre_close: Optional[float] = None
    open: Optional[float] = None
    high: Optional[float] = None
    low: Optional[float] = None
    close: Optional[float] = None
    settle: Optional[float] = None
    vol: Optional[float] = None
    amount: Optional[float] = None
    oi: Optional[float] = None
117 |
class CostComparisonData(BaseModel):
    """Daily comparison of soybean-meal cost vs. the main futures contract."""
    date: str
    cost: float  # soybean meal cost price
    futures_price: float  # main contract price
    price_diff: float  # price difference
    price_ratio: float  # price ratio
124 |
class KlineData(BaseModel):
    """Plain OHLCV bar (no derived indicators)."""
    trade_date: str
    open: float
    high: float
    low: float
    close: float
    vol: float
132 |
class HistoricalBottom(BaseModel):
    """One historical price-bottom episode with its bounce statistics."""
    start_date: str
    end_date: str
    duration: int
    bounce_amplitude: float
    lowest_price: float
    contract: str
    # NOTE(review): Optional without a default is still a required field in
    # pydantic v2 — confirm whether `= None` was intended here.
    kline_data: Optional[List[KlineData]]
141 |
class ContractStats(BaseModel):
    """Per-contract price statistics over its traded range."""
    contract: str
    lowest_price: float
    highest_price: float
    price_range: float
    start_price: float
    end_price: float
    volatility_30d: float  # 30-day volatility
    quantile_coef: float  # quantile coefficient = lowest price / start price
    standardized_value: float  # standardized value = (current - lowest) / (highest - lowest)
152 |
class PriceRangeAnalysis(BaseModel):
    """Aggregated bottom/price-range analysis across historical contracts."""
    bottom_price: float
    current_price: float
    bottom_range_start: float
    bottom_range_end: float
    bounce_success_rate: float
    avg_bounce_amplitude: float
    avg_bottom_duration: float
    historical_bottoms: List[HistoricalBottom]
    contract_stats: List[ContractStats]
    price_quartiles: dict = Field(  # price quartiles
        default_factory=lambda: {
            'q1': 0.0,
            'q2': 0.0,
            'q3': 0.0
        }
    )
    volatility_quartiles: dict = Field(  # volatility quartiles
        default_factory=lambda: {
            'q1': 0.0,
            'q2': 0.0,
            'q3': 0.0
        }
    )
    cycle_analysis: dict = Field(  # cyclical analysis
        default_factory=lambda: {
            'cycle_length': 4,  # cycle length in years
            'last_bottom_year': 2020,  # year of the previous cycle bottom
            'next_bottom_year': 2024,  # predicted year of the next cycle bottom
            'current_phase': 'late',  # current cycle phase: early/mid/late
        }
    )
    predicted_low: dict = Field(  # low-price prediction
        default_factory=lambda: {
            'base': 0.0,  # baseline prediction
            'lower': 0.0,  # lower bound
            'upper': 0.0,  # upper bound
            'confidence': 0.0,  # prediction confidence
            'factors': {  # contributing factors
                'supply_pressure': 0.0,  # supply pressure
                'policy_risk': 0.0,  # policy risk
                'basis_impact': 0.0,  # basis impact
            }
        }
    )
--------------------------------------------------------------------------------
/backend/models/news.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 | from typing import Optional
3 | from datetime import datetime
4 |
class FlashNews(BaseModel):
    """Flash (short-form) news data model."""
    id: Optional[int] = None
    # NOTE(review): this field name shadows the imported `datetime` class
    # within the class body; pydantic resolves it, but a rename would be clearer.
    datetime: datetime
    content: str
    analysis: Optional[str] = None
    remarks: Optional[str] = None

class NewsArticle(BaseModel):
    """Long-form news article data model."""
    id: Optional[int] = None
    datetime: datetime
    title: str
    content: str
    analysis: Optional[str] = None
    remarks: Optional[str] = None
--------------------------------------------------------------------------------
/backend/models/position.py:
--------------------------------------------------------------------------------
1 | from sqlalchemy import Column, Integer, String, Float, DateTime, func
2 | from sqlalchemy.ext.declarative import declarative_base
3 | from pydantic import BaseModel
4 | from datetime import datetime
5 | from typing import Optional
6 |
7 | Base = declarative_base()
8 |
class PositionDB(Base):
    """SQLAlchemy table model for trading positions."""
    __tablename__ = "positions"

    id = Column(Integer, primary_key=True, index=True)
    symbol = Column(String, nullable=False, index=True)
    price = Column(Float, nullable=False)  # position cost price
    quantity = Column(Integer, nullable=False)  # number of units held
    status = Column(String, nullable=False, default='open')  # open/closed
    created_at = Column(DateTime, default=func.current_timestamp())
    updated_at = Column(DateTime, default=func.current_timestamp(), onupdate=func.current_timestamp())

class PositionBase(BaseModel):
    """Shared pydantic fields for position payloads."""
    symbol: str
    price: float
    quantity: int
    status: str = 'open'

class PositionCreate(PositionBase):
    """Request body for creating a position (same fields as PositionBase)."""
    pass

class Position(PositionBase):
    """API response model; adds the DB-generated id and timestamps."""
    id: int
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow building this model directly from PositionDB ORM objects.
        from_attributes = True
--------------------------------------------------------------------------------
/backend/models/signals.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel, Field
2 | from typing import Optional
3 | from datetime import datetime
4 | from enum import Enum
5 | import uuid
6 |
class SignalType(str, Enum):
    """Kind of trade action a signal represents."""
    BUY_OPEN = "BUY_OPEN"
    SELL_OPEN = "SELL_OPEN"
    BUY_CLOSE = "BUY_CLOSE"
    SELL_CLOSE = "SELL_CLOSE"

class SignalStatus(str, Enum):
    """Lifecycle state of the position a signal opened."""
    OPEN = "open"
    CLOSED = "closed"
    PARTIAL_CLOSED = "partial_closed"

class SignalBase(BaseModel):
    """Core fields shared by all trading-signal payloads."""
    date: datetime
    symbol: str
    type: SignalType
    price: float
    quantity: int
    status: SignalStatus
    reason: str  # reason for opening/closing the position
    close_date: Optional[datetime] = None
    close_price: Optional[float] = None
    profit: float = 0.0

class SignalCreate(SignalBase):
    """Request body for creating a signal (same fields as SignalBase)."""
    pass

class SignalUpdate(BaseModel):
    """Partial-update payload; only the fields provided are changed."""
    status: Optional[SignalStatus] = None
    close_date: Optional[datetime] = None
    close_price: Optional[float] = None
    profit: Optional[float] = None
    reason: Optional[str] = None  # reason for closing the position

class Signal(SignalBase):
    """API response model; adds id and timestamps."""
    id: str
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow building this model from ORM/attribute objects.
        from_attributes = True
--------------------------------------------------------------------------------
/backend/models/soybean.py:
--------------------------------------------------------------------------------
1 | from datetime import date, datetime
2 | from typing import Dict, List, Optional
3 | from pydantic import BaseModel
4 | from sqlalchemy import Column, Integer, Float, String, Date, DateTime, JSON
5 | from sqlalchemy.ext.declarative import declarative_base
6 |
7 | Base = declarative_base()
8 |
class PolicyEvent(BaseModel):
    """Policy event affecting the soybean market.

    NOTE(review): an identical class is re-declared at the bottom of this
    module; one of the two definitions should be removed.
    """
    date: str
    event: str
    impact: str
    type: str
15 |
class SoybeanImportDB(Base):
    """SQLAlchemy table model for soybean import data."""
    __tablename__ = "soybean_imports"

    id = Column(Integer, primary_key=True, index=True)
    date = Column(Date, nullable=False, index=True)
    # Shipment data
    current_shipment = Column(Float, nullable=False)  # current shipment volume
    forecast_shipment = Column(Float, nullable=False)  # forecast shipment volume
    forecast_next_shipment = Column(Float, nullable=False)  # next month's forecast shipment

    # Arrival data
    current_arrival = Column(Float, nullable=False)  # current-month arrivals
    next_arrival = Column(Float, nullable=False)  # next-month expected arrivals
    current_month_arrival = Column(Float, nullable=False)  # actual arrivals this month
    next_month_arrival = Column(Float, nullable=False)  # forecast arrivals next month

    port_details = Column(JSON, nullable=False)  # per-port detail records
    customs_details = Column(JSON, nullable=False)  # per-customs-office detail records
    policy_events = Column(JSON, nullable=True)  # policy event records
    # NOTE(review): datetime.utcnow is deprecated on Python 3.12+; consider a
    # timezone-aware default (e.g. lambda: datetime.now(timezone.utc)).
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
38 |
class PortDetail(BaseModel):
    """Per-port arrival detail data."""
    port: str
    current: float
    next_month: float
    next_two_month: float

class CustomsDetail(BaseModel):
    """Per-customs-office detail data."""
    customs: str
    current: float
    next_period: float
    next_month: float
    next_two_month: float

class ComparisonData(BaseModel):
    """One monthly-comparison data point for charting."""
    month: str
    value: float
    type: str

class PortDistributionData(BaseModel):
    """One port-distribution data point for charting."""
    port: str
    value: float
    type: str
65 |
class SoybeanImport(BaseModel):
    """API response model for soybean import data."""
    date: str  # YYYY-MM-DD

    # Shipment data
    current_shipment: float
    forecast_shipment: float
    forecast_next_shipment: float

    # Arrival data
    current_arrival: float
    next_arrival: float
    current_month_arrival: float
    next_month_arrival: float

    # Year-over-year / month-over-month change figures
    current_shipment_yoy: float = 0.0
    current_shipment_mom: float = 0.0
    forecast_shipment_yoy: float = 0.0
    forecast_shipment_mom: float = 0.0
    current_arrival_yoy: float = 0.0
    current_arrival_mom: float = 0.0
    next_arrival_yoy: float = 0.0

    # Forecast-vs-actual differences
    shipment_forecast_diff: float = 0.0
    arrival_forecast_diff: float = 0.0

    # Chart data (pydantic copies mutable defaults per instance, so the
    # shared-mutable-default pitfall of plain Python does not apply here)
    monthly_comparison: List[ComparisonData] = []
    port_distribution: List[PortDistributionData] = []

    # Detail records
    port_details: List[PortDetail]
    customs_details: List[CustomsDetail]
    policy_events: List[PolicyEvent] = []
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

    class Config:
        # Allow building this model from SoybeanImportDB ORM objects.
        from_attributes = True
107 |
class PolicyEvent(BaseModel):
    """Policy event affecting the soybean market.

    NOTE(review): duplicate of the PolicyEvent class declared near the top of
    this module; this re-declaration rebinds the module attribute to an
    identical class and should be removed.
    """
    date: str
    event: str
    impact: str
    type: str
--------------------------------------------------------------------------------
/backend/models/trading.py:
--------------------------------------------------------------------------------
1 | from pydantic import BaseModel
2 | from typing import List, Dict, Optional
3 | from datetime import date
4 |
class StrategyDetails(BaseModel):
    """Entry/exit/risk-management rules of an options strategy."""
    entry_points: List[str]
    exit_points: List[str]
    risk_management: List[str]

class OptionsStrategy(BaseModel):
    """A named options-strategy recommendation."""
    id: str
    title: str
    description: str
    risk_level: str
    expected_return: str
    time_horizon: str
    strategy_details: StrategyDetails

class DailyStrategyAnalysis(BaseModel):
    """Daily strategy analysis content for a given date."""
    date: date
    reasoning_content: str
    content: str
    created_at: Optional[date] = None
    updated_at: Optional[date] = None
--------------------------------------------------------------------------------
/backend/models/trading_strategy.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from sqlalchemy import Column, Integer, String, DateTime, JSON, Float
3 | from sqlalchemy.ext.declarative import declarative_base
4 |
5 | Base = declarative_base()
6 |
class TradingStrategy(Base):
    """SQLAlchemy table model for generated trading strategies."""
    __tablename__ = "trading_strategies"

    id = Column(Integer, primary_key=True, index=True)
    contract = Column(String, nullable=False, index=True)  # contract code
    strategy = Column(String, nullable=False)  # strategy text
    sr_levels = Column(JSON, nullable=True)  # support/resistance level data
    # NOTE(review): datetime.utcnow is deprecated on Python 3.12+.
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
--------------------------------------------------------------------------------
/backend/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi
2 | uvicorn
3 | tushare
4 | pandas
5 | pydantic
6 | pydantic-settings
7 | python-dotenv
8 | sqlalchemy
9 | python-jose
10 | passlib
11 | python-multipart
12 | scikit-learn
13 | tensorflow
14 | loguru
15 | akshare
16 | httpx
17 | openai
18 | beautifulsoup4
19 | aiohttp
20 | langchain_core
21 | langgraph
22 | questionary
23 | langchain_openai
24 | langchain_ollama
25 | langchain_deepseek
26 | colorama
27 | matplotlib
--------------------------------------------------------------------------------
/backend/routers/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | API Routers
3 | """
--------------------------------------------------------------------------------
/backend/routers/account.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, Depends
2 | from services.account import AccountService
3 | from models.account import Account
4 |
5 | router = APIRouter()
6 |
@router.get("/account", response_model=Account)
async def get_account():
    """Return the current account information.

    A fresh AccountService is constructed per request; the service call
    itself is synchronous.
    """
    account_service = AccountService()
    return account_service.get_account()
--------------------------------------------------------------------------------
/backend/routers/ai.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from pydantic import BaseModel
3 | from typing import List, Dict, Optional
4 | from datetime import datetime
5 | import sys
6 | import os
7 | from loguru import logger
8 |
9 | # 添加项目根目录到Python路径
10 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
11 |
12 | from AI.AIService import run_hedge_fund
13 | from AI.backtester import Backtester
14 |
15 | router = APIRouter()
16 |
class BacktestRequest(BaseModel):
    """Request body for the AI hedge-fund backtest endpoint."""
    tickers: List[str]
    start_date: str
    end_date: str
    initial_capital: float = 100000.0
    portfolio: Dict[str, float]
    # pydantic copies mutable defaults per instance, so `= []` is safe here.
    selected_analysts: Optional[List[str]] = []
    model_name: str = "bot-20250329163710-8zcqm"
    model_provider: str = "OpenAI"
26 |
@router.post("/backtest")
async def run_backtest(request: BacktestRequest):
    """
    Run a backtest.

    First runs the hedge-fund analysis over the requested tickers and date
    range, then replays the same agent through the Backtester.

    Args:
        request: backtest request with tickers, date range, initial capital,
            portfolio and model settings.

    Returns:
        Dict with the analysis result ("analysis") and the backtest
        result ("backtest").

    Raises:
        HTTPException: 500 with the error message when any step fails.
    """
    logger.info(f"开始回测: 股票={request.tickers}, 开始日期={request.start_date}, 结束日期={request.end_date}")
    logger.info(f"回测参数: 初始资金={request.initial_capital}, 模型={request.model_name}, 提供商={request.model_provider}")

    try:
        # Run the hedge-fund analysis.
        logger.info("开始运行对冲基金分析...")
        result = run_hedge_fund(
            tickers=request.tickers,
            start_date=request.start_date,
            end_date=request.end_date,
            portfolio=request.portfolio,
            selected_analysts=request.selected_analysts,
            model_name=request.model_name,
            model_provider=request.model_provider
        )
        logger.info("对冲基金分析完成")

        # Initialize the backtester with the same agent and settings.
        logger.info("初始化回测器...")
        backtester = Backtester(
            agent=run_hedge_fund,
            tickers=request.tickers,
            start_date=request.start_date,
            end_date=request.end_date,
            initial_capital=request.initial_capital,
            model_name=request.model_name,
            model_provider=request.model_provider,
            selected_analysts=request.selected_analysts
        )

        # Run the backtest itself.
        logger.info("开始运行回测...")
        backtest_results = backtester.run_backtest()
        logger.info("回测完成")

        logger.info(f"回测成功: 股票数量={len(request.tickers)}, 回测结果={backtest_results}")

        return {
            "analysis": result,
            "backtest": backtest_results
        }

    except Exception as e:
        logger.error(f"回测失败: {str(e)}")
        import traceback
        logger.error(f"异常堆栈: {traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=str(e))
--------------------------------------------------------------------------------
/backend/routers/core_factor.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException, Depends
2 | from datetime import datetime
3 | from typing import Dict, Any
4 | from services.core_factor import CoreFactorAnalyzer
5 |
6 | router = APIRouter()
7 | analyzer = CoreFactorAnalyzer()
8 |
@router.get("/{date}")
async def get_core_factor_analysis(date: str) -> Dict[str, Any]:
    """Return core driving-factor analysis data for the given date.

    Raises HTTPException 500 on any analyzer failure.
    """
    try:
        return await analyzer.get_core_factor_analysis(date)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
--------------------------------------------------------------------------------
/backend/routers/dual_ma.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from pathlib import Path
3 | import pandas as pd
4 | import numpy as np
5 | import talib
6 | from utils.logger import logger
7 | from datetime import datetime
8 | from typing import List, Dict, Union
9 | from strategies.dual_ma_strategy import DualMAStrategy
10 | from pydantic import BaseModel
11 |
class BacktestRequest(BaseModel):
    """Request body for the dual-MA backtest endpoint."""
    use_atr_tp: bool = False  # whether to apply the ATR take-profit rule
    data_period: str = 'weekly'  # data period: weekly/daily/30min
15 |
16 | router = APIRouter()
17 |
def load_and_process_data(file_path):
    """Load a K-line CSV, compute EMA(8)/EMA(21), and return JSON-safe records.

    Handles both the 30-minute files (Chinese column headers) and the
    daily/weekly files (English headers).

    Raises:
        HTTPException: 404 when the file is missing, 500 on any other error.
    """
    try:
        logger.info(f"开始加载数据文件: {file_path}")

        if not file_path.exists():
            error_msg = f"数据文件不存在: {file_path}"
            logger.error(error_msg)
            raise HTTPException(status_code=404, detail=error_msg)

        df = pd.read_csv(file_path)
        logger.info(f"成功读取CSV文件,总数据行数: {len(df)}")

        logger.debug("开始处理时间列")
        # The 30-minute export uses Chinese column headers; normalize them first.
        if '30min' in str(file_path):
            df = df.rename(columns={
                '时间': 'date',
                '开盘': 'open',
                '收盘': 'close',
                '最高': 'high',
                '最低': 'low',
                '成交量': 'vol',
                '成交额': 'amount'
            })
        # BUG FIX: the original parsed dates only in the non-30min branch, so
        # 30-minute rows kept string dates and `strftime` below raised.
        df['date'] = pd.to_datetime(df['date'])

        logger.debug("开始计算EMA指标")
        df['ema_short'] = talib.EMA(df['close'], timeperiod=8)
        df['ema_long'] = talib.EMA(df['close'], timeperiod=21)
        # Sanitize: map +/-inf to NaN, then fill EMA warm-up gaps.
        # (.ffill()/.bfill(): fillna(method=...) is deprecated in pandas 2.x.)
        df = df.replace([np.inf, -np.inf], np.nan)
        df['ema_short'] = df['ema_short'].ffill().bfill()
        df['ema_long'] = df['ema_long'].ffill().bfill()

        logger.debug(f"数据中是否还存在NaN值: {df.isna().any().any()}")

        # Sort chronologically (ascending).
        df = df.sort_values('date')
        logger.info(f"数据时间范围: {df['date'].min()} 至 {df['date'].max()}")

        # Convert to the record layout expected by the frontend.
        columns = ['date', 'open', 'close', 'high', 'low', 'ema_short', 'ema_long']
        result = df[columns].to_dict('records')
        for item in result:
            item['date'] = item['date'].strftime('%Y-%m-%d %H:%M')
            # Replace any remaining non-finite values so the payload is valid JSON.
            for key in ('open', 'close', 'high', 'low', 'ema_short', 'ema_long'):
                if not np.isfinite(item[key]):
                    item[key] = None

        logger.info(f"数据处理完成,返回 {len(result)} 条记录")
        return result
    except HTTPException:
        # BUG FIX: preserve deliberate HTTP errors (the 404 above) instead of
        # letting the generic handler rewrap them as 500s.
        raise
    except Exception as e:
        error_msg = f"处理数据时发生错误: {str(e)}"
        logger.error(error_msg, exc_info=True)
        raise HTTPException(status_code=500, detail=error_msg)
82 |
@router.get("/weekly")
async def get_weekly_data():
    """Return processed weekly K-line data (with EMA 8/21) for 159985.SZ."""
    logger.info("收到周线数据请求")
    data_path = Path("data/159985.SZ_fund_weekly_20190101_20251231.csv")
    logger.debug(f"周线数据文件路径: {data_path.absolute()}")
    return load_and_process_data(data_path)
89 |
@router.post("/backtest")
async def backtest_strategy(request: BacktestRequest):
    """Run the dual-MA strategy backtest over the selected data period.

    Raises HTTPException 500 on any failure.
    """
    try:
        logger.info(f"收到回测请求,use_atr_tp={request.use_atr_tp}, data_period={request.data_period}")

        # Pick the data file matching the requested period.
        if request.data_period == 'weekly':
            data_path = Path("data/159985.SZ_fund_weekly_20190101_20251231.csv")
        elif request.data_period == 'daily':
            data_path = Path("data/159985.SZ_fund_daily_20190101_20251231.csv")
        else:  # 30min
            data_path = Path("data/159985.SZ_fund_30min_20190101_20251231.csv")

        # Read and normalize the data.
        df = pd.read_csv(data_path)

        # The 30-minute export uses Chinese column headers; normalize them.
        if '30min' in str(data_path):
            df = df.rename(columns={
                '时间': 'date',
                '开盘': 'open',
                '收盘': 'close',
                '最高': 'high',
                '最低': 'low',
                '成交量': 'vol',
                '成交额': 'amount'
            })
            df['date'] = pd.to_datetime(df['date'])
        else:
            # Daily/weekly dates are stored as YYYYMMDD.
            df['date'] = pd.to_datetime(df['date'], format='%Y%m%d')

        # Compute signals and run the backtest via the strategy class.
        strategy = DualMAStrategy()
        df_with_signals = strategy.calculate_signals(df, use_atr_tp=request.use_atr_tp)
        result = strategy.run_backtest(df_with_signals, use_atr_tp=request.use_atr_tp)

        logger.info("回测完成")
        return result

    except Exception as e:
        error_msg = f"回测过程中发生错误: {str(e)}"
        logger.error(error_msg, exc_info=True)
        raise HTTPException(status_code=500, detail=error_msg)
--------------------------------------------------------------------------------
/backend/routers/fundamental.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from services.fundamental import FundamentalAnalyzer
3 | from utils.logger import logger
4 | from datetime import datetime
5 | from typing import Dict, Any
6 |
7 | router = APIRouter()
8 | analyzer = FundamentalAnalyzer()
9 |
@router.get("/analysis")
async def get_fundamental_analysis(date: str):
    """Return fundamental-analysis data for the given date.

    Args:
        date: Trading date in YYYY-MM-DD format.

    Raises:
        HTTPException: 400 for a malformed date, 500 for analyzer failures.
    """
    # BUG FIX: validate the date *outside* the generic try/except. In the
    # original, the deliberate 400 HTTPException was caught by the broad
    # `except Exception` below and rewrapped as a 500.
    try:
        datetime.strptime(date, "%Y-%m-%d")
    except ValueError:
        raise HTTPException(status_code=400, detail="日期格式错误,请使用YYYY-MM-DD格式")

    try:
        return await analyzer.get_fundamental_analysis(date)
    except Exception as e:
        logger.error(f"获取基本面分析失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
24 |
@router.get("/supply-demand")
async def get_supply_demand() -> Dict[str, Any]:
    """Return supply/demand balance data."""
    try:
        return await analyzer.get_supply_demand_data()
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/seasonal")
async def get_seasonal_pattern() -> Dict[str, Any]:
    """Return seasonal-pattern data."""
    try:
        return {"data": await analyzer.get_seasonal_pattern()}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/weather")
async def get_weather_data() -> Dict[str, Any]:
    """Return weather data."""
    try:
        return await analyzer.get_weather_data()
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/crush-profit")
async def get_crush_profit() -> Dict[str, Any]:
    """Return crush-profit data."""
    try:
        return await analyzer.get_crush_profit()
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/overall")
async def get_overall_assessment() -> Dict[str, Any]:
    """Return the overall assessment data."""
    try:
        return await analyzer.get_overall_assessment()
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
--------------------------------------------------------------------------------
/backend/routers/grid.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from pathlib import Path
3 | import pandas as pd
4 | import numpy as np
5 | from utils.logger import logger
6 | from datetime import datetime
7 | from typing import List, Dict, Union
8 | from strategies.grid_strategy import GridStrategy
9 | from pydantic import BaseModel, Field
10 |
class BacktestRequest(BaseModel):
    """Request body for the grid-strategy backtest endpoint."""
    grid_levels: int = Field(default=10, ge=2, le=50, description="网格数量,范围2-50")
    atr_period: int = Field(default=14, ge=5, le=30, description="ATR周期,范围5-30")
    data_period: str = Field(default='daily', description="数据周期: daily/weekly/30min")

    class Config:
        # Example payload shown in the OpenAPI docs.
        # NOTE(review): pydantic v2 renamed this key to `json_schema_extra`;
        # verify which pydantic version the project pins.
        schema_extra = {
            "example": {
                "grid_levels": 10,
                "atr_period": 14,
                "data_period": "daily"
            }
        }
24 |
25 | router = APIRouter()
26 |
def load_and_process_data(file_path: Path) -> pd.DataFrame:
    """Load an OHLC CSV and return a cleaned, date-sorted DataFrame.

    Raises:
        HTTPException: 500 when required columns are missing or any other
            processing step fails.
    """
    try:
        df = pd.read_csv(file_path)
        logger.info(f"成功读取CSV文件,总数据行数: {len(df)}")

        # All downstream grid calculations rely on these columns.
        required_columns = ['date', 'open', 'high', 'low', 'close']
        if not all(col in df.columns for col in required_columns):
            raise ValueError(f"CSV文件缺少必要的列: {required_columns}")

        # Dates are stored as YYYYMMDD.
        df['date'] = pd.to_datetime(df['date'], format='%Y%m%d')

        # Sanitize: map +/-inf to NaN, then forward/backward fill.
        # FIX: fillna(method=...) is deprecated in pandas 2.x; use ffill/bfill.
        df = df.replace([np.inf, -np.inf], np.nan)
        df = df.ffill().bfill()

        # Sort chronologically.
        df = df.sort_values('date')

        # Force float dtype on the price columns.
        numeric_columns = ['open', 'high', 'low', 'close']
        for col in numeric_columns:
            df[col] = df[col].astype(float)

        logger.info(f"数据处理完成,时间范围: {df['date'].min()} 至 {df['date'].max()}")
        return df

    except Exception as e:
        error_msg = f"加载数据文件失败: {str(e)}"
        logger.error(error_msg)
        raise HTTPException(status_code=500, detail=error_msg)
61 |
@router.post("/backtest")
async def run_backtest(request: BacktestRequest):
    """Run the grid-strategy backtest and return results plus a summary.

    Raises:
        HTTPException: 400 for an unsupported data period, 500 otherwise.
    """
    try:
        logger.info("开始网格策略回测")
        logger.info(f"参数: grid_levels={request.grid_levels}, atr_period={request.atr_period}, data_period={request.data_period}")

        # Pick the data file for the requested period.
        # NOTE(review): all three periods currently map to the same daily CSV.
        data_file = {
            'weekly': 'data/M2501.DCE_future_daily_20240101_20251231.csv',
            'daily': 'data/M2501.DCE_future_daily_20240101_20251231.csv',
            '30min': 'data/M2501.DCE_future_daily_20240101_20251231.csv'
        }.get(request.data_period)

        if not data_file:
            raise HTTPException(status_code=400, detail=f"不支持的数据周期: {request.data_period}")

        # Load the data.
        df = load_and_process_data(Path(data_file))

        # Compute trading signals.
        df_with_signals = GridStrategy.calculate_signals(
            df=df,
            grid_levels=request.grid_levels,
            atr_period=request.atr_period
        )

        # Execute the backtest.
        backtest_results = GridStrategy.run_backtest(
            df=df_with_signals,
            grid_levels=request.grid_levels
        )

        logger.info("网格策略回测完成")
        return {
            "status": "success",
            "data": {
                "backtest_results": backtest_results,
                "summary": {
                    "total_trades": len(backtest_results['trades']),
                    "win_rate": round(backtest_results['win_rate'], 2),
                    "total_profit": round(backtest_results['total_profit'], 2),
                    "sharpe_ratio": round(backtest_results['sharpe_ratio'], 2),
                    "max_drawdown": round(backtest_results['max_drawdown'], 2),
                    "annual_returns": round(backtest_results['annual_returns'] * 100, 2)
                }
            }
        }

    except HTTPException:
        # BUG FIX: the generic handler below used to swallow the deliberate
        # 400 above (and any HTTPException from load_and_process_data) and
        # re-raise it as a 500.
        raise
    except Exception as e:
        error_msg = f"执行回测失败: {str(e)}"
        logger.error(error_msg)
        raise HTTPException(status_code=500, detail=error_msg)
114 |
@router.get("/data")
async def get_grid_data():
    """Return the latest grid-strategy data as JSON-safe K-line records."""
    try:
        # Daily data file.
        data_file = 'data/M2501.DCE_future_daily_20240101_20251231.csv'

        # Load the data.
        df = load_and_process_data(Path(data_file))

        # Compute grid signals.
        df_with_signals = GridStrategy.calculate_signals(df)

        # Convert rows into the response format.
        result = []
        for _, row in df_with_signals.iterrows():
            # Price of the grid level the row currently sits in.
            grid_price = row['grids'][row['current_grid']] if not pd.isna(row['current_grid']) else None

            # Map NaN/inf to None so the payload stays valid JSON.
            def safe_float(value):
                if pd.isna(value) or np.isinf(value):
                    return None
                return float(value)

            result.append({
                'date': row['date'].strftime('%Y-%m-%d'),
                'open': safe_float(row['open']),
                'close': safe_float(row['close']),
                'high': safe_float(row['high']),
                'low': safe_float(row['low']),
                'grid_level': int(row['current_grid']) if not pd.isna(row['current_grid']) else None,
                'grid_price': safe_float(grid_price)
            })

        return result

    except Exception as e:
        error_msg = f"获取网格数据失败: {str(e)}"
        logger.error(error_msg)
        raise HTTPException(status_code=500, detail=error_msg)
--------------------------------------------------------------------------------
/backend/routers/news.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException, Depends, Query
2 | from typing import List, Optional
3 | from services.news_service import NewsService
4 | from utils.logger import logger
5 | from models.news import FlashNews, NewsArticle
6 |
7 | router = APIRouter()
8 |
def get_news_service() -> NewsService:
    """FastAPI dependency that builds a fresh NewsService per request."""
    logger.debug("创建新闻服务实例")
    return NewsService()

@router.get("/daily")
async def get_daily_news(
    start_date: Optional[str] = Query(None, description="开始日期,格式:YYYYMMDD"),
    end_date: Optional[str] = Query(None, description="结束日期,格式:YYYYMMDD"),
    service: NewsService = Depends(get_news_service)
):
    """Return daily news, optionally limited to a YYYYMMDD date range."""
    try:
        news = service.get_news(start_date, end_date)
        return news
    except Exception as e:
        logger.error(f"获取每日新闻失败: {e}")
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/flash")
async def get_flash_news(
    service: NewsService = Depends(get_news_service)
):
    """Return flash (short-form) news items."""
    try:
        flash_news = service.get_flash_news()
        return flash_news
    except Exception as e:
        logger.error(f"获取快讯失败: {e}")
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/articles")
async def get_news_articles(
    service: NewsService = Depends(get_news_service)
):
    """Return long-form news articles."""
    try:
        articles = service.get_news_articles()
        return articles
    except Exception as e:
        logger.error(f"获取资讯文章失败: {e}")
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/analysis")
async def get_news_analysis(
    news_date: str = Query(..., description="新闻日期,格式:YYYYMMDD"),
    service: NewsService = Depends(get_news_service)
):
    """Return the stored news-impact analysis for the given date.

    Returns an empty placeholder payload when no news exists for that date.
    """
    try:
        analysis = service.analyze_news_impact(news_date)
        if not analysis:
            return {
                "date": news_date,
                "news_count": 0,
                "price_change": None,
                "volume_change": None,
                "analysis": [],
                "message": "未找到该日期的新闻数据"
            }
        return analysis
    except Exception as e:
        logger.error(f"获取新闻分析失败: {e}")
        raise HTTPException(status_code=500, detail=str(e))

@router.post("/analyze")
async def analyze_news(
    news_date: str = Query(..., description="新闻日期,格式:YYYYMMDD"),
    service: NewsService = Depends(get_news_service)
):
    """Run DeepSeek analysis over the given date's news and return it."""
    try:
        analysis = await service.analyze_news_with_deepseek(news_date)
        return analysis
    except Exception as e:
        logger.error(f"分析新闻失败: {e}")
        raise HTTPException(status_code=500, detail=str(e))
--------------------------------------------------------------------------------
/backend/routers/obv_adx_ema.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from pathlib import Path
3 | import pandas as pd
4 | import numpy as np
5 | import talib
6 | from utils.logger import logger
7 | from datetime import datetime
8 | from typing import List, Dict, Union
9 | from strategies.obv_adx_ema_strategy import OBVADXEMAStrategy
10 |
11 | router = APIRouter()
12 |
def load_and_process_data(file_path):
    """Load a 60-minute K-line CSV, compute EMA/ADX/OBV indicators, and
    return JSON-safe records for the frontend.

    Raises:
        HTTPException: 404 when the file is missing, 500 on any other error.
    """
    try:
        logger.info(f"开始加载数据文件: {file_path}")

        if not file_path.exists():
            error_msg = f"数据文件不存在: {file_path}"
            logger.error(error_msg)
            raise HTTPException(status_code=404, detail=error_msg)

        df = pd.read_csv(file_path)
        logger.info(f"成功读取CSV文件,总数据行数: {len(df)}")

        logger.debug("开始处理时间列")
        df['date'] = pd.to_datetime(df['date'])

        # Technical indicators used by the OBV/ADX/EMA strategy.
        df['ema20'] = talib.EMA(df['close'], timeperiod=20)
        df['ema60'] = talib.EMA(df['close'], timeperiod=60)
        df['ema5'] = talib.EMA(df['close'], timeperiod=5)
        df['adx'] = talib.ADX(df['high'], df['low'], df['close'], timeperiod=14)
        df['obv'] = talib.OBV(df['close'], df['volume'])
        df['obv_ma30'] = talib.SMA(df['obv'], timeperiod=30)

        indicator_cols = ['ema20', 'ema60', 'ema5', 'adx', 'obv', 'obv_ma30']

        # Sanitize: map +/-inf to NaN, then fill indicator warm-up gaps in one
        # pass. (FIX: fillna(method=...) is deprecated in pandas 2.x; the six
        # duplicated per-column fill lines are also consolidated.)
        df = df.replace([np.inf, -np.inf], np.nan)
        df[indicator_cols] = df[indicator_cols].ffill().bfill()

        logger.debug(f"数据中是否还存在NaN值: {df.isna().any().any()}")

        # Sort chronologically (ascending).
        df = df.sort_values('date')
        logger.info(f"数据时间范围: {df['date'].min()} 至 {df['date'].max()}")

        # Convert to the record layout expected by the frontend.
        columns = ['date', 'open', 'close', 'high', 'low', 'volume'] + indicator_cols
        result = df[columns].to_dict('records')
        for item in result:
            item['date'] = item['date'].strftime('%Y-%m-%d %H:%M')
            # Replace non-finite values so the payload is valid JSON.
            for key in columns[1:]:
                if not np.isfinite(item[key]):
                    item[key] = None

        logger.info(f"数据处理完成,返回 {len(result)} 条记录")
        return result
    except HTTPException:
        # BUG FIX: the generic handler below used to swallow the deliberate
        # 404 above and re-raise it as a 500.
        raise
    except Exception as e:
        error_msg = f"处理数据时发生错误: {str(e)}"
        logger.error(error_msg, exc_info=True)
        raise HTTPException(status_code=500, detail=error_msg)
68 |
@router.get("/data")
async def get_data():
    """Return processed 60-minute K-line data with OBV/ADX/EMA indicators."""
    logger.info("收到OBV、ADX与EMA组合策略数据请求")
    data_path = Path("data/M2501.DCE_future_60min_20240101_20251231.csv")
    logger.debug(f"60分钟数据文件路径: {data_path.absolute()}")
    return load_and_process_data(data_path)

@router.post("/backtest")
async def backtest_strategy():
    """Run the OBV/ADX/EMA combined-strategy backtest.

    Raises HTTPException 500 on any failure.
    """
    try:
        logger.info("收到OBV、ADX与EMA组合策略回测请求")

        # Load the 60-minute data.
        data_path = Path("data/M2501.DCE_future_60min_20240101_20251231.csv")

        # Read and normalize the data.
        df_60min = pd.read_csv(data_path)
        df_60min['date'] = pd.to_datetime(df_60min['date'])

        # Compute signals and run the backtest via the strategy class.
        strategy = OBVADXEMAStrategy()
        df_with_signals = strategy.calculate_signals(df_60min)
        result = strategy.run_backtest(df_with_signals)

        logger.info("OBV、ADX与EMA组合策略回测完成")
        return result

    except Exception as e:
        error_msg = f"回测过程中发生错误: {str(e)}"
        logger.error(error_msg, exc_info=True)
        raise HTTPException(status_code=500, detail=error_msg)
--------------------------------------------------------------------------------
/backend/routers/signals.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException, Depends, Query
2 | from typing import List, Optional
3 | from datetime import datetime, date
4 | from models.signals import Signal, SignalCreate, SignalUpdate
5 | from services.signals import SignalService
6 | from utils.logger import logger
7 | from models.kline import KLineData, SignalRequest
8 |
9 | router = APIRouter()
10 |
def get_signal_service() -> SignalService:
    """Dependency provider: build a fresh SignalService per request."""
    service = SignalService()
    return service
13 |
@router.post("/signals", response_model=dict)
async def get_signals(
    request: SignalRequest,
    signal_service: SignalService = Depends(get_signal_service)
):
    """获取交易信号列表 — paginated trading signals for a date range.

    NOTE(review): this handler shares POST /signals with create_signal below;
    FastAPI dispatches to whichever route was registered first — confirm the
    intended paths.
    """
    try:
        # Widen the string dates to cover the whole first and last day.
        start = datetime.strptime(request.start_date, "%Y-%m-%d").replace(hour=0, minute=0, second=0)
        end = datetime.strptime(request.end_date, "%Y-%m-%d").replace(hour=23, minute=59, second=59)

        signals, total = signal_service.generate_signals(
            start,
            end,
            request.type,
            request.page,
            request.page_size,
            request.klines,
        )

        # Ceiling division for the page count.
        page_count = (total + request.page_size - 1) // request.page_size
        return {
            "signals": signals,
            "total": total,
            "page": request.page,
            "page_size": request.page_size,
            "total_pages": page_count,
        }
    except Exception as e:
        logger.error(f"获取信号失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
45 |
@router.post("/signals", response_model=Signal)
async def create_signal(
    signal: SignalCreate,
    signal_service: SignalService = Depends(get_signal_service)
):
    """创建新信号 — create a new signal."""
    # NOTE(review): POST /signals is already registered by get_signals in this
    # file, so this handler is likely unreachable — confirm and give it a
    # distinct path (e.g. /signals/create) if creation is meant to be exposed.
    try:
        return signal_service.create_signal(signal)
    except Exception as e:
        logger.error(f"创建信号失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
57 |
@router.put("/signals/{signal_id}", response_model=Signal)
async def update_signal(
    signal_id: str,
    signal: SignalUpdate,
    signal_service: SignalService = Depends(get_signal_service)
):
    """更新信号 — update an existing signal by id."""
    try:
        updated = signal_service.update_signal(signal_id, signal)
        return updated
    except Exception as e:
        logger.error(f"更新信号失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
70 |
@router.delete("/signals/{signal_id}")
async def delete_signal(
    signal_id: str,
    signal_service: SignalService = Depends(get_signal_service)
):
    """删除信号 — delete a signal by id and confirm the deletion."""
    try:
        signal_service.delete_signal(signal_id)
    except Exception as e:
        logger.error(f"删除信号失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
    return {"message": "信号已删除"}
--------------------------------------------------------------------------------
/backend/routers/soybean.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, Depends, HTTPException
2 | from typing import List
3 | from datetime import datetime
4 | from services.soybean import SoybeanService
5 | from models.soybean import SoybeanImport
6 | from utils.logger import logger
7 |
8 | router = APIRouter()
9 |
def get_soybean_service() -> SoybeanService:
    """Dependency provider: build a fresh SoybeanService per request."""
    logger.debug("创建大豆进口数据服务实例")
    service = SoybeanService()
    return service
13 |
@router.get("/import", response_model=SoybeanImport)
async def get_soybean_import_data(
    service: SoybeanService = Depends(get_soybean_service)
):
    """获取大豆进口数据 — return soybean import data.

    Raises:
        HTTPException: 404 when the service returns no data; 500 on
            unexpected errors.
    """
    try:
        data = service.get_soybean_import_data()
        if not data:
            raise HTTPException(status_code=404, detail="未找到大豆进口数据")
        return data
    except HTTPException:
        # Bug fix: the broad handler below used to catch our own 404 and
        # rewrap it as a 500 — propagate deliberate HTTP errors unchanged.
        raise
    except Exception as e:
        # exc_info replaces the old traceback.print_exc(), keeping the stack
        # in the structured log instead of stderr.
        logger.error(f"获取大豆进口数据失败: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
--------------------------------------------------------------------------------
/backend/routers/stockfutures.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException, Depends, Query
2 | from typing import List, Optional
3 | from datetime import datetime
4 | from pydantic import BaseModel
5 | from utils.logger import logger
6 | from services.stockfutures import StockFuturesService
7 |
8 | router = APIRouter()
9 |
class StockRecommendation(BaseModel):
    """股票推荐结果模型 — one recommended stock with its rationale."""
    code: str  # exchange ticker (ts_code from the data source)
    name: str  # stock display name
    level: str  # 推荐级别:强烈推荐/推荐 — recommendation strength (strong / normal)
    price: float  # latest price used for the recommendation
    change_pct: float  # daily percentage change
    reason: str  # human-readable rationale string
18 |
class StockPickingResponse(BaseModel):
    """选股返回结果模型 — stock-picking response payload."""
    timestamp: str  # generation time, formatted "%Y-%m-%d %H:%M:%S"
    recommendations: List[StockRecommendation]  # ranked recommendation list (may be empty)
23 |
def get_stock_futures_service():
    """依赖注入:获取StockFuturesService实例 — dependency provider."""
    service = StockFuturesService()
    return service
27 |
@router.post("/stock-picking", response_model=StockPickingResponse)
async def pick_stocks(
    limit: Optional[int] = Query(10, description="返回的股票数量,默认10只"),
    service: StockFuturesService = Depends(get_stock_futures_service)
):
    """智能选股接口 — rank stocks trading near multi-timeframe support.

    Returns at most ``limit`` recommendations ordered as produced by the
    service layer.
    """
    try:
        # Stocks whose price sits within 3% of a support level.
        stocks_near_support = service.find_stocks_near_support(threshold_percent=0.03)

        if not stocks_near_support:
            logger.warning("未找到符合条件的股票")
            return StockPickingResponse(
                timestamp=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                recommendations=[]
            )

        # Bug fix: `limit` was accepted but never applied (the service call
        # above is not told about it) — truncate here.
        if limit is not None:
            stocks_near_support = stocks_near_support[:limit]

        recommendations = []
        for stock in stocks_near_support:
            # Closer to the hourly support level -> stronger recommendation.
            level = "强烈推荐" if stock['hourly_distance_percent'] < 0.02 else "推荐"

            # Build the human-readable rationale.
            reason = (
                f"日线距支撑位{stock['distance_percent']*100:.1f}%,"
                f"小时线距支撑位{stock['hourly_distance_percent']*100:.1f}%,"
            )
            if 'industry' in stock:
                reason += f"所属{stock['industry']}行业,"
            reason += "多级别支撑共振,建议关注"

            recommendations.append(
                StockRecommendation(
                    code=stock['ts_code'],
                    name=stock['name'],
                    level=level,
                    price=stock['hourly_latest_price'],  # hourly-level latest price
                    change_pct=stock['pct_chg'],  # keep the daily change percentage
                    reason=reason
                )
            )

        return StockPickingResponse(
            timestamp=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            recommendations=recommendations
        )
    except Exception as e:
        logger.error(f"选股失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
78 |
@router.get("/hs300-stocks")
async def get_hs300_stocks(service: StockFuturesService = Depends(get_stock_futures_service)):
    """获取沪深300成分股列表 — list HS300 index constituents.

    Raises:
        HTTPException: 404 when no constituents are found; 500 otherwise.
    """
    try:
        stocks = service.get_hs300_stocks()
        if not stocks:
            raise HTTPException(status_code=404, detail="未找到沪深300成分股数据")
        return stocks
    except HTTPException:
        # Bug fix: keep the intended 404 instead of rewrapping it as a 500.
        raise
    except Exception as e:
        logger.error(f"获取沪深300成分股失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
90 |
@router.get("/stock-daily/{ts_code}")
async def get_stock_daily(
    ts_code: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    service: StockFuturesService = Depends(get_stock_futures_service)
):
    """获取股票日线数据 — daily bars for one stock as a list of records.

    Raises:
        HTTPException: 404 when the symbol has no data; 500 otherwise.
    """
    try:
        df = service.get_stock_daily(ts_code, start_date, end_date)
        if df.empty:
            raise HTTPException(status_code=404, detail=f"未找到股票{ts_code}的日线数据")
        return df.to_dict(orient='records')
    except HTTPException:
        # Bug fix: propagate the deliberate 404 instead of converting it
        # to a generic 500 below.
        raise
    except Exception as e:
        logger.error(f"获取股票{ts_code}日线数据失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
--------------------------------------------------------------------------------
/backend/routers/trading.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException, Request, Depends
2 | from typing import List
3 | from models.trading import OptionsStrategy, DailyStrategyAnalysis
4 | from services.trading import TradingService
5 | from utils.logger import logger
6 | import httpx
7 | from config import settings
8 | import json
9 | from openai import OpenAI
10 | from fastapi.responses import StreamingResponse
11 | import asyncio
12 | from starlette.background import BackgroundTask
13 | from datetime import datetime
14 |
15 | router = APIRouter()
16 |
# OpenAI-compatible client pointed at the Volcano Ark "bots" endpoint,
# authenticated with the DeepSeek API key from settings. Shared module-wide.
client = OpenAI(
    api_key=settings.DEEPSEEK_API_KEY,
    base_url="https://ark.cn-beijing.volces.com/api/v3/bots"
)
22 |
def get_trading_service() -> TradingService:
    """Dependency provider: build a fresh TradingService per request."""
    service = TradingService()
    return service
25 |
async def stream_response(response, request: Request, date: str):
    """Relay a streaming chat completion as Server-Sent Events.

    Accumulates ``content`` and ``reasoning_content`` deltas from the model
    stream, yields each running total as an SSE ``data:`` frame, emits a
    final ``done`` frame, then persists the finished analysis for *date*.

    Args:
        response: Iterable of streamed chat-completion chunks.
        request: Incoming request, polled for client disconnects.
        date: The user-selected analysis date used as the storage key.
    """
    reasoning_content = ""
    content = ""

    try:
        for chunk in response:
            # Stop streaming as soon as the client disconnects.
            if await request.is_disconnected():
                logger.info("客户端断开连接")
                break

            try:
                if hasattr(chunk, "references"):
                    pass
                if not chunk.choices:
                    continue
                if chunk.choices[0].delta.content:
                    content += chunk.choices[0].delta.content
                    yield f"data: {json.dumps({'type': 'content', 'content': content})}\n\n"
                elif hasattr(chunk.choices[0].delta, "reasoning_content"):
                    reasoning_content += chunk.choices[0].delta.reasoning_content
                    yield f"data: {json.dumps({'type': 'reasoning', 'content': reasoning_content})}\n\n"

            except Exception as e:
                # Best effort: skip a malformed chunk, keep the stream alive.
                logger.error(f"处理chunk时出错: {str(e)}")
                continue

        # Terminal frame carrying the full accumulated payload.
        yield f"data: {json.dumps({'type': 'done', 'reasoning': reasoning_content, 'content': content})}\n\n"

        # Persist the finished analysis (best effort — failures only logged).
        try:
            trading_service = TradingService()
            analysis = DailyStrategyAnalysis(
                date=date,  # the user-selected date
                content=content,
                reasoning_content=reasoning_content
            )
            trading_service.save_strategy_analysis(analysis)
            logger.info(f"策略分析已保存到数据库 - 日期: {date}")
        except Exception as e:
            logger.error(f"保存策略分析到数据库失败: {str(e)}")

    except Exception as e:
        logger.error(f"流式响应出错: {str(e)}", exc_info=True)
        yield f"data: {json.dumps({'type': 'error', 'message': str(e)})}\n\n"
    finally:
        logger.info("流式响应结束")
74 |
@router.get("/options")
async def get_options_strategies(
    date: str,
    request: Request,
    trading_service: TradingService = Depends(get_trading_service)
):
    """获取期权策略分析 — cached-or-generated options strategy analysis.

    Returns the cached analysis for *date* when available; otherwise builds a
    prompt from live trading data, asks the model to refine it, then streams
    the generated strategy back as SSE (which also persists the result).
    """
    try:
        # First try the cached analysis from the database.
        analysis = trading_service.get_strategy_analysis(date)
        if analysis:
            logger.info(f"从数据库获取到策略分析 - 日期: {date}")
            return {
                "content": analysis.content,
                "reasoning_content": analysis.reasoning_content
            }

        # Nothing cached — fall back to the Deepseek API.
        logger.info("数据库中没有找到策略分析,开始调用Deepseek API")

        # Gather the market data that feeds the prompt template.
        trading_data = trading_service.get_trading_data()
        if not trading_data or 'raw_data' not in trading_data:
            raise HTTPException(status_code=500, detail="获取交易数据失败")

        raw_data = trading_data['raw_data']
        # Prompt template: asks the model to fill the 【】 placeholders with
        # fresh market information, then return the completed prompt verbatim.
        prompt = f"""我需要生成一份提示词,其核心目标是:基于多维度数据生成豆粕主力合约({raw_data['main_contract']})下一个交易日的交易操作策略。 目前我已经有如下框架,请帮我把【】内的数据说明用最新的互联网资讯补充完成(最好是今天的),并将补完后的这份提示词完整返回给我。
请直接返回提示词,不需要其他任何额外的文字,特别是引用参考和来源,不要出现任何的引用和来源,还有markdown语法字符也不能出现。

``` 目标:基于多维度数据生成豆粕主力合约({raw_data['main_contract']})下一个交易日({raw_data['next_day']})的量化策略,不要出现任何的引用和来源。
一、实时价格与技术指标
{trading_data['price_analysis']}
{trading_data['technical_analysis']}
{trading_data['volume_analysis']}
二、基本面与市场情绪 【请补充供应端数据(进口大豆到港量、油厂开机率、豆粕库存)、需求端数据(饲料企业采购量、替代品价格)】
三、国际市场联动
1、【请补充隔夜CBOT美豆走势情况】
2、【请补充USDA出口销售数据和巴西贴水情况】
3、人民币汇率:{raw_data['usd_cny']}。
四、资金与政策风险
1、【请补充机构行为,包括净空、净多头寸、机构仓位变化情况】。
2、政策风险 中美关税:美豆进口关税138%,远月成本支撑,但5月前到港压力主导。
3、【请补充天气炒作情况】
4、【请补充基差变化】
5、【请补充突发事件】 ```
"""
        logger.info(f"生成提示词:{prompt}")
        try:
            # Pass 1 (non-streaming): have the bot complete the prompt template.
            response = client.chat.completions.create(
                model="bot-20250329163710-8zcqm",
                messages=[{"role": "system", "content": "你是DeepSeek,是一个提示词工程专家"}, {"role": "user", "content": prompt}],
                stream=False
            )
            content = response.choices[0].message.content
            logger.info(f"生成提示词:{content}")

            # Pass 2 (streaming): generate the actual trading strategy from
            # the completed prompt.
            response = client.chat.completions.create(
                model="bot-20250329163710-8zcqm",
                messages=[{"role": "system", "content": "现在你是一个豆粕期货量化策略专家,请根据我给你的提示词,生成一份豆粕期货交易操作策略。"}, {"role": "user", "content": content}],
                stream=True
            )

            return StreamingResponse(
                stream_response(response, request, date),
                media_type="text/event-stream",
                background=BackgroundTask(logger.info, "请求处理完成")
            )

        except Exception as e:
            logger.error(f"API调用失败: {str(e)}", exc_info=True)
            raise HTTPException(status_code=500, detail=f"API调用失败: {str(e)}")

    except Exception as e:
        # NOTE(review): this also catches the HTTPExceptions raised above and
        # rewraps them as a plain 500 — confirm whether they should pass through.
        logger.error(f"获取期权策略分析失败: {str(e)}", exc_info=True)
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
152 |
--------------------------------------------------------------------------------
/backend/routers/trading_model.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | import numpy as np
3 | import pandas as pd
4 | from typing import List, Dict
5 | from datetime import datetime, timedelta
6 | import tensorflow as tf
7 | from sklearn.preprocessing import MinMaxScaler
8 |
9 | router = APIRouter()
10 |
class TradingModel:
    """LSTM-based price model over 60-step windows of OHLCV features."""

    def __init__(self):
        self.model = self._build_model()
        self.scaler = MinMaxScaler()

    def _build_model(self):
        """Build and compile the stacked-LSTM regression network."""
        layers = tf.keras.layers
        network = tf.keras.Sequential([
            layers.LSTM(50, return_sequences=True, input_shape=(60, 5)),
            layers.Dropout(0.2),
            layers.LSTM(50, return_sequences=False),
            layers.Dropout(0.2),
            layers.Dense(1)
        ])
        network.compile(optimizer='adam', loss='mse')
        return network

    def prepare_data(self, data: List[Dict]):
        """Scale raw bars and slice them into (window, target) training pairs."""
        frame = pd.DataFrame(data)
        feature_cols = ['open', 'high', 'low', 'close', 'vol']
        scaled = self.scaler.fit_transform(frame[feature_cols])

        windows = []
        targets = []
        for end in range(60, len(scaled)):
            windows.append(scaled[end - 60:end])
            targets.append(scaled[end, 3])  # column 3 = close price -> prediction target

        return np.array(windows), np.array(targets)
38 |
@router.get("/predict")
async def get_prediction(
    symbol: str = "M",
    days: int = 5
):
    """获取模型预测结果 — return a model prediction summary.

    Currently serves a static placeholder payload; real history loading and
    model inference are still TODO.
    """
    try:
        prediction = {
            "next_day_prediction": 3500.0,
            "confidence": 0.85,
            "trend": "上涨",
            "risk_level": "中等",
            "suggested_position": "多头",
        }
        return prediction
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
57 |
@router.get("/model-performance")
async def get_model_performance():
    """获取模型性能指标 — return model performance metrics (static stub)."""
    try:
        metrics = {
            "accuracy": 0.78,
            "sharpe_ratio": 1.5,
            "max_drawdown": 0.15,
            "win_rate": 0.65,
            "profit_factor": 1.8,
        }
        return metrics
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
--------------------------------------------------------------------------------
/backend/routers/trend_follow.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, HTTPException
2 | from pathlib import Path
3 | import pandas as pd
4 | import numpy as np
5 | import talib
6 | from utils.logger import logger
7 | from datetime import datetime
8 | from typing import List, Dict, Union
9 | from strategies.trend_follow_strategy import TrendFollowStrategy
10 |
11 | router = APIRouter()
12 |
def load_and_process_data(file_path, is_15min=False):
    """Load a futures candle CSV, attach EMA columns and return JSON-safe records.

    Args:
        file_path: Path to the CSV (must contain date/open/close/high/low).
        is_15min: When True compute EMA12/EMA26 (15-minute bars); otherwise EMA60.

    Returns:
        List of dicts sorted by time ascending; non-finite numbers become None
        so the payload is valid JSON.

    Raises:
        HTTPException: 404 when the file is missing, 500 on processing errors.
    """
    try:
        logger.info(f"开始加载数据文件: {file_path}")

        if not file_path.exists():
            error_msg = f"数据文件不存在: {file_path}"
            logger.error(error_msg)
            raise HTTPException(status_code=404, detail=error_msg)

        df = pd.read_csv(file_path)
        logger.info(f"成功读取CSV文件,总数据行数: {len(df)}")

        logger.debug("开始处理时间列")
        df['date'] = pd.to_datetime(df['date'])

        if is_15min:
            logger.debug("开始计算15分钟EMA12和EMA26指标")
            ema_cols = ['ema12', 'ema26']
            df['ema12'] = talib.EMA(df['close'], timeperiod=12)
            df['ema26'] = talib.EMA(df['close'], timeperiod=26)
        else:
            logger.debug("开始计算60分钟EMA60指标")
            ema_cols = ['ema60']
            df['ema60'] = talib.EMA(df['close'], timeperiod=60)

        # Sanitize: +/-inf -> NaN, then forward/backward fill the EMA columns.
        # fillna(method=...) is deprecated and removed in modern pandas, so
        # use the explicit ffill()/bfill() accessors instead.
        df = df.replace([np.inf, -np.inf], np.nan)
        for col in ema_cols:
            df[col] = df[col].ffill().bfill()

        logger.debug(f"数据中是否还存在NaN值: {df.isna().any().any()}")

        # Sort ascending by time for the chart.
        df = df.sort_values('date')
        logger.info(f"数据时间范围: {df['date'].min()} 至 {df['date'].max()}")

        # Shape the rows for the front end.
        columns = ['open', 'close', 'high', 'low']
        numeric_keys = columns + ema_cols
        result = df[['date'] + numeric_keys].to_dict('records')
        for item in result:
            item['date'] = item['date'].strftime('%Y-%m-%d %H:%M')
            # Ensure every numeric value is a valid JSON number.
            for key in numeric_keys:
                if not np.isfinite(item[key]):
                    item[key] = None

        logger.info(f"数据处理完成,返回 {len(result)} 条记录")
        return result
    except HTTPException:
        # Bug fix: keep the deliberate 404 instead of remapping it to a 500.
        raise
    except Exception as e:
        error_msg = f"处理数据时发生错误: {str(e)}"
        logger.error(error_msg, exc_info=True)
        raise HTTPException(status_code=500, detail=error_msg)
76 |
@router.get("/15min")
async def get_15min_data():
    """Serve processed 15-minute candles with EMA12/EMA26."""
    logger.info("收到15分钟数据请求")
    csv_path = Path("data/M2501.DCE_future_15min_20240101_20251231.csv")
    logger.debug(f"15分钟数据文件路径: {csv_path.absolute()}")
    return load_and_process_data(csv_path, is_15min=True)
83 |
@router.get("/60min")
async def get_60min_data():
    """Serve processed 60-minute candles with EMA60."""
    logger.info("收到60分钟数据请求")
    csv_path = Path("data/M2501.DCE_future_60min_20240101_20251231.csv")
    logger.debug(f"60分钟数据文件路径: {csv_path.absolute()}")
    return load_and_process_data(csv_path, is_15min=False)
90 |
@router.post("/backtest")
async def backtest_strategy():
    """执行策略回测 — run the dual-timeframe trend-follow backtest."""
    try:
        logger.info("收到回测请求")

        path_15 = Path("data/M2501.DCE_future_15min_20240101_20251231.csv")
        path_60 = Path("data/M2501.DCE_future_60min_20240101_20251231.csv")

        bars_15 = pd.read_csv(path_15)
        bars_60 = pd.read_csv(path_60)
        bars_15['date'] = pd.to_datetime(bars_15['date'])
        bars_60['date'] = pd.to_datetime(bars_60['date'])

        # Fast/slow EMAs on the 15-minute bars; trend filter on the 60-minute bars.
        bars_15['ema12'] = talib.EMA(bars_15['close'], timeperiod=12)
        bars_15['ema26'] = talib.EMA(bars_15['close'], timeperiod=26)
        bars_60['ema60'] = talib.EMA(bars_60['close'], timeperiod=60)

        # Delegate signal generation and the backtest loop to the strategy class.
        strategy = TrendFollowStrategy()
        result = strategy.run_backtest(strategy.calculate_signals(bars_15, bars_60))

        logger.info("回测完成")
        return result

    except Exception as e:
        error_msg = f"回测过程中发生错误: {str(e)}"
        logger.error(error_msg, exc_info=True)
        raise HTTPException(status_code=500, detail=error_msg)
--------------------------------------------------------------------------------
/backend/services/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Business Services
3 | """
--------------------------------------------------------------------------------
/backend/services/account.py:
--------------------------------------------------------------------------------
1 | from sqlalchemy import create_engine
2 | from sqlalchemy.orm import sessionmaker
3 | from models.account import AccountDB
4 | from config import settings
5 | from utils.logger import logger
6 | import uuid
7 |
class AccountService:
    """Service managing the single simulated trading account row.

    Owns its own engine/session factory and guarantees the account row
    exists on construction.
    """

    def __init__(self):
        # Fall back to a local SQLite file when no DATABASE_URL is configured.
        self.engine = create_engine(settings.DATABASE_URL or "sqlite:///./trading.db")
        AccountDB.metadata.create_all(self.engine)
        self.SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=self.engine)
        self.logger = logger

        # Ensure the singleton account row exists.
        self._init_account()

    def _init_account(self) -> None:
        """初始化账户 — create the account row if it does not exist yet."""
        # Open the session outside the try so `db` is always bound in the
        # except/finally blocks, and close it in finally — the original
        # implementation never closed this session (connection leak) and
        # would hit NameError on `db` if SessionLocal() itself failed.
        db = self.SessionLocal()
        try:
            account = db.query(AccountDB).first()
            if not account:
                account = AccountDB(
                    initial_balance=1000000.0,    # starting capital: 1,000,000
                    current_balance=1000000.0,    # equals the initial balance
                    available_balance=1000000.0,  # all funds available at start
                    total_profit=0.0,
                    total_commission=0.0,
                    position_cost=0.0,
                    position_quantity=0
                )
                db.add(account)
                db.commit()
                self.logger.info("账户初始化成功")
        except Exception as e:
            self.logger.error(f"初始化账户失败: {e}")
            db.rollback()
            raise
        finally:
            db.close()

    def get_account(self) -> AccountDB:
        """获取账户信息 — return the (single) account row, or None."""
        db = self.SessionLocal()
        try:
            return db.query(AccountDB).first()
        except Exception as e:
            self.logger.error(f"获取账户信息失败: {e}")
            raise
        finally:
            # NOTE(review): the row is detached after close — callers should
            # only read already-loaded attributes.
            db.close()

    def update_account(self, account: AccountDB) -> None:
        """更新账户信息 — copy mutable fields from *account* onto the stored row."""
        db = self.SessionLocal()
        try:
            db_account = db.query(AccountDB).first()
            if not db_account:
                raise ValueError("账户不存在")

            db_account.current_balance = account.current_balance
            db_account.available_balance = account.available_balance
            db_account.total_profit = account.total_profit
            db_account.total_commission = account.total_commission
            db_account.position_cost = account.position_cost
            db_account.position_quantity = account.position_quantity

            db.commit()
            self.logger.info("账户信息更新成功")
        except Exception as e:
            self.logger.error(f"更新账户信息失败: {e}")
            db.rollback()
            raise
        finally:
            db.close()

    def update_balance(self, profit: float, commission: float) -> None:
        """更新账户余额 — apply realized profit and commission to the account."""
        db = self.SessionLocal()
        try:
            account = db.query(AccountDB).first()
            if not account:
                raise ValueError("账户不存在")

            account.current_balance += profit
            account.available_balance += profit
            account.total_profit += profit
            account.total_commission += commission

            db.commit()
            self.logger.info(f"账户余额更新成功: 盈亏={profit}, 手续费={commission}")
        except Exception as e:
            self.logger.error(f"更新账户余额失败: {e}")
            db.rollback()
            raise
        finally:
            db.close()
--------------------------------------------------------------------------------
/backend/services/position.py:
--------------------------------------------------------------------------------
1 | from sqlalchemy import create_engine
2 | from sqlalchemy.orm import Session, sessionmaker
3 | from models.position import PositionDB, Position, PositionCreate
4 | from utils.logger import logger
5 | from typing import List, Optional
6 | from config import settings
7 |
class PositionService:
    """Service for persisting and querying futures positions."""

    def __init__(self, db: Session = None):
        # `db` parameter kept for interface compatibility; the service always
        # builds its own engine/session factory.
        self.engine = create_engine(settings.DATABASE_URL or "sqlite:///./trading.db")
        PositionDB.metadata.create_all(self.engine)
        self.SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=self.engine)
        self.logger = logger

    def get_position(self, symbol: str) -> Optional[PositionDB]:
        """获取指定品种的持仓 — first live (open/partial_closed) position for *symbol*."""
        # Session opened outside try so `db` is always bound in finally.
        db = self.SessionLocal()
        try:
            self.logger.info(f"尝试获取持仓,品种: {symbol}")
            position = db.query(PositionDB).filter(
                PositionDB.symbol == symbol,
                PositionDB.status.in_(['open', 'partial_closed'])  # both live states
            ).first()
            self.logger.info(f"查询结果: {position}")
            if position:
                self.logger.info(f"持仓详情 - ID: {position.id}, 品种: {position.symbol}, 状态: {position.status}, 数量: {position.quantity}, 价格: {position.price}")
            return position
        except Exception as e:
            self.logger.error(f"获取持仓失败: {e}")
            return None
        finally:
            db.close()

    def create_position(self, position: PositionCreate) -> PositionDB:
        """创建新持仓 — persist a new position row and return it."""
        db = self.SessionLocal()
        try:
            db_position = PositionDB(**position.dict())
            db.add(db_position)
            db.commit()
            db.refresh(db_position)
            return db_position
        except Exception as e:
            self.logger.error(f"创建持仓失败: {e}")
            db.rollback()
            raise
        finally:
            db.close()

    def update_position(self, position: Position) -> Position:
        """更新持仓信息 — update the stored row and return a detached Position."""
        db = self.SessionLocal()
        try:
            db_position = db.query(PositionDB).filter(PositionDB.id == position.id).first()
            if db_position:
                db_position.symbol = position.symbol
                db_position.price = position.price
                db_position.quantity = position.quantity
                db_position.status = position.status
                db.commit()
                db.refresh(db_position)
                return Position(
                    id=db_position.id,
                    symbol=db_position.symbol,
                    price=db_position.price,
                    quantity=db_position.quantity,
                    status=db_position.status,
                    created_at=db_position.created_at,
                    updated_at=db_position.updated_at
                )
            else:
                raise ValueError(f"Position with id {position.id} not found")
        except Exception as e:
            self.logger.error(f"更新持仓失败: {e}")
            db.rollback()
            raise
        finally:
            db.close()

    def close_position(self, symbol: str) -> None:
        """平仓 — mark the live position for *symbol* as closed."""
        db = self.SessionLocal()
        try:
            # Bug fix: the original fetched the row via get_position(), whose
            # session was already closed, then committed a *different* session,
            # so the status change was never persisted. Query inside THIS
            # session instead.
            position = db.query(PositionDB).filter(
                PositionDB.symbol == symbol,
                PositionDB.status.in_(['open', 'partial_closed'])
            ).first()
            if position:
                position.status = 'closed'
                db.commit()
        except Exception as e:
            self.logger.error(f"平仓失败: {e}")
            db.rollback()
            raise
        finally:
            db.close()

    def delete_position(self, position_id: str) -> None:
        """删除持仓 — hard-delete a position row by id."""
        db = self.SessionLocal()
        try:
            position = db.query(PositionDB).filter(PositionDB.id == position_id).first()
            if not position:
                raise ValueError(f"持仓不存在: {position_id}")

            db.delete(position)
            db.commit()
            self.logger.info(f"删除持仓成功: {position_id}")
        except Exception as e:
            self.logger.error(f"删除持仓失败: {e}")
            db.rollback()
            raise
        finally:
            db.close()
--------------------------------------------------------------------------------
/backend/test/test.py:
--------------------------------------------------------------------------------
import akshare as ak

# Manual smoke test: fetch a real-time spot quote for one soybean-meal
# contract (M2601) from the CF market feed, unadjusted.
futures_zh_spot_df = ak.futures_zh_spot(symbol='M2601', market="CF", adjust='0')
print(futures_zh_spot_df)

# Fetch real-time quotes for every listed soybean-meal (豆粕) contract.
futures_zh_realtime_df = ak.futures_zh_realtime(symbol="豆粕")
print(futures_zh_realtime_df)
--------------------------------------------------------------------------------
/backend/tools/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Tools for fetching data from Tushare
3 | """
--------------------------------------------------------------------------------
/backend/trading.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sencloud/m_quant/e5a957986954fc95ffaa2a815ccdd1744001fac3/backend/trading.db
--------------------------------------------------------------------------------
/backend/utils/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility Functions
3 | """
--------------------------------------------------------------------------------
/backend/utils/logger.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from loguru import logger
3 | from pathlib import Path
4 |
5 | # 创建日志目录
6 | log_path = Path("logs")
7 | log_path.mkdir(exist_ok=True)
8 |
9 | # 配置日志
10 | logger.remove() # 移除默认的处理器
11 |
12 | # 添加控制台处理器
13 | logger.add(
14 | sys.stderr,
15 | format="
108 | 选择的时间范围内豆粕期货价格走势 109 |
110 |104 | 实时获取豆粕期货库存信息 105 |
106 |112 | {data?.total_inventory.toLocaleString() || '-'} 吨 113 |
114 | {data?.history_data && ( 115 | 116 | )} 117 |121 | {data?.warehouse_inventory.toLocaleString() || '-'} 吨 122 |
123 |127 | {data?.port_inventory.toLocaleString() || '-'} 吨 128 |
129 |15 | 选择要查看的时间范围 16 |
17 |最新价格
54 |55 | ¥{latestData?.close?.toFixed(2) ?? '0.00'} 56 |
57 |涨跌额(收盘价-昨结算价)
60 |61 | {pctChange} 62 |
63 |成交量
66 |67 | {(latestData?.vol ?? 0).toLocaleString()} 68 |
69 |成交额(万)
72 |73 | ¥{(latestData?.amount ?? 0).toLocaleString()} 74 |
75 |${historicalPoint.axisValue}
39 |40 | 41 | 主力合约收盘价格: ${historicalPoint.value.toFixed(2)}元/吨 42 |
43 |44 | 45 | 当前价格: ${contract.price.toFixed(2)}元/吨 46 |
47 |
43 | {children}
44 |
45 | );
46 | },
47 | h1: ({ children }) => {children}
, 51 | ul: ({ children }) =>56 | {children} 57 |58 | ), 59 | table: ({ children }) => ( 60 |
基于市场分析的交易策略建议
85 |42 | 豆粕市场核心驱动因子深度分析 43 |
44 |14 | 新致量化策略分析平台(以下简称"本平台")提供的所有信息、分析、策略建议和其他内容仅供参考,不构成投资建议或交易指导。用户在做出任何投资决策前,应当咨询专业金融顾问。 15 |
16 |21 | 股票、期货、基金和衍生品交易涉及重大风险,可能导致严重的财务损失。本平台提供的策略和分析不保证能获得投资收益或避免损失。用户在交易前应充分了解相关风险,并根据自身财务状况和风险承受能力作出决策。 22 |
23 |28 | 尽管我们努力确保本平台提供的信息准确可靠,但我们不对信息的准确性、完整性、时效性或适用性做出任何保证。市场情况瞬息万变,任何分析和预测可能会因诸多因素而失效。 29 |
30 |35 | 使用本平台涉及互联网和电子系统的使用,可能会受到硬件故障、软件问题、连接中断、系统延迟等技术因素的影响。用户应当意识到这些风险,并准备好应对可能出现的问题。 36 |
37 |42 | 本平台可能包含来自第三方的内容或链接到第三方网站。我们不对这些第三方内容的准确性或可靠性负责,也不对用户访问这些链接可能造成的任何损失或损害承担责任。 43 |
44 |49 | 本平台使用的分析模型和算法基于历史数据和特定假设,存在固有局限性。市场行为可能会偏离历史模式,导致模型预测失效。用户不应完全依赖这些模型做出决策。 50 |
51 |56 | 在法律允许的最大范围内,本平台及其运营者、员工、合作伙伴不对用户因使用或依赖本平台提供的信息而产生的任何直接、间接、附带、特殊或后果性损害承担责任,包括但不限于财务损失、利润损失、业务中断等。 57 |
58 |63 | 用户应当了解并遵守与股票、期货、基金和衍生品交易相关的所有适用法律、法规和监管要求。本平台不对用户的交易行为是否符合相关法律法规负责,用户应自行承担合规责任。 64 |
65 |70 | 用户确认其具备使用本平台服务和进行相关交易的法律资格,包括但不限于达到法定年龄、具备足够的风险识别和承担能力等。 71 |
72 |77 | 使用本平台即表示您已阅读、理解并接受本免责声明的所有条款。如果您不同意这些条款,请勿使用本平台及其提供的服务。 78 |
79 |44 | 基于基本面的豆粕策略分析 45 |
46 |14 | 新致量化策略分析平台(以下简称"本平台")尊重并保护用户隐私。我们可能收集以下信息: 15 |
16 |我们使用收集的信息用于:
27 |我们不会出售或出租您的个人信息给第三方。在以下情况下,我们可能会分享您的信息:
39 |49 | 我们采取合理的技术和组织措施保护您的个人信息,防止未经授权的访问、披露或滥用。然而,没有任何互联网传输或电子存储方法是100%安全的。 50 |
51 |56 | 我们使用Cookie和类似技术来记住您的偏好设置、分析使用模式、优化服务体验。您可以通过浏览器设置管理Cookie偏好。 57 |
58 |63 | 根据适用的数据保护法,您有权访问、更正、删除您的个人信息,并限制或反对其处理。如需行使这些权利,请联系我们。 64 |
65 |70 | 我们可能会更新本隐私政策以反映服务变化或法律要求。更新后的政策将在本页面公布,重大变更时我们会通知您。 71 |
72 |
77 | 如果您对本隐私政策有任何问题或疑虑,请通过以下方式联系我们:
78 | 邮箱:support@soymeal-strategy.com
79 | 电话:400-123-4567
80 |
15 | 基于 DeepSeek 的豆粕市场分析和交易策略研究 16 |
17 |31 | 基于市场数据的交易策略分析和建议 32 |
33 |50 | 豆粕市场核心驱动因子的深度分析 51 |
52 |69 | 基于基本面的豆粕策略分析 70 |
71 |98 | 基于 DeepSeek 的豆粕市场交易策略分析 99 |
100 |14 | 欢迎使用新致量化策略分析平台(以下简称"本平台"或"我们")。通过访问或使用我们的服务,您同意受本服务条款的约束。如果您不同意这些条款,请勿使用本平台。 15 |
16 |21 | 本平台提供豆粕市场分析、交易策略推荐、风险管理工具等服务。我们保留随时修改、暂停或终止部分或全部服务的权利,恕不另行通知。 22 |
23 |28 | 您可能需要创建账户才能使用某些服务功能。您应当: 29 |
30 |41 | 使用本平台时,您同意不会: 42 |
43 |56 | 本平台的所有内容,包括但不限于文本、图形、图像、数据、分析模型、软件等,均受知识产权法保护,归本平台或其许可方所有。未经我们明确书面许可,您不得复制、修改、分发、销售或利用这些内容。 57 |
58 |63 | 本平台提供的所有信息和分析仅供参考,不构成投资建议,不保证准确性、完整性或时效性。用户应当自行承担使用本平台进行投资决策的风险。我们对因使用本平台导致的任何损失不承担责任。 64 |
65 |70 | 在法律允许的最大范围内,本平台对于因使用或无法使用本服务而导致的任何直接、间接、附带、特殊或后果性损害不承担责任,即使我们已被告知此类损害的可能性。 71 |
72 |77 | 我们保留随时修改本服务条款的权利。修改后的条款将在本平台上发布。您继续使用本平台将被视为接受修改后的条款。 78 |
79 |84 | 我们保留因任何理由随时终止您使用本平台的权利,无需事先通知。一旦终止,您访问本平台的权利将立即停止。 85 |
86 |91 | 本服务条款受中华人民共和国法律管辖,并按其解释。与本条款相关的任何争议应提交至本平台所在地有管辖权的法院解决。 92 |
93 |