├── main.py
├── res
│   ├── img_01.png
│   ├── img_03.png
│   ├── img_04.png
│   ├── weixin.jpg
│   ├── img_02_1.png
│   └── img_02_2.png
├── .gitignore
├── ui
│   ├── start_ui.sh
│   ├── README.md
│   ├── start_ui.py
│   ├── config.py
│   ├── components
│   │   ├── page_cache_management.py
│   │   ├── page_token_stats.py
│   │   ├── page_settings.py
│   │   └── page_common.py
│   └── app.py
├── version.py
├── llm
│   ├── __init__.py
│   ├── usage_logger.py
│   └── openai_client.py
├── dev.sh
├── dev-start.sh
├── backtesting
│   ├── __init__.py
│   ├── get_stock_data.py
│   └── backtest.py
├── docker-compose.dev.yml
├── docker-compose.yml
├── LICENSE
├── stock
│   ├── __init__.py
│   ├── analysis_prompts.py
│   ├── chip_data_cache.py
│   ├── etf_holdings_fetcher.py
│   ├── stock_code_map.py
│   ├── stock_report.py
│   └── stock_utils.py
├── config_default.toml
├── CHANGELOG.md
├── Dockerfile
├── tests
│   ├── test_market_report.py
│   └── test_stock_report.py
├── utils
│   ├── draw.py
│   ├── format_utils.py
│   ├── string_utils.py
│   ├── risk_metrics.py
│   ├── news_tools.py
│   └── report_utils.py
├── market
│   ├── market_ai_analysis.py
│   ├── market_report.py
│   └── kline_data_manager.py
├── config_manager.py
├── README.md
└── requirements.txt
/main.py:
--------------------------------------------------------------------------------
1 | # to test
2 |
3 | """
4 | 主入口,串联数据获取和大模型分析
5 | """
6 |
--------------------------------------------------------------------------------
/res/img_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xieyan0811/xystock/HEAD/res/img_01.png
--------------------------------------------------------------------------------
/res/img_03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xieyan0811/xystock/HEAD/res/img_03.png
--------------------------------------------------------------------------------
/res/img_04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xieyan0811/xystock/HEAD/res/img_04.png
--------------------------------------------------------------------------------
/res/weixin.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xieyan0811/xystock/HEAD/res/weixin.jpg
--------------------------------------------------------------------------------
/res/img_02_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xieyan0811/xystock/HEAD/res/img_02_1.png
--------------------------------------------------------------------------------
/res/img_02_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/xieyan0811/xystock/HEAD/res/img_02_2.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | chroma_cache
2 | .pytest_cache
3 | config.toml
4 | #*.ipynb
5 | *.csv
6 | *.log
7 | *.pyc
8 | data/
9 | reports/
10 | notebook/
11 |
--------------------------------------------------------------------------------
/ui/start_ui.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # XY Stock Streamlit UI 启动脚本
4 | # 在8811端口启动Streamlit应用
5 |
6 | echo "正在启动 XY Stock Streamlit UI..."
7 | echo "访问地址: http://localhost:8811"
8 | echo "按 Ctrl+C 停止服务"
9 | echo ""
10 |
11 | # 切换到项目根目录
12 | cd "$(dirname "$0")/.."
13 |
14 | # 启动streamlit应用
15 | streamlit run ui/app.py --server.port 8811 --server.address 0.0.0.0 --server.headless true
16 |
--------------------------------------------------------------------------------
/version.py:
--------------------------------------------------------------------------------
1 | """
2 | XY Stock 版本信息配置
3 | """
4 |
5 | __version__ = "1.2.0"
6 | __app_name__ = "XY Stock"
7 | __full_version__ = f"{__app_name__} v{__version__}"
8 |
9 | def get_version():
10 | """获取版本号"""
11 | return __version__
12 |
13 | def get_app_name():
14 | """获取应用名称"""
15 | return __app_name__
16 |
17 | def get_full_version():
18 | """获取完整版本信息"""
19 | return __full_version__
20 |
--------------------------------------------------------------------------------
/llm/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | LLM模块 - 大语言模型相关功能
3 | 包含OpenAI客户端、使用记录等功能
4 | """
5 |
6 | from .openai_client import OpenAIClient
7 | from .usage_logger import UsageLogger
8 |
9 | __all__ = [
10 | 'OpenAIClient',
11 | 'UsageLogger'
12 | ]
13 |
14 | # 版本信息
15 | __version__ = '1.0.0'
16 | __author__ = 'XYStock Team'
17 | __description__ = 'Enhanced OpenAI client with usage tracking and configuration management'
18 |
--------------------------------------------------------------------------------
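补充示例(非仓库源码):下面是一个最小调用示意,`client.chat(...)` 的参数形式参考了 market/market_ai_analysis.py 中的实际用法;其中的提示词内容和参数值仅为举例。

```python
# 示意代码:OpenAIClient 的调用方式参考 market/market_ai_analysis.py,提示词与参数值仅为举例
from llm import OpenAIClient

client = OpenAIClient()
response = client.chat(
    messages=[
        {"role": "system", "content": "你是一位资深的市场分析师。"},
        {"role": "user", "content": "用一句话概括当前A股市场情绪。"},
    ],
    temperature=0.3,
    model_type="inference",  # "inference" 或 "default",对应 config_default.toml 中的模型配置
)
print(response)
```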
/dev.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # 开发环境启动脚本
4 |
5 | echo "启动开发环境容器..."
6 | docker-compose -f docker-compose.dev.yml up -d
7 |
8 | echo "等待容器启动..."
9 | sleep 3
10 |
11 | echo "进入开发容器..."
12 | echo "在容器内,你可以运行以下命令启动应用:"
13 | echo " python -m streamlit run ui/app.py --server.address=0.0.0.0 --server.port=8811"
14 | echo ""
15 | echo "或者运行其他 Python 脚本进行开发调试"
16 | echo ""
17 |
18 | # 进入容器的交互式终端
19 | docker exec -it xystock-dev bash
20 |
--------------------------------------------------------------------------------
/dev-start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "🚀 启动开发环境..."
4 |
5 | # 检查是否已经运行
6 | if [ "$(docker ps -q -f name=xystock-dev)" ]; then
7 | echo "📦 容器已经在运行,直接进入..."
8 | docker exec -it xystock-dev bash
9 | else
10 | echo "📦 启动新的开发容器..."
11 | docker-compose -f docker-compose.dev.yml up -d
12 |
13 | echo "⏳ 等待容器启动..."
14 | sleep 3
15 |
16 | echo "🔑 进入开发容器..."
17 | docker exec -it xystock-dev bash
18 | fi
19 |
--------------------------------------------------------------------------------
/backtesting/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Backtesting module
3 | Provides backtesting framework and visualization tools
4 | """
5 |
6 | from .backtest import SimpleBacktest
7 | from .visualizer import (
8 | BacktestVisualizer,
9 | plot_backtest_results,
10 | plot_single_strategy_analysis,
11 | plot_trade_details,
12 | plot_monthly_performance,
13 | check_font_setup
14 | )
15 |
16 | __all__ = [
17 | 'SimpleBacktest',
18 | 'BacktestVisualizer',
19 | 'plot_backtest_results',
20 | 'plot_single_strategy_analysis',
21 | 'plot_trade_details',
22 | 'plot_monthly_performance',
23 | 'check_font_setup'
24 | ]
25 |
--------------------------------------------------------------------------------
/docker-compose.dev.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | # XY Stock Web 应用服务 - 开发模式
5 | xystock-web:
6 | #build: .
7 | image: xieyan800811/xystock:latest
8 | container_name: xystock-web
9 | ports:
10 | - "8811:8811"
11 | volumes:
12 | # 整个项目目录映射到容器 - 便于开发调试
13 | - .:/app
14 | environment:
15 | # 设置时区
16 | - TZ=Asia/Shanghai
17 | # Python相关环境变量
18 | - PYTHONUNBUFFERED=1
19 | - PYTHONDONTWRITEBYTECODE=1
20 | # Streamlit配置
21 | - STREAMLIT_SERVER_ADDRESS=0.0.0.0
22 | - STREAMLIT_SERVER_PORT=8811
23 | - STREAMLIT_SERVER_HEADLESS=true
24 | - STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
25 | # 开发模式:保持容器运行但不自动启动应用
26 | tty: true
27 | stdin_open: true
28 | command: /bin/bash # 覆盖 Dockerfile 中的 CMD,启动为交互式 bash
29 | restart: unless-stopped
30 | networks:
31 | - xystock-network
32 |
33 | networks:
34 | xystock-network:
35 | driver: bridge
36 |
37 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | # XY Stock Web 应用服务
5 | xystock-web:
6 | #build: .
7 | image: xieyan800811/xystock:latest
8 | container_name: xystock-web
9 | ports:
10 | - "8811:8811"
11 | volumes:
12 | # 整个项目目录映射到容器 - 便于开发调试
13 | - .:/app
14 | environment:
15 | # 设置时区
16 | - TZ=Asia/Shanghai
17 | # Python相关环境变量
18 | - PYTHONUNBUFFERED=1
19 | - PYTHONDONTWRITEBYTECODE=1
20 | # Streamlit配置
21 | - STREAMLIT_SERVER_ADDRESS=0.0.0.0
22 | - STREAMLIT_SERVER_PORT=8811
23 | - STREAMLIT_SERVER_HEADLESS=true
24 | - STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
25 | restart: unless-stopped
26 | healthcheck:
27 | test: ["CMD", "curl", "-f", "http://localhost:8811/_stcore/health"]
28 | interval: 30s
29 | timeout: 10s
30 | retries: 3
31 | start_period: 30s
32 | networks:
33 | - xystock-network
34 |
35 | networks:
36 | xystock-network:
37 | driver: bridge
38 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2025 xieyan0811
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/stock/__init__.py:
--------------------------------------------------------------------------------
1 | # 后面可能去掉
2 |
3 | """
4 | xystock 数据获取模块
5 |
6 | 该模块提供了统一的股票数据获取接口,支持A股数据:
7 | - efinance: A股数据(东方财富)
8 |
9 | 使用示例:
10 | # 推荐用法:使用全局实例
11 | from xystock.data import data_manager
12 |
13 | # 获取实时行情
14 | quote = data_manager.get_realtime_quote("ETF")
15 | quote = data_manager.get_realtime_quote("沪深A股")
16 |
17 | # 获取K线数据
18 | kline = data_manager.get_kline_data("600519", KLineType.DAY, 30)
19 |
20 | # 方式2:创建新实例
21 | from xystock.data import StockDataFetcher, KLineType
22 |
23 | fetcher = StockDataFetcher()
24 | fetcher.initialize()
25 | quote = fetcher.get_realtime_quote("ETF")
26 | """
27 |
28 | from utils.kline_cache import KLineData, KLineType
29 | from stock.stock_data_fetcher import (
30 | StockDataFetcher,
31 | RealTimeQuote,
32 | DataFetcherError,
33 | DataFetcherNotAvailableError,
34 | InvalidSymbolError,
35 | data_manager # 全局实例
36 | )
37 |
38 | __all__ = [
39 | # 数据结构
40 | "KLineData",
41 | "RealTimeQuote",
42 | "KLineType",
43 |
44 | # 异常类
45 | "DataFetcherError",
46 | "DataFetcherNotAvailableError",
47 | "InvalidSymbolError",
48 |
49 | # 具体实现
50 | "StockDataFetcher",
51 |
52 | # 全局实例
53 | "data_manager",
54 | ]
55 |
56 | __version__ = "1.0.0"
57 |
--------------------------------------------------------------------------------
/config_default.toml:
--------------------------------------------------------------------------------
1 | [LLM_OPENAI]
2 | # OpenAI API 配置
3 | API_KEY = "sk-"
4 | BASE_URL = ""
5 | TIMEOUT = 60 # 请求超时时间(秒)
6 | MAX_RETRIES = 3 # 最大重试次数
7 |
8 | # 默认模型配置
9 | DEFAULT_MODEL = "deepseek-chat"
10 | INFERENCE_MODEL = "deepseek-chat"
11 | DEFAULT_TEMPERATURE = 0.7
12 |
13 | [LLM_LOGGING]
14 | # Token使用记录配置
15 | USAGE_LOG_FILE = "logs/openai_usage.csv"
16 | ENABLE_LOGGING = true
17 | LOG_LEVEL = "INFO"
18 |
19 | [LLM_CACHE]
20 | # 缓存配置(可选)
21 | ENABLE_CACHE = false
22 | CACHE_TTL = 3600 # 缓存时间(秒)
23 |
24 | [AI_ANALYSIS]
25 | # AI分析配置
26 | [AI_ANALYSIS.TECHNICAL]
27 | TEMPERATURE = 0.5
28 | MODEL_TYPE = "inference"
29 | CACHE_FILENAME = "req_tech.txt"
30 |
31 | [AI_ANALYSIS.NEWS]
32 | TEMPERATURE = 0.7
33 | MODEL_TYPE = "default"
34 | CACHE_FILENAME = "req_news.txt"
35 |
36 | [AI_ANALYSIS.CHIP]
37 | TEMPERATURE = 0.5
38 | MODEL_TYPE = "default"
39 | CACHE_FILENAME = "req_chip.txt"
40 |
41 | [AI_ANALYSIS.FUNDAMENTAL]
42 | TEMPERATURE = 0.6
43 | MODEL_TYPE = "default"
44 | CACHE_FILENAME = "req_basic_info.txt"
45 |
46 | [AI_ANALYSIS.COMPREHENSIVE]
47 | TEMPERATURE = 0.4
48 | MODEL_TYPE = "default"
49 | CACHE_FILENAME = "req.txt"
50 |
51 | [MARKET]
52 | # 市场相关配置
53 | ENABLE_NEWS = true # 是否启用市场新闻功能
54 |
55 | [ANALYSIS]
56 | # 分析偏好设置
57 | RISK_PREFERENCE = "neutral" # 分析风险偏好: neutral, conservative, aggressive, custom
58 | CUSTOM_PRINCIPLES = "" # 自定义核心原则(当RISK_PREFERENCE为custom时使用)
59 |
60 | [USER_PROFILE]
61 | # 用户画像配置
62 | RAW = "" # 用户画像描述
63 | MISTAKES = [] # 用户常犯错误列表
64 |
--------------------------------------------------------------------------------
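补充示例(非仓库源码):上面的各个配置节在运行时由 config_manager.ConfigManager 读取,`[SECTION]`/键名对应点号分隔的查询路径。以下为示意,假定项目根目录下已有从 config_default.toml 复制出的 config.toml:

```python
# 示意代码:按点号路径读取上述配置节(假定 config.toml 已从 config_default.toml 复制生成)
from config_manager import config

timeout = config.get('LLM_OPENAI.TIMEOUT', 60)                    # [LLM_OPENAI] 的 TIMEOUT
tech_temp = config.get('AI_ANALYSIS.TECHNICAL.TEMPERATURE', 0.5)  # 嵌套节 [AI_ANALYSIS.TECHNICAL]
risk_pref = config.get('ANALYSIS.RISK_PREFERENCE', 'neutral')     # [ANALYSIS] 的 RISK_PREFERENCE
print(timeout, tech_temp, risk_pref)
```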
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # CHANGELOG
2 |
3 | 本文档记录了项目的所有重要变更。
4 |
5 | ## [1.2.0] - 2025-09-30
6 |
7 | ### Added
8 |
9 | - 新增市场情绪数据获取与分析功能
10 | - 新增股票股息分红数据获取与分析功能
11 | - 新增ETF持仓数据获取与分析功能
12 | - 新增指数与个股风险指标计算与展示功能
13 | - 新增指数相关新闻数据获取与展示功能
14 | - 新增指数K线图可视化展示功能
15 | - 新增多个主要指数分析支持(上证指数、深证成指、创业板指、科创50等)
16 | - 新增风险偏好设置
17 | - 增强个股基本面分析,新增盈利能力、偿债能力、运营能力、成长能力等关键指标
18 | - 新增回测模块及可视化工具(使用方法请参考demo.ipynb)
19 | - 新增指数报告导出界面
20 | - 增强中文字体支持,改善图表显示效果
21 | - 新增指数K线数据缓存机制
22 |
23 | ### Changed
24 |
25 | - 优化用户界面描述,提升移动端显示效果
26 | - 改进指数与个股分析相关的AI提示词
27 | - 重构市场报告及股票分析页面
28 | - 优化数据格式化、缓存加载保存及综合分析缓存逻辑
29 | - 整合重复代码,简化界面逻辑
30 | - 重新调整项目目录结构
31 |
32 | ### Fixed
33 |
34 | - 修复缓存数据丢失问题,完善异常处理机制
35 | - 优化报告生成时的异常容错处理,自动跳过无效字段
36 | - 进一步提升代码可读性和可维护性
37 | - 修复无效数据缓存问题,确保数据质量
38 | - 将当日K线数据作为分析条件,解决收盘前无法获取当天信息的问题
39 |
40 | ### Removed
41 |
42 | - 清理冗余代码和调试信息
43 |
44 |
45 | ## [1.1.0] - 2025-09-05
46 |
47 | ### Added
48 |
49 | - 添加大盘报告生成功能,支持生成详细的市场分析报告
50 | - 新增无界面脚本支持,可批量生成大盘和个股报告
51 | - 在个股AI的各个子分析界面中加入当前股价作为参考依据
52 | - 在个股AI技术分析中加入风险参数(VaR、回撤等指标)
53 | - 发布 Docker Hub 版本,方便部署和分发
54 |
55 | ### Changed
56 |
57 | - 修改了所有股票预测相关的提示词
58 | - 简化股票相关函数接口,提升易用性和可维护性
59 | - 优化缓存文件内容结构,减少存储空间占用和文件复杂度
60 | - 优化重复调用逻辑,提升系统性能
61 | - 改进清除缓存的逻辑和用户界面
62 |
63 | ### Fixed
64 |
65 | - 修复界面显示问题:单位格式、空值处理、容错机制
66 |
67 | ### Removed
68 |
69 | - 清理冗余代码和无用注释
70 |
71 |
72 | ## [1.0.0] - 2025-09-01
73 |
74 | ### Added
75 |
76 | - 项目初始版本发布
77 | - 支持股票和基金的实时行情获取
78 | - 集成大模型进行智能分析
79 | - 提供大盘分析和个股分析功能
80 | - 支持多种AI模型(OpenAI、阿里百炼、DeepSeek等)
81 | - Docker容器化部署支持
82 | - Web界面操作
83 |
--------------------------------------------------------------------------------
/ui/README.md:
--------------------------------------------------------------------------------
1 | # XY Stock Streamlit UI
2 |
3 | 基于 Streamlit 构建的股票分析系统Web界面。
4 |
5 | ## 功能特性
6 |
7 | - 🎯 支持多市场查询:A股、港股、指数、基金
8 | - 💻 简洁直观的用户界面
9 | - 📊 实时股票信息展示
10 | - 🚀 快速响应的查询体验
11 |
12 | ## 界面结构
13 |
14 | - **左侧边栏**: 功能菜单和系统信息
15 | - **右侧主区域**:
16 | - 市场类型选择下拉框
17 | - 股票代码输入框
18 | - 查询结果显示区域
19 |
20 | ## 启动方式
21 |
22 | ### 方式1: 使用Python脚本启动
23 | ```bash
24 | cd /exports/stock/mine/xystock
25 | python ui/start_ui.py
26 | ```
27 |
28 | ### 方式2: 使用Shell脚本启动
29 | ```bash
30 | cd /exports/stock/mine/xystock
31 | ./ui/start_ui.sh
32 | ```
33 |
34 | ### 方式3: 直接使用Streamlit命令
35 | ```bash
36 | cd /exports/stock/mine/xystock
37 | streamlit run ui/app.py --server.port 8811 --server.address 0.0.0.0
38 | ```
39 |
40 | ## 访问地址
41 |
42 | 启动成功后,在浏览器中访问:
43 | - http://localhost:8811
44 | - http://<服务器IP>:8811(局域网内其它设备访问)
45 |
46 | ## 支持的股票代码示例
47 |
48 | ### A股
49 | - 000001 (平安银行)
50 | - 000002 (万科A)
51 | - 600000 (浦发银行)
52 | - 600036 (招商银行)
53 |
54 | ### 港股
55 | - 00700 (腾讯控股)
56 | - 00941 (中国移动)
57 | - 02318 (中国平安)
58 |
59 | ### 指数
60 | - 000001 (上证指数)
61 | - 399001 (深证成指)
62 | - 399006 (创业板指)
63 |
64 | ### 基金
65 | - 159915 (创业板ETF)
66 | - 510300 (沪深300ETF)
67 | - 512100 (中证1000ETF)
68 |
69 | ## 注意事项
70 |
71 | 1. 当前使用模拟数据进行演示
72 | 2. 实际部署时需要连接真实的股票数据源
73 | 3. 确保系统已安装所需的Python依赖包
74 |
75 | ## 技术栈
76 |
77 | - **前端框架**: Streamlit
78 | - **后端语言**: Python 3.x
79 | - **数据源**: 可配置多种股票数据API
80 | - **部署端口**: 8811
81 |
82 | ## 目录结构
83 |
84 | ```
85 | ui/
86 | ├── app.py # 主应用文件
87 | ├── config.py # 配置文件
88 | ├── start_ui.py # Python启动脚本
89 | ├── start_ui.sh # Shell启动脚本
90 | └── README.md # 说明文档
91 | ```
92 |
--------------------------------------------------------------------------------
/ui/start_ui.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | XY Stock Streamlit UI 启动脚本
4 | 在8811端口启动Streamlit应用
5 | """
6 |
7 | import subprocess
8 | import sys
9 | import os
10 | from pathlib import Path
11 |
12 | def main():
13 | """启动Streamlit应用"""
14 |
15 | project_root = Path(__file__).parent.parent
16 | ui_file = project_root / "ui" / "app.py"
17 |
18 | print("🚀 正在启动 XY Stock Streamlit UI...")
19 | print(f"📂 项目路径: {project_root}")
20 | print(f"🌐 访问地址: http://localhost:8811")
21 | print("⏹️ 按 Ctrl+C 停止服务")
22 | print("-" * 50)
23 |
24 | try:
25 | import streamlit
26 | print(f"✅ Streamlit 版本: {streamlit.__version__}")
27 | except ImportError:
28 | print("❌ 错误: 未安装 Streamlit")
29 | print("请运行: pip install streamlit")
30 | return 1
31 |
32 | if not ui_file.exists():
33 | print(f"❌ 错误: 应用文件不存在: {ui_file}")
34 | return 1
35 |
36 | cmd = [
37 | sys.executable, "-m", "streamlit", "run",
38 | str(ui_file),
39 | "--server.port", "8811",
40 | "--server.address", "0.0.0.0",
41 | "--server.headless", "true"
42 | ]
43 |
44 | try:
45 | os.chdir(project_root)
46 | subprocess.run(cmd, check=True)
47 |
48 | except KeyboardInterrupt:
49 | print("\n⏹️ 用户中断,正在停止服务...")
50 | return 0
51 | except subprocess.CalledProcessError as e:
52 | print(f"❌ 启动失败: {e}")
53 | return 1
54 | except Exception as e:
55 | print(f"❌ 未知错误: {e}")
56 | return 1
57 |
58 | if __name__ == "__main__":
59 | sys.exit(main())
60 |
--------------------------------------------------------------------------------
/stock/analysis_prompts.py:
--------------------------------------------------------------------------------
1 | """
2 | 股票分析提示词配置
3 | 定义不同风险偏好下的核心原则提示词
4 | """
5 |
6 | # 中性风格(默认)- 平衡风险与机会
7 | PROMPT_NEUTRAL = """核心原则:
8 | - 诚实第一:如实、直接地指出股票的优缺点,避免任何客套和模糊表述。
9 | - 客观判断:全面评估正面和负面信号,既要警惕风险,也要把握机会。
10 | - 操作明确:当出现合理买入机会时,应明确给出买入建议及理由;如不建议买入,也要直接说明原因。"""
11 |
12 | # 保守风格 - 本金安全优先
13 | PROMPT_CONSERVATIVE = """核心原则:
14 | - 本金安全优先:始终将用户资金安全放在首位,宁可错过机会,也要避免本金出现重大损失。
15 | - 严格风控:对所有潜在风险保持高度警惕,遇到业绩下滑、财务异常、行业衰退等负面信号时,优先建议回避或观望。
16 | - 谨慎操作:只有在风险极低、机会明确时才建议买入,避免激进操作。"""
17 |
18 | # 激进风格 - 成长机会优先
19 | PROMPT_AGGRESSIVE = """核心原则:
20 | - 积极把握成长机会:优先关注具备高成长性、行业领先、创新驱动的标的,敢于在合理风险下抓住投资机会。
21 | - 适度承担风险:在风险可控的前提下,勇于布局潜力股和阶段性热点,追求超额收益。
22 | - 灵活操作:遇到明显的上涨信号或重大利好时,及时给出买入或加仓建议,避免因过度谨慎错失良机。"""
23 |
24 | # 风险偏好选项映射
25 | RISK_PREFERENCE_PROMPTS = {
26 | 'neutral': PROMPT_NEUTRAL,
27 | 'conservative': PROMPT_CONSERVATIVE,
28 | 'aggressive': PROMPT_AGGRESSIVE
29 | }
30 |
31 | # 风险偏好描述
32 | RISK_PREFERENCE_DESCRIPTIONS = {
33 | 'neutral': '中性(平衡)- 客观评估正负信号,既警惕风险也把握机会',
34 | 'conservative': '保守(稳健)- 本金安全优先,严格风控,谨慎操作',
35 | 'aggressive': '激进(成长)- 积极把握成长机会,适度承担风险',
36 | 'custom': '自定义 - 使用自定义核心原则'
37 | }
38 |
39 | def get_core_principles(risk_preference: str, custom_principles: str = "") -> str:
40 | """
41 | 根据风险偏好获取核心原则
42 |
43 | Args:
44 | risk_preference: 风险偏好 ('neutral', 'conservative', 'aggressive', 'custom')
45 | custom_principles: 自定义核心原则(当risk_preference为'custom'时使用)
46 |
47 | Returns:
48 | 对应的核心原则提示词
49 | """
50 | if risk_preference == 'custom' and custom_principles.strip():
51 | return custom_principles
52 |
53 | return RISK_PREFERENCE_PROMPTS.get(risk_preference, PROMPT_NEUTRAL)
54 |
--------------------------------------------------------------------------------
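补充示例(非仓库源码):上述接口的最小用法示意,自定义文本仅为举例。

```python
# 示意代码:按风险偏好选取核心原则提示词
from stock.analysis_prompts import get_core_principles, RISK_PREFERENCE_DESCRIPTIONS

print(RISK_PREFERENCE_DESCRIPTIONS['conservative'])
principles = get_core_principles('conservative')                # 使用内置预设
custom = get_core_principles('custom', '只做长线,不追热点')     # 非空自定义文本优先生效
unknown = get_core_principles('unknown_style')                  # 未知取值回退到中性风格
```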
/ui/config.py:
--------------------------------------------------------------------------------
1 | """
2 | UI配置文件
3 | """
4 | import sys
5 | import os
6 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
7 | from version import get_version, get_app_name, get_full_version
8 |
9 | # 版本信息
10 | VERSION = get_version()
11 | APP_NAME = get_app_name()
12 | FULL_VERSION = get_full_version()
13 |
14 | # Streamlit 应用配置
15 | STREAMLIT_CONFIG = {
16 | "port": 8811,
17 | "host": "0.0.0.0",
18 | "headless": True,
19 | "title": "XY Stock 股票分析系统"
20 | }
21 |
22 | # 市场类型配置
23 | MARKET_TYPES = [
24 | "A股",
25 | "港股",
26 | "ETF"
27 | ]
28 |
29 | # 股票代码示例
30 | STOCK_CODE_EXAMPLES = {
31 | "A股": ["000001", "000002", "600000", "600036"],
32 | "港股": ["00700", "00941", "02318"],
33 | "ETF": ["159915", "510300", "512100"]
34 | }
35 |
36 | # UI主题配置
37 | UI_THEME = {
38 | "primary_color": "#1f77b4",
39 | "background_color": "#ffffff",
40 | "secondary_background_color": "#f0f2f6",
41 | "text_color": "#262730"
42 | }
43 |
44 | # 关注的指数配置
45 | FOCUS_INDICES = [
46 | "上证指数",
47 | "深证成指",
48 | "沪深300",
49 | "中证500",
50 | "中证1000",
51 | "中证2000",
52 | "北证50",
53 | "创业板指",
54 | "科创50"
55 | ]
56 |
57 | # 指数代码映射
58 | INDEX_CODE_MAPPING = {
59 | '上证指数': '000001',
60 | '深证成指': '399001',
61 | '创业板指': '399006',
62 | '沪深300': '000300',
63 | '中证500': '000905',
64 | '科创50': '000688',
65 | '中证1000': '000852',
66 | '中证2000': '932000',
67 | '北证50': '899050'
68 | }
69 |
70 | # 指数符号映射(用于akshare查询)
71 | INDEX_SYMBOL_MAPPING = {
72 | '上证指数': 'sh000001',
73 | '深证成指': 'sz399001',
74 | '创业板指': 'sz399006',
75 | '沪深300': 'sh000300',
76 | '中证500': 'sh000905',
77 | '科创50': 'sh000688',
78 | '中证1000': 'sh000852',
79 | '中证2000': 'sh000932',
80 | '北证50': 'bj899050'
81 | }
82 |
--------------------------------------------------------------------------------
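补充示例(非仓库源码):下面用一个最小示意说明上面三个配置项如何配合使用,指数名称取自 FOCUS_INDICES。

```python
# 示意代码:由关注指数名称解析出指数代码和 akshare 查询符号
from ui.config import FOCUS_INDICES, INDEX_CODE_MAPPING, INDEX_SYMBOL_MAPPING

name = "沪深300"
assert name in FOCUS_INDICES
code = INDEX_CODE_MAPPING[name]      # '000300'
symbol = INDEX_SYMBOL_MAPPING[name]  # 'sh000300',用于 akshare 查询
print(name, code, symbol)
```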
/Dockerfile:
--------------------------------------------------------------------------------
1 | # 使用官方Python镜像
2 | FROM python:3.11-slim
3 |
4 | # 安装uv包管理器
5 | RUN pip install -i https://mirrors.aliyun.com/pypi/simple uv
6 |
7 | WORKDIR /app
8 |
9 | RUN mkdir -p /app/data /app/logs
10 |
11 | ENV PYTHONUNBUFFERED=1 \
12 | PYTHONDONTWRITEBYTECODE=1
13 |
14 | # 配置阿里云镜像源
15 | RUN echo 'deb http://mirrors.aliyun.com/debian/ bookworm main' > /etc/apt/sources.list && \
16 | echo 'deb-src http://mirrors.aliyun.com/debian/ bookworm main' >> /etc/apt/sources.list && \
17 | echo 'deb http://mirrors.aliyun.com/debian/ bookworm-updates main' >> /etc/apt/sources.list && \
18 | echo 'deb-src http://mirrors.aliyun.com/debian/ bookworm-updates main' >> /etc/apt/sources.list && \
19 | echo 'deb http://mirrors.aliyun.com/debian-security bookworm-security main' >> /etc/apt/sources.list && \
20 | echo 'deb-src http://mirrors.aliyun.com/debian-security bookworm-security main' >> /etc/apt/sources.list
21 |
22 | # 安装基础系统依赖 (暂不安装 weasyprint)
23 | RUN apt-get update && apt-get install -y --no-install-recommends \
24 | build-essential \
25 | xvfb \
26 | fonts-wqy-zenhei \
27 | fonts-wqy-microhei \
28 | fonts-liberation \
29 | pandoc \
30 | procps \
31 | git \
32 | wget \
33 | && rm -rf /var/lib/apt/lists/*
34 |
35 | # 生成Xvfb虚拟显示器的启动脚本(需要时在容器内通过 start-xvfb.sh 启动)
36 | RUN echo '#!/bin/bash\nXvfb :99 -screen 0 1024x768x24 -ac +extension GLX &\nexport DISPLAY=:99\nexec "$@"' > /usr/local/bin/start-xvfb.sh \
37 | && chmod +x /usr/local/bin/start-xvfb.sh
38 |
39 | # 复制requirements.txt文件
40 | COPY requirements.txt .
41 |
42 | # 多源轮询安装Python依赖
43 | RUN set -e; \
44 | for src in \
45 | https://mirrors.aliyun.com/pypi/simple \
46 | https://pypi.tuna.tsinghua.edu.cn/simple \
47 | https://pypi.doubanio.com/simple \
48 | https://pypi.org/simple; do \
49 | echo "Try installing from $src"; \
50 | pip install --no-cache-dir -r requirements.txt -i $src && break; \
51 | echo "Failed at $src, try next"; \
52 | done
53 |
54 | # 复制配置文件
55 | COPY config_default.toml ./config_default.toml
56 |
57 | # 复制所有源代码
58 | COPY . .
59 |
60 | # 暴露streamlit端口
61 | EXPOSE 8811
62 |
63 | # 启动streamlit应用
64 | CMD ["python", "-m", "streamlit", "run", "ui/app.py", "--server.address=0.0.0.0", "--server.port=8811"]
65 |
--------------------------------------------------------------------------------
/tests/test_market_report.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | 市场报告测试
4 | """
5 | import sys
6 | import os
7 | import argparse
8 |
9 | project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
10 | if project_root not in sys.path:
11 | sys.path.append(project_root)
12 |
13 | from market.market_report import write_market_report
14 |
15 |
16 | def test_market_report(index_name="上证指数", format_type="markdown", has_ai_analysis=True, user_opinion=None):
17 | """测试生成市场报告"""
18 | print(f"🧪 测试市场报告生成 - {index_name}...")
19 | if has_ai_analysis:
20 | print("🤖 启用AI分析模式")
21 |
22 | try:
23 | default_opinion = "当前市场处于调整期,建议关注政策面变化"
24 |
25 | report = write_market_report(
26 | index_name=index_name,
27 | format_type=format_type,
28 | has_ai_analysis=has_ai_analysis,
29 | user_opinion=user_opinion or default_opinion
30 | )
31 |
32 | if format_type == "markdown":
33 | ext = "md"
34 | else:
35 | ext = format_type
36 |
37 | ai_suffix = "_ai" if has_ai_analysis else ""
38 | filename = f"market_report_{index_name}{ai_suffix}.{ext}"
39 | path = os.path.join(project_root, "reports", filename)
40 |
41 | mode = "wb" if isinstance(report, bytes) else "w"
42 | encoding = None if isinstance(report, bytes) else "utf-8"
43 |
44 | with open(path, mode, encoding=encoding) as f:
45 | f.write(report)
46 |
47 | print(f"✅ 报告生成成功,大小: {len(report)} 字节")
48 | print(f"📄 报告已保存: {path}")
49 |
50 | except Exception as e:
51 | print(f"❌ 测试失败: {e}")
52 |
53 |
54 | def main():
55 | parser = argparse.ArgumentParser(description="市场报告测试程序")
56 | parser.add_argument("--index-name", default="上证指数", help="指数名称 (默认: 上证指数)")
57 | parser.add_argument("--format", default="markdown", choices=["pdf", "docx", "markdown", "html"], help="报告格式 (默认: markdown)")
58 | parser.add_argument("--disable-ai", action="store_true", help="禁用AI分析功能")
59 | parser.add_argument("--user-opinion", help="用户观点 (可选)")
60 |
61 | args = parser.parse_args()
62 |
63 | test_market_report(
64 | index_name=args.index_name,
65 | format_type=args.format,
66 | has_ai_analysis=not args.disable_ai,
67 | user_opinion=args.user_opinion
68 | )
69 |
70 |
71 | if __name__ == "__main__":
72 | main()
73 |
--------------------------------------------------------------------------------
/utils/draw.py:
--------------------------------------------------------------------------------
1 | """
2 | 股票数据可视化模块
3 | 专注于K线图绘制功能
4 | """
5 |
6 | import matplotlib.pyplot as plt
7 | import matplotlib.dates as mdates
8 | from matplotlib.patches import Rectangle
9 | import pandas as pd
10 | import numpy as np
11 |
12 | # 设置中文字体
13 | plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
14 | plt.rcParams['axes.unicode_minus'] = False
15 |
16 |
17 | def plot_kline(df, title="K线图", figsize=(15, 8)):
18 | """
19 | 绘制K线图
20 |
21 | Args:
22 | df: DataFrame,包含股票数据,需要有 open, high, low, close, volume 列
23 | title: str,图表标题
24 | figsize: tuple,图表大小
25 | """
26 | # 创建数据副本,避免修改原数据
27 | df = df.copy()
28 |
29 | # 确保日期列为datetime类型
30 | if 'date' in df.columns:
31 | df['date'] = pd.to_datetime(df['date'])
32 | df = df.set_index('date')
33 |
34 | fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize, height_ratios=[3, 1])
35 |
36 | # K线图
37 | for i, (idx, row) in enumerate(df.iterrows()):
38 | open_price = row['open']
39 | high_price = row['high']
40 | low_price = row['low']
41 | close_price = row['close']
42 |
43 | # 判断涨跌
44 | color = 'red' if close_price >= open_price else 'green'
45 |
46 | # 绘制上下影线
47 | ax1.plot([i, i], [low_price, high_price], color='black', linewidth=1)
48 |
49 | # 绘制实体
50 | body_height = abs(close_price - open_price)
51 | body_bottom = min(open_price, close_price)
52 |
53 | rect = Rectangle((i-0.3, body_bottom), 0.6, body_height,
54 | facecolor=color, edgecolor='black', alpha=0.8)
55 | ax1.add_patch(rect)
56 |
57 | ax1.set_title(f'{title} - K线图', fontsize=14, fontweight='bold')
58 | ax1.set_ylabel('价格', fontsize=12)
59 | ax1.grid(True, alpha=0.3)
60 |
61 | # 设置x轴标签
62 | x_labels = [idx.strftime('%Y-%m-%d') if hasattr(idx, 'strftime') else str(idx)
63 |                 for idx in df.index[::max(1, len(df)//10)]]  # 最多显示约10个标签,避免小样本时步长为0
64 |     x_positions = list(range(0, len(df), max(1, len(df)//10)))
65 | ax1.set_xticks(x_positions)
66 | ax1.set_xticklabels(x_labels, rotation=45)
67 |
68 | # 成交量图
69 | volumes = df['volume'] if 'volume' in df.columns else [0] * len(df)
70 | colors = ['red' if df.iloc[i]['close'] >= df.iloc[i]['open'] else 'green'
71 | for i in range(len(df))]
72 |
73 | ax2.bar(range(len(df)), volumes, color=colors, alpha=0.7)
74 | ax2.set_title('成交量', fontsize=12)
75 | ax2.set_ylabel('成交量', fontsize=10)
76 | ax2.set_xticks(x_positions)
77 | ax2.set_xticklabels(x_labels, rotation=45)
78 | ax2.grid(True, alpha=0.3)
79 |
80 | plt.tight_layout()
81 | plt.show()
82 |
--------------------------------------------------------------------------------
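补充示例(非仓库源码):plot_kline 的最小调用示意,列名按照上面的 docstring(date/open/high/low/close/volume),数据为人工构造。

```python
# 示意代码:用人工构造的数据调用 plot_kline(数值仅为举例)
import pandas as pd
from utils.draw import plot_kline

n = 20
df = pd.DataFrame({
    "date": pd.date_range("2025-01-02", periods=n),
    "open": [100 + i for i in range(n)],
    "high": [102 + i for i in range(n)],
    "low": [98 + i for i in range(n)],
    "close": [101 + i for i in range(n)],
    "volume": [1_000_000] * n,
})
plot_kline(df, title="示例K线")
```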
/backtesting/get_stock_data.py:
--------------------------------------------------------------------------------
1 | import baostock as bs
2 | import os
3 | import argparse
4 |
5 |
6 | OUTPUT = '/app/data/stockdata/'
7 |
8 |
9 | def mkdir(directory):
10 | if not os.path.exists(directory):
11 | os.makedirs(directory)
12 |
13 |
14 | class Downloader(object):
15 | def __init__(self,
16 | output_dir,
17 | date_start='1990-01-01',
18 | date_end='2025-09-05',
19 | stock_code=None):
20 | self._bs = bs
21 | bs.login()
22 | self.date_start = date_start
23 | self.date_end = date_end
24 | self.output_dir = output_dir
25 | self.fields = "date,code,open,high,low,close,volume,amount," \
26 | "adjustflag,turn,tradestatus,pctChg,peTTM," \
27 | "pbMRQ,psTTM,pcfNcfTTM,isST"
28 | self.stock_code = stock_code
29 |
30 | def exit(self):
31 | bs.logout()
32 |
33 | def get_codes_by_date(self, date):
34 | print(date)
35 | stock_rs = bs.query_all_stock(date)
36 | stock_df = stock_rs.get_data()
37 | print(stock_df)
38 | return stock_df
39 |
40 | def run(self):
41 | if self.stock_code:
42 | # 只下载指定股票
43 | print(f'processing {self.stock_code}')
44 | # 获取股票名称
45 | stock_df = self.get_codes_by_date(self.date_end)
46 | row = stock_df[stock_df['code'] == self.stock_code]
47 | if row.empty:
48 | print(f"股票代码 {self.stock_code} 未找到")
49 | else:
50 | code_name = row.iloc[0]["code_name"]
51 | df_code = bs.query_history_k_data_plus(self.stock_code, self.fields,
52 | start_date=self.date_start,
53 | end_date=self.date_end).get_data()
54 | df_code.to_csv(f'{self.output_dir}/{self.stock_code}.{code_name}.csv', index=False)
55 | else:
56 | # 下载全部股票
57 | stock_df = self.get_codes_by_date(self.date_end)
58 | for index, row in stock_df.iterrows():
59 | print(f'processing {row["code"]} {row["code_name"]}')
60 | df_code = bs.query_history_k_data_plus(row["code"], self.fields,
61 | start_date=self.date_start,
62 | end_date=self.date_end).get_data()
63 | df_code.to_csv(f'{self.output_dir}/{row["code"]}.{row["code_name"]}.csv', index=False)
64 | self.exit()
65 |
66 |
67 | if __name__ == '__main__':
68 | parser = argparse.ArgumentParser(description='Download stock data')
69 | parser.add_argument('--stock_code', type=str, default=None, help='指定股票代码,如 sh.600036')
70 | parser.add_argument('--date_start', type=str, default='1990-01-01', help='开始日期')
71 | parser.add_argument('--date_end', type=str, default='2025-09-05', help='结束日期') # 注意必须为开盘日
72 | parser.add_argument('--output_dir', type=str, default=OUTPUT, help='输出目录')
73 | args = parser.parse_args()
74 |
75 | mkdir(args.output_dir)
76 | downloader = Downloader(args.output_dir, date_start=args.date_start, date_end=args.date_end, stock_code=args.stock_code)
77 | downloader.run()
78 |
79 |
--------------------------------------------------------------------------------
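补充示例(非仓库源码):除了上面的命令行入口,也可以直接在 Python 中驱动 Downloader 类;以下为示意,目录与日期仅为举例,date_end 需为开盘日。

```python
# 示意代码:用 Downloader 下载单只股票的历史数据(目录与日期仅为举例,date_end 必须是开盘日)
from backtesting.get_stock_data import Downloader, mkdir

out_dir = "data/stockdata"
mkdir(out_dir)
dl = Downloader(out_dir, date_start="2020-01-01", date_end="2025-09-05",
                stock_code="sh.600036")
dl.run()  # 在 out_dir 下写出 sh.600036.<股票名称>.csv
```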
/tests/test_stock_report.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | 股票报告测试
4 | """
5 | import sys
6 | import os
7 | import argparse
8 |
9 | project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
10 | if project_root not in sys.path:
11 | sys.path.append(project_root)
12 |
13 | from stock.stock_report import generate_stock_report
14 | from stock.stock_code_map import get_stock_identity
15 |
16 | def test_stock_report(stock_code="600519", stock_name="贵州茅台", market_type="A股",
17 | format_type="markdown", use_ai=False):
18 | """测试生成股票报告"""
19 | print(f"🧪 测试股票报告生成 - {stock_name}({stock_code})...")
20 | if use_ai:
21 | print("🤖 启用AI分析模式")
22 |
23 | try:
24 | # AI分析开关
25 | has_fundamental_ai = use_ai
26 | has_market_ai = use_ai
27 | has_news_ai = use_ai
28 | has_chip_ai = use_ai and market_type not in ["港股", "ETF"]
29 | has_comprehensive_ai = use_ai
30 |
31 | if stock_code and len(stock_code) > 6 and stock_code.isdigit():
32 | stock_identity = get_stock_identity(stock_code, market_type)
33 | else:
34 | stock_identity = get_stock_identity(stock_name, market_type)
35 |
36 | if not stock_identity or 'error' in stock_identity:
37 | print(f"❌ 获取股票代码失败")
38 | return
39 |
40 | report = generate_stock_report(
41 | stock_identity=stock_identity,
42 | format_type=format_type,
43 | has_fundamental_ai=has_fundamental_ai,
44 | has_market_ai=has_market_ai,
45 | has_news_ai=has_news_ai,
46 | has_chip_ai=has_chip_ai,
47 | has_comprehensive_ai=has_comprehensive_ai
48 | )
49 |
50 | # 生成文件名
51 | if format_type == "markdown":
52 | ext = "md"
53 | else:
54 | ext = format_type
55 |
56 | ai_suffix = "_ai" if use_ai else ""
57 | filename = f"stock_report_{stock_name}_{stock_code}{ai_suffix}.{ext}"
58 | path = os.path.join(project_root, "reports", filename)
59 |
60 | # 保存报告
61 | mode = "wb" if isinstance(report, bytes) else "w"
62 | encoding = None if isinstance(report, bytes) else "utf-8"
63 |
64 | with open(path, mode, encoding=encoding) as f:
65 | f.write(report)
66 |
67 | print(f"✅ 报告生成成功,大小: {len(report)} 字节")
68 | print(f"📄 报告已保存: {path}")
69 |
70 | except Exception as e:
71 | print(f"❌ 测试失败: {e}")
72 |
73 |
74 | def main():
75 | parser = argparse.ArgumentParser(description="股票报告测试程序")
76 | parser.add_argument("--stock-code", default="600519", help="股票代码 (默认: 600519)")
77 | parser.add_argument("--stock-name", default="贵州茅台", help="股票名称 (默认: 贵州茅台)")
78 | parser.add_argument("--market-type", default="A股", choices=["A股", "港股", "指数"], help="市场类型 (默认: A股)")
79 | parser.add_argument("--format", default="markdown", choices=["pdf", "docx", "markdown", "html"], help="报告格式 (默认: markdown)")
80 | parser.add_argument("--use-ai", action="store_true", help="启用所有AI分析功能")
81 |
82 | args = parser.parse_args()
83 |
84 | test_stock_report(
85 | stock_code=args.stock_code,
86 | stock_name=args.stock_name,
87 | market_type=args.market_type,
88 | format_type=args.format,
89 | use_ai=args.use_ai
90 | )
91 |
92 |
93 | if __name__ == "__main__":
94 | main()
95 |
--------------------------------------------------------------------------------
/market/market_ai_analysis.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Any, Tuple
2 | from llm.openai_client import OpenAIClient
3 | import datetime
4 | import sys
5 | import os
6 |
7 | project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
8 | if project_dir not in sys.path:
9 | sys.path.append(project_dir)
10 |
11 | from market.market_formatters import MarketTextFormatter
12 | from config_manager import config
13 |
14 | def generate_index_analysis_report(
15 | stock_code: str,
16 | stock_name: str,
17 | market_report_data: Dict[str, Any],
18 | user_opinion: str = ''
19 | ) -> Tuple[bool, str, str]:
20 | """生成指数AI分析报告"""
21 | client = OpenAIClient()
22 | core_data = market_report_data
23 |
24 | # 使用统一的格式化函数
25 | try:
26 | analysis_data = MarketTextFormatter.format_data_for_ai_analysis(
27 | core_data, stock_name
28 | )
29 | except Exception as e:
30 | error_msg = f"格式化市场数据失败: {str(e)}"
31 | return False, error_msg, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
32 |
33 | # 根据新闻功能是否启用调整系统消息
34 | news_enabled = config.is_market_news_enabled()
35 |
36 | if news_enabled:
37 | data_sources = "市场综合数据、技术指标、市场新闻资讯和用户观点"
38 | analysis_sections = """1. **市场现状与技术面**
39 | - 总结当前市场核心特征和技术指标状态
40 | - 结合指数表现分析市场情绪和资金动向
41 | - 重点关注市场新闻中的政策面、宏观经济动向对市场的影响
42 |
43 | 2. **新闻面分析**
44 | - 结合最新市场资讯,分析对大盘指数的潜在影响
45 | - 识别政策导向、资金流向等关键信息
46 |
47 | 3. **用户观点整合**
48 | - 如有用户观点,简要评价其合理性与风险点"""
49 | else:
50 | data_sources = "市场综合数据、技术指标和用户观点"
51 | analysis_sections = """1. **市场现状与技术面**
52 | - 总结当前市场核心特征和技术指标状态
53 | - 结合指数表现分析市场情绪和资金动向
54 | - 基于技术指标分析市场趋势和动向
55 |
56 | 2. **用户观点整合**
57 | - 如有用户观点,简要评价其合理性与风险点"""
58 |
59 | system_message = f"""你是一位资深的投资顾问和市场分析师。请基于{data_sources},对{stock_name}({stock_code})提供精炼的投资决策分析。
60 |
61 | 请严格按照以下结构输出,内容务必简洁、聚焦决策:
62 |
63 | ## 📄 市场分析报告
64 |
65 | {analysis_sections}
66 |
67 | {"3" if news_enabled else "2"}. **涨跌预测**
68 | - 下个交易日:上涨、平盘、下跌的概率分布,预测置信度(±1% 内的波动认为"平盘")
69 | - 超短期(1周)短期(1个月)和中期(3-6个月)趋势判断
70 |
71 | {"4" if news_enabled else "3"}. **操作建议**
72 | - 针对不同风险偏好给出具体建议(仓位、板块、时机)
73 | - 市场波动时特别提醒保持理性,避免情绪化操作
74 |
75 | {"6" if news_enabled else "5"}. **风险提示**
76 | - 列出1-3个当前最需警惕的市场风险
77 | - 关注新闻中提到的潜在风险因素
78 |
79 | 【要求】全文不超过700字,只输出最有决策价值的内容,结论要有明确操作性。
80 | """
81 |
82 | user_message = f"""基于以下数据,请对{stock_name}({stock_code})提供精简分析报告:
83 |
84 | {analysis_data}"""
85 |
86 | if user_opinion and user_opinion.strip():
87 | user_message += f"""
88 |
89 | ## 用户观点及关注点
90 |
91 | {user_opinion.strip()}
92 |
93 | 请在分析中特别关注用户提到的观点和关注点,并针对性地给出建议。
94 | """
95 |
96 | with open(os.path.join(project_dir, "data", "cache", "req_market.txt"), "w", encoding="utf-8") as f:
97 | f.write(system_message + "\n\n")
98 | f.write(user_message)
99 | print(f'req length {len(user_message)}')
100 | # return False, user_message, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # 注释掉调试返回
101 |
102 | try:
103 | messages = [
104 | {"role": "system", "content": system_message},
105 | {"role": "user", "content": user_message}
106 | ]
107 |
108 | response = client.chat(
109 | messages=messages,
110 | temperature=0.3, # 降低温度,确保输出更简洁一致
111 | model_type="inference"
112 | )
113 |
114 | now = datetime.datetime.now()
115 | timestamp = now.strftime('%Y-%m-%d %H:%M:%S')
116 |
117 | return True, response, timestamp
118 |
119 | except Exception as e:
120 | error_msg = f"生成{stock_name}AI分析报告失败: {str(e)}"
121 | print(f"❌ {error_msg}")
122 | return False, error_msg, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
123 |
--------------------------------------------------------------------------------
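补充示例(非仓库源码):下面的示意仅用于展示该函数的三元组返回约定 (ok, text, timestamp);项目中真实的 market_report_data 由 market_data_tools 组装,这里传入空字典只是占位。

```python
# 示意代码:展示 (ok, text, timestamp) 返回约定;真实数据应来自 get_market_tools(),空字典仅为占位
from market.market_ai_analysis import generate_index_analysis_report

ok, text, ts = generate_index_analysis_report(
    stock_code="000001",
    stock_name="上证指数",
    market_report_data={},          # 占位;项目中由 market_data_tools 提供
    user_opinion="关注政策面变化",
)
print(ok, ts)
print(text[:200])
```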
/market/market_report.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | # 添加路径以便导入
5 | project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
6 | if project_root not in sys.path:
7 | sys.path.append(project_root)
8 |
9 | from utils.report_utils import generate_pdf_report, generate_docx_report, generate_markdown_file, generate_html_report
10 | from version import get_full_version
11 |
12 |
13 | def write_market_report(index_name="上证指数", format_type="pdf", has_ai_analysis=False, user_opinion=""):
14 | """生成完整的市场分析报告(优化版本,复用AI分析数据)"""
15 | try:
16 | from market.market_data_tools import get_market_tools
17 |
18 | market_tools = get_market_tools()
19 |
20 | # 获取AI分析(如果需要)
21 | ai_analysis = None
22 | if has_ai_analysis:
23 | ai_analysis = market_tools.get_ai_analysis(
24 | use_cache=True,
25 | index_name=index_name,
26 | force_regenerate=bool(user_opinion.strip()),
27 | user_opinion=user_opinion
28 | )
29 |
30 | # 获取综合市场数据(两种情况都需要)
31 | comprehensive_report = market_tools.get_comprehensive_market_report(
32 | use_cache=True,
33 | index_name=index_name
34 | )
35 |
36 | # 获取当前指数数据
37 | current_indices = market_tools.get_current_indices(use_cache=True)
38 |
39 | # 获取焦点指数数据
40 | focus_index_data = None
41 | if index_name in current_indices.get('indices_dict', {}):
42 | focus_index_data = current_indices['indices_dict'][index_name]
43 |
44 | # 构建报告数据
45 | report_data = {
46 | 'ai_analysis': ai_analysis,
47 | 'technical_indicators': comprehensive_report.get('technical_indicators', {}),
48 | 'sentiment_indicators': comprehensive_report.get('sentiment_indicators', {}),
49 | 'valuation_indicators': comprehensive_report.get('valuation_indicators', {}),
50 | 'money_flow_indicators': comprehensive_report.get('money_flow_indicators', {}),
51 | 'margin_detail': comprehensive_report.get('margin_detail', {}),
52 | 'market_news_data': comprehensive_report.get('market_news_data', []),
53 | 'current_indices': current_indices,
54 | 'focus_index_data': focus_index_data
55 | }
56 |
57 | from market.market_formatters import MarketTextFormatter
58 | md_content = MarketTextFormatter.format_data_for_report(
59 | index_name,
60 | report_data,
61 | get_full_version()
62 | )
63 |
64 | if format_type == "pdf":
65 | return generate_pdf_report(md_content)
66 | elif format_type == "docx":
67 | return generate_docx_report(md_content)
68 | elif format_type == "html":
69 | return generate_html_report(md_content)
70 | elif format_type == "markdown":
71 | return generate_markdown_file(md_content)
72 | else:
73 | return md_content
74 |
75 | except Exception as e:
76 | error_msg = f"生成市场报告失败: {str(e)}"
77 | if format_type == "pdf":
78 | return generate_pdf_report(f"# 错误\n\n{error_msg}")
79 | elif format_type == "docx":
80 | return generate_docx_report(f"# 错误\n\n{error_msg}")
81 | elif format_type == "html":
82 | return generate_html_report(f"# 错误\n\n{error_msg}")
83 | elif format_type == "markdown":
84 | return generate_markdown_file(f"# 错误\n\n{error_msg}")
85 | else:
86 | return f"# 错误\n\n{error_msg}"
87 |
88 |
89 | if __name__ == "__main__":
90 | # 测试用例
91 | print("🧪 测试市场报告生成模块...")
92 |
93 | # 测试生成Markdown报告
94 | print("\n1. 生成上证指数Markdown报告:")
95 | try:
96 | md_report = write_market_report(
97 | index_name="上证指数",
98 | format_type="markdown",
99 | has_ai_analysis=False
100 | )
101 |
102 | if isinstance(md_report, bytes):
103 | print(f" ✅ Markdown报告生成成功,大小: {len(md_report)} 字节")
104 | # 显示前500个字符的预览
105 | preview = md_report.decode('utf-8')[:500]
106 | print(f" 📄 内容预览:\n{preview}...")
107 | else:
108 | print(f" ❌ 报告格式错误: {type(md_report)}")
109 |
110 | except Exception as e:
111 | print(f" ❌ 报告生成失败: {e}")
112 |
113 | print("\n✅ 测试完成!")
114 |
--------------------------------------------------------------------------------
/config_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | 配置管理器
3 | """
4 | import os
5 | import toml
6 | from pathlib import Path
7 | from typing import Dict, Any, Optional
8 | import logging
9 |
10 | class ConfigManager:
11 | """配置管理器"""
12 |
13 | def __init__(self, config_file: str = "config.toml"):
14 | """
15 | 初始化配置管理器
16 |
17 | Args:
18 | config_file: 配置文件路径
19 | """
20 | self.config_file = Path(config_file)
21 | self.config = self._load_config()
22 |
23 | def _load_config(self) -> Dict[str, Any]:
24 | """加载配置文件"""
25 | if not self.config_file.exists():
26 | logging.warning(f"配置文件 {self.config_file} 不存在,使用默认配置")
27 | return self._get_default_config()
28 |
29 | try:
30 | with open(self.config_file, 'r', encoding='utf-8') as f:
31 | config = toml.load(f)
32 | logging.info(f"成功加载配置文件: {self.config_file}")
33 | return config
34 | except Exception as e:
35 | logging.error(f"加载配置文件失败: {e}")
36 | return self._get_default_config()
37 |
38 | def _get_default_config(self) -> Dict[str, Any]:
39 | """获取默认配置"""
40 | return {
41 | 'LLM_OPENAI': {
42 | 'API_KEY': os.getenv('OPENAI_API_KEY', 'sk-'),
43 | 'BASE_URL': 'https://api.deepseek.com',
44 | 'TIMEOUT': 60,
45 | 'MAX_RETRIES': 3,
46 | 'DEFAULT_MODEL': 'deepseek-chat',
47 | 'INFERENCE_MODEL': 'deepseek-chat',
48 | 'DEFAULT_TEMPERATURE': 0.7
49 | },
50 | 'LLM_LOGGING': {
51 | 'USAGE_LOG_FILE': 'data/logs/openai_usage.csv',
52 | 'ENABLE_LOGGING': True,
53 | 'LOG_LEVEL': 'INFO'
54 | },
55 | 'LLM_CACHE': {
56 | 'ENABLE_CACHE': False,
57 | 'CACHE_TTL': 3600
58 | },
59 | 'MARKET': {
60 | 'ENABLE_NEWS': True
61 | }
62 | }
63 |
64 | def get(self, key: str, default: Any = None) -> Any:
65 | """
66 | 获取配置值,支持点号分隔的键路径
67 |
68 | Args:
69 | key: 配置键,支持 'section.key' 格式
70 | default: 默认值
71 |
72 | Returns:
73 | 配置值
74 | """
75 | keys = key.split('.')
76 | value = self.config
77 |
78 | try:
79 | for k in keys:
80 | value = value[k]
81 | return value
82 | except (KeyError, TypeError):
83 | return default
84 |
85 | def set(self, key: str, value: Any):
86 | """
87 | 设置配置值
88 |
89 | Args:
90 | key: 配置键,支持 'section.key' 格式
91 | value: 配置值
92 | """
93 | keys = key.split('.')
94 | config = self.config
95 |
96 | # 创建嵌套字典结构
97 | for k in keys[:-1]:
98 | if k not in config:
99 | config[k] = {}
100 | config = config[k]
101 |
102 | config[keys[-1]] = value
103 |
104 | def save(self):
105 | """保存配置到文件"""
106 | try:
107 | # 确保目录存在
108 | self.config_file.parent.mkdir(parents=True, exist_ok=True)
109 |
110 | with open(self.config_file, 'w', encoding='utf-8') as f:
111 | toml.dump(self.config, f)
112 | logging.info(f"配置已保存到: {self.config_file}")
113 | except Exception as e:
114 | logging.error(f"保存配置失败: {e}")
115 |
116 | def get_openai_config(self) -> Dict[str, Any]:
117 | """获取OpenAI相关配置"""
118 | return self.get('LLM_OPENAI', {})
119 |
120 | def get_logging_config(self) -> Dict[str, Any]:
121 | """获取日志相关配置"""
122 | return self.get('LLM_LOGGING', {})
123 |
124 | def get_cache_config(self) -> Dict[str, Any]:
125 | """获取缓存相关配置"""
126 | return self.get('LLM_CACHE', {})
127 |
128 | def get_market_config(self) -> Dict[str, Any]:
129 | """获取市场相关配置"""
130 | return self.get('MARKET', {})
131 |
132 | def is_market_news_enabled(self) -> bool:
133 | """检查是否启用市场新闻功能"""
134 | return self.get('MARKET.ENABLE_NEWS', True)
135 |
136 | def reload(self):
137 | """重新加载配置"""
138 | self.config = self._load_config()
139 | logging.info("配置已重新加载")
140 |
141 | # 全局配置实例
142 | XYSTOCK_DIR = Path(__file__).resolve().parent
143 | config = ConfigManager(os.path.join(XYSTOCK_DIR, 'config.toml'))
144 |
--------------------------------------------------------------------------------
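补充示例(非仓库源码):下面用一个最小示意展示通过上面定义的全局 `config` 实例读写配置的方式,键名沿用 config_default.toml,写入的数值仅为举例。

```python
# 示意代码:通过全局 config 实例读写配置(数值仅为举例)
from config_manager import config

model = config.get('LLM_OPENAI.DEFAULT_MODEL', 'deepseek-chat')  # 点号分隔的嵌套键
news_on = config.is_market_news_enabled()                        # 等价于 MARKET.ENABLE_NEWS

config.set('LLM_OPENAI.DEFAULT_TEMPERATURE', 0.5)  # 不存在的层级会自动创建
config.save()                                      # 写回 config.toml
config.reload()                                    # 重新加载配置
```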
/ui/components/page_cache_management.py:
--------------------------------------------------------------------------------
1 | """
2 | 缓存管理页面组件
3 | """
4 |
5 | import streamlit as st
6 | import sys
7 | import os
8 |
9 | project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
10 | sys.path.append(project_root)
11 |
12 |
13 | def main():
14 | """缓存管理主页面"""
15 | st.header("🗂️ 缓存管理")
16 | st.markdown("管理系统中的各类数据缓存,清理后下次查询会重新获取最新数据。")
17 |
18 | col1, col2 = st.columns(2)
19 |
20 | with col1:
21 | st.subheader("📈 股票数据缓存")
22 | st.markdown("清理股票基本信息、技术指标、新闻、AI分析等数据缓存。")
23 |
24 | if st.button("🗑️ 清理股票数据缓存",
25 | type="primary",
26 | width='stretch',
27 | help="清理所有股票相关的数据缓存"):
28 | try:
29 | from stock.stock_data_tools import clear_stock_cache
30 | clear_stock_cache()
31 | st.success("✅ 股票数据缓存已清理完成!")
32 | except Exception as e:
33 | st.error(f"❌ 清理股票缓存失败:{str(e)}")
34 |
35 | with col2:
36 | st.subheader("📊 大盘数据缓存")
37 | st.markdown("清理市场情绪、估值指标、资金流向等大盘数据缓存。")
38 |
39 | if st.button("🗑️ 清理大盘数据缓存",
40 | type="primary",
41 | width='stretch',
42 | help="清理所有大盘相关的数据缓存"):
43 | try:
44 | from market.market_data_tools import get_market_tools
45 | market_tools = get_market_tools()
46 | market_tools.clear_cache()
47 | st.success("✅ 大盘数据缓存已清理完成!")
48 | except Exception as e:
49 | st.error(f"❌ 清理大盘缓存失败:{str(e)}")
50 |
51 | st.markdown("---")
52 |
53 | st.subheader("🧹 批量操作")
54 |
55 | col3, col4 = st.columns([1, 3])
56 |
57 | with col3:
58 | # 使用session_state实现确认弹窗
59 | if st.button("🗑️ 清理所有缓存",
60 | type="secondary",
61 | width='stretch',
62 | help="一键清理所有股票和大盘数据缓存"):
63 | st.session_state['show_clear_all_confirm'] = True
64 |
65 | if st.session_state.get('show_clear_all_confirm', False):
66 | st.warning(
67 | "⚠️ 此操作将**删除所有缓存,包括股票名映射、K线缓存等**,后续拉取数据会变慢,请谨慎操作!",
68 | icon="⚠️"
69 | )
70 | if st.button("确认清理", key="confirm_clear_all_cache"):
71 | try:
72 | # 清理所有相关缓存
73 | from stock.stock_data_tools import clear_stock_cache, clear_chip_cache
74 | clear_stock_cache()
75 | clear_chip_cache() # 清理筹码缓存
76 |
77 | from market.market_data_tools import get_market_tools
78 | market_tools = get_market_tools()
79 | market_tools.clear_cache()
80 |
81 | from utils.kline_cache import cache_manager
82 | cache_manager.clear_cache()
83 |
84 | from stock.stock_code_map import clear_stock_map_cache, clear_hk_stock_map_cache
85 | clear_stock_map_cache()
86 | clear_hk_stock_map_cache()
87 |
88 | # 删除data/cache目录下所有txt文件
89 | import glob
90 | from pathlib import Path
91 | cache_dir = os.path.join(Path(__file__).parent.parent.parent, 'data', 'cache')
92 | for txt_file in glob.glob(os.path.join(cache_dir, '*.txt')):
93 | print("清除文本文件:", txt_file)
94 | try:
95 | os.remove(txt_file)
96 | except Exception as e:
97 | print(f"删除文件失败: {txt_file} {e}")
98 |
99 | st.success("✅ 所有缓存已清理完成!")
100 | except Exception as e:
101 | st.error(f"❌ 清理所有缓存失败:{str(e)}")
102 | st.session_state['show_clear_all_confirm'] = False
103 | if st.button("取消", key="cancel_clear_all_cache"):
104 | st.session_state['show_clear_all_confirm'] = False
105 |
106 | st.markdown("---")
107 | st.subheader("ℹ️ 缓存说明")
108 |
109 | st.markdown("""
110 | **股票数据缓存包括:**
111 | - 📋 基本信息:股票价格、涨跌幅等基础数据
112 | - 📈 技术指标:移动平均线、MACD、RSI等技术分析指标
113 | - 📰 新闻资讯:相关新闻和公告信息
114 | - 🧮 筹码分析:筹码分布和成本分析数据
115 | - 🤖 AI分析:各类AI分析报告
116 |
117 | **大盘数据缓存包括:**
118 | - 💭 市场情绪:情绪指标和市场热度
119 | - 💰 估值指标:市场整体估值水平
120 | - 💸 资金流向:资金进出和流向分析
121 | - 📊 融资融券:详细的融资融券数据
122 | - 🤖 AI分析:市场AI分析报告
123 |
124 | **注意事项:**
125 | - 清理缓存后,下次查询会重新获取最新数据
126 | - K线数据有独立的缓存机制,不会被此处清理影响
127 | - 过期的缓存数据会自动更新,无需手动清理
128 | """)
129 |
130 |
131 | if __name__ == "__main__":
132 | main()
133 |
--------------------------------------------------------------------------------
/utils/format_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | 数字格式化工具模块
3 | 提供统一的数字显示格式化功能
4 | """
5 |
6 | def format_large_number(number, decimal_places=2):
7 | """
8 | 格式化大数字,自动添加单位(万、亿等)
9 |
10 | Args:
11 | number: 要格式化的数字
12 | decimal_places: 保留的小数位数,默认为2位
13 |
14 | Returns:
15 | str: 格式化后的字符串
16 |
17 | Examples:
18 | >>> format_large_number(12345678)
19 | '1234.57万'
20 | >>> format_large_number(123456789012)
21 | '1234.57亿'
22 | >>> format_large_number(1234)
23 | '1234.00'
24 | """
25 | if number is None or number == 0:
26 | return "0.00"
27 |
28 | # 确保输入是数字类型
29 | try:
30 | number = float(number)
31 | except (ValueError, TypeError):
32 | return str(number)
33 |
34 | # 处理负数
35 | is_negative = number < 0
36 | number = abs(number)
37 |
38 | # 根据数字大小选择单位
39 | if number >= 100000000: # 大于等于1亿
40 | result = f"{number / 100000000:.{decimal_places}f}亿"
41 | elif number >= 10000: # 大于等于1万
42 | result = f"{number / 10000:.{decimal_places}f}万"
43 | else:
44 | result = f"{number:.{decimal_places}f}"
45 |
46 | # 添加负号
47 | if is_negative:
48 | result = "-" + result
49 |
50 | return result
51 |
52 |
53 | def format_volume(volume, decimal_places=2):
54 | """
55 | 格式化成交量数字
56 |
57 | Args:
58 | volume: 成交量数字
59 | decimal_places: 保留的小数位数,默认为2位
60 |
61 | Returns:
62 | str: 格式化后的字符串
63 | """
64 | return format_large_number(volume, decimal_places)
65 |
66 |
67 | def format_market_value(value, decimal_places=2):
68 | """
69 | 格式化市值数字
70 |
71 | Args:
72 | value: 市值数字
73 | decimal_places: 保留的小数位数,默认为2位
74 |
75 | Returns:
76 | str: 格式化后的字符串
77 | """
78 | return format_large_number(value, decimal_places)
79 |
80 |
81 | def format_price(price, decimal_places=2):
82 | """
83 | 格式化价格数字
84 |
85 | Args:
86 | price: 价格数字
87 | decimal_places: 保留的小数位数,默认为2位
88 |
89 | Returns:
90 | str: 格式化后的字符串
91 | """
92 | if price is None:
93 | return "0.00"
94 |
95 | try:
96 | price = float(price)
97 | return f"{price:.{decimal_places}f}"
98 | except (ValueError, TypeError):
99 | return str(price)
100 |
101 |
102 | def format_percentage(value, decimal_places=2):
103 | """
104 | 格式化百分比数字
105 |
106 | Args:
107 |         value: 百分比数值(如 2.45 表示 2.45%;函数只追加%符号,不做比例换算)
108 | decimal_places: 保留的小数位数,默认为2位
109 |
110 | Returns:
111 | str: 格式化后的字符串,包含%符号
112 | """
113 | if value is None:
114 | return "0.00%"
115 |
116 | try:
117 | value = float(value)
118 | return f"{value:.{decimal_places}f}%"
119 | except (ValueError, TypeError):
120 | return str(value)
121 |
122 |
123 | def format_change(change, change_percent, decimal_places=2):
124 | """
125 | 格式化价格变化和变化百分比
126 |
127 | Args:
128 | change: 价格变化数值
129 | change_percent: 变化百分比
130 | decimal_places: 保留的小数位数,默认为2位
131 |
132 | Returns:
133 | str: 格式化后的字符串,如 "1.23 (2.45%)" 或 "-1.23 (-2.45%)"
134 | """
135 | if change is None or change_percent is None:
136 | return "0.00 (0.00%)"
137 |
138 | try:
139 | change = float(change)
140 | change_percent = float(change_percent)
141 |
142 | return f"{change:.{decimal_places}f} ({change_percent:.{decimal_places}f}%)"
143 | except (ValueError, TypeError):
144 | return f"{change} ({change_percent}%)"
145 |
146 | def format_number(number, decimal_places=2):
147 | """
148 | 四舍五入格式化数字到指定小数位数
149 | """
150 | if number is None:
151 | return f"0.{''.join(['0' for _ in range(decimal_places)])}"
152 |
153 | try:
154 | number = float(number)
155 | return f"{number:.{decimal_places}f}"
156 | except (ValueError, TypeError):
157 | return str(number)
158 |
159 |
160 | def judge_rsi_level(rsi: float) -> str:
161 | """
162 | 判断RSI水平
163 |
164 | Args:
165 | rsi: RSI值
166 |
167 | Returns:
168 | str: RSI水平描述
169 | """
170 | if rsi >= 80:
171 | return "超买"
172 | elif rsi >= 70:
173 | return "强势"
174 | elif rsi >= 30:
175 | return "正常"
176 | elif rsi >= 20:
177 | return "弱势"
178 | else:
179 | return "超卖"
180 |
181 | def get_section_separator(markdown: bool = False) -> list:
182 | """获取章节分隔符
183 |
184 | Args:
185 | markdown: 是否为markdown格式
186 |
187 | Returns:
188 | list: 分隔符行列表
189 | """
190 | if markdown:
191 | return ["\n---\n"]
192 | else:
193 | return ["\n" + "=" * 40 + "\n"]
194 |
--------------------------------------------------------------------------------
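补充示例(非仓库源码):上述工具函数的调用示例,输出与各自 docstring 一致;注意 format_percentage 的入参已是百分数。

```python
# 示例调用:预期输出与上面的 docstring 保持一致
from utils.format_utils import (
    format_large_number, format_percentage, format_change, judge_rsi_level,
)

print(format_large_number(12345678))       # '1234.57万'
print(format_large_number(-250000000))     # '-2.50亿'
print(format_percentage(2.45))             # '2.45%'(入参已是百分数)
print(format_change(1.23, 2.45))           # '1.23 (2.45%)'
print(judge_rsi_level(75.0))               # '强势'
```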
/README.md:
--------------------------------------------------------------------------------
1 | # xystock
2 |
3 | 一个极简的股票/基金分析工具,支持实时行情获取和大模型分析。
4 |
5 | ## 版本更新 - v1.2.0 (2025-09-30)
6 |
7 | **🎉 重大更新!** 新版本增加了以下主要功能:
8 |
9 | - **市场情绪分析** - 全新市场情绪数据获取与分析功能
10 | - **股息分红分析** - 深度分析股票分红历史与收益潜力
11 | - **ETF持仓分析** - 获取ETF详细持仓数据与投资策略分析
12 | - **风险指标计算** - 新增个股与指数多维度风险评估
13 | - **指数新闻集成** - 实时获取指数新闻并结合数据分析
14 | - **多指数支持** - 支持分析上证、深成、创业板、科创50等主要指数
15 | - **风险偏好设置** - 在设置中新增风险偏好选项
16 | - **基本面增强** - 加入盈利、偿债、运营、成长等更多数据支持
17 | - **策略回测模块** - 完整回测框架和可视化工具
18 |
19 | 本次更新包含 **30+ 次提交**,新增 **5000+ 行代码**,欢迎大家体验新版本!
20 |
21 | ## 主要功能
22 |
23 | 本工具支持对A股市场全部股票、ETF基金、港股通股票,以及上证指数、深证成指、创业板指、科创50等主要大盘指数的行情预测与智能分析。无论是个股、基金还是指数,均可一站式获取实时数据、AI分析报告和操作建议。
24 |
25 | 工具提供了三个主要功能模块:大盘分析、个股分析和策略回测。
26 |
27 | 
28 |
29 | 在预测个股时,需要以大趋势为前提,尤其是在现阶段,大盘有明显的趋势,因此我将大盘预测单独做成了一个界面。另一个启发是:对于难以预测的长期趋势和短期事件,可以通过经济数据和新闻事件实时捕捉,从而解决一部分问题。
30 |
31 | 在大盘分析部分,收集的数据包括:大盘指数(如上证、深成、创业板、科创板等)、技术指标(如移动平均线、MACD、RSI 等)、市场基本面(包括估值、资金流向、融资融券等)、市场新闻和情绪分析。支持导出多种格式的分析报告(Markdown、PDF 等)。
32 |
33 | 
34 |
35 | 在个股分析部分,收集的数据包括:股票基本信息、行情趋势(含风险分析)、新闻资讯、筹码分析、股息分红信息、ETF持仓数据等。同样支持多格式报告导出。
36 |
37 | 
38 |
39 | 新增的策略回测模块提供了完整的回测功能,包括:策略分析、交易记录展示、可视化工具,帮助用户验证投资策略的有效性。
40 |
41 | 在进行最终的 AI 分析时,还考虑了用户相关的因素:支持用户输入自己的观点和关注重点、持仓情况,并设置用户画像以及常犯的错误。这些用户特征在一些券商 APP 中可能是通过数据总结出来的;在没有数据的情况下,让用户自己输入,只要客观全面,效果其实也差不多,甚至可能更好。再退一步,也可以把券商 APP 中的用户画像直接贴过来。
42 |
43 | 
44 |
45 |
46 | ## 安装和使用
47 |
48 | 拉取最新代码
49 |
50 | ```bash
51 | git clone https://github.com/xieyan0811/xystock.git
52 | cd xystock
53 | ```
54 |
55 | ### 方式一:使用预构建镜像(推荐)
56 |
57 | **适用场景:** 快速部署、生产环境使用
58 |
59 | 直接使用 Docker Hub 上的稳定版镜像,无需本地编译:
60 |
61 | ```bash
62 | docker pull xieyan800811/xystock:latest
63 | ```
64 |
65 | > 💡 **提示:** 此步骤可选,`docker-compose up` 会自动拉取镜像。后续版本更新时,重新执行 `docker pull` 即可获取最新版本。
66 |
67 | ### 方式二:本地构建镜像(开发版)
68 |
69 | **适用场景:** 二次开发、功能定制、体验最新特性
70 |
71 | 从源码本地构建 Docker 镜像:
72 |
73 | ```bash
74 | docker build . -t xieyan800811/xystock:latest
75 | ```
76 |
77 | ### 启动应用
78 |
79 | **生产模式(推荐):**
80 |
81 | ```bash
82 | docker compose up -d
83 | ```
84 |
85 | **开发模式:**
86 |
87 | ```bash
88 | docker compose -f docker-compose.dev.yml up -d
89 | docker exec -it xystock-web bash
90 | python -m streamlit run ui/app.py --server.address=0.0.0.0 --server.port=8811
91 | ```
92 |
93 | ### 使用指南
94 |
95 | 1. 打开浏览器访问 `http://localhost:8811`
96 | 2. 首次使用需先配置大模型相关参数
97 | 3. 建议按以下顺序体验功能:
98 | - **Token 统计** - 了解模型调用成本
99 | - **大盘分析** - 获取市场整体趋势
100 | - **个股分析** - 深入分析具体股票
101 |
102 | ## 支持的模型
103 |
104 | 
105 |
106 | 系统支持所有兼容 OpenAI API 的模型服务,包括但不限于 OpenAI、OpenRouter、阿里百炼、Ollama 等。以下是经过测试和推荐的模型列表:
107 |
108 | ### OpenAI 模型
109 |
110 | | 模型名称 | 模型ID | 推荐用途 |
111 | |---------|--------|---------|
112 | | GPT-4.1-nano | gpt-4.1-nano | 超轻量级模型,适合基础操作 |
113 | | GPT-4.1-mini | gpt-4.1-mini | 紧凑型模型,性能较好 |
114 | | GPT-4o | gpt-4o | 标准模型,功能全面 |
115 | | o4-mini | o4-mini | 专业推理模型(紧凑版) |
116 | | o3-mini | o3-mini | 高级推理模型(轻量级) |
117 | | o3 | o3 | 完整高级推理模型 |
118 | | o1 | o1 | 顶级推理和问题解决模型 |
119 |
120 | ### 阿里百炼 (DashScope) 模型
121 |
122 | | 模型名称 | 模型ID | 推荐用途 |
123 | |---------|--------|---------|
124 | | 通义千问 Turbo | qwen-turbo | 快速响应,适合日常对话 |
125 | | 通义千问 Plus | qwen-plus | 平衡性能和成本 |
126 | | 通义千问 Max | qwen-max | 最强性能 |
127 | | 通义千问 Max 长文本版 | qwen-max-longcontext | 支持超长上下文 |
128 |
129 | ### DeepSeek v3 模型
130 |
131 | | 模型名称 | 模型ID | 推荐用途 |
132 | |---------|--------|---------|
133 | | DeepSeek Chat | deepseek-chat | 通用对话模型,适合股票投资分析 |
134 | | DeepSeek Reasoner | deepseek-reasoner | 推理模型 |
135 |
136 | 目前我使用的是 deepseek 系列模型,价格便宜,而且更了解中国市场情况。deepseek-reasoner 的价格约为 deepseek-chat 的两倍;2025 年 09 月 06 日后价格有调整,具体费用与是否命中缓存有关,不太好精确统计。计费方法详见:https://api-docs.deepseek.com/zh-cn/quick_start/pricing/
137 |
138 | ### 其他兼容服务
139 |
140 | 系统也支持 OpenRouter、Ollama 等提供兼容 OpenAI 接口的服务。只需在设置中配置相应的 Base URL 和 API Key 即可。
141 |
142 | ## 模型使用建议
143 |
144 | - **推理模型(轻量)**:用于快速查询和简单任务,建议使用较轻量的模型,如 GPT-4.1-mini、o4-mini 或通义千问 Turbo,对应设置中的"推理模型"。
145 | - **分析模型(深度)**:用于生成详细的股票分析报告,建议使用功能更强大的模型,如 GPT-4o、o3 或通义千问 Max,对应设置中的"分析模型"。
146 |
147 | ## 目录结构
148 | - backtesting/ 回测模块及可视化工具
149 | - llm/ 大模型相关及prompt
150 | - market/ 市场数据获取和分析
151 | - stock/ 个股数据获取和分析
152 | - ui/ 用户界面
153 | - utils/ 工具函数和数据格式化
154 | - data/ 缓存和数据存储
155 |
156 | ## 使用场景
157 |
158 | 股票相关的数据和指标,在各类行情 APP 或财经网站上都能看到,有些还给出"持仓/买入/卖出"的建议。那为什么还需要自己做工具呢?
159 | 
160 | 下面列出了一些我目前还没有找到现成解决方案的需求:
161 |
162 | * 信息并不是获取不到或不充足,而是看不过来、不知道如何解读。比如:如何看 KDJ 指标?布林带开口大小背后的逻辑是什么?净利润 xx 在所属行业中算高还是低……
163 | * 数据指标是零散的,没有具体结论;即使有结论,如果没有推理过程也不敢轻信。需要一个像 DeepSeek 深度思考那样能看到推理过程的工具,即使出了问题,至少知道问题出在哪里,而不是玄学、黑盒。
164 | * 网上给出的推荐操作是卖出,但不知道什么时候卖、什么情况下卖。需要具体的建议:如果持有,什么时候卖;如果空仓,什么时候买;做长线、中线还是短线?具体点位是多少?
165 | * 有些信息 API 和大模型可能抓不到,但我想告诉大模型。比如:最近微信群里大家都在议论股票,我需要一个渠道把这些信息输入给模型,与其他数据一同分析。
166 | * 需要针对用户定制的建议:考虑用户的风险偏好、交易方式和容易犯的错误,并在决策时提醒用户。不同的操作风格(左侧/右侧、长线/短线)有时需要完全不同的建议。
167 | * 还要考虑人的情绪波动:不仅股票下跌时会焦虑,盘面剧烈波动带来的不确定性同样会引发焦虑,进而导致非理性操作,因此也需要从情绪角度提示用户。
168 | * 对于一些意料外的情况,比如某股票突然上涨超过 5%,用户可能想看一下新闻、分析原因,需要针对突发事件的实时分析和建议。
169 | * 优化那些"多数人"能够操作的可选项,比如:A 股、ETF 基金、港股通。有些项目参考了国外开源代码,包含很多美股内容,但我们买不了,即使是港股通也有门槛。与其花时间做得全面,不如重点开发 80%+ 用户都会使用的功能。
170 | * 试用过的几个 AI 工具每次分析一只股票耗时约 15-20 分钟,有的不做缓存,每次都重新拉取数据并让 AI 从头思考,因此需要更节省时间和金钱的解决方案。
171 |
172 |
173 | *如果你觉得项目对你有帮助或能解决你的实际问题,请帮我点亮小星星~*
174 |
175 | ## 更新日志
176 |
177 | 查看详细的版本更新历史和功能变化,请参考 [CHANGELOG.md](./CHANGELOG.md)。
178 |
179 | ## 许可证
180 |
181 | 本项目采用 MIT 许可证 - 详情请参阅 [LICENSE](./LICENSE) 文件。
182 |
183 | MIT 许可证允许您自由地使用、修改、分发本软件,包括商业用途,只需保留原始的版权声明和许可证声明。
184 |
185 | ---
186 |
187 | ## 公众号实盘预测说明
188 |
189 | 为便于大家直观了解实际预测效果,2025 年剩余几个月内,我将在公众号【得之有道】每天发布上证指数次日涨跌的预测结果(受篇幅限制,仅展示上证指数,供参考)。欢迎关注公众号,持续跟踪和验证工具表现。
190 |
191 |
192 | 扫码关注公众号:
193 |
194 | 
195 |
--------------------------------------------------------------------------------
/ui/components/page_token_stats.py:
--------------------------------------------------------------------------------
1 | """
2 | API使用量统计页面 - 显示OpenAI API的使用情况和成本统计
3 | """
4 |
5 | import streamlit as st
6 | import pandas as pd
7 | import os
8 | import sys
9 | import altair as alt
10 |
11 | project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
12 | if project_root not in sys.path:
13 | sys.path.append(project_root)
14 |
15 | from llm.usage_logger import UsageLogger
16 |
17 | usage_logger = UsageLogger()
18 |
19 | def format_cost(cost):
20 | """格式化成本显示"""
21 | if cost >= 1:
22 | return f"${cost:.2f}"
23 | else:
24 | return f"${cost:.4f}"
25 |
26 | def show_usage_overview(days=30):
27 | """显示使用概览"""
28 | st.header("API使用概览")
29 |
30 | # 获取使用统计
31 | stats = usage_logger.get_usage_stats(days=days)
32 |
33 | if not stats:
34 | st.warning("暂无使用数据")
35 | return
36 |
37 | # 显示关键指标
38 | col1, col2, col3 = st.columns(3)
39 |
40 | with col1:
41 | st.metric("总请求数", f"{stats.get('total_requests', 0)}")
42 |
43 | with col2:
44 | st.metric("总Token数", f"{stats.get('total_tokens', 0):,}")
45 |
46 | with col3:
47 | avg_response_time = stats.get('avg_response_time', 0)
48 | st.metric("平均响应时间", f"{avg_response_time:.2f}秒")
49 |
50 | # 成功率指标
51 | success_rate = stats.get('success_rate', 0) * 100
52 | st.progress(success_rate / 100, text=f"成功率: {success_rate:.1f}%")
53 |
54 | def show_model_distribution(days=30):
55 | """显示模型使用分布"""
56 | stats = usage_logger.get_usage_stats(days=days)
57 |
58 | if not stats or 'model_distribution' not in stats or not stats['model_distribution']:
59 | st.warning("暂无模型分布数据")
60 | return
61 |
62 | st.subheader("模型使用分布")
63 |
64 | model_dist = stats['model_distribution']
65 | models = list(model_dist.keys())
66 | counts = list(model_dist.values())
67 |
68 | model_df = pd.DataFrame({
69 | 'model': models,
70 | 'count': counts
71 | })
72 |
73 | chart = alt.Chart(model_df).mark_bar().encode(
74 | x=alt.X('model', sort='-y', title='模型'),
75 | y=alt.Y('count', title='使用次数'),
76 | color=alt.Color('model', legend=None)
77 | ).properties(height=300)
78 |
79 | st.altair_chart(chart, use_container_width=True)
80 |
81 | with st.expander("模型使用详细数据", expanded=False):
82 | st.dataframe(model_df, width='stretch')
83 |
84 | def show_detailed_logs():
85 | """显示详细日志"""
86 | st.subheader("详细使用记录")
87 |
88 | try:
89 | df = pd.read_csv(usage_logger.log_file)
90 |
91 | if df.empty:
92 | st.warning("暂无使用记录")
93 | return
94 |
95 | df['timestamp'] = pd.to_datetime(df['timestamp'])
96 | df = df.sort_values('timestamp', ascending=False)
97 |
98 | df['input_preview'] = df['input_text'].str[:50] + '...'
99 | df['output_preview'] = df['output_text'].str[:50] + '...'
100 |
101 | # 显示表格
102 | display_cols = [
103 | 'timestamp', 'model', 'prompt_tokens',
104 | 'completion_tokens', 'total_tokens',
105 | #'cost_estimate',
106 | 'response_time', 'success'
107 | ]
108 |
109 | st.dataframe(
110 | df[display_cols],
111 | width='stretch',
112 | column_config={
113 | 'timestamp': st.column_config.DatetimeColumn('时间'),
114 | 'model': st.column_config.TextColumn('模型'),
115 | 'prompt_tokens': st.column_config.NumberColumn('输入Token'),
116 | 'completion_tokens': st.column_config.NumberColumn('输出Token'),
117 | 'total_tokens': st.column_config.NumberColumn('总Token'),
118 | 'cost_estimate': st.column_config.NumberColumn('成本($)', format="$%.4f"),
119 | 'response_time': st.column_config.NumberColumn('响应时间(秒)'),
120 | 'success': st.column_config.CheckboxColumn('成功')
121 | }
122 | )
123 |
124 | with st.expander("查看详细请求内容", expanded=False):
125 | record_idx = st.selectbox(
126 | "选择记录查看详情:",
127 | range(len(df)),
128 | format_func=lambda i: f"{df.iloc[i]['timestamp']} - {df.iloc[i]['model']} (Tokens: {df.iloc[i]['total_tokens']})"
129 | )
130 |
131 | record = df.iloc[record_idx]
132 |
133 | st.write("#### 请求详情")
134 | col1, col2 = st.columns(2)
135 |
136 | with col1:
137 | st.write("**时间:**", record['timestamp'])
138 | st.write("**模型:**", record['model'])
139 | st.write("**输入Token:**", record['prompt_tokens'])
140 | st.write("**输出Token:**", record['completion_tokens'])
141 | st.write("**总Token:**", record['total_tokens'])
142 |
143 | with col2:
144 | st.write("**成本:**", format_cost(record['cost_estimate']))
145 | st.write("**响应时间:**", f"{record['response_time']:.2f}秒")
146 | st.write("**成功:**", "✅" if record['success'] else "❌")
147 | if not record['success'] and record['error_message']:
148 | st.error(f"错误信息: {record['error_message']}")
149 | st.write("**温度参数:**", record['temperature'])
150 |
151 | st.text_area("输入文本", record['input_text'], height=150)
152 | st.text_area("输出文本", record['output_text'], height=150)
153 |
154 | except Exception as e:
155 | st.error(f"加载详细日志失败: {str(e)}")
156 |
157 | def main():
158 | """API使用统计页面主函数"""
159 | st.title("🔍 API使用统计")
160 |
161 | period_options = {
162 | "过去7天": 7,
163 | "过去30天": 30,
164 | "过去90天": 90,
165 | "所有时间": 3650 # 约10年
166 | }
167 |
168 | selected_period = st.selectbox(
169 | "选择时间范围:",
170 | list(period_options.keys()),
171 | index=1,
172 | help="选择要分析的时间范围"
173 | )
174 |
175 | days = period_options[selected_period]
176 |
177 | tab1, tab2 = st.tabs(["📊 使用概览", "📝 详细记录"])
178 |
179 | with tab1:
180 | show_usage_overview(days)
181 | show_model_distribution(days)
182 |
183 | with tab2:
184 | show_detailed_logs()
185 |
186 |
187 | if __name__ == "__main__":
188 | main()
189 |
--------------------------------------------------------------------------------
/utils/string_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | 通用工具模块
3 | 包含各种通用的工具函数
4 |
5 | 主要功能:
6 | - remove_markdown_format: 去除markdown格式,将markdown文本转换为纯文本
7 | - clean_text: 清理文本中的多余空格和换行
8 | - normalize_whitespace: 标准化空白字符
9 |
10 | 使用示例:
11 | from utils.string_utils import remove_markdown_format
12 |
13 | markdown_text = "这是**粗体**和*斜体*文本"
14 | plain_text = remove_markdown_format(markdown_text)
15 | print(plain_text) # 输出: 这是粗体和斜体文本
16 | """
17 |
18 | import re
19 | from typing import Dict, Any
20 |
21 | def remove_markdown_format(text: str, only_headers: bool = False) -> str:
22 | """
23 | 去除markdown文本的格式,输入字符串,输出纯文本字符串
24 |
25 | Args:
26 | text (str): 包含markdown格式的文本
27 | only_headers (bool): 如果为True,只去除标题符号;如果为False,去除所有格式
28 |
29 | Returns:
30 | str: 去除markdown格式后的纯文本
31 | """
32 | if not text or not isinstance(text, str):
33 | return ""
34 |
35 | # 如果只处理标题,将标题符号替换为粗体格式后直接返回
36 | # 注意:不要在此之前删除 #,否则下面的粗体转换会匹配不到标题
37 | if only_headers:
38 | text = re.sub(r'^#{1,6}\s*(.+)$', r'**\1**', text, flags=re.MULTILINE)
39 | return text.strip()
40 | else:
41 | # 移除标题 # ## ### 等
42 | text = re.sub(r'^#{1,6}\s*', '', text, flags=re.MULTILINE)
43 | 
44 | # 以下依次移除其余 Markdown 格式
45 | 
46 |
47 | # 移除代码块 ```
48 | text = re.sub(r'```[\s\S]*?```', '', text)
49 |
50 | # 移除行内代码 `code`
51 | text = re.sub(r'`([^`]+)`', r'\1', text)
52 |
53 | # 移除粗体 **text** 或 __text__
54 | text = re.sub(r'\*\*([^*]+)\*\*', r'\1', text)
55 | text = re.sub(r'__([^_]+)__', r'\1', text)
56 |
57 | # 移除斜体 *text* 或 _text_
58 | text = re.sub(r'\*([^*]+)\*', r'\1', text)
59 | text = re.sub(r'_([^_]+)_', r'\1', text)
60 |
61 | # 移除删除线 ~~text~~
62 | text = re.sub(r'~~([^~]+)~~', r'\1', text)
63 |
64 | # 移除图片  - 需要在链接处理之前
65 | text = re.sub(r'!\[[^\]]*\]\([^)]*\)', '', text)
66 |
67 | # 移除链接 [text](url)
68 | text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text)
69 |
70 | # 移除引用 >
71 | text = re.sub(r'^>\s*', '', text, flags=re.MULTILINE)
72 |
73 | # 移除水平线 --- 或 ***
74 | text = re.sub(r'^(-{3,}|\*{3,})$', '', text, flags=re.MULTILINE)
75 |
76 | # 移除列表标记
77 | # 无序列表 - * +
78 | text = re.sub(r'^[\s]*[-*+]\s+', '', text, flags=re.MULTILINE)
79 | # 有序列表 1. 2. 等
80 | text = re.sub(r'^[\s]*\d+\.\s+', '', text, flags=re.MULTILINE)
81 |
82 | # 移除表格分隔符
83 | text = re.sub(r'\|', '', text)
84 | text = re.sub(r'^[-\s:]+$', '', text, flags=re.MULTILINE)
85 |
86 | # 清理多余的空行
87 | text = re.sub(r'\n\s*\n', '\n\n', text)
88 |
89 | # 去除首尾空白
90 | text = text.strip()
91 |
92 | return text
93 |
94 |
95 | def clean_text(text: str) -> str:
96 | """
97 | 清理文本,去除多余的空格和换行
98 |
99 | Args:
100 | text (str): 需要清理的文本
101 |
102 | Returns:
103 | str: 清理后的文本
104 | """
105 | if not text or not isinstance(text, str):
106 | return ""
107 |
108 | # 将多个空格替换为单个空格
109 | text = re.sub(r'\s+', ' ', text)
110 |
111 | # 去除首尾空白
112 | text = text.strip()
113 |
114 | return text
115 |
116 |
117 | def normalize_whitespace(text: str) -> str:
118 | """
119 | 标准化空白字符,将各种空白字符统一为普通空格
120 |
121 | Args:
122 | text (str): 需要标准化的文本
123 |
124 | Returns:
125 | str: 标准化后的文本
126 | """
127 | if not text or not isinstance(text, str):
128 | return ""
129 |
130 | # 将各种空白字符(制表符、换行符等)替换为空格
131 | text = re.sub(r'[\t\r\n\f\v]', ' ', text)
132 |
133 | # 将多个连续空格替换为单个空格
134 | text = re.sub(r' +', ' ', text)
135 |
136 | # 去除首尾空白
137 | text = text.strip()
138 |
139 | return text
140 |
141 |
142 | def format_indicators_dict(data_dict: Dict[str, Any], title: str, desc = None) -> str:
143 | """
144 | 将指标字典格式化为标准 Markdown 字符串
145 |
146 | Args:
147 | data_dict: 要格式化的指标字典
148 | title: 指标类型标题(如"技术指标"、"风险指标")
149 | desc: 可选的描述文本
150 |
151 | Returns:
152 | 格式化后的标准 Markdown 字符串
153 | """
154 | if not data_dict:
155 | return f"**{title}**:无数据\n\n"
156 |
157 | result_text = f"**{title}**:\n\n"
158 | if desc:
159 | result_text += f"{desc}\n\n"
160 |
161 | for key, value in data_dict.items():
162 | if isinstance(value, (int, float)):
163 | # 数值型数据保留2位小数
164 | formatted_value = round(float(value), 2)
165 | result_text += f"- **{key}**: {formatted_value}\n"
166 | elif isinstance(value, str):
167 | # 字符串直接显示
168 | result_text += f"- **{key}**: {value}\n"
169 | elif isinstance(value, dict):
170 | # 嵌套字典数据
171 | result_text += f"- **{key}**:\n"
172 | for sub_key, sub_value in value.items():
173 | if isinstance(sub_value, (int, float)):
174 | formatted_sub_value = round(float(sub_value), 2)
175 | result_text += f" - {sub_key}: {formatted_sub_value}\n"
176 | else:
177 | result_text += f" - {sub_key}: {sub_value}\n"
178 | elif isinstance(value, list):
179 | # 列表数据处理
180 | if len(value) > 0:
181 | if isinstance(value[0], (int, float)):
182 | # 数值列表
183 | formatted_values = [round(float(v), 2) for v in value[:3]]
184 | result_text += f"- **{key}**: {formatted_values}{'...' if len(value) > 3 else ''}\n"
185 | elif isinstance(value[0], dict):
186 | # 字典列表,如 summary_table
187 | result_text += f"- **{key}**:\n"
188 | for i, item in enumerate(value):
189 | if isinstance(item, dict):
190 | result_text += f" - {i+1}:\n"
191 | for sub_key, sub_value in item.items():
192 | if isinstance(sub_value, (int, float)):
193 | formatted_sub_value = round(float(sub_value), 4)
194 | result_text += f" - {sub_key}: {formatted_sub_value}\n"
195 | else:
196 | result_text += f" - {sub_key}: {sub_value}\n"
197 | else:
198 | # 其他类型的列表
199 | result_text += f"- **{key}**: {value}\n"
200 | else:
201 | result_text += f"- **{key}**: {value}\n"
202 | else:
203 | result_text += f"- **{key}**: {value}\n"
204 |
205 | # 在末尾添加空行,确保与后续内容分隔
206 | result_text += "\n"
207 |
208 | return result_text
209 |
--------------------------------------------------------------------------------
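下面是 `utils/string_utils.py` 中两个主要函数的最小使用示意(导入路径按目录树推断,示例数据仅作说明):

```python
# 最小使用示意:去除 Markdown 格式、把指标字典格式化为 Markdown
from utils.string_utils import remove_markdown_format, format_indicators_dict

md = "# 标题\n这是**粗体**和[链接](https://example.com)"
print(remove_markdown_format(md))                      # 去除所有 Markdown 格式
print(remove_markdown_format(md, only_headers=True))   # 仅把标题转换为粗体,其余格式保留

indicators = {"RSI": 72.31, "MACD": {"DIF": 1.234, "DEA": 0.987}, "趋势": "上行"}
print(format_indicators_dict(indicators, "技术指标", desc="近 20 个交易日"))
```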
/requirements.txt:
--------------------------------------------------------------------------------
1 | # xystock - 股票分析和交易系统依赖库
2 | # 安装方法: pip install -r requirements.txt
3 | # 注意:只包含当前系统已安装的库,未安装的已注释
4 |
5 | # =============================================================================
6 | # 核心Python库 - 已安装
7 | # =============================================================================
8 | numpy>=2.2.0
9 | pandas>=2.2.0
10 | # scipy>=1.10.0 # 未安装
11 |
12 | # =============================================================================
13 | # 股票数据获取和分析库 - 已安装
14 | # =============================================================================
15 | # 股票数据源
16 | akshare>=1.16.0 # A股数据获取 ✓
17 | tushare>=1.4.0 # Tushare Pro数据 ✓
18 | yfinance>=0.2.0 # 雅虎财经数据 ✓
19 | baostock>=0.8.0              # 证券宝(baostock)数据 ✓
20 | eodhd>=1.0.0 # EOD历史数据 ✓
21 |
22 | # 技术分析
23 | stockstats>=0.6.0 # 股票技术指标计算 ✓
24 | backtrader>=1.9.0 # 量化交易回测框架 ✓
25 | # empyrical==0.5.5 # 风险计算库(主要功能已自己实现)
26 | # ta # 技术分析指标库(未安装)
27 | # talib # TA-Lib技术分析库(未安装)
28 |
29 | # =============================================================================
30 | # 机器学习和AI库 - 大部分未安装
31 | # =============================================================================
32 | # 基础机器学习
33 | # scikit-learn>=1.3.0 # 未安装
34 | # xgboost>=2.0.0 # 未安装
35 | # lightgbm>=4.0.0 # 未安装
36 | # catboost>=1.2.0 # 未安装
37 |
38 | # 深度学习(未安装)
39 | # torch>=2.0.0
40 | # tensorflow>=2.13.0
41 | # transformers>=4.30.0
42 |
43 | # =============================================================================
44 | # LLM和自然语言处理 - 已安装
45 | # =============================================================================
46 | # OpenAI和LLM相关
47 | openai>=1.0.0 # ✓
48 | anthropic>=0.60.0 # ✓
49 | langchain>=0.3.0 # ✓
50 | langchain-openai>=0.3.0 # ✓
51 | langchain-anthropic>=0.3.0 # ✓
52 | langchain-google-genai>=2.0.0 # ✓
53 | langchain-experimental>=0.3.0 # ✓
54 | langchain-community>=0.3.0 # ✓
55 | dashscope>=1.20.0 # 阿里云通义千问 ✓
56 |
57 | # 自然语言处理
58 | nltk>=3.8.0 # ✓
59 | tiktoken>=0.9.0 # ✓
60 | tenacity>=9.0.0 # ✓
61 |
62 | # =============================================================================
63 | # 数据可视化 - 部分已安装
64 | # =============================================================================
65 | matplotlib>=3.10.0 # ✓
66 | # seaborn>=0.13.0 # 未安装
67 | plotly>=5.17.0 # ✓
68 | altair>=5.0.0 # ✓
69 |
70 | # =============================================================================
71 | # Web应用和API - 已安装
72 | # =============================================================================
73 | # Web框架
74 | streamlit>=1.48.0 # ✓
75 | fastapi>=0.110.0 # ✓
76 | starlette>=0.35.0 # ✓
77 | uvicorn>=0.25.0 # ✓
78 |
79 | # Chainlit
80 | chainlit>=2.6.0 # ✓
81 | # gradio>=4.0.0 # 未安装
82 |
83 | # =============================================================================
84 | # 数据库和缓存
85 | # =============================================================================
86 | # redis>=6.0.0 #
87 | # pymongo>=4.10.0 # MongoDB支持
88 | # chromadb>=1.0.0 # 向量数据库
89 |
90 | # =============================================================================
91 | # 网络请求和爬虫 - 已安装
92 | # =============================================================================
93 | requests>=2.32.0 # ✓
94 | beautifulsoup4>=4.13.0 # ✓
95 | lxml>=4.9.0 # ✓
96 | parsel>=1.10.0 # 网页解析 ✓
97 | html5lib>=1.1 # ✓
98 | aiohttp>=3.9.0 # ✓
99 |
100 | # 搜索引擎API
101 | googlesearch-python>=1.3.0 # ✓
102 | baidusearch>=1.0.0 # ✓
103 | duckduckgo-search>=7.5.0 # ✓
104 |
105 | # =============================================================================
106 | # 数据处理和存储 - 已安装
107 | # =============================================================================
108 | openpyxl>=3.1.0 # Excel文件处理 ✓
109 | jsonpath>=0.82.0 # JSON路径查询 ✓
110 | Markdown>=3.4.0 # Markdown处理 ✓
111 | pypandoc>=1.11.0 # 文档格式转换 ✓
112 | reportlab>=4.0.0 # PDF生成 ✓
113 |
114 | # =============================================================================
115 | # 系统工具和配置 - 已安装
116 | # =============================================================================
117 | python-dotenv>=1.0.0 # 环境变量管理 ✓
118 | loguru>=0.7.0 # 日志记录 ✓
119 | rich>=13.0.0 # 终端美化输出 ✓
120 | tqdm>=4.65.0 # 进度条 ✓
121 | psutil>=7.0.0 # 系统信息 ✓
122 | schedule>=1.2.0 # 任务调度 ✓
123 | questionary>=2.0.0 # 命令行交互 ✓
124 |
125 | # =============================================================================
126 | # 时间和时区处理 - 已安装
127 | # =============================================================================
128 | pytz>=2025.0 # ✓
129 | tzdata>=2025.0 # ✓
130 |
131 | # =============================================================================
132 | # 开发和调试工具 - 已安装
133 | # =============================================================================
134 | ipython>=8.0.0 # ✓
135 | # jupyter>=1.0.0 # 主包未安装,但有相关组件
136 | jupyter-client>=8.0.0 # ✓
137 | jupyterlab-widgets>=3.0.0 # ✓
138 |
139 | # 配置文件处理
140 | toml>=0.10.0 # TOML配置文件支持 ✓
141 |
142 | # 类型检查和验证
143 | pydantic>=2.10.0 # ✓
144 | typing-extensions>=4.0.0 # ✓
145 | annotated-types>=0.7.0 # ✓
146 |
147 | # =============================================================================
148 | # 其他工具 - 已安装
149 | # =============================================================================
150 | # setuptools # 包管理(系统自带)
151 | zstandard>=0.23.0 # 压缩算法 ✓
152 | multitasking>=0.0.12 # 多任务处理 ✓
153 | tabulate>=0.9.0 # 表格格式化 ✓
154 | feedparser>=6.0.0 # RSS解析(新闻数据)✓
155 |
156 | # =============================================================================
157 | # 额外已安装的有用库
158 | # =============================================================================
159 | praw>=7.8.0 # Reddit API(情感分析)✓
160 | finnhub-python>=2.4.0 # Finnhub API ✓
161 | pytdx>=1.72 # 通达信数据接口 ✓
162 | efinance>=0.5.5 # 东方财富数据 ✓
163 |
164 | # =============================================================================
165 | # 暂时注释的库(未安装,回头再说)
166 | # =============================================================================
167 | # scipy>=1.10.0 # 科学计算库
168 | # scikit-learn>=1.3.0 # 机器学习基础库
169 | # xgboost>=2.0.0 # 梯度提升框架
170 | # lightgbm>=4.0.0 # 轻量级梯度提升
171 | # catboost>=1.2.0 # Yandex梯度提升
172 | # seaborn>=0.13.0 # 统计绘图库
173 | # gradio>=4.0.0 # ML模型Web界面
174 | # torch>=2.0.0 # PyTorch深度学习
175 | # tensorflow>=2.13.0 # TensorFlow深度学习
176 | # transformers>=4.30.0 # Hugging Face transformers
177 | # ta # 技术分析指标
178 | # talib # TA-Lib技术分析
179 | # selenium>=4.0.0 # 网页自动化
180 | # dash>=2.0.0 # Dash web应用框架
181 | # jupyter>=1.0.0 # Jupyter主包
182 |
--------------------------------------------------------------------------------
/llm/usage_logger.py:
--------------------------------------------------------------------------------
1 | """
2 | OpenAI API 使用记录管理器
3 | """
4 | import pandas as pd
5 | from datetime import datetime
6 | from pathlib import Path
7 | from typing import Dict, Any
8 | import logging
9 |
10 | class UsageLogger:
11 | """OpenAI API 使用记录器"""
12 |
13 | def __init__(self, log_file: str = "data/logs/openai_usage.csv"):
14 | """
15 | 初始化使用记录器
16 |
17 | Args:
18 | log_file: 日志文件路径
19 | """
20 | self.log_file = Path(log_file)
21 | self.log_file.parent.mkdir(parents=True, exist_ok=True)
22 |
23 | # 初始化CSV文件(如果不存在)
24 | if not self.log_file.exists():
25 | self._init_csv()
26 |
27 | def _init_csv(self):
28 | """初始化CSV文件,创建列标题"""
29 | df = pd.DataFrame(columns=[
30 | 'timestamp',
31 | 'model',
32 | 'prompt_tokens',
33 | 'completion_tokens',
34 | 'total_tokens',
35 | 'cost_estimate',
36 | 'temperature',
37 | 'input_text',
38 | 'output_text',
39 | 'response_time',
40 | 'success',
41 | 'error_message'
42 | ])
43 | df.to_csv(self.log_file, index=False)
44 |
45 | def log_usage(self,
46 | model: str,
47 | usage_data: Dict[str, Any],
48 | input_text: str,
49 | output_text: str,
50 | response_time: float,
51 | temperature: float = 0.7,
52 | success: bool = True,
53 | error_message: str = ""):
54 | """
55 | 记录API使用情况
56 |
57 | Args:
58 | model: 使用的模型名称
59 | usage_data: 使用数据(token信息)
60 | input_text: 输入文本
61 | output_text: 输出文本
62 | response_time: 响应时间
63 | temperature: 温度参数
64 | success: 是否成功
65 | error_message: 错误信息
66 | """
67 | # 估算成本(基于OpenAI定价,仅供参考)
68 | cost_estimate = self._estimate_cost(model, usage_data)
69 |
70 | # 创建记录
71 | record = {
72 | 'timestamp': datetime.now().isoformat(),
73 | 'model': model,
74 | 'prompt_tokens': usage_data.get('prompt_tokens', 0),
75 | 'completion_tokens': usage_data.get('completion_tokens', 0),
76 | 'total_tokens': usage_data.get('total_tokens', 0),
77 | 'cost_estimate': cost_estimate,
78 | 'temperature': temperature,
79 | 'input_text': self._truncate_text(input_text, 500), # 截断长文本
80 | 'output_text': self._truncate_text(output_text, 500),
81 | 'response_time': response_time,
82 | 'success': success,
83 | 'error_message': error_message
84 | }
85 |
86 | # 追加到CSV文件
87 | df = pd.DataFrame([record])
88 | df.to_csv(self.log_file, mode='a', header=False, index=False)
89 |
90 | logging.info(f"记录API使用: {model}, tokens: {usage_data.get('total_tokens', 0)}, 成本: ${cost_estimate:.4f}")
91 |
92 | def _estimate_cost(self, model: str, usage_data: Dict[str, Any]) -> float:
93 | """
94 | 估算API调用成本
95 |
96 | Args:
97 | model: 模型名称
98 | usage_data: 使用数据
99 |
100 | Returns:
101 | 估算成本(美元)
102 | """
103 | # OpenAI定价(2024年8月参考价格,实际价格可能变化)
104 | pricing = {
105 | 'gpt-4o': {'input': 0.005 / 1000, 'output': 0.015 / 1000},
106 | 'gpt-4o-mini': {'input': 0.00015 / 1000, 'output': 0.0006 / 1000},
107 | 'gpt-4': {'input': 0.03 / 1000, 'output': 0.06 / 1000},
108 | 'gpt-3.5-turbo': {'input': 0.0015 / 1000, 'output': 0.002 / 1000},
109 | }
110 |
111 | if model not in pricing:
112 | return 0.0
113 |
114 | input_cost = usage_data.get('prompt_tokens', 0) * pricing[model]['input']
115 | output_cost = usage_data.get('completion_tokens', 0) * pricing[model]['output']
116 |
117 | return input_cost + output_cost
118 |
119 | def _truncate_text(self, text: str, max_length: int) -> str:
120 | """截断文本以节省存储空间"""
121 | if len(text) <= max_length:
122 | return text
123 | return text[:max_length] + "..."
124 |
125 | def get_usage_stats(self, days: int = 30) -> Dict[str, Any]:
126 | """
127 | 获取使用统计信息
128 |
129 | Args:
130 | days: 统计天数
131 |
132 | Returns:
133 | 统计信息字典
134 | """
135 | try:
136 | df = pd.read_csv(self.log_file)
137 | if df.empty:
138 | return {}
139 |
140 | # 过滤最近N天的数据
141 | df['timestamp'] = pd.to_datetime(df['timestamp'])
142 | cutoff_date = datetime.now() - pd.Timedelta(days=days)
143 | recent_df = df[df['timestamp'] >= cutoff_date]
144 |
145 | if recent_df.empty:
146 | return {}
147 |
148 | stats = {
149 | 'total_requests': len(recent_df),
150 | 'total_tokens': recent_df['total_tokens'].sum(),
151 | 'total_cost': recent_df['cost_estimate'].sum(),
152 | 'avg_response_time': recent_df['response_time'].mean(),
153 | 'success_rate': recent_df['success'].mean(),
154 | 'model_distribution': recent_df['model'].value_counts().to_dict(),
155 | 'daily_usage': recent_df.groupby(recent_df['timestamp'].dt.date)['total_tokens'].sum().to_dict()
156 | }
157 |
158 | return stats
159 |
160 | except Exception as e:
161 | logging.error(f"获取使用统计失败: {e}")
162 | return {}
163 |
164 | def export_usage_report(self, output_file: str = "reports/usage_report.html"):
165 | """
166 | 导出使用报告
167 |
168 | Args:
169 | output_file: 输出文件路径
170 | """
171 | try:
172 | df = pd.read_csv(self.log_file)
173 | if df.empty:
174 | return
175 |
176 | # 创建输出目录
177 | Path(output_file).parent.mkdir(parents=True, exist_ok=True)
178 |
179 | # 生成HTML报告
180 | html_content = f"""
181 |
182 |
OpenAI API 使用报告
183 |
184 | OpenAI API 使用报告
185 | 总体统计
186 | 总请求数: {len(df)}
187 | 总Token数: {df['total_tokens'].sum()}
188 | 总成本: ${df['cost_estimate'].sum():.4f}
189 | 平均响应时间: {df['response_time'].mean():.2f}秒
190 |
191 | 详细数据
192 | {df.to_html(index=False)}
193 |
194 |
195 | """
196 |
197 | with open(output_file, 'w', encoding='utf-8') as f:
198 | f.write(html_content)
199 |
200 | logging.info(f"使用报告已导出到: {output_file}")
201 |
202 | except Exception as e:
203 | logging.error(f"导出使用报告失败: {e}")
204 |
--------------------------------------------------------------------------------
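`UsageLogger` 的最小使用示意(参数取值仅为演示用的虚构数据,日志默认写入 `data/logs/openai_usage.csv`):

```python
# 记录一次调用并查看统计(示例数据为虚构,仅演示接口)
from llm.usage_logger import UsageLogger

logger = UsageLogger()   # 也可传入自定义 log_file 路径
logger.log_usage(
    model="gpt-4o-mini",
    usage_data={"prompt_tokens": 120, "completion_tokens": 80, "total_tokens": 200},
    input_text="示例输入",
    output_text="示例输出",
    response_time=1.23,
    temperature=0.7,
)

stats = logger.get_usage_stats(days=7)
print(stats.get("total_requests"), stats.get("total_tokens"))

logger.export_usage_report("reports/usage_report.html")   # 导出 HTML 报告
```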
/backtesting/backtest.py:
--------------------------------------------------------------------------------
1 | """
2 | 简单回测框架
3 | 支持基本的买卖策略回测、绩效计算和可视化
4 | """
5 |
6 | import pandas as pd
7 | import numpy as np
8 | from typing import Dict, Callable
9 |
10 |
11 | class SimpleBacktest:
12 | """简单回测引擎"""
13 |
14 | def __init__(self, initial_cash: float = 100000):
15 | """
16 | 初始化回测引擎
17 |
18 | Args:
19 | initial_cash: 初始资金
20 | """
21 | self.initial_cash = initial_cash
22 | self.cash = initial_cash
23 | self.position = 0 # 持仓数量
24 | self.total_value = initial_cash # 总资产
25 |
26 | # 记录历史
27 | self.history = []
28 | self.trades = []
29 |
30 | # 统计指标
31 | self.max_drawdown = 0
32 | self.max_value = initial_cash
33 |
34 | def reset(self):
35 | """重置回测状态"""
36 | self.cash = self.initial_cash
37 | self.position = 0
38 | self.total_value = self.initial_cash
39 | self.history = []
40 | self.trades = []
41 | self.max_drawdown = 0
42 | self.max_value = self.initial_cash
43 |
44 | def buy(self, price: float, volume: int, date: str):
45 | """买入操作"""
46 | cost = price * volume
47 | if cost <= self.cash:
48 | self.cash -= cost
49 | self.position += volume
50 | self.trades.append({
51 | 'date': date,
52 | 'action': 'buy',
53 | 'price': price,
54 | 'volume': volume,
55 | 'cost': cost
56 | })
57 | return True
58 | return False
59 |
60 | def sell(self, price: float, volume: int, date: str):
61 | """卖出操作"""
62 | if volume <= self.position:
63 | self.cash += price * volume
64 | self.position -= volume
65 | self.trades.append({
66 | 'date': date,
67 | 'action': 'sell',
68 | 'price': price,
69 | 'volume': volume,
70 | 'revenue': price * volume
71 | })
72 | return True
73 | return False
74 |
75 | def update_value(self, current_price: float, date: str):
76 | """更新总资产"""
77 | self.total_value = self.cash + self.position * current_price
78 |
79 | # 更新最大回撤
80 | if self.total_value > self.max_value:
81 | self.max_value = self.total_value
82 |
83 | drawdown = (self.max_value - self.total_value) / self.max_value
84 | if drawdown > self.max_drawdown:
85 | self.max_drawdown = drawdown
86 |
87 | # 记录历史
88 | self.history.append({
89 | 'date': date,
90 | 'cash': self.cash,
91 | 'position': self.position,
92 | 'current_price': current_price,
93 | 'total_value': self.total_value,
94 | 'drawdown': drawdown
95 | })
96 |
97 | def run_backtest(self, data: pd.DataFrame, strategy: Callable):
98 | """
99 | 运行回测
100 |
101 | Args:
102 | data: 包含日期、价格等信息的DataFrame
103 | strategy: 策略函数,接收(index, row, backtest, data)参数,返回交易信号
104 | 信号可以是字符串('buy', 'sell', 'hold')或字典({'action': 'buy', 'ratio': 0.3})
105 | """
106 | print("开始回测...")
107 |
108 | for i, row in data.iterrows():
109 | # 执行策略,传递当前回测数据
110 | signal = strategy(i, row, self, data)
111 |
112 | # 处理交易信号
113 | if isinstance(signal, dict):
114 | # 字典格式信号,包含仓位比例
115 | action = signal.get('action', 'hold')
116 | ratio = signal.get('ratio', 1.0) # 默认全仓
117 | else:
118 | # 字符串格式信号,默认全仓
119 | action = signal
120 | ratio = 1.0
121 |
122 | if action == 'buy' and self.cash > 0:
123 | # 按比例买入
124 | max_shares = int(self.cash * ratio / row['close'])
125 | if max_shares > 0:
126 | self.buy(row['close'], max_shares, str(row['date']))
127 |
128 | elif action == 'sell' and self.position > 0:
129 | # 按比例卖出
130 | sell_shares = int(self.position * ratio)
131 | if sell_shares > 0:
132 | self.sell(row['close'], sell_shares, str(row['date']))
133 |
134 | # 更新资产价值
135 | self.update_value(row['close'], str(row['date']))
136 |
137 | print("回测完成!")
138 | return self.get_results()
139 |
140 | def get_results(self) -> Dict:
141 | """获取回测结果"""
142 | if not self.history:
143 | return {}
144 |
145 | history_df = pd.DataFrame(self.history)
146 | trades_df = pd.DataFrame(self.trades) if self.trades else pd.DataFrame()
147 |
148 | # 计算收益率
149 | total_return = (self.total_value - self.initial_cash) / self.initial_cash
150 |
151 | # 计算年化收益率(假设250个交易日)
152 | days = len(history_df)
153 | annual_return = (1 + total_return) ** (250 / days) - 1 if days > 0 else 0
154 |
155 | # 计算夏普比率
156 | returns = history_df['total_value'].pct_change().dropna()
157 | sharpe_ratio = returns.mean() / returns.std() * np.sqrt(250) if len(returns) > 1 else 0
158 |
159 | # 胜率计算
160 | if len(trades_df) > 0:
161 | buy_trades = trades_df[trades_df['action'] == 'buy']
162 | sell_trades = trades_df[trades_df['action'] == 'sell']
163 |
164 | win_count = 0
165 | total_trades = min(len(buy_trades), len(sell_trades))
166 |
167 | for i in range(total_trades):
168 | if sell_trades.iloc[i]['price'] > buy_trades.iloc[i]['price']:
169 | win_count += 1
170 |
171 | win_rate = win_count / total_trades if total_trades > 0 else 0
172 | else:
173 | win_rate = 0
174 | total_trades = 0
175 |
176 | results = {
177 | 'initial_cash': self.initial_cash,
178 | 'final_value': self.total_value,
179 | 'total_return': total_return,
180 | 'annual_return': annual_return,
181 | 'max_drawdown': self.max_drawdown,
182 | 'sharpe_ratio': sharpe_ratio,
183 | 'total_trades': total_trades,
184 | 'win_rate': win_rate,
185 | 'history': history_df,
186 | 'trades': trades_df
187 | }
188 |
189 | return results
190 |
191 | def print_summary(self, results: Dict):
192 | """打印回测摘要"""
193 | print("\n" + "="*50)
194 | print("回测结果摘要")
195 | print("="*50)
196 | print(f"初始资金: {results['initial_cash']:,.2f}")
197 | print(f"最终资产: {results['final_value']:,.2f}")
198 | print(f"总收益率: {results['total_return']:.2%}")
199 | print(f"年化收益率: {results['annual_return']:.2%}")
200 | print(f"最大回撤: {results['max_drawdown']:.2%}")
201 | print(f"夏普比率: {results['sharpe_ratio']:.3f}")
202 | print(f"交易次数: {results['total_trades']}")
203 | print(f"胜率: {results['win_rate']:.2%}")
204 | print("="*50)
205 |
206 |
207 |
--------------------------------------------------------------------------------
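`SimpleBacktest` 的最小使用示意:用随机生成的行情数据跑一个 5 日/20 日均线交叉策略(数据与策略均为演示用假设,实际使用时可替换为真实行情,DataFrame 需包含 `date` 和 `close` 列):

```python
import numpy as np
import pandas as pd
from backtesting.backtest import SimpleBacktest

# 构造演示行情数据
dates = pd.date_range("2024-01-01", periods=120, freq="B")
close = 10 + np.cumsum(np.random.normal(0, 0.1, len(dates)))
data = pd.DataFrame({"date": dates, "close": close})
data["ma5"] = data["close"].rolling(5).mean()
data["ma20"] = data["close"].rolling(20).mean()

def ma_cross_strategy(i, row, backtest, data):
    """金叉半仓买入,死叉清仓,其余时间持有"""
    if pd.isna(row["ma20"]):
        return "hold"
    if row["ma5"] > row["ma20"] and backtest.position == 0:
        return {"action": "buy", "ratio": 0.5}   # 字典信号:按比例建仓
    if row["ma5"] < row["ma20"] and backtest.position > 0:
        return "sell"                            # 字符串信号:默认全仓卖出
    return "hold"

bt = SimpleBacktest(initial_cash=100000)
results = bt.run_backtest(data, ma_cross_strategy)
bt.print_summary(results)
```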
/ui/components/page_settings.py:
--------------------------------------------------------------------------------
1 | """
2 | XY Stock 股票分析系统 - 设置界面
3 | """
4 |
5 | import streamlit as st
6 | import os
7 | import sys
8 |
9 | project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
10 | sys.path.append(project_root)
11 |
12 | from config_manager import config
13 | from ui.config import FULL_VERSION
14 | from stock.analysis_prompts import RISK_PREFERENCE_DESCRIPTIONS, RISK_PREFERENCE_PROMPTS
15 |
16 | def save_config(section, key, value):
17 | """保存单个配置项到配置文件"""
18 | config.set(f'{section}.{key}', value)
19 | config.save()
20 | return True
21 |
22 | def main():
23 | """显示设置界面"""
24 | st.header("⚙️ 系统设置")
25 |
26 | with st.container():
27 | st.subheader("OpenAI API 设置")
28 |
29 | # API 基本设置
30 | api_key = st.text_input(
31 | "API Key",
32 | value=config.get('LLM_OPENAI.API_KEY', ''),
33 | type="password",
34 | help="输入您的OpenAI API密钥"
35 | )
36 |
37 | base_url = st.text_input(
38 | "API Base URL",
39 | value=config.get('LLM_OPENAI.BASE_URL', 'https://api.openai.com/v1'),
40 | help="输入API基础URL,使用替代服务时需要修改"
41 | )
42 |
43 | # 模型设置
44 | col1, col2 = st.columns(2)
45 | with col1:
46 | analysis_model = st.text_input(
47 | "分析模型",
48 | value=config.get('LLM_OPENAI.DEFAULT_MODEL', 'gpt-4o'),
49 | help="用于详细分析的高级模型"
50 | )
51 |
52 | with col2:
53 | inference_model = st.text_input(
54 | "推理模型",
55 | value=config.get('LLM_OPENAI.INFERENCE_MODEL', 'gpt-4o-mini'),
56 | help="用于快速推理的轻量模型"
57 | )
58 |
59 | # 高级设置
60 | with st.expander("高级设置", expanded=False):
61 | timeout = st.number_input(
62 | "超时时间(秒)",
63 | min_value=10,
64 | max_value=300,
65 | value=int(config.get('LLM_OPENAI.TIMEOUT', 60)),
66 | help="API请求超时时间"
67 | )
68 |
69 | max_retries = st.number_input(
70 | "最大重试次数",
71 | min_value=0,
72 | max_value=10,
73 | value=int(config.get('LLM_OPENAI.MAX_RETRIES', 3)),
74 | help="API请求失败时最大重试次数"
75 | )
76 |
77 | temperature = st.slider(
78 | "温度参数",
79 | min_value=0.0,
80 | max_value=2.0,
81 | value=float(config.get('LLM_OPENAI.DEFAULT_TEMPERATURE', 0.7)),
82 | step=0.1,
83 | help="控制生成文本的随机性,值越高越有创意,值越低越确定"
84 | )
85 |
86 | # 缓存设置
87 | with st.expander("缓存设置", expanded=False):
88 | enable_cache = st.toggle(
89 | "启用缓存",
90 | value=config.get('LLM_CACHE.ENABLE_CACHE', False),
91 | help="是否启用API响应缓存"
92 | )
93 |
94 | cache_ttl = st.number_input(
95 | "缓存有效期(秒)",
96 | min_value=60,
97 | max_value=86400,
98 | value=int(config.get('LLM_CACHE.CACHE_TTL', 3600)),
99 | help="缓存数据的有效期"
100 | )
101 |
102 | # 保存按钮
103 | if st.button("💾 保存设置", type="primary"):
104 | try:
105 | save_config('LLM_OPENAI', 'API_KEY', api_key)
106 | save_config('LLM_OPENAI', 'BASE_URL', base_url)
107 | save_config('LLM_OPENAI', 'DEFAULT_MODEL', analysis_model)
108 | save_config('LLM_OPENAI', 'INFERENCE_MODEL', inference_model)
109 |
110 | save_config('LLM_OPENAI', 'TIMEOUT', timeout)
111 | save_config('LLM_OPENAI', 'MAX_RETRIES', max_retries)
112 | save_config('LLM_OPENAI', 'DEFAULT_TEMPERATURE', temperature)
113 |
114 | save_config('LLM_CACHE', 'ENABLE_CACHE', enable_cache)
115 | save_config('LLM_CACHE', 'CACHE_TTL', cache_ttl)
116 |
117 | st.success("设置已保存!")
118 | except Exception as e:
119 | st.error(f"保存失败: {str(e)}")
120 |
121 |
122 | st.subheader("测试连接")
123 | if st.button("🔄 测试API连接"):
124 | with st.spinner("正在测试连接..."):
125 | try:
126 | from llm.openai_client import OpenAIClient
127 |
128 | client = OpenAIClient(api_key=api_key)
129 | response = client.ask("这是一个API连接测试,请回复'连接成功'", model_type="inference")
130 |
131 | if "连接成功" in response:
132 | st.success(f"API连接测试成功!响应:{response}")
133 | else:
134 | st.warning(f"API连接成功但响应不符合预期:{response}")
135 | except Exception as e:
136 | st.error(f"API连接测试失败:{str(e)}")
137 |
138 |
139 | with st.container():
140 | st.subheader("分析偏好设置")
141 |
142 | # 风险偏好选项
143 | risk_preference = st.selectbox(
144 | "分析风险偏好",
145 | options=list(RISK_PREFERENCE_DESCRIPTIONS.keys()),
146 | format_func=lambda x: RISK_PREFERENCE_DESCRIPTIONS[x],
147 | index=list(RISK_PREFERENCE_DESCRIPTIONS.keys()).index(config.get('ANALYSIS.RISK_PREFERENCE', 'neutral')),
148 | help="选择分析风格,影响AI给出建议的保守程度"
149 | )
150 |
151 | # 显示选中风格的具体提示词内容(不包括自定义)
152 | if risk_preference in RISK_PREFERENCE_PROMPTS:
153 | #with st.expander(f"查看「{RISK_PREFERENCE_DESCRIPTIONS[risk_preference]}」的具体提示词", expanded=False):
154 | st.code(RISK_PREFERENCE_PROMPTS[risk_preference], language="markdown")
155 |
156 | # 自定义核心原则(仅在选择自定义时显示)
157 | custom_principles = ""
158 | if risk_preference == 'custom':
159 | custom_principles = st.text_area(
160 | "自定义核心原则",
161 | value=config.get('ANALYSIS.CUSTOM_PRINCIPLES', ''),
162 | placeholder="请输入您的分析核心原则,例如:\n核心原则:\n- 风险第一:...\n- 机会把握:...\n- 操作建议:...",
163 | height=150,
164 | help="请按照Markdown格式输入您的自定义核心原则"
165 | )
166 |
167 | if st.button("💾 保存分析偏好", key="save_analysis_preference", type="primary"):
168 | try:
169 | save_config('ANALYSIS', 'RISK_PREFERENCE', risk_preference)
170 | if risk_preference == 'custom':
171 | save_config('ANALYSIS', 'CUSTOM_PRINCIPLES', custom_principles)
172 | st.success("分析偏好已保存!")
173 | except Exception as e:
174 | st.error(f"保存失败: {str(e)}")
175 |
176 | with st.container():
177 | st.subheader("用户画像")
178 | user_profile = st.text_area(
179 | "请描述您的用户画像",
180 | value=config.get('USER_PROFILE.RAW', ''),
181 | placeholder="例如:\n擅长领域:科技、医疗,长期关注新能源板块\n交易习惯:偏好左/右侧交易,风险偏好,平均持仓时间等",
182 | help="请简要描述您的擅长领域、交易习惯等,有助于系统更好地理解您的需求"
183 | )
184 |
185 | common_mistakes_options = [
186 | "踏空", "套牢", "卖飞", "追高杀跌", "频繁操作", "重仓单一标的", "止损不坚决", "盲目跟风", "情绪化交易", "行情不好时回避关注"
187 | ]
188 | user_mistakes = st.multiselect(
189 | "常犯的错误(可多选)",
190 | options=common_mistakes_options,
191 | default=config.get('USER_PROFILE.MISTAKES', []),
192 | help="请选择您在投资过程中常见的错误,有助于系统个性化分析建议"
193 | )
194 |
195 | if st.button("💾 保存用户画像", key="save_user_profile", type="primary"):
196 | try:
197 | save_config('USER_PROFILE', 'RAW', user_profile)
198 | save_config('USER_PROFILE', 'MISTAKES', user_mistakes)
199 | st.success("用户画像已保存!")
200 | except Exception as e:
201 | st.error(f"保存失败: {str(e)}")
202 |
203 |
204 | st.markdown("---")
205 | st.markdown(
206 | f"""
207 |
208 | {FULL_VERSION} | 配置管理 | 重启应用后设置生效
209 |
210 | """,
211 | unsafe_allow_html=True
212 | )
213 |
214 | if __name__ == "__main__":
215 | main()
216 |
--------------------------------------------------------------------------------
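设置页保存的配置项在其他模块中通过 `config_manager` 读取;下面是一个最小示意,只使用上面代码中出现过的 `config.get / config.set / config.save` 三个调用(`config_manager` 的其余接口未在此文件中出现,不做假设):

```python
# 读取与修改配置的最小示意(键名来自上面 page_settings.py 中的保存逻辑)
from config_manager import config

api_key = config.get('LLM_OPENAI.API_KEY', '')
analysis_model = config.get('LLM_OPENAI.DEFAULT_MODEL', 'gpt-4o')
risk_pref = config.get('ANALYSIS.RISK_PREFERENCE', 'neutral')
print(analysis_model, risk_pref)

# 修改并持久化一个配置项,等价于界面上的"保存设置"
config.set('LLM_OPENAI.DEFAULT_TEMPERATURE', 0.5)
config.save()
```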
/stock/chip_data_cache.py:
--------------------------------------------------------------------------------
1 | """
2 | 筹码数据专用缓存管理器 - 独立存储筹码原始数据
3 | """
4 |
5 | import json
6 | import os
7 | from datetime import datetime, timedelta
8 | from typing import Dict, Optional
9 |
10 |
11 | class ChipDataCache:
12 | """筹码数据专用缓存管理器"""
13 |
14 | def __init__(self, cache_dir: str = "data/cache"):
15 | self.cache_dir = cache_dir
16 | project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17 | self.cache_file = os.path.join(project_dir, cache_dir, "chip_raw_data.json")
18 | os.makedirs(os.path.dirname(self.cache_file), exist_ok=True)
19 |
20 | # 筹码数据缓存配置:24小时过期
21 | self.expire_hours = 24
22 |
23 | def _make_json_safe(self, obj):
24 | """对象转为JSON安全格式"""
25 | import numpy as np
26 | import pandas as pd
27 |
28 | if isinstance(obj, dict):
29 | return {key: self._make_json_safe(value) for key, value in obj.items()}
30 | elif isinstance(obj, list):
31 | return [self._make_json_safe(item) for item in obj]
32 | elif isinstance(obj, pd.Series):
33 | return obj.tolist()
34 | elif isinstance(obj, pd.DataFrame):
35 | return obj.to_dict('records')
36 | elif isinstance(obj, (np.integer, np.int64, np.int32)):
37 | return int(obj)
38 | elif isinstance(obj, (np.floating, np.float64, np.float32)):
39 | return float(obj)
40 | elif isinstance(obj, np.ndarray):
41 | return obj.tolist()
42 | elif pd.isna(obj):
43 | return None
44 | elif hasattr(obj, 'isoformat'):
45 | return obj.isoformat()
46 | else:
47 | return obj
48 |
49 | def load_cache(self) -> Dict:
50 | """加载筹码缓存文件"""
51 | try:
52 | if os.path.exists(self.cache_file):
53 | with open(self.cache_file, 'r', encoding='utf-8') as f:
54 | return json.load(f)
55 | return {}
56 | except Exception as e:
57 | print(f"❌ 读取筹码缓存文件失败: {e}")
58 | return {}
59 |
60 | def save_cache(self, cache_data: Dict):
61 | """保存筹码缓存文件"""
62 | try:
63 | safe_cache_data = self._make_json_safe(cache_data)
64 | with open(self.cache_file, 'w', encoding='utf-8') as f:
65 | json.dump(safe_cache_data, f, ensure_ascii=False, indent=2)
66 | except Exception as e:
67 | print(f"❌ 保存筹码缓存文件失败: {e}")
68 |
69 | def is_cache_valid(self, stock_code: str) -> bool:
70 | """检查筹码缓存是否有效"""
71 | try:
72 | cache_data = self.load_cache()
73 | if stock_code not in cache_data:
74 | return False
75 |
76 | cache_time_str = cache_data[stock_code].get('cache_time')
77 | if not cache_time_str:
78 | return False
79 |
80 | cache_time = datetime.fromisoformat(cache_time_str)
81 | expire_time = cache_time + timedelta(hours=self.expire_hours)
82 |
83 | return datetime.now() < expire_time
84 | except Exception:
85 | return False
86 |
87 | def get_cached_raw_data(self, stock_code: str) -> Optional[list]:
88 | """获取缓存的筹码原始数据"""
89 | try:
90 | if not self.is_cache_valid(stock_code):
91 | return None
92 |
93 | cache_data = self.load_cache()
94 | return cache_data.get(stock_code, {}).get('raw_data')
95 | except Exception:
96 | return None
97 |
98 | def save_raw_data(self, stock_code: str, raw_data: list):
99 | """保存筹码原始数据到缓存"""
100 | try:
101 | cache_data = self.load_cache()
102 |
103 | cache_data[stock_code] = {
104 | 'stock_code': stock_code,
105 | 'raw_data': raw_data,
106 | 'cache_time': datetime.now().isoformat(),
107 | 'data_count': len(raw_data) if raw_data else 0,
108 | 'expire_hours': self.expire_hours
109 | }
110 |
111 | self.save_cache(cache_data)
112 | print(f"💾 {stock_code} 筹码原始数据已缓存 ({len(raw_data) if raw_data else 0}条记录)")
113 | except Exception as e:
114 | print(f"❌ 缓存筹码原始数据失败: {e}")
115 |
116 | def clear_cache(self, stock_code: Optional[str] = None):
117 | """清理筹码缓存"""
118 | try:
119 | if stock_code:
120 | # 清理特定股票的筹码缓存
121 | cache_data = self.load_cache()
122 | if stock_code in cache_data:
123 | del cache_data[stock_code]
124 | self.save_cache(cache_data)
125 | print(f"✅ 已清理 {stock_code} 筹码缓存")
126 | else:
127 | print(f"ℹ️ {stock_code} 筹码缓存不存在")
128 | else:
129 | # 清理所有筹码缓存
130 | if os.path.exists(self.cache_file):
131 | os.remove(self.cache_file)
132 | print("✅ 已清理所有筹码缓存")
133 | else:
134 | print("ℹ️ 筹码缓存文件不存在")
135 | except Exception as e:
136 | print(f"❌ 清理筹码缓存失败: {e}")
137 |
138 | def get_cache_status(self, stock_code: Optional[str] = None) -> Dict:
139 | """获取筹码缓存状态"""
140 | status = {}
141 | current_time = datetime.now()
142 | cache_data = self.load_cache()
143 |
144 | for cached_stock_code, cache_info in cache_data.items():
145 | if stock_code and cached_stock_code != stock_code:
146 | continue
147 |
148 | try:
149 | cache_time = datetime.fromisoformat(cache_info['cache_time'])
150 | expire_time = cache_time + timedelta(hours=self.expire_hours)
151 | is_valid = current_time < expire_time
152 | remaining_hours = (expire_time - current_time).total_seconds() / 3600
153 |
154 | if remaining_hours > 0:
155 | remaining_text = f"剩余 {int(remaining_hours)} 小时"
156 | else:
157 | remaining_text = "已过期"
158 |
159 | status[cached_stock_code] = {
160 | 'stock_code': cached_stock_code,
161 | 'data_count': cache_info.get('data_count', 0),
162 | 'valid': is_valid,
163 | 'cache_time': cache_time.strftime('%Y-%m-%d %H:%M:%S'),
164 | 'expire_hours': self.expire_hours,
165 | 'remaining': remaining_text
166 | }
167 | except Exception:
168 | continue
169 |
170 | return status
171 |
172 | def print_cache_status(self, stock_code: Optional[str] = None):
173 | """打印筹码缓存状态"""
174 | status = self.get_cache_status(stock_code)
175 |
176 | print("=" * 70)
177 | if stock_code:
178 | print(f"📊 股票 {stock_code} 筹码数据缓存状态")
179 | else:
180 | print("📊 筹码数据缓存状态")
181 | print(f"📁 缓存文件: {self.cache_file}")
182 | print("=" * 70)
183 |
184 | if not status:
185 | if stock_code:
186 | print(f"ℹ️ 股票 {stock_code} 无筹码缓存数据")
187 | else:
188 | print("ℹ️ 无筹码缓存数据")
189 | else:
190 | for stock_code, info in status.items():
191 | status_icon = "✅" if info['valid'] else "❌"
192 | print(f"{status_icon} {info['stock_code']:<8} | 数据:{info['data_count']:<4}条 | {info['remaining']:<15} | 过期: {info['expire_hours']}小时")
193 |
194 | try:
195 | if os.path.exists(self.cache_file):
196 | file_size = os.path.getsize(self.cache_file) / 1024 # KB
197 | print(f"💾 缓存文件大小: {file_size:.1f} KB")
198 | else:
199 | print("💾 缓存文件: 不存在")
200 | except Exception:
201 | pass
202 |
203 | print("=" * 70)
204 |
205 |
206 | # 全局筹码缓存管理器实例
207 | _chip_cache_manager = None
208 |
209 | def get_chip_cache_manager() -> ChipDataCache:
210 | """获取全局筹码缓存管理器实例"""
211 | global _chip_cache_manager
212 | if _chip_cache_manager is None:
213 | _chip_cache_manager = ChipDataCache()
214 | return _chip_cache_manager
215 |
--------------------------------------------------------------------------------
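`ChipDataCache` 的最小使用示意(`raw_data` 的字段结构取决于实际的数据源,这里仅用占位数据演示读写流程):

```python
from stock.chip_data_cache import get_chip_cache_manager

cache = get_chip_cache_manager()

# 写入:raw_data 通常是从数据源获取的筹码分布记录列表,这里仅为占位示例
raw_data = [{"date": "2025-09-30", "price": 10.5, "ratio": 0.12}]
cache.save_raw_data("600519", raw_data)

# 读取:先检查 24 小时有效期,再取缓存数据
if cache.is_cache_valid("600519"):
    cached = cache.get_cached_raw_data("600519")
    print(f"命中缓存,共 {len(cached)} 条记录")

cache.print_cache_status("600519")   # 查看单只股票的缓存状态
# cache.clear_cache("600519")        # 需要强制刷新时清理该股票的缓存
```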
/ui/app.py:
--------------------------------------------------------------------------------
1 | """
2 | XY Stock 股票分析系统 - Streamlit Web界面
3 | """
4 |
5 | import streamlit as st
6 | import sys
7 | import os
8 | from datetime import datetime
9 |
10 | project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
11 | sys.path.append(project_root)
12 |
13 | from ui.config import MARKET_TYPES, STOCK_CODE_EXAMPLES
14 | from ui.components.page_settings import main as display_settings
15 | from ui.components.page_token_stats import main as display_token_stats
16 | from ui.components.page_stock import display_stock_info
17 | from ui.components.page_market_overview import display_market_overview
18 | from ui.components.page_cache_management import main as display_cache_management
19 | from stock.stock_code_map import get_stock_identity
20 | from ui.config import FULL_VERSION
21 |
22 | def set_requests_timeout(timeout=30):
23 | """全局设置 requests 默认超时时间(monkey patch)"""
24 | import requests.sessions
25 | old_request = requests.sessions.Session.request
26 | def new_request(self, *args, **kwargs):
27 | if 'timeout' not in kwargs:
28 | kwargs['timeout'] = timeout
29 | return old_request(self, *args, **kwargs)
30 | requests.sessions.Session.request = new_request
31 |
32 | def main():
33 | """主应用程序"""
34 |
35 | set_requests_timeout(30)
36 | st.set_page_config(
37 | page_title="XY Stock 股票分析系统",
38 | page_icon="📈",
39 | layout="wide",
40 | initial_sidebar_state="expanded"
41 | )
42 |
43 | minimal_hide_style = """
44 |
52 | """
53 | st.markdown(minimal_hide_style, unsafe_allow_html=True)
54 |
55 | #st.title("📈 XY Stock 股票分析系统")
56 |
57 | with st.sidebar:
58 | st.header("功能菜单")
59 |
60 | menu = st.radio(
61 | "选择功能:",
62 | ["大盘分析", "股票分析", "缓存管理", "Token统计", "设置"],
63 | index=0,
64 | help="选择要使用的功能模块"
65 | )
66 |
67 | st.markdown("---")
68 | st.write("版本信息:")
69 | st.write(f"- {FULL_VERSION}")
70 | st.write("- 端口: 8811")
71 |
72 | if menu == "大盘分析":
73 | display_market_overview()
74 | elif menu == "股票分析":
75 | display_analysis_page()
76 | elif menu == "缓存管理":
77 | display_cache_management()
78 | elif menu == "Token统计":
79 | display_token_stats()
80 | elif menu == "设置":
81 | display_settings()
82 |
83 |
84 | def display_analysis_page():
85 | """显示股票分析页面"""
86 | st.header("🏢 股票查询")
87 |
88 | market_type = st.selectbox(
89 | "选择市场类型:",
90 | MARKET_TYPES,
91 | index=0,
92 | help="选择要查询的股票市场类型"
93 | )
94 |
95 | if market_type in STOCK_CODE_EXAMPLES:
96 | examples = ", ".join(STOCK_CODE_EXAMPLES[market_type])
97 | st.caption(f"示例代码: {examples}")
98 |
99 | stock_code = st.text_input(
100 | "代码/名称:",
101 | placeholder=f"请输入{market_type}代码",
102 | help=f"输入{market_type}代码进行查询"
103 | )
104 |
105 | use_ai_analysis = st.checkbox("🤖 AI智能分析", value=False, help="选中后将使用AI对股票进行全面分析,包括行情、新闻、筹码、基本面等")
106 | use_cache = st.checkbox("💾 使用缓存数据", value=True, help="使用缓存数据可以加快查询速度,取消勾选将强制获取最新数据")
107 |
108 | # 用户观点输入框(仅在选择AI分析时显示)
109 | user_opinion = ""
110 | user_position = "不确定"
111 | if use_ai_analysis:
112 | user_opinion = st.text_area(
113 | "补充观点(可选):",
114 | placeholder="请输入您对该股票的观点、看法或关注的重点...",
115 | help="输入您的投资观点或关注的重点,AI将结合多维度分析给出综合建议",
116 | height=100
117 | )
118 | user_position = st.selectbox(
119 | "当前持仓状态:",
120 | options=["不确定", "空仓", "低仓位", "中仓位", "重仓", "满仓"],
121 | index=0,
122 | help="请选择您当前的大致持仓状态"
123 | )
124 |
125 | col1, col2, col3 = st.columns([1, 1, 4])
126 | with col1:
127 | query_btn = st.button("🔍 查询", type="primary")
128 | with col2:
129 | clear_btn = st.button("🗑️ 重置")
130 |
131 | # 处理按钮逻辑 - 使用session_state保持状态
132 | if query_btn and stock_code.strip():
133 | # 只有在明确点击查询按钮时才设置显示状态
134 | st.session_state['show_stock_info'] = True
135 | st.session_state['current_stock_code'] = stock_code.strip()
136 | st.session_state['current_market_type'] = market_type
137 | st.session_state['query_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
138 | st.session_state['use_cache'] = use_cache
139 | st.session_state['just_reset'] = False # 标记非重置状态
140 |
141 | if use_ai_analysis:
142 | st.session_state['include_ai_analysis'] = True
143 | st.session_state['user_opinion'] = user_opinion
144 | st.session_state['user_position'] = user_position
145 |
146 | for report_type in ['ai_market_report', 'ai_news_report', 'ai_chip_report',
147 | 'ai_fundamental_report', 'ai_comprehensive_report']:
148 | if report_type not in st.session_state:
149 | st.session_state[report_type] = {}
150 | else:
151 | st.session_state['include_ai_analysis'] = False
152 | else:
153 | st.session_state['use_cache'] = True # 避免在不使用缓存时由于刷新控件导致重复查询
154 |
155 | if clear_btn:
156 | # 标记为刚刚重置,防止意外触发查询
157 | st.session_state['just_reset'] = True
158 |
159 | # 清除所有相关的session state
160 | keys_to_clear = [
161 | 'show_stock_info', 'current_stock_code', 'current_market_type',
162 | 'query_time', 'include_ai_analysis', 'user_opinion', 'user_position',
163 | 'use_cache', 'ai_market_report', 'ai_news_report', 'ai_chip_report',
164 | 'ai_fundamental_report', 'ai_comprehensive_report', 'ai_company_report'
165 | ]
166 |
167 | for key in keys_to_clear:
168 | if key in st.session_state:
169 | del st.session_state[key]
170 |
171 | st.rerun()
172 |
173 | st.subheader("查询结果")
174 |
175 | result_container = st.container()
176 |
177 | # 只有在没有刚刚重置的情况下才显示股票信息
178 | if st.session_state.get('show_stock_info', False) and not st.session_state.get('just_reset', False):
179 | current_stock_code = st.session_state.get('current_stock_code', '')
180 | current_market_type = st.session_state.get('current_market_type', '')
181 | query_time = st.session_state.get('query_time', '')
182 |
183 | with result_container:
184 | with st.spinner("正在查询数据..."):
185 | try:
186 | stock_identity = get_stock_identity(current_stock_code, current_market_type)
187 | if stock_identity is None or stock_identity.get('error'):
188 | st.error(f"获取股票代码失败")
189 | else:
190 | display_stock_info(stock_identity)
191 | with st.expander("📊 详细信息", expanded=False):
192 | st.write(f"**查询时间:** {query_time}")
193 | st.write(f"**市场类型:** {current_market_type}")
194 | st.write(f"**股票代码:** {stock_identity['code']}")
195 | st.write(f"**股票名称:** {stock_identity['name']}")
196 |
197 | except Exception as e:
198 | st.error(f"查询失败: {str(e)}")
199 | st.write("请检查股票代码是否正确,或稍后重试。")
200 |
201 | with st.expander("🔍 错误详情", expanded=False):
202 | st.code(str(e), language="text")
203 | else:
204 | # 清除重置标志,避免影响后续操作
205 | if 'just_reset' in st.session_state:
206 | del st.session_state['just_reset']
207 |
208 | if query_btn:
209 | if not stock_code.strip():
210 | with result_container:
211 | st.warning("请输入股票代码")
212 | else:
213 | with result_container:
214 | st.info("请输入股票代码并点击查询按钮")
215 |
216 | st.markdown("---")
217 | st.markdown(
218 | """
219 |
220 | XY Stock 股票分析系统 | 数据仅供参考,不构成任何投资建议
221 |
222 | """,
223 | unsafe_allow_html=True
224 | )
225 |
226 |
227 | if __name__ == "__main__":
228 | main()
229 |
--------------------------------------------------------------------------------
/utils/risk_metrics.py:
--------------------------------------------------------------------------------
1 | """
2 | 风险指标计算模块
3 | 提供核心金融风险指标的计算功能
4 | """
5 |
6 | import pandas as pd
7 | import numpy as np
8 | from typing import Dict
9 |
10 | class RiskCalculator:
11 | """风险指标计算器"""
12 |
13 | def __init__(self, trading_days_per_year: int = 252):
14 | """
15 | 初始化风险计算器
16 |
17 | Args:
18 | trading_days_per_year: 每年交易日数量,默认252天
19 | """
20 | self.trading_days = trading_days_per_year
21 |
22 | def calculate_returns(self, prices: pd.Series) -> pd.Series:
23 | """计算收益率序列"""
24 | return prices.pct_change().dropna()
25 |
26 | def annual_volatility(self, returns: pd.Series) -> float:
27 | """计算年化波动率"""
28 | return returns.std() * np.sqrt(self.trading_days)
29 |
30 | def max_drawdown(self, returns: pd.Series) -> float:
31 | """计算最大回撤"""
32 | cumulative_returns = (1 + returns).cumprod()
33 | running_max = cumulative_returns.expanding().max()
34 | drawdown = (cumulative_returns - running_max) / running_max
35 | return drawdown.min()
36 |
37 | def sharpe_ratio(self, returns: pd.Series, risk_free_rate: float = 0.0) -> float:
38 | """计算夏普比率"""
39 | daily_risk_free = risk_free_rate / self.trading_days
40 | excess_returns = returns - daily_risk_free
41 | return excess_returns.mean() / returns.std() * np.sqrt(self.trading_days)
42 |
43 | def value_at_risk(self, returns: pd.Series, confidence_level: float = 0.05) -> float:
44 | """计算VaR (Value at Risk)"""
45 | return np.percentile(returns, confidence_level * 100)
46 |
47 | def conditional_var(self, returns: pd.Series, confidence_level: float = 0.05) -> float:
48 | """计算CVaR (Conditional Value at Risk)"""
49 | var = self.value_at_risk(returns, confidence_level)
50 | return returns[returns <= var].mean()
51 |
52 | def calculate_all_metrics(self,
53 | prices: pd.Series,
54 | confidence_level: float = 0.05,
55 | risk_free_rate: float = 0.03) -> Dict[str, float]:
56 | """
57 | 计算所有风险指标
58 |
59 | Args:
60 | prices: 价格序列
61 | confidence_level: VaR/CVaR置信水平
62 | risk_free_rate: 无风险利率
63 |
64 | Returns:
65 | 包含所有风险指标的字典
66 | """
67 | returns = self.calculate_returns(prices)
68 |
69 | if len(returns) == 0:
70 | raise ValueError("价格序列太短,无法计算收益率")
71 |
72 | metrics = {
73 | 'annual_volatility': self.annual_volatility(returns),
74 | 'max_drawdown': self.max_drawdown(returns),
75 | 'sharpe_ratio': self.sharpe_ratio(returns, risk_free_rate),
76 | 'var_95': self.value_at_risk(returns, confidence_level),
77 | 'cvar_95': self.conditional_var(returns, confidence_level),
78 | }
79 |
80 | return metrics
81 |
82 | def get_risk_summary(self,
83 | prices: pd.Series,
84 | confidence_level: float = 0.05,
85 | risk_free_rate: float = 0.03) -> pd.DataFrame:
86 | """
87 | 获取风险指标汇总表
88 |
89 | Args:
90 | prices: 价格序列
91 | confidence_level: VaR/CVaR置信水平
92 | risk_free_rate: 无风险利率
93 |
94 | Returns:
95 | 风险指标汇总DataFrame
96 | """
97 | metrics = self.calculate_all_metrics(prices, confidence_level, risk_free_rate)
98 |
99 | risk_df = pd.DataFrame({
100 | '风险指标': [
101 | '年化波动率',
102 | '最大回撤',
103 | '夏普比率',
104 | f'VaR ({(1-confidence_level)*100:.0f}%)',
105 | f'CVaR ({(1-confidence_level)*100:.0f}%)',
106 | ],
107 | '数值': [
108 | metrics['annual_volatility'],
109 | metrics['max_drawdown'],
110 | metrics['sharpe_ratio'],
111 | metrics['var_95'],
112 | metrics['cvar_95'],
113 | ]
114 | })
115 |
116 | # 添加百分比格式列
117 | risk_df['百分比形式'] = [
118 | f"{metrics['annual_volatility']*100:.2f}%",
119 | f"{metrics['max_drawdown']*100:.2f}%",
120 | f"{metrics['sharpe_ratio']:.4f}",
121 | f"{metrics['var_95']*100:.2f}%",
122 | f"{metrics['cvar_95']*100:.2f}%",
123 | ]
124 |
125 | return risk_df
126 |
127 |
128 | def calculate_portfolio_risk_summary(df: pd.DataFrame,
129 | price_col: str = 'close',
130 | confidence_level: float = 0.05,
131 | risk_free_rate: float = 0.03) -> Dict:
132 | """
133 | 计算单个资产的风险指标摘要(精简版本,仅统计信息,用于缓存)
134 |
135 | Args:
136 | df: 包含价格数据的DataFrame
137 | price_col: 价格列名
138 | confidence_level: VaR/CVaR置信水平
139 | risk_free_rate: 无风险利率
140 |
141 | Returns:
142 | 包含关键统计信息的字典,适合缓存和给大模型分析
143 | """
144 | calculator = RiskCalculator()
145 |
146 | if price_col not in df.columns:
147 | raise ValueError(f"DataFrame中未找到列 '{price_col}'")
148 |
149 | prices = df[price_col].dropna()
150 |
151 | if len(prices) < 5:
152 | raise ValueError("价格数据不足,至少需要5个数据点")
153 |
154 | # 计算风险指标(只保留关键统计数据)
155 | metrics = calculator.calculate_all_metrics(prices, confidence_level, risk_free_rate)
156 | returns = calculator.calculate_returns(prices)
157 | risk_summary = calculator.get_risk_summary(prices, confidence_level, risk_free_rate)
158 |
159 | # 计算价格趋势
160 | price_change = (prices.iloc[-1] - prices.iloc[0]) / prices.iloc[0]
161 | recent_volatility = returns.tail(20).std() * np.sqrt(252) if len(returns) >= 20 else returns.std() * np.sqrt(252)
162 |
163 | # 构建适合大模型分析的风险摘要
164 | risk_analysis = {
165 | 'period_analysis': {
166 | 'data_length': len(df),
167 | 'price_change_pct': float(price_change * 100), # 期间涨跌幅
168 | 'trend_direction': 'up' if price_change > 0 else 'down',
169 | },
170 | 'volatility_analysis': {
171 | 'annual_volatility': float(metrics['annual_volatility']),
172 | 'recent_volatility': float(recent_volatility),
173 | 'volatility_trend': 'increasing' if recent_volatility > metrics['annual_volatility'] else 'decreasing',
174 | },
175 | 'risk_metrics': {
176 | 'max_drawdown': float(metrics['max_drawdown']),
177 | 'sharpe_ratio': float(metrics['sharpe_ratio']),
178 | 'var_5pct': float(metrics['var_95']),
179 | 'cvar_5pct': float(metrics['cvar_95']),
180 | },
181 | 'return_statistics': {
182 | 'daily_return_mean': float(returns.mean()),
183 | 'daily_return_std': float(returns.std()),
184 | 'positive_days_ratio': float((returns > 0).mean()),
185 | 'max_single_day_gain': float(returns.max()),
186 | 'max_single_day_loss': float(returns.min()),
187 | },
188 | 'risk_assessment': {
189 | 'risk_level': _assess_risk_level(metrics['annual_volatility'], metrics['max_drawdown']),
190 | 'stability': _assess_stability(returns),
191 | 'trend_strength': _assess_trend_strength(price_change, metrics['annual_volatility']),
192 | },
193 | 'summary_table': risk_summary,
194 | }
195 |
196 | return risk_analysis
197 |
198 |
199 | def _assess_risk_level(volatility: float, max_drawdown: float) -> str:
200 | """评估风险水平"""
201 | if volatility > 0.3 or abs(max_drawdown) > 0.2:
202 | return 'high'
203 | elif volatility > 0.2 or abs(max_drawdown) > 0.1:
204 | return 'medium'
205 | else:
206 | return 'low'
207 |
208 |
209 | def _assess_stability(returns: pd.Series) -> str:
210 | """评估稳定性"""
211 | volatility = returns.std()
212 | skewness = returns.skew()
213 |
214 | if abs(skewness) > 1 or volatility > returns.mean() * 3:
215 | return 'unstable'
216 | elif abs(skewness) > 0.5 or volatility > returns.mean() * 2:
217 | return 'moderate'
218 | else:
219 | return 'stable'
220 |
221 |
222 | def _assess_trend_strength(price_change: float, volatility: float) -> str:
223 | """评估趋势强度"""
224 | if abs(price_change) > volatility * 2:
225 | return 'strong'
226 | elif abs(price_change) > volatility:
227 | return 'moderate'
228 | else:
229 | return 'weak'
230 |
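231 | 
232 | if __name__ == '__main__':
233 |     # 用法示意:用一段模拟的随机游走价格序列演示 calculate_portfolio_risk_summary
234 |     # (数据为随机构造,'close' 为本函数默认的价格列名)
235 |     rng = np.random.default_rng(42)
236 |     demo_prices = 10 * np.exp(np.cumsum(rng.normal(0.0005, 0.02, 250)))
237 |     demo_df = pd.DataFrame({'close': demo_prices})
238 | 
239 |     summary = calculate_portfolio_risk_summary(demo_df)
240 |     print(summary['risk_metrics'])
241 |     print(summary['risk_assessment'])
242 |     print(summary['summary_table'])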
--------------------------------------------------------------------------------
/stock/etf_holdings_fetcher.py:
--------------------------------------------------------------------------------
1 | """
2 | ETF持仓数据获取器
3 | """
4 | import akshare as ak
5 | import pandas as pd
6 | from typing import Dict, List, Any
7 | from datetime import datetime
8 |
9 |
10 | class ETFHoldingsFetcher:
11 | """ETF持仓数据获取器"""
12 |
13 | def __init__(self):
14 | self.name = "ETF Holdings Fetcher"
15 | self.description = "基于akshare获取ETF持仓数据"
16 |
17 | def get_etf_holdings(self, etf_code: str, date: str = None, top_n: int = -1) -> Dict[str, Any]:
18 | """
19 | 获取ETF持仓信息
20 |
21 | Args:
22 | etf_code: ETF代码,如 '510300'
23 | date: 查询年份,默认为当前年份
24 | top_n: 返回前N大持仓,-1表示返回全部
25 |
26 | Returns:
27 | Dict: 包含持仓信息的字典
28 | """
29 | try:
30 | # 如果没有指定日期,使用当前年份
31 | if date is None:
32 | date = str(datetime.now().year)
33 |
34 | print(f"📊 获取 {etf_code} ETF持仓数据({date}年)...")
35 |
36 | # 获取持仓数据
37 | df_holdings = ak.fund_portfolio_hold_em(symbol=etf_code, date=date)
38 |
39 | if df_holdings.empty:
40 | return {
41 | 'error': f'未获取到 {etf_code} 的持仓数据',
42 | 'etf_code': etf_code,
43 | 'holdings_count': 0,
44 | 'holdings': []
45 | }
46 |
47 | # 按季度分组,找到最新的季度
48 | quarters = df_holdings['季度'].unique()
49 | print(f"📅 发现的季度: {list(quarters)}")
50 |
51 | # 字符串比较找到最新季度(最大的季度字符串)
52 | latest_quarter = max(quarters)
53 | print(f"📅 最新季度: {latest_quarter}")
54 |
55 | # 筛选最新季度的数据
56 | df_latest = df_holdings[df_holdings['季度'] == latest_quarter].copy()
57 |
58 | if df_latest.empty:
59 | return {
60 | 'error': f'未获取到 {etf_code} 最新季度的持仓数据',
61 | 'etf_code': etf_code,
62 | 'holdings_count': 0,
63 | 'holdings': []
64 | }
65 |
66 | # 按占净值比例降序排序
67 | df_latest = df_latest.sort_values('占净值比例', ascending=False)
68 |
69 |             # 处理持仓数据(先基于最新季度的全部持仓构建列表,再按 top_n 截断返回)
70 |             holdings = []
71 |             total_holdings = len(df_latest)
72 | 
73 |             for _, row in df_latest.iterrows():
74 |                 holding = {
75 |                     '序号': int(row.get('序号', 0)),
76 |                     '股票代码': str(row.get('股票代码', '')),
77 |                     '股票名称': str(row.get('股票名称', '')),
78 |                     '占净值比例': float(row.get('占净值比例', 0)),
79 |                     '持股数': row.get('持股数'),
80 |                     '持仓市值': row.get('持仓市值'),
81 |                     '季度': str(row.get('季度', ''))
82 |                 }
83 |                 holdings.append(holding)
84 | 
85 |             # 统计信息(基于全部持仓计算,避免 top_n 截断导致占比偏低)
86 |             top_10_weight = sum([h['占净值比例'] for h in holdings[:10]])
87 |             top_20_weight = sum([h['占净值比例'] for h in holdings[:20]])
88 |             concentration = self._analyze_concentration(holdings)
89 |             # 如果指定了top_n,则只返回前N条
90 |             returned_holdings = holdings[:top_n] if top_n > 0 else holdings
91 | 
92 |             result = {
93 |                 'etf_code': etf_code,
94 |                 'data_date': date,
95 |                 'latest_quarter': latest_quarter,
96 |                 'update_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
97 |                 'total_holdings_count': total_holdings,
98 |                 'returned_holdings_count': len(returned_holdings),
99 |                 'holdings': returned_holdings,
100 |                 'statistics': {
101 |                     'top_10_weight': round(top_10_weight, 2),
102 |                     'top_20_weight': round(top_20_weight, 2),
103 |                     'concentration_analysis': concentration
104 |                 }
105 |             }
106 |
107 | print(f"✅ 成功获取 {etf_code} 持仓数据,{latest_quarter},共 {total_holdings} 只股票")
108 | return result
109 |
110 | except Exception as e:
111 | print(f"❌ 获取 {etf_code} 持仓数据失败: {e}")
112 | return {
113 | 'error': str(e),
114 | 'etf_code': etf_code,
115 | 'holdings_count': 0,
116 | 'holdings': []
117 | }
118 |
119 | def _analyze_concentration(self, holdings: List[Dict]) -> Dict[str, Any]:
120 | """分析持仓集中度"""
121 | if not holdings:
122 | return {}
123 |
124 | # 计算不同层级的集中度
125 | top_5_weight = sum([h['占净值比例'] for h in holdings[:5]])
126 | top_10_weight = sum([h['占净值比例'] for h in holdings[:10]])
127 | top_20_weight = sum([h['占净值比例'] for h in holdings[:20]])
128 |
129 | # 集中度分析
130 | concentration_level = "低"
131 | if top_10_weight > 50:
132 | concentration_level = "高"
133 | elif top_10_weight > 30:
134 | concentration_level = "中"
135 |
136 | return {
137 | 'top_5_weight': round(top_5_weight, 2),
138 | 'top_10_weight': round(top_10_weight, 2),
139 | 'top_20_weight': round(top_20_weight, 2),
140 | 'concentration_level': concentration_level,
141 | 'analysis': f"前10大持仓占比{top_10_weight:.1f}%,集中度{concentration_level}"
142 | }
143 |
144 | def get_multiple_etf_holdings(self, etf_codes: List[str], date: str = None, top_n: int = 10) -> Dict[str, Any]:
145 | """
146 | 批量获取多个ETF的持仓信息
147 |
148 | Args:
149 | etf_codes: ETF代码列表
150 | date: 查询年份
151 | top_n: 每个ETF返回前N大持仓
152 |
153 | Returns:
154 | Dict: 包含所有ETF持仓信息的字典
155 | """
156 | print(f"📊 批量获取 {len(etf_codes)} 个ETF的持仓数据...")
157 |
158 | results = {}
159 | success_count = 0
160 |
161 | for etf_code in etf_codes:
162 | holding_data = self.get_etf_holdings(etf_code, date, top_n)
163 | results[etf_code] = holding_data
164 |
165 | if 'error' not in holding_data:
166 | success_count += 1
167 |
168 | summary = {
169 | 'total_etfs': len(etf_codes),
170 | 'success_count': success_count,
171 | 'failed_count': len(etf_codes) - success_count,
172 | 'update_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
173 | 'etf_data': results
174 | }
175 |
176 | print(f"✅ 批量获取完成,成功 {success_count}/{len(etf_codes)} 个ETF")
177 | return summary
178 |
179 | def format_holdings_for_display(self, holdings_data: Dict[str, Any], max_display: int = 20) -> str:
180 | """
181 | 格式化持仓数据为可显示的文本
182 |
183 | Args:
184 | holdings_data: 持仓数据字典
185 | max_display: 最多显示的持仓数量
186 |
187 | Returns:
188 | str: 格式化后的文本
189 | """
190 | if 'error' in holdings_data:
191 | return f"❌ 获取持仓数据失败: {holdings_data['error']}"
192 |
193 | etf_code = holdings_data['etf_code']
194 | holdings = holdings_data['holdings']
195 | statistics = holdings_data.get('statistics', {})
196 |
197 | # 构建显示文本
198 | lines = []
199 | lines.append(f"📊 ETF {etf_code} 持仓分析")
200 | lines.append("=" * 50)
201 | lines.append(f"📅 数据年份: {holdings_data.get('data_date', '')}")
202 | lines.append(f"📆 最新季度: {holdings_data.get('latest_quarter', '')}")
203 | lines.append(f"📈 持仓股票总数: {holdings_data.get('total_holdings_count', 0)}")
204 |
205 | # 集中度分析
206 | if statistics and 'concentration_analysis' in statistics:
207 | conc = statistics['concentration_analysis']
208 | lines.append(f"🎯 集中度分析: {conc.get('analysis', '')}")
209 |
210 | lines.append("")
211 | lines.append("🏆 主要持仓股票:")
212 | lines.append("-" * 50)
213 |
214 | # 显示持仓明细
215 | display_count = min(len(holdings), max_display)
216 | for i in range(display_count):
217 | holding = holdings[i]
218 | lines.append(f"{holding['序号']:2d}. {holding['股票代码']} {holding['股票名称']:10s} {holding['占净值比例']:6.2f}%")
219 |
220 | if len(holdings) > max_display:
221 | lines.append(f"... 还有 {len(holdings) - max_display} 只股票")
222 |
223 | return "\n".join(lines)
224 |
225 | def get_etf_info(self, etf_code: str) -> Dict[str, Any]:
226 | """
227 | 获取ETF基本信息
228 |
229 | Args:
230 | etf_code: ETF代码
231 |
232 | Returns:
233 | Dict: ETF基本信息
234 | """
235 | try:
236 | print(f"📊 获取 {etf_code} ETF基本信息...")
237 |
238 | # 获取ETF现货数据
239 | df_spot = ak.fund_etf_spot_em()
240 |
241 | # 查找对应的ETF
242 | etf_info = df_spot[df_spot['代码'] == etf_code]
243 |
244 | if etf_info.empty:
245 | return {'error': f'未找到ETF {etf_code} 的基本信息'}
246 |
247 | info = etf_info.iloc[0]
248 |
249 | result = {
250 | 'etf_code': etf_code,
251 | 'etf_name': str(info.get('名称', '')),
252 | 'current_price': float(info.get('最新价', 0)),
253 | 'change_percent': float(info.get('涨跌幅', 0)),
254 | 'change_amount': float(info.get('涨跌额', 0)),
255 | 'volume': float(info.get('成交量', 0)),
256 | 'turnover': float(info.get('成交额', 0)),
257 | 'update_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
258 | }
259 |
260 | print(f"✅ 成功获取 {etf_code} 基本信息")
261 | return result
262 |
263 | except Exception as e:
264 | print(f"❌ 获取 {etf_code} 基本信息失败: {e}")
265 | return {'error': str(e), 'etf_code': etf_code}
266 |
267 |
268 | # 全局实例
269 | etf_holdings_fetcher = ETFHoldingsFetcher()
270 |
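271 | 
272 | if __name__ == '__main__':
273 |     # 用法示意:查询沪深300ETF(510300)的前10大持仓并打印摘要
274 |     # (依赖 akshare 在线接口,需联网;ETF代码仅为示例)
275 |     data = etf_holdings_fetcher.get_etf_holdings('510300', top_n=10)
276 |     print(etf_holdings_fetcher.format_holdings_for_display(data, max_display=10))
277 | 
278 |     info = etf_holdings_fetcher.get_etf_info('510300')
279 |     print(info)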
--------------------------------------------------------------------------------
/stock/stock_code_map.py:
--------------------------------------------------------------------------------
1 | import akshare as ak
2 | import os
3 | import json
4 | from pathlib import Path
5 | import time
6 |
7 | PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
8 | import sys
9 | if PROJECT_ROOT not in sys.path:
10 | sys.path.insert(0, PROJECT_ROOT)
11 |
12 | from ui.config import INDEX_CODE_MAPPING
13 |
14 |
15 | def determine_board_type(stock_code: str) -> str:
16 | """根据股票代码判断所属板块"""
17 | if not stock_code or not isinstance(stock_code, str):
18 | return '未知板块'
19 |
20 | if stock_code.startswith(('600', '601', '603', '605')):
21 | return '上证主板'
22 | elif stock_code.startswith(('000', '001')):
23 | return '深证主板'
24 | elif stock_code.startswith('002'):
25 | return '深证主板' # 原中小板已并入主板
26 | elif stock_code.startswith('300'):
27 | return '创业板'
28 | elif stock_code.startswith('688'):
29 | return '科创板'
30 | elif stock_code.startswith(('430', '83', '87', '88')):
31 | return '北交所'
32 | else:
33 | return '未知板块'
34 |
35 |
36 | def get_corresponding_index(board_type: str) -> str:
37 | """根据板块类型获取对应的指数名称"""
38 | index_mapping = {
39 | '上证主板': '上证指数',
40 | '深证主板': '深证成指',
41 | '创业板': '创业板指',
42 | '科创板': '科创50', # 使用科创50作为科创板代表指数
43 | '北交所': '北证50',
44 | '未知板块': '上证指数' # 默认
45 | }
46 | return index_mapping.get(board_type, '上证指数')
47 |
48 |
49 | # 全局变量,用于缓存股票代码和名称的映射关系
50 | _STOCK_CODE_NAME_MAP = {}
51 | _STOCK_NAME_CODE_MAP = {}
52 | _HK_STOCK_CODE_NAME_MAP = {}
53 | _HK_STOCK_NAME_CODE_MAP = {}
54 |
55 | _INDEX_CODE_NAME_MAP = {code: name for name, code in INDEX_CODE_MAPPING.items()}
56 | _INDEX_NAME_CODE_MAP = INDEX_CODE_MAPPING.copy()
57 |
58 | _LAST_UPDATE_TIME = 0
59 | _HK_LAST_UPDATE_TIME = 0
60 | _MAP_FILE_PATH = os.path.join(Path(__file__).parent.parent, 'data', 'cache', 'stock_code_name_map.json')
61 | _HK_MAP_FILE_PATH = os.path.join(Path(__file__).parent.parent, 'data', 'cache', 'hk_stock_code_name_map.json')
62 |
63 |
64 | def get_stock_identity(stock_code, market_type='A股'):
65 | """
66 | 获取股票身份信息,包括代码、名称、市场类型、币种、板块类型等
67 | """
68 | stock_name = get_stock_name(stock_code, market_type)
69 | stock_code = get_stock_code(stock_code, market_type)
70 |     # 校验:股票代码必须是不超过6位的纯数字,否则返回 None
71 | if not stock_code or not stock_code.isdigit() or len(stock_code) > 6:
72 | return None
73 |
74 | market_name = market_type
75 | currency_name = '人民币' if market_type in ['A股', 'ETF'] else '港币'
76 | currency_symbol = '元' if market_type in ['A股', 'ETF'] else 'HK$'
77 |
78 | # 基础身份信息
79 | identity = {
80 | 'code': stock_code,
81 | 'name': stock_name,
82 | 'market_name': market_name,
83 | 'currency_name': currency_name,
84 | 'currency_symbol': currency_symbol
85 | }
86 |
87 | # 仅对A股计算板块信息
88 | if market_type == 'A股':
89 | board_type = determine_board_type(stock_code)
90 | corresponding_index = get_corresponding_index(board_type)
91 | identity.update({
92 | 'board_type': board_type,
93 | 'corresponding_index': corresponding_index
94 | })
95 |
96 | return identity
97 |
98 | def _ensure_dir_exists(file_path):
99 | """确保文件目录存在"""
100 | os.makedirs(os.path.dirname(file_path), exist_ok=True)
101 |
102 | def _load_stock_map(force_download=False):
103 | """加载股票代码和名称的映射关系"""
104 | global _STOCK_CODE_NAME_MAP, _STOCK_NAME_CODE_MAP, _LAST_UPDATE_TIME
105 |
106 | current_time = time.time()
107 | if _STOCK_CODE_NAME_MAP and (current_time - _LAST_UPDATE_TIME < 86400): # 86400秒 = 24小时
108 | return
109 |
110 | # 尝试从本地文件加载
111 | try:
112 | if os.path.exists(_MAP_FILE_PATH) and not force_download:
113 | with open(_MAP_FILE_PATH, 'r', encoding='utf-8') as f:
114 | data = json.load(f)
115 | _STOCK_CODE_NAME_MAP = data.get('code_to_name', {})
116 | _STOCK_NAME_CODE_MAP = data.get('name_to_code', {})
117 | _LAST_UPDATE_TIME = data.get('update_time', 0)
118 |
119 |             if current_time - _LAST_UPDATE_TIME < 604800:  # 604800秒 = 7天,7天内不强制联网刷新
120 | return
121 | except Exception as e:
122 | print(f"加载股票映射文件失败: {e}")
123 |
124 | # 重新获取股票映射数据
125 | try:
126 | print("正在更新股票代码与名称映射表...")
127 |
128 | stock_info_a = ak.stock_info_a_code_name()
129 |
130 | for _, row in stock_info_a.iterrows():
131 | code = row['code']
132 | name = row['name']
133 | _STOCK_CODE_NAME_MAP[code] = name
134 | _STOCK_NAME_CODE_MAP[name] = code
135 |
136 | # 合并指数映射数据
137 | _STOCK_CODE_NAME_MAP.update(_INDEX_CODE_NAME_MAP)
138 | _STOCK_NAME_CODE_MAP.update(_INDEX_NAME_CODE_MAP)
139 |
140 | # 保存到本地文件
141 | _ensure_dir_exists(_MAP_FILE_PATH)
142 | with open(_MAP_FILE_PATH, 'w', encoding='utf-8') as f:
143 | json.dump({
144 | 'code_to_name': _STOCK_CODE_NAME_MAP,
145 | 'name_to_code': _STOCK_NAME_CODE_MAP,
146 | 'update_time': current_time
147 | }, f, ensure_ascii=False, indent=2)
148 |
149 | _LAST_UPDATE_TIME = current_time
150 | print(f"股票映射表更新完成,共有 {len(_STOCK_CODE_NAME_MAP)} 个股票信息")
151 | except Exception as e:
152 | print(f"获取股票映射关系失败: {e}")
153 |
154 | def _load_hk_stock_map(force_download=False):
155 | """加载港股通股票代码和名称的映射关系"""
156 | global _HK_STOCK_CODE_NAME_MAP, _HK_STOCK_NAME_CODE_MAP, _HK_LAST_UPDATE_TIME
157 |
158 | current_time = time.time()
159 | if _HK_STOCK_CODE_NAME_MAP and (current_time - _HK_LAST_UPDATE_TIME < 86400):
160 | return
161 |
162 | # 尝试从本地文件加载
163 | try:
164 | if os.path.exists(_HK_MAP_FILE_PATH) and not force_download:
165 | with open(_HK_MAP_FILE_PATH, 'r', encoding='utf-8') as f:
166 | data = json.load(f)
167 | _HK_STOCK_CODE_NAME_MAP = data.get('code_to_name', {})
168 | _HK_STOCK_NAME_CODE_MAP = data.get('name_to_code', {})
169 | _HK_LAST_UPDATE_TIME = data.get('update_time', 0)
170 |
171 |             if current_time - _HK_LAST_UPDATE_TIME < 604800:  # 604800秒 = 7天,7天内不强制联网刷新
172 | return
173 | except Exception as e:
174 | print(f"加载港股通映射文件失败: {e}")
175 |
176 | # 重新获取港股通数据
177 | try:
178 | print("正在更新港股通股票代码与名称映射表...")
179 |
180 | hk_stock_info = ak.stock_hk_ggt_components_em()
181 |
182 | for _, row in hk_stock_info.iterrows():
183 | code = row['代码']
184 | name = row['名称']
185 | _HK_STOCK_CODE_NAME_MAP[code] = name
186 | _HK_STOCK_NAME_CODE_MAP[name] = code
187 |
188 | _ensure_dir_exists(_HK_MAP_FILE_PATH)
189 | with open(_HK_MAP_FILE_PATH, 'w', encoding='utf-8') as f:
190 | json.dump({
191 | 'code_to_name': _HK_STOCK_CODE_NAME_MAP,
192 | 'name_to_code': _HK_STOCK_NAME_CODE_MAP,
193 | 'update_time': current_time
194 | }, f, ensure_ascii=False, indent=2)
195 |
196 | _HK_LAST_UPDATE_TIME = current_time
197 | print(f"港股通映射表更新完成,共有 {len(_HK_STOCK_CODE_NAME_MAP)} 个港股通股票信息")
198 | except Exception as e:
199 | print(f"获取港股通映射关系失败: {e}")
200 |
201 | def _find_fuzzy_match(target, mapping_dict):
202 | """模糊匹配辅助函数"""
203 | matched_items = [key for key in mapping_dict.keys() if target in key]
204 | return mapping_dict[matched_items[0]] if matched_items else None
205 |
206 | def get_stock_code(stock_name_or_code, market_type='A股'):
207 | """
208 | 获取证券代码
209 |
210 | Args:
211 | stock_name_or_code: 证券名称或代码
212 | market_type: 证券类型,可选值为'A股'、'ETF'、'港股',默认为'A股'
213 |
214 | Returns:
215 | 证券代码,如果输入已经是代码则直接返回,如果是名称则转换为代码
216 | """
217 | if not stock_name_or_code:
218 | return None
219 |
220 | _load_stock_map()
221 | _load_hk_stock_map()
222 |
223 | if stock_name_or_code in _STOCK_CODE_NAME_MAP or stock_name_or_code in _HK_STOCK_CODE_NAME_MAP:
224 | return stock_name_or_code
225 |
226 | return (_STOCK_NAME_CODE_MAP.get(stock_name_or_code)
227 | or _HK_STOCK_NAME_CODE_MAP.get(stock_name_or_code)
228 | or _find_fuzzy_match(stock_name_or_code, _STOCK_NAME_CODE_MAP)
229 | or _find_fuzzy_match(stock_name_or_code, _HK_STOCK_NAME_CODE_MAP)
230 | or stock_name_or_code)
231 |
232 | def get_stock_name(stock_name_or_code, market_type='A股'):
233 | """
234 | 获取证券名称
235 |
236 | Args:
237 | stock_name_or_code: 证券名称或代码
238 | market_type: 证券类型,可选值为'A股'、'ETF'、'港股',默认为'A股'
239 |
240 | Returns:
241 | 证券名称,如果输入已经是名称则直接返回,如果是代码则转换为名称
242 | """
243 | if not stock_name_or_code:
244 | return None
245 |
246 | _load_stock_map()
247 | _load_hk_stock_map()
248 |
249 | if stock_name_or_code in _STOCK_NAME_CODE_MAP or stock_name_or_code in _HK_STOCK_NAME_CODE_MAP:
250 | return stock_name_or_code
251 |
252 | return (_STOCK_CODE_NAME_MAP.get(stock_name_or_code)
253 | or _HK_STOCK_CODE_NAME_MAP.get(stock_name_or_code)
254 | or stock_name_or_code)
255 |
256 | def _clear_cache_files_and_vars(file_path, var_names):
257 | """清除缓存文件和全局变量的通用函数"""
258 | try:
259 | if os.path.exists(file_path):
260 | os.remove(file_path)
261 | print(f"已删除缓存文件: {file_path}")
262 | else:
263 | print(f"缓存文件不存在: {file_path}")
264 |
265 | # 清空全局变量
266 | globals()[var_names[0]].clear()
267 | globals()[var_names[1]].clear()
268 | globals()[var_names[2]] = 0
269 |
270 | except Exception as e:
271 | print(f"清除缓存失败: {e}")
272 |
273 | def clear_stock_map_cache():
274 | """清除A股股票代码与名称映射缓存文件"""
275 | _clear_cache_files_and_vars(
276 | _MAP_FILE_PATH,
277 | ['_STOCK_CODE_NAME_MAP', '_STOCK_NAME_CODE_MAP', '_LAST_UPDATE_TIME']
278 | )
279 |
280 | def clear_hk_stock_map_cache():
281 | """清除港股通股票代码与名称映射缓存文件"""
282 | _clear_cache_files_and_vars(
283 | _HK_MAP_FILE_PATH,
284 | ['_HK_STOCK_CODE_NAME_MAP', '_HK_STOCK_NAME_CODE_MAP', '_HK_LAST_UPDATE_TIME']
285 | )
286 |
287 |
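288 | 
289 | if __name__ == '__main__':
290 |     # 用法示意:代码/名称互查与板块识别
291 |     # (首次运行会通过 akshare 联网下载映射表,并缓存到 data/cache 目录)
292 |     print(get_stock_code('贵州茅台'))       # 期望输出: 600519
293 |     print(get_stock_name('600519'))         # 期望输出: 贵州茅台
294 |     print(determine_board_type('300750'))   # 期望输出: 创业板
295 |     print(get_stock_identity('600519'))     # 包含代码、名称、板块及对应指数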
--------------------------------------------------------------------------------
/utils/news_tools.py:
--------------------------------------------------------------------------------
1 | """
2 | 新闻工具模块
3 | """
4 |
5 | import akshare as ak
6 | from datetime import datetime, timedelta
7 | from collections import Counter
8 | import json
9 | from llm import openai_client
10 |
11 |
12 | def get_stock_news_by_akshare(stock_code, day=7, debug=False):
13 | """
14 | 获取股票新闻、公告、研究报告(东财数据源)
15 | """
16 | print(f"📊 获取股票 {stock_code} 的详细信息...")
17 | result = {
18 | 'company_news': [],
19 | 'announcements': [], # 暂时取不到
20 | 'research_reports': [],
21 | 'news_summary': {}
22 | }
23 | try:
24 | print(" 获取公司新闻...")
25 | company_news = ak.stock_news_em(stock_code)
26 | if not company_news.empty:
27 | company_news = company_news.to_dict('records')
28 | company_news = sorted(company_news,
29 | key=lambda x: datetime.strptime(x.get('发布时间', '1900-01-01 00:00:00'), '%Y-%m-%d %H:%M:%S'),
30 | reverse=True)
31 | # 时间过滤,保留最近day天的数据,最多10条
32 | if day > 0:
33 | cutoff_date = datetime.now() - timedelta(days=day)
34 | company_news = [news for news in company_news if datetime.strptime(news.get('发布时间', '1900-01-01 00:00:00'), '%Y-%m-%d %H:%M:%S') >= cutoff_date]
35 | company_news = company_news[:10]
36 | if debug: # 内容有时取的很短,查看获取情况
37 | for news in company_news:
38 | if '新闻内容' not in news or not news['新闻内容']:
39 | news['新闻内容'] = "无内容"
40 | else:
41 | print(f" ✓ 成功获取新闻内容: {news['新闻内容']}, len={len(news['新闻内容'])}")
42 | result['company_news'] = company_news
43 | print(f" ✓ 成功获取 {len(result['company_news'])} 条公司新闻")
44 | if debug:
45 |             print(f"✓ 新闻已按发布时间排序,共 {len(result['company_news'])} 条")
46 | except Exception as e:
47 | print(f" ⚠️ 获取公司新闻失败: {e}")
48 | result['news_summary'] = {
49 | 'total_news_count': len(result['company_news']) + len(result['announcements']) + len(result['research_reports']),
50 | 'company_news_count': len(result['company_news']),
51 | 'announcements_count': len(result['announcements']),
52 | 'research_reports_count': len(result['research_reports']),
53 | 'data_freshness': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
54 | 'stock_code': stock_code
55 | }
56 | print(f" ✅ 股票信息获取完成,共 {result['news_summary']['total_news_count']} 条信息")
57 | return result
58 |
59 |
60 | def show_stock_by_ak_summary(stock_info):
61 | """
62 | 美化显示股票信息摘要
63 | """
64 | print(f"📈 股票信息摘要报告")
65 | print("=" * 60)
66 | summary = stock_info['news_summary']
67 | stock_code = summary['stock_code']
68 | print(f"🎯 股票代码: {stock_code}")
69 | print(f"📅 数据更新时间: {summary['data_freshness']}")
70 | print(f"\n📊 数据概览:")
71 | print(f" 📰 公司新闻: {summary['company_news_count']} 条")
72 | print(f" 📋 公司公告: {summary['announcements_count']} 条")
73 | print(f" 📄 研究报告: {summary['research_reports_count']} 条")
74 | print(f" 📈 总计: {summary['total_news_count']} 条")
75 | if stock_info['company_news']:
76 | print(f"\n📰 最新公司新闻(前5条):")
77 | for i, news in enumerate(stock_info['company_news'][:5], 1):
78 | title = news.get('新闻标题', news.get('title', '无标题'))
79 | time = news.get('发布时间', news.get('publish_time', '无时间'))
80 | print(f" {i}. {title}")
81 | print(f" 时间: {time}")
82 | if stock_info['announcements']:
83 | print(f"\n📋 最新公司公告(前3条):")
84 | for i, announcement in enumerate(stock_info['announcements'][:3], 1):
85 | title = (announcement.get('公告标题') or
86 | announcement.get('标题') or
87 | announcement.get('title') or
88 | '无标题')
89 | time = (announcement.get('公告日期') or
90 | announcement.get('发布时间') or
91 | announcement.get('日期') or
92 | '无时间')
93 | print(f" {i}. {title}")
94 | print(f" 时间: {time}")
95 | if stock_info['research_reports']:
96 | print(f"\n⭐ 研究报告评级分布:")
97 | ratings = []
98 | for report in stock_info['research_reports']:
99 | rating = (report.get('投资评级') or
100 | report.get('评级') or
101 | report.get('rating') or
102 | report.get('最新评级'))
103 | if rating and rating != 'nan':
104 | ratings.append(rating)
105 | if ratings:
106 | rating_count = Counter(ratings)
107 | for rating, count in rating_count.most_common():
108 | print(f" • {rating}: {count} 个机构")
109 | else:
110 | print(" 暂无评级信息")
111 |
112 |
113 | SENTIMENT_ANALYSIS_PROMPT_TEMPLATE = """请分析以下新闻对股票"{stock_name}"的投资情绪影响:
114 |
115 | 新闻标题: {news_title}
116 | 新闻内容: {news_content}
117 |
118 | 请从该股票投资者的角度分析这条新闻的三项指标:
119 | - 情绪类别:乐观、中性、悲观
120 | - 情绪强度:1-5(1=轻微影响,5=重大影响)
121 | - 预期偏离度:1-5(1=符合预期,5=非常出乎意料)
122 |
123 | 返回JSON格式:{return_format}"""
124 |
125 | def filter_time(news_list, start_date, end_date):
126 |     """按发布时间保留 [start_date, end_date] 区间内的新闻"""
127 |     filtered_news = []
128 |     start_dt = datetime.strptime(start_date, '%Y-%m-%d')
129 |     end_dt = datetime.strptime(end_date, '%Y-%m-%d')
130 |     for item in news_list:
131 |         publish_time_str = item.get('发布时间', '')
132 |         if publish_time_str:
133 |             news_dt = datetime.strptime(publish_time_str.split(' ')[0], '%Y-%m-%d')
134 |             if start_dt <= news_dt <= end_dt:
135 |                 filtered_news.append(item)
136 |     return filtered_news
137 |
138 | def analyze_news_sentiment(news_base, stock_name, start_date=None, end_date=None, limit=-1, debug=False):
139 | """
140 | 调用大模型分析新闻情绪
141 | """
142 | client = openai_client.OpenAIClient()
143 |     ret_array = []
144 |     use_range = start_date is not None and end_date is not None
145 |     news = filter_time(news_base, start_date, end_date) if use_range else news_base
146 |     if use_range and debug:
147 |         print(f"✓ 新闻已按时间范围过滤,{len(news_base)} -> {len(news)} 条")
148 |
149 | for idx, item in enumerate(news):
150 | if '新闻标题' not in item or '新闻内容' not in item:
151 | print("新闻数据不完整,跳过该条新闻")
152 | continue
153 |
154 | return_format = "{'sentiment':'xx', 'intensity': 5, 'deviation': 3}"
155 |
156 | # 使用提示词模板
157 | query = SENTIMENT_ANALYSIS_PROMPT_TEMPLATE.format(
158 | stock_name=stock_name,
159 | news_title=item['新闻标题'],
160 | news_content=item['新闻内容'],
161 | return_format=return_format
162 | )
163 |
164 | try:
165 | # 使用JSON模式
166 | ai_result = client.ask(query, json_mode=True, debug=debug)
167 | if debug:
168 | print(f"新闻标题: {item['新闻标题']}")
169 | print(f"新闻内容: {item['新闻内容']}")
170 | print(f"\nJSON模式返回: {ai_result}")
171 |
172 | # 直接解析JSON,不需要额外的字符串处理
173 | sentiment_data = json.loads(ai_result)
174 | item['情绪类型'] = sentiment_data.get('sentiment', '中性')
175 | item['情绪级别'] = sentiment_data.get('intensity', 3)
176 | item['预期偏离度'] = sentiment_data.get('deviation', 3)
177 | except json.JSONDecodeError as e:
178 | print(f"JSON解析失败: {e}")
179 | print(f"返回内容: {ai_result}")
180 | except Exception as e:
181 | print(f"其他错误: {e}")
182 |         ret_array.append(item)
183 |         if limit != -1 and len(ret_array) >= limit:
184 |             break
185 |
186 | return ret_array
187 |
188 |
189 | def get_market_news_caixin(limit=10, debug=False):
190 | """
191 | 获取财新网宏观经济和市场新闻(政策面、大盘相关)
192 | """
193 | print("📊 获取财新网宏观经济新闻...")
194 | result = {
195 | 'market_news': [],
196 | 'news_summary': {}
197 | }
198 | ret = True
199 |
200 | try:
201 | # 获取财新网数据
202 | caixin_data = ak.stock_news_main_cx()
203 |
204 | if not caixin_data.empty:
205 | market_news = caixin_data.to_dict('records')
206 |
207 | # 过滤出有实际内容的新闻(有summary和url的)
208 | filtered_news = []
209 | for news in market_news:
210 | if news.get('summary') and news.get('url') and str(news.get('summary')).strip():
211 | # 统一字段名称以便后续处理
212 | formatted_news = {
213 | '新闻标题': news.get('tag', '无标题'),
214 | '新闻内容': news.get('summary', '无内容'),
215 | '发布时间': news.get('pub_time', '无时间'),
216 | '相对时间': news.get('interval_time', ''),
217 | '新闻链接': news.get('url', ''),
218 | '新闻类型': '宏观经济'
219 | }
220 | filtered_news.append(formatted_news)
221 |
222 | # 按发布时间排序(最新的在前面)
223 | try:
224 | filtered_news = sorted(filtered_news,
225 | key=lambda x: datetime.strptime(x.get('发布时间', '1900-01-01 00:00:00.000').split('.')[0], '%Y-%m-%d %H:%M:%S'),
226 | reverse=True)
227 | except:
228 | # 如果时间格式解析失败,保持原有顺序
229 | pass
230 |
231 | result['market_news'] = filtered_news[:limit]
232 |
233 | if debug:
234 | print(f" ✓ 成功获取 {len(result['market_news'])} 条宏观新闻")
235 | for i, news in enumerate(result['market_news'][:3]):
236 | print(f" {i+1}. {news['新闻标题']}")
237 | print(f" 时间: {news['发布时间']} ({news['相对时间']})")
238 | print(f" 内容: {news['新闻内容'][:100]}...")
239 | print()
240 |
241 | result['news_summary'] = {
242 | 'total_market_news_count': len(result['market_news']),
243 | 'data_source': '财新网',
244 | 'data_freshness': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
245 | 'news_type': '宏观经济、政策面、大盘相关'
246 | }
247 |
248 | print(f" ✅ 财新网新闻获取完成,共 {result['news_summary']['total_market_news_count']} 条信息")
249 |
250 | except Exception as e:
251 | print(f" ⚠️ 获取财新网新闻失败: {e}")
252 | ret = False
253 | result['error'] = str(e)
254 |
255 | return ret, result
256 |
257 |
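258 | 
259 | if __name__ == '__main__':
260 |     # 用法示意:抓取个股新闻并调用大模型做情绪分析
261 |     # (需联网且已在配置中设置 LLM_OPENAI.API_KEY;股票代码与日期仅为示例)
262 |     info = get_stock_news_by_akshare('600519', day=7)
263 |     show_stock_by_ak_summary(info)
264 | 
265 |     end_date = datetime.now().strftime('%Y-%m-%d')
266 |     start_date = (datetime.now() - timedelta(days=7)).strftime('%Y-%m-%d')
267 |     analyzed = analyze_news_sentiment(info['company_news'], '贵州茅台',
268 |                                       start_date=start_date, end_date=end_date, limit=3)
269 |     for item in analyzed:
270 |         print(item.get('新闻标题'), item.get('情绪类型'), item.get('情绪级别'))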
--------------------------------------------------------------------------------
/ui/components/page_common.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | import sys
3 | import os
4 | import datetime
5 | import pandas as pd
6 | import plotly.graph_objects as go
7 |
8 | project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
9 | if project_root not in sys.path:
10 | sys.path.append(project_root)
11 |
12 | from utils.format_utils import format_price
13 | from utils.data_formatters import format_risk_metrics
14 | from utils.string_utils import remove_markdown_format
15 |
16 | def display_technical_indicators(tech_data):
17 | """显示技术指标分析卡片"""
18 |
19 | st.markdown("""
20 |
26 | """, unsafe_allow_html=True)
27 |
28 | st.subheader("技术指标分析")
29 |
30 | if not tech_data:
31 | st.warning("未获取到技术指标数据")
32 | return
33 |
34 | # 基础信息
35 | col1, col2 = st.columns(2)
36 | with col1:
37 | st.metric("MA趋势", tech_data.get('ma_trend', 'N/A'))
38 | with col2:
39 | st.metric("MACD趋势", tech_data.get('macd_trend', 'N/A'))
40 |
41 | # 移动平均线
42 | with st.expander("📈 移动平均线", expanded=True):
43 | ma_col1, ma_col2, ma_col3, ma_col4 = st.columns(4)
44 | with ma_col1:
45 | ma_5 = tech_data.get('ma_5')
46 | st.metric("MA5", format_price(ma_5) if ma_5 else "N/A")
47 | with ma_col2:
48 | ma_10 = tech_data.get('ma_10')
49 | st.metric("MA10", format_price(ma_10) if ma_10 else "N/A")
50 | with ma_col3:
51 | ma_20 = tech_data.get('ma_20')
52 | st.metric("MA20", format_price(ma_20) if ma_20 else "N/A")
53 | with ma_col4:
54 | ma_60 = tech_data.get('ma_60')
55 | st.metric("MA60", format_price(ma_60) if ma_60 else "N/A")
56 |
57 | # 技术指标
58 | with st.expander("🔢 技术指标", expanded=True):
59 | tech_col1, tech_col2, tech_col3 = st.columns(3)
60 | with tech_col1:
61 | rsi_14 = tech_data.get('rsi_14')
62 | st.metric("RSI(14)", format_price(rsi_14) if rsi_14 else "N/A")
63 | with tech_col2:
64 | kdj_k = tech_data.get('kdj_k')
65 | st.metric("KDJ-K", format_price(kdj_k) if kdj_k else "N/A")
66 | with tech_col3:
67 | kdj_d = tech_data.get('kdj_d')
68 | st.metric("KDJ-D", format_price(kdj_d) if kdj_d else "N/A")
69 |
70 | # MACD指标
71 | macd_col1, macd_col2, macd_col3 = st.columns(3)
72 | with macd_col1:
73 | macd = tech_data.get('macd')
74 | st.metric("MACD", f"{macd:.4f}" if macd else "N/A")
75 | with macd_col2:
76 | macd_signal = tech_data.get('macd_signal')
77 | st.metric("MACD信号", f"{macd_signal:.4f}" if macd_signal else "N/A")
78 | with macd_col3:
79 | macd_hist = tech_data.get('macd_histogram')
80 | st.metric("MACD柱状", f"{macd_hist:.4f}" if macd_hist else "N/A")
81 |
82 | # 布林带指标
83 | boll_col1, boll_col2, boll_col3 = st.columns(3)
84 | with boll_col1:
85 | boll_upper = tech_data.get('boll_upper')
86 | st.metric("布林上轨", format_price(boll_upper) if boll_upper else "N/A")
87 | with boll_col2:
88 | boll_middle = tech_data.get('boll_middle')
89 | st.metric("布林中轨", format_price(boll_middle) if boll_middle else "N/A")
90 | with boll_col3:
91 | boll_lower = tech_data.get('boll_lower')
92 | st.metric("布林下轨", format_price(boll_lower) if boll_lower else "N/A")
93 |
94 | # 其他技术指标
95 | other_col1, other_col2, other_col3 = st.columns(3)
96 | with other_col1:
97 | wr_14 = tech_data.get('wr_14')
98 | st.metric("威廉指标", format_price(wr_14) if wr_14 else "N/A")
99 | with other_col2:
100 | cci_14 = tech_data.get('cci_14')
101 | st.metric("CCI指标", format_price(cci_14) if cci_14 else "N/A")
102 | with other_col3:
103 | kdj_j = tech_data.get('kdj_j')
104 | st.metric("KDJ-J", format_price(kdj_j) if kdj_j else "N/A")
105 |
106 |
107 | def display_technical_analysis_tab(stock_identity=None, index_name=None):
108 | """
109 | 显示技术指标分析Tab的完整内容
110 | 适用于股票和大盘指数的技术分析
111 |
112 | Args:
113 | stock_identity: 股票标识信息 (用于股票分析)
114 | index_name: 指数名称 (用于大盘分析,如'上证指数')
115 | """
116 | if stock_identity and index_name:
117 | st.error("stock_identity 和 index_name 不能同时提供")
118 | return
119 |
120 | if not stock_identity and not index_name:
121 | st.error("必须提供 stock_identity 或 index_name 中的一个")
122 | return
123 |
124 | try:
125 | use_cache = st.session_state.get('use_cache', True) or st.session_state.get('market_use_cache', True)
126 | force_refresh = not use_cache
127 |
128 | # 根据类型获取技术指标数据
129 | if stock_identity:
130 | # 股票技术分析
131 | from stock.stock_data_tools import get_stock_tools
132 | stock_tools = get_stock_tools()
133 |
134 | kline_info = stock_tools.get_stock_kline_data(
135 | stock_identity,
136 | period=160,
137 | use_cache=use_cache,
138 | force_refresh=force_refresh
139 | )
140 |
141 | if 'error' in kline_info:
142 | st.error(f"获取K线数据失败: {kline_info['error']}")
143 | return
144 |
145 | indicators = kline_info.get('indicators', {})
146 |
147 | elif index_name:
148 | # 大盘指数技术分析
149 | from market.market_data_tools import get_market_tools
150 | market_tools = get_market_tools()
151 |
152 | indicators = market_tools.get_index_technical_indicators(index_name)
153 |
154 | # 显示技术指标
155 | if indicators:
156 | display_technical_indicators(indicators)
157 | else:
158 | st.warning("未获取到技术指标数据")
159 |
160 | except Exception as e:
161 | st.error(f"加载技术分析数据失败: {str(e)}")
162 | with st.expander("🔍 错误详情", expanded=False):
163 | st.code(str(e), language="text")
164 |
165 |
166 | def display_risk_analysis(risk_metrics):
167 | """显示风险分析"""
168 |     if risk_metrics is None or 'error' in risk_metrics:
169 |         st.error(f"获取风险指标失败: {risk_metrics.get('error', '未知错误') if risk_metrics else '无数据'}")
170 |         return
171 |
172 | # 尝试使用格式化的风险指标文本
173 | formatted_risk_text = format_risk_metrics(risk_metrics, with_header=False)
174 |
175 | if formatted_risk_text:
176 | # 显示格式化的风险分析文本
177 | with st.expander("⚠️ 详细风险分析", expanded=True):
178 | formatted_risk_text = remove_markdown_format(formatted_risk_text, only_headers=True)
179 | st.markdown(formatted_risk_text)
180 |
181 | # 如果有summary_table,也显示表格形式
182 | if risk_metrics and 'summary_table' in risk_metrics:
183 | with st.expander("📊 风险分析表格", expanded=False):
184 | st.table(risk_metrics['summary_table'])
185 |
186 | # 如果以上都没有,显示原始数据
187 | elif not formatted_risk_text and 'error' not in risk_metrics:
188 | with st.expander("📊 风险分析摘要", expanded=True):
189 | st.json(risk_metrics)
190 |
191 |
192 | def display_kline_charts(df, chart_type="stock", title_prefix=""):
193 | """
194 | 统一的K线图和成交量图表显示函数
195 |
196 | Args:
197 | df: 包含K线数据的DataFrame,必须包含 datetime, open, high, low, close, volume 列
198 | chart_type: 图表类型,"stock"表示股票,"index"表示指数
199 | title_prefix: 标题前缀,如股票名称或指数名称
200 | """
201 | if df is None or df.empty:
202 | st.warning("无K线数据可显示")
203 | return
204 |
205 | # 转换日期格式
206 | df = df.copy()
207 | df['datetime'] = pd.to_datetime(df['datetime'])
208 |
209 | # 根据类型设置标题和Y轴标签
210 | if chart_type == "index":
211 | price_title = f"{title_prefix}指数K线图与均线" if title_prefix else "指数K线图与均线"
212 | yaxis_title = "指数点位"
213 | else:
214 | price_title = f"{title_prefix}K线图与均线" if title_prefix else "K线图与均线"
215 | yaxis_title = "价格"
216 |
217 | # K线图与均线
218 | fig_price = go.Figure()
219 |
220 | # 添加K线图
221 | fig_price.add_trace(go.Candlestick(
222 | x=df['datetime'],
223 | open=df['open'],
224 | high=df['high'],
225 | low=df['low'],
226 | close=df['close'],
227 | name='K线',
228 | increasing_line_color="#DA1A10",
229 | decreasing_line_color="#14AA06",
230 | increasing_fillcolor="#F51D12",
231 | decreasing_fillcolor="#1BCC0B"
232 | ))
233 |
234 | # 添加均线(如果存在)
235 | ma_lines = [
236 | ('MA5', '#D2FF07'),
237 | ('MA10', '#FF22DA'),
238 | ('MA20', '#0593F1'),
239 | ('MA60', '#FFA500')
240 | ]
241 |
242 | for ma_name, color in ma_lines:
243 | if ma_name in df.columns and not df[ma_name].isna().all():
244 | fig_price.add_trace(go.Scatter(
245 | x=df['datetime'],
246 | y=df[ma_name],
247 | mode='lines',
248 | name=ma_name,
249 | line=dict(color=color, width=1.5)
250 | ))
251 |
252 | # 设置K线图布局
253 | fig_price.update_layout(
254 | title=price_title,
255 | xaxis_title='日期',
256 | yaxis_title=yaxis_title,
257 | height=500,
258 | margin=dict(l=0, r=0, t=40, b=0),
259 | legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
260 | xaxis=dict(rangeslider=dict(visible=False)),
261 | yaxis=dict(fixedrange=True)
262 | )
263 |
264 | st.plotly_chart(fig_price, use_container_width=True)
265 |
266 | # 成交量图
267 | if 'volume' in df.columns and not df['volume'].isna().all():
268 | fig_volume = go.Figure()
269 |
270 | fig_volume.add_trace(go.Bar(
271 | x=df['datetime'],
272 | y=df['volume'],
273 | name='成交量',
274 | marker=dict(color='#90CAF9')
275 | ))
276 |
277 | fig_volume.update_layout(
278 | title='成交量',
279 | xaxis_title='日期',
280 | yaxis_title='成交量',
281 | height=250,
282 | margin=dict(l=0, r=0, t=40, b=0),
283 | xaxis=dict(rangeslider=dict(visible=False)),
284 | yaxis=dict(fixedrange=True)
285 | )
286 |
287 | st.plotly_chart(fig_volume, use_container_width=True)
288 | else:
289 | st.info("暂无成交量数据")
290 |
291 |
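292 | 
293 | if __name__ == '__main__':
294 |     # 用法示意:用一段模拟的30根日K数据预览 display_kline_charts 组件
295 |     # (需通过 `streamlit run ui/components/page_common.py` 启动才能看到图表)
296 |     base = pd.Series(range(30), dtype=float) * 0.1
297 |     demo_df = pd.DataFrame({
298 |         'datetime': pd.date_range('2024-01-01', periods=30, freq='D'),
299 |         'open': 10.0 + base,
300 |         'close': 10.2 + base,
301 |         'high': 10.4 + base,
302 |         'low': 9.8 + base,
303 |         'volume': [100000 + 1000 * i for i in range(30)],
304 |     })
305 |     display_kline_charts(demo_df, chart_type="stock", title_prefix="示例")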
--------------------------------------------------------------------------------
/utils/report_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tempfile
3 | import subprocess
4 | import shutil
5 |
6 | try:
7 | import pypandoc
8 | try:
9 | pypandoc.get_pandoc_version()
10 | PANDOC_AVAILABLE = True
11 | print("✅ pandoc可用")
12 | except OSError:
13 | print("⚠️ 未找到pandoc,正在尝试自动下载...")
14 | try:
15 | pypandoc.download_pandoc()
16 | PANDOC_AVAILABLE = True
17 | print("✅ pandoc下载成功!")
18 | except Exception as download_error:
19 | print(f"❌ pandoc下载失败: {download_error}")
20 | PANDOC_AVAILABLE = False
21 | except ImportError:
22 | PANDOC_AVAILABLE = False
23 | print("❌ pypandoc未安装,PDF功能不可用")
24 |
25 | def check_weasyprint_available():
26 | """检查weasyprint是否可用"""
27 | try:
28 | if shutil.which('weasyprint'):
29 | try:
30 | result = subprocess.run(['weasyprint', '--version'],
31 | capture_output=True, text=True, timeout=5)
32 | if result.returncode == 0:
33 | print("✅ weasyprint命令行工具可用")
34 | return True
35 | except (subprocess.TimeoutExpired, subprocess.SubprocessError):
36 | pass
37 |
38 | print("❌ weasyprint不可用")
39 | return False
40 | except Exception as e:
41 | print(f"❌ 检查weasyprint时出错: {e}")
42 | return False
43 |
44 | WEASYPRINT_AVAILABLE = check_weasyprint_available()
45 | PDF_SUPPORT_AVAILABLE = PANDOC_AVAILABLE and WEASYPRINT_AVAILABLE
46 |
47 |
48 | def _clean_markdown_for_pandoc(content):
49 | """清理Markdown内容避免pandoc YAML解析问题"""
50 | if not content:
51 | return ""
52 |
53 | content = content.strip()
54 |
55 | # 处理可能被误认为YAML的内容
56 | lines = content.split('\n')
57 | if lines and (lines[0].startswith('---') or lines[0].startswith('...')):
58 | content = '\n' + content
59 |
60 | # 保护表格分隔符,然后替换其他问题字符
61 | content = content.replace('|------|------|', '|TABLESEP|TABLESEP|')
62 | content = content.replace('|------|', '|TABLESEP|')
63 | content = content.replace('---', '—')
64 | content = content.replace('...', '…')
65 | content = content.replace('|TABLESEP|TABLESEP|', '|------|------|')
66 | content = content.replace('|TABLESEP|', '|------|')
67 |
68 |     # 清理特殊引号(将中文弯引号统一替换为英文直引号)
69 |     content = content.replace('\u201c', '"')  # 左双引号 “
70 |     content = content.replace('\u201d', '"')  # 右双引号 ”
71 |     content = content.replace('\u2018', "'")  # 左单引号 ‘
72 |     content = content.replace('\u2019', "'")  # 右单引号 ’
73 |
74 | if not content.startswith('#'):
75 | content = '# 分析报告\n\n' + content
76 |
77 | return content
78 |
79 |
80 | def generate_pdf_report(md_content):
81 | """将Markdown内容转换为PDF"""
82 |
83 | print("📊 开始生成PDF文档...")
84 |
85 | if not PANDOC_AVAILABLE:
86 | print("❌ Pandoc不可用")
87 | raise Exception("Pandoc不可用,无法生成PDF文档。请安装pandoc或使用Markdown格式导出。")
88 |
89 | print(f"✅ Markdown内容生成完成,长度: {len(md_content)} 字符")
90 |
91 | pdf_engines = [
92 | ('weasyprint', '现代HTML转PDF引擎'),
93 | (None, '使用pandoc默认引擎')
94 | ]
95 |
96 | last_error = None
97 |
98 | for engine_info in pdf_engines:
99 | engine, description = engine_info
100 | try:
101 | with tempfile.NamedTemporaryFile(suffix='.pdf', delete=False) as tmp_file:
102 | output_file = tmp_file.name
103 |
104 | extra_args = ['--from=markdown-yaml_metadata_block']
105 |
106 | if engine:
107 | extra_args.append(f'--pdf-engine={engine}')
108 | print(f"🔧 使用PDF引擎: {engine}")
109 | else:
110 | print(f"🔧 使用默认PDF引擎")
111 |
112 | print(f"🔧 PDF参数: {extra_args}")
113 |
114 | # 清理内容避免YAML解析问题
115 | cleaned_content = _clean_markdown_for_pandoc(md_content)
116 |
117 | pypandoc.convert_text(
118 | cleaned_content,
119 | 'pdf',
120 | format='markdown',
121 | outputfile=output_file,
122 | extra_args=extra_args
123 | )
124 |
125 | if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
126 | with open(output_file, 'rb') as f:
127 | pdf_content = f.read()
128 |
129 | os.unlink(output_file)
130 |
131 | print(f"✅ PDF生成成功,使用引擎: {engine or '默认'}")
132 | return pdf_content
133 | else:
134 | raise Exception("PDF文件生成失败或为空")
135 |
136 | except Exception as e:
137 | last_error = str(e)
138 | print(f"PDF引擎 {engine or '默认'} 失败: {e}")
139 |
140 | try:
141 | if 'output_file' in locals() and os.path.exists(output_file):
142 | os.unlink(output_file)
143 | except:
144 | pass
145 |
146 | continue
147 |
148 | # 所有引擎都失败时的错误信息
149 | error_msg = f"""PDF生成失败,最后错误: {last_error}
150 |
151 | 可能的解决方案:
152 | 1. 安装weasyprint (推荐): pip install weasyprint,或安装wkhtmltopdf:
153 | Windows: choco install wkhtmltopdf
154 | macOS: brew install wkhtmltopdf
155 | Linux: sudo apt-get install wkhtmltopdf
156 |
157 | 2. 安装LaTeX:
158 | Windows: choco install miktex
159 | macOS: brew install mactex
160 | Linux: sudo apt-get install texlive-full
161 |
162 | 3. 使用Markdown格式导出作为替代方案
163 | """
164 | raise Exception(error_msg)
165 |
166 |
167 | def generate_docx_report(md_content):
168 | """将Markdown内容转换为Word文档"""
169 |
170 | print("📄 开始生成Word文档...")
171 |
172 | if not PANDOC_AVAILABLE:
173 | print("❌ Pandoc不可用")
174 | raise Exception("Pandoc不可用,无法生成Word文档。请安装pandoc或使用Markdown格式导出。")
175 |
176 | print(f"✅ Markdown内容生成完成,长度: {len(md_content)} 字符")
177 |
178 | try:
179 | with tempfile.NamedTemporaryFile(suffix='.docx', delete=False) as tmp_file:
180 | output_file = tmp_file.name
181 |
182 | extra_args = [
183 | '--from=markdown-yaml_metadata_block',
184 | '--reference-doc=/app/templates/reference.docx' if os.path.exists('/app/templates/reference.docx') else None
185 | ]
186 |
187 | extra_args = [arg for arg in extra_args if arg is not None]
188 |
189 | print(f"🔧 Word转换参数: {extra_args}")
190 |
191 | # 清理内容避免YAML解析问题
192 | cleaned_content = _clean_markdown_for_pandoc(md_content)
193 |
194 | pypandoc.convert_text(
195 | cleaned_content,
196 | 'docx',
197 | format='markdown',
198 | outputfile=output_file,
199 | extra_args=extra_args
200 | )
201 |
202 | if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
203 | with open(output_file, 'rb') as f:
204 | docx_content = f.read()
205 |
206 | os.unlink(output_file)
207 |
208 | print(f"✅ Word文档生成成功")
209 | return docx_content
210 | else:
211 | raise Exception("Word文档生成失败或为空")
212 |
213 | except Exception as e:
214 | error_msg = str(e)
215 | print(f"Word文档生成失败: {error_msg}")
216 |
217 | try:
218 | if 'output_file' in locals() and os.path.exists(output_file):
219 | os.unlink(output_file)
220 | except:
221 | pass
222 |
223 | detailed_error = f"""Word文档生成失败: {error_msg}
224 |
225 | 可能的解决方案:
226 | 1. 确保pandoc已正确安装:
227 | Windows: choco install pandoc
228 | macOS: brew install pandoc
229 | Linux: sudo apt-get install pandoc
230 |
231 | 2. 检查系统权限,确保可以创建临时文件
232 |
233 | 3. 使用Markdown或PDF格式导出作为替代方案
234 | """
235 | raise Exception(detailed_error)
236 |
237 |
238 | def generate_markdown_file(md_content):
239 | """将Markdown内容转换为文件字节数据"""
240 |
241 | print("📝 开始生成Markdown文件...")
242 |
243 | try:
244 | # 将字符串内容编码为UTF-8字节数据
245 | markdown_bytes = md_content.encode('utf-8')
246 |
247 | print(f"✅ Markdown文件生成成功,大小: {len(markdown_bytes)} 字节")
248 | return markdown_bytes
249 |
250 | except Exception as e:
251 | error_msg = str(e)
252 | print(f"Markdown文件生成失败: {error_msg}")
253 |
254 | # 生成错误信息的字节数据
255 | error_content = f"# Markdown文件生成失败\n\n{error_msg}\n"
256 | return error_content.encode('utf-8')
257 |
258 |
259 | def generate_html_report(md_content):
260 | """将Markdown内容转换为HTML"""
261 |
262 | print("🌐 开始生成HTML文档...")
263 |
264 | if not PANDOC_AVAILABLE:
265 | print("❌ Pandoc不可用")
266 | raise Exception("Pandoc不可用,无法生成HTML文档。请安装pandoc或使用Markdown格式导出。")
267 |
268 | print(f"✅ Markdown内容生成完成,长度: {len(md_content)} 字符")
269 |
270 | try:
271 | with tempfile.NamedTemporaryFile(suffix='.html', delete=False) as tmp_file:
272 | output_file = tmp_file.name
273 |
274 | extra_args = [
275 | '--from=markdown-yaml_metadata_block',
276 | '--standalone',
277 | '--css=/app/templates/style.css' if os.path.exists('/app/templates/style.css') else None,
278 | '--metadata', 'title=分析报告',
279 | '--template=/app/templates/template.html' if os.path.exists('/app/templates/template.html') else None
280 | ]
281 |
282 | extra_args = [arg for arg in extra_args if arg is not None]
283 |
284 | print(f"🔧 HTML转换参数: {extra_args}")
285 |
286 | cleaned_content = _clean_markdown_for_pandoc(md_content)
287 |
288 | pypandoc.convert_text(
289 | cleaned_content,
290 | 'html',
291 | format='markdown',
292 | outputfile=output_file,
293 | extra_args=extra_args
294 | )
295 |
296 | if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
297 | with open(output_file, 'rb') as f:
298 | html_content = f.read()
299 |
300 | os.unlink(output_file)
301 |
302 | print(f"✅ HTML文档生成成功")
303 | return html_content
304 | else:
305 | raise Exception("HTML文档生成失败或为空")
306 |
307 | except Exception as e:
308 | error_msg = str(e)
309 | print(f"HTML文档生成失败: {error_msg}")
310 |
311 | try:
312 | if 'output_file' in locals() and os.path.exists(output_file):
313 | os.unlink(output_file)
314 | except:
315 | pass
316 |
317 | detailed_error = f"""HTML文档生成失败: {error_msg}
318 |
319 | 可能的解决方案:
320 | 1. 确保pandoc已正确安装:
321 | Windows: choco install pandoc
322 | macOS: brew install pandoc
323 | Linux: sudo apt-get install pandoc
324 |
325 | 2. 检查系统权限,确保可以创建临时文件
326 |
327 | 3. 使用Markdown格式导出作为替代方案
328 | """
329 | raise Exception(detailed_error)
330 |
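331 | 
332 | if __name__ == '__main__':
333 |     # 用法示意:把一段占位Markdown导出为不同格式(PDF 依赖 pandoc 与 weasyprint,Markdown 无额外依赖)
334 |     sample_md = "# 示例报告\n\n这是一段用于演示导出流程的占位内容。\n"
335 | 
336 |     with open('demo_report.md', 'wb') as f:
337 |         f.write(generate_markdown_file(sample_md))
338 | 
339 |     if PDF_SUPPORT_AVAILABLE:
340 |         with open('demo_report.pdf', 'wb') as f:
341 |             f.write(generate_pdf_report(sample_md))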
--------------------------------------------------------------------------------
/stock/stock_report.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import datetime
4 | from typing import Dict, Any
5 |
6 | project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
7 | if project_root not in sys.path:
8 | sys.path.append(project_root)
9 |
10 | from stock.stock_data_tools import get_stock_tools
11 | from utils.report_utils import generate_pdf_report, generate_docx_report, generate_markdown_file, generate_html_report
12 | from utils.data_formatters import get_stock_formatter
13 | from version import get_version, get_full_version
14 |
15 |
16 | def generate_stock_report(stock_identity: Dict[str, Any],
17 | format_type="pdf",
18 | has_fundamental_ai=False, has_market_ai=False,
19 | has_news_ai=False, has_chip_ai=False,
20 | has_company_ai=False, has_comprehensive_ai=False):
21 | """生成完整的股票分析报告(安全版本,完全独立于Streamlit)"""
22 | try:
23 | stock_tools = get_stock_tools()
24 | report_data = {}
25 |
26 | # 收集基本信息
27 | try:
28 | basic_info = stock_tools.get_basic_info(stock_identity, use_cache=True, include_ai_analysis=has_fundamental_ai, include_company_analysis=has_company_ai)
29 | if 'error' not in basic_info and basic_info:
30 | report_data['basic_info'] = basic_info
31 | except Exception as e:
32 | report_data['basic_info'] = {'error': str(e)}
33 |
34 | # 收集行情数据
35 | try:
36 | kline_info = stock_tools.get_stock_kline_data(stock_identity, period=160, use_cache=True, include_ai_analysis=has_market_ai)
37 | if 'error' not in kline_info and kline_info:
38 | report_data['kline_info'] = kline_info
39 | except Exception as e:
40 | report_data['kline_info'] = {'error': str(e)}
41 |
42 | # 收集新闻数据
43 | try:
44 | news_info = stock_tools.get_stock_news_data(stock_identity, use_cache=True, include_ai_analysis=has_news_ai)
45 | if 'error' not in news_info and news_info:
46 | report_data['news_data'] = news_info
47 | except Exception as e:
48 | report_data['news_data'] = {'error': str(e)}
49 |
50 | # 收集筹码数据(仅A股和基金)
51 | if stock_identity.get('market_name', "") != '港股':
52 | try:
53 | chip_data = stock_tools.get_stock_chip_data(stock_identity, use_cache=True, include_ai_analysis=has_chip_ai)
54 | if 'error' not in chip_data and chip_data:
55 | report_data['chip_data'] = chip_data
56 | except Exception as e:
57 | report_data['chip_data'] = {'error': str(e)}
58 |
59 | # 收集综合分析
60 | if has_comprehensive_ai:
61 | try:
62 | comprehensive_analysis = stock_tools.get_comprehensive_ai_analysis(stock_identity, use_cache=True)
63 | if 'error' not in comprehensive_analysis:
64 | report_data['comprehensive_analysis'] = comprehensive_analysis
65 | except Exception as e:
66 | pass
67 |
68 | final_ai_reports = {}
69 |
70 | # 整理AI分析报告
71 | if has_fundamental_ai:
72 | if 'ai_analysis' in report_data.get('basic_info', {}):
73 | final_ai_reports['fundamental'] = report_data['basic_info']['ai_analysis']
74 |
75 | if has_company_ai:
76 | if 'company_analysis' in report_data.get('basic_info', {}):
77 | final_ai_reports['company'] = report_data['basic_info']['company_analysis']
78 |
79 | if has_market_ai:
80 | if 'ai_analysis' in report_data.get('kline_info', {}):
81 | final_ai_reports['market'] = report_data['kline_info']['ai_analysis']
82 |
83 | if has_news_ai:
84 | if 'ai_analysis' in report_data.get('news_data', {}):
85 | final_ai_reports['news'] = report_data['news_data']['ai_analysis']
86 |
87 | if has_chip_ai:
88 | if 'ai_analysis' in report_data.get('chip_data', {}):
89 | final_ai_reports['chip'] = report_data['chip_data']['ai_analysis']
90 |
91 | if has_comprehensive_ai:
92 | if 'comprehensive_analysis' in report_data:
93 | final_ai_reports['comprehensive'] = report_data['comprehensive_analysis']
94 |
95 | report_data['ai_reports'] = final_ai_reports
96 |
97 | md_content = generate_markdown_report(stock_identity, report_data)
98 |
99 | if format_type == "pdf":
100 | return generate_pdf_report(md_content)
101 | elif format_type == "docx":
102 | return generate_docx_report(md_content)
103 | elif format_type == "html":
104 | return generate_html_report(md_content)
105 | elif format_type == "markdown":
106 | return generate_markdown_file(md_content)
107 | else:
108 | return md_content
109 |
110 | except Exception as e:
111 | error_msg = f"生成报告失败: {str(e)}"
112 | if format_type == "pdf":
113 | return generate_pdf_report(f"# 错误\n\n{error_msg}")
114 | elif format_type == "docx":
115 | return generate_docx_report(f"# 错误\n\n{error_msg}")
116 | elif format_type == "html":
117 | return generate_html_report(f"# 错误\n\n{error_msg}")
118 | elif format_type == "markdown":
119 | return generate_markdown_file(f"# 错误\n\n{error_msg}")
120 | else:
121 | return f"# 错误\n\n{error_msg}"
122 |
123 |
124 | def generate_markdown_report(stock_identity: Dict[str, Any], report_data: Dict[str, Any]) -> str:
125 | """生成Markdown格式报告"""
126 | stock_code = stock_identity['code']
127 | stock_name = stock_identity['name']
128 | market_type = stock_identity['market_name']
129 |
130 | current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
131 | md_content = f"""# {stock_name}({stock_code}) 完整分析报告
132 |
133 | **市场类型**: {market_type}
134 | **报告生成时间**: {current_time}
135 | **生成工具**: {get_full_version()}
136 |
137 | """
138 |
139 | # 综合分析部分
140 | if 'comprehensive' in report_data['ai_reports']:
141 | analysis_data = report_data['ai_reports']['comprehensive']
142 | md_content += """
143 | ---
144 |
145 | # 🎯 综合分析
146 |
147 | """
148 |
149 | if 'analysis_info' in analysis_data:
150 | info = analysis_data['analysis_info']
151 | md_content += f"""## 分析信息
152 |
153 | - **分析时间**: {info.get('analysis_time', '未知')}
154 | - **数据来源**: {info.get('data_sources_count', 0)}个数据源
155 |
156 | """
157 |
158 | if 'report' in analysis_data:
159 | report_text = analysis_data['report']
160 | report_time = analysis_data.get('timestamp', '')
161 |
162 | md_content += f"""{report_text}
163 |
164 | *分析生成时间: {report_time}*
165 |
166 | """
167 |
168 | # 基本信息部分
169 | basic_info = report_data.get('basic_info', {})
170 | if 'error' not in basic_info and basic_info:
171 | md_content += """
172 | ---
173 |
174 | # 参考数据
175 |
176 | ## 公司信息
177 |
178 | ### 基本信息
179 | """
180 |
181 | # 使用统一格式化器
182 | formatter = get_stock_formatter()
183 | basic_info_text = formatter.format_basic_info(basic_info, stock_identity)
184 |
185 | md_content += basic_info_text + "\n\n"
186 |
187 | if 'company' in report_data['ai_reports']:
188 | company_report = report_data['ai_reports']['company']
189 | report_text = company_report['report']
190 | report_time = company_report.get('timestamp', '')
191 |
192 | md_content += f"""## 🏢 AI公司分析
193 |
194 | {report_text}
195 |
196 | *分析生成时间: {report_time}*
197 |
198 | """
199 |
200 | if 'fundamental' in report_data['ai_reports']:
201 | fundamental_report = report_data['ai_reports']['fundamental']
202 | report_text = fundamental_report['report']
203 | report_time = fundamental_report.get('timestamp', '')
204 |
205 | md_content += f"""## 🤖 AI基本面分析
206 |
207 | {report_text}
208 |
209 | *分析生成时间: {report_time}*
210 |
211 | """
212 |
213 | # 行情走势部分
214 | kline_info = report_data.get('kline_info', {})
215 | if 'error' not in kline_info and kline_info:
216 | formatter = get_stock_formatter()
217 | kline_text = formatter.format_kline_data(kline_info)
218 |
219 | #df = pd.DataFrame(kline_info['kline_data']) # later remove
220 | #last_row = df.iloc[-1]
221 |
222 | md_content += """
223 | ---
224 |
225 | # 📈 行情走势
226 |
227 | """
228 |
229 | md_content += kline_text + "\n\n"
230 |
231 | if 'market' in report_data['ai_reports']:
232 | market_report = report_data['ai_reports']['market']
233 | report_text = market_report['report']
234 | report_time = market_report.get('timestamp', '')
235 |
236 | md_content += f"""## 🤖 AI行情分析
237 |
238 | {report_text}
239 |
240 | *分析生成时间: {report_time}*
241 |
242 | """
243 |
244 | # 新闻资讯部分
245 | news_data = report_data.get('news_data', {})
246 | if 'error' not in news_data and news_data and news_data.get('news_data'):
247 | news_list = news_data['news_data']
248 | md_content += f"""
249 | ---
250 |
251 | # 📰 新闻资讯
252 |
253 | """
254 |
255 | # 使用统一格式化器
256 | formatter = get_stock_formatter()
257 | news_text = formatter.format_stock_news_data(news_list, has_content=False)
258 |
259 | md_content += news_text + "\n\n"
260 |
261 | if 'news' in report_data['ai_reports']:
262 | news_report = report_data['ai_reports']['news']
263 | report_text = news_report['report']
264 | report_time = news_report.get('timestamp', '')
265 |
266 | md_content += f"""## 🤖 AI新闻分析
267 |
268 | {report_text}
269 |
270 | *分析生成时间: {report_time}*
271 |
272 | """
273 |
274 | # 筹码分析部分(仅A股)
275 | chip_data = report_data.get('chip_data', {})
276 | if 'error' not in chip_data and chip_data:
277 | md_content += """
278 | ---
279 |
280 | # 🧮 筹码分析
281 |
282 | """
283 |
284 | # 使用统一格式化器
285 | formatter = get_stock_formatter()
286 | chip_text = formatter.format_chip_data(chip_data)
287 |
288 | md_content += chip_text + "\n\n"
289 |
290 | md_content += "\n"
291 |
292 | if 'chip' in report_data['ai_reports']:
293 | chip_report = report_data['ai_reports']['chip']
294 | report_text = chip_report['report']
295 | report_time = chip_report.get('timestamp', '')
296 |
297 | md_content += f"""## 🤖 AI筹码分析
298 |
299 | {report_text}
300 |
301 | *分析生成时间: {report_time}*
302 |
303 | """
304 |
305 | md_content += """---
306 |
307 | *本报告由XYStock股票分析系统自动生成,仅供参考,不构成任何投资建议*
308 | """
309 |
310 | return md_content
311 |
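312 | 
313 | if __name__ == '__main__':
314 |     # 用法示意:为某只股票生成Markdown格式的完整报告并写入文件
315 |     # (依赖完整的数据抓取与LLM配置;股票代码仅为示例)
316 |     from stock.stock_code_map import get_stock_identity
317 | 
318 |     identity = get_stock_identity('600519')
319 |     if identity:
320 |         report_bytes = generate_stock_report(identity, format_type="markdown")
321 |         with open(f"{identity['code']}_report.md", 'wb') as f:
322 |             f.write(report_bytes)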
--------------------------------------------------------------------------------
/llm/openai_client.py:
--------------------------------------------------------------------------------
1 | """
2 | OpenAI API 增强封装
3 | 包含 token 使用记录、配置管理、错误处理等功能
4 | """
5 | import time
6 | import logging
7 | import sys
8 | import os
9 | from pathlib import Path
10 | from typing import List, Dict, Any, Optional, Union
11 | from openai import OpenAI
12 | from openai.types.chat import ChatCompletion
13 |
14 | # 添加项目根目录到路径,以便导入配置管理器
15 | sys.path.append(str(Path(__file__).parent.parent))
16 | from config_manager import config
17 | from .usage_logger import UsageLogger
18 |
19 | # 配置日志
20 | logging.basicConfig(
21 | level=getattr(logging, config.get('LLM_LOGGING.LOG_LEVEL', 'INFO')),
22 | format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
23 | )
24 | logger = logging.getLogger(__name__)
25 |
26 | class OpenAIClient:
27 | """增强的 OpenAI API 客户端"""
28 |
29 | def __init__(self, api_key: Optional[str] = None, usage_logger: Optional[UsageLogger] = None):
30 | """
31 | 初始化 OpenAI 客户端
32 |
33 | Args:
34 | api_key: API 密钥,如果为空则从配置文件读取
35 | usage_logger: 使用记录器,如果为空则自动创建
36 | """
37 | # 从配置获取API密钥
38 | self.api_key = api_key or config.get('LLM_OPENAI.API_KEY')
39 | if not self.api_key:
40 | raise ValueError("API密钥未设置,请在配置文件中设置 LLM_OPENAI.API_KEY")
41 |
42 | # 获取其他配置
43 | openai_config = config.get_openai_config()
44 | base_url = openai_config.get('BASE_URL')
45 | timeout = openai_config.get('TIMEOUT', 60)
46 | max_retries = openai_config.get('MAX_RETRIES', 3)
47 |
48 | # 初始化 OpenAI 客户端
49 | kwargs = {'api_key': self.api_key, 'timeout': timeout, 'max_retries': max_retries}
50 | if base_url:
51 | kwargs['base_url'] = base_url
52 |
53 | self.client = OpenAI(**kwargs)
54 |
55 |         # Initialize the usage logger
56 | if config.get('LLM_LOGGING.ENABLE_LOGGING', True):
57 | log_file = config.get('LLM_LOGGING.USAGE_LOG_FILE', 'data/logs/openai_usage.csv')
58 | if not Path(log_file).is_absolute():
59 | project_root = Path(__file__).parent.parent
60 | log_file = project_root / log_file
61 | self.usage_logger = usage_logger or UsageLogger(str(log_file))
62 | else:
63 | self.usage_logger = None
64 |
65 |         # Default parameters
66 | self.default_model = openai_config.get('DEFAULT_MODEL', 'deepseek-chat')
67 | self.inference_model = openai_config.get('INFERENCE_MODEL', 'deepseek-chat')
68 | self.default_temperature = openai_config.get('DEFAULT_TEMPERATURE', 0.7)
69 |
70 | logger.info("OpenAI 客户端初始化完成")
71 |
72 | def ask(self,
73 | prompt: str,
74 | model: Optional[str] = None,
75 |             model_type: Optional[str] = "default",  # added: select the model type
76 | temperature: Optional[float] = None,
77 | max_tokens: Optional[int] = None,
78 | system_message: Optional[str] = None,
79 | messages: Optional[List[Dict[str, str]]] = None,
80 | json_mode: bool = False,
81 | debug: bool = False) -> str:
82 | """
83 |         Send a single chat completion request.
84 | 
85 |         Args:
86 |             prompt: user input
87 |             model: model name; overrides model_type when given
88 |             model_type: 'default' uses the analysis model, 'inference' uses the reasoning model
89 |             temperature: sampling temperature
90 |             max_tokens: maximum number of tokens to generate
91 |             system_message: system message
92 |             messages: full message list (overrides prompt and system_message when provided)
93 |             json_mode: force the response to be valid JSON
94 |             debug: print debug information
95 | 
96 |         Returns:
97 |             The AI reply content
98 | """
99 | start_time = time.time()
100 |
101 |         # Choose the default model based on model_type
102 | if model is None:
103 | if model_type == "inference":
104 | model = self.inference_model
105 | else:
106 | model = self.default_model
107 |
108 |         # Fall back to the default temperature (an explicit 0 is respected)
109 |         temperature = temperature if temperature is not None else self.default_temperature
110 |
111 | try:
112 |             # Build the message list
113 | if messages is None:
114 | messages = []
115 | if system_message:
116 | messages.append({"role": "system", "content": system_message})
117 | messages.append({"role": "user", "content": prompt})
118 |
119 |             # Build the request parameters
120 | kwargs = {
121 | 'model': model,
122 | 'messages': messages,
123 | 'temperature': temperature
124 | }
125 | if max_tokens:
126 | kwargs['max_tokens'] = max_tokens
127 |
128 |             # When JSON mode is enabled, set response_format
129 | if json_mode:
130 | kwargs['response_format'] = {"type": "json_object"}
131 |                 # In JSON mode, make sure the system message contains a JSON instruction
132 | if not any(msg.get('role') == 'system' for msg in messages):
133 | messages.insert(0, {"role": "system", "content": "You must respond with valid JSON."})
134 | elif not any('json' in msg.get('content', '').lower() for msg in messages if msg.get('role') == 'system'):
135 |                     # If a system message exists but lacks a JSON instruction, append one
136 | for msg in messages:
137 | if msg.get('role') == 'system':
138 | msg['content'] += " You must respond with valid JSON."
139 | break
140 |
141 |             # Send the request
142 |             response: ChatCompletion = self.client.chat.completions.create(**kwargs)
143 | 
144 |             # Measure the response time
145 |             response_time = time.time() - start_time
146 | 
147 |             # Extract the reply content
148 |             content = response.choices[0].message.content
149 | 
150 |             # Log token usage
151 | if self.usage_logger:
152 | usage_data = response.usage.model_dump() if response.usage else {}
153 | input_text = prompt if not messages else str(messages)
154 |
155 | self.usage_logger.log_usage(
156 | model=model,
157 | usage_data=usage_data,
158 | input_text=input_text,
159 | output_text=content,
160 | response_time=response_time,
161 | temperature=temperature,
162 | success=True
163 | )
164 |
165 |             # Debug output
166 | if debug:
167 | print(f"模型: {model}")
168 | print(f"输入: {prompt}")
169 | print(f"输出: {content}")
170 | print(f"Token使用: {response.usage.model_dump() if response.usage else 'N/A'}")
171 | print(f"响应时间: {response_time:.2f}秒")
172 |
173 | logger.info(f"API调用成功,模型: {model}, tokens: {response.usage.total_tokens if response.usage else 'N/A'}")
174 |
175 | return content
176 |
177 | except Exception as e:
178 | response_time = time.time() - start_time
179 | error_message = str(e)
180 |
181 |             # Log the error
182 | if self.usage_logger:
183 | input_text = prompt if not messages else str(messages)
184 | self.usage_logger.log_usage(
185 | model=model,
186 | usage_data={},
187 | input_text=input_text,
188 | output_text="",
189 | response_time=response_time,
190 | temperature=temperature,
191 | success=False,
192 | error_message=error_message
193 | )
194 |
195 | logger.error(f"API调用失败: {error_message}")
196 | raise
197 |
198 | def chat(self,
199 | messages: List[Dict[str, str]],
200 | model: Optional[str] = None,
201 |              model_type: Optional[str] = "default",  # added: select the model type
202 | temperature: Optional[float] = None,
203 | max_tokens: Optional[int] = None,
204 | json_mode: bool = False,
205 | debug: bool = False) -> str:
206 | """
207 |         Multi-turn conversation.
208 | 
209 |         Args:
210 |             messages: message list
211 |             model: model name
212 |             model_type: 'default' uses the analysis model, 'inference' uses the reasoning model
213 |             temperature: sampling temperature
214 |             max_tokens: maximum number of tokens to generate
215 |             json_mode: force the response to be valid JSON
216 |             debug: print debug information
217 | 
218 |         Returns:
219 |             The AI reply content
220 | """
221 | return self.ask(
222 |             prompt="",  # prompt is empty here because messages is used instead
223 | model=model,
224 | model_type=model_type,
225 | temperature=temperature,
226 | max_tokens=max_tokens,
227 | messages=messages,
228 | json_mode=json_mode,
229 | debug=debug
230 | )
231 |
232 | def get_usage_stats(self, days: int = 30) -> Dict[str, Any]:
233 | """
234 |         Get usage statistics.
235 | 
236 |         Args:
237 |             days: number of days to aggregate
238 | 
239 |         Returns:
240 |             Statistics dictionary
241 | """
242 | if self.usage_logger:
243 | return self.usage_logger.get_usage_stats(days)
244 | return {}
245 |
246 | def export_usage_report(self, output_file: str = "reports/usage_report.html"):
247 | """
248 |         Export a usage report.
249 | 
250 |         Args:
251 |             output_file: output file path
252 | """
253 | if self.usage_logger:
254 | self.usage_logger.export_usage_report(output_file)
255 |
256 | # Example usage and smoke test
257 | if __name__ == "__main__":
258 | try:
259 |         # Create the client
260 | client = OpenAIClient()
261 |
262 |         # Simple Q&A using the default analysis model
263 | print("=== 默认分析模型测试 ===")
264 | response = client.ask("用一句话详细评价AAPL股票", debug=True)
265 | print(f"分析模型回复: {response}")
266 |
267 |         # Simple Q&A using the inference (reasoning) model
268 | print("=== 推理模型测试 ===")
269 | response = client.ask("用一句话评价AAPL股票", model_type="inference", debug=True)
270 | print(f"推理模型回复: {response}")
271 |
272 |         # Multi-turn conversation test using the default analysis model
273 | print("\n=== 多轮对话测试 ===")
274 | messages = [
275 | {"role": "system", "content": "你是一个专业的股票分析师"},
276 | {"role": "user", "content": "分析一下苹果公司的投资价值"},
277 | {"role": "assistant", "content": "苹果公司作为科技巨头..."},
278 | {"role": "user", "content": "那它的主要风险是什么?"}
279 | ]
280 | response = client.chat(messages, debug=True)
281 | print(f"回复: {response}")
282 |
283 |         # Fetch usage statistics
284 | print("\n=== 使用统计 ===")
285 | stats = client.get_usage_stats()
286 | if stats:
287 | print(f"总请求数: {stats.get('total_requests', 0)}")
288 | print(f"总Token数: {stats.get('total_tokens', 0)}")
289 | print(f"总成本: ${stats.get('total_cost', 0):.4f}")
290 |
291 |         # Export the usage report
292 | client.export_usage_report()
293 | print("使用报告已导出")
294 |
295 | except Exception as e:
296 | print(f"测试失败: {e}")
297 |
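298 | # --- JSON-mode usage sketch (added for illustration; not part of the original module) ---
299 | # `ask(json_mode=True)` forces a JSON response and injects a JSON instruction
300 | # into the system message, so the reply can be parsed directly:
301 | #
302 | # import json
303 | #
304 | # client = OpenAIClient()
305 | # raw = client.ask(
306 | #     "Return AAPL's company name and exchange as JSON with keys 'name' and 'exchange'.",
307 | #     json_mode=True,
308 | # )
309 | # data = json.loads(raw)  # e.g. {"name": "...", "exchange": "..."}
310 | # print(data)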
--------------------------------------------------------------------------------
/market/kline_data_manager.py:
--------------------------------------------------------------------------------
1 | """
2 | K-line data manager - unified fetching, conversion, and caching of index K-line data
3 | """
4 | import os
5 | import sys
6 | import warnings
7 | from datetime import datetime
8 | from typing import Dict, List, Optional, Tuple
9 | import pandas as pd
10 | import akshare as ak
11 |
12 | # Add the project root to sys.path
13 | PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
14 | if PROJECT_ROOT not in sys.path:
15 | sys.path.insert(0, PROJECT_ROOT)
16 |
17 | warnings.filterwarnings('ignore')
18 |
19 | from ui.config import INDEX_SYMBOL_MAPPING
20 | from utils.kline_cache import cache_manager, KLineData
21 |
22 |
23 | class KLineDataManager:
24 |     """K-line data manager."""
25 |
26 | def __init__(self):
27 |         """Initialize the K-line data manager."""
28 | self.index_mapping = INDEX_SYMBOL_MAPPING
29 |
30 | def fetch_index_kline_raw(self, index_name: str, period: int = 250) -> pd.DataFrame:
31 | """
32 |         Fetch raw K-line data from akshare.
33 | 
34 |         Args:
35 |             index_name: index name
36 |             period: number of rows to fetch
37 | 
38 |         Returns:
39 |             pd.DataFrame: raw K-line data
40 | """
41 | if index_name not in self.index_mapping:
42 | raise ValueError(f"不支持的指数名称: {index_name}")
43 |
44 | symbol = self.index_mapping[index_name]
45 | df_raw = ak.stock_zh_index_daily(symbol=symbol)
46 |
47 | if df_raw.empty:
48 | raise ValueError(f"无法获取{index_name}数据")
49 |
50 |         # Keep only the most recent rows
51 | df_raw = df_raw.tail(period).copy()
52 |
53 |         # Convert numeric columns
54 | numeric_columns = ['open', 'high', 'low', 'close', 'volume']
55 | for col in numeric_columns:
56 | if col in df_raw.columns:
57 | df_raw[col] = pd.to_numeric(df_raw[col], errors='coerce')
58 |
59 |         # Ensure the date column is in datetime format
60 | if 'date' in df_raw.columns:
61 | df_raw['date'] = pd.to_datetime(df_raw['date'])
62 |
63 | return df_raw
64 |
65 | def convert_to_kline_data_list(self, df: pd.DataFrame, index_name: str) -> List[KLineData]:
66 | """
67 |         Convert a DataFrame into a list of KLineData objects.
68 | 
69 |         Args:
70 |             df: K-line data DataFrame
71 |             index_name: index name
72 | 
73 |         Returns:
74 |             List[KLineData]: list of KLineData objects
75 | """
76 | kline_data_list = []
77 |
78 | for _, row in df.iterrows():
79 |             # Normalize the date so the datetime field is always a string
80 | if 'date' in df.columns:
81 | if pd.isna(row['date']):
82 | date_str = datetime.now().strftime('%Y-%m-%d')
83 | elif isinstance(row['date'], str):
84 | date_str = row['date']
85 | else:
86 | date_str = row['date'].strftime('%Y-%m-%d')
87 | else:
88 | date_str = datetime.now().strftime('%Y-%m-%d')
89 |
90 | kline_data = KLineData(
91 | symbol=index_name,
92 | datetime=date_str,
93 | open=float(row['open']),
94 | high=float(row['high']),
95 | low=float(row['low']),
96 | close=float(row['close']),
97 | volume=int(row['volume']),
98 | amount=None,
99 | data_type="index"
100 | )
101 | kline_data_list.append(kline_data)
102 |
103 | return kline_data_list
104 |
105 | def convert_from_kline_data_list(self, kline_data_list: List[KLineData],
106 | for_technical_analysis: bool = False) -> pd.DataFrame:
107 | """
108 |         Convert a list of KLineData objects into a DataFrame.
109 | 
110 |         Args:
111 |             kline_data_list: list of KLineData objects
112 |             for_technical_analysis: whether the result is for technical analysis (affects the index)
113 | 
114 |         Returns:
115 |             pd.DataFrame: K-line data DataFrame
116 | """
117 | kline_records = []
118 |
119 | for kdata in kline_data_list:
120 |             # Safely handle the datetime field
121 | if isinstance(kdata.datetime, str):
122 |                 date_str = kdata.datetime.split()[0]  # keep only the date part
123 | datetime_str = kdata.datetime
124 | else:
125 |                 # Convert datetime objects to strings
126 | date_str = kdata.datetime.strftime('%Y-%m-%d')
127 | datetime_str = kdata.datetime.strftime('%Y-%m-%d %H:%M:%S')
128 |
129 | record = {
130 | 'date': date_str,
131 | 'datetime': datetime_str,
132 | 'open': kdata.open,
133 | 'high': kdata.high,
134 | 'low': kdata.low,
135 | 'close': kdata.close,
136 | 'volume': kdata.volume
137 | }
138 | if kdata.amount is not None:
139 | record['amount'] = kdata.amount
140 |
141 | kline_records.append(record)
142 |
143 | df = pd.DataFrame(kline_records)
144 | df['date'] = pd.to_datetime(df['date'])
145 |
146 |         # For technical analysis, use the date column as the index
147 | if for_technical_analysis:
148 | df = df.set_index('date')
149 |
150 | return df
151 |
152 | def get_index_kline_data(self, index_name: str, period: int = 250,
153 | use_cache: bool = True, force_refresh: bool = False,
154 | for_technical_analysis: bool = False) -> Tuple[pd.DataFrame, bool]:
155 | """
156 |         Get index K-line data (single entry point).
157 | 
158 |         Args:
159 |             index_name: index name
160 |             period: number of rows to fetch
161 |             use_cache: whether to use the cache
162 |             force_refresh: whether to force a refresh
163 |             for_technical_analysis: whether the result is for technical analysis (affects the DataFrame format)
164 | 
165 |         Returns:
166 |             Tuple[pd.DataFrame, bool]: (K-line DataFrame, whether it came from the cache)
167 | """
168 | from_cache = False
169 |
170 |         # Try the cache first
171 | if use_cache and not force_refresh:
172 | cached_data = cache_manager.get_cached_index_kline(index_name, period)
173 | if cached_data and len(cached_data) >= min(period, 30):
174 | print(f"📋 使用缓存的K线数据: {index_name} ({len(cached_data)}条)")
175 | df = self.convert_from_kline_data_list(cached_data, for_technical_analysis)
176 |                 # Trim to the requested length
177 | df = df.tail(period)
178 | from_cache = True
179 | return df, from_cache
180 |
181 |         # Fetch fresh data from the network
182 |         print(f"📡 获取最新K线数据: {index_name}")
183 |         df_raw = self.fetch_index_kline_raw(index_name, period * 2)  # fetch extra rows for the cache
184 |
185 |         # Convert to a KLineData list and cache it
186 | if use_cache:
187 | kline_data_list = self.convert_to_kline_data_list(df_raw, index_name)
188 | cache_manager.cache_index_kline(index_name, kline_data_list)
189 |
190 |         # Prepare the DataFrame to return
191 | df = df_raw.tail(period).copy()
192 |
193 |         # Ensure the DataFrame has a datetime column (used for plotting)
194 | if 'date' in df.columns and 'datetime' not in df.columns:
195 | df['datetime'] = df['date'].dt.strftime('%Y-%m-%d')
196 |
197 | if for_technical_analysis and 'date' in df.columns:
198 | df = df.set_index('date')
199 |
200 | return df, from_cache
201 |
202 | def update_index_cache(self, index_name: str, period: int = 250) -> bool:
203 | """
204 |         Update the cached data for an index.
205 | 
206 |         Args:
207 |             index_name: index name
208 |             period: number of rows to fetch
209 | 
210 |         Returns:
211 |             bool: whether the update succeeded
212 | """
213 | try:
214 | print(f"🔄 更新{index_name}缓存数据...")
215 |
216 |             # Fetch raw data
217 | df_raw = self.fetch_index_kline_raw(index_name, period)
218 |
219 |             # Convert to a KLineData list
220 | kline_data_list = self.convert_to_kline_data_list(df_raw, index_name)
221 |
222 |             # Update the cache (merge with existing data)
223 | cache_manager.update_index_kline(index_name, kline_data_list)
224 |
225 | print(f" ✓ 成功更新{index_name}缓存数据: {len(kline_data_list)}条")
226 | return True
227 |
228 | except Exception as e:
229 | print(f" ❌ 更新{index_name}缓存数据失败: {e}")
230 | return False
231 |
232 | def batch_update_indices_cache(self, indices: Optional[List[str]] = None,
233 | period: int = 250) -> Dict:
234 | """
235 |         Batch-update cached data for multiple indices.
236 | 
237 |         Args:
238 |             indices: list of index names; None updates all supported indices
239 |             period: number of rows to fetch
240 | 
241 |         Returns:
242 |             Dict: update result statistics
243 | """
244 | if indices is None:
245 | from ui.config import FOCUS_INDICES
246 | indices = FOCUS_INDICES
247 |
248 | print(f"📊 批量更新指数缓存数据 ({len(indices)}个指数)...")
249 |
250 | results = {
251 | 'success_count': 0,
252 | 'failed_count': 0,
253 | 'results': {}
254 | }
255 |
256 | for index_name in indices:
257 | success = self.update_index_cache(index_name, period)
258 | results['results'][index_name] = success
259 | if success:
260 | results['success_count'] += 1
261 | else:
262 | results['failed_count'] += 1
263 |
264 | print(f" ✓ 批量更新完成: 成功 {results['success_count']} 个,失败 {results['failed_count']} 个")
265 | return results
266 |
267 | def add_moving_averages(self, df: pd.DataFrame) -> pd.DataFrame:
268 | """
269 |         Add moving averages to a DataFrame.
270 | 
271 |         Args:
272 |             df: K-line data DataFrame
273 | 
274 |         Returns:
275 |             pd.DataFrame: DataFrame with moving-average columns added
276 | """
277 | try:
278 |             # Make sure the close column exists
279 | if 'close' not in df.columns:
280 | print("❌ DataFrame中没有close列,无法计算均线")
281 | return df
282 |
283 | df = df.copy()
284 |
285 |             # Compute moving averages
286 | df['MA5'] = df['close'].rolling(window=5, min_periods=1).mean()
287 | df['MA10'] = df['close'].rolling(window=10, min_periods=1).mean()
288 | df['MA20'] = df['close'].rolling(window=20, min_periods=1).mean()
289 | df['MA60'] = df['close'].rolling(window=60, min_periods=1).mean()
290 |
291 | return df
292 |
293 | except Exception as e:
294 | print(f"❌ 计算均线失败: {e}")
295 | return df
296 |
297 |
298 | # Global K-line data manager instance
299 | _kline_manager = None
300 |
301 | def get_kline_manager() -> KLineDataManager:
302 |     """Return the global K-line data manager instance."""
303 | global _kline_manager
304 | if _kline_manager is None:
305 | _kline_manager = KLineDataManager()
306 | return _kline_manager
307 |
308 |
309 | if __name__ == "__main__":
310 |     """Smoke test for the K-line data manager."""
311 | print("🧪 测试K线数据管理器...")
312 |
313 | manager = get_kline_manager()
314 |
315 |     # Test fetching K-line data
316 | try:
317 | df, from_cache = manager.get_index_kline_data('上证指数', period=100, use_cache=True)
318 | print(f"✅ 获取上证指数数据成功: {len(df)}条记录, 来自缓存: {from_cache}")
319 | print(f"数据列: {df.columns.tolist()}")
320 | print(f"最新数据:\n{df.tail(3)}")
321 |
322 |         # Test adding moving averages
323 | df_with_ma = manager.add_moving_averages(df)
324 | print(f"✅ 添加均线成功, 新增列: {[col for col in df_with_ma.columns if col.startswith('MA')]}")
325 |
326 | except Exception as e:
327 | print(f"❌ 测试失败: {e}")
328 |
329 | print("✅ 测试完成!")
330 |
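331 | # --- Usage sketch (added for illustration; not part of the original module) ---
332 | # Combining the documented entry points: fetch data shaped for technical
333 | # analysis, add moving averages, and refresh the cache for selected indices.
334 | # The index name below is the same example used in the test above.
335 | #
336 | # manager = get_kline_manager()
337 | # df, from_cache = manager.get_index_kline_data(
338 | #     '上证指数', period=250, use_cache=True, for_technical_analysis=True
339 | # )
340 | # df = manager.add_moving_averages(df)  # adds MA5/MA10/MA20/MA60 columns
341 | # results = manager.batch_update_indices_cache(['上证指数'], period=250)
342 | # print(results['success_count'], results['failed_count'])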
--------------------------------------------------------------------------------
/stock/stock_utils.py:
--------------------------------------------------------------------------------
1 | import akshare as ak
2 | import pandas as pd
3 | from datetime import datetime
4 | from typing import Dict
5 | from stockstats import wrap
6 |
7 | def get_chip_analysis_data(stock_code):
8 |     """Get chip-distribution analysis data for a stock."""
9 | try:
10 |         # Import the chip data cache manager
11 | from stock.chip_data_cache import get_chip_cache_manager
12 | chip_cache = get_chip_cache_manager()
13 |
14 |         # Try to get the raw data from the cache
15 | cached_raw_data = chip_cache.get_cached_raw_data(stock_code)
16 |
17 | if cached_raw_data:
18 | print(f"📋 使用缓存的 {stock_code} 筹码原始数据")
19 |             # Rebuild the DataFrame from cached records for the calculations
20 | cyq_data = pd.DataFrame(cached_raw_data)
21 |             # Convert the date column to datetime for processing
22 | if '日期' in cyq_data.columns:
23 | cyq_data['日期'] = pd.to_datetime(cyq_data['日期'])
24 | else:
25 | print(f"📡 获取 {stock_code} 筹码数据...")
26 | cyq_data = ak.stock_cyq_em(stock_code)
27 |
28 | if cyq_data is None or cyq_data.empty:
29 | return {"error": f"无法获取 {stock_code} 的筹码数据"}
30 |
31 |             # Save the raw data to the dedicated cache
32 | cyq_data_for_cache = cyq_data.copy()
33 | cyq_data_for_cache['日期'] = cyq_data_for_cache['日期'].astype(str)
34 | chip_cache.save_raw_data(stock_code, cyq_data_for_cache.to_dict('records'))
35 |
36 | latest = cyq_data.iloc[-1]
37 | profit_ratio = latest['获利比例']
38 | concentration_90 = latest['90集中度']
39 |
40 | chip_data = {
41 | "latest_date": str(latest['日期']),
42 | "profit_ratio": profit_ratio,
43 | "avg_cost": latest['平均成本'],
44 | "cost_90_low": latest['90成本-低'],
45 | "cost_90_high": latest['90成本-高'],
46 | "concentration_90": concentration_90,
47 | "cost_70_low": latest['70成本-低'],
48 | "cost_70_high": latest['70成本-高'],
49 | "concentration_70": latest['70集中度'],
50 | "support_level": latest['90成本-低'],
51 | "resistance_level": latest['90成本-高'],
52 | "cost_center": latest['平均成本'],
53 |             # raw_data is no longer stored in the main cache; it lives in the dedicated cache
54 | "raw_data_cached": True,
55 | "raw_data_count": len(cyq_data),
56 | }
57 |
58 |         # Add derived analysis indicators
59 | chip_data["analysis"] = {
60 | "profit_status": "高获利" if profit_ratio > 0.7 else ("低获利" if profit_ratio < 0.3 else "中性获利"),
61 | "concentration_status": "高度集中" if concentration_90 < 0.1 else ("分散" if concentration_90 > 0.2 else "适中"),
62 | "risk_level": "高" if profit_ratio > 0.8 and concentration_90 < 0.15 else ("低" if profit_ratio < 0.2 and concentration_90 < 0.15 else "中"),
63 | }
64 |
65 | return chip_data
66 |
67 | except Exception as e:
68 | print(f"获取筹码数据失败: {str(e)}")
69 | return {"error": f"该股票暂不支持获取筹码数据"}
70 |
71 | def _judge_ma_trend(stock_data) -> str:
72 |     """Classify the moving-average trend."""
73 | try:
74 | ma5 = stock_data['close_5_sma'].iloc[-1]
75 | ma10 = stock_data['close_10_sma'].iloc[-1]
76 | ma20 = stock_data['close_20_sma'].iloc[-1]
77 | current_price = stock_data['close'].iloc[-1]
78 |
79 | if current_price > ma5 > ma10 > ma20:
80 | return "多头排列"
81 | elif current_price < ma5 < ma10 < ma20:
82 | return "空头排列"
83 | else:
84 | return "震荡整理"
85 |     except Exception:
86 | return "无法判断"
87 |
88 | def _judge_macd_trend(stock_data) -> str:
89 |     """Classify the MACD trend."""
90 | try:
91 | macd = stock_data['macd'].iloc[-1]
92 | macd_signal = stock_data['macds'].iloc[-1]
93 | macd_hist = stock_data['macdh'].iloc[-1]
94 |
95 | if macd > macd_signal and macd_hist > 0:
96 | return "金叉向上"
97 | elif macd < macd_signal and macd_hist < 0:
98 | return "死叉向下"
99 | else:
100 | return "震荡调整"
101 |     except Exception:
102 | return "无法判断"
103 |
104 |
105 | def get_indicators(df):
106 |     """Compute technical indicators using stockstats."""
107 | stock = wrap(df)
108 | stock_len = len(stock)
109 |
110 | indicators = {
111 |         # Moving averages
112 | 'ma_5': stock['close_5_sma'].iloc[-1] if stock_len > 5 else None,
113 | 'ma_10': stock['close_10_sma'].iloc[-1] if stock_len > 10 else None,
114 | 'ma_20': stock['close_20_sma'].iloc[-1] if stock_len > 20 else None,
115 | 'ma_60': stock['close_60_sma'].iloc[-1] if stock_len > 60 else None,
116 |
117 |         # Exponential moving averages
118 | 'ema_12': stock['close_12_ema'].iloc[-1] if stock_len > 12 else None,
119 | 'ema_26': stock['close_26_ema'].iloc[-1] if stock_len > 26 else None,
120 |
121 |         # MACD
122 | 'macd': stock['macd'].iloc[-1] if stock_len > 26 else None,
123 | 'macd_signal': stock['macds'].iloc[-1] if stock_len > 26 else None,
124 | 'macd_histogram': stock['macdh'].iloc[-1] if stock_len > 26 else None,
125 |
126 |         # KDJ
127 | 'kdj_k': stock['kdjk'].iloc[-1] if stock_len > 9 else None,
128 | 'kdj_d': stock['kdjd'].iloc[-1] if stock_len > 9 else None,
129 | 'kdj_j': stock['kdjj'].iloc[-1] if stock_len > 9 else None,
130 |
131 |         # RSI
132 | 'rsi_14': stock['rsi_14'].iloc[-1] if stock_len > 14 else None,
133 |
134 |         # Bollinger Bands
135 | 'boll_upper': stock['boll_ub'].iloc[-1] if stock_len > 20 else None,
136 | 'boll_middle': stock['boll'].iloc[-1] if stock_len > 20 else None,
137 | 'boll_lower': stock['boll_lb'].iloc[-1] if stock_len > 20 else None,
138 |
139 |         # Williams %R
140 | 'wr_14': stock['wr_14'].iloc[-1] if stock_len > 14 else None,
141 |
142 |         # CCI
143 | 'cci_14': stock['cci_14'].iloc[-1] if stock_len > 14 else None,
144 |
145 |         # Trend classification
146 | 'ma_trend': _judge_ma_trend(stock),
147 | 'macd_trend': _judge_macd_trend(stock),
148 | }
149 |
150 | return indicators
151 |
152 | def fetch_stock_basic_info(stock_code: str) -> Dict:
153 |     """Concrete implementation for fetching basic stock information."""
154 | from stock.stock_data_fetcher import data_manager
155 |
156 | basic_info = {}
157 |
158 | try:
159 | if not data_manager.is_available() and not data_manager.initialize():
160 | raise Exception("数据提供者初始化失败")
161 |
162 | realtime_data = data_manager.get_realtime_quote(stock_code)
163 | stock_info = data_manager.fetch_stock_info(stock_code)
164 |
165 | if realtime_data:
166 | basic_info.update({
167 | 'current_price': float(realtime_data.current_price),
168 | 'change': float(realtime_data.change),
169 | 'change_percent': float(realtime_data.change_percent),
170 | 'volume': int(realtime_data.volume),
171 | 'amount': float(realtime_data.amount),
172 | 'high': float(realtime_data.high),
173 | 'low': float(realtime_data.low),
174 | 'open': float(realtime_data.open),
175 | 'prev_close': float(realtime_data.prev_close),
176 | 'timestamp': str(realtime_data.timestamp),
177 | })
178 |
179 | if stock_info:
180 | basic_info.update(stock_info)
181 |
182 | except Exception as e:
183 | basic_info['error'] = str(e)
184 |
185 | basic_info['update_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
186 | return basic_info
187 |
188 | def fetch_stock_technical_indicators(stock_code: str, period: int = 160) -> Dict:
189 |     """Concrete implementation for fetching technical indicators (K-line data is not cached; only the computed results are)."""
190 | from stock.stock_data_fetcher import data_manager, KLineType
191 | from utils.risk_metrics import calculate_portfolio_risk_summary
192 |
193 | indicators_info = {}
194 |
195 | try:
196 | kline_data = data_manager.get_kline_data(stock_code, KLineType.DAY, period)
197 |
198 | if not kline_data:
199 | indicators_info['error'] = f"未获取到股票 {stock_code} 的K线数据"
200 | else:
201 | df = pd.DataFrame([k.__dict__ for k in kline_data]).sort_values('datetime')
202 |
203 |             # Compute moving averages (separate loop variable so the `period` argument is not shadowed)
204 |             for ma_period in [5, 10, 20]:
205 |                 df[f'MA{ma_period}'] = df['close'].rolling(window=ma_period).mean()
206 |
207 | indicators = get_indicators(df)
208 |
209 |             # Risk metrics
210 | risk_metrics = {}
211 | if len(df) >= 5:
212 | try:
213 | risk_metrics = calculate_portfolio_risk_summary(df, price_col='close')
214 | except Exception as e:
215 | risk_metrics['error'] = str(e)
216 |
217 |             # Latest data summary
218 | latest_row = df.iloc[-1]
219 | latest_data = {
220 | 'date': latest_row['datetime'].isoformat() if hasattr(latest_row['datetime'], 'isoformat') else str(latest_row['datetime']),
221 | 'open': float(latest_row['open']) if pd.notna(latest_row['open']) else None,
222 | 'high': float(latest_row['high']) if pd.notna(latest_row['high']) else None,
223 | 'low': float(latest_row['low']) if pd.notna(latest_row['low']) else None,
224 | 'close': float(latest_row['close']) if pd.notna(latest_row['close']) else None,
225 | 'volume': int(latest_row['volume']) if pd.notna(latest_row['volume']) else None,
226 | }
227 |
228 | indicators_info.update({
229 | 'indicators': indicators,
230 | 'risk_metrics': risk_metrics,
231 | 'data_length': len(df),
232 | 'latest_data': latest_data,
233 | 'has_ma_data': True
234 | })
235 |
236 | except Exception as e:
237 | indicators_info['error'] = str(e)
238 |
239 | indicators_info['update_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
240 | return indicators_info
241 |
242 | def fetch_stock_news_data(stock_code: str, day=7) -> Dict:
243 |     """Concrete implementation for fetching stock news data."""
244 | from utils.news_tools import get_stock_news_by_akshare
245 |
246 | news_info = {}
247 |
248 | try:
249 | stock_data = get_stock_news_by_akshare(stock_code, day=day)
250 |
251 | if stock_data and 'company_news' in stock_data:
252 | news_data = stock_data['company_news']
253 | news_info.update({
254 | 'news_data': news_data,
255 | 'news_count': len(news_data),
256 | 'latest_news': news_data[:5] if len(news_data) >= 5 else news_data
257 | })
258 | else:
259 | news_info['error'] = "未能获取到相关新闻"
260 |
261 | except Exception as e:
262 | news_info['error'] = str(e)
263 |
264 | news_info['update_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
265 | return news_info
266 |
267 | def fetch_stock_chip_data(stock_code: str) -> Dict:
268 |     """Concrete implementation for fetching chip-distribution data."""
269 | chip_info = {}
270 |
271 | try:
272 | chip_data = get_chip_analysis_data(stock_code)
273 | chip_info.update(chip_data if "error" not in chip_data else {'error': chip_data["error"]})
274 | except Exception as e:
275 | chip_info['error'] = str(e)
276 |
277 | chip_info['update_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
278 | return chip_info
279 |
280 | def get_chip_raw_data(stock_code):
281 |     """Get raw chip-distribution data for a stock."""
282 | try:
283 | from stock.chip_data_cache import get_chip_cache_manager
284 | chip_cache = get_chip_cache_manager()
285 |
286 |         # Try to get the raw data from the cache
287 | cached_raw_data = chip_cache.get_cached_raw_data(stock_code)
288 |
289 | if cached_raw_data:
290 | print(f"📋 使用缓存的 {stock_code} 筹码原始数据")
291 | return cached_raw_data
292 | else:
293 | print(f"📡 获取 {stock_code} 筹码原始数据...")
294 | cyq_data = ak.stock_cyq_em(stock_code)
295 |
296 | if cyq_data is None or cyq_data.empty:
297 | return None
298 |
299 |             # Save the raw data to the dedicated cache
300 | cyq_data_for_cache = cyq_data.copy()
301 | cyq_data_for_cache['日期'] = cyq_data_for_cache['日期'].astype(str)
302 | raw_data = cyq_data_for_cache.to_dict('records')
303 | chip_cache.save_raw_data(stock_code, raw_data)
304 |
305 | return raw_data
306 |
307 | except Exception as e:
308 | print(f"获取筹码原始数据失败: {str(e)}")
309 | return None
310 |
311 |
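312 | # --- Usage sketch (added for illustration; not part of the original module) ---
313 | # Composing the fetchers above for a single stock code; '600519' is just an
314 | # example A-share code.
315 | #
316 | # code = '600519'
317 | # basic = fetch_stock_basic_info(code)
318 | # technical = fetch_stock_technical_indicators(code, period=160)
319 | # news = fetch_stock_news_data(code, day=7)
320 | # chips = fetch_stock_chip_data(code)
321 | # for name, block in [('basic', basic), ('technical', technical), ('news', news), ('chips', chips)]:
322 | #     status = 'error: ' + block['error'] if 'error' in block else 'ok'
323 | #     print(f"{name}: {status} (updated {block['update_time']})")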
--------------------------------------------------------------------------------