├── .gitattributes ├── .gitignore ├── .xet └── config.toml ├── BUILD ├── LICENSE ├── README.md ├── WORKSPACE ├── data ├── BUILD ├── bkt_result │ ├── BUILD │ ├── bkt_result.html │ ├── main.html │ └── main2.html ├── csv │ ├── 000300.SH.csv │ ├── 000827.SH.csv │ ├── 000905.SH.csv │ ├── 000922.SH.csv │ ├── 000993.SH.csv │ ├── 159928.SZ.csv │ ├── 161716.SZ.csv │ ├── 399006.SZ.csv │ ├── 399324.SZ.csv │ ├── 399396.SZ.csv │ ├── 399967.SZ.csv │ ├── 399971.SZ.csv │ ├── 399986.SZ.csv │ ├── 399989.SZ.csv │ ├── 399997.SZ.csv │ ├── 399998.SZ.csv │ ├── 510050.SH.csv │ ├── 511010.SH.csv │ ├── 511220.SH.csv │ ├── 512010.SH.csv │ ├── 513100.SH.csv │ ├── 518880.SH.csv │ ├── BUILD │ ├── GDAXI.csv │ ├── HKTECH.csv │ ├── HSI.csv │ ├── IXIC.csv │ ├── N225.csv │ └── SPX.csv └── hdf5 │ ├── BUILD │ ├── all.h5 │ ├── cache.h5 │ └── index.h5 ├── engine ├── BUILD ├── __init__.py ├── alpha │ ├── BUILD │ ├── __init__.py │ └── alphalens_utils.py ├── bt_engine.py ├── common.py ├── config.py ├── data_utils.py ├── datafeed │ ├── BUILD │ ├── __init__.py │ ├── __pycache__ │ │ └── BUILD │ ├── datafeed_arctic.py │ ├── datafeed_csv.py │ ├── datafeed_hdf5.py │ ├── dataloader.py │ ├── dataset.py │ ├── expr │ │ ├── BUILD │ │ ├── __init__.py │ │ ├── base.py │ │ ├── expr_extend.py │ │ ├── expr_mgr.py │ │ └── ops.py │ └── ts_downloader.py ├── demos │ ├── BUILD │ ├── __init__.py │ ├── a2c_cartpole_tensorboard │ │ ├── A2C_1 │ │ │ ├── BUILD │ │ │ └── events.out.tfevents.1669546684.LAPTOP-3RCHD0KF.29644.0 │ │ ├── A2C_2 │ │ │ ├── BUILD │ │ │ └── events.out.tfevents.1669546881.LAPTOP-3RCHD0KF.15556.0 │ │ └── BUILD │ ├── ddpg_demo.py │ ├── ddpg_pendulum.zip │ ├── env_portfolio_yahoofinance.py │ └── sb3_utils.py ├── indicator │ ├── BUILD │ ├── __init__.py │ ├── indicator_rsrs.py │ ├── signal_double_sma.py │ └── signal_triple_sma.py ├── main.py ├── ml │ ├── BUILD │ ├── __init__.py │ ├── model.py │ ├── model_bak │ │ ├── BUILD │ │ ├── __init__.py │ │ ├── boosting_models.py │ │ ├── dataset.py │ │ ├── keras_dnn.py │ │ └── models.py │ └── model_runner.py ├── model │ ├── BUILD │ ├── __init__.py │ ├── dql_agent.py │ ├── pytorch_lstm.py │ └── stock_ranker.py ├── performance.py ├── qlib_mgr.py ├── rl │ ├── BNB_USDT_5m.csv │ ├── BUILD │ ├── __init__.py │ ├── backtrader_rl.py │ ├── backtrader_rl │ │ ├── BUILD │ │ ├── __init__.py │ │ ├── adapters │ │ │ ├── BUILD │ │ │ ├── __init__.py │ │ │ ├── gymAdapter.py │ │ │ └── tensorforceAdapter.py │ │ ├── engines.py │ │ ├── finance_env.py │ │ ├── strategys.py │ │ └── utils.py │ ├── clock.py │ ├── env_portfolio.py │ ├── finane_env.py │ ├── gym_env_example.py │ └── strategy_rl.py ├── stats.html └── strategy │ ├── BUILD │ ├── __init__.py │ ├── algo_utils.py │ ├── algos.py │ ├── examples │ ├── BUILD │ ├── __init__.py │ ├── stats.html │ ├── strategy_bolling.py │ ├── strategy_portfolio_equal_weights.py │ ├── strategy_portfolio_fix_weights.py │ ├── strategy_portfolio_rp.py │ ├── strategy_roc.py │ └── strategy_roc_portfolio.py │ ├── stragegy_algo.py │ ├── stragegy_buyhold.py │ ├── strategy_base.py │ ├── strategy_picktime.py │ ├── strategy_rotation.py │ └── strategy_turtle.py ├── gui ├── BUILD ├── __init__.py ├── global_event.py ├── gui_utils.py ├── mainapp.py ├── mainframe.py ├── page_timeseries.py ├── panels │ ├── BUILD │ ├── __init__.py │ ├── action_rolling.py │ ├── actions.py │ ├── backtest.py │ ├── panel_backtest.py │ ├── panels.py │ └── results.py ├── widgets.py └── widgets │ ├── BUILD │ ├── __init__.py │ ├── widget_matplotlib.py │ └── widget_web.py ├── images ├── BUILD ├── main_window2.png ├── mainwindow.png ├── 
weixin.jpg └── xingqiu.png ├── notebook └── Stock_NeurIPS2018_SB3.ipynb ├── qbot └── BUILD ├── qbot_main.py ├── qbot_test.py ├── scripts └── creat_build_file.sh ├── setup.py ├── third_party ├── BUILD ├── requirements.txt └── requirements.txt.tmp └── utils ├── BUILD ├── larkbot.py ├── send_email.py └── wxbot.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # XET LOCK 2 | * filter=xet diff=xet merge=xet -text 3 | *.gitattributes filter= 4 | *.xet/** filter= 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | 4 | */__pycache__ 5 | engine/__pycache__ 6 | engine/datafeed/__pycache__ 7 | 8 | *.py[cod] 9 | *$py.class 10 | 11 | **.DS_Store 12 | 13 | -------------------------------------------------------------------------------- /.xet/config.toml: -------------------------------------------------------------------------------- 1 | [upstream] 2 | origin_type = "github" 3 | user_name = "Charmve" 4 | repo_name = "iQuant" 5 | -------------------------------------------------------------------------------- /BUILD: -------------------------------------------------------------------------------- 1 | load("@rules_python//python:defs.bzl", "py_binary", "py_test") 2 | load("@third_party//:requirements.bzl", "requirement") 3 | 4 | # TODO(https://github.com/bazelbuild/bazel-bench/issues/36): Make these work for python3. 5 | py_binary( 6 | name = "qbot", 7 | srcs = ["qbot_main.py"], 8 | deps = [ 9 | "//utils", 10 | requirement("wxPython"), 11 | requirement("pandas"), 12 | requirement("matplotlib"), 13 | requirement("backtrader"), 14 | requirement("backtrader_plotting"), 15 | requirement("scipy"), 16 | requirement("statsmodels"), 17 | requirement("quantstats"), 18 | requirement("requests"), # needed by yahoofinance 19 | requirement("loguru"), # simplifies logger usage 20 | requirement("binance-connector"), 21 | requirement("numba"), # needed for pandas multi-series rolling 22 | requirement("pykalman"), 23 | requirement("tables"), 24 | requirement("scikit-learn"), 25 | requirement("empyrical"), 26 | requirement("stable-baselines3"), 27 | requirement("jupyter"), 28 | requirement("gym[all]"), 29 | requirement("pyglet"), 30 | requirement("tensorboard"), 31 | requirement("tensortrade"), 32 | requirement("yfinance"), 33 | requirement("pandas_datareader"), 34 | ], 35 | ) 36 | 37 | py_test( 38 | name = "qbot_test", 39 | srcs = ["qbot_test.py"], 40 | deps = [ 41 | ":qbot", 42 | "//testutils", 43 | requirement("mock"), 44 | ], 45 | ) 46 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # iQuant 2 | 3 | [UFund-Me](https://github.com/UFund-Me) focuses on applying cutting-edge AI techniques (machine learning, deep learning, reinforcement learning, genetic algorithms, graph computing, knowledge graphs, etc.) to quantitative investing. 4 | 5 | Financial investing is highly information-intensive, and the information is relatively well structured, so in principle it should be the field best suited to machine computation. Yet investing today still feels like the stone age: some people are busy doing research and reading financial statements, while others are busy watching the tape and drawing trend lines. 6 | 7 | It has been five years since AlphaGo Master reached the summit of Go; with algorithms and compute advancing by the day, things should not still look like this. 8 | 9 | Financial data has a low signal-to-noise ratio, so do not expect to build a perpetual-motion money machine. 10 | 11 | But do believe one thing: 12 | ``` 13 | No man is better than a machine, but no machine is better than a man with a machine!
14 | ``` 15 | 16 | Let machines assist our investing and we will have the advantage everywhere. 17 | 18 | By how actively you take part in decision-making, investing can be split into three levels: 19 | - All-weather asset allocation. 20 | Passively managed with little intervention: once the allocation is set, hold it for the long term and only do occasional passive rebalancing. 21 | - Tactical asset allocation. 22 | Watch the big macro cycles and hold larger positions in the assets favored by the current cycle. 23 | - Market timing. 24 | Actively call the market direction, aiming to buy low and sell high. 25 | 26 | From top to bottom these three levels become more uncertain, harder, and riskier. Of course, when done well, the returns also grow. 27 | 28 | Ordinary investors are advised to start from the first level; it is the easiest way to move from wealth management to investing. 29 | 30 | Unfortunately, most people jump straight to the third level, chasing rallies and dumping dips, often in the wrong direction, and end up as the market's cannon fodder. 31 | 32 | ![main window](./images/mainwindow.png) 33 | ### About the project 34 | 35 | Traditional quantitative investing generates signals from technical indicators such as moving averages, MACD, RSI, and KDJ, plus their linear variants. 36 | This has several drawbacks: 37 | 38 | - first, the rules are linear; 39 | 40 | - second, the parameters come purely from experience, with no tuning process; 41 | 42 | - third, the rules are static and cannot evolve on their own as the market changes. 43 | 44 | Our goal is to apply frontier AI techniques (machine learning, deep learning, deep reinforcement learning, knowledge graphs, time-series analysis, etc.) to mining big financial data, 45 | to better empower quantitative investing. 46 | 47 | The low signal-to-noise ratio of financial data makes this very hard, 48 | 49 | and that is exactly what makes it interesting. 50 | 51 | ### A building-block backtest engine 52 | ``` 53 | algo_list_rolling = [ 54 | 55 | SelectFix(instruments=['sh000300', 'sh000905', 'sz399006']), 56 | 57 | SelectBySignal(signal_buy='to_buy', signal_sell='to_sell'), 58 | 59 | SelectTopK(K=1,col='五日动量'), 60 | 61 | WeightEqually() 62 | ] 63 | ``` 64 | ### Environment and installation 65 | 66 | python3.7~3.10 67 | 68 | Clone with git or download the source package, install the dependencies, then run ``qbot_main.py``. 69 | 70 | ``` 71 | git clone https://github.com/Charmve/iQuant 72 | 73 | cd iQuant 74 | 75 | pip install -r requirements.txt 76 | 77 | python qbot_main.py 78 | ``` 79 | 80 | ### Contact us 81 | 82 | WeChat: Yida_Zhang2 83 | 84 | We keep sharing how cutting-edge **AI technology can empower financial investing**, and hope to gather a group of like-minded friends! 85 | 86 | -------------------------------------------------------------------------------- /WORKSPACE: -------------------------------------------------------------------------------- 1 | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") 2 | 3 | http_archive( 4 | name = "rules_python", 5 | sha256 = "c03246c11efd49266e8e41e12931090b613e12a59e6f55ba2efd29a7cb8b4258", 6 | strip_prefix = "rules_python-0.11.0", 7 | url = "https://github.com/bazelbuild/rules_python/archive/refs/tags/0.11.0.tar.gz", 8 | ) 9 | 10 | load("@rules_python//python:pip.bzl", "pip_install") 11 | load("@rules_python//python:repositories.bzl", "python_register_toolchains") 12 | 13 | # Use a hermetic Python interpreter so that builds are reproducible 14 | # irrespective of the Python version available on the host machine. 15 | python_register_toolchains( 16 | name = "python3_9", 17 | python_version = "3.9", 18 | ) 19 | 20 | load("@rules_python//python:pip.bzl", pip3_install="pip_install") 21 | 22 | pip3_install( 23 | name = "py_deps", 24 | requirements = "//:requirements.txt", 25 | ) 26 | 27 | load("@python3_9//:defs.bzl", "interpreter") 28 | 29 | # Translate requirements.txt into a @third_party external repository.
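# For reference: the @third_party repository created below exposes a requirements.bzl,
# and its requirement() helper is what the root BUILD file loads via
# load("@third_party//:requirements.bzl", "requirement") to declare deps such as
# requirement("pandas") and requirement("backtrader") for the qbot py_binary.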
30 | pip_install( 31 | name = "third_party", 32 | python_interpreter_target = interpreter, 33 | requirements = "//third_party:requirements.txt", 34 | ) 35 | -------------------------------------------------------------------------------- /data/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/data/BUILD -------------------------------------------------------------------------------- /data/bkt_result/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/data/bkt_result/BUILD -------------------------------------------------------------------------------- /data/csv/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/data/csv/BUILD -------------------------------------------------------------------------------- /data/hdf5/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/data/hdf5/BUILD -------------------------------------------------------------------------------- /data/hdf5/all.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/data/hdf5/all.h5 -------------------------------------------------------------------------------- /data/hdf5/cache.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/data/hdf5/cache.h5 -------------------------------------------------------------------------------- /data/hdf5/index.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/data/hdf5/index.h5 -------------------------------------------------------------------------------- /engine/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/BUILD -------------------------------------------------------------------------------- /engine/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/__init__.py -------------------------------------------------------------------------------- /engine/alpha/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/alpha/BUILD -------------------------------------------------------------------------------- /engine/alpha/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/alpha/__init__.py -------------------------------------------------------------------------------- /engine/alpha/alphalens_utils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 
Author: Charmve yidazhang1@gmail.com 3 | Date: 2023-05-18 22:00:33 4 | LastEditors: Charmve yidazhang1@gmail.com 5 | LastEditTime: 2023-05-18 22:33:59 6 | FilePath: /Qbot/iQuant/engine/alpha/alphalens_utils.py 7 | Version: 1.0.1 8 | Blogs: charmve.blog.csdn.net 9 | GitHub: https://github.com/Charmve 10 | Description: 11 | 12 | Copyright (c) 2023 by Charmve, All Rights Reserved. 13 | Licensed under the MIT License. 14 | ''' 15 | # encoding:utf8 16 | import pandas as pd 17 | from alphalens.utils import get_clean_factor_and_forward_returns 18 | 19 | from pathlib import Path 20 | import sys 21 | TOP_DIR = Path(__file__).parent.parent.joinpath("engine") 22 | sys.path.append(TOP_DIR) 23 | 24 | # 将tears.py中的get_values()函数改为to_numpy() 25 | 26 | def make_alphalens_datas(df, alpha_col): 27 | # alphalens接受的数据必须是双索引,date,code 28 | alpha_df = df.set_index([df.index, 'code']) 29 | alpha_df.sort_index(level=0, inplace=True, ascending=True) 30 | # 使用pivot_table把因子值,按symbol提出来 31 | close_df = df.pivot_table(index='date', columns='code', values='close', dropna=True) 32 | return alpha_df[alpha_col], close_df 33 | 34 | 35 | if __name__ == '__main__': 36 | from datafeed.dataloader import Dataloader 37 | 38 | df = Dataloader().load_one_df(['000300.SH', '399006.SZ', 'SPX'], 39 | names=['close', 'mom_20', 'rate'], fields=['$close', 40 | '$close/Ref($close,20)-1', 41 | '$close/Ref($close,1)-1' 42 | ]) 43 | alpha_df, close_df = make_alphalens_datas(df, 'mom_20') 44 | print(alpha_df, close_df) 45 | 46 | from alphalens.utils import get_clean_factor_and_forward_returns 47 | 48 | # 将tears.py中的get_values()函数改为to_numpy() 49 | ret = get_clean_factor_and_forward_returns(alpha_df, close_df) 50 | print(ret) 51 | 52 | from alphalens.tears import create_returns_tear_sheet 53 | 54 | create_returns_tear_sheet(ret, long_short=False) 55 | -------------------------------------------------------------------------------- /engine/bt_engine.py: -------------------------------------------------------------------------------- 1 | # encoding:utf8 2 | from datetime import datetime 3 | 4 | import backtrader as bt 5 | 6 | from pathlib import Path 7 | import sys 8 | TOP_DIR = Path(__file__).parent.parent.joinpath("engine") 9 | sys.path.append(TOP_DIR) 10 | 11 | from datafeed.datafeed_hdf5 import Hdf5DataFeed 12 | from datafeed.dataloader import Dataloader 13 | from datafeed.dataset import Dataset 14 | from strategy.strategy_base import StratgeyAlgoBase 15 | 16 | 17 | class BacktraderEngine: 18 | def __init__(self, init_cash=1000000.0, benchmark='000300.SH', start=datetime(2010, 1, 1), 19 | end=datetime.now().date()): 20 | self.init_cash = init_cash 21 | self.start = start 22 | self.end = end 23 | self.benchmark = benchmark 24 | self.extra = {} 25 | 26 | cerebro = bt.Cerebro() 27 | cerebro.broker.setcash(init_cash) 28 | 29 | # 设置手续费 30 | cerebro.broker.setcommission(0.0001) 31 | # 滑点:双边各 0.0001 32 | cerebro.broker.set_slippage_perc(perc=0.0001) 33 | 34 | self.cerebro = cerebro 35 | self.cerebro.addanalyzer(bt.analyzers.PyFolio, _name='_PyFolio') 36 | 37 | self.feed = Hdf5DataFeed() 38 | 39 | def add_extra(self, symbol, names, fields): 40 | self.extra[symbol] = self.loader.load_dfs([symbol], names, fields)[0] 41 | # print(self.extra) 42 | 43 | def add_features(self, symbols, names, fields, load_from_cache=False): 44 | # 1.添加数据集,即资产候选集 45 | for s in symbols: 46 | self.add_data(s) 47 | 48 | # 2.特征工程 49 | self.loader = Dataloader(symbols, names, fields, load_from_cache) 50 | self.features = self.loader.data 51 | 52 | def add_model(self, model, 
split_date, feature_names): 53 | self.dataset = Dataset(dataloader=self.loader, split_date=split_date, feature_names=feature_names) 54 | model.fit(self.dataset) 55 | self.features['pred_score'] = model.predict(self.dataset) 56 | print(self.features['pred_score']) 57 | 58 | def _init_analyzers(self): 59 | ''' 60 | self.cerebro.addanalyzer(bt.analyzers.Returns, _name='_Returns') 61 | self.cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name='_TradeAnalyzer') 62 | self.cerebro.addanalyzer(bt.analyzers.AnnualReturn, _name='_AnnualReturn') 63 | self.cerebro.addanalyzer(bt.analyzers.SharpeRatio, riskfreerate=0.0, annualize=True, _name='_SharpeRatio') 64 | self.cerebro.addanalyzer(bt.analyzers.DrawDown, _name='_DrawDown') 65 | ''' 66 | self.cerebro.addanalyzer(bt.analyzers.PyFolio, _name='_PyFolio') 67 | 68 | def add_data(self, code): 69 | # 加载数据 70 | df = self.feed.get_df(code) 71 | df = to_backtrader_dataframe(df) 72 | data = bt.feeds.PandasData(dataname=df, name=code, fromdate=self.start, todate=self.end) 73 | 74 | self.cerebro.adddata(data) 75 | self.cerebro.addobserver(bt.observers.Benchmark, 76 | data=data) 77 | self.cerebro.addobserver(bt.observers.TimeReturn) 78 | 79 | def run_algo_strategy(self, algo_list): 80 | self.cerebro.addstrategy(StratgeyAlgoBase, algo_list=algo_list, features=self.features, extra=self.extra) 81 | self.results = self.cerebro.run() 82 | 83 | def _bokeh_plot(self): 84 | from backtrader_plotting import Bokeh 85 | from backtrader_plotting.schemes import Tradimo 86 | plotconfig = { 87 | 'id:ind#0': dict( 88 | subplot=True, 89 | ), 90 | } 91 | b = Bokeh(style='line', scheme=Tradimo(), plotconfig=plotconfig) 92 | self.cerebro.plot(b) 93 | 94 | def show_result_empyrical(self, returns): 95 | import empyrical 96 | 97 | print('累计收益:', round(empyrical.cum_returns_final(returns), 3)) 98 | print('年化收益:', round(empyrical.annual_return(returns), 3)) 99 | print('最大回撤:', round(empyrical.max_drawdown(returns), 3)) 100 | print('夏普比', round(empyrical.sharpe_ratio(returns), 3)) 101 | print('卡玛比', round(empyrical.calmar_ratio(returns), 3)) 102 | print('omega', round(empyrical.omega_ratio(returns)), 3) 103 | 104 | def analysis(self, pyfolio=False): 105 | portfolio_stats = self.results[0].analyzers.getbyname('_PyFolio') 106 | returns, positions, transactions, _ = portfolio_stats.get_pf_items() 107 | returns.index = returns.index.tz_convert(None) 108 | self.show_result_empyrical(returns) 109 | 110 | if pyfolio: 111 | from pyfolio.tears import create_full_tear_sheet 112 | create_full_tear_sheet(returns, positions=positions, transactions=transactions) 113 | else: 114 | import quantstats 115 | df = self.feed.get_df(self.benchmark) 116 | df['rate'] = df['close'].pct_change() 117 | df = df[['rate']] 118 | quantstats.reports.html(returns, benchmark=df, download_filename='stats.html', output='stats.html', 119 | title='AI量化平台') 120 | import webbrowser 121 | webbrowser.open('stats.html') 122 | 123 | ''' 124 | 125 | import pyfolio as pf 126 | pf.create_full_tear_sheet( 127 | returns, 128 | positions=positions, 129 | transactions=transactions) 130 | ''' 131 | # self.cerebro.plot(volume=False) 132 | 133 | 134 | from data_utils import to_backtrader_dataframe 135 | from strategy.strategy_rotation import StrategyRotation 136 | from strategy.stragegy_buyhold import StratgeyBuyHold 137 | 138 | 139 | # 策略选择类 140 | class StFetcher(object): 141 | _STRATS = [StratgeyBuyHold, StrategyRotation] # 注册策略 142 | 143 | def __new__(cls, *args, **kwargs): 144 | idx = kwargs.pop('idx') # 策略索引 145 | 146 | obj = 
cls._STRATS[idx](*args, **kwargs) 147 | return obj 148 | 149 | 150 | if __name__ == '__main__': 151 | # symbols = ['399006.SZ'] 152 | symbols = ['399006.SZ', '000300.SH'] 153 | symbols = ['510300.SH', '159915.SZ'] 154 | symbols = [ 155 | '510050.SH', # 上证50ETF 156 | '159928.SZ', # 中证消费ETF 157 | '510300.SH', # 沪深300ETF 158 | '159915.SZ', # 创业板50 159 | '512120.SH', # 医药50ETF 160 | '159806.SZ', # 新能车ETF 161 | '510880.SH', # 红利ETF 162 | ] 163 | 164 | fields = [] 165 | names = [] 166 | feature_names = [] 167 | 168 | fields += ['Slope($close,20)'] 169 | names += ['mom_slope'] 170 | feature_names += ['mom_slope'] 171 | 172 | fields += ['KF($mom_slope)'] 173 | names += ['kf_mom_slope'] 174 | feature_names += ['kf_mom_slope'] 175 | 176 | fields += ["Ref($close,-1)/$close - 1"] 177 | names += ['label'] 178 | 179 | from bt_engine import BacktraderEngine 180 | from datetime import datetime 181 | 182 | e = BacktraderEngine(init_cash=1000000, benchmark='399006.SZ', start=datetime(2014, 1, 1)) 183 | e.add_features(symbols, names, fields, load_from_cache=True) 184 | 185 | from ml.model import SklearnModel 186 | from sklearn.ensemble import RandomForestRegressor,AdaBoostRegressor,HistGradientBoostingRegressor 187 | 188 | e.add_model(SklearnModel(AdaBoostRegressor()), split_date='2020-01-01', feature_names=feature_names) 189 | 190 | RSRS_benchmark = '510300.SH' 191 | # e.add_extra(RSRS_benchmark, fields=['RSRS($high,$low,18)', '$RSRS_beta<0.8'], names=['RSRS', 'signal']) 192 | e.add_extra(RSRS_benchmark, fields=['RSRS($high,$low,18)', 'Norm($RSRS_beta,600)', '$Zscore<0.0'], 193 | names=['RSRS', 'Zscore', 'signal']) 194 | 195 | from strategy.algos import SelectTopK, PickTime, WeightEqually 196 | 197 | e.run_algo_strategy([SelectTopK(K=1, order_by='pred_score', b_ascending=False), WeightEqually()]) 198 | e.analysis(pyfolio=False) 199 | -------------------------------------------------------------------------------- /engine/common.py: -------------------------------------------------------------------------------- 1 | def Singleton(cls): 2 | _instance = {} 3 | 4 | def _singleton(*args, **kwagrs): 5 | if cls not in _instance: 6 | _instance[cls] = cls(*args, **kwagrs) 7 | return _instance[cls] 8 | 9 | return _singleton -------------------------------------------------------------------------------- /engine/config.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | DATA_DIR = Path(__file__).parent.parent.joinpath("data") 4 | 5 | DATA_DIR_HDF5 = DATA_DIR.joinpath('hdf5') 6 | DATA_DIR_HDF5_ALL = DATA_DIR_HDF5.joinpath('all.h5') 7 | DATA_DIR_HDF5_CACHE = DATA_DIR_HDF5.joinpath('cache.h5') 8 | 9 | DATA_DIR_CSV = DATA_DIR.joinpath('csv') 10 | DATA_DIR_BKT_RESULT = DATA_DIR.joinpath('bkt_result') 11 | 12 | dirs = [DATA_DIR, DATA_DIR_CSV, DATA_DIR_BKT_RESULT] 13 | for dir in dirs: 14 | dir.mkdir(exist_ok=True, parents=True) 15 | 16 | 17 | -------------------------------------------------------------------------------- /engine/data_utils.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | 4 | def to_backtrader_dataframe(df): 5 | df.index = pd.to_datetime(df.index) 6 | df['openinterest'] = 0 7 | df = df[['open', 'high', 'low', 'close', 'volume', 'openinterest']] 8 | return df 9 | -------------------------------------------------------------------------------- /engine/datafeed/BUILD: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/datafeed/BUILD -------------------------------------------------------------------------------- /engine/datafeed/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/datafeed/__init__.py -------------------------------------------------------------------------------- /engine/datafeed/__pycache__/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/datafeed/__pycache__/BUILD -------------------------------------------------------------------------------- /engine/datafeed/datafeed_arctic.py: -------------------------------------------------------------------------------- 1 | # encoding:utf8 2 | import datetime 3 | 4 | from arctic import Arctic, CHUNK_STORE 5 | import pandas as pd 6 | 7 | 8 | class ArcticDataFeed: 9 | def __init__(self, db_name='etf_quotes'): 10 | self.code_dfs = {} 11 | a = Arctic('localhost') 12 | a.initialize_library(db_name, lib_type=CHUNK_STORE) 13 | self.lib = a[db_name] 14 | 15 | def get_df(self, code, cols=None): 16 | if code in self.code_dfs.keys(): 17 | return self.code_dfs[code] 18 | df = self.lib.read(code) 19 | self.code_dfs[code] = df 20 | return df 21 | 22 | def get_one_df_by_codes(self, codes): 23 | dfs = [self.get_df(code) for code in codes] 24 | df_all = pd.concat(dfs, axis=0) 25 | df_all.dropna(inplace=True) 26 | df_all.sort_index(inplace=True) 27 | return df_all 28 | 29 | def get_returns_df(self, codes): 30 | df = self.get_one_df_by_codes(codes) 31 | all = pd.pivot_table(df, index='date', values='close', columns=['code']) 32 | returns_df = all.pct_change() 33 | returns_df.dropna(inplace=True) 34 | return returns_df 35 | 36 | def get_returns_df_ordered(self, codes): 37 | dfs = [] 38 | for code in codes: 39 | df = self.get_df(code,cols=['close']) 40 | close = df['close'] 41 | close.name = code 42 | dfs.append(close) 43 | all = pd.concat(dfs, axis=1) 44 | returns_df = all.pct_change() 45 | returns_df.dropna(inplace=True) 46 | return returns_df 47 | 48 | 49 | 50 | if __name__ == '__main__': 51 | codes = ['159928.SZ', '510050.SH', '512010.SH', '513100.SH', '518880.SH', '511220.SH', '511010.SH', 52 | '161716.SZ'] 53 | #df = ArcticDataFeed().get_df('562310.SH') 54 | df = ArcticDataFeed().get_returns_df(codes) 55 | print(df) 56 | 57 | df = ArcticDataFeed().get_returns_df_ordered(codes) 58 | print(df) 59 | 60 | 61 | 62 | import matplotlib.pyplot as plt 63 | import seaborn as sns 64 | from matplotlib import style 65 | 66 | 67 | # style.use('fivethirtyeight') 68 | # style.use('ggplot') 69 | sns.set_style("whitegrid") 70 | sns.set(palette="muted") 71 | #plt.plot(data, label=data.name, alpha=.6) 72 | plt.rcParams['font.sans-serif'] = ['SimHei'] 73 | plt.rcParams['axes.unicode_minus'] = False 74 | # sns.color_palette("hls", 12) 75 | # sns.set(style ='darkgrid',palette ='deep') 76 | #fig, axes = plt.subplots(1, 1, figsize=(18, 6)) 77 | 78 | 79 | #data = df['2009-01-01':] 80 | data = data / data.iloc[0] * 100 # 统一缩放到100为基点 81 | data.plot() 82 | plt.legend() 83 | plt.show() 84 | -------------------------------------------------------------------------------- /engine/datafeed/datafeed_csv.py: -------------------------------------------------------------------------------- 1 | # encoding:utf8 2 | 3 | from pathlib 
import Path 4 | import sys 5 | TOP_DIR = Path(__file__).parent.parent.joinpath("../engine") 6 | sys.path.append(TOP_DIR) 7 | 8 | import os 9 | import pandas as pd 10 | from loguru import logger 11 | from engine.config import DATA_DIR_CSV 12 | 13 | 14 | class CSVDatafeed: 15 | _instance = None 16 | 17 | def __new__(cls, *args, **kw): 18 | if cls._instance is None: 19 | cls._instance = object.__new__(cls, *args, **kw) 20 | return cls._instance 21 | 22 | def __init__(self): 23 | self.code_dfs = {} 24 | 25 | def add_data(self, code, csv_file=None): 26 | if not csv_file: 27 | csv_file = DATA_DIR_CSV.joinpath('{}.csv'.format(code)) 28 | 29 | if not os.path.exists(csv_file): 30 | logger.error('{}csv文件不存在!'.format(code)) 31 | return 32 | 33 | df = pd.read_csv(csv_file) 34 | if len(df) == 0: 35 | logger.error('{}没有数据!'.format(code)) 36 | return 37 | 38 | for col in ['date']: 39 | if col not in df.columns: 40 | logger.error('{}字段{}不存在!'.format(code, col)) 41 | return 42 | 43 | df['code'] = code 44 | df['date'] = df['date'].apply(lambda x: str(x)) 45 | df.sort_values(by='date', inplace=True) 46 | 47 | df['rate'] = df['close'].pct_change() 48 | df['equity'] = (df['rate'] + 1).cumprod() 49 | df.set_index('date', inplace=True) 50 | 51 | self.code_dfs[code] = df 52 | return df 53 | 54 | def get_df(self, instrument): 55 | if instrument in self.code_dfs.keys(): 56 | df = self.code_dfs[instrument] 57 | return df 58 | else: 59 | logger.info('{}未加载,现在加载,'.format(instrument)) 60 | df = self.add_data(instrument) 61 | return df 62 | 63 | def get_all_df(self): 64 | df_all = pd.concat(self.code_dfs.values(), axis=0) 65 | df_all.dropna(inplace=True) 66 | #df_all.index = df_all['date'] 67 | df_all.sort_index(inplace=True) 68 | 69 | return df_all 70 | 71 | 72 | # 这里是全局变量 73 | feed = CSVDatafeed() 74 | 75 | if __name__ == '__main__': 76 | from engine.datafeed.expr import ExprMgr 77 | 78 | expr = ExprMgr() 79 | expr.init() 80 | 81 | code = '000300.Sh' 82 | df = feed.get_df(code) 83 | 84 | fields = [] 85 | names = [] 86 | 87 | fields += ["Corr($close/Ref($close,1), Log($volume/Ref($volume, 1)+1), 30)"] 88 | names += ["CORR30"] 89 | fields += ["Corr($close/Ref($close,1), Log($volume/Ref($volume, 1)+1), 60)"] 90 | names += ["CORR60"] 91 | 92 | fields += ["Std($close, 30)/$close"] 93 | names += ["STD30"] 94 | fields += ["Corr($close, Log($volume+1), 5)"] 95 | names += ["CORR5"] 96 | 97 | # fields += ["Resi($close, 10)/$close"] 98 | # names += ["RESI10"] 99 | # fields += ["Resi($close, 5)/$close"] 100 | # names += ["RESI5"] 101 | 102 | fields += ["Std($close, 5)/$close"] 103 | names += ["STD5"] 104 | fields += ["Std($close, 20)/$close"] 105 | names += ["STD20"] 106 | fields += ["Std($close, 60)/$close"] 107 | names += ["STD60"] 108 | 109 | fields += ["Ref($low, 0)/$close"] 110 | names += ["LOW0"] 111 | 112 | fields += [ 113 | "Std(Abs($close/Ref($close, 1)-1)*$volume, 30)/(Mean(Abs($close/Ref($close, 1)-1)*$volume, 30)+1e-12)" 114 | ] 115 | names += ['WVMA30'] 116 | 117 | fields += ["Ref($close, 5)/$close"] 118 | names += ["ROC5"] 119 | 120 | fields += ["(2*$close-$high-$low)/$open"] 121 | names += ['KSFT'] 122 | 123 | fields += ["($close-Min($low, 5))/(Max($high, 5)-Min($low, 5)+1e-12)"] 124 | names += ["RSV5"] 125 | 126 | fields += ["($high-$low)/$open"] 127 | names += ['KLEN'] 128 | 129 | for name, field in zip(names, fields): 130 | exp = expr.get_expression(field) 131 | se = exp.load(code) 132 | df[name] = se 133 | 134 | print(df) 135 | from alphalens.utils import get_clean_factor_and_forward_returns 136 | 137 | # 
将tears.py中的get_values()函数改为to_numpy() 138 | ret = get_clean_factor_and_forward_returns(df[['rate']], close) 139 | from alphalens.tears import create_full_tear_sheet 140 | 141 | create_full_tear_sheet(ret, long_short=False) 142 | -------------------------------------------------------------------------------- /engine/datafeed/datafeed_hdf5.py: -------------------------------------------------------------------------------- 1 | # encoding:utf8 2 | import datetime 3 | 4 | from pathlib import Path 5 | import sys 6 | TOP_DIR = Path(__file__).parent.parent.joinpath(".") 7 | sys.path.append(TOP_DIR) 8 | 9 | import pandas as pd 10 | 11 | from common import Singleton 12 | from config import DATA_DIR_HDF5_ALL 13 | from loguru import logger 14 | 15 | @Singleton 16 | class Hdf5DataFeed: 17 | def __init__(self, db_name='index.h5'): 18 | print(self.__class__.__name__, '初始化...') 19 | self.code_dfs = {} 20 | 21 | def get_df(self, code, db=None): 22 | if code in self.code_dfs.keys(): 23 | return self.code_dfs[code] 24 | 25 | with pd.HDFStore(DATA_DIR_HDF5_ALL.resolve()) as store: 26 | logger.debug('从hdf5里读', code) 27 | df = store[code] 28 | df = df[['open', 'high', 'low', 'close', 'volume', 'code']] 29 | self.code_dfs[code] = df 30 | return df 31 | 32 | def get_one_df_by_codes(self, codes): 33 | dfs = [self.get_df(code) for code in codes] 34 | df_all = pd.concat(dfs, axis=0) 35 | df_all.dropna(inplace=True) 36 | df_all.sort_index(inplace=True) 37 | return df_all 38 | 39 | def get_returns_df(self, codes): 40 | df = self.get_one_df_by_codes(codes) 41 | all = pd.pivot_table(df, index='date', values='close', columns=['code']) 42 | returns_df = all.pct_change() 43 | returns_df.dropna(inplace=True) 44 | return returns_df 45 | 46 | def get_returns_df_ordered(self, codes): 47 | dfs = [] 48 | for code in codes: 49 | df = self.get_df(code, cols=['close']) 50 | close = df['close'] 51 | close.name = code 52 | dfs.append(close) 53 | all = pd.concat(dfs, axis=1) 54 | returns_df = all.pct_change() 55 | returns_df.dropna(inplace=True) 56 | return returns_df 57 | 58 | 59 | if __name__ == '__main__': 60 | feed = Hdf5DataFeed() 61 | feed2 = Hdf5DataFeed() 62 | print(feed.get_df('399006.SZ')) 63 | df = feed.get_one_df_by_codes(['000300.SH', '000905.SH', 'SPX']) 64 | print(df) 65 | -------------------------------------------------------------------------------- /engine/datafeed/dataloader.py: -------------------------------------------------------------------------------- 1 | # encoding:utf8 2 | from pathlib import Path 3 | import sys 4 | TOP_DIR = Path(__file__).parent.parent.joinpath(".") 5 | sys.path.append(TOP_DIR) 6 | 7 | import pandas as pd 8 | from loguru import logger 9 | 10 | from datafeed.expr.expr_mgr import ExprMgr 11 | from datafeed.datafeed_hdf5 import Hdf5DataFeed 12 | from config import DATA_DIR_HDF5_CACHE 13 | 14 | 15 | class Dataloader: 16 | def __init__(self, symbols, names, fields, load_from_cache=False): 17 | self.expr = ExprMgr() 18 | self.feed = Hdf5DataFeed() 19 | self.symbols = symbols 20 | self.names = names 21 | self.fields = fields 22 | 23 | with pd.HDFStore(DATA_DIR_HDF5_CACHE.resolve()) as store: 24 | key = 'features' 25 | if load_from_cache and '/' + key in store.keys(): # 注意判断keys需要前面加“/” 26 | 27 | logger.info('从缓存中加载...') 28 | self.data = store[key] 29 | else: 30 | self.data = self.load_one_df() 31 | store[key] = self.data 32 | 33 | def load_one_df(self): 34 | dfs = self.load_dfs() 35 | all = pd.concat(dfs) 36 | all.sort_index(ascending=True, inplace=True) 37 | all.dropna(inplace=True) 38 | 
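# At this point `all` stacks every requested symbol into one date-indexed frame: each
# row keeps its 'code' column plus the expression-engine features computed in
# load_dfs(); __init__ then stores this frame under the 'features' key of cache.h5
# (DATA_DIR_HDF5_CACHE) so later runs can reuse it with load_from_cache=True.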
self.data = all 39 | return all 40 | 41 | def load_dfs(self, symbols=None, names=None, fields=None): 42 | if not symbols: 43 | symbols = self.symbols 44 | if not names: 45 | names = self.names 46 | if not fields: 47 | fields = self.fields 48 | 49 | dfs = [] 50 | for code in symbols: 51 | # 直接在内存里加上字段,方便复用 52 | df = self.feed.get_df(code) 53 | for name, field in zip(names, fields): 54 | exp = self.expr.get_expression(field) 55 | # 这里可能返回多个序列 56 | se = exp.load(code) 57 | if type(se) is pd.Series: 58 | df[name] = se 59 | if type(se) is tuple: 60 | for i in range(len(se)): 61 | df[name + '_' + se[i].name] = se[i] 62 | df['code'] = code 63 | dfs.append(df) 64 | 65 | return dfs 66 | 67 | 68 | if __name__ == '__main__': 69 | names = [] 70 | fields = [] 71 | 72 | # fields += ['BBands($close)'] 73 | # names += ['BBands'] 74 | 75 | fields += ["RSRS($high,$low,18)"] 76 | names += ['RSRS'] 77 | 78 | fields += ['Norm($RSRS_beta,600)'] 79 | names += ['Norm_beta'] 80 | 81 | # fields += ['OBV($close,$volume)'] 82 | # names += ['obv'] 83 | 84 | fields += ['Slope($close,20)'] 85 | names += ['mom_slope'] 86 | 87 | fields += ['KF($mom_slope)'] 88 | names += ['kf_mom_slope'] 89 | 90 | fields += ["Ref($close,-1)/$close - 1"] 91 | names += ['label'] 92 | 93 | loader = Dataloader(['000300.SH'], names, fields, load_from_cache=True) 94 | print(loader.data) 95 | -------------------------------------------------------------------------------- /engine/datafeed/dataset.py: -------------------------------------------------------------------------------- 1 | # coding:utf8 2 | from pathlib import Path 3 | import sys 4 | TOP_DIR = Path(__file__).parent.parent.joinpath("../engine") 5 | sys.path.append(TOP_DIR) 6 | 7 | import numpy as np 8 | import datetime as dt 9 | from datafeed.dataloader import Dataloader 10 | from loguru import logger 11 | 12 | 13 | class OneStepTimeSeriesSplit: 14 | """Generates tuples of train_idx, test_idx pairs 15 | Assumes the index contains a level labeled 'date'""" 16 | 17 | def __init__(self, n_splits=3, test_period_length=1, shuffle=False): 18 | self.n_splits = n_splits 19 | self.test_period_length = test_period_length 20 | self.shuffle = shuffle 21 | 22 | @staticmethod 23 | def chunks(l, n): 24 | for i in range(0, len(l), n): 25 | print(l[i:i + n]) 26 | yield l[i:i + n] 27 | 28 | def split(self, X, y=None, groups=None): 29 | unique_dates = (X.index 30 | # .get_level_values('date') 31 | .unique() 32 | .sort_values(ascending=False) 33 | [:self.n_splits * self.test_period_length]) 34 | 35 | dates = X.reset_index()[['date']] 36 | for test_date in self.chunks(unique_dates, self.test_period_length): 37 | train_idx = dates[dates.date < min(test_date)].index 38 | test_idx = dates[dates.date.isin(test_date)].index 39 | if self.shuffle: 40 | np.random.shuffle(list(train_idx)) 41 | yield train_idx, test_idx 42 | 43 | def get_n_splits(self, X, y, groups=None): 44 | return self.n_splits 45 | 46 | 47 | def get_date_by_percent(start_date, end_date, percent): 48 | days = (end_date - start_date).days 49 | target_days = np.trunc(days * percent) 50 | target_date = start_date + dt.timedelta(days=target_days) 51 | # print days, target_days,target_date 52 | return target_date 53 | 54 | 55 | def split_df(df, x_cols, y_col, split_date=None, split_ratio=0.8): 56 | if not split_date: 57 | split_date = get_date_by_percent(df.index[0], df.index[df.shape[0] - 1], split_ratio) 58 | 59 | input_data = df[x_cols] 60 | output_data = df[y_col] 61 | 62 | # Create training and test sets 63 | X_train = input_data[input_data.index 
< split_date] 64 | X_test = input_data[input_data.index >= split_date] 65 | Y_train = output_data[output_data.index < split_date] 66 | Y_test = output_data[output_data.index >= split_date] 67 | 68 | return X_train, X_test, Y_train, Y_test 69 | 70 | 71 | class Dataset: 72 | def __init__(self, dataloader, split_date, feature_names, label_name='label'): 73 | self.split_date = split_date 74 | self.feature_names = feature_names 75 | self.label_name = label_name 76 | 77 | self.loader = dataloader 78 | if dataloader.data is None: 79 | logger.error('dataloader未加载数据。') 80 | self.df = dataloader.data 81 | 82 | def get_split_dataset(self): 83 | X_train, X_test, Y_train, Y_test = split_df(self.df, x_cols=self.feature_names, y_col=self.label_name, 84 | split_date=self.split_date) 85 | return X_train, X_test, Y_train, Y_test 86 | 87 | def get_train_data(self): 88 | X_train, X_test, Y_train, Y_test = split_df(self.df, x_cols=self.feature_names, y_col=self.label_name, 89 | split_date=self.split_date) 90 | return X_train, Y_train 91 | 92 | def get_test_data(self): 93 | X_train, X_test, Y_train, Y_test = split_df(self.df, x_cols=self.feature_names, y_col=self.label_name, 94 | split_date=self.split_date) 95 | return X_test, Y_test 96 | 97 | def get_X_y_data(self): 98 | X = self.df[self.feature_names] 99 | y = self.df[self.label_name] 100 | return X, y 101 | 102 | 103 | if __name__ == '__main__': 104 | codes = ['000300.SH', 'SPX'] 105 | names = [] 106 | fields = [] 107 | fields += ["Corr($close/Ref($close,1), Log($volume/Ref($volume, 1)+1), 30)"] 108 | names += ["CORR30"] 109 | 110 | dataset = Dataset(codes, names, fields, split_date='2020-01-01') 111 | X_train, Y_train = dataset.get_train_data() 112 | print(X_train, Y_train) 113 | -------------------------------------------------------------------------------- /engine/datafeed/expr/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/datafeed/expr/BUILD -------------------------------------------------------------------------------- /engine/datafeed/expr/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/datafeed/expr/__init__.py -------------------------------------------------------------------------------- /engine/datafeed/expr/base.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
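# The Expression/Feature classes in this module back the string expression engine used
# throughout the datafeed package: ExprMgr.parse_field() rewrites a field such as
# "$close/Ref($close,1)-1" into Feature("close") / Operators.Ref(Feature("close"), 1) - 1,
# the operator overloads below turn that into an expression tree (Div, Sub, ...), and
# Expression.load(instrument) evaluates the tree against columns served by Hdf5DataFeed.
# Example usage (as in dataloader.py): ExprMgr().get_expression("Slope($close,20)").load("000300.SH")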
3 | 4 | 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import abc 9 | import pandas as pd 10 | 11 | from pathlib import Path 12 | import sys 13 | TOP_DIR = Path(__file__).parent.parent.parent.joinpath(".") 14 | sys.path.append(TOP_DIR) 15 | 16 | from datafeed.datafeed_hdf5 import Hdf5DataFeed 17 | 18 | 19 | class Expression(abc.ABC): 20 | """Expression base class""" 21 | 22 | def __str__(self): 23 | return type(self).__name__ 24 | 25 | def __repr__(self): 26 | return str(self) 27 | 28 | def __gt__(self, other): 29 | from .ops import Gt 30 | 31 | return Gt(self, other) 32 | 33 | def __ge__(self, other): 34 | from .ops import Ge 35 | 36 | return Ge(self, other) 37 | 38 | def __lt__(self, other): 39 | from .ops import Lt 40 | 41 | return Lt(self, other) 42 | 43 | def __le__(self, other): 44 | from .ops import Le 45 | 46 | return Le(self, other) 47 | 48 | def __eq__(self, other): 49 | from .ops import Eq 50 | 51 | return Eq(self, other) 52 | 53 | def __ne__(self, other): 54 | from .ops import Ne 55 | 56 | return Ne(self, other) 57 | 58 | def __add__(self, other): 59 | from .ops import Add 60 | 61 | return Add(self, other) 62 | 63 | def __radd__(self, other): 64 | from .ops import Add 65 | 66 | return Add(other, self) 67 | 68 | def __sub__(self, other): 69 | from .ops import Sub 70 | 71 | return Sub(self, other) 72 | 73 | def __rsub__(self, other): 74 | from .ops import Sub 75 | 76 | return Sub(other, self) 77 | 78 | def __mul__(self, other): 79 | from .ops import Mul 80 | 81 | return Mul(self, other) 82 | 83 | def __rmul__(self, other): 84 | from .ops import Mul 85 | 86 | return Mul(self, other) 87 | 88 | def __div__(self, other): 89 | from .ops import Div 90 | 91 | return Div(self, other) 92 | 93 | def __rdiv__(self, other): 94 | from .ops import Div 95 | 96 | return Div(other, self) 97 | 98 | def __truediv__(self, other): 99 | from .ops import Div 100 | 101 | return Div(self, other) 102 | 103 | def __rtruediv__(self, other): 104 | from .ops import Div 105 | 106 | return Div(other, self) 107 | 108 | def __pow__(self, other): 109 | from .ops import Power 110 | 111 | return Power(self, other) 112 | 113 | def __and__(self, other): 114 | from .ops import And 115 | 116 | return And(self, other) 117 | 118 | def __rand__(self, other): 119 | from .ops import And 120 | 121 | return And(other, self) 122 | 123 | def __or__(self, other): 124 | from .ops import Or 125 | 126 | return Or(self, other) 127 | 128 | def __ror__(self, other): 129 | from .ops import Or 130 | 131 | return Or(other, self) 132 | 133 | def load(self, instrument): 134 | 135 | try: 136 | series = self._load_internal(instrument) 137 | except Exception: 138 | print('__load_error') 139 | raise 140 | 141 | if type(series) is pd.Series: 142 | series.name = str(self) 143 | 144 | return series 145 | 146 | @abc.abstractmethod 147 | def _load_internal(self, instrument): 148 | raise NotImplementedError("This function must be implemented in your newly defined feature") 149 | 150 | @abc.abstractmethod 151 | def get_longest_back_rolling(self): 152 | """Get the longest length of historical data the feature has accessed 153 | 154 | This is designed for getting the needed range of the data to calculate 155 | the features in specific range at first. However, situations like 156 | Ref(Ref($close, -1), 1) can not be handled rightly. 157 | 158 | So this will only used for detecting the length of historical data needed. 159 | """ 160 | # TODO: forward operator like Ref($close, -1) is not supported yet. 
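# In practice, a rolling operator such as Mean($close, 20) needs roughly its window
# length of extra history before the requested start date, and that is the length this
# method is meant to report; the leaf Feature class further below simply returns 0.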
161 | raise NotImplementedError("This function must be implemented in your newly defined feature") 162 | 163 | @abc.abstractmethod 164 | def get_extended_window_size(self): 165 | """get_extend_window_size 166 | 167 | For to calculate this Operator in range[start_index, end_index] 168 | We have to get the *leaf feature* in 169 | range[start_index - lft_etd, end_index + rght_etd]. 170 | 171 | Returns 172 | ---------- 173 | (int, int) 174 | lft_etd, rght_etd 175 | """ 176 | raise NotImplementedError("This function must be implemented in your newly defined feature") 177 | 178 | 179 | class Feature(Expression): 180 | """Static Expression 181 | 182 | This kind of feature will load data from provider 183 | """ 184 | 185 | def __init__(self, name=None): 186 | if name: 187 | self._name = name 188 | else: 189 | self._name = type(self).__name__ 190 | 191 | def __str__(self): 192 | return self._name 193 | 194 | def _load_internal(self, instrument): 195 | df = Hdf5DataFeed().get_df(instrument) 196 | #print(str(self)) 197 | return df[str(self)] 198 | 199 | def get_longest_back_rolling(self): 200 | return 0 201 | 202 | def get_extended_window_size(self): 203 | return 0, 0 204 | 205 | 206 | class ExpressionOps(Expression): 207 | """Operator Expression 208 | 209 | This kind of feature will use operator for feature 210 | construction on the fly. 211 | """ 212 | 213 | pass 214 | -------------------------------------------------------------------------------- /engine/datafeed/expr/expr_extend.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from pathlib import Path 4 | import sys 5 | TOP_DIR = Path(__file__).parent.parent.parent.joinpath(".") 6 | sys.path.append(TOP_DIR) 7 | 8 | from datafeed.expr.ops import PairOperator, Rolling 9 | import pandas as pd 10 | import statsmodels.api as sm 11 | 12 | 13 | class RSRS(PairOperator): 14 | def __init__(self, feature_left, feature_right, N): 15 | self.N = N 16 | #self.M = M 17 | super(RSRS, self).__init__(feature_left, feature_right) 18 | 19 | def _load_internal(self, instrument): 20 | series_left = self.feature_left.load(instrument) 21 | series_right = self.feature_right.load(instrument) 22 | 23 | slope = [] 24 | R2 = [] 25 | # 计算斜率值 26 | n = self.N 27 | for i in range(len(series_left)): 28 | if i < (self.N - 1): 29 | slope.append(pd.NA) 30 | R2.append(pd.NA) 31 | else: 32 | x = series_right[i - n + 1:i + 1] 33 | # iloc左闭右开 34 | x = sm.add_constant(x) 35 | y = series_left.iloc[i - n + 1:i + 1] 36 | regr = sm.OLS(y, x) 37 | res = regr.fit() 38 | beta = round(res.params[1], 2) # 斜率指标 39 | slope.append(beta) 40 | R2.append(res.rsquared) 41 | 42 | betas = pd.Series(slope, index=series_left.index) 43 | betas.name = 'beta' 44 | r2 = pd.Series(R2, index=series_left.index) 45 | r2.name = 'r2' 46 | return betas, r2 47 | 48 | 49 | class Norm(Rolling): 50 | def __init__(self, feature, N): 51 | super(Norm, self).__init__(feature, N, "slope") 52 | 53 | def _load_internal(self, instrument): 54 | # 因子标准化 55 | def get_zscore(sub_series): 56 | mean = np.mean(sub_series) 57 | std = np.std(sub_series) 58 | return (sub_series[-1] - mean) / std 59 | 60 | series = self.feature.load(instrument) 61 | series = series.fillna(0.0) 62 | result = series.rolling(self.N, min_periods=100).apply(get_zscore) 63 | series = pd.Series(result, index=series.index) 64 | return series 65 | -------------------------------------------------------------------------------- /engine/datafeed/expr/expr_mgr.py: 
-------------------------------------------------------------------------------- 1 | ''' 2 | Author: Charmve yidazhang1@gmail.com 3 | Date: 2023-05-18 22:00:28 4 | LastEditors: Charmve yidazhang1@gmail.com 5 | LastEditTime: 2023-05-18 22:12:21 6 | FilePath: /Qbot/iQuant/engine/datafeed/expr/expr_mgr.py 7 | Version: 1.0.1 8 | Blogs: charmve.blog.csdn.net 9 | GitHub: https://github.com/Charmve 10 | Description: 11 | 12 | Copyright (c) 2023 by Charmve, All Rights Reserved. 13 | Licensed under the MIT License. 14 | ''' 15 | from pathlib import Path 16 | import sys 17 | TOP_DIR = Path(__file__).parent.parent.parent.joinpath(".") 18 | sys.path.append(TOP_DIR) 19 | 20 | import re 21 | from .ops import Operators, register_all_ops 22 | from .base import Feature 23 | from common import Singleton 24 | 25 | 26 | @Singleton 27 | class ExprMgr: 28 | def __init__(self): 29 | register_all_ops() 30 | 31 | def parse_field(self, field): 32 | # Following patterns will be matched: 33 | # - $close -> Feature("close") 34 | # - $close5 -> Feature("close5") 35 | # - $open+$close -> Feature("open")+Feature("close") 36 | if not isinstance(field, str): 37 | field = str(field) 38 | 39 | re_func = re.sub(r"(\w+\s*)\(", r"Operators.\1(", field) 40 | # print('re_runc',re_func) 41 | return re.sub(r"\$(\w+)", r'Feature("\1")', re_func) 42 | 43 | def get_expression(self, feature): 44 | feature = self.parse_field(feature) 45 | try: 46 | expr = eval(feature) 47 | except: 48 | print('error', feature) 49 | raise 50 | return expr 51 | -------------------------------------------------------------------------------- /engine/datafeed/ts_downloader.py: -------------------------------------------------------------------------------- 1 | # encoding:utf8 2 | # 导入tushare 3 | import pandas as pd 4 | import tushare as ts 5 | 6 | # 初始化pro接口 7 | pro = ts.pro_api('854634d420c0b6aea2907030279da881519909692cf56e6f35c4718c') 8 | 9 | def get_etf(code, offset=0, limit=600): 10 | # 拉取数据 11 | df = pro.fund_daily(**{ 12 | "trade_date": "", 13 | "start_date": "", 14 | "end_date": "", 15 | "ts_code": code, 16 | "limit": limit, 17 | "offset": offset 18 | }, fields=[ 19 | "ts_code", 20 | "trade_date", 21 | "open", 22 | "high", 23 | "low", 24 | "close", 25 | "vol" 26 | ]) 27 | 28 | df.rename(columns={'trade_date': 'date', 'ts_code': 'code', 'vol': 'volume'}, inplace=True) 29 | df.set_index('date', inplace=True) 30 | # 拉取数据 31 | df_adj = pro.fund_adj(**{ 32 | "ts_code": code, 33 | "trade_date": "", 34 | "start_date": "", 35 | "end_date": "", 36 | "offset": offset, 37 | "limit": limit 38 | }, fields=[ 39 | "trade_date", 40 | "adj_factor" 41 | ]) 42 | df_adj.rename(columns={'trade_date': 'date'}, inplace=True) 43 | df_adj.set_index('date', inplace=True) 44 | df = pd.concat([df, df_adj], axis=1) 45 | df.dropna(inplace=True) 46 | for col in ['open', 'high', 'low', 'close']: 47 | df[col] *= df['adj_factor'] 48 | df.index = pd.to_datetime(df.index) 49 | df.sort_index(ascending=True, inplace=True) 50 | return df 51 | 52 | 53 | def get_global_index(code): 54 | # 拉取数据 55 | df = pro.index_global(**{ 56 | "ts_code": code, 57 | "trade_date": "", 58 | "start_date": "", 59 | "end_date": "", 60 | "limit": "", 61 | "offset": "" 62 | }, fields=[ 63 | "ts_code", 64 | "trade_date", 65 | "open", 66 | "close", 67 | "high", 68 | "low", 69 | "vol" 70 | ]) 71 | df.rename(columns={'ts_code': 'code', 'vol': 'volume', 'trade_date': 'date'}, inplace=True) 72 | df.set_index('date', inplace=True) 73 | 74 | df.index = pd.to_datetime(df.index) 75 | df.sort_index(ascending=True, 
inplace=True) 76 | return df 77 | 78 | 79 | def get_index(code): 80 | # 拉取数据 81 | df = pro.index_daily(**{ 82 | "ts_code": code, 83 | "trade_date": "", 84 | "start_date": "", 85 | "end_date": "", 86 | "limit": "", 87 | "offset": "" 88 | }, fields=[ 89 | "ts_code", 90 | "trade_date", 91 | "close", 92 | "open", 93 | "high", 94 | "low", 95 | "vol", 96 | ]) 97 | df.rename(columns={'ts_code': 'code', 'vol': 'volume', 'trade_date': 'date'}, inplace=True) 98 | df.set_index('date', inplace=True) 99 | 100 | df.index = pd.to_datetime(df.index) 101 | df.sort_index(ascending=True, inplace=True) 102 | return df 103 | 104 | 105 | def download_symbols(symbols, b_index=False): 106 | for symbol in symbols: 107 | if not b_index: # etf 108 | offset = 0 109 | df = get_etf(symbol, offset=offset) 110 | while(offset < 10000): 111 | offset += 600 112 | df_append = get_etf(symbol, offset=offset, limit=600) 113 | if df_append is None or len(df_append) == 0: 114 | break 115 | print(df_append.tail()) 116 | df = df.append(df_append) 117 | df.sort_index(ascending=True, inplace=True) 118 | 119 | else: 120 | if '.' in symbol: 121 | df = get_index(symbol) 122 | else: 123 | df = get_global_index(symbol) 124 | print(df) 125 | if df is None or len(df) == 0: 126 | print('error') 127 | continue 128 | with pd.HDFStore(DATA_DIR_HDF5_ALL.resolve()) as store: 129 | store[symbol] = df 130 | 131 | 132 | if __name__ == '__main__': 133 | from engine.config import DATA_DIR_HDF5_ALL 134 | 135 | print(DATA_DIR_HDF5_ALL.resolve()) 136 | 137 | symbols = ['000300.SH', '000905.SH', 'SPX', '399006.SZ'] 138 | #download_symbols(symbols, b_index=True) 139 | 140 | etfs = ['510300.SH', # 沪深300ETF 141 | '159949.SZ', # 创业板50 142 | '510050.SH', # 上证50ETF 143 | '159928.SZ', # 中证消费ETF 144 | '510500.SH', # 500ETF 145 | '159915.SZ', # 创业板 ETF 146 | '512120.SH', # 医药50ETF 147 | '159806.SZ', # 新能车ETF 148 | '510880.SH', # 红利ETF 149 | ] 150 | 151 | download_symbols(etfs, b_index=False) 152 | 153 | with pd.HDFStore(DATA_DIR_HDF5_ALL.resolve()) as store: 154 | print('读数据') 155 | print(store['510300.SH']) 156 | -------------------------------------------------------------------------------- /engine/demos/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/demos/BUILD -------------------------------------------------------------------------------- /engine/demos/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/demos/__init__.py -------------------------------------------------------------------------------- /engine/demos/a2c_cartpole_tensorboard/A2C_1/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/demos/a2c_cartpole_tensorboard/A2C_1/BUILD -------------------------------------------------------------------------------- /engine/demos/a2c_cartpole_tensorboard/A2C_1/events.out.tfevents.1669546684.LAPTOP-3RCHD0KF.29644.0: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/demos/a2c_cartpole_tensorboard/A2C_1/events.out.tfevents.1669546684.LAPTOP-3RCHD0KF.29644.0 
-------------------------------------------------------------------------------- /engine/demos/a2c_cartpole_tensorboard/A2C_2/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/demos/a2c_cartpole_tensorboard/A2C_2/BUILD -------------------------------------------------------------------------------- /engine/demos/a2c_cartpole_tensorboard/A2C_2/events.out.tfevents.1669546881.LAPTOP-3RCHD0KF.15556.0: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/demos/a2c_cartpole_tensorboard/A2C_2/events.out.tfevents.1669546881.LAPTOP-3RCHD0KF.15556.0 -------------------------------------------------------------------------------- /engine/demos/a2c_cartpole_tensorboard/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/demos/a2c_cartpole_tensorboard/BUILD -------------------------------------------------------------------------------- /engine/demos/ddpg_demo.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | 4 | from stable_baselines3 import DDPG 5 | from stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise 6 | 7 | env = gym.make("Pendulum-v1") 8 | 9 | # The noise objects for DDPG 10 | n_actions = env.action_space.shape[-1] 11 | action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=0.1 * np.ones(n_actions)) 12 | 13 | model = DDPG("MlpPolicy", env, action_noise=action_noise, verbose=1) 14 | model.learn(total_timesteps=10000, log_interval=10) 15 | model.save("ddpg_pendulum") 16 | env = model.get_env() 17 | 18 | del model # remove to demonstrate saving and loading 19 | 20 | model = DDPG.load("ddpg_pendulum") 21 | 22 | obs = env.reset() 23 | while True: 24 | action, _states = model.predict(obs) 25 | obs, rewards, dones, info = env.step(action) 26 | env.render() -------------------------------------------------------------------------------- /engine/demos/ddpg_pendulum.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/demos/ddpg_pendulum.zip -------------------------------------------------------------------------------- /engine/demos/env_portfolio_yahoofinance.py: -------------------------------------------------------------------------------- 1 | """From FinRL https://github.com/AI4Finance-LLC/FinRL/tree/master/finrl/env""" 2 | import gym 3 | import matplotlib 4 | import numpy as np 5 | import pandas as pd 6 | from gym import spaces 7 | from gym.utils import seeding 8 | 9 | matplotlib.use("Agg") 10 | import matplotlib.pyplot as plt 11 | from stable_baselines3.common.vec_env import DummyVecEnv 12 | 13 | 14 | class StockPortfolioEnv(gym.Env): 15 | """A single stock trading environment for OpenAI gym 16 | 17 | Attributes 18 | ---------- 19 | df: DataFrame 20 | input data 21 | stock_dim : int 22 | number of unique stocks 23 | hmax : int 24 | maximum number of shares to trade 25 | initial_amount : int 26 | start money 27 | transaction_cost_pct: float 28 | transaction cost percentage per trade 29 | reward_scaling: float 30 | scaling factor for reward, good for training 31 | 
state_space: int 32 | the dimension of input features 33 | action_space: int 34 | equals stock dimension 35 | tech_indicator_list: list 36 | a list of technical indicator names 37 | turbulence_threshold: int 38 | a threshold to control risk aversion 39 | day: int 40 | an increment number to control date 41 | 42 | Methods 43 | ------- 44 | _sell_stock() 45 | perform sell action based on the sign of the action 46 | _buy_stock() 47 | perform buy action based on the sign of the action 48 | step() 49 | at each step the agent will return actions, then 50 | we will calculate the reward, and return the next observation. 51 | reset() 52 | reset the environment 53 | render() 54 | use render to return other functions 55 | save_asset_memory() 56 | return account value at each time step 57 | save_action_memory() 58 | return actions/positions at each time step 59 | 60 | 61 | """ 62 | 63 | metadata = {"render.modes": ["human"]} 64 | 65 | def __init__( 66 | self, 67 | df, 68 | stock_dim, 69 | hmax, 70 | initial_amount, 71 | transaction_cost_pct, 72 | reward_scaling, 73 | state_space, 74 | action_space, 75 | tech_indicator_list, 76 | turbulence_threshold=None, 77 | lookback=252, 78 | day=0, 79 | ): 80 | # super(StockEnv, self).__init__() 81 | # money = 10 , scope = 1 82 | self.day = day 83 | self.lookback = lookback 84 | self.df = df 85 | self.stock_dim = stock_dim 86 | self.hmax = hmax 87 | self.initial_amount = initial_amount 88 | self.transaction_cost_pct = transaction_cost_pct 89 | self.reward_scaling = reward_scaling 90 | self.state_space = state_space 91 | self.action_space = action_space 92 | self.tech_indicator_list = tech_indicator_list 93 | 94 | # action_space normalization and shape is self.stock_dim 95 | self.action_space = spaces.Box(low=0, high=1, shape=(self.action_space,)) 96 | # Shape = (34, 30) 97 | # covariance matrix + technical indicators 98 | self.observation_space = spaces.Box( 99 | low=-np.inf, 100 | high=np.inf, 101 | shape=( 102 | self.state_space + len(self.tech_indicator_list), 103 | self.state_space, 104 | ), 105 | ) 106 | 107 | # load data from a pandas dataframe 108 | self.data = self.df.loc[self.day, :] 109 | self.covs = self.data["cov_list"].values[0] 110 | self.state = np.append( 111 | np.array(self.covs), 112 | [self.data[tech].values.tolist() for tech in self.tech_indicator_list], 113 | axis=0, 114 | ) 115 | self.terminal = False 116 | self.turbulence_threshold = turbulence_threshold 117 | # initalize state: inital portfolio return + individual stock return + individual weights 118 | self.portfolio_value = self.initial_amount 119 | 120 | # memorize portfolio value each step 121 | self.asset_memory = [self.initial_amount] 122 | # memorize portfolio return each step 123 | self.portfolio_return_memory = [0] 124 | self.actions_memory = [[1 / self.stock_dim] * self.stock_dim] 125 | self.date_memory = [self.data.date.unique()[0]] 126 | 127 | def step(self, actions): 128 | # print(self.day) 129 | self.terminal = self.day >= len(self.df.index.unique()) - 1 130 | # print(actions) 131 | 132 | if self.terminal: 133 | df = pd.DataFrame(self.portfolio_return_memory) 134 | df.columns = ["daily_return"] 135 | plt.plot(df.daily_return.cumsum(), "r") 136 | plt.savefig("results/cumulative_reward.png") 137 | plt.close() 138 | 139 | plt.plot(self.portfolio_return_memory, "r") 140 | plt.savefig("results/rewards.png") 141 | plt.close() 142 | 143 | print("=================================") 144 | print(f"begin_total_asset:{self.asset_memory[0]}") 145 | 
print(f"end_total_asset:{self.portfolio_value}") 146 | 147 | df_daily_return = pd.DataFrame(self.portfolio_return_memory) 148 | df_daily_return.columns = ["daily_return"] 149 | if df_daily_return["daily_return"].std() != 0: 150 | sharpe = ( 151 | (252**0.5) 152 | * df_daily_return["daily_return"].mean() 153 | / df_daily_return["daily_return"].std() 154 | ) 155 | print("Sharpe: ", sharpe) 156 | print("=================================") 157 | 158 | return self.state, self.reward, self.terminal, {} 159 | 160 | else: 161 | # print("Model actions: ",actions) 162 | # actions are the portfolio weight 163 | # normalize to sum of 1 164 | # if (np.array(actions) - np.array(actions).min()).sum() != 0: 165 | # norm_actions = (np.array(actions) - np.array(actions).min()) / (np.array(actions) - np.array(actions).min()).sum() 166 | # else: 167 | # norm_actions = actions 168 | weights = self.softmax_normalization(actions) 169 | # print("Normalized actions: ", weights) 170 | self.actions_memory.append(weights) 171 | last_day_memory = self.data 172 | 173 | # load next state 174 | self.day += 1 175 | self.data = self.df.loc[self.day, :] 176 | self.covs = self.data["cov_list"].values[0] 177 | self.state = np.append( 178 | np.array(self.covs), 179 | [self.data[tech].values.tolist() for tech in self.tech_indicator_list], 180 | axis=0, 181 | ) 182 | # print(self.state) 183 | # calcualte portfolio return 184 | # individual stocks' return * weight 185 | portfolio_return = sum( 186 | ((self.data.close.values / last_day_memory.close.values) - 1) * weights 187 | ) 188 | # update portfolio value 189 | new_portfolio_value = self.portfolio_value * (1 + portfolio_return) 190 | self.portfolio_value = new_portfolio_value 191 | 192 | # save into memory 193 | self.portfolio_return_memory.append(portfolio_return) 194 | self.date_memory.append(self.data.date.unique()[0]) 195 | self.asset_memory.append(new_portfolio_value) 196 | 197 | # the reward is the new portfolio value or end portfolo value 198 | self.reward = new_portfolio_value 199 | # print("Step reward: ", self.reward) 200 | # self.reward = self.reward*self.reward_scaling 201 | 202 | return self.state, self.reward, self.terminal, {} 203 | 204 | def reset(self): 205 | self.asset_memory = [self.initial_amount] 206 | self.day = 0 207 | self.data = self.df.loc[self.day, :] 208 | # load states 209 | self.covs = self.data["cov_list"].values[0] 210 | self.state = np.append( 211 | np.array(self.covs), 212 | [self.data[tech].values.tolist() for tech in self.tech_indicator_list], 213 | axis=0, 214 | ) 215 | self.portfolio_value = self.initial_amount 216 | # self.cost = 0 217 | # self.trades = 0 218 | self.terminal = False 219 | self.portfolio_return_memory = [0] 220 | self.actions_memory = [[1 / self.stock_dim] * self.stock_dim] 221 | self.date_memory = [self.data.date.unique()[0]] 222 | return self.state 223 | 224 | def render(self, mode="human"): 225 | return self.state 226 | 227 | def softmax_normalization(self, actions): 228 | numerator = np.exp(actions) 229 | denominator = np.sum(np.exp(actions)) 230 | softmax_output = numerator / denominator 231 | return softmax_output 232 | 233 | def save_asset_memory(self): 234 | date_list = self.date_memory 235 | portfolio_return = self.portfolio_return_memory 236 | # print(len(date_list)) 237 | # print(len(asset_list)) 238 | df_account_value = pd.DataFrame( 239 | {"date": date_list, "daily_return": portfolio_return} 240 | ) 241 | return df_account_value 242 | 243 | def save_action_memory(self): 244 | # date and close price length must 
match actions length 245 | date_list = self.date_memory 246 | df_date = pd.DataFrame(date_list) 247 | df_date.columns = ["date"] 248 | 249 | action_list = self.actions_memory 250 | df_actions = pd.DataFrame(action_list) 251 | df_actions.columns = self.data.tic.values 252 | df_actions.index = df_date.date 253 | # df_actions = pd.DataFrame({'date':date_list,'actions':action_list}) 254 | return df_actions 255 | 256 | def _seed(self, seed=None): 257 | self.np_random, seed = seeding.np_random(seed) 258 | return [seed] 259 | 260 | def get_sb_env(self): 261 | e = DummyVecEnv([lambda: self]) 262 | obs = e.reset() 263 | return e, obs 264 | -------------------------------------------------------------------------------- /engine/demos/sb3_utils.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | from gym import spaces 4 | from stable_baselines3 import A2C 5 | 6 | 7 | class FinanceEnv(gym.Env): 8 | metadata = {"render.modes": ["human"]} 9 | 10 | def __init__(self): 11 | super(FinanceEnv, self).__init__() 12 | # 定义动作与状态空间,都是gym.spaces 对象 13 | # 例:使用离散空间: 14 | N_DISCRETE_ACTIONS = 2 15 | self.action_space = spaces.Discrete(N_DISCRETE_ACTIONS) 16 | # Example for using image as input (channel-first; channel-last also works): 17 | 18 | #N_CHANNELS = 3 19 | rows = 28 20 | cols = 28 21 | self.observation_space = spaces.Box(low=0, high=255, 22 | shape=(rows,), dtype=np.uint8) 23 | 24 | def step(self, action): 25 | observation = self.observation_space.sample() 26 | reward = 1.0 27 | done = True 28 | info = {} 29 | return observation, reward, done, info 30 | 31 | def reset(self): 32 | observation = self.observation_space.sample() 33 | #print(observation) 34 | return observation # reward, done, info can't be included 35 | 36 | def render(self, mode="human"): 37 | pass 38 | 39 | def close(self): 40 | pass 41 | 42 | 43 | if __name__ == '__main__': 44 | from stable_baselines3.common.env_checker import check_env 45 | 46 | env = FinanceEnv() 47 | check_env(env) 48 | model = A2C("MlpPolicy", env).learn(total_timesteps=1000) 49 | -------------------------------------------------------------------------------- /engine/indicator/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/indicator/BUILD -------------------------------------------------------------------------------- /engine/indicator/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/indicator/__init__.py -------------------------------------------------------------------------------- /engine/indicator/indicator_rsrs.py: -------------------------------------------------------------------------------- 1 | import backtrader as bt 2 | import numpy as np 3 | import statsmodels.api as sm 4 | 5 | 6 | class RSRS(bt.Indicator): 7 | lines = ('rsrs', 'R2') 8 | 9 | params = (('N', 18), ('value', 5)) 10 | 11 | def __init__(self): 12 | self.high = self.data.high 13 | self.low = self.data.low 14 | 15 | def next(self): 16 | high_N = self.high.get(ago=0, size=self.p.N) 17 | low_N = self.low.get(ago=0, size=self.p.N) 18 | 19 | try: 20 | X = sm.add_constant(np.array(low_N)) 21 | model = sm.OLS(np.array(high_N), X) 22 | results = model.fit() 23 | self.lines.rsrs[0] = results.params[1] 24 | self.lines.R2[0] = results.rsquared 25 | except: 
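# A failed OLS fit here (for example, too few bars in the N-bar lookback window at
# the start of the series) is mapped to rsrs = 0 on the next line; lines.R2 is not
# assigned in that branch, so it is left as NaN for the bar.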
26 | self.lines.rsrs[0] = 0 27 | 28 | 29 | class RSRS_Norm(bt.Indicator): 30 | lines = ('rsrs_norm','rsrs_r2','beta_right') 31 | params = (('N', 18), ('M', 600)) 32 | 33 | def __init__(self): 34 | self.rsrs = RSRS(self.data) 35 | self.lines.rsrs_norm = (self.rsrs - bt.ind.Average(self.rsrs, period=self.p.M))/bt.ind.StandardDeviation(self.rsrs, period= self.p.M) 36 | self.lines.rsrs_r2 = self.lines.rsrs_norm * self.rsrs.R2 37 | self.lines.beta_right = self.rsrs * self.lines.rsrs_r2 -------------------------------------------------------------------------------- /engine/indicator/signal_double_sma.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/indicator/signal_double_sma.py -------------------------------------------------------------------------------- /engine/indicator/signal_triple_sma.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/indicator/signal_triple_sma.py -------------------------------------------------------------------------------- /engine/main.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../engine'))) 5 | 6 | print(os.path) -------------------------------------------------------------------------------- /engine/ml/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/ml/BUILD -------------------------------------------------------------------------------- /engine/ml/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/ml/__init__.py -------------------------------------------------------------------------------- /engine/ml/model.py: -------------------------------------------------------------------------------- 1 | # coding:utf8 2 | from loguru import logger 3 | 4 | from engine.datafeed.dataset import Dataset 5 | 6 | 7 | class Model: 8 | pass 9 | 10 | 11 | class SklearnModel(Model): 12 | def __init__(self, clf): 13 | self.clf = clf 14 | 15 | def fit(self, dataset: Dataset): 16 | X_train, y_train = dataset.get_train_data() 17 | self.clf.fit(X_train, y_train) 18 | score_train = self.clf.score(X_train, y_train) 19 | 20 | X_test, y_test = dataset.get_test_data() 21 | score_test = self.clf.score(X_test, y_test) 22 | logger.info('模型在训练集得分:{},测试集上得分:{}'.format(score_train, score_test)) 23 | 24 | def predict(self, dataset: Dataset): 25 | X, _ = dataset.get_X_y_data() 26 | y_pred = self.clf.predict(X) 27 | return y_pred 28 | 29 | 30 | if __name__ == '__main__': 31 | names = [] 32 | fields = [] 33 | feature_names = [] 34 | 35 | fields += ['BBands($close)'] 36 | names += ['BBands'] 37 | feature_names += ['BBands_up', 'BBands_down'] 38 | 39 | fields += ["RSRS($high,$low,18)"] 40 | names += ['RSRS'] 41 | feature_names += ['RSRS_beta'] 42 | 43 | fields += ['Norm($RSRS_beta,600)'] 44 | names += ['Norm_beta'] 45 | feature_names += ['Norm_beta'] 46 | 47 | fields += ['OBV($close,$volume)'] 48 | names += ['obv'] 49 | feature_names += ['obv'] 50 | 51 | fields += ['Slope($close,20)'] 52 | names += ['mom_slope'] 53 | 
feature_names += ['mom_slope'] 54 | 55 | fields += ['KF($mom_slope)'] 56 | names += ['kf_mom_slope'] 57 | feature_names += ['kf_mom_slope'] 58 | 59 | fields += ["Ref($close,-1)/$close - 1"] 60 | names += ['label'] 61 | 62 | from engine.datafeed.dataloader import Dataloader 63 | 64 | loader = Dataloader() 65 | loader.load_one_df(['000300.SH', 'SPX'], names, fields) 66 | 67 | ds = Dataset(dataloader=loader, feature_names=feature_names, split_date='2020-01-01') 68 | X, y = ds.get_test_data() 69 | print(X, y) 70 | 71 | from sklearn.ensemble import RandomForestRegressor 72 | 73 | model = SklearnModel(RandomForestRegressor()) 74 | model.fit(ds) 75 | -------------------------------------------------------------------------------- /engine/ml/model_bak/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/ml/model_bak/BUILD -------------------------------------------------------------------------------- /engine/ml/model_bak/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/ml/model_bak/__init__.py -------------------------------------------------------------------------------- /engine/ml/model_bak/boosting_models.py: -------------------------------------------------------------------------------- 1 | from lightgbm import LGBMClassifier 2 | from sklearn.ensemble import HistGradientBoostingClassifier 3 | from xgboost import XGBClassifier 4 | 5 | gb_clf = HistGradientBoostingClassifier(loss='binary_crossentropy', 6 | learning_rate=0.1, # regulates the contribution of each tree 7 | max_iter=100, # number of boosting stages 8 | min_samples_leaf=20, 9 | max_depth=None, 10 | random_state=None, 11 | max_leaf_nodes=31, # opt value depends on feature interaction 12 | warm_start=False, 13 | # early_stopping=True, 14 | # scoring='loss', 15 | # validation_fraction=0.1, 16 | # n_iter_no_change=None, 17 | verbose=0, 18 | tol=0.0001) 19 | 20 | lgb_clf = LGBMClassifier(boosting_type='gbdt', 21 | # device='gpu', 22 | objective='binary', # learning task 23 | metric='auc', 24 | num_leaves=31, # Maximum tree leaves for base learners. 25 | max_depth=-1, # Maximum tree depth for base learners, -1 means no limit. 26 | learning_rate=0.1, # Adaptive lr via callback override in .fit() method 27 | n_estimators=100, # Number of boosted trees to fit 28 | subsample_for_bin=200000, # Number of samples for constructing bins. 29 | class_weight=None, # dict, 'balanced' or None 30 | min_split_gain=0.0, # Minimum loss reduction for further split 31 | min_child_weight=0.001, # Minimum sum of instance weight(hessian) 32 | min_child_samples=20, # Minimum number of data need in a child(leaf) 33 | subsample=1.0, # Subsample ratio of training samples 34 | subsample_freq=0, # Frequency of subsampling, <=0: disabled 35 | colsample_bytree=1.0, # Subsampling ratio of features 36 | reg_alpha=0.0, # L1 regularization term on weights 37 | reg_lambda=0.0, # L2 regularization term on weights 38 | random_state=42, # Random number seed; default: C++ seed 39 | n_jobs=-1, # Number of parallel threads. 40 | silent=False, 41 | importance_type='gain', # default: 'split' or 'gain' 42 | ) 43 | 44 | xgb_clf = XGBClassifier(max_depth=3, # Maximum tree depth for base learners. 45 | learning_rate=0.1, # Boosting learning rate (xgb's "eta") 46 | n_estimators=100, # Number of boosted trees to fit. 
47 | silent=True, # Whether to print messages while running 48 | objective='binary:logistic', # Task and objective or custom objective function 49 | booster='gbtree', # Select booster: gbtree, gblinear or dart 50 | # tree_method='gpu_hist', 51 | n_jobs=-1, # Number of parallel threads 52 | gamma=0, # Min loss reduction for further splits 53 | min_child_weight=1, # Min sum of sample weight(hessian) needed 54 | max_delta_step=0, # Max delta step for each tree's weight estimation 55 | subsample=1, # Subsample ratio of training samples 56 | colsample_bytree=1, # Subsample ratio of cols for each tree 57 | colsample_bylevel=1, # Subsample ratio of cols for each split 58 | reg_alpha=0, # L1 regularization term on weights 59 | reg_lambda=1, # L2 regularization term on weights 60 | scale_pos_weight=1, # Balancing class weights 61 | base_score=0.5, # Initial prediction score; global bias 62 | random_state=42) # random seed -------------------------------------------------------------------------------- /engine/ml/model_bak/dataset.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | import numpy as np 3 | import pandas as pd 4 | 5 | from engine.datafeed.expr.expr_mgr import ExprMgr 6 | 7 | 8 | def make_dataset(time_lags=5): 9 | expr = ExprMgr() 10 | 11 | df = pd.DataFrame() 12 | 13 | fields = [] 14 | names = [] 15 | fields += ["Corr($close/Ref($close,1), Log($volume/Ref($volume, 1)+1), 30)"] 16 | names += ["CORR30"] 17 | fields += ["Corr($close/Ref($close,1), Log($volume/Ref($volume, 1)+1), 60)"] 18 | names += ["CORR60"] 19 | 20 | fields += ["Std($close, 30)/$close"] 21 | names += ["STD30"] 22 | fields += ["Corr($close, Log($volume+1), 5)"] 23 | names += ["CORR5"] 24 | 25 | # fields += ["Resi($close, 10)/$close"] 26 | # names += ["RESI10"] 27 | # fields += ["Resi($close, 5)/$close"] 28 | # names += ["RESI5"] 29 | 30 | fields += ["Std($close, 5)/$close"] 31 | names += ["STD5"] 32 | fields += ["Std($close, 20)/$close"] 33 | names += ["STD20"] 34 | fields += ["Std($close, 60)/$close"] 35 | names += ["STD60"] 36 | 37 | fields += ["Ref($low, 0)/$close"] 38 | names += ["LOW0"] 39 | 40 | fields += [ 41 | "Std(Abs($close/Ref($close, 1)-1)*$volume, 30)/(Mean(Abs($close/Ref($close, 1)-1)*$volume, 30)+1e-12)" 42 | ] 43 | names += ['WVMA30'] 44 | 45 | fields += ["Ref($close, 5)/$close"] 46 | names += ["ROC5"] 47 | 48 | fields += ["(2*$close-$high-$low)/$open"] 49 | names += ['KSFT'] 50 | 51 | fields += ["($close-Min($low, 5))/(Max($high, 5)-Min($low, 5)+1e-12)"] 52 | names += ["RSV5"] 53 | 54 | fields += ["($high-$low)/$open"] 55 | names += ['KLEN'] 56 | 57 | fields += ["$close"] 58 | names += ['close'] 59 | 60 | for name, field in zip(names, fields): 61 | exp = expr.get_expression(field) 62 | se = exp.load(code) 63 | df[name] = se 64 | 65 | df['r'] = df['close'].pct_change() 66 | df['label'] = np.where(df['close'].pct_change(-1) > 0, 1, 0)#np.sign(df['close'].pct_change(-1)) 67 | # df_lag["volume_Direction"] = np.sign(df_lag["volume_Lag%s_Change" % str(time_lags)]) 68 | print(names) 69 | return df.dropna(how='any') 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | code = 'SPX' 78 | time_lags = 5 79 | #df = feed.get_one_df_by_codes(codes) 80 | #df.index = pd.to_datetime(df.index) 81 | df_dataset = make_dataset(time_lags=time_lags) 82 | 83 | print(df_dataset) 84 | X_train, X_test, Y_train, Y_test = split_dataset(df_dataset, 85 | ['CORR30', 'CORR60', 'STD30', 'CORR5', 'STD5', 'STD20', 'STD60', 86 | 'LOW0', 'WVMA30', 'ROC5', 'KSFT', 'RSV5', 'KLEN'], 87 | "label", 0.85) 
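# NOTE: split_dataset is not defined or imported anywhere in this module, so the
# call above raises NameError as written. It is assumed to perform a chronological
# train/test split on the listed feature columns and the label column. A
# hypothetical sketch of such a helper (it would need to be defined or imported
# before the call):
def split_dataset(df, feature_names, label_name, train_ratio):
    split = int(len(df) * train_ratio)
    X = df[feature_names]
    y = df[label_name]
    return X.iloc[:split], X.iloc[split:], y.iloc[:split], y.iloc[split:]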
88 | #print(X_train.shape[1]) 89 | #print(X_test) 90 | 91 | def norm(raw): 92 | mu, std = raw.mean(), raw.std() 93 | data_ = (raw - mu) / std 94 | return data_ 95 | 96 | train_ = norm(X_train) 97 | test_ = norm(X_test) 98 | 99 | from engine.ml.model.keras_dnn import create_model, set_seeds 100 | 101 | set_seeds() 102 | model = create_model(2, 64, input_dim= X_train.shape[1]) 103 | model.fit(train_, Y_train, 104 | epochs=20, verbose=False, 105 | validation_split=0.2, shuffle=False) 106 | 107 | print(model.evaluate(train_, Y_train)) 108 | print(model.evaluate(test_, Y_test)) 109 | 110 | from engine.ml.model.models import do_svm, do_random_forest, do_logistic_regression, test_predictor 111 | 112 | lr_classifier = do_logistic_regression(train_, Y_train) 113 | lr_hit_ratio, lr_score = test_predictor(lr_classifier, train_, Y_train) 114 | 115 | rf_classifier = do_random_forest(train_, Y_train) 116 | rf_hit_ratio, rf_score = test_predictor(rf_classifier, test_, Y_test) 117 | 118 | svm_classifier = do_svm(train_, Y_train) 119 | svm_hit_ratio, svm_score = test_predictor(rf_classifier, test_, Y_test) 120 | 121 | print("%s : Hit Ratio - Logistic Regreesion=%0.2f, RandomForest=%0.2f, SVM=%0.2f" % ( 122 | 'name', lr_hit_ratio, rf_hit_ratio, svm_hit_ratio)) 123 | 124 | 125 | def backtest(data, data_norm): 126 | data['pos'] = np.where(model.predict(data_norm) > 0.5, 1, 0) 127 | data['pos'] = np.where(data['pos'] == 1, 1, -1) 128 | data['收益率_对数'] = data['pos'] * data['r'] 129 | data['收益率'] = data['pos'] * data['r_'] 130 | # data_bkt['收益率'] = data_bkt['pos'] * data_bkt['r_'] 131 | data['equity_基准'] = data['r'].cumsum().apply(np.exp) 132 | # data_bkt['equity_策略'] = (data_bkt['收益率']+1).cumprod() 133 | data['equity_策略_对数'] = data['收益率_对数'].cumsum().apply(np.exp) 134 | data['equity_策略'] = (data['收益率'] + 1).cumprod() 135 | data[['equity_基准', 'equity_策略_对数', 'equity_策略']].plot(figsize=(10, 6)) 136 | 137 | 138 | backtest(X_train, train_) 139 | -------------------------------------------------------------------------------- /engine/ml/model_bak/keras_dnn.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | import tensorflow as tf 4 | from keras.layers import Dense, Dropout 5 | from keras.models import Sequential 6 | from keras.regularizers import l1 7 | from keras.optimizers import Adam 8 | from sklearn.metrics import accuracy_score 9 | 10 | 11 | def set_seeds(seed=100): 12 | random.seed(seed) 13 | np.random.seed(seed) 14 | tf.random.set_seed(seed) 15 | 16 | 17 | set_seeds() 18 | optimizer = Adam(learning_rate=0.0001) 19 | 20 | 21 | def create_model(hl=2, hu=128, dropout=False, rate=0.3, 22 | regularize=False, reg=l1(0.0005), 23 | optimizer=optimizer, input_dim=None): # input_dim = len(features) 24 | if not regularize: 25 | reg = None 26 | model = Sequential() 27 | model.add(Dense(hu, input_dim=input_dim, 28 | activity_regularizer=reg, 29 | activation='relu')) 30 | if dropout: 31 | model.add(Dropout(rate, seed=100)) 32 | for _ in range(hl): 33 | model.add(Dense(hu, activation='relu', 34 | activity_regularizer=reg)) 35 | if dropout: 36 | model.add(Dropout(rate, seed=100)) 37 | model.add(Dense(1, activation='sigmoid')) 38 | model.compile(loss='binary_crossentropy', 39 | optimizer=optimizer, 40 | metrics=['accuracy']) 41 | return model 42 | 43 | 44 | if __name__ == '__main__': 45 | set_seeds() 46 | model = create_model(hl=2, hu=64) 47 | -------------------------------------------------------------------------------- 
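The create_model helper in engine/ml/model_bak/keras_dnn.py above only defines the network; input_dim has to match the feature count before fit is called (the __main__ block constructs it without one). A minimal smoke test on synthetic data — the engine.ml.model_bak.keras_dnn import path is an assumption here, since in-repo callers still import from the older engine.ml.model package:

import numpy as np
from engine.ml.model_bak.keras_dnn import create_model, set_seeds

set_seeds()
X = np.random.random((256, 13)).astype("float32")   # synthetic features
y = (np.random.random(256) > 0.5).astype("int32")   # synthetic binary labels

model = create_model(hl=2, hu=64, input_dim=X.shape[1])
model.fit(X, y, epochs=2, validation_split=0.2, shuffle=False, verbose=0)
print(model.evaluate(X, y, verbose=0))  # [loss, accuracy]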
/engine/ml/model_bak/models.py: -------------------------------------------------------------------------------- 1 | from sklearn.ensemble import RandomForestClassifier 2 | from sklearn.linear_model import LogisticRegression 3 | from sklearn.metrics import confusion_matrix 4 | from sklearn.svm import LinearSVC, SVC 5 | 6 | 7 | def do_logistic_regression(x_train, y_train): 8 | classifier = LogisticRegression() 9 | classifier.fit(x_train, y_train) 10 | return classifier 11 | 12 | 13 | def do_random_forest(x_train, y_train): 14 | classifier = RandomForestClassifier() 15 | classifier.fit(x_train, y_train) 16 | return classifier 17 | 18 | 19 | def do_svm(x_train, y_train): 20 | classifier = SVC() 21 | classifier.fit(x_train, y_train) 22 | return classifier 23 | 24 | 25 | def test_predictor(classifier, x_test, y_test): 26 | pred = classifier.predict(x_test) 27 | 28 | hit_count = 0 29 | total_count = len(y_test) 30 | for index in range(total_count): 31 | if (pred[index]) == (y_test[index]): 32 | hit_count = hit_count + 1 33 | 34 | hit_ratio = hit_count / total_count 35 | score = classifier.score(x_test, y_test) 36 | # print "hit_count=%s, total=%s, hit_ratio = %s" % (hit_count,total_count,hit_ratio) 37 | 38 | return hit_ratio, score 39 | -------------------------------------------------------------------------------- /engine/ml/model_runner.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | import numpy as np 3 | import pandas as pd 4 | import seaborn as sns 5 | from matplotlib import pyplot as plt 6 | from sklearn.model_selection import cross_validate, GridSearchCV 7 | 8 | from engine.datafeed.dataset import Dataset 9 | from loguru import logger 10 | from time import time 11 | 12 | 13 | class ModelRunner: 14 | def __init__(self, model, ds: Dataset): 15 | self.model = model 16 | self.dataset = ds 17 | 18 | def fit(self): 19 | X_train, y_train = self.dataset.get_train_data() 20 | self.model.fit(X_train, y_train) 21 | 22 | def predict(self): 23 | X_test, y_test = self.dataset.get_test_data() 24 | pred = self.model.predict(X_test) 25 | 26 | score = self.model.score(X_test, y_test) 27 | logger.debug("准确率得分:{}".format(round(score, 5))) 28 | return score 29 | 30 | def run_cv(self, cv, fit_params=None, n_jobs=-1): 31 | start = time() 32 | 33 | metrics = {'balanced_accuracy': 'Accuracy', 34 | 'roc_auc': 'AUC', 35 | 'neg_log_loss': 'Log Loss', 36 | 'f1_weighted': 'F1', 37 | 'precision_weighted': 'Precision', 38 | 'recall_weighted': 'Recall' 39 | } 40 | X,y = self.dataset.get_X_y_data() 41 | scores = cross_validate(estimator=self.model, 42 | X=X, 43 | y=y, 44 | scoring=list(metrics.keys()), 45 | cv=cv, 46 | return_train_score=True, 47 | n_jobs=n_jobs, 48 | verbose=1, 49 | fit_params=fit_params) 50 | duration = time() - start 51 | return scores, duration 52 | 53 | 54 | if __name__ == '__main__': 55 | codes = ['000300.SH', '399006.SZ'] 56 | names = [] 57 | fields = [] 58 | 59 | fields += ["Corr($close/Ref($close,1), Log($volume/Ref($volume, 1)+1), 30)"] 60 | names += ["CORR30"] 61 | 62 | fields += ["Corr($close/Ref($close,1), Log($volume/Ref($volume, 1)+1), 60)"] 63 | names += ["CORR60"] 64 | 65 | fields += ["Ref($close, 5)/$close"] 66 | names += ["ROC5"] 67 | 68 | fields += ["(2*$close-$high-$low)/$open"] 69 | names += ['KSFT'] 70 | 71 | fields += ["($close-Min($low, 5))/(Max($high, 5)-Min($low, 5)+1e-12)"] 72 | names += ["RSV5"] 73 | 74 | fields += ["($high-$low)/$open"] 75 | names += ['KLEN'] 76 | 77 | fields += ["$close"] 78 | names += ['close'] 
79 | 80 | fields += ['KF(Slope($close,20))'] 81 | names += ['KF'] 82 | 83 | fields += ['$close/Ref($close,20)-1'] 84 | names += ['ROC_20'] 85 | 86 | fields += ['KF($ROC_20)'] 87 | names += ['KF_ROC_20'] 88 | 89 | dataset = Dataset(codes, names, fields, split_date='2020-01-01') 90 | 91 | from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier 92 | from sklearn.svm import SVC 93 | from sklearn.linear_model import LogisticRegression 94 | from engine.ml.model.boosting_models import gb_clf, xgb_clf 95 | 96 | for model in [xgb_clf, gb_clf, LogisticRegression(), RandomForestClassifier(), SVC()]: 97 | m = ModelRunner(model, dataset) 98 | m.fit() 99 | m.predict() 100 | 101 | 102 | #m = ModelRunner(xgb_clf, dataset) 103 | #cv = OneStepTimeSeriesSplit(n_splits=12) 104 | 105 | #dummy_cv_result,time = m.run_cv(cv=5) 106 | #print(dummy_cv_result, time) 107 | 108 | fi = pd.Series(xgb_clf.feature_importances_, 109 | index=dataset.get_X_y_data()[0].columns) 110 | fi.nlargest(25).sort_values().plot.barh(figsize=(10, 5), 111 | title='Feature Importance') 112 | sns.despine() 113 | plt.tight_layout(); 114 | 115 | 116 | def stack_results(scores): 117 | metrics = {'balanced_accuracy': 'Accuracy', 118 | 'roc_auc': 'AUC', 119 | 'neg_log_loss': 'Log Loss', 120 | 'f1_weighted': 'F1', 121 | 'precision_weighted': 'Precision', 122 | 'recall_weighted': 'Recall' 123 | } 124 | 125 | columns = pd.MultiIndex.from_tuples( 126 | [tuple(m.split('_', 1)) for m in scores.keys()], 127 | names=['Dataset', 'Metric']) 128 | data = np.array(list(scores.values())).T 129 | df = (pd.DataFrame(data=data, 130 | columns=columns) 131 | .iloc[:, 2:]) 132 | results = pd.melt(df, value_name='Value') 133 | results.Metric = results.Metric.apply(lambda x: metrics.get(x)) 134 | results.Dataset = results.Dataset.str.capitalize() 135 | return results 136 | #results = stack_results(dummy_cv_result) 137 | #results = results.groupby(['Metric', 'Dataset']).Value.mean().unstack() 138 | #print(results) 139 | 140 | 141 | 142 | params = {'learning_rate': np.linspace(0.05, 0.25, 5), 'max_depth': [x for x in range(1, 8, 1)], 'min_samples_leaf': 143 | [x for x in range(1, 5, 1)], 'n_estimators': [x for x in range(50, 100, 10)]} 144 | 145 | clf = GradientBoostingClassifier() 146 | grid = GridSearchCV(clf, params, cv=2, scoring="f1") 147 | X,y = dataset.get_X_y_data() 148 | grid.fit(X=X, y=y) 149 | 150 | print(grid.best_score_) # 查看最佳分数(此处为f1_score) 151 | print(grid.best_params_) # 查看最佳参数 152 | 153 | plt.show() 154 | 155 | 156 | -------------------------------------------------------------------------------- /engine/model/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/model/BUILD -------------------------------------------------------------------------------- /engine/model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/model/__init__.py -------------------------------------------------------------------------------- /engine/model/dql_agent.py: -------------------------------------------------------------------------------- 1 | from collections import deque 2 | import random 3 | 4 | import numpy as np 5 | from keras import Sequential 6 | from keras.layers import Dense 7 | from keras.optimizers import Adam 8 | 9 | 10 | class DQLAgent: 11 | def __init__(self, env, 
gamma=0.95, hu=24, opt=Adam, 12 | lr=0.001, finish=False): 13 | self.env = env 14 | 15 | self.finish = finish 16 | self.epsilon = 1.0 17 | self.epsilon_min = 0.01 18 | self.epsilon_decay = 0.995 19 | self.gamma = gamma 20 | self.batch_size = 32 21 | self.max_treward = 0 22 | self.averages = list() 23 | self.memory = deque(maxlen=2000) 24 | self.osn = env.observation_space.shape[0] 25 | self.model = self._build_model(hu, opt, lr) 26 | 27 | def _build_model(self, hu, opt, lr): 28 | model = Sequential() 29 | model.add(Dense(hu, input_dim=self.osn, 30 | activation='relu')) 31 | model.add(Dense(hu, activation='relu')) 32 | model.add(Dense(self.env.action_space.n, activation='linear')) 33 | model.compile(loss='mse', optimizer=opt(lr=lr)) 34 | return model 35 | 36 | def act(self, state): 37 | if random.random() <= self.epsilon: 38 | return self.env.action_space.sample() 39 | action = self.model.predict(state)[0] 40 | return np.argmax(action) 41 | 42 | def replay(self): 43 | batch = random.sample(self.memory, self.batch_size) 44 | for state, action, reward, next_state, done in batch: 45 | if not done: 46 | reward += self.gamma * np.amax( 47 | self.model.predict(next_state)[0]) 48 | target = self.model.predict(state) 49 | target[0, action] = reward 50 | self.model.fit(state, target, epochs=1, 51 | verbose=False) 52 | if self.epsilon > self.epsilon_min: 53 | self.epsilon *= self.epsilon_decay 54 | 55 | def learn(self, episodes): 56 | trewards = [] 57 | for e in range(1, episodes + 1): 58 | state = self.env.reset() 59 | state = np.reshape(state, [1, self.osn]) 60 | for _ in range(5000): 61 | action = self.act(state) 62 | next_state, reward, done, info = self.env.step(action) 63 | next_state = np.reshape(next_state, 64 | [1, self.osn]) 65 | self.memory.append([state, action, reward, 66 | next_state, done]) 67 | state = next_state 68 | if done: 69 | treward = _ + 1 70 | trewards.append(treward) 71 | av = sum(trewards[-25:]) / 25 72 | self.averages.append(av) 73 | self.max_treward = max(self.max_treward, treward) 74 | templ = 'episode: {:4d}/{} | treward: {:4d} | ' 75 | templ += 'av: {:6.1f} | max: {:4d}' 76 | print(templ.format(e, episodes, treward, av, 77 | self.max_treward), end='\r') 78 | break 79 | if av > 195 and self.finish: 80 | print() 81 | break 82 | if len(self.memory) > self.batch_size: 83 | self.replay() 84 | 85 | def test(self, episodes): 86 | trewards = [] 87 | for e in range(1, episodes + 1): 88 | state = self.env.reset() 89 | for _ in range(5001): 90 | state = np.reshape(state, [1, self.osn]) 91 | action = np.argmax(self.model.predict(state)[0]) 92 | next_state, reward, done, info = self.env.step(action) 93 | state = next_state 94 | if done: 95 | treward = _ + 1 96 | trewards.append(treward) 97 | print('episode: {:4d}/{} | treward: {:4d}' 98 | .format(e, episodes, treward), end='\r') 99 | break 100 | return trewards 101 | 102 | 103 | -------------------------------------------------------------------------------- /engine/model/stock_ranker.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import lightgbm as lgb 4 | from sklearn.metrics import r2_score, accuracy_score 5 | 6 | class LGBModel: 7 | def __init__(self, regression = True): 8 | self.regression = regression 9 | def fit(self, dataset): 10 | X_train, X_valid, y_train, y_valid = dataset.split() 11 | 12 | dtrain = lgb.Dataset(X_train, label=y_train) 13 | dvalid = lgb.Dataset(X_valid, label=y_valid) 14 | 15 | #params = {"objective": 'mse', 
"verbosity": -1} 16 | # 参数 17 | params_regression = { 18 | 'learning_rate': 0.1, 19 | 'metrics':{'auc','mse'}, 20 | 'lambda_l1': 0.1, 21 | 'lambda_l2': 0.2, 22 | 'max_depth': 4, 23 | 'objective': 'mse'#'mse', # 目标函数 24 | } 25 | 26 | params = {'num_leaves': 90, 27 | 'min_data_in_leaf': 30, 28 | 'objective': 'multiclass', 29 | 'num_class': 10, 30 | 'max_depth': -1, 31 | 'learning_rate': 0.03, 32 | "min_sum_hessian_in_leaf": 6, 33 | "boosting": "gbdt", 34 | "feature_fraction": 0.9, 35 | "bagging_freq": 1, 36 | "bagging_fraction": 0.8, 37 | "bagging_seed": 11, 38 | "lambda_l1": 0.1, 39 | "verbosity": -1, 40 | "nthread": 15, 41 | 'metric': {'multi_logloss'}, 42 | "random_state": 2022, 43 | #'device': 'gpu' 44 | } 45 | 46 | if self.regression: 47 | params = params_regression 48 | self.model = lgb.train( 49 | params, 50 | dtrain, 51 | num_boost_round=1000, 52 | valid_sets=[dtrain, dvalid], 53 | valid_names=["train", "valid"], 54 | early_stopping_rounds=50, 55 | verbose_eval=True, 56 | # evals_result=evals_result, 57 | #**kwargs 58 | ) 59 | y_pred = self.model.predict(X_valid) 60 | if not self.regression: 61 | y_pred = np.argmax(y_pred, axis=1) 62 | print('accuracy:',accuracy_score(y_pred, y_valid)) 63 | 64 | y_pred_train = np.argmax(self.model.predict(X_train), axis=1) 65 | print('accuracy_train:',accuracy_score(y_pred_train, y_train)) 66 | else: 67 | print('R2系数:', r2_score(y_valid, y_pred)) 68 | print('训练集——R2系数:', r2_score(y_train, self.model.predict(X_train))) 69 | 70 | def predict(self, dataset): 71 | if self.model is None: 72 | raise ValueError("model is not fitted yet!") 73 | x_test,_ = dataset.get_data(date_range=['20160101', '20211231']) 74 | pred = self.model.predict(x_test) 75 | print(pred) 76 | if not self.regression: 77 | return pd.Series(np.argmax(pred, axis=1), index=x_test.index) 78 | else: 79 | return pd.Series(pred, index=x_test.index) 80 | 81 | 82 | if __name__ == '__main__': 83 | from bak.data.dataset import Dataset 84 | from engine.data.datahandler import DataHandler 85 | 86 | fields = ['Return($close,5)', 'Return($close,20)', 'Ref($close,126)/$close -1','$close','$open','$high','$low','$volume','$amount'] 87 | names = ['return_5', 'return_20', 'return_126','close','open','high','low','volume','amount'] 88 | 89 | #fields += ['Ref($close,-5)/$close -1'] 90 | #names += ['return_-5'] 91 | 92 | #ds = Dataset(codes=, fields=fields, feature_names=names, 93 | # label_expr='QCut(Ref($close,-20)/$close -1,10)') 94 | #print(ds.df) 95 | codes = ['512690.SH', '512170.SH', '512660.SH','159928.SZ','512010.SH'] 96 | codes = ['159915.SZ','510300.SH','512690.SH', '512170.SH', '512660.SH','159928.SZ','512010.SH'] 97 | codes = ['159928.SZ','510050.SH','512010.SH','513100.SH','518880.SH','511220.SH','511010.SH','161716.SZ'] 98 | codes = [ 99 | '000300.SH', 100 | '000905.SH', 101 | '399006.SZ', #创业板 102 | '000852.SH', #中证1000 103 | '399324.SZ', #深证红利 104 | #'000922.SH', #中证红利 105 | '399997.SZ', #中证白酒 106 | '399396.SZ', #食品饮料 107 | 108 | '000013.SH',#上证企债 109 | '000016.SH' #上证50 110 | ] 111 | ds = Dataset(codes=codes, handler=DataHandler()) 112 | print(ds.df) 113 | 114 | m = LGBModel() 115 | m.fit(ds) 116 | pred = m.predict(ds) 117 | print(pred) -------------------------------------------------------------------------------- /engine/performance.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from datetime import datetime 3 | 4 | 5 | class PerformanceUtils(object): 6 | 7 | def rate2equity(self, df_rates): 8 | df = 
df_rates.copy(deep=True) 9 | df.dropna(inplace=True) 10 | for col in df.columns: 11 | df[col] = (df[col] + 1).cumprod() 12 | return df 13 | 14 | def equity2rate(self, df_equity): 15 | df = df_equity.copy(deep=True) 16 | df = df.pct_change() 17 | return df 18 | 19 | def calc_equity(self, df_equity): 20 | df_rates = self.equity2rate(df_equity) 21 | return self.calc_rates(df_rates) 22 | 23 | def calc_rates(self, df_rates): 24 | df_equity = self.rate2equity(df_rates) 25 | df_rates.dropna(inplace=True) 26 | df_equity.dropna(inplace=True) 27 | # 累计收益率,年化收益 28 | count = len(df_rates) 29 | accu_return = round(df_equity.iloc[-1] - 1, 3) 30 | annu_ret = round((accu_return + 1) ** (252 / count) - 1, 3) 31 | # 标准差 32 | std = round(df_rates.std() * (252 ** 0.5), 3) 33 | # 夏普比 34 | sharpe = round(annu_ret / std, 3) 35 | # 最大回撤 36 | mdd = round((df_equity / df_equity.expanding(min_periods=1).max()).min() - 1, 3) 37 | 38 | ret_2_mdd = round(annu_ret / abs(mdd), 3) 39 | 40 | ratios = [accu_return, annu_ret, std, sharpe, mdd, ret_2_mdd] 41 | 42 | # df_ratio存放这里计算结果 43 | df_ratios = pd.concat(ratios, axis=1) 44 | # df_ratios.index = list(df_rates.columns) 45 | df_ratios.columns = ['累计收益', '年化收益', '波动率', '夏普比', '最大回撤', '卡玛比率'] 46 | 47 | # 相关系数矩阵 48 | df_corr = round(df_equity.corr(), 2) 49 | 50 | start_dt = df_rates.index[0] 51 | end_dt = df_rates.index[-1] 52 | if isinstance(start_dt, str): 53 | start_year = int(start_dt[:4]) 54 | end_year = int(end_dt[:4]) 55 | df_equity['date'] = df_equity.index 56 | df_equity.index = df_equity['date'].apply(lambda x: datetime.strptime(x, '%Y%m%d')) 57 | del df_equity['date'] 58 | else: 59 | if type(start_dt) is str: 60 | start_year = start_dt[:4] 61 | else: 62 | start_year = start_dt.year 63 | end_year = end_dt.year 64 | 65 | years = [] 66 | for year in range(start_year, end_year + 1): 67 | sub_df = df_equity[str(year)] 68 | if len(sub_df) <= 3: 69 | continue 70 | year_se = round(sub_df.iloc[-1] / sub_df.iloc[0] - 1, 3) 71 | year_se.name = str(year) 72 | years.append(year_se) 73 | if len(years): 74 | df_years = pd.concat(years, axis=1) 75 | else: 76 | df_years = None 77 | 78 | df_ratios['名称'] = df_ratios.index 79 | return df_ratios, df_corr, df_years 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /engine/qlib_mgr.py: -------------------------------------------------------------------------------- 1 | import qlib 2 | from qlib.config import REG_CN 3 | from qlib.data import D 4 | 5 | import sys 6 | import codecs 7 | print(sys.stdout.encoding) 8 | 9 | sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach()) 10 | class QlibMgr: 11 | def __init__(self): 12 | provider_uri = "../data/qlib_data/cn_data" # target_dir 13 | qlib.init(provider_uri=provider_uri, region=REG_CN) 14 | 15 | def load_data(self): 16 | ret = D.calendar(start_time='2010-01-01', end_time='2017-12-31', freq='day')[:2] 17 | print(ret) 18 | 19 | instruments = D.instruments('csi300')# ['SH600570','SH600000'] 20 | fields = ['$close', '$volume', 'Ref($close, 1)', 'Mean($close, 3)', '$high-$low'] 21 | data = D.features(instruments, fields, start_time='2010-01-01', end_time='2017-12-31', freq='day') 22 | #print(type(data)) 23 | #print(data.index) 24 | 25 | 26 | if __name__ == '__main__': 27 | mgr = QlibMgr() 28 | mgr.load_data() -------------------------------------------------------------------------------- /engine/rl/BUILD: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/rl/BUILD -------------------------------------------------------------------------------- /engine/rl/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/rl/__init__.py -------------------------------------------------------------------------------- /engine/rl/backtrader_rl/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/rl/backtrader_rl/BUILD -------------------------------------------------------------------------------- /engine/rl/backtrader_rl/__init__.py: -------------------------------------------------------------------------------- 1 | name = "btrl" -------------------------------------------------------------------------------- /engine/rl/backtrader_rl/adapters/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/rl/backtrader_rl/adapters/BUILD -------------------------------------------------------------------------------- /engine/rl/backtrader_rl/adapters/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/rl/backtrader_rl/adapters/__init__.py -------------------------------------------------------------------------------- /engine/rl/backtrader_rl/adapters/gymAdapter.py: -------------------------------------------------------------------------------- 1 | import gym 2 | from gym import spaces 3 | from numpy import infty 4 | 5 | class gymAdapter(gym.Env): 6 | 7 | def __init__(self,engine,**kwargs): 8 | super().__init__(**kwargs) 9 | self.engine = engine 10 | self.action_space = spaces.Discrete(3) 11 | self.engine.action_mapping = {"buy" : 2, "sell" : 0, "hold":1} 12 | self.observation_space = spaces.Box(low = 0,high = infty, shape = self.engine.state_shape) 13 | print(self.observation_space.shape) 14 | 15 | def step(self,action): 16 | observation, reward, self.terminated = self.engine.step(action) 17 | truncated = False 18 | info = {} 19 | return observation, reward, self.terminated, truncated, info 20 | 21 | def reset(self,seed = None, options = {}): 22 | observation = self.engine.reset(seed = seed,**options) 23 | info = {} 24 | return observation 25 | 26 | def render(self): 27 | if self.terminated: 28 | self.engine.plot() 29 | 30 | def close(self): 31 | self.engine.close() 32 | -------------------------------------------------------------------------------- /engine/rl/backtrader_rl/adapters/tensorforceAdapter.py: -------------------------------------------------------------------------------- 1 | from tensorforce import Environment 2 | from numpy import infty 3 | 4 | class tensorforceAdapter(Environment): 5 | 6 | def __init__(self,engine,**kwargs): 7 | super().__init__(**kwargs) 8 | self.engine = engine 9 | 10 | def states(self): 11 | return dict(type='float', min_value = 0, shape=self.engine.state_shape) 12 | 13 | def actions(self): 14 | return dict(type='int', num_values=len(self.engine.actions_mapping)) 15 | 16 | def close(self): 17 | self.engine.close() 18 | 19 | def reset(self,options = {}): 20 | observation = self.engine.reset(seed = 
None,**options) 21 | return observation 22 | 23 | def execute(self, actions): 24 | observation, reward, self.terminated = self.engine.step(actions) 25 | return observation, self.terminated, reward 26 | 27 | def plot(self): 28 | return self.engine.plot() -------------------------------------------------------------------------------- /engine/rl/backtrader_rl/finance_env.py: -------------------------------------------------------------------------------- 1 | import gym 2 | 3 | from engine.rl.clock import Clock 4 | 5 | 6 | class FinanceEnv(gym.Env): 7 | def __init__(self, 8 | action_scheme: ActionScheme, 9 | reward_scheme: RewardScheme, 10 | observer: Observer, 11 | stopper: Stopper, 12 | informer: Informer, 13 | renderer: Renderer, 14 | ): 15 | 16 | self.action_scheme = action_scheme 17 | self.reward_scheme = reward_scheme 18 | self.observer = observer 19 | self.stopper = stopper 20 | self.informer = informer 21 | self.renderer = renderer 22 | 23 | # 就是游标管理 24 | self.clock = Clock() 25 | 26 | super(FinanceEnv, self).__init__() 27 | 28 | def step(self, action): 29 | # 行动计划 执行动作 30 | self.action_scheme.perform(self, action) 31 | # 观察者 反馈动作执行后的状态 32 | obs = self.observer.observe(self) 33 | # 激励计划得到reward 34 | reward = self.reward_scheme.reward(self) 35 | # stopper决定任务是否结束 36 | done = self.stopper.stop(self) 37 | # infomer是给出信息 38 | info = self.informer.info(self) 39 | 40 | self.clock.increment() 41 | 42 | return obs, reward, done, info 43 | 44 | -------------------------------------------------------------------------------- /engine/rl/backtrader_rl/strategys.py: -------------------------------------------------------------------------------- 1 | import backtrader as bt 2 | import numpy as np 3 | 4 | class BaseStrategy(bt.Strategy): 5 | 6 | def __init__(self): 7 | self.action = 1 8 | self.reward = 0 9 | self._mapping = {"buy" : 2, "sell" : 0, "hold":1} 10 | 11 | def _set_action_mapping(self,mapping): 12 | self._mapping = mapping 13 | 14 | def _setAction(self,action): 15 | self.action = action 16 | 17 | def next(self): 18 | if len(self._mapping) == 2: 19 | if self.action == self._mapping["buy"]: 20 | # if current position is sell 21 | # then we are reversing the trade 22 | if self.position.size < 0: 23 | self.buy(size = 2 * abs(self.position.size)) 24 | elif self.position.size == 0: 25 | self.buy() 26 | elif self.action == self._mapping["sell"]: 27 | # if current position is buy 28 | # then we are closing a trade 29 | if self.position.size > 0: 30 | self.sell(size = 2 * abs(self.position.size)) 31 | elif self.position.size == 0: 32 | self.sell() 33 | elif self.action == -1: 34 | pass 35 | 36 | else: 37 | 38 | if self.action == self._mapping["buy"]: 39 | # if current position is sell 40 | # then we are closing a trade 41 | if self.position.size < 0: 42 | self.close() 43 | elif self.position.size == 0: 44 | self.buy() 45 | elif self.action == self._mapping["sell"]: 46 | # if current position is buy 47 | # then we are closing a trade 48 | if self.position.size > 0: 49 | self.close() 50 | elif self.position.size == 0: 51 | self.sell() 52 | elif self.action == -1: 53 | pass 54 | 55 | def _computeReward(self): 56 | try: 57 | reward = self.computeReward() 58 | except: 59 | reward = 0 60 | self.reward = reward 61 | return self.reward 62 | 63 | class PositionBasedStrategy(BaseStrategy): 64 | 65 | def computeReward(self): 66 | if self.position.size == 0: 67 | return 0 68 | 69 | a = self.position.price 70 | b = self.datas[0].close[0] 71 | d = (b-a)/((b+a)/2) 72 | return d * 100 * 
(self.position.size/abs(self.position.size)) 73 | 74 | class returnBasedStrategy(BaseStrategy): 75 | 76 | def start(self): 77 | self.start_value = self.broker.get_value() 78 | 79 | def computeReward(self): 80 | 81 | return (self.broker.get_value()-self.start_value)/self.start_value * 100 82 | 83 | class SharpRatioStrategy(BaseStrategy): 84 | 85 | params = (("riskfree_rate" , 0),) 86 | 87 | def computeReward(self): 88 | trades = list(list(self._trades.copy().values())[0].values())[0] 89 | filterd_trades = list(filter(lambda x : x.isclosed, trades)) 90 | 91 | if len(filterd_trades) < 1: 92 | return 0 93 | 94 | ret = list(map(lambda x : ((x.pnl/x.price) * 100) - self.p.riskfree_rate, filterd_trades)) 95 | 96 | if np.std(ret) == 0: 97 | return 0 98 | 99 | sharp_ratio = np.mean(ret)/np.std(ret) 100 | 101 | return sharp_ratio 102 | 103 | class SortinoRatioStrategy(BaseStrategy): 104 | 105 | params = (("riskfree_rate" , 0),) 106 | 107 | def computeReward(self): 108 | trades = list(list(self._trades.copy().values())[0].values())[0] 109 | filterd_trades = list(filter(lambda x : x.isclosed, trades)) 110 | 111 | if len(filterd_trades) < 1: 112 | return 0 113 | 114 | ret = list(map(lambda x : ((x.pnl/x.price) * 100) - self.p.riskfree_rate, filterd_trades)) 115 | 116 | 117 | downside_ret = list(filter(lambda x : x < 0, ret)) 118 | 119 | if len(downside_ret) < 1 or np.std(downside_ret) == 0: 120 | return 0 121 | 122 | sharp_ratio = np.mean(ret)/np.std(downside_ret) 123 | 124 | return sharp_ratio -------------------------------------------------------------------------------- /engine/rl/backtrader_rl/utils.py: -------------------------------------------------------------------------------- 1 | import backtrader as bt 2 | 3 | class minPeriodIndicator(bt.Indicator): 4 | lines = ("state",) 5 | params = (('period',5),) 6 | plotinfo = dict(plot = False, 7 | subplot=False) 8 | 9 | def __init__(self) -> None: 10 | self.addminperiod(self.params.period+1) 11 | 12 | class rewardObserver(bt.Observer): 13 | alias = ("reward",) 14 | lines = ("rewards",) 15 | plotinfo = dict(plot=True, 16 | subplot=True, 17 | plotname = "Reward") 18 | 19 | def next(self): 20 | self.lines.rewards[0] = self._owner.reward 21 | 22 | class cummulativeRewardObserver(bt.Observer): 23 | alias = ("cummulativeReward",) 24 | lines = ("cummulativeRewards",) 25 | plotinfo = dict(plot=True, 26 | subplot=True, 27 | plotname = "Cumulative Reward") 28 | 29 | def next(self): 30 | if len(self.lines.cummulativeRewards) == 1: 31 | self.lines.cummulativeRewards[0] = self._owner.reward 32 | else: 33 | self.lines.cummulativeRewards[0] = self.lines.cummulativeRewards[-1] + self._owner.reward 34 | 35 | class RewardAnalyzer(bt.Analyzer): 36 | 37 | def __init__(self): 38 | self.cummulative_reward = 0 39 | self.reward_history = [] 40 | 41 | def next(self): 42 | self.reward_history.append(self.strategy.reward) 43 | self.cummulative_reward += self.strategy.reward 44 | 45 | def get_analysis(self): 46 | return dict(cummulative_reward = self.cummulative_reward, reward_history = self.reward_history) 47 | 48 | class actionObserver(bt.Observer): 49 | alias = ("action",) 50 | lines = ("actions",) 51 | 52 | plotinfo = dict(plot=True, 53 | subplot=True, 54 | plotname = "Action Space", 55 | plotyticks=(0,1,2), 56 | plothlines = (0,1,2)) 57 | 58 | def next(self): 59 | self.lines.actions[0] = self._owner.action 60 | -------------------------------------------------------------------------------- /engine/rl/clock.py: 
-------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | 4 | class Clock(object): 5 | """A class to track the time for a process. 6 | 7 | Attributes 8 | ---------- 9 | start : int 10 | The time of start for the clock. 11 | step : int 12 | The time of the process the clock is at currently. 13 | 14 | Methods 15 | ------- 16 | now(format=None) 17 | Gets the current time in the provided format. 18 | increment() 19 | Increments the clock by specified time increment. 20 | reset() 21 | Resets the clock. 22 | """ 23 | 24 | def __init__(self): 25 | self.start = 0 26 | self.step = self.start 27 | 28 | def now(self, format: str = None) -> datetime: 29 | """Gets the current time in the provided format. 30 | Parameters 31 | ---------- 32 | format : str or None, optional 33 | The format to put the current time into. 34 | 35 | Returns 36 | ------- 37 | datetime 38 | The current time. 39 | """ 40 | return datetime.now().strftime(format) if format else datetime.now() 41 | 42 | def increment(self) -> None: 43 | """Increments the clock by specified time increment.""" 44 | self.step += 1 45 | 46 | def reset(self) -> None: 47 | """Resets the clock.""" 48 | self.step = self.start 49 | -------------------------------------------------------------------------------- /engine/rl/env_portfolio.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import gym 4 | import matplotlib 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | import pandas as pd 8 | from gym import spaces 9 | from gym.utils import seeding 10 | from stable_baselines3.common.vec_env import DummyVecEnv 11 | 12 | matplotlib.use("Agg") 13 | 14 | 15 | class StockPortfolioEnv(gym.Env): 16 | """A single stock trading environment for OpenAI gym 17 | 18 | Attributes 19 | ---------- 20 | df: DataFrame 21 | input data 22 | stock_dim : int 23 | number of unique stocks 24 | hmax : int 25 | maximum number of shares to trade 26 | initial_amount : int 27 | start money 28 | transaction_cost_pct: float 29 | transaction cost percentage per trade 30 | reward_scaling: float 31 | scaling factor for reward, good for training 32 | state_space: int 33 | the dimension of input features 34 | action_space: int 35 | equals stock dimension 36 | tech_indicator_list: list 37 | a list of technical indicator names 38 | turbulence_threshold: int 39 | a threshold to control risk aversion 40 | day: int 41 | an increment number to control date 42 | 43 | Methods 44 | ------- 45 | _sell_stock() 46 | perform sell action based on the sign of the action 47 | _buy_stock() 48 | perform buy action based on the sign of the action 49 | step() 50 | at each step the agent will return actions, then 51 | we will calculate the reward, and return the next observation. 
52 | reset() 53 | reset the environment 54 | render() 55 | use render to return other functions 56 | save_asset_memory() 57 | return account value at each time step 58 | save_action_memory() 59 | return actions/positions at each time step 60 | 61 | 62 | """ 63 | 64 | metadata = {"render.modes": ["human"]} 65 | 66 | def __init__( 67 | self, 68 | df, 69 | stock_dim, 70 | hmax, 71 | initial_amount, 72 | transaction_cost_pct, 73 | reward_scaling, 74 | state_space, 75 | action_space, 76 | tech_indicator_list, 77 | turbulence_threshold=None, 78 | lookback=252, 79 | day=0, 80 | ): 81 | # super(StockEnv, self).__init__() 82 | # money = 10 , scope = 1 83 | self.day = day 84 | self.lookback = lookback 85 | self.df = df 86 | self.stock_dim = stock_dim 87 | self.hmax = hmax 88 | self.initial_amount = initial_amount 89 | self.transaction_cost_pct = transaction_cost_pct 90 | self.reward_scaling = reward_scaling 91 | self.state_space = state_space 92 | self.action_space = action_space 93 | self.tech_indicator_list = tech_indicator_list 94 | 95 | # action_space normalization and shape is self.stock_dim 96 | self.action_space = spaces.Box(low=0, high=1, shape=(self.action_space,)) 97 | # Shape = (34, 30) 98 | # covariance matrix + technical indicators 99 | self.observation_space = spaces.Box( 100 | low=-np.inf, 101 | high=np.inf, 102 | shape=(self.state_space + len(self.tech_indicator_list), self.state_space), 103 | ) 104 | 105 | # load data from a pandas dataframe 106 | self.data = self.df.loc[self.day, :] 107 | self.covs = self.data["cov_list"].values[0] 108 | self.state = np.append( 109 | np.array(self.covs), 110 | [self.data[tech].values.tolist() for tech in self.tech_indicator_list], 111 | axis=0, 112 | ) 113 | self.terminal = False 114 | self.turbulence_threshold = turbulence_threshold 115 | # initalize state: inital portfolio return + individual stock return + individual weights 116 | self.portfolio_value = self.initial_amount 117 | 118 | # memorize portfolio value each step 119 | self.asset_memory = [self.initial_amount] 120 | # memorize portfolio return each step 121 | self.portfolio_return_memory = [0] 122 | self.actions_memory = [[1 / self.stock_dim] * self.stock_dim] 123 | self.date_memory = [self.data.date.unique()[0]] 124 | 125 | def step(self, actions): 126 | # print(self.day) 127 | self.terminal = self.day >= len(self.df.index.unique()) - 1 128 | # print(actions) 129 | 130 | if self.terminal: 131 | df = pd.DataFrame(self.portfolio_return_memory) 132 | df.columns = ["daily_return"] 133 | plt.plot(df.daily_return.cumsum(), "r") 134 | plt.savefig("results/cumulative_reward.png") 135 | plt.close() 136 | 137 | plt.plot(self.portfolio_return_memory, "r") 138 | plt.savefig("results/rewards.png") 139 | plt.close() 140 | 141 | print("=================================") 142 | print(f"begin_total_asset:{self.asset_memory[0]}") 143 | print(f"end_total_asset:{self.portfolio_value}") 144 | 145 | df_daily_return = pd.DataFrame(self.portfolio_return_memory) 146 | df_daily_return.columns = ["daily_return"] 147 | if df_daily_return["daily_return"].std() != 0: 148 | sharpe = ( 149 | (252**0.5) 150 | * df_daily_return["daily_return"].mean() 151 | / df_daily_return["daily_return"].std() 152 | ) 153 | print("Sharpe: ", sharpe) 154 | print("=================================") 155 | 156 | return self.state, self.reward, self.terminal, {} 157 | 158 | else: 159 | # print("Model actions: ",actions) 160 | # actions are the portfolio weight 161 | # normalize to sum of 1 162 | # if (np.array(actions) - 
np.array(actions).min()).sum() != 0: 163 | # norm_actions = (np.array(actions) - np.array(actions).min()) / (np.array(actions) - np.array(actions).min()).sum() 164 | # else: 165 | # norm_actions = actions 166 | weights = self.softmax_normalization(actions) 167 | # print("Normalized actions: ", weights) 168 | self.actions_memory.append(weights) 169 | last_day_memory = self.data 170 | 171 | # load next state 172 | self.day += 1 173 | self.data = self.df.loc[self.day, :] 174 | self.covs = self.data["cov_list"].values[0] 175 | self.state = np.append( 176 | np.array(self.covs), 177 | [self.data[tech].values.tolist() for tech in self.tech_indicator_list], 178 | axis=0, 179 | ) 180 | # print(self.state) 181 | # calcualte portfolio return 182 | # individual stocks' return * weight 183 | portfolio_return = sum( 184 | ((self.data.close.values / last_day_memory.close.values) - 1) * weights 185 | ) 186 | # update portfolio value 187 | new_portfolio_value = self.portfolio_value * (1 + portfolio_return) 188 | self.portfolio_value = new_portfolio_value 189 | 190 | # save into memory 191 | self.portfolio_return_memory.append(portfolio_return) 192 | self.date_memory.append(self.data.date.unique()[0]) 193 | self.asset_memory.append(new_portfolio_value) 194 | 195 | # the reward is the new portfolio value or end portfolo value 196 | self.reward = new_portfolio_value 197 | # print("Step reward: ", self.reward) 198 | # self.reward = self.reward*self.reward_scaling 199 | 200 | return self.state, self.reward, self.terminal, {} 201 | 202 | def reset(self): 203 | self.asset_memory = [self.initial_amount] 204 | self.day = 0 205 | self.data = self.df.loc[self.day, :] 206 | # load states 207 | self.covs = self.data["cov_list"].values[0] 208 | self.state = np.append( 209 | np.array(self.covs), 210 | [self.data[tech].values.tolist() for tech in self.tech_indicator_list], 211 | axis=0, 212 | ) 213 | self.portfolio_value = self.initial_amount 214 | # self.cost = 0 215 | # self.trades = 0 216 | self.terminal = False 217 | self.portfolio_return_memory = [0] 218 | self.actions_memory = [[1 / self.stock_dim] * self.stock_dim] 219 | self.date_memory = [self.data.date.unique()[0]] 220 | return self.state 221 | 222 | def render(self, mode="human"): 223 | return self.state 224 | 225 | def softmax_normalization(self, actions): 226 | numerator = np.exp(actions) 227 | denominator = np.sum(np.exp(actions)) 228 | softmax_output = numerator / denominator 229 | return softmax_output 230 | 231 | def save_asset_memory(self): 232 | date_list = self.date_memory 233 | portfolio_return = self.portfolio_return_memory 234 | # print(len(date_list)) 235 | # print(len(asset_list)) 236 | df_account_value = pd.DataFrame( 237 | {"date": date_list, "daily_return": portfolio_return} 238 | ) 239 | return df_account_value 240 | 241 | def save_action_memory(self): 242 | # date and close price length must match actions length 243 | date_list = self.date_memory 244 | df_date = pd.DataFrame(date_list) 245 | df_date.columns = ["date"] 246 | 247 | action_list = self.actions_memory 248 | df_actions = pd.DataFrame(action_list) 249 | df_actions.columns = self.data.tic.values 250 | df_actions.index = df_date.date 251 | # df_actions = pd.DataFrame({'date':date_list,'actions':action_list}) 252 | return df_actions 253 | 254 | def _seed(self, seed=None): 255 | self.np_random, seed = seeding.np_random(seed) 256 | return [seed] 257 | 258 | def get_sb_env(self): 259 | e = DummyVecEnv([lambda: self]) 260 | obs = e.reset() 261 | return e, obs 262 | 263 | if __name__ == 
'__main__': 264 | from stable_baselines3.common.env_checker import check_env 265 | from stable_baselines3 import A2C 266 | 267 | # obs = env.reset() 268 | # print(obs.shape) 269 | env = StockPortfolioEnv() 270 | check_env(env) 271 | model = A2C("MlpPolicy", env).learn(total_timesteps=1000) -------------------------------------------------------------------------------- /engine/rl/finane_env.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | 3 | import gym 4 | import numpy as np 5 | from gym import spaces 6 | 7 | 8 | class FinanceEnv(gym.Env, ABC): 9 | def __init__(self, symbols, df_features, df_returns, initial_amount=1000000): 10 | super(FinanceEnv, self).__init__() 11 | # 正则化,和=1,长度就是组合里的证券数量 12 | self.action_space = spaces.Box(low=0, high=1, shape=(len(symbols),)) 13 | self.observation_space = spaces.Box( 14 | low=-np.inf, 15 | high=np.inf, 16 | shape=(len(symbols), len(df_features.columns)), dtype=np.float64 17 | ) 18 | #print(self.observation_space) 19 | self.dates = list(df_features.index) 20 | self.df_features = df_features 21 | self.df_returns = df_returns 22 | self.initial_amount = initial_amount 23 | self.portfolio_value = initial_amount 24 | self.index = 0 25 | 26 | def reset(self): 27 | self.index = 0 28 | self.portfolio_value = self.initial_amount 29 | df = self.df_features.loc[self.dates[0]] 30 | print(df.values.shape) 31 | return df.values 32 | 33 | def step(self, actions): 34 | done = False 35 | if self.index >= len(self.dates) - 1: 36 | done = True 37 | print(self.reward) 38 | return self.state, self.reward, done, {} 39 | 40 | self.index += 1 41 | 42 | weights = self.softmax_normalization(actions) 43 | df_return = np.array(self.df_returns.loc[self.dates[self.index]]['return']) 44 | port_return = sum(df_return * np.array(weights)) 45 | self.portfolio_value = self.portfolio_value * (1 + port_return) 46 | 47 | df = self.df_features.loc[self.dates[self.index], :] 48 | 49 | self.state = df.values 50 | self.reward = self.portfolio_value * 1.0 51 | 52 | return self.state, self.reward, done, {} 53 | 54 | def softmax_normalization(self, actions): 55 | numerator = np.exp(actions) 56 | denominator = np.sum(np.exp(actions)) 57 | softmax_output = numerator / denominator 58 | return softmax_output 59 | 60 | 61 | if __name__ == '__main__': 62 | from stable_baselines3.common.env_checker import check_env 63 | from stable_baselines3 import A2C 64 | from engine.datafeed.dataloader import Dataloader 65 | 66 | symbols = ['399006.SZ', '000300.SH'] 67 | names = [] 68 | fields = [] 69 | 70 | features = [] 71 | fields += ['Slope($close,20)'] 72 | names += ['mom_slope'] 73 | features += ['mom_slope'] 74 | 75 | fields += ['KF($mom_slope)'] 76 | names += ['kf_mom_slope'] 77 | features += ['kf_mom_slope'] 78 | 79 | fields += ["$close/Ref($close,1) - 1"] 80 | names += ['return'] 81 | 82 | loader = Dataloader(symbols, names, fields, load_from_cache=True) 83 | data = loader.data 84 | data = data[data.index > '2010-06-02'] 85 | df_features = data[names] 86 | df_return = data[['return']] 87 | print(df_features) 88 | env = FinanceEnv(symbols, df_features, df_return) 89 | # check_env(env) 90 | model = A2C("MlpPolicy", env) 91 | model.learn(total_timesteps=100000) 92 | -------------------------------------------------------------------------------- /engine/rl/gym_env_example.py: -------------------------------------------------------------------------------- 1 | from __future__ import (absolute_import, division, print_function, 2 | 
unicode_literals) 3 | 4 | import pandas as pd 5 | 6 | from random import randint 7 | 8 | import backtrader as bt 9 | from backtrader_rl.strategys import returnBasedStrategy 10 | from backtrader_rl.utils import actionObserver,rewardObserver,cummulativeRewardObserver 11 | from backtrader_rl.engines import BTEngine 12 | from backtrader_rl.adapters.gymAdapter import gymAdapter 13 | 14 | # ================ 15 | # Defining constants 16 | # ================ 17 | 18 | # the length of data points available to the agent at each step 19 | LOOKBACK = 40 20 | 21 | 22 | # ================ 23 | # Preparing test data 24 | # ================ 25 | 26 | from pathlib import Path 27 | root = Path().absolute() 28 | file = "BNB_USDT_5m.csv" 29 | df = pd.read_csv(Path(root,file),index_col=0) 30 | df["timestamp"] = pd.to_datetime(df["timestamp"]) 31 | df = df.set_index("timestamp",drop=True) 32 | data = bt.feeds.PandasData(dataname=df) 33 | 34 | engine = BTEngine(lookback = LOOKBACK) 35 | engine.broker.set_cash(100) 36 | engine.adddata(data) 37 | 38 | # ================ 39 | # Defining the Strategy 40 | # ================ 41 | 42 | # the strategy defines the reward schema 43 | # the position based strategy uses the position PNL as a reward at each step 44 | engine.addstrategy(returnBasedStrategy) 45 | 46 | # observeres allow us to peak into the actions taken by the agent over the episode 47 | engine.addobserver(actionObserver) 48 | engine.addobserver(rewardObserver) 49 | engine.addobserver(cummulativeRewardObserver) 50 | 51 | 52 | # default backtrader observers also work just fine 53 | engine.addobserver(bt.observers.BuySell) 54 | engine.addobserver(bt.observers.Broker) 55 | engine.addobserver(bt.observers.Trades) 56 | 57 | 58 | # ================ 59 | # use the openai gym environment 60 | # ================ 61 | 62 | env = gymAdapter(engine) 63 | 64 | from stable_baselines3.common.env_checker import check_env 65 | from stable_baselines3 import A2C 66 | #obs = env.reset() 67 | #print(obs.shape) 68 | check_env(env) 69 | model = A2C("MlpPolicy", env).learn(total_timesteps=1000) -------------------------------------------------------------------------------- /engine/rl/strategy_rl.py: -------------------------------------------------------------------------------- 1 | import backtrader as bt 2 | import numpy as np 3 | 4 | 5 | class BaseStrategy(bt.Strategy): 6 | 7 | def __init__(self): 8 | self.action = 1 9 | self.reward = 0 10 | self._mapping = {"buy": 2, "sell": 0, "hold": 1} 11 | 12 | def _set_action_mapping(self, mapping): 13 | self._mapping = mapping 14 | 15 | def _setAction(self, action): 16 | self.action = action 17 | 18 | def next(self): 19 | if len(self._mapping) == 2: 20 | if self.action == self._mapping["buy"]: 21 | # if current position is sell 22 | # then we are reversing the trade 23 | if self.position.size < 0: 24 | self.buy(size=2 * abs(self.position.size)) 25 | elif self.position.size == 0: 26 | self.buy() 27 | elif self.action == self._mapping["sell"]: 28 | # if current position is buy 29 | # then we are closing a trade 30 | if self.position.size > 0: 31 | self.sell(size=2 * abs(self.position.size)) 32 | elif self.position.size == 0: 33 | self.sell() 34 | elif self.action == -1: 35 | pass 36 | 37 | else: 38 | 39 | if self.action == self._mapping["buy"]: 40 | # if current position is sell 41 | # then we are closing a trade 42 | if self.position.size < 0: 43 | self.close() 44 | elif self.position.size == 0: 45 | self.buy() 46 | elif self.action == self._mapping["sell"]: 47 | # if current position is 
buy 48 | # then we are closing a trade 49 | if self.position.size > 0: 50 | self.close() 51 | elif self.position.size == 0: 52 | self.sell() 53 | elif self.action == -1: 54 | pass 55 | 56 | def _computeReward(self): 57 | try: 58 | reward = self.computeReward() 59 | except: 60 | reward = 0 61 | self.reward = reward 62 | return self.reward 63 | 64 | 65 | class PositionBasedStrategy(BaseStrategy): 66 | 67 | def computeReward(self): 68 | if self.position.size == 0: 69 | return 0 70 | 71 | a = self.position.price 72 | b = self.datas[0].close[0] 73 | d = (b - a) / ((b + a) / 2) 74 | return d * 100 * (self.position.size / abs(self.position.size)) 75 | 76 | 77 | class returnBasedStrategy(BaseStrategy): 78 | 79 | def start(self): 80 | self.start_value = self.broker.get_value() 81 | 82 | def computeReward(self): 83 | return (self.broker.get_value() - self.start_value) / self.start_value * 100 84 | 85 | 86 | class SharpRatioStrategy(BaseStrategy): 87 | params = (("riskfree_rate", 0),) 88 | 89 | def computeReward(self): 90 | trades = list(list(self._trades.copy().values())[0].values())[0] 91 | filterd_trades = list(filter(lambda x: x.isclosed, trades)) 92 | 93 | if len(filterd_trades) < 1: 94 | return 0 95 | 96 | ret = list(map(lambda x: ((x.pnl / x.price) * 100) - self.p.riskfree_rate, filterd_trades)) 97 | 98 | if np.std(ret) == 0: 99 | return 0 100 | 101 | sharp_ratio = np.mean(ret) / np.std(ret) 102 | 103 | return sharp_ratio 104 | 105 | 106 | class SortinoRatioStrategy(BaseStrategy): 107 | params = (("riskfree_rate", 0),) 108 | 109 | def computeReward(self): 110 | trades = list(list(self._trades.copy().values())[0].values())[0] 111 | filterd_trades = list(filter(lambda x: x.isclosed, trades)) 112 | 113 | if len(filterd_trades) < 1: 114 | return 0 115 | 116 | ret = list(map(lambda x: ((x.pnl / x.price) * 100) - self.p.riskfree_rate, filterd_trades)) 117 | 118 | downside_ret = list(filter(lambda x: x < 0, ret)) 119 | 120 | if len(downside_ret) < 1 or np.std(downside_ret) == 0: 121 | return 0 122 | 123 | sharp_ratio = np.mean(ret) / np.std(downside_ret) 124 | 125 | return sharp_ratio -------------------------------------------------------------------------------- /engine/strategy/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/strategy/BUILD -------------------------------------------------------------------------------- /engine/strategy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/strategy/__init__.py -------------------------------------------------------------------------------- /engine/strategy/algo_utils.py: -------------------------------------------------------------------------------- 1 | # encoding:utf8 2 | import numpy as np 3 | import scipy.optimize as sco 4 | 5 | 6 | def calculate_half_cov_matrix(train_set): 7 | # 计算半衰协方差矩阵 8 | M = 60 9 | train_subset = train_set.iloc[0:60] 10 | cov_matrix = train_subset.cov()*M * (1 / 10) 11 | for i in range(1, 4): 12 | if len(train_set) < i * 60: 13 | break 14 | # print(len(train_set)) 15 | train_subset = train_set.iloc[i * 60:(i + 1) * 60] 16 | #print(train_subset) 17 | sub_cov_matrix = train_subset.cov()*M 18 | if i == 1: 19 | sub_cov_matrix = sub_cov_matrix * (2 / 10) 20 | if i == 2: 21 | sub_cov_matrix = sub_cov_matrix * (3 / 10) 22 | else: 23 | sub_cov_matrix = 
sub_cov_matrix * (4 / 10) 24 | cov_matrix = cov_matrix + sub_cov_matrix 25 | # print(cov_matrix) 26 | return np.matrix(cov_matrix) 27 | 28 | 29 | # import scipy.interpolate as sci 30 | # 根据资产预期目标风险贡献度来计算各资产的权重 31 | def calculate_portfolio_weight(one_cov_matrix, risk_budget_objective): 32 | ''' 33 | 约束条件的类型只有'eq'和'ineq'两种 34 | eq表示约束方程的返回结果为0 35 | ineq表示约束方程的返回结果为非负数 36 | ''' 37 | num = one_cov_matrix.shape[1] # 一种有num种资产 38 | x0 = np.array([1.0 / num for _ in range(num)]) # 初始资产权重 39 | bounds = tuple((0, 1) for _ in range(num)) # 取值范围(0,1) 40 | 41 | cons_1 = ({'type': 'eq', 'fun': lambda x: sum(x) - 1},) # 权重和为1 42 | RC_set_ratio = np.array([1.0 / num for _ in range(num)]) # 风险平价下每个资产的目标风险贡献度相等 43 | optv = sco.minimize(risk_budget_objective, x0, args=[one_cov_matrix, RC_set_ratio], method='SLSQP', bounds=bounds, 44 | constraints=cons_1) 45 | return optv.x 46 | 47 | 48 | # 标准风险平价下的风险贡献 49 | def calculate_risk_contribution(weight, one_cov_matrix): 50 | weight = np.matrix(weight) 51 | sigma = np.sqrt(weight * one_cov_matrix * weight.T) 52 | # 边际风险贡献 Marginal Risk Contribution (MRC) 53 | MRC = one_cov_matrix * weight.T / sigma 54 | # 风险贡献 Risk Contribution (RC) 55 | RC = np.multiply(MRC, weight.T) 56 | return RC 57 | 58 | 59 | # 定义优化问题的目标函数,即最小化资产之间的风险贡献差 60 | def naive_risk_parity(weight, parameters): 61 | # weight: 待求解的资产权重, 62 | # parameters: 参数列表 63 | # parameters[0]: 协方差矩阵 64 | # parameters[1]: 风险平价下的目标风险贡献度向量 65 | 66 | one_cov_matrix = parameters[0] 67 | RC_target_ratio = parameters[1] 68 | # RC_target为风险平价下的目标风险贡献,一旦参数传递以后,RC_target就是一个常数,不随迭代而改变 69 | sigma_portfolio = np.sqrt(weight * one_cov_matrix * np.matrix(weight).T) # 组合波动率 70 | RC_target = np.asmatrix(np.multiply(sigma_portfolio, RC_target_ratio)) # 目标风险贡献 71 | # RC_real是 每次迭代以后最新的真实风险贡献,随迭代而改变 72 | RC_real = calculate_risk_contribution(weight, one_cov_matrix) 73 | sum_squared_error = sum(np.square(RC_real - RC_target.T))[0, 0] 74 | return sum_squared_error 75 | 76 | 77 | # 基于主成分分析的风险平价下的风险贡献 78 | def calculate_risk_contribution_pca(weight, one_cov_matrix): 79 | weight = np.matrix(weight) 80 | sigma = np.sqrt(weight * one_cov_matrix * weight.T) 81 | # 奇异值分解,其中uv=I ,u,v是特征向量矩阵,是正交阵,d是对角矩阵,对角元素是特征值,tr(d)=tr(one_cov_matrix) 82 | u, d, v = np.linalg.svd(one_cov_matrix) 83 | a = v * weight.T 84 | b = v * (one_cov_matrix * weight.T) 85 | # 风险贡献 Risk Contribution (RC) 86 | RC = np.multiply(a, b) 87 | RC = RC / sigma 88 | return RC 89 | 90 | 91 | # 定义优化问题的目标函数,即最小化资产之间的风险贡献差 92 | def pca_risk_parity(weight, parameters): 93 | # weight: 待求解的资产权重, 94 | # parameters: 参数列表 95 | # parameters[0]: 协方差矩阵 96 | # parameters[1]: 风险平价下的目标风险贡献度向量 97 | 98 | one_cov_matrix = parameters[0] 99 | RC_target_ratio = parameters[1] 100 | # RC_target为风险平价下的目标风险贡献,一旦参数传递以后,RC_target就是一个常数,不随迭代而改变 101 | sigma_portfolio = np.sqrt(weight * one_cov_matrix * np.matrix(weight).T) # 组合波动率 102 | RC_target = np.asmatrix(np.multiply(sigma_portfolio, RC_target_ratio)) # 目标风险贡献 103 | # RC_real是 每次迭代以后最新的真实风险贡献,随迭代而改变 104 | RC_real = calculate_risk_contribution_pca(weight, one_cov_matrix) 105 | sum_squared_error = sum(np.square(RC_real - RC_target.T))[0, 0] 106 | return sum_squared_error 107 | -------------------------------------------------------------------------------- /engine/strategy/algos.py: -------------------------------------------------------------------------------- 1 | # encoding: utf8 2 | from loguru import logger 3 | import pandas as pd 4 | import abc 5 | 6 | 7 | class RunOnce: 8 | def __init__(self): 9 | self.done = False 10 | 11 | def __call__(self, 
context): 12 | done = self.done 13 | self.done = True 14 | return done 15 | 16 | 17 | class RunPeriod: 18 | 19 | def __call__(self, target): 20 | # get last date 21 | now = target['strategy'].datetime.date(0) 22 | last_date = target['strategy'].datetime.date(-1) 23 | date_to_compare = last_date 24 | now = pd.Timestamp(now) 25 | date_to_compare = pd.Timestamp(date_to_compare) 26 | 27 | result = self.compare_dates(now, date_to_compare) 28 | 29 | return result 30 | 31 | @abc.abstractmethod 32 | def compare_dates(self, now, date_to_compare): 33 | raise (NotImplementedError("RunPeriod Algo is an abstract class!")) 34 | 35 | 36 | # https://github.com/pmorissette/bt/blob/master/bt/algos.py 37 | class RunQuarterly(RunPeriod): 38 | 39 | def compare_dates(self, now, date_to_compare): 40 | if now.quarter != date_to_compare.quarter: 41 | return False 42 | return True 43 | 44 | 45 | import backtrader as bt 46 | 47 | 48 | class AddIndicators: 49 | def __call__(self, stra): 50 | for data in stra.datas: 51 | roc = bt.ind.RateOfChange(data, period=20) 52 | stra.inds[data] = {'mom': roc} 53 | 54 | 55 | class SelectAll: 56 | def __call__(self, context): 57 | stra = context['strategy'] 58 | context['selected'] = list(stra.datas) 59 | return False 60 | 61 | 62 | class SelectBySignal: 63 | def __init__(self, buy_col='buy', sell_col='sell'): 64 | self.buy_col = buy_col 65 | self.sell_col = sell_col 66 | 67 | def __call__(self, context): 68 | stra = context['strategy'] 69 | features = context['features'] 70 | 71 | to_buy = [] 72 | to_sell = [] 73 | holding = [] 74 | 75 | curr_date = stra.get_current_dt() 76 | if curr_date not in features.index: 77 | logger.error('日期不存在{}'.format(curr_date)) 78 | return True 79 | 80 | bar = features.loc[curr_date] 81 | if type(bar) is pd.Series: 82 | bar = bar.to_frame().T 83 | 84 | for row_index, row in bar.iterrows(): 85 | # print(row_index, row) 86 | symbol = row['code'] 87 | data = stra.getdatabyname(symbol) 88 | 89 | if row[self.buy_col]: 90 | to_buy.append(data) 91 | if row[self.sell_col]: 92 | to_sell.append(data) 93 | 94 | if data in stra.get_current_holding_datas(): 95 | holding.append(data) 96 | 97 | new_hold = list(set(to_buy + holding)) 98 | for data in to_sell: 99 | if data in new_hold: 100 | new_hold.remove(data) 101 | 102 | context['selected'] = new_hold 103 | 104 | 105 | def get_current_bar(context): 106 | stra = context['strategy'] 107 | features = context['features'] 108 | 109 | curr_date = stra.get_current_dt() 110 | if curr_date not in features.index: 111 | logger.error('日期不存在{}'.format(curr_date)) 112 | return None 113 | 114 | bar = features.loc[curr_date] 115 | if type(bar) is pd.Series: 116 | bar = bar.to_frame().T 117 | return bar 118 | 119 | 120 | class SelectTopK: 121 | def __init__(self, K=1, order_by='order_by', b_ascending=False): 122 | self.K = K 123 | self.order_by = order_by 124 | self.b_ascending = b_ascending 125 | 126 | def __call__(self, context): 127 | stra = context['strategy'] 128 | features = context['features'] 129 | 130 | if self.order_by not in features.columns: 131 | logger.error('排序字段{}未计算'.format(self.order_by)) 132 | return 133 | 134 | bar = get_current_bar(context) 135 | if bar is None: 136 | logger.error('取不到bar') 137 | return True 138 | bar.sort_values(self.order_by, ascending=self.b_ascending, inplace=True) 139 | 140 | selected = [] 141 | pre_selected = None 142 | if 'selected' in context: 143 | pre_selected = context['selected'] 144 | del context['selected'] 145 | 146 | # 当前全候选集 147 | # 按顺序往下选K个 148 | for code in list(bar.code): 
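            # pre_selected holds the candidate set produced by the previous algo in
            # the pipeline (e.g. SelectAll / SelectBySignal); the ranked bar is
            # intersected with it and at most K names are kept.  If no earlier algo
            # selected anything, the top K of the whole ranked bar are taken.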
149 | if pre_selected: 150 | if code in pre_selected: 151 | selected.append(code) 152 | else: 153 | selected.append(code) 154 | if len(selected) >= self.K: 155 | break 156 | context['selected'] = selected 157 | 158 | 159 | class PickTime: 160 | def __init__(self, benchmark='000300.SH', signal='signal'): 161 | self.benchmark = benchmark 162 | #self.buy = self.buy 163 | self.signal = signal 164 | 165 | def __call__(self, context): 166 | stra = context['strategy'] 167 | extra = context['extra'] 168 | df = extra[self.benchmark] 169 | 170 | if self.signal not in df.columns: 171 | logger.error('择时信号不存在') 172 | return True 173 | 174 | curr_date = stra.get_current_dt() 175 | if curr_date not in df.index: 176 | logger.error('日期不存在{}'.format(curr_date)) 177 | return None 178 | 179 | bar = df.loc[curr_date] 180 | if type(bar) is pd.Series: 181 | bar = bar.to_frame().T 182 | 183 | if bar[self.signal][0]: 184 | logger.info('择时信号显示,平仓所有。') 185 | context['selected'] = [] 186 | 187 | 188 | 189 | class WeightEqually: 190 | def __init__(self): 191 | pass 192 | 193 | def __call__(self, context): 194 | selected = context["selected"] 195 | stra = context['strategy'] 196 | 197 | # 若有持仓,但未入选,则平仓 198 | curr_holdings = stra.get_current_holding_datas() 199 | 200 | for data_holding in curr_holdings: 201 | # logger.info('已持仓:' + data_holding._name) 202 | if data_holding._name not in selected: 203 | stra.close(data_holding) 204 | 205 | N = len(selected) 206 | if N > 0: 207 | weight = 1 / N 208 | for data in selected: 209 | stra.order_target_percent(data, weight * 0.98) 210 | 211 | return False 212 | 213 | 214 | class WeightFix: 215 | def __init__(self, weights): 216 | self.weights = weights 217 | 218 | def __call__(self, context): 219 | selected = context["selected"] 220 | stra = context['strategy'] 221 | N = len(selected) 222 | if N != len(self.weights): 223 | logger.error('标的个数与权重个数不等') 224 | return True 225 | 226 | for data, w in zip(selected, self.weights): 227 | stra.order_target_percent(data, w) 228 | return False 229 | 230 | 231 | from .algo_utils import * 232 | 233 | 234 | class WeightRP: 235 | def __init__(self, returns_df, method=None, half=False): 236 | self.returns_df = returns_df 237 | self.method = method 238 | self.half = half 239 | 240 | def __call__(self, context): 241 | N = 240 242 | 243 | def get_train_set(change_time, df): 244 | """返回训练样本数据""" 245 | # change_time: 调仓时间 246 | change_time = pd.to_datetime(change_time) 247 | df = df.loc[df.index < change_time] 248 | df = df.iloc[-N:] # 每个调仓前240个交易日 249 | return df 250 | 251 | selected = context["selected"] # select算子返回的需要分配仓位的 data集合 252 | stra = context['strategy'] 253 | 254 | dt = stra.get_current_dt() 255 | # print(dt) 256 | sub_df = get_train_set(dt, self.returns_df) 257 | 258 | one_cov_matrix = None 259 | if self.half: 260 | one_cov_matrix = calculate_half_cov_matrix(sub_df) 261 | else: 262 | one_cov_matrix = np.matrix(sub_df.cov() * N) 263 | 264 | # 1.计算协方差: 取调仓日 前N=240个交易日数据, one_cov_matrix = returns_df.cov()*240,return np.matrix(one_cov_matrix) 265 | 266 | # 2.计算RP权重 267 | weights = None 268 | if self.method and self.method == 'pca': 269 | weights = calculate_portfolio_weight(one_cov_matrix, risk_budget_objective=pca_risk_parity) 270 | else: 271 | weights = calculate_portfolio_weight(one_cov_matrix, risk_budget_objective=naive_risk_parity) 272 | # print(weights) 273 | 274 | # 按动量 加减分 275 | 276 | K = 10 277 | new_weights = [] 278 | for data, w in zip(selected, weights): 279 | mom = stra.inds[data]['mom'][0] 280 | if mom >= 0.08: 281 | 
new_weights.append(w * K) 282 | elif mom < -0.0: 283 | new_weights.append(w / K) 284 | else: 285 | new_weights.append(w) 286 | 287 | new_weights = [w / sum(new_weights) for w in new_weights] 288 | print(weights, new_weights) 289 | 290 | for data, w in zip(selected, new_weights): 291 | stra.order_target_percent(data, w * 0.95) 292 | 293 | 294 | def run_algos(context, algo_list): 295 | for algo in algo_list: 296 | if algo(context) is True: # 如果algo返回True,直接不运行,本次不调仓 297 | return 298 | 299 | if 'selected' in context: 300 | del context['selected'] 301 | -------------------------------------------------------------------------------- /engine/strategy/examples/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/strategy/examples/BUILD -------------------------------------------------------------------------------- /engine/strategy/examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/engine/strategy/examples/__init__.py -------------------------------------------------------------------------------- /engine/strategy/examples/strategy_bolling.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | from datetime import datetime 3 | 4 | from engine.bt_engine import BacktraderEngine 5 | from engine.strategy.strategy_base import StrategyBase 6 | import backtrader as bt 7 | from loguru import logger 8 | 9 | 10 | class StrategyBolling(StrategyBase): 11 | def __init__(self): 12 | self.inds = {} 13 | for data in self.datas: 14 | self.inds[data] = {} 15 | top = bt.indicators.BollingerBands(data, period=20).top 16 | self.inds[data]['buy'] = data.close - top 17 | bot = bt.indicators.BollingerBands(data, period=20).bot 18 | self.inds[data]['sell'] = data.close - bot 19 | 20 | def next(self): 21 | # 判断当前已经持仓 22 | to_buy = [] 23 | to_sell = [] 24 | holding = [] 25 | for data, ind in self.inds.items(): 26 | if ind['buy'][0] > 0: 27 | to_buy.append(data) 28 | 29 | if ind['sell'][0] < 0: 30 | to_sell.append(data) 31 | 32 | if self.getposition(data).size > 0: 33 | holding.append(data) 34 | for sell in to_sell: 35 | if self.getposition(sell).size > 0: 36 | logger.debug('清仓' + sell.p.name) 37 | self.close(sell) 38 | 39 | new_hold = list(set(to_buy + holding)) 40 | 41 | for data in to_sell: 42 | if data in new_hold: 43 | new_hold.remove(data) 44 | 45 | K = 1 46 | if len(new_hold) > K: 47 | data_roc = {} 48 | for item in new_hold: 49 | data_roc[item] = self.inds[item][0] 50 | #排序 51 | new_hold = sorted(data_roc.items(), key=lambda x: x[1], reverse=True) 52 | new_hold = new_hold[:K] 53 | new_hold = [item[0] for item in new_hold] 54 | 55 | 56 | # 等权重分配 todo: 已持仓的应应该不变,对cash对新增的等权分配 57 | if len(new_hold) > 0: 58 | weight = 1 / len(new_hold) 59 | for data in new_hold: 60 | self.order_target_percent(data, weight*0.99) 61 | 62 | 63 | if __name__ == '__main__': 64 | 65 | e = BacktraderEngine(benchmark='399006.SZ',start=datetime(2010, 6, 26), end=datetime(2020, 12, 31)) 66 | for code in ['159915.SZ']: 67 | e.add_arctic_data(code) 68 | 69 | e.cerebro.addstrategy(StrategyBolling) 70 | e.run() 71 | e.cerebro.plot(iplot=False) 72 | e.analysis() 73 | -------------------------------------------------------------------------------- /engine/strategy/examples/strategy_portfolio_equal_weights.py: 
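The example scripts that follow wire the algos defined above into StratgeyAlgo. The protocol is the one implemented by run_algos: every algo is a callable that receives a shared context dict, and any algo returning True aborts the current bar's rebalance. A minimal sketch of that flow (strategy_instance is a placeholder for the running bt.Strategy instance, not a name from this repo):

from engine.strategy.algos import RunQuarterly, SelectAll, WeightEqually

context = {'strategy': strategy_instance}      # StratgeyAlgo.next() builds this dict on every bar
for algo in [RunQuarterly(), SelectAll(), WeightEqually()]:
    if algo(context) is True:                  # RunQuarterly returns True while still in the same quarter
        break                                  # so no rebalance happens on this bar
# otherwise SelectAll has filled context['selected'] and WeightEqually has placed the orders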
-------------------------------------------------------------------------------- 1 | from engine.datafeed.datafeed_csv import feed 2 | from engine.bt_engine import BacktraderEngine 3 | from engine.strategy.stragegy_algo import StratgeyAlgo 4 | from engine.strategy.algos import * 5 | from datetime import datetime 6 | 7 | if __name__ == '__main__': 8 | codes = ['159928.SZ', '510050.SH', '512010.SH', '513100.SH', '518880.SH', '511220.SH', '511010.SH', 9 | '161716.SZ'] 10 | # weights = [0.03, 0.06, 0.08, 0.05, 0.1, 0.32, 0.26, 0.09] 11 | 12 | e = BacktraderEngine(start=datetime(2016, 1, 1), end=datetime(2020, 12, 31)) 13 | for code in codes: 14 | e.add_arctic_data(code) 15 | 16 | algos = [ 17 | #RunOnce(), 18 | RunQuarterly(), 19 | SelectAll(), 20 | #WeightFix(weights) 21 | WeightEqually() 22 | ] 23 | 24 | e.cerebro.addstrategy(StratgeyAlgo, algos=algos) 25 | e.run() 26 | e.analysis() 27 | -------------------------------------------------------------------------------- /engine/strategy/examples/strategy_portfolio_fix_weights.py: -------------------------------------------------------------------------------- 1 | from engine.datafeed.datafeed_csv import feed 2 | from engine.bt_engine import BacktraderEngine 3 | from engine.strategy.stragegy_algo import StratgeyAlgo 4 | from engine.strategy.algos import * 5 | from datetime import datetime 6 | 7 | if __name__ == '__main__': 8 | codes = ['159928.SZ', '510050.SH', '512010.SH', '513100.SH', '518880.SH', '511220.SH', '511010.SH', 9 | '161716.SZ'] 10 | weights = [0.03, 0.06, 0.08, 0.05, 0.1, 0.32, 0.26, 0.09] 11 | 12 | e = BacktraderEngine(start=datetime(2016, 1, 1), end=datetime(2020, 12, 31)) 13 | for code in codes: 14 | e.add_arctic_data(code) 15 | 16 | algos = [ 17 | #RunOnce(), 18 | RunQuarterly(), 19 | SelectAll(), 20 | WeightFix(weights) 21 | ] 22 | 23 | e.cerebro.addstrategy(StratgeyAlgo, algos=algos) 24 | e.run() 25 | e.analysis() 26 | -------------------------------------------------------------------------------- /engine/strategy/examples/strategy_portfolio_rp.py: -------------------------------------------------------------------------------- 1 | from engine.datafeed.datafeed_csv import feed 2 | from engine.bt_engine import BacktraderEngine 3 | from engine.strategy.stragegy_algo import StratgeyAlgo 4 | from engine.strategy.algos import * 5 | from datetime import datetime 6 | from engine.datafeed.datafeed_arctic import ArcticDataFeed 7 | 8 | if __name__ == '__main__': 9 | codes = ['159928.SZ', '510050.SH', '512010.SH', '513100.SH', '518880.SH', '511220.SH', '511010.SH', 10 | '161716.SZ'] 11 | # weights = [0.03, 0.06, 0.08, 0.05, 0.1, 0.32, 0.26, 0.09] 12 | returns_df = ArcticDataFeed().get_returns_df_ordered(codes) 13 | #print(returns_df) 14 | 15 | end = datetime(2020, 12, 31) 16 | e = BacktraderEngine(start=datetime(2016, 1, 1)) 17 | for code in codes: 18 | e.add_arctic_data(code) 19 | 20 | algos = [ 21 | #RunOnce(), 22 | RunQuarterly(), 23 | SelectAll(), 24 | #WeightFix(weights) 25 | WeightRP(returns_df=returns_df, method='pca', half=True) 26 | ] 27 | 28 | e.cerebro.addstrategy(StratgeyAlgo, algos=algos, algos_init=[AddIndicators()]) 29 | e.run() 30 | e.analysis() 31 | -------------------------------------------------------------------------------- /engine/strategy/examples/strategy_roc.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | from datetime import datetime 3 | 4 | from engine.bt_engine import BacktraderEngine 5 | from engine.strategy.strategy_base import StrategyBase 6 | 
import backtrader as bt 7 | from loguru import logger 8 | 9 | 10 | class StrategyRoc(StrategyBase): 11 | def __init__(self): 12 | self.inds = {} 13 | for data in self.datas: 14 | self.inds[data] = bt.ind.RateOfChange(data, period=20) 15 | 16 | def next(self): 17 | # 判断当前已经持仓 18 | to_buy = [] 19 | to_sell = [] 20 | holding = [] 21 | for data, roc in self.inds.items(): 22 | if roc[0] > 0.08: 23 | to_buy.append(data) 24 | 25 | if roc[0] < -0.: 26 | to_sell.append(data) 27 | 28 | if self.getposition(data).size > 0: 29 | holding.append(data) 30 | for sell in to_sell: 31 | if self.getposition(sell).size > 0: 32 | logger.debug('清仓' + sell.p.name) 33 | self.close(sell) 34 | 35 | new_hold = list(set(to_buy + holding)) 36 | 37 | for data in to_sell: 38 | if data in new_hold: 39 | new_hold.remove(data) 40 | 41 | K = 1 42 | if len(new_hold) > K: 43 | data_roc = {} 44 | for item in new_hold: 45 | data_roc[item] = self.inds[item][0] 46 | #排序 47 | new_hold = sorted(data_roc.items(), key=lambda x: x[1], reverse=True) 48 | new_hold = new_hold[:K] 49 | new_hold = [item[0] for item in new_hold] 50 | 51 | 52 | # 等权重分配 todo: 已持仓的应应该不变,对cash对新增的等权分配 53 | if len(new_hold) > 0: 54 | weight = 1 / len(new_hold) 55 | for data in new_hold: 56 | self.order_target_percent(data, weight*0.99) 57 | 58 | 59 | if __name__ == '__main__': 60 | 61 | e = BacktraderEngine(start=datetime(2012, 6, 26), end=datetime(2020, 12, 31)) 62 | for code in ['159915.SZ']: 63 | e.add_arctic_data(code) 64 | 65 | e.cerebro.addstrategy(StrategyRoc) 66 | e.run() 67 | #e.cerebro.plot(iplot=False) 68 | e.analysis() 69 | -------------------------------------------------------------------------------- /engine/strategy/examples/strategy_roc_portfolio.py: -------------------------------------------------------------------------------- 1 | # encoding:utf-8 2 | from datetime import datetime 3 | 4 | from engine.bt_engine import BacktraderEngine 5 | from engine.strategy.strategy_base import StrategyBase 6 | import backtrader as bt 7 | from loguru import logger 8 | 9 | 10 | class StrategyRocPortfolio(StrategyBase): 11 | def __init__(self): 12 | self.inds = {} 13 | for data in self.datas: 14 | self.inds[data] = bt.ind.RateOfChange(data, period=20) 15 | 16 | def next(self): 17 | # 判断当前已经持仓 18 | to_buy = [] 19 | to_sell = [] 20 | holding = [] 21 | for data, roc in self.inds.items(): 22 | if roc[0] > 0.08: 23 | to_buy.append(data) 24 | 25 | if roc[0] < -0.: 26 | to_sell.append(data) 27 | 28 | if self.getposition(data).size > 0: 29 | holding.append(data) 30 | for sell in to_sell: 31 | if self.getposition(sell).size > 0: 32 | logger.debug('清仓' + sell.p.name) 33 | self.close(sell) 34 | 35 | new_hold = list(set(to_buy + holding)) 36 | 37 | for data in to_sell: 38 | if data in new_hold: 39 | new_hold.remove(data) 40 | 41 | K = 1 42 | if len(new_hold) > K: 43 | data_roc = {} 44 | for item in new_hold: 45 | data_roc[item] = self.inds[item][0] 46 | # 排序 47 | new_hold = sorted(data_roc.items(), key=lambda x: x[1], reverse=True) 48 | new_hold = new_hold[:K] 49 | new_hold = [item[0] for item in new_hold] 50 | 51 | # 等权重分配 todo: 已持仓的应应该不变,对cash对新增的等权分配 52 | if len(new_hold) > 0: 53 | weight = 1 / len(new_hold) 54 | for data in new_hold: 55 | self.order_target_percent(data, weight * 0.99) 56 | 57 | 58 | if __name__ == '__main__': 59 | 60 | e = BacktraderEngine(start=datetime(2015, 1, 1), end=datetime(2022, 12, 31)) 61 | for code in ['511220.SH', '512010.SH', '518880.SH', '163415.SZ', '159928.SZ', '161903.SZ', '513100.SH']: 62 | e.add_arctic_data(code) 63 | 64 | 
e.cerebro.addstrategy(StrategyRocPortfolio) 65 | e.run() 66 | # e.cerebro.plot(iplot=False) 67 | e.analysis() 68 | -------------------------------------------------------------------------------- /engine/strategy/stragegy_algo.py: -------------------------------------------------------------------------------- 1 | # encoding:utf8 2 | 3 | from pathlib import Path 4 | import sys 5 | TOP_DIR = Path(__file__).parent.parent.joinpath("../engine") 6 | sys.path.append(TOP_DIR) 7 | 8 | import backtrader as bt 9 | from .algos import * 10 | from loguru import logger 11 | from .strategy_base import StrategyBase 12 | 13 | class StratgeyAlgo(StrategyBase): 14 | def __init__(self, algos, algos_init=None): 15 | self.inds = {} 16 | if algos_init: 17 | for algo in algos_init: 18 | algo(self) 19 | 20 | self.algos = algos 21 | 22 | def next(self): 23 | context = { 24 | 'strategy': self 25 | } 26 | run_algos(context, self.algos) 27 | -------------------------------------------------------------------------------- /engine/strategy/stragegy_buyhold.py: -------------------------------------------------------------------------------- 1 | # encoding:utf8 2 | import backtrader as bt 3 | from .algos import * 4 | from loguru import logger 5 | from .strategy_base import StratgeyAlgoBase 6 | 7 | 8 | class StratgeyBuyHold(StratgeyAlgoBase): 9 | def __init__(self, weights=None): 10 | self.algos = [ 11 | RunOnce(), 12 | SelectAll(), 13 | ] 14 | 15 | -------------------------------------------------------------------------------- /engine/strategy/strategy_base.py: -------------------------------------------------------------------------------- 1 | # encoding: utf8 2 | import backtrader as bt 3 | from loguru import logger 4 | import pandas as pd 5 | 6 | 7 | class StrategyBase(bt.Strategy): 8 | def log(self, txt, dt=None): 9 | dt = dt or self.datas[0].datetime.date(0) 10 | logger.info('%s, %s' % (dt.isoformat(), txt)) 11 | 12 | # 取当前的日期 13 | def get_current_dt(self): 14 | return self.datas[0].datetime.date(0).strftime('%Y-%m-%d') 15 | 16 | # 取当前持仓的data列表 17 | def get_current_holding_datas(self): 18 | holdings = [] 19 | for data in self.datas: 20 | if self.getposition(data).size > 0: 21 | holdings.append(data) 22 | return holdings 23 | 24 | # 打印订单日志 25 | def notify_order(self, order): 26 | 27 | return 28 | 29 | order_status = ['Created', 'Submitted', 'Accepted', 'Partial', 30 | 'Completed', 'Canceled', 'Expired', 'Margin', 'Rejected'] 31 | # 未被处理的订单 32 | if order.status in [order.Submitted, order.Accepted]: 33 | return 34 | self.log('未处理订单:订单号:%.0f, 标的: %s, 状态状态: %s' % (order.ref, 35 | order.data._name, 36 | order_status[order.status])) 37 | return 38 | # 已经处理的订单 39 | if order.status in [order.Partial, order.Completed]: 40 | return 41 | if order.isbuy(): 42 | self.log( 43 | 'BUY EXECUTED, 状态: %s, 订单号:%.0f, 标的: %s, 数量: %.2f, 价格: %.2f, 成本: %.2f, 手续费 %.2f' % 44 | (order_status[order.status], # 订单状态 45 | order.ref, # 订单编号 46 | order.data._name, # 股票名称 47 | order.executed.size, # 成交量 48 | order.executed.price, # 成交价 49 | order.executed.value, # 成交额 50 | order.executed.comm)) # 佣金 51 | else: # Sell 52 | self.log( 53 | 'SELL EXECUTED, status: %s, ref:%.0f, name: %s, Size: %.2f, Price: %.2f, Cost: %.2f, Comm %.2f' % 54 | (order_status[order.status], 55 | order.ref, 56 | order.data._name, 57 | order.executed.size, 58 | order.executed.price, 59 | order.executed.value, 60 | order.executed.comm)) 61 | 62 | elif order.status in [order.Canceled, order.Margin, order.Rejected, order.Expired]: 63 | # order.Margin资金不足,订单无法成交 64 | # 订单未完成 65 | 
self.log('未完成订单,订单号:%.0f, 标的 : %s, 订单状态: %s' % ( 66 | order.ref, order.data._name, order_status[order.status])) 67 | 68 | self.order = None 69 | 70 | def notify_trade(self, trade): 71 | logger.debug('trade......', trade.status) 72 | # 交易刚打开时 73 | if trade.justopened: 74 | self.log('开仓, 标的: %s, 股数: %.2f,价格: %.2f' % ( 75 | trade.getdataname(), trade.size, trade.price)) 76 | # 交易结束 77 | elif trade.isclosed: 78 | self.log('平仓, 标的: %s, GROSS %.2f, NET %.2f, 手续费 %.2f' % ( 79 | trade.getdataname(), trade.pnl, trade.pnlcomm, trade.commission)) 80 | # 更新交易状态 81 | else: 82 | self.log('交易更新, 标的: %s, 仓位: %.2f,价格: %.2f' % ( 83 | trade.getdataname(), trade.size, trade.price)) 84 | 85 | 86 | class StratgeyAlgoBase(StrategyBase): 87 | def __init__(self, algo_list=None, features=None, extra=None): 88 | self.algos = algo_list 89 | self.features = features 90 | self.extra = extra 91 | 92 | def next(self): 93 | context = { 94 | 'strategy': self, 95 | 'features': self.features, 96 | 'extra': self.extra 97 | } 98 | 99 | for algo in self.algos: 100 | if algo(context) is True: # 如果algo返回True,直接不运行,本次不调仓 101 | return 102 | 103 | 104 | class StrategyAdjustTable(StrategyBase): 105 | def __init__(self, weight_table): 106 | self.table = weight_table 107 | self.trade_dates = pd.to_datetime(self.table.index.unique()).tolist() 108 | 109 | def next(self): 110 | dt = self.datas[0].datetime.date(0) # 获取当前的回测时间点 111 | # 如果是调仓日,则进行调仓操作 112 | if dt in self.trade_dates: 113 | dt_weight = self.table[self.table.index == dt] 114 | codes = dt_weight['code'].tolist() 115 | for code in codes: 116 | w = dt_weight.query(f"code=='{code}'")['weight'].iloc[0] # 提取持仓权重 117 | data = self.getdatabyname(code) 118 | order = self.order_target_percent(data=data, target=w * 0.99) # 为减少可用资金不足的情况,留 5% 的现金做备用 119 | -------------------------------------------------------------------------------- /engine/strategy/strategy_picktime.py: -------------------------------------------------------------------------------- 1 | import backtrader as bt 2 | from .algos import * 3 | from loguru import logger 4 | 5 | from .strategy_base import StrategyBase 6 | 7 | 8 | class StrategyPickTime(StrategyBase): 9 | params = ( 10 | ('long', 252), 11 | ('short', 42), 12 | ) 13 | 14 | def __init__(self): 15 | # bt.ind.EMA(self.data, period=self.p.ema_period) 16 | self.long = bt.ind.SMA(period=self.p.long) 17 | self.short = bt.ind.SMA(period=self.p.short) 18 | 19 | def next(self): 20 | if self.short[0] > self.long[0]: 21 | self.buy() 22 | else: 23 | self.close() 24 | -------------------------------------------------------------------------------- /engine/strategy/strategy_rotation.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Author: Charmve yidazhang1@gmail.com 3 | Date: 2023-05-18 22:00:31 4 | LastEditors: Charmve yidazhang1@gmail.com 5 | LastEditTime: 2023-05-18 22:18:05 6 | FilePath: /Qbot/iQuant/engine/strategy/strategy_rotation.py 7 | Version: 1.0.1 8 | Blogs: charmve.blog.csdn.net 9 | GitHub: https://github.com/Charmve 10 | Description: 11 | 12 | Copyright (c) 2023 by Charmve, All Rights Reserved. 13 | Licensed under the MIT License. 
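Strategy summary: a 20-day rate of change is computed for every data feed and an
RSRS indicator on the first feed; positions are opened while RSRS > 1, closed
while RSRS < 0.8 (the ROC-based rules are present in next() but commented out),
and whatever remains selected is rebalanced to equal weights.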
14 | ''' 15 | # encoding:utf8 16 | 17 | from pathlib import Path 18 | import sys 19 | TOP_DIR = Path(__file__).parent.parent.joinpath("../engine") 20 | sys.path.append(TOP_DIR) 21 | 22 | import backtrader as bt 23 | from loguru import logger 24 | from indicator.indicator_rsrs import RSRS 25 | 26 | 27 | class StrategyRotation(bt.Strategy): 28 | params = dict( 29 | period=20, # 动量周期 30 | 31 | ) 32 | 33 | def __init__(self): 34 | # 指标计算 35 | self.inds = {} 36 | self.rsrs = RSRS(self.datas[0]) 37 | for data in self.datas: 38 | self.inds[data] = bt.ind.ROC(data, period=self.p.period) 39 | 40 | def next(self): 41 | # 计算to_buy,判断roc>0.02 42 | # 计算to_sell,判断roc<0 43 | # 判断当前已经持仓 44 | to_buy = [] 45 | to_sell = [] 46 | holding = [] 47 | for data, roc in self.inds.items(): 48 | #if roc[0] > 0.02: 49 | if self.rsrs[0] > 1: 50 | to_buy.append(data) 51 | 52 | #if roc[0] < 0: 53 | 54 | if self.rsrs[0] < 0.8: 55 | to_sell.append(data) 56 | 57 | if self.getposition(data).size > 0: 58 | holding.append(data) 59 | 60 | for sell in to_sell: 61 | if self.getposition(sell).size > 0: 62 | logger.debug('清仓'+sell.p.name) 63 | self.close(sell) 64 | 65 | new_hold = list(set(to_buy + holding)) 66 | for data in to_sell: 67 | if data in new_hold: 68 | new_hold.remove(data) 69 | 70 | 71 | 72 | if len(new_hold) == 0: 73 | #logger.info('新仓位为空') 74 | return 75 | 76 | # 等权重分配 todo: 已持仓的应应该不变,对cash对新增的等权分配 77 | weight = 1 / len(new_hold) 78 | for data in new_hold: 79 | self.order_target_percent(data, weight) 80 | 81 | 82 | 83 | 84 | 85 | -------------------------------------------------------------------------------- /engine/strategy/strategy_turtle.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import sys 3 | TOP_DIR = Path(__file__).parent.parent.joinpath("../engine") 4 | sys.path.append(TOP_DIR) 5 | 6 | import backtrader as bt 7 | from engine.strategy.strategy_base import StrategyBase 8 | 9 | 10 | class TurtleTradingStrategy(StrategyBase): 11 | params = dict( 12 | N1=20, # ���氲ͨ���Ϲ��t 13 | N2=10, # ���氲ͨ���¹��t 14 | ) 15 | 16 | def __init__(self): 17 | self.order = None 18 | self.buy_count = 0 # ��¼������� 19 | self.last_price = 0 # ��¼����۸� 20 | # ׼����һ����Ļ���300������Լ��close��high��low �������� 21 | self.close = self.datas[0].close 22 | self.high = self.datas[0].high 23 | self.low = self.datas[0].low 24 | # �������氲ͨ���Ϲ죺��ȥ20�յ���߼� 25 | self.DonchianH = bt.ind.Highest(self.high(-1), period=self.p.N1, subplot=True) 26 | # �������氲ͨ���¹죺��ȥ10�յ���ͼ� 27 | self.DonchianL = bt.ind.Lowest(self.low(-1), period=self.p.N2, subplot=True) 28 | # �������氲ͨ���Ϲ�ͻ�ƣ�close>DonchianH��ȡֵΪ1.0����֮Ϊ -1.0 29 | self.CrossoverH = bt.ind.CrossOver(self.close(0), self.DonchianH, subplot=False) 30 | # �������氲ͨ���¹�ͻ��: 31 | self.CrossoverL = bt.ind.CrossOver(self.close(0), self.DonchianL, subplot=False) 32 | # ���� ATR 33 | self.TR = bt.ind.Max((self.high(0) - self.low(0)), # ������߼�-������ͼ� 34 | abs(self.high(0) - self.close(-1)), # abs(������߼�?ǰһ�����̼�) 35 | abs(self.low(0) - self.close(-1))) # abs(������ͼ�-ǰһ�����̼�) 36 | self.ATR = bt.ind.SimpleMovingAverage(self.TR, period=self.p.N1, subplot=False) 37 | # ���� ATR��ֱ�ӵ��� talib ��ʹ��ǰ��Ҫ��װ python3 -m pip install TA-Lib 38 | # self.ATR = bt.talib.ATR(self.high, self.low, self.close, timeperiod=self.p.N1, subplot=True) 39 | 40 | def next(self): 41 | # ������ж�����ִ���У��Ͳ����µIJ�λ���� 42 | if self.order: 43 | return 44 | 45 | # �����ǰ���ж൥ 46 | if self.position.size > 0: 47 | # 
�൥�Ӳ�:�۸�����������۵�0.5��ATR�ҼӲִ������ڵ���3�� 48 | if self.datas[0].close > self.last_price + 0.5 * self.ATR[0] and self.buy_count <= 4: 49 | print('if self.datas[0].close >self.last_price + 0.5*self.ATR[0] and self.buy_count <= 4:') 50 | print('self.buy_count', self.buy_count) 51 | # ���㽨�ֵ�λ��self.ATR*�ڻ���Լ����300*��֤�����0.1 52 | self.buy_unit = max((self.broker.getvalue() * 0.01) / self.ATR[0], 1) 53 | self.buy_unit = int(self.buy_unit) # ���׵�λΪ�� 54 | # self.sizer.p.stake = self.buy_unit 55 | self.order = self.buy(size=self.buy_unit) 56 | self.last_price = self.position.price # ��ȡ����۸� 57 | self.buy_count = self.buy_count + 1 58 | # �൥ֹ�𣺵��۸����2��ATRʱֹ��ƽ�� 59 | elif self.datas[0].close < (self.last_price - 2 * self.ATR[0]): 60 | print('elif self.datas[0].close < (self.last_price - 2*self.ATR[0]):') 61 | self.order = self.sell(size=abs(self.position.size)) 62 | self.buy_count = 0 63 | # �൥ֹӯ�����۸�ͻ��10����͵�ʱֹӯ�볡 ƽ�� 64 | elif self.CrossoverL < 0: 65 | print('self.CrossoverL < 0') 66 | self.order = self.sell(size=abs(self.position.size)) 67 | self.buy_count = 0 68 | 69 | # �����ǰ���пյ� 70 | 71 | else: # ���û�гֲ֣��ȴ��볡ʱ�� 72 | # �볡: �۸�ͻ���Ϲ����ҿղ�ʱ������ 73 | if self.CrossoverH > 0 and self.buy_count == 0: 74 | print('if self.CrossoverH > 0 and self.buy_count == 0:') 75 | # ���㽨�ֵ�λ��self.ATR*�ڻ���Լ����300*��֤�����0.1 76 | self.buy_unit = max((self.broker.getvalue() * 0.01) / self.ATR[0], 1) 77 | self.buy_unit = int(self.buy_unit) # ���׵�λΪ�� 78 | self.order = self.buy(size=self.buy_unit) 79 | self.last_price = self.position.price # ��¼����۸� 80 | self.buy_count = 1 # ��¼���ν��׼۸� 81 | # �볡: �۸�����¹����ҿղ�ʱ 82 | elif self.CrossoverL < 0 and self.buy_count == 0: 83 | print('self.CrossoverL < 0 and self.buy_count == 0') 84 | 85 | -------------------------------------------------------------------------------- /gui/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/gui/BUILD -------------------------------------------------------------------------------- /gui/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/gui/__init__.py -------------------------------------------------------------------------------- /gui/global_event.py: -------------------------------------------------------------------------------- 1 | class GlobalEvent: 2 | MSG_TYPE_SERIES = 1 3 | def __init__(self): 4 | self.observers = {} 5 | 6 | def add_observer(self,msg_type,observer): 7 | if msg_type in self.observers.keys(): 8 | self.observers[msg_type].append(observer) 9 | else: 10 | self.observers[msg_type] = [observer] 11 | 12 | def notify(self,msg_type,data): 13 | if msg_type in self.observers.keys(): 14 | for o in self.observers[msg_type]: 15 | o.handle_data(data) 16 | 17 | g = GlobalEvent() 18 | -------------------------------------------------------------------------------- /gui/gui_utils.py: -------------------------------------------------------------------------------- 1 | import wx 2 | def _pydate2wxdate(date): 3 | import datetime 4 | assert isinstance(date, (datetime.datetime, datetime.date)) 5 | tt = date.timetuple() 6 | dmy = (tt[2], tt[1] - 1, tt[0]) 7 | return wx.DateTimeFromDMY(*dmy) 8 | 9 | def _wxdate2pydate(date): 10 | import datetime 11 | assert isinstance(date, wx.DateTime) 12 | if date.IsValid(): 13 | ymd = map(int, 
date.FormatISODate().split('-')) 14 | return datetime.date(*ymd) 15 | else: 16 | return None -------------------------------------------------------------------------------- /gui/mainapp.py: -------------------------------------------------------------------------------- 1 | import wx,time 2 | import webbrowser as web 3 | from .page_timeseries import PageTimeSeries 4 | 5 | class MainWindow(wx.Frame): 6 | def __init__(self): 7 | displaySize = wx.DisplaySize() # (1920, 1080) 8 | displaySize = 0.85 * displaySize[0], 0.75 * displaySize[1] 9 | super().__init__(parent=None, title='AI量化投资平台', size=displaySize) 10 | self.init_statusbar() 11 | self.init_menu_bar() 12 | self.init_main_tabs() 13 | 14 | def init_main_tabs(self): 15 | #self.SetBackgroundColour(wx.GREEN) 16 | #创建水平boxsizer,并设置为平铺到整个窗口 17 | self.boxH = wx.BoxSizer(wx.HORIZONTAL) 18 | self.SetSizer(self.boxH) 19 | 20 | self.tabs = wx.Notebook(self) 21 | self.boxH.Add(self.tabs,1,wx.ALL | wx.EXPAND)#??todo propotion==1为何 22 | self.tabs.AddPage(PageTimeSeries(self.tabs),'时间序列分析') 23 | 24 | def on_menu(self,event): 25 | if event.Id == 1: 26 | web.open('https://danjuanapp.com/djmodule/value-center') 27 | if event.Id== 2: 28 | #self.l.load_data() 29 | web.open('https://www.jisilu.cn/') 30 | 31 | def init_menu_bar(self): 32 | # 创建窗口面板 33 | menuBar = wx.MenuBar(style=wx.MB_DOCKABLE) 34 | self.SetMenuBar(menuBar) 35 | 36 | files = wx.Menu() 37 | menuBar.Append(files, '&文件') 38 | 39 | tools = wx.Menu() 40 | menuBar.Append(tools, '&工具') 41 | 42 | help = wx.Menu() 43 | menuBar.Append(help, '&帮助') 44 | 45 | valuation = wx.MenuItem(tools, 1, '&蛋卷估值') 46 | tools.Append(valuation) 47 | self.Bind(wx.EVT_MENU, self.on_menu, valuation) 48 | 49 | tools.AppendSeparator() 50 | 51 | jisilu = wx.MenuItem(tools,2,'&集思录') 52 | tools.Append(jisilu) 53 | self.Bind(wx.EVT_MENU,self.on_menu,jisilu) 54 | 55 | def init_statusbar(self): 56 | self.statusBar = self.CreateStatusBar() # 创建状态条 57 | # 将状态栏分割为3个区域,比例为2:1 58 | self.statusBar.SetFieldsCount(3) 59 | self.statusBar.SetStatusWidths([-2, -1, -1]) 60 | t = time.localtime(time.time()) 61 | self.SetStatusText("公众号:ailabx(七年实现财富自由)", 0) 62 | self.SetStatusText("当前版本:%s" % '1.0.0', 1) 63 | #self.SetStatusText(time.strftime("%Y-%B-%d %I:%M:%S", t), 2) 64 | 65 | class MainApp(wx.App): 66 | def OnInit(self): 67 | frame = MainWindow() 68 | frame.Show(True) 69 | frame.Center() 70 | self.SetTopWindow(frame) 71 | return True 72 | 73 | if __name__ == '__main__': 74 | app = MainApp() 75 | app.MainLoop() -------------------------------------------------------------------------------- /gui/mainframe.py: -------------------------------------------------------------------------------- 1 | import wx 2 | from gui.panels.panel_backtest import PanelBacktest 3 | from gui.widgets.widget_web import WebPanel 4 | 5 | 6 | def OnAbout(event): 7 | wx.MessageBox("公众号:ailabx", 8 | "关于 AI量化投研平台", 9 | wx.OK | wx.ICON_INFORMATION) 10 | 11 | 12 | def OnExit(win, event): 13 | win.Close(True) 14 | 15 | 16 | def make_menubar(win): 17 | # Make a file menu with Hello and Exit items 18 | fileMenu = wx.Menu() 19 | # The "\t..." 
syntax defines an accelerator key that also triggers 20 | # the same event 21 | helloItem = fileMenu.Append(-1, "&Hello...\tCtrl-H", 22 | "Help string shown in status bar for this menu item") 23 | fileMenu.AppendSeparator() 24 | # When using a stock ID we don't need to specify the menu item's 25 | # label 26 | exitItem = fileMenu.Append(wx.ID_EXIT, '退出') 27 | 28 | # Now a help menu for the about item 29 | helpMenu = wx.Menu() 30 | aboutItem = helpMenu.Append(wx.ID_ABOUT, '本软件') 31 | 32 | menuBar = wx.MenuBar() 33 | menuBar.Append(fileMenu, "&文件") 34 | menuBar.Append(helpMenu, "&帮助") 35 | 36 | win.Bind(wx.EVT_MENU, OnAbout, aboutItem) 37 | win.Bind(wx.EVT_MENU, OnExit, exitItem) 38 | return menuBar 39 | 40 | 41 | class MainFrame(wx.Frame): 42 | def __init__(self, *args, **kw): 43 | super(MainFrame, self).__init__(*args, **kw) 44 | # 设置默认大小 45 | self.SetSize(wx.Size(900, 600)) 46 | # 屏幕居中显示 47 | self.Centre() 48 | self.SetMenuBar(make_menubar(self)) 49 | self.Maximize() 50 | 51 | self.CreateStatusBar() 52 | self.SetStatusText("欢迎使用AI智能量化投研平台!请关注公众号:ailabx") 53 | 54 | # 主窗口notebook 55 | self.m_notebook = wx.Notebook(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0) 56 | 57 | web = WebPanel(self.m_notebook) 58 | self.m_notebook.AddPage(web, '官方网站', True) 59 | web.show_url('http://www.ailabx.com') 60 | 61 | self.m_notebook.AddPage(PanelBacktest(self.m_notebook), "可视化回测系统", True) 62 | 63 | -------------------------------------------------------------------------------- /gui/page_timeseries.py: -------------------------------------------------------------------------------- 1 | import wx 2 | from .panels.results import ResultsPanel 3 | from .panels.actions import TimeSeriesPanel 4 | from .global_event import g 5 | class PageTimeSeries(wx.Panel): 6 | def __init__(self, parent): 7 | wx.Panel.__init__(self, parent) 8 | self.init_ui() 9 | 10 | def init_ui(self): 11 | 12 | self.vbox = wx.BoxSizer(wx.VERTICAL) 13 | self.SetSizer(self.vbox) 14 | 15 | timeseries = TimeSeriesPanel(self) 16 | self.vbox.Add(timeseries, -1, wx.EXPAND) 17 | 18 | results = ResultsPanel(self) 19 | self.vbox.Add(results,-1,wx.EXPAND) 20 | g.add_observer(g.MSG_TYPE_SERIES, results) 21 | 22 | 23 | #colour = [(160, 255, 204), (153, 204, 255), (151, 253, 225), ] 24 | #self.SetBackgroundColour(colour[0]) 25 | #self.tx1 = wx.StaticText(self, -1, "使用说明", (355, 45), 26 | # (100, -1), wx.ALIGN_CENTER) 27 | #font = wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD) 28 | #self.tx1.SetFont(font) -------------------------------------------------------------------------------- /gui/panels/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/gui/panels/BUILD -------------------------------------------------------------------------------- /gui/panels/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/gui/panels/__init__.py -------------------------------------------------------------------------------- /gui/panels/action_rolling.py: -------------------------------------------------------------------------------- 1 | import wx 2 | 3 | class RuleMgr(wx.Panel): 4 | def __init__(self,parent): 5 | super(RuleMgr, self).__init__(parent) 6 | 7 | hbox = wx.BoxSizer(wx.HORIZONTAL) 8 | #self.list = wx.ListCtrl(self) 9 | self.list = wx.ListBox(self, -1,size=(200,80), choices=[], style=wx.LB_SINGLE | 
wx.LB_HSCROLL | wx.LB_ALWAYS_SB | wx.LB_SORT) 10 | hbox.Add(self.list) 11 | 12 | vbox = wx.BoxSizer(wx.VERTICAL) 13 | hbox.Add(vbox) 14 | 15 | btn_add = wx.Button(self,label='添加') 16 | self.Bind(wx.EVT_BUTTON,self.add,btn_add) 17 | btn_modify = wx.Button(self, label='修改') 18 | self.Bind(wx.EVT_BUTTON, self.modify, btn_modify) 19 | btn_del = wx.Button(self,label='删除') 20 | self.Bind(wx.EVT_BUTTON, self.delete, btn_del) 21 | 22 | vbox.Add(btn_add) 23 | vbox.Add(btn_modify) 24 | vbox.Add(btn_del) 25 | 26 | self.SetSizer(hbox) 27 | 28 | def get_rules(self): 29 | return self.list.GetStrings() 30 | 31 | def add(self,event): 32 | dlg = wx.TextEntryDialog(None, u"请在下面文本框中输入规则:", "请输入规则", "MOM(20)>0.02") 33 | if dlg.ShowModal() == wx.ID_OK: 34 | value = dlg.GetValue() # 获取文本框中输入的值 35 | self.list.Append(value) 36 | #self.list.Insert(0,message) 37 | 38 | def modify(self,event): 39 | item = self.list.GetSelections()[0] 40 | value = self.list.GetStrings()[item] 41 | dlg = wx.TextEntryDialog(None, u"请在下面文本框中输入规则:", "请输入规则", value) 42 | if dlg.ShowModal() == wx.ID_OK: 43 | deleted_item = self.list.GetSelection() 44 | self.list.Delete(deleted_item) 45 | 46 | value = dlg.GetValue() # 获取文本框中输入的值 47 | self.list.Append(value) 48 | 49 | def delete(self,event): 50 | deleted_item = self.list.GetSelection() 51 | self.list.Delete(deleted_item) 52 | 53 | 54 | class ActionRollingPanel(wx.Panel): 55 | def __init__(self,parent): 56 | super(ActionRollingPanel, self).__init__(parent) 57 | self.init_ui() 58 | 59 | def init_ui(self): 60 | vbox = wx.BoxSizer(wx.VERTICAL) 61 | self.SetSizer(vbox) 62 | hbox = wx.BoxSizer(wx.HORIZONTAL) 63 | hbox2 = wx.BoxSizer(wx.HORIZONTAL) 64 | vbox.Add(hbox) 65 | vbox.Add(hbox2) 66 | 67 | hbox.Add(wx.StaticText(self, -1, label='请输入基金代码:'), 0) 68 | self.codes = wx.TextCtrl(self, size=(300, 60)) 69 | self.codes.SetValue('510300.SH;510500.SH') 70 | hbox.Add(self.codes, 0, wx.ALL | wx.EXPAND, 5) 71 | 72 | gs = wx.GridSizer(1, 3, 5, 5) 73 | vbox.Add(gs, 0, wx.EXPAND) 74 | 75 | rule_buy = RuleMgr(self) 76 | rule_sell = RuleMgr(self) 77 | rule_orderby = RuleMgr(self) 78 | # vbox.Add(rule_buy) 79 | gs.Add(rule_buy, 0, wx.EXPAND) 80 | gs.Add(rule_sell, 0, wx.EXPAND) 81 | gs.Add(rule_orderby, 0, wx.EXPAND) 82 | self.rule_buy = rule_buy 83 | self.rule_sell = rule_sell 84 | self.rule_orderby = rule_orderby -------------------------------------------------------------------------------- /gui/panels/actions.py: -------------------------------------------------------------------------------- 1 | import wx,os,wx.adv 2 | #from .backtest import CreateStrategy 3 | #import pandas as pd 4 | #from datax.data_loader import load_codes,merge_dfs,dfs2rate 5 | #from ...datax.data_loader import PdUtils 6 | #from datax.performance import PerformanceUtils 7 | from .. 
import gui_utils 8 | import pandas as pd 9 | #from ...engine.runner import Runner 10 | from .action_rolling import ActionRollingPanel 11 | 12 | from ..global_event import g 13 | 14 | class BacktestPanel(wx.Panel): 15 | def __init__(self,parent): 16 | super(BacktestPanel, self).__init__(parent) 17 | self.init_ui() 18 | def init_ui(self): 19 | vbox = wx.BoxSizer(wx.VERTICAL) 20 | self.SetSizer(vbox) 21 | hbox = wx.BoxSizer(wx.HORIZONTAL) 22 | vbox.Add(hbox,0,wx.ALL,5) 23 | 24 | self.date_start = wx.adv.DatePickerCtrl(self, id=-1, style=wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY) 25 | self.date_start.SetValue(wx.DateTime.FromDMY(1, 1, 2005)) 26 | self.date_end = wx.adv.DatePickerCtrl(self, id=-1, style=wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY) 27 | hbox.Add(wx.StaticText(self, -1, label='起始时间:'), 0) 28 | hbox.Add(self.date_start, 5) 29 | hbox.AddSpacer(20) 30 | hbox.Add(wx.StaticText(self, -1, label='结束时间:'), 0) 31 | hbox.Add(self.date_end, 5) 32 | self.btn = wx.Button(self, label="回测") 33 | hbox.Add(self.btn,0) 34 | self.Bind(wx.EVT_BUTTON, self.OnClick, self.btn) 35 | 36 | def add_callback(self,o): 37 | self.callback = o 38 | 39 | def OnClick(self,event): 40 | if self.callback: 41 | date_start = self.date_start.GetValue() 42 | date_start = gui_utils._wxdate2pydate(date_start) 43 | date_start = date_start.strftime('%Y%m%d') 44 | date_end = self.date_end.GetValue() 45 | date_end = gui_utils._wxdate2pydate(date_end) 46 | date_end = date_end.strftime('%Y%m%d') 47 | self.callback.onclick(date_start,date_end) 48 | 49 | class TimeSeriesPanel(wx.Panel): 50 | def __init__(self,parent): 51 | super(TimeSeriesPanel, self).__init__(parent) 52 | #self.SetMaxSize((2,100)) 53 | self.init_ui() 54 | 55 | def init_ui(self): 56 | vbox = wx.BoxSizer(wx.VERTICAL) 57 | self.SetSizer(vbox) 58 | # 水平盒子 59 | hbox = wx.BoxSizer(wx.HORIZONTAL) 60 | 61 | self.text_codes = wx.TextCtrl(self, -1, size=(200, 20), style=wx.ALIGN_LEFT) 62 | self.text_codes.SetValue('000300.SH;000905.SH') 63 | hbox.Add(wx.StaticText(self, -1, label="代码:"), 0, wx.ALL | wx.EXPAND, 5) 64 | hbox.Add(self.text_codes, 0, wx.ALL | wx.EXPAND, 5) 65 | 66 | hbox2 = wx.BoxSizer(wx.HORIZONTAL) 67 | 68 | self.date_start = wx.adv.DatePickerCtrl(self, id=-1, style=wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY) 69 | self.date_start.SetValue(wx.DateTime.FromDMY(1,1,2005)) 70 | self.date_end = wx.adv.DatePickerCtrl(self, id=-1, style=wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY) 71 | hbox2.Add(wx.StaticText(self, -1, label='起始时间:'), 0) 72 | hbox2.Add(self.date_start, 5) 73 | 74 | hbox2.AddSpacer(20) 75 | 76 | hbox2.Add(wx.StaticText(self, -1, label='结束时间:'), 0) 77 | hbox2.Add(self.date_end, 5) 78 | 79 | # 创建按钮 80 | self.btn_ana = wx.Button(self, label="分析") 81 | self.Bind(wx.EVT_BUTTON, self.OnClick, self.btn_ana) 82 | # 在水平盒子里添加查询按钮 83 | hbox2.AddSpacer(20) 84 | hbox2.Add(self.btn_ana, 0) 85 | 86 | vbox.Add(hbox, 0, wx.ALL, 5) 87 | vbox.Add(hbox2, 0, wx.ALL, 5) 88 | 89 | hbox3 = wx.BoxSizer(wx.HORIZONTAL) 90 | vbox.Add(hbox3,0,wx.ALL,5) 91 | 92 | self.btn_indicator = wx.Button(self, label="指标可视化") 93 | hbox3.Add(wx.StaticText(self, -1, label='请选择指标:'), 0) 94 | combo = wx.ComboBox(self, -1, pos=(50, 170), size=(150, -1), 95 | choices=['RSRS','标准RSRS'], style=wx.CB_READONLY) 96 | hbox3.Add(combo, 1) 97 | hbox3.Add(self.btn_indicator,1) 98 | self.Bind(wx.EVT_BUTTON, self.OnClick_indicator, self.btn_indicator) 99 | 100 | hbox4 = wx.BoxSizer(wx.HORIZONTAL) 101 | vbox.Add(hbox4, 0, wx.ALL, 5) 102 | 103 | self.btn_load = wx.Button(self, label="加载数据") 104 | 
self.btn_feature = wx.Button(self, label="特征提取") 105 | hbox4.Add(self.btn_load,1) 106 | hbox4.Add(self.btn_feature,2) 107 | 108 | self.Bind(wx.EVT_BUTTON, self.on_load, self.btn_load) 109 | self.Bind(wx.EVT_BUTTON, self.on_feature, self.btn_feature) 110 | 111 | def OnClick_indicator(self,event): 112 | pass 113 | 114 | def on_feature(self): 115 | pass 116 | 117 | def on_load(self,event): 118 | codes = self.text_codes.GetValue() 119 | codes = codes.split(';') 120 | #path = os.path.abspath(os.path.dirname(os.getcwd()) + os.path.sep + "datas") 121 | dfs = load_codes(codes) 122 | df_all = merge_dfs(dfs) 123 | print(df_all) 124 | self.df = df_all 125 | 126 | g.notify(g.MSG_TYPE_SERIES, { 127 | 'raw': df_all, 128 | #'corr': df_corr, 129 | #'plot': df_equity, 130 | #'yearly': df_years 131 | }) 132 | 133 | 134 | def OnClick(self,event): 135 | codes = self.text_codes.GetValue() 136 | codes = codes.split(';') 137 | 138 | date_start = self.date_start.GetValue() 139 | date_start = gui_utils._wxdate2pydate(date_start) 140 | date_start = date_start.strftime('%Y%m%d') 141 | date_end = self.date_end.GetValue() 142 | date_end = gui_utils._wxdate2pydate(date_end) 143 | date_end = date_end.strftime('%Y%m%d') 144 | 145 | dfs = load_codes(codes) 146 | print(dfs) 147 | df_all = merge_dfs(dfs) 148 | print(df_all) 149 | df_rates = dfs2rate(df_all) 150 | print(df_rates) 151 | 152 | 153 | 154 | #df_prices = df_prices[df_prices.index > date_start] 155 | df_rates.dropna(inplace=True) 156 | df_rates = df_rates[date_start:date_end] 157 | 158 | #for col in df_prices.columns: 159 | # df_prices[col] = df_prices[col].pct_change() 160 | 161 | df_equity = PerformanceUtils().rate2equity(df_rates) 162 | df_ratios,df_corr,df_years = PerformanceUtils().calc_rates(df_rates) 163 | 164 | g.notify(g.MSG_TYPE_SERIES,{ 165 | 'ratio':df_ratios, 166 | 'corr':df_corr, 167 | 'plot':df_equity, 168 | 'yearly':df_years 169 | }) 170 | 171 | class PortfolioPanel(wx.Panel): 172 | def __init__(self,parent): 173 | super(PortfolioPanel,self).__init__(parent) 174 | self.init_ui() 175 | 176 | def init_ui(self): 177 | vbox = wx.BoxSizer(wx.VERTICAL) 178 | self.SetSizer(vbox) 179 | hbox = wx.BoxSizer(wx.HORIZONTAL) 180 | hbox2 = wx.BoxSizer(wx.HORIZONTAL) 181 | vbox.Add(hbox) 182 | #vbox.Add(hbox2) 183 | 184 | hbox.Add(wx.StaticText(self, -1, label='请输入策略代码:'), 0) 185 | self.codes = wx.TextCtrl(self, size=(300, 200),style=wx.TE_MULTILINE) 186 | self.codes.SetValue(''' 187 | { 188 | 'universe': ['510300.SH', '159915.SZ'], 189 | 'benchmarks': ['510300.SH','159915.SZ'], 190 | 'factors': [('mom_20', 'Mom($close,20)'), ('buy_1', '$mom_20>0.02'), ('sell_1', '$mom_20<0')], 191 | 'factors_date': [('rank', 'Rank($mom_20)')], 192 | 'buy': (['buy_1'], 1), 193 | 'sell': (['sell_1'], 1), 194 | 'order_by': ('rank', 2) # 从大到小,取前2 195 | } 196 | ''') 197 | hbox.Add(self.codes) 198 | 199 | btn = wx.Button(self,-1,label='回测') 200 | hbox.Add(btn,0) 201 | self.Bind(wx.EVT_BUTTON,self.onclick,btn) 202 | ''' 203 | 204 | hbox.Add(self.codes, 0, wx.ALL | wx.EXPAND, 5) # 固定组合权重 205 | hbox.Add(wx.StaticText(self, -1, label='请输入权重(与基金数量一定,逗号分隔):'), 0) 206 | self.weights = wx.TextCtrl(self, size=(300, 60)) 207 | self.weights.SetValue('0.6,0.4') 208 | hbox.Add(self.weights, 0, wx.ALL | wx.EXPAND, 5) 209 | 210 | bkt = BacktestPanel(self) 211 | hbox2.Add(bkt, 0) 212 | bkt.add_callback(self) 213 | ''' 214 | 215 | def onclick(self,event): 216 | codes = self.codes.GetValue() 217 | if codes == '': 218 | wx.MessageBox('请输入策略代码!') 219 | self.codes.SetFocus() 220 | return 221 | 222 | 
strategy = eval(codes.strip()) 223 | print(strategy) 224 | 225 | #print(strategy) 226 | #strategy['benchmarks'] = ['000300.SH'] 227 | 228 | df_ratios, df_corr, df_years, df_equities = Runner().run(strategy) 229 | g.notify(g.MSG_TYPE_SERIES, { 230 | 'ratio': df_ratios, 231 | 'corr': df_corr, 232 | 'plot': df_equities, 233 | 'yearly': df_years 234 | }) 235 | 236 | 237 | 238 | class ActionsPanel(wx.Panel): 239 | def __init__(self,parent): 240 | super(ActionsPanel, self).__init__(parent) 241 | self.init_ui() 242 | 243 | def init_ui(self): 244 | vbox = wx.BoxSizer(wx.VERTICAL) 245 | self.SetSizer(vbox) 246 | 247 | self.tabs = wx.Notebook(self) 248 | self.ana = TimeSeriesPanel(self.tabs) 249 | self.tabs.AddPage(self.ana, "分析&回测") 250 | self.tabs.AddPage(PortfolioPanel(self.tabs), "资产配置策略") 251 | #self.tabs.AddPage(ActionRollingPanel(self.tabs),'资产轮动策略') 252 | 253 | vbox.Add(self.tabs, 1, flag=wx.EXPAND | wx.ALL, border=5) -------------------------------------------------------------------------------- /gui/panels/backtest.py: -------------------------------------------------------------------------------- 1 | 2 | import wx 3 | from ...engine.runner import Runner 4 | from ..global_event import g 5 | 6 | class CreateStrategy(wx.Panel): 7 | def __init__(self,parent): 8 | super(CreateStrategy, self).__init__(parent) 9 | 10 | vbox = wx.BoxSizer(wx.VERTICAL) 11 | hbox = wx.BoxSizer(wx.HORIZONTAL) 12 | vbox.Add(hbox) 13 | 14 | hbox.Add(wx.StaticText(self, -1, label='请输入基金代码:'), 0) 15 | self.codes = wx.TextCtrl(self,size=(300,60)) 16 | self.codes.SetValue('510300.SH;510500.SH') 17 | hbox.Add(self.codes, 0, wx.ALL | wx.EXPAND, 5) 18 | 19 | #固定组合权重 20 | hbox.Add(wx.StaticText(self, -1, label='请输入权重(与基金数量一定,逗号分隔):'), 0) 21 | self.weights = wx.TextCtrl(self, size=(300, 60)) 22 | self.weights.SetValue('0.6,0.4') 23 | hbox.Add(self.weights, 0, wx.ALL | wx.EXPAND, 5) 24 | 25 | self.box = wx.CheckBox(self, -1, "全部基金",) # 创建控件 26 | vbox.Add(self.box) 27 | self.Bind(wx.EVT_CHECKBOX, self.ChooseAll, self.box) # 绑定事件 28 | self.box.SetValue(False) # 设置当前是否被选中 29 | self.SetSizer(vbox) 30 | 31 | #===================各种策略================== 32 | 33 | 34 | 35 | 36 | self.btn_save = wx.Button(self,label='保存策略') 37 | self.Bind(wx.EVT_BUTTON,self.SaveClicked,self.btn_save) 38 | vbox.Add(self.btn_save) 39 | #dlg = wx.TextEntryDialog(self, 'Enter a URL', 'HTMLWindow') 40 | 41 | def SaveClicked(self,event): 42 | #wx.MessageBox('saved!') 43 | codes = self.codes.GetValue() 44 | if codes == '': 45 | wx.MessageBox('请输入基金代码!') 46 | self.codes.SetFocus() 47 | return 48 | codes = codes.split(';') 49 | weights = self.weights.GetValue().split(',') 50 | weights = [float(w) for w in weights] 51 | buy_rules = self.rule_buy.get_rules() 52 | sell_rules = self.rule_sell.get_rules() 53 | order_rules = self.rule_orderby.get_rules() 54 | print(buy_rules,sell_rules,order_rules) 55 | 56 | strategy = { 57 | 'universe':codes, 58 | 'weights': weights, 59 | 'buy_rules':buy_rules, 60 | 'sell_rules':sell_rules, 61 | 'order_rules':order_rules 62 | } 63 | 64 | print(strategy) 65 | strategy['benchmarks'] = ['000300.SH'] 66 | 67 | 68 | def ChooseAll(self,e): 69 | all = self.box.GetValue() 70 | if all: 71 | self.codes.Enabled = False 72 | else: 73 | self.codes.Enabled = True 74 | -------------------------------------------------------------------------------- /gui/panels/panel_backtest.py: -------------------------------------------------------------------------------- 1 | import wx 2 | from gui.widgets.widget_matplotlib import MatplotlibPanel 3 | from 
gui.widgets.widget_web import WebPanel 4 | from engine.config import DATA_DIR_BKT_RESULT 5 | from bokeh.plotting import figure, output_file, show, save 6 | 7 | 8 | # https://zhuanlan.zhihu.com/p/376248349 9 | def OnBkt(event): 10 | wx.MessageBox('ok') 11 | 12 | 13 | class PanelBacktest(wx.Panel): 14 | def __init__(self, parent): 15 | super(PanelBacktest, self).__init__(parent) 16 | 17 | # 回测按钮 18 | # self.btn_bkt = wx.Button(self, label="回测") 19 | # self.Bind(wx.EVT_BUTTON, OnBkt, self.btn_bkt) 20 | 21 | # 进度条 22 | 23 | self.layout() 24 | 25 | def layout(self): 26 | vbox = wx.BoxSizer(wx.VERTICAL) 27 | self.SetSizer(vbox) 28 | 29 | hbox = wx.BoxSizer(wx.HORIZONTAL) 30 | vbox.Add(hbox) 31 | 32 | hbox.Add(wx.StaticText(self,label='请选择基准:')) 33 | combo_benchmarks = wx.ComboBox(self, size=(180, 25)) 34 | combo_benchmarks.SetItems(['沪深300指数(000300.SH)','标普500指数(SPY)']) 35 | hbox.Add(combo_benchmarks) 36 | 37 | # 上面是一个panel 38 | # panel = wx.Panel(self) 39 | # gauge = wx.Gauge(panel, range=100, pos=(0, 50), 40 | # size=(180, -1)) 41 | # gauge.SetValue(88) 42 | 43 | # vbox.Add(panel, 0) 44 | btn = wx.Button(self, label="回测分析") 45 | self.Bind(wx.EVT_BUTTON, self.OnClick, btn) 46 | vbox.Add(btn) 47 | 48 | # 底部是一个浏览器 49 | web = WebPanel(self) 50 | vbox.Add(web, 1, wx.EXPAND) 51 | web.show_file(DATA_DIR_BKT_RESULT.joinpath('bkt_result.html')) 52 | 53 | # web.show_url('http://www.jisilu.cn') 54 | 55 | self.web = web 56 | 57 | def OnClick(self, event): 58 | pass 59 | -------------------------------------------------------------------------------- /gui/panels/panels.py: -------------------------------------------------------------------------------- 1 | import wx,wx.adv 2 | from .. import widgets 3 | from ...dataloader.dataloader import CSVLoader 4 | from ...dataloader.pd_utils import PdUtils 5 | from ...analysis.performance import PerformanceUtils 6 | import os 7 | import pandas as pd 8 | 9 | class TimeSeriesAnalysis(wx.Panel): 10 | def __init__(self,parent): 11 | super(TimeSeriesAnalysis, self).__init__(parent) 12 | #self.SetBackgroundColour('green') 13 | 14 | vbox = wx.BoxSizer(wx.VERTICAL) 15 | # 水平盒子 16 | hbox = wx.BoxSizer(wx.HORIZONTAL) 17 | 18 | 19 | self.text_codes = wx.TextCtrl(self, -1, size=(200, 20), style=wx.ALIGN_LEFT) 20 | self.text_codes.SetValue('000300.SH;000905.SH') 21 | hbox.Add(wx.StaticText(self, -1, label="代码:"), 0, wx.ALL | wx.EXPAND, 5) 22 | hbox.Add(self.text_codes, 0, wx.ALL | wx.EXPAND, 5) 23 | 24 | # 创建按钮 25 | self.btn_ana = wx.Button(self, label="分析") 26 | self.Bind(wx.EVT_BUTTON, self.OnClick, self.btn_ana) 27 | # 在水平盒子里添加查询按钮 28 | hbox.AddSpacer(20) 29 | hbox.Add(self.btn_ana, 0) 30 | 31 | hbox2 = wx.BoxSizer(wx.HORIZONTAL) 32 | 33 | self.date_start = wx.adv.DatePickerCtrl(self, id = -1,style=wx.adv.DP_DROPDOWN|wx.adv.DP_SHOWCENTURY) 34 | self.date_end = wx.adv.DatePickerCtrl(self, id=-1, style=wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY) 35 | hbox2.Add(wx.StaticText(self,-1,label='起始时间:'),0) 36 | hbox2.Add(self.date_start, 5) 37 | hbox2.Add(wx.StaticText(self, -1, label='结束时间:'), 0) 38 | hbox2.Add(self.date_end, 5) 39 | hbox2.Add(wx.StaticText(self, -1, label='基准:'), 0) 40 | # 创建下拉框 41 | self.languages = ['沪深300', '中证500', '中证800', '创业板'] 42 | self.choice = self.languages[0] 43 | self.combo = wx.ComboBox(self, choices=self.languages, value=self.languages[0]) 44 | # 在水平盒子添加下拉框 45 | hbox2.Add(self.combo, 5) 46 | 47 | hbox2.Add(wx.StaticText(self, -1, label='策略:'), 0) 48 | self.strategies = ['大小盘轮动', '中证500', '中证800', '创业板'] 49 | #self.choice = self.languages[0] 50 | 
self.combo_strategy = wx.ComboBox(self, choices=self.strategies, value=self.strategies[0]) 51 | # 在水平盒子添加下拉框 52 | hbox2.Add(self.combo_strategy, 5) 53 | # 创建按钮 54 | self.btn_bkt = wx.Button(self, label="开始回测") 55 | self.Bind(wx.EVT_BUTTON, self.OnClickBkt, self.btn_bkt) 56 | hbox2.Add(self.btn_bkt, 5) 57 | 58 | self.btn_create = wx.Button(self, label='新建策略') 59 | self.Bind(wx.EVT_BUTTON, self.OnClickCreate, self.btn_create) 60 | hbox2.Add(self.btn_create, 5) 61 | 62 | 63 | # 在垂直盒子里添加水平盒子 64 | vbox.Add(hbox, 0, wx.ALL, 5) 65 | vbox.Add(hbox2, 0, wx.ALL, 5) 66 | 67 | self.init_cmd(self,vbox) 68 | 69 | 70 | self.SetSizer(vbox) 71 | 72 | def init_cmd(self,parent,vbox): 73 | hbox = wx.BoxSizer(wx.HORIZONTAL) 74 | vbox.Add(hbox, 0, wx.ALL, 5) 75 | 76 | #self.date_start = wx.DatePickerCtrl(self) 77 | #hbox.Add(self.date_start, 1, wx.EXPAND | wx.ALL, 5) 78 | 79 | 80 | 81 | 82 | def OnClickBkt(self,event): 83 | wx.MessageBox('开始回测!') 84 | 85 | def OnClickCreate(self,event): 86 | wx.MessageBox('开始创建!') 87 | 88 | def OnClick(self,event): 89 | codes = self.text_codes.GetValue() 90 | codes = codes.split(';') 91 | path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+"datas") 92 | CSVLoader.check_and_load(codes,path) 93 | dfs = CSVLoader.load_csvs(path,codes) 94 | 95 | df_prices = PdUtils.dfs_to_prices(dfs) 96 | 97 | date_start = self.date_start.GetValue() 98 | date_start = pd.Timestamp(_wxdate2pydate(date_start)) 99 | date_end = self.date_end.GetValue() 100 | date_end = pd.Timestamp(_wxdate2pydate(date_end)) 101 | 102 | #df_prices = df_prices[df_prices.index > date_start] 103 | df_prices = df_prices[df_prices.index < date_end] 104 | 105 | for col in df_prices.columns: 106 | df_prices[col] = df_prices[col].pct_change() 107 | 108 | df_equity = PerformanceUtils().rate2equity(df_prices) 109 | df_ratios,df_corr,df_years = PerformanceUtils().calc_rates(df_prices) 110 | print(df_ratios,df_corr,df_years) 111 | self.pd.show_df(df_ratios) 112 | self.pd_yearly.show_df(df_years) 113 | self.pd_corr.show_df(df_corr) 114 | self.plot.show_data(df_equity) 115 | -------------------------------------------------------------------------------- /gui/panels/results.py: -------------------------------------------------------------------------------- 1 | import wx 2 | from .. 
import widgets 3 | class ResultsPanel(wx.Panel): 4 | def __init__(self,parent): 5 | super(ResultsPanel, self).__init__(parent) 6 | self.init_tabs() 7 | 8 | def handle_data(self,data_dict): 9 | if 'raw' in data_dict.keys(): 10 | raw = data_dict['raw'] 11 | self.panel_raw.show_df(raw) 12 | 13 | if 'ratio' in data_dict.keys(): 14 | radio = data_dict['ratio'] 15 | self.pd.show_df(radio) 16 | 17 | if 'corr' in data_dict.keys(): 18 | corr = data_dict['corr'] 19 | self.pd_corr.show_df(corr) 20 | 21 | if 'plot' in data_dict.keys(): 22 | plot = data_dict['plot'] 23 | self.plot.show_data(plot) 24 | 25 | if 'yearly' in data_dict.keys(): 26 | yearly = data_dict['yearly'] 27 | self.pd_yearly.show_df(yearly) 28 | 29 | 30 | 31 | #self.pd.show_df() 32 | 33 | def init_tabs(self): 34 | vbox = wx.BoxSizer(wx.VERTICAL) 35 | self.SetSizer(vbox) 36 | 37 | tabs = wx.Notebook(self) 38 | vbox.Add(tabs, 1, wx.EXPAND) 39 | 40 | self.panel_raw = widgets.PandasGrid(tabs) 41 | 42 | panel_tab = wx.Panel(tabs) 43 | panel_yearly = wx.Panel(tabs) 44 | panel_corr = wx.Panel(tabs) 45 | panel_plot = wx.Panel(tabs) 46 | 47 | tabs.AddPage(self.panel_raw,'原始数据') 48 | tabs.AddPage(panel_plot, '序列绘图') 49 | tabs.AddPage(panel_tab, '风险收益') 50 | tabs.AddPage(panel_yearly, '年度收益') 51 | tabs.AddPage(panel_corr, '相关性分析') 52 | 53 | self.pd = widgets.PandasGrid(panel_tab) 54 | self.pd_yearly = widgets.PandasGrid(panel_yearly) 55 | 56 | vbox_panel = wx.BoxSizer(wx.VERTICAL) 57 | vbox_panel.Add(self.pd, 1, wx.EXPAND) 58 | 59 | vbox_yearly = wx.BoxSizer(wx.VERTICAL) 60 | vbox_yearly.Add(self.pd_yearly, 1, wx.EXPAND) 61 | panel_tab.SetSizer(vbox_panel) 62 | panel_yearly.SetSizer(vbox_yearly) 63 | 64 | self.init_corr(panel_corr) 65 | self.init_plot(panel_plot) 66 | 67 | 68 | def init_corr(self,parent): 69 | vbox = wx.BoxSizer(wx.VERTICAL) 70 | parent.SetSizer(vbox) 71 | self.pd_corr = widgets.PandasGrid(parent) 72 | vbox.Add(self.pd_corr,1,wx.EXPAND) 73 | 74 | def init_plot(self,parent): 75 | vbox = wx.BoxSizer(wx.VERTICAL) 76 | parent.SetSizer(vbox) 77 | self.plot = widgets.MatplotlibPanel(parent) 78 | vbox.Add(self.plot, 1, wx.EXPAND) -------------------------------------------------------------------------------- /gui/widgets.py: -------------------------------------------------------------------------------- 1 | import wx.grid 2 | 3 | class RuleGrid(wx.grid.Grid): 4 | def __init__(self,parent): 5 | super(RuleGrid, self).__init__(parent,-1) 6 | 7 | def add_row(self): 8 | wx.grid.GridTableMessage(self, 9 | wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED 10 | , 1#插入一行记录 11 | ) 12 | 13 | def del_row(self,index): 14 | wx.grid.GridTableMessage(self, wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED, 15 | index, #改行所在的索引 16 | 1#只删除一行 17 | ) 18 | 19 | 20 | class PandasGrid(wx.grid.Grid): 21 | def __init__(self,parent,nrow=10,ncol=20): 22 | super().__init__(parent,-1) 23 | self.CreateGrid(numRows=nrow, numCols=ncol) 24 | 25 | def show_df(self,df): 26 | self.ClearGrid() 27 | self.df = df 28 | 29 | self.SetRowSize(0, 60) 30 | self.SetColSize(0, 150) 31 | 32 | for i,col in enumerate(list(df.columns)): 33 | self.SetColLabelValue(i,col) 34 | 35 | for i,row in enumerate(list(df.index)): 36 | self.SetRowLabelValue(i, row) 37 | 38 | i = 0 39 | for index, row in df.iterrows(): 40 | for j in range(len(row)): 41 | self.SetCellValue(i,j,str(row[j])) 42 | i += 1 43 | 44 | import wx 45 | 46 | import matplotlib 47 | matplotlib.use("WXAgg") 48 | from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas 49 | 50 | class MatplotlibPanel(wx.Panel): 51 | def 
__init__(self,parent,id=-1): 52 | super(MatplotlibPanel, self).__init__(parent,id) 53 | 54 | self.TopBoxSizer = wx.BoxSizer(wx.VERTICAL) 55 | self.SetSizer(self.TopBoxSizer) 56 | 57 | self.figure = matplotlib.figure.Figure(figsize=(4, 3)) 58 | self.ax = self.figure.add_subplot(111) 59 | 60 | self.canvas = FigureCanvas(self, -1, self.figure) 61 | self.TopBoxSizer.Add(self.canvas, proportion=-10, border=2, flag=wx.ALL | wx.EXPAND) 62 | 63 | def show_data(self,data): 64 | #print(data) 65 | self.ax.clear() 66 | data.plot(ax=self.ax) 67 | self.ax.grid(True) 68 | self.canvas.draw() 69 | 70 | 71 | if __name__ == '__main__': 72 | import pandas as pd 73 | app = wx.App() 74 | fr = wx.Frame(None) 75 | df = pd.DataFrame([['a1', 1], ['a2', 4]], columns=['uid', 'score']) 76 | grid = PandasGrid(fr) 77 | grid.show_df(df) 78 | fr.Show() 79 | app.MainLoop() 80 | -------------------------------------------------------------------------------- /gui/widgets/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/gui/widgets/BUILD -------------------------------------------------------------------------------- /gui/widgets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/gui/widgets/__init__.py -------------------------------------------------------------------------------- /gui/widgets/widget_matplotlib.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | import wx 3 | from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas 4 | 5 | 6 | class MatplotlibPanel(wx.ScrolledWindow): 7 | def __init__(self, parent, id=-1): 8 | super(MatplotlibPanel, self).__init__(parent, id) 9 | self.TopBoxSizer = wx.BoxSizer(wx.VERTICAL) 10 | self.SetSizer(self.TopBoxSizer) 11 | 12 | self.btn_bkt = wx.Button(self, label="回测5555", pos=(100,10)) 13 | 14 | self.figure = matplotlib.figure.Figure(figsize=(4, 3)) 15 | self.canvas = FigureCanvas(self, -1, self.figure) 16 | self.TopBoxSizer.Add(self.canvas, proportion=-10, border=2, flag=wx.ALL | wx.EXPAND) 17 | 18 | -------------------------------------------------------------------------------- /gui/widgets/widget_web.py: -------------------------------------------------------------------------------- 1 | import wx 2 | import wx.html2 as web 3 | 4 | 5 | class WebPanel(wx.Panel): 6 | def __init__(self, parent, id=-1): 7 | super(WebPanel, self).__init__(parent, id) 8 | 9 | vbox = wx.BoxSizer(wx.VERTICAL) 10 | self.SetSizer(vbox) 11 | self.browser = web.WebView.New(self) 12 | vbox.Add(self.browser, proportion=-1, flag=wx.EXPAND | wx.ALL, border=10) 13 | 14 | def show_url(self, url): 15 | self.browser.LoadURL(url) 16 | 17 | def show_file(self, filename): 18 | with open(filename, 'r') as f: 19 | html_cont = f.read() 20 | self.browser.SetPage(html_cont, "") 21 | self.browser.Show() 22 | -------------------------------------------------------------------------------- /images/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/images/BUILD -------------------------------------------------------------------------------- /images/main_window2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/images/main_window2.png -------------------------------------------------------------------------------- /images/mainwindow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/images/mainwindow.png -------------------------------------------------------------------------------- /images/weixin.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/images/weixin.jpg -------------------------------------------------------------------------------- /images/xingqiu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/images/xingqiu.png -------------------------------------------------------------------------------- /qbot/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/qbot/BUILD -------------------------------------------------------------------------------- /qbot_main.py: -------------------------------------------------------------------------------- 1 | import wx 2 | from gui.mainframe import MainFrame 3 | 4 | if __name__ == '__main__': 5 | app = wx.App() 6 | frm = MainFrame(None, title='AI智能量化投研平台') 7 | frm.Show() 8 | app.MainLoop() 9 | 10 | -------------------------------------------------------------------------------- /qbot_test.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/qbot_test.py -------------------------------------------------------------------------------- /scripts/creat_build_file.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 判断当前目录是否存在BUILD文件 4 | if [ ! -f "./BUILD" ]; then 5 | # 如果不存在,则创建BUILD文件 6 | touch ./BUILD 7 | fi 8 | 9 | # 遍历子目录,查找是否存在BUILD文件 10 | for dir in $(find . -type d); do 11 | if [ "${dir}" != "." ]; then 12 | if [ ! 
-f "${dir}/BUILD" ]; then 13 | # 如果不存在,则创建BUILD文件 14 | touch "${dir}/BUILD" 15 | fi 16 | fi 17 | done 18 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name='qbot', 5 | version='1.0.0', 6 | packages=find_packages(), 7 | install_requires=[ 8 | 'requests', 9 | 'numpy' 10 | ] 11 | ) -------------------------------------------------------------------------------- /third_party/BUILD: -------------------------------------------------------------------------------- 1 | exports_files(glob(["**"])) 2 | -------------------------------------------------------------------------------- /third_party/requirements.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /third_party/requirements.txt.tmp: -------------------------------------------------------------------------------- 1 | absl-py==1.2.0 2 | cachetools==3.1.0 3 | certifi==2022.12.7 4 | chardet==3.0.4 5 | funcsigs==1.0.2 6 | futures==3.1.1 7 | gitdb2==2.0.0 8 | GitPython==2.1.11 9 | google-api-core==1.8.0 10 | google-auth==1.6.3 11 | google-cloud-bigquery==1.9.0 12 | google-cloud-storage==1.13.2 13 | google-cloud-core==0.29.1 14 | google-resumable-media==1.3.1 15 | googleapis-common-protos==1.6.0 16 | idna==2.8 17 | mock==2.0.0 18 | numpy==1.23.1 19 | pbr==5.1.3 20 | protobuf==3.18.3 21 | psutil==5.8.0 22 | pyasn1==0.4.5 23 | pyasn1-modules==0.2.4 24 | pytz==2018.9 25 | requests==2.25.1 26 | rsa==4.7 27 | scipy==1.9.0 28 | six==1.12.0 29 | urllib3==1.26.5 30 | PyYAML==6.0 31 | cython==0.29.24 32 | -------------------------------------------------------------------------------- /utils/BUILD: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Charmve/iQuant/b41f2d0066fdb1be8b5435e1dbe904fedd919d30/utils/BUILD -------------------------------------------------------------------------------- /utils/larkbot.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python3 2 | # coding:utf-8 3 | 4 | # larkbot.py 5 | 6 | import base64 7 | import hashlib 8 | import hmac 9 | from datetime import datetime 10 | 11 | import requests 12 | 13 | WEBHOOK_URL="https://open.feishu.cn/open-apis/bot/v2/hook/efe3d146-84c4-4d84-aba9-3c4d1d9e4f1a" 14 | 15 | # 发送更加个性化的消息 16 | # https://open.feishu.cn/document/ukTMukTMukTM/ucTM5YjL3ETO24yNxkjN#383d6e48 17 | content_json = { 18 | "msg_type": "interactive", 19 | "timestamp": "timestamp", 20 | "sign": "sign", 21 | "card": { 22 | "elements": [{ 23 | "tag": "div", 24 | "text": { 25 | "content": "**西湖**,位于浙江省杭州市西湖区龙井路1号,杭州市区西部,景区总面积49平方千米,汇水面积为21.22平方千米,湖面面积为6.38平方千米。", 26 | "tag": "lark_md" 27 | } 28 | }, { 29 | "actions": [{ 30 | "tag": "button", 31 | "text": { 32 | "content": "更多景点介绍 :玫瑰:", 33 | "tag": "lark_md" 34 | }, 35 | "url": "https://www.example.com", 36 | "type": "default", 37 | "value": {} 38 | }], 39 | "tag": "action" 40 | }], 41 | "header": { 42 | "title": { 43 | "content": "今日旅游推荐", 44 | "tag": "plain_text" 45 | } 46 | } 47 | } 48 | } 49 | 50 | class LarkBot: 51 | def __init__(self, secret: str) -> None: 52 | if not secret: 53 | raise ValueError("invalid secret key") 54 | self.secret = secret 55 | 56 | def gen_sign(self, timestamp: int) -> str: 57 | string_to_sign = '{}\n{}'.format(timestamp, 
self.secret) 58 | hmac_code = hmac.new( 59 | string_to_sign.encode("utf-8"), digestmod=hashlib.sha256 60 | ).digest() 61 | sign = base64.b64encode(hmac_code).decode('utf-8') 62 | 63 | return sign 64 | 65 | def send(self, content: str) -> None: 66 | timestamp = int(datetime.now().timestamp()) 67 | sign = self.gen_sign(timestamp) 68 | 69 | params = { 70 | "timestamp": timestamp, 71 | "sign": sign, 72 | "msg_type": "text", 73 | "content": {"text": content}, 74 | } 75 | resp = requests.post(url=WEBHOOK_URL, json=params) 76 | resp.raise_for_status() 77 | result = resp.json() 78 | if result.get("code") and result["code"] != 0: 79 | print(result["msg"]) 80 | return 81 | print("消息发送成功") 82 | 83 | def main(): 84 | WEBHOOK_SECRET = "wNMVU3ewSm2F0G2TwTX4Fd" 85 | bot = LarkBot(secret=WEBHOOK_SECRET) 86 | bot.send(content="[测试] 我是一只高级鸽子!") 87 | 88 | if __name__ == '__main__': 89 | main() 90 | -------------------------------------------------------------------------------- /utils/send_email.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: UTF-8 -*- 3 | 4 | # **************************************************************************** 5 | # Description: Send email unit test 6 | # How to use: python -m unittest -v test_send_semail.TestSendEmail 7 | # 8 | # Copyright 2022 Charmve. All Rights Reserved. 9 | # Licensed under the MIT License. 10 | # **************************************************************************** 11 | 12 | import os 13 | import smtplib 14 | import datetime 15 | from email.mime.image import MIMEImage 16 | from email.mime.multipart import MIMEMultipart 17 | from email.mime.text import MIMEText 18 | from pathlib import Path 19 | 20 | TOP_DIR = Path(__file__).parent.parent.joinpath("stock") 21 | 22 | # 发件人邮箱 23 | mail_sender = "1144262839@qq.com" 24 | # 邮箱授权码,注意这里不是邮箱密码,如何获取邮箱授权码,请看本文最后教程 25 | # mail_license = os.getenv("MAIL_LICENSE") 26 | mail_license = "uptgfoagathxbabb" 27 | # 收件人邮箱,可以为多个收件人 28 | mail_receivers = ["yidazhang1@gmail.com", "zhangwei@qcraft.ai"] 29 | # 邮件主题 30 | subject = """Python邮件测试""" 31 | 32 | # 邮件正文内容 33 | body_content = """你好,这是一个测试邮件!""" 34 | # 构造文本,参数1:正文内容,参数2:文本格式,参数3:编码方式 35 | message_text = MIMEText(body_content, "plain", "utf-8") 36 | 37 | # 构造附件 38 | attachment = MIMEText( 39 | open(TOP_DIR.joinpath("quantstats_report.html"), "rb").read(), "base64", "utf-8" 40 | ) 41 | # 设置附件信息 42 | attachment["Content-Disposition"] = 'attachment; filename="bkt_result.html"' 43 | 44 | # 发送 html 格式的邮件 45 | now_time = datetime.datetime.now() 46 | year = now_time.year 47 | month = now_time.month 48 | day = now_time.day 49 | mytime = str(year) + " 年 " + str(month) + " 月 " + str(day) + " 日 " 50 | fayanren = "爱因斯坦" 51 | zhuchiren = "牛顿" 52 | 53 | # 构造HTML 54 | html_content = """ 55 | 56 | 57 |

这个是标题,xxxx通知 58 | 您好: 59 | 以下内容是本次会议的纪要,请查收! 60 | 发言人:{fayanren} 61 | 主持人:{zhuchiren} 62 | {mytime}
63 | 64 | 65 | """.format( 66 | fayanren=fayanren, zhuchiren=zhuchiren, mytime=mytime 67 | ) 68 | message_html = MIMEText(html_content, "html", "utf-8") 69 | 70 | 71 | def send_email(email_subject, mail_receivers, content): 72 | 73 | # 创建SMTP对象 74 | if "qq" in mail_sender: 75 | server = smtplib.SMTP_SSL("smtp.qq.com", 465) 76 | elif "gmail" in mail_sender: 77 | server = smtplib.SMTP("smtp.gmail.com", 587) # Connect to the server 78 | server.starttls() 79 | elif "163" in mail_sender: 80 | server = smtplib.SMTP() 81 | # 设置发件人邮箱的域名和端口,端口地址为25 82 | server.connect("smtp.163.com", 25) 83 | else: 84 | print("Please check your sender email.") 85 | 86 | # set_debuglevel(1)可以打印出和SMTP服务器交互的所有信息 87 | # server.set_debuglevel(1) 88 | 89 | # Connect and login to the email server 90 | server.login(mail_sender, mail_license) 91 | 92 | # Loop over each email to send to 93 | for mail_receiver in mail_receivers: 94 | # Setup MIMEMultipart for each email address (if we don't do this, the emails will concatenate on each email sent) 95 | msg = MIMEMultipart() 96 | msg["From"] = mail_sender 97 | msg["To"] = mail_receiver 98 | msg["Subject"] = email_subject 99 | 100 | print("Send to: ", mail_receiver) 101 | 102 | # # Attach the message to the MIMEMultipart object 103 | # msg.attach(message_text) 104 | # msg.attach(message_image) 105 | # # Attach the attachment file 106 | # msg.attach(attachment) 107 | # msg.attach(content) 108 | msg.attach(MIMEText(content)) 109 | 110 | # Send the email to this specific email address 111 | server.sendmail(mail_sender, mail_receiver, msg.as_string()) 112 | print("邮件发送成功! 主题: {}".format(email_subject)) 113 | return True 114 | 115 | # Quit the email server when everything is done 116 | server.quit() 117 | 118 | 119 | if __name__ == "__main__": 120 | send_email(subject, mail_receivers, body_content) 121 | # send_email(mail_sender, mail_receivers, message_image) 122 | # send_email(mail_sender, mail_receivers, attachment) 123 | # send_email(mail_sender, mail_receivers, message_html) 124 | -------------------------------------------------------------------------------- /utils/wxbot.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: UTF-8 -*- 3 | 4 | # 导入模块 5 | from wxpy import * 6 | 7 | # 初始化机器人,扫码登陆 8 | bot = Bot() 9 | 10 | # 搜索名称含有 "Chamrve" 的男性苏州好友 11 | my_friend = bot.friends().search('Chamrve', sex=MALE, city="苏州")[0] 12 | 13 | 14 | image_file = "/Users/charmve/Qbot/gui/imgs/UFund.png" 15 | 16 | # 发送文本给好友 17 | my_friend.send('Hello WeChat!') 18 | # 发送图片 19 | my_friend.send_image(image_file) 20 | 21 | 22 | # 打印来自其他好友、群聊和公众号的消息 23 | @bot.register() 24 | def print_others(msg): 25 | print(msg) 26 | 27 | # 回复 my_friend 的消息 (优先匹配后注册的函数!) 28 | @bot.register(my_friend) 29 | def reply_my_friend(msg): 30 | return 'received: {} ({})'.format(msg.text, msg.type) 31 | 32 | # 自动接受新的好友请求 33 | @bot.register(msg_types=FRIENDS) 34 | def auto_accept_friends(msg): 35 | # 接受好友请求 36 | new_friend = msg.card.accept() 37 | # 向新的好友发送消息 38 | new_friend.send('你好👋,我是迈微AI研习社助理,其实也是Charmve本人 哈哈哈😂') 39 | 40 | # 进入 Python 命令行、让程序保持运行 41 | embed() 42 | 43 | # 或者仅仅堵塞线程 44 | # bot.join() 45 | --------------------------------------------------------------------------------
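
utils/larkbot.py above is self-contained, so the Feishu notifier can be smoke-tested without launching the GUI. The sketch below is illustrative only and makes two assumptions not found in the repo: it is run from the repository root, and the signing secret comes from a hypothetical `LARK_WEBHOOK_SECRET` environment variable rather than the hard-coded value in the module's `main()`.

```python
# Illustrative sketch (not part of the repo): smoke-test the Feishu bot.
# Assumptions: run from the repository root; LARK_WEBHOOK_SECRET is a
# hypothetical variable name, since utils/larkbot.py itself hard-codes both
# the secret and the webhook URL.
import os

from utils.larkbot import LarkBot

if __name__ == "__main__":
    secret = os.getenv("LARK_WEBHOOK_SECRET")
    if not secret:
        raise SystemExit("set LARK_WEBHOOK_SECRET before running this sketch")

    bot = LarkBot(secret=secret)
    # LarkBot.send() timestamps the request, signs it via gen_sign(), and
    # posts a plain-text message to the webhook configured in the module.
    bot.send(content="[test] Qbot notification channel is alive")
```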