├── .gitignore
├── README.md
├── analysers
│   ├── __init__.py
│   └── performance.py
├── backtests
│   ├── buynhold
│   │   └── buynhold_cfg.py
│   ├── macross
│   │   └── macross_cfg.py
│   └── mrpairs
│       └── mrpairs_cfg.py
├── core
│   ├── __init__.py
│   ├── backtest.py
│   ├── baseclasses.py
│   ├── datahandler.py
│   ├── parser.py
│   └── stack.py
├── portfolios
│   ├── __init__.py
│   ├── equalweights.py
│   └── hedgeratioweights.py
├── research
│   └── meanreversion.py
├── setup.sh
├── strategies
│   ├── __init__.py
│   ├── buynhold.py
│   ├── macross.py
│   └── mrpairs.py
└── utils
    ├── __init__.py
    ├── plotting.py
    └── timeseries.py

/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.swp
3 | output/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # QuantCode
2 | 
3 | Quant research and backtesting system
4 | 
5 | ## Installation
6 | 
7 | Requires pandas, numpy, and matplotlib
8 | 
9 | Source the setup script in every new session:
10 | ```shell
11 | source setup.sh
12 | ```
13 | 
14 | ## Run a backtest
15 | 
16 | 1. Define the backtest in a config file, e.g. backtests/buynhold/buynhold_cfg.py
17 | 
18 | 2. Choose the symbols, start and end dates, trading frequency, and the price to trade on (open or close)
19 | 
20 | 3. Define a Strategy, Portfolio, and Analyser to backtest with, then run:
21 | ```shell
22 | python buynhold_cfg.py
23 | ```
24 | 
25 | ## Create your own backtest modules
26 | 
27 | - Strategy class generates signals (a minimal custom Strategy is sketched after strategies/macross.py below)
28 | 
29 |     +1 long, -1 short, 0 cash
30 | 
31 | - Portfolio class generates positions and computes returns
32 | 
33 |     e.g. defines the fractions (weights) of total capital invested in each asset
34 | 
35 | - Analyser class analyses the performance of the backtest
36 | 
37 |     e.g. equity curve, Sharpe ratio, etc.
38 | 
--------------------------------------------------------------------------------
/analysers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xtian9/QuantCode/97f98d2f502f4583d038e264b79abbec5e3cf3ad/analysers/__init__.py
--------------------------------------------------------------------------------
/analysers/performance.py:
--------------------------------------------------------------------------------
1 | from core.stack import *
2 | from core.baseclasses import Analyser
3 | from collections import OrderedDict as odict
4 | from utils import timeseries, plotting
5 | import matplotlib.gridspec as gridspec
6 | import os
7 | 
8 | def format_string(value, fmt, pct=False):
9 |     return '{:{}}'.format(value*(100 if pct else 1), fmt) + '%'*pct
10 | 
11 | 
12 | class PerformanceAnalyser(Analyser):
13 |     """
14 |     Compute performance measures like Sharpe ratio,
15 |     drawdown, etc. and make performance plots like
16 |     equity curve, etc.
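    Reported metrics: APR, annualised volatility, total return, benchmark
    total return, alpha/beta vs. the benchmark, Sharpe ratio, information
    ratio, maximum drawdown and maximum drawdown duration. Results are
    written to <outdir>/log.txt and the tearsheet figure is saved as
    tearsheet.png/.pdf.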
17 | """ 18 | def __init__(self, **kwargs): 19 | self.__dict__.update(kwargs) 20 | self.rfrate = 0.04 21 | self.results = odict() 22 | 23 | def begin(self): 24 | pass 25 | 26 | def generate_analysis(self): 27 | # Calculations 28 | self.portfolio_returns() 29 | self.benchmark_returns() 30 | 31 | # Measures 32 | self.alpha_beta() 33 | self.sharpe_ratio() 34 | self.information_ratio() 35 | self.drawdown() 36 | self.log_results() 37 | 38 | # Plots 39 | self.create_tearsheet() 40 | 41 | #________________________________________________________________________|| 42 | # Input/Calculations 43 | 44 | def portfolio_returns(self): 45 | self.cumreturns = timeseries.cumulate_returns(self.returns) 46 | 47 | self.annual_return = timeseries.annualised_return( 48 | self.returns, self.frequency) 49 | self.results['APR'] = format_string(self.annual_return, 50 | '.2f', True) 51 | 52 | self.annual_std = timeseries.annualised_volatility( 53 | self.returns, self.frequency) 54 | self.results['Volatility'] = format_string(self.annual_std, 55 | '.2f', True) 56 | 57 | self.total_return = timeseries.total_return(self.returns) 58 | self.results['Total return'] = format_string(self.total_return, 59 | '.2f', True) 60 | 61 | def benchmark_returns(self): 62 | self.returns_bm = self.prices_bm.pct_change().iloc[:,0] 63 | self.cumreturns_bm = timeseries.cumulate_returns(self.returns_bm) 64 | 65 | self.results['Total return bmark'] = format_string( 66 | self.cumreturns_bm[-1], 67 | '.2f', True) 68 | 69 | #________________________________________________________________________|| 70 | # Measures 71 | 72 | def alpha_beta(self): 73 | self.alpha, self.beta = timeseries.alpha_beta( 74 | self.returns, self.returns_bm) 75 | self.results['Alpha'] = format_string(self.alpha, '.2f') 76 | self.results['Beta'] = format_string(self.beta, '.2f') 77 | 78 | def sharpe_ratio(self): 79 | self.sharpe = timeseries.sharpe_ratio( 80 | self.returns, self.frequency, self.rfrate) 81 | self.results['Sharpe ratio'] = format_string(self.sharpe, '.2f') 82 | 83 | def information_ratio(self): 84 | self.information_ratio = timeseries.information_ratio(self.returns, 85 | self.returns_bm, self.frequency) 86 | self.results['Information ratio'] = format_string( 87 | self.information_ratio, '.2f') 88 | 89 | def drawdown(self): 90 | self.max_dd = timeseries.max_drawdown(self.cumreturns) 91 | self.max_ddd = timeseries.max_drawdown_duration(self.cumreturns) 92 | self.results['Max DD'] = format_string(self.max_dd, '.2f', True) 93 | self.results['Max DD duration'] = format_string(self.max_ddd, '.0f') 94 | 95 | #________________________________________________________________________|| 96 | # Plots 97 | 98 | def create_tearsheet(self): 99 | vertical_sections = 7 100 | fig = plt.figure(figsize=(14, 5 * vertical_sections), facecolor='w') 101 | gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5) 102 | 103 | # Equity curve 104 | ax_ref = plt.subplot(gs[:2, :]) 105 | plotting.plot_equity_curve( 106 | self.cumreturns, self.cumreturns_bm, ax=ax_ref) 107 | 108 | # Rolling Sharpe 109 | i = 2 110 | ax = plt.subplot(gs[i, :], sharex=ax_ref) 111 | plotting.plot_rolling_sharpe( 112 | self.returns, self.frequency, self.rfrate, window=6, ax=ax) 113 | 114 | # Rolling drawdown 115 | i += 1 116 | ax = plt.subplot(gs[i, :], sharex=ax_ref) 117 | plotting.plot_drawdown(self.cumreturns, ax=ax) 118 | 119 | # Top drawdowns by magnitude 120 | i += 1 121 | ax = plt.subplot(gs[i, :], sharex=ax_ref) 122 | plotting.plot_top_drawdowns( 123 | self.cumreturns, ntop=5, 
ddtype='magnitude', ax=ax) 124 | 125 | # Top drawdowns by duration 126 | i += 1 127 | ax = plt.subplot(gs[i, :], sharex=ax_ref) 128 | plotting.plot_top_drawdowns( 129 | self.cumreturns, ntop=5, ddtype='duration', ax=ax) 130 | 131 | i += 1 132 | 133 | # Returns distribution 134 | ax = plt.subplot(gs[i, 1]) 135 | plotting.plot_returns_distr(self.returns, 'monthly', ax=ax) 136 | 137 | for ax in fig.axes: 138 | plt.setp(ax.get_xticklabels(), visible=True) 139 | 140 | self.save_fig(fig, 'tearsheet', ['.png', '.pdf'], bbox_inches='tight') 141 | 142 | #________________________________________________________________________|| 143 | # Output 144 | 145 | def log_results(self): 146 | fout = open(os.path.join(self.outdir, 'log.txt'), 'w') 147 | fout.write("Start date: {}\n".format(self.date_start)) 148 | fout.write("End date: {}\n\n".format(self.date_end)) 149 | fout.write("Symbols: {}\n\n".format(self.symbols)) 150 | print "\n\nPerformance:" 151 | for metric, value in self.results.iteritems(): 152 | s = '{:20} {}'.format(metric, value) 153 | print s 154 | fout.write(s+"\n") 155 | 156 | def save_fig(self, fig, name, exts=['.png'], **kwargs): 157 | if not isinstance(exts, list): 158 | exts = [exts] 159 | for ext in exts: 160 | fig.savefig(os.path.join(self.outdir, name+ext), **kwargs) 161 | 162 | -------------------------------------------------------------------------------- /backtests/buynhold/buynhold_cfg.py: -------------------------------------------------------------------------------- 1 | from core.backtest import Backtest 2 | from strategies.buynhold import BuyAndHoldStrategy 3 | from portfolios.equalweights import EqualWeightsPortfolio 4 | from analysers.performance import PerformanceAnalyser 5 | 6 | from core.parser import parser 7 | 8 | 9 | options = parser.parse_args() 10 | 11 | #____________________________________________________________________________|| 12 | 13 | symbols = ['SPY','DIA'][:] 14 | qcodes = ['GOOG/NYSE_'+s for s in symbols] 15 | date_start, date_end = "2010-01-01", "2015-12-31" 16 | frequency = "daily" 17 | datas = ['Close'] 18 | trade_time = ['Close','Open'][0] 19 | 20 | #____________________________________________________________________________|| 21 | 22 | strategy = BuyAndHoldStrategy()#mykwargs 23 | 24 | portfolio = EqualWeightsPortfolio() 25 | 26 | analyser = PerformanceAnalyser() 27 | 28 | backtest = Backtest(strategy = strategy, 29 | portfolio = portfolio, 30 | analyser = analyser, 31 | symbols = symbols, 32 | qcodes = qcodes, 33 | date_start = date_start, 34 | date_end = date_end, 35 | frequency = frequency, 36 | datas = datas, 37 | trade_time = trade_time, 38 | options = options) 39 | 40 | backtest.run() 41 | 42 | -------------------------------------------------------------------------------- /backtests/macross/macross_cfg.py: -------------------------------------------------------------------------------- 1 | from core.backtest import Backtest 2 | from strategies.macross import MovingAverageCrossoverStrategy 3 | from portfolios.equalweights import EqualWeightsPortfolio 4 | from analysers.performance import PerformanceAnalyser 5 | 6 | from core.parser import parser 7 | 8 | 9 | options = parser.parse_args() 10 | 11 | #____________________________________________________________________________|| 12 | 13 | symbols = ['AAPL'] 14 | qcodes = ['GOOG/NASDAQ_'+s for s in symbols] 15 | date_start, date_end = "2010-01-01", "2015-12-31" 16 | frequency = "daily" 17 | datas = ['Close'] 18 | trade_time = ['Close','Open'][0] 19 | 20 | short_window = 9 21 | long_window = 200 
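# Signal convention (see strategies/macross.py): long (+1) while the short-window
# moving average is above the long-window one, short (-1) otherwise; the first
# long_window-1 bars carry no signal.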
22 | 
23 | #____________________________________________________________________________||
24 | 
25 | strategy = MovingAverageCrossoverStrategy(short_window,long_window)
26 | 
27 | portfolio = EqualWeightsPortfolio()
28 | 
29 | analyser = [PerformanceAnalyser()]#,MovingAverageCrossoverAnalyser()]
30 | 
31 | backtest = Backtest(strategy = strategy,
32 |                     portfolio = portfolio,
33 |                     analyser = analyser,
34 |                     symbols = symbols,
35 |                     qcodes = qcodes,
36 |                     date_start = date_start,
37 |                     date_end = date_end,
38 |                     frequency = frequency,
39 |                     datas = datas,
40 |                     trade_time = trade_time,
41 |                     options = options)
42 | 
43 | backtest.run()
44 | 
45 | 
--------------------------------------------------------------------------------
/backtests/mrpairs/mrpairs_cfg.py:
--------------------------------------------------------------------------------
1 | from core.backtest import Backtest
2 | from strategies.mrpairs import MeanReversionPairsStrategy
3 | from portfolios.hedgeratioweights import HedgeRatioWeightsPortfolio
4 | from analysers.performance import PerformanceAnalyser
5 | from core.parser import parser
6 | 
7 | parser.add_argument('-w', '--window', action='store', dest='window', type=int, default=None, help='lookback window')
8 | parser.add_argument('--zentry', action='store', dest='zentry', type=float, default=None, help='z entry threshold')
9 | parser.add_argument('--zexit', action='store', dest='zexit', type=float, default=None, help='z exit threshold')
10 | options = parser.parse_args()
11 | 
12 | #____________________________________________________________________________||
13 | 
14 | symbols = ['GLD','GDX'][::-1] # x,y
15 | #symbols = ['SPY','IWM']
16 | qcodes = ['GOOG/NYSEARCA_'+s for s in symbols]
17 | date_start = "2006-05-23"
18 | date_end = "2007-11-30"
19 | #date_end = "2007-05-23"
20 | frequency = "daily"
21 | datas = ['Close']
22 | trade_time = 'Close'
23 | 
24 | window = -1
25 | zentry, zexit = 1, 0.5
26 | 
27 | if options.window is not None: window = options.window
28 | if options.zentry is not None: zentry = options.zentry
29 | if options.zexit is not None: zexit = options.zexit
30 | 
31 | #____________________________________________________________________________||
32 | 
33 | strategy = MeanReversionPairsStrategy(window, zentry, zexit)
34 | 
35 | portfolio = HedgeRatioWeightsPortfolio()
36 | 
37 | analyser = PerformanceAnalyser()
38 | 
39 | backtest = Backtest(strategy = strategy,
40 |                     portfolio = portfolio,
41 |                     analyser = analyser,
42 |                     symbols = symbols,
43 |                     qcodes = qcodes,
44 |                     date_start = date_start,
45 |                     date_end = date_end,
46 |                     frequency = frequency,
47 |                     datas = datas,
48 |                     trade_time = trade_time,
49 |                     options = options)
50 | 
51 | backtest.run()
52 | 
53 | 
--------------------------------------------------------------------------------
/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xtian9/QuantCode/97f98d2f502f4583d038e264b79abbec5e3cf3ad/core/__init__.py
--------------------------------------------------------------------------------
/core/backtest.py:
--------------------------------------------------------------------------------
1 | from datahandler import DataHandler
2 | import os, time
3 | 
4 | class Backtest(object):
5 | 
6 |     def __init__(self, strategy, portfolio, analyser, **kwargs):
7 |         self.strategy = strategy
8 |         self.portfolio = portfolio
9 |         self.analyser = [analyser] if type(analyser) != list else analyser
10 |         self.backtest_modules = [self, self.strategy, self.portfolio]
11 |         self.backtest_modules.extend(self.analyser)
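        # Config broadcast: every keyword argument passed to Backtest (symbols,
        # qcodes, dates, frequency, datas, trade_time, options, ...) is copied
        # onto this object and onto the strategy/portfolio/analyser modules
        # below, so all backtest modules share a single configuration namespace.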
12 | 13 | self.symbols = None 14 | self.qcodes = None 15 | self.date_start = None 16 | self.date_end = None 17 | self.frequency = None 18 | self.datas = None 19 | self.trade_time = None 20 | self.benchmark = None 21 | self.benchmark_qcode = None 22 | 23 | for module in self.backtest_modules: 24 | module.__dict__.update(kwargs) 25 | #self.__dict__.update(kwargs) 26 | 27 | self.validate_input() 28 | self.create_outdir() 29 | 30 | self.data_handler = DataHandler(self.symbols, self.qcodes, self.date_start, self.date_end, self.frequency, self.datas) 31 | self.benchmark_handler = DataHandler([self.benchmark], [self.benchmark_qcode], self.date_start, self.date_end, self.frequency, self.datas) 32 | 33 | def validate_input(self): 34 | if self.symbols is None: 35 | raise ValueError, "Need to choose symbols to trade" 36 | 37 | if self.benchmark is None: 38 | print "No benchmark specified. Default is SPY" 39 | self.benchmark = 'SPY' 40 | self.benchmark_qcode = 'GOOG/NYSE_SPY' 41 | 42 | def create_outdir(self): 43 | outdir = self.options.outdir 44 | 45 | if self.options.save: 46 | date = time.strftime("%Y_%m_%d") 47 | outdirbase = os.path.join(self.options.outdir, date) 48 | revision = 1 49 | while os.path.exists(outdir): 50 | outdir = outdirbase + "_" + str(revision) 51 | revision += 1 52 | 53 | if not os.path.exists(outdir): 54 | os.makedirs(outdir) 55 | 56 | for module in self.backtest_modules: 57 | module.outdir = outdir 58 | 59 | def run(self): 60 | print "\n\nHandling data" 61 | datas_symbols = self.data_handler.generate_data() 62 | datas_benchmark = self.benchmark_handler.generate_data() 63 | for module in self.backtest_modules: 64 | module.datas_symbols = datas_symbols 65 | module.datas_benchmark = datas_benchmark 66 | module.prices = datas_symbols[self.trade_time] 67 | module.prices_bm = datas_benchmark[self.trade_time] 68 | 69 | print "\n\nGenerating signals" 70 | self.strategy.begin() 71 | self.strategy.generate_signals() 72 | for module in self.backtest_modules: 73 | module.__dict__.update(self.strategy.__dict__) 74 | 75 | print "\n\nBacktesting portfolio" 76 | self.portfolio.begin() 77 | self.portfolio.generate_returns() 78 | for module in self.backtest_modules: 79 | module.__dict__.update(self.portfolio.__dict__) 80 | 81 | print "\n\nAnalysing results" 82 | for analyser in self.analyser: 83 | analyser.begin() 84 | analyser.generate_analysis() 85 | -------------------------------------------------------------------------------- /core/baseclasses.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | import pandas as pd 3 | 4 | class Strategy(): 5 | """ 6 | Base class for strategies. 7 | Need to implement generate_signals in derived classes. 8 | """ 9 | __metaclass__ = ABCMeta 10 | 11 | def __init__(self, **kwargs): 12 | pass 13 | 14 | def begin(self): 15 | pass 16 | 17 | @abstractmethod 18 | def generate_signals(self): 19 | """ 20 | Initialise signals to empty dataframe. 21 | Needs to filled in derived class. 22 | """ 23 | self.signals = pd.DataFrame(index=self.prices.index, columns=self.prices.columns) 24 | 25 | 26 | class Portfolio(): 27 | """ 28 | Base class for portfolios. 29 | Need to implement generate_positions in derived classes. 
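    Conventions: `signals` is a DataFrame of +1/-1/0 entries per asset,
    `weights` holds the fraction of capital allocated to each asset, and
    the default return calculation is
    (weights.shift(1) * asset_returns * signals.shift(1)).sum(1),
    i.e. positions are taken on the bar after the signal is generated.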
30 | """ 31 | __metaclass__ = ABCMeta 32 | 33 | def __init__(self, **kwargs): 34 | pass 35 | 36 | def begin(self): 37 | self._carry_forward_signals() 38 | self.generate_positions() 39 | 40 | def _carry_forward_signals(self): 41 | self.signals.fillna(method='ffill', inplace=True) 42 | 43 | def generate_returns(self): 44 | """ 45 | Default return calculation 46 | = weighted average of asset returns 47 | """ 48 | self.asset_returns = self.prices.pct_change() 49 | self.returns = (self.weights.shift(1) * self.asset_returns * self.signals.shift(1)).sum(1) 50 | 51 | @abstractmethod 52 | def generate_positions(self): 53 | """ 54 | Initialise positions to empty dataframe. 55 | Needs to be filled in derived class 56 | """ 57 | self.weights = pd.DataFrame(index=self.signals.index, columns=self.signals.columns) 58 | 59 | 60 | class Analyser(): 61 | """ 62 | Base class for analysers 63 | Need to implement generate_analysis in derived classes 64 | """ 65 | def __init__(self): 66 | pass 67 | 68 | def begin(self): 69 | pass 70 | 71 | @abstractmethod 72 | def generate_analysis(self): 73 | pass 74 | 75 | -------------------------------------------------------------------------------- /core/datahandler.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import Quandl 3 | 4 | AUTHTOKEN = 'HpLYEUhzrjV84uXkT9wu' 5 | 6 | class DataHandler(object): 7 | 8 | def __init__(self, symbols, qcodes, date_start, date_end, collapse, datas): ##sampling_rate 9 | self.symbols = symbols 10 | self.qcodes = qcodes 11 | self.date_start = date_start 12 | self.date_end = date_end 13 | self.collapse = collapse 14 | self.datas = datas 15 | 16 | def fetch_data(self): 17 | self.symbols_datas = {} 18 | for symbol, qcode in zip(self.symbols, self.qcodes): 19 | datas = Quandl.get(qcode, 20 | trim_start = self.date_start, 21 | trim_end = self.date_end, 22 | collapse = self.collapse, 23 | authtoken = AUTHTOKEN) 24 | self.symbols_datas[symbol] = datas 25 | 26 | def generate_data(self): 27 | self.fetch_data() 28 | self.datas_symbols = {} 29 | for data in self.datas: 30 | df = pd.DataFrame() 31 | for symbol in self.symbols: 32 | df_tojoin = self.symbols_datas[symbol][data].to_frame() 33 | df_tojoin.rename(columns={data:symbol}, inplace=True) 34 | if df.empty: 35 | df = df_tojoin 36 | else: 37 | df = df.join(df_tojoin, how='outer') 38 | self.datas_symbols[data] = df 39 | return self.datas_symbols 40 | 41 | 42 | def resample(self): 43 | pass 44 | 45 | 46 | -------------------------------------------------------------------------------- /core/parser.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | parser = argparse.ArgumentParser(description='General arguments for running strategy backtests') 4 | 5 | parser.add_argument('-o', '--outdir' , action='store' , dest='outdir' , default='output', help='output directory') 6 | parser.add_argument('-s', '--save' , action='store_true', dest='save' , help='save backtest results') 7 | parser.add_argument('-f', '--full' , action='store_true', dest='full' , help='save full backtest results') 8 | parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='verbose output') 9 | parser.add_argument('-d', '--debug' , action='store_true', dest='debug' , help='debugging output/plots') 10 | 11 | -------------------------------------------------------------------------------- /core/stack.py: -------------------------------------------------------------------------------- 1 | 
import pandas as pd 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | -------------------------------------------------------------------------------- /portfolios/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Xtian9/QuantCode/97f98d2f502f4583d038e264b79abbec5e3cf3ad/portfolios/__init__.py -------------------------------------------------------------------------------- /portfolios/equalweights.py: -------------------------------------------------------------------------------- 1 | from core.stack import * 2 | from core.baseclasses import Portfolio 3 | 4 | class EqualWeightsPortfolio(Portfolio): 5 | 6 | def __init__(self): 7 | #self.__dict__.update(kwargs) 8 | super(EqualWeightsPortfolio, self).__init__() 9 | 10 | def generate_positions(self): 11 | super(EqualWeightsPortfolio, self).generate_positions() 12 | nassets = len(self.weights.columns) 13 | #FIXME: should only assign weights to assets which have signals 14 | self.weights.loc[:,:] = 1. / nassets 15 | 16 | -------------------------------------------------------------------------------- /portfolios/hedgeratioweights.py: -------------------------------------------------------------------------------- 1 | from core.stack import * 2 | from core.baseclasses import Portfolio 3 | 4 | class HedgeRatioWeightsPortfolio(Portfolio): 5 | 6 | def __init__(self): 7 | super(HedgeRatioWeightsPortfolio, self).__init__() 8 | 9 | def generate_positions(self): 10 | super(HedgeRatioWeightsPortfolio, self).generate_positions() 11 | 12 | # note absolute number of shares cancels out 13 | total_invested = self.beta * self.x_prices + self.y_prices 14 | 15 | weight_x = self.x_prices * self.beta / total_invested 16 | weight_y = self.y_prices / total_invested 17 | 18 | self.weights.iloc[:, 0] = weight_x 19 | self.weights.iloc[:, 1] = weight_y 20 | 21 | -------------------------------------------------------------------------------- /research/meanreversion.py: -------------------------------------------------------------------------------- 1 | from core.stack import * 2 | from datahandler import DataHandler 3 | from utils import plotting 4 | import statsmodels.tsa.stattools as ts 5 | #import statsmodels.formula.api as sm 6 | import math 7 | 8 | 9 | ## __________________________________________________________________________|| 10 | ## Config 11 | 12 | #symbols = ['GOOG'] 13 | #qcodes = ['GOOG/NASDAQ_GOOG'] 14 | #date_start = "2000-01-01" 15 | #date_end = "2013-01-01" 16 | 17 | symbols = ['AREX','WLL'] 18 | qcodes = ['GOOG/NASDAQ_AREX','GOOG/NYSE_WLL'] 19 | date_start = "2012-01-01" 20 | date_end = "2013-01-01" 21 | 22 | frequency = "daily" 23 | datas = ['Close'] 24 | trade_time = 'Close' 25 | 26 | #TEST = 'adf' 27 | 28 | 29 | ## __________________________________________________________________________|| 30 | ## Main 31 | 32 | def main(): 33 | 34 | data_handler = DataHandler(symbols, qcodes, date_start, date_end, 35 | frequency, datas) 36 | datas_symbols = data_handler.generate_data() 37 | 38 | if len(symbols) == 1: 39 | prices = datas_symbols[trade_time].iloc[:,0] 40 | else: 41 | prices = datas_symbols[trade_time] 42 | 43 | #plot_time_series(prices) 44 | 45 | #___ Single asset ___# 46 | #print adf(prices) 47 | #print hurst(prices, True) 48 | 49 | #___ Multiple assets ___# 50 | #plot_scatter(prices) 51 | plot_residuals(prices) 52 | #print cadf(prices) 53 | print halflife(residuals(prices)) 54 | 55 | 56 | ## 
__________________________________________________________________________|| 57 | ## Statistical tools 58 | 59 | def adf(prices, nlags=1): 60 | """ 61 | Augmented Dickey Fuller test 62 | """ 63 | adf_res = ts.adfuller(prices, nlags) 64 | 65 | ret = {} 66 | ret['Test statistic'] = adf_res[0] 67 | ret['p-value'] = adf_res[1] 68 | #ret['No. lags'] = adf_res[2] 69 | #ret['No. observations'] = adf_res[3] 70 | ret['Critical values'] = adf_res[4] 71 | 72 | return ret 73 | 74 | 75 | def cadf(prices): 76 | """ 77 | Cointegrating Augmented Dickey Fuller test 78 | """ 79 | return adf(residuals(prices)) 80 | 81 | 82 | def hurst(prices, plot=False): 83 | """ 84 | Hurst exponent test 85 | """ 86 | prices = np.log(prices) 87 | 88 | lags = range(1, 100) 89 | 90 | # Variance of differences for each lag tau 91 | variances = [((prices.shift(-lag) - prices)**2).mean() for lag in lags] 92 | 93 | loglags = np.log(lags) 94 | logvars = np.log(variances) 95 | 96 | # Fit a straight line 97 | b, a = np.polyfit(loglags, logvars, 1) 98 | 99 | if plot: 100 | fig, ax = plt.subplots() 101 | 102 | plt.scatter(loglags, logvars) 103 | plt.plot(loglags, a + b * loglags, lw=2, color='orange') 104 | 105 | plotting.style_default(ax, fig, 106 | xlabel='log(Lag)', ylabel='log(Variance)') 107 | plt.show() 108 | 109 | # Gradient is 2H 110 | hurst = b / 2 111 | 112 | return hurst 113 | 114 | 115 | def vratiotest(): 116 | """ 117 | Variance ratio test 118 | """ 119 | raise NotImplementedError, "See EPChan2 p.45" 120 | 121 | 122 | def halflife(prices): 123 | """ 124 | Half life of mean reversion 125 | using Ornstein-Uhlenbeck 126 | """ 127 | prices_lag1 = prices.shift(1) 128 | delta_prices = prices - prices_lag1 129 | 130 | # EPChan2 Eq. (2.5) 131 | lamda = pd.ols(x=prices_lag1, y=delta_prices).beta.x 132 | 133 | return -math.log(2) / lamda 134 | 135 | 136 | def ols(prices): 137 | """ 138 | Ordinary Least Squares 139 | """ 140 | return pd.ols(x=prices.iloc[:,0], y=prices.iloc[:,1]) 141 | 142 | 143 | def beta(prices): 144 | """ 145 | Slope of linear regression 146 | """ 147 | return ols(prices).beta.x 148 | 149 | 150 | def residuals(prices, beta=None): 151 | """ 152 | Residuals of linear regression 153 | residual = y - beta * x 154 | """ 155 | if beta is None: 156 | beta = globals()['beta'](prices) 157 | 158 | return prices.iloc[:,1] - beta * prices.iloc[:,0] 159 | 160 | 161 | ## __________________________________________________________________________|| 162 | ## Plotting tools 163 | 164 | def plot_time_series(prices, *names): 165 | fig, ax = plt.subplots() 166 | 167 | prices.plot(ax=ax) 168 | plotting.style_default(ax, fig, ylabel='Price') 169 | 170 | plt.show() 171 | 172 | 173 | def plot_scatter(prices): 174 | assert len(prices.columns) == 2 175 | 176 | xprices = prices.iloc[:, 0] 177 | yprices = prices.iloc[:, 1] 178 | 179 | fig, ax = plt.subplots() 180 | ax.scatter(xprices, yprices, label=None) 181 | 182 | b, a = ols(prices).beta 183 | #print sm.ols(formula='{} ~ {}'.format(*reversed(prices.columns)), 184 | # data=prices).fit().summary() 185 | 186 | xmin, xmax = xprices.min(), xprices.max() 187 | axrange = np.arange(xmin, xmax, 0.01 * (xmax - xmin)) 188 | 189 | ax.plot(axrange, a + b * axrange, 190 | lw=4, color='orange', 191 | label='y = {:.2f}x + {:.2f}'.format(b, a)) 192 | 193 | axlabel = 'Price of {}' 194 | plotting.style_default(ax, fig, 195 | xlabel=axlabel.format(prices.columns[0]), 196 | ylabel=axlabel.format(prices.columns[1])) 197 | 198 | plt.show() 199 | 200 | 201 | def plot_residuals(prices): 202 | fig, ax = 
plt.subplots()
203 | 
204 |     #residuals(prices).plot()
205 |     ax.plot(residuals(prices))
206 | 
207 |     plotting.style_default(ax, fig, ylabel='Residual')
208 | 
209 |     plt.show()
210 | 
211 | 
212 | ## __________________________________________________________________________||
213 | if __name__ == "__main__":
214 |     main()
215 | 
216 | 
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | export QUANTCODEDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
2 | cd $QUANTCODEDIR
3 | 
4 | export PYTHONPATH=${PYTHONPATH}:${PWD}
5 | export PYTHONPATH=${PYTHONPATH}:${PWD}/analysers
6 | export PYTHONPATH=${PYTHONPATH}:${PWD}/backtests
7 | export PYTHONPATH=${PYTHONPATH}:${PWD}/core
8 | export PYTHONPATH=${PYTHONPATH}:${PWD}/portfolios
9 | export PYTHONPATH=${PYTHONPATH}:${PWD}/strategies
10 | export PYTHONPATH=${PYTHONPATH}:${PWD}/utils
11 | 
12 | #python splash.py
13 | echo "Welcome to QuantCode"
14 | 
15 | cd - > /dev/null
--------------------------------------------------------------------------------
/strategies/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Xtian9/QuantCode/97f98d2f502f4583d038e264b79abbec5e3cf3ad/strategies/__init__.py
--------------------------------------------------------------------------------
/strategies/buynhold.py:
--------------------------------------------------------------------------------
1 | from core.stack import *
2 | from core.baseclasses import Strategy
3 | 
4 | class BuyAndHoldStrategy(Strategy):
5 | 
6 |     def __init__(self, **kwargs):#, symbols, datas_symbols):
7 |         #self.symbols = symbols
8 |         #self.datas_symbols = datas_symbols
9 |         ## TODO: move this block into super().__init__()
10 |         #self.datas_symbols = None
11 |         #self.cls = self.datas_symbols['Close']
12 |         self.__dict__.update(kwargs)
13 | 
14 |     def generate_signals(self):
15 |         #self.cls = self.datas_symbols[self.trade_time]
16 |         #signals = pd.DataFrame(index=self.cls.index, columns=self.cls.columns)
17 |         self.signals = pd.DataFrame(index=self.prices.index, columns=self.prices.columns)
18 |         self.signals.loc[:,:] = 1  # always long; stored on self so Backtest can pass it to the Portfolio
19 |         return self.signals
20 | 
--------------------------------------------------------------------------------
/strategies/macross.py:
--------------------------------------------------------------------------------
1 | from core.stack import *
2 | from core.baseclasses import Strategy
3 | 
4 | class MovingAverageCrossoverStrategy(Strategy):
5 | 
6 |     def __init__(self, short_window=None, long_window=None):
7 |         super(MovingAverageCrossoverStrategy, self).__init__()
8 | 
9 |         if short_window is None or long_window is None:
10 |             raise ValueError, "Need to choose a MA window"
11 |         self.short_window = short_window
12 |         self.long_window = long_window
13 | 
14 |     def generate_signals(self):
15 |         super(MovingAverageCrossoverStrategy, self).generate_signals()
16 | 
17 |         mavg_short = pd.rolling_mean(self.prices, self.short_window, min_periods=1)
18 |         mavg_long = pd.rolling_mean(self.prices, self.long_window, min_periods=1)
19 | 
20 |         self.signals[mavg_short > mavg_long] = 1
21 |         self.signals[mavg_long > mavg_short] = -1
22 | 
23 |         #signals = 1 * (mavg_short > mavg_long) - 1 * (mavg_long > mavg_short)
24 |         #signals.iloc[:, :] = np.where(mavg_short > mavg_long, 1, -1)
25 | 
26 |         self.signals.iloc[:self.long_window-1, :] = np.nan
27 | 
28 | 
--------------------------------------------------------------------------------
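The README's "Create your own backtest modules" section describes the Strategy contract only in prose, so here is a minimal sketch of a custom strategy in the style of strategies/buynhold.py and strategies/macross.py. The module path and class name (strategies/stayincash.py, StayInCashStrategy) are hypothetical, not part of the repository; the only hard requirement, per core/baseclasses.py, is that generate_signals fills self.signals.

```python
# strategies/stayincash.py -- hypothetical example, not part of the repository
from core.baseclasses import Strategy


class StayInCashStrategy(Strategy):
    """Toy strategy that stays in cash (signal 0) for every asset and bar."""

    def __init__(self, **kwargs):
        super(StayInCashStrategy, self).__init__()
        self.__dict__.update(kwargs)

    def generate_signals(self):
        # The base class initialises self.signals with the same index and
        # columns as self.prices, which Backtest.run attaches beforehand.
        super(StayInCashStrategy, self).generate_signals()
        self.signals.loc[:, :] = 0
```

It would be wired into a backtest exactly like the shipped strategies, e.g. `strategy = StayInCashStrategy()` in a config modelled on backtests/buynhold/buynhold_cfg.py.
--------------------------------------------------------------------------------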
/strategies/mrpairs.py: -------------------------------------------------------------------------------- 1 | from core.stack import * 2 | from core.baseclasses import Strategy 3 | 4 | class MeanReversionPairsStrategy(Strategy): 5 | """ 6 | Pairs trading mean reversion strategy using Bollinger Bands. 7 | Compute hedge ratio on a rolling basis with lookback window. 8 | Go long/short the spread when it's below/above the entry threshold, 9 | exit position when spread is less than exit threshold. 10 | """ 11 | def __init__(self, window=None, zentry=None, zexit=None): 12 | super(MeanReversionPairsStrategy, self).__init__() 13 | 14 | if window is None: 15 | raise ValueError, "Need to choose a lookback window" 16 | if zentry is None or zexit is None: 17 | raise ValueError, "Need to choose an entry/exit z-score" 18 | if window == -1: 19 | print "\nWARNING: Performing regression over entire time frame", \ 20 | "- lookahead bias!\n" 21 | 22 | self.window = window 23 | self.zentry = zentry 24 | self.zexit = zexit 25 | 26 | print "\nRunning strategy with parameters:", \ 27 | "\n\tWindow:", self.window, \ 28 | "\n\tEntry z:", self.zentry, \ 29 | "\n\tExit z:", self.zexit, \ 30 | "\n" 31 | 32 | def begin(self): 33 | super(MeanReversionPairsStrategy, self).begin() 34 | 35 | if len(self.symbols) != 2: 36 | raise ValueError, "Can only handle two assets" 37 | 38 | def generate_signals(self): 39 | super(MeanReversionPairsStrategy, self).generate_signals() 40 | 41 | self.x_prices = self.prices.iloc[:, 0] 42 | self.y_prices = self.prices.iloc[:, 1] 43 | 44 | # Perform linear regression 45 | ols_arg = dict(x=self.x_prices, y=self.y_prices) 46 | if self.window != -1: ols_arg['window'] = self.window 47 | ols_res = pd.ols(**ols_arg) 48 | 49 | # Regression coefficients 50 | self.beta = ols_res.beta.x 51 | alpha = ols_res.beta.intercept 52 | 53 | # Residuals (absorb intercept) 54 | #spread = y_prices - beta * x_prices - alpha 55 | spread = self.y_prices - self.beta * self.x_prices 56 | 57 | # Mean and standard deviation of residuals 58 | if self.window == -1: 59 | spread_mean = spread.mean() 60 | spread_std = spread.std() 61 | else: 62 | spread_mean = pd.rolling_mean(spread, self.window) 63 | spread_std = pd.rolling_std (spread, self.window) 64 | 65 | # Deviation of residuals from mean 66 | z_score = (spread - spread_mean) / spread_std 67 | 68 | longs = z_score < -self.zentry 69 | shorts = z_score > self.zentry 70 | exits = abs(z_score) < self.zexit 71 | 72 | self.signals.loc[longs , :] = np.array( ([-1, 1],)*len(self.signals[longs]) ) 73 | self.signals.loc[shorts, :] = np.array( ([ 1,-1],)*len(self.signals[shorts]) ) 74 | self.signals.loc[exits , :] = np.array( ([ 0, 0],)*len(self.signals[exits]) ) 75 | 76 | if self.options.debug: 77 | #self.debug_output() 78 | plt.scatter(self.x_prices,self.y_prices); plt.show() 79 | if self.window != -1: 80 | self.beta.plot(); plt.show() 81 | spread.plot(); plt.show() 82 | z_score.plot(); plt.show() 83 | 84 | def debug_output(self): 85 | pass 86 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Xtian9/QuantCode/97f98d2f502f4583d038e264b79abbec5e3cf3ad/utils/__init__.py -------------------------------------------------------------------------------- /utils/plotting.py: -------------------------------------------------------------------------------- 1 | from core.stack import * 2 | from utils import timeseries 3 | 4 | 
def style_default(ax, fig=None, title='', xlabel='', ylabel='', 5 | legend=True): 6 | """ 7 | Default plotting style/stuff 8 | """ 9 | if fig: 10 | fig.patch.set_facecolor('white') 11 | 12 | ax.set_title(title) 13 | ax.set_xlabel(xlabel) 14 | ax.set_ylabel(ylabel) 15 | 16 | if legend: 17 | ax.legend(loc='best', frameon=False) 18 | 19 | 20 | def plot_equity_curve(cumrets, cumrets_bm, ax=None): 21 | """ 22 | Plot equity curve 23 | cumrets = series of strategy cumulative returns 24 | cumrets_bm = series of benchmark cumulative returns 25 | """ 26 | if ax is None: 27 | ax = plt.gca() 28 | 29 | ax.plot(cumrets, label='Equity') 30 | ax.plot(cumrets_bm, label='Benchmark') 31 | 32 | ax.axhline(0, linestyle='--', color='black', lw=2) 33 | 34 | style_default(ax, title='Equity curve', 35 | ylabel='Portfolio value growth (%)') 36 | 37 | 38 | def plot_rolling_sharpe(returns, nperiods, rfrate, window, ax=None): 39 | """ 40 | Plot rolling Sharpe ratio 41 | returns = series of returns 42 | nperiods = number of returns in a year 43 | rfrate = risk free rate 44 | window = number of months in sliding window 45 | """ 46 | if ax is None: 47 | ax = plt.gca() 48 | 49 | overall_sharpe = timeseries.sharpe_ratio(returns, nperiods, rfrate) 50 | rolling_sharpe = timeseries.rolling_sharpe(returns, nperiods, 51 | rfrate, 21*window) 52 | 53 | ax.plot(rolling_sharpe, color='orangered', lw=2, 54 | label='Rolling Sharpe') 55 | 56 | ax.axhline(overall_sharpe, 57 | color='steelblue', linestyle='--', lw=3, 58 | label='Overall Sharpe') 59 | 60 | ax.axhline(0, color='black', linestyle='-', lw=2) 61 | 62 | style_default(ax, 63 | title='Rolling Sharpe ratio ({} months)'.format(window), 64 | ylabel='Sharpe ratio') 65 | 66 | 67 | def plot_drawdown(cumrets, ax=None): 68 | """ 69 | Plot drawdown vs time 70 | cumrets = series of cumulative returns 71 | """ 72 | if ax is None: 73 | ax = plt.gca() 74 | 75 | dd = -1 * timeseries.rolling_drawdown(cumrets) 76 | 77 | dd.plot(ax=ax, kind='area', color='coral', alpha=.7) 78 | 79 | style_default(ax, title='Drawdown', 80 | ylabel='Drawdown (%)', legend=False) 81 | 82 | 83 | def plot_top_drawdowns(cumrets, ntop, ddtype, ax=None): 84 | """ 85 | Plot top drawdowns by magntidue and duration 86 | cumrets = series of cumulative returns 87 | ntop = # top drawdowns to plot 88 | ret = dictionary of (fig, ax) for 89 | magnitude and duration plots 90 | """ 91 | if ax is None: 92 | ax = plt.gca() 93 | 94 | colors = [plt.get_cmap('rainbow')(i) for i in np.linspace(0, 1, ntop)] 95 | 96 | dd_info, ddd_info = timeseries.sort_drawdowns(cumrets) 97 | 98 | if ddtype == 'magnitude': 99 | info = dd_info 100 | elif ddtype == 'duration': 101 | info = ddd_info 102 | 103 | ax.plot(cumrets) 104 | 105 | for i, (date_start, date_end, dd) in enumerate(info[:ntop]): 106 | if ddtype == 'magnitude': 107 | label = '{:.1f}%'.format(dd*100) 108 | elif ddtype == 'duration': 109 | label = '{:.0f}'.format(dd) 110 | 111 | ax.fill_between((date_start, date_end), 112 | *ax.get_ylim(), 113 | alpha=.3, color=colors[ntop-i-1], 114 | label=label) 115 | 116 | style_default(ax, 117 | title='Top drawdown periods by {}'.format(ddtype), 118 | ylabel='Cumulative net return (%)') 119 | 120 | 121 | def plot_returns_distr(returns, frequency=None, bins=20, ax=None, **kwargs): 122 | """ 123 | Plot distribution of returns 124 | returns = series of daily returns 125 | frequency = frequency of returns to be plotted 126 | e.g. 
daily, monthly 127 | bins = number of histogram bins 128 | kwargs = optional arguments for hist plot 129 | """ 130 | if ax is None: 131 | ax = plt.gca() 132 | 133 | aggrets = 100 * (timeseries.aggregate_returns( 134 | returns, frequency) if frequency else returns) 135 | 136 | aggrets.dropna(inplace=True) 137 | 138 | aggrets_mean = aggrets.mean() 139 | 140 | ax.hist(aggrets, color='orangered', alpha=.8, bins=bins, **kwargs) 141 | 142 | ax.axvline(aggrets_mean, 143 | color='gold', linestyle='--', lw=2, 144 | label='Mean: {:.1f}%'.format(aggrets_mean)) 145 | 146 | ax.axvline(0, color='black', linestyle='-', lw=1) 147 | 148 | style_default(ax, 149 | title='Distribution of {} returns'.format(frequency), 150 | xlabel='Return (%)') 151 | 152 | -------------------------------------------------------------------------------- /utils/timeseries.py: -------------------------------------------------------------------------------- 1 | from core.stack import * 2 | import math 3 | 4 | # Number of periods in a year 5 | ANNUALISATION_FACTORS = { 6 | 'hourly' : 252 * 6.5, 7 | 'daily' : 252, 8 | 'weekly' : 52, 9 | 'monthly' : 12, 10 | 'yearly' : 1 11 | } 12 | 13 | 14 | def cumulate_returns(returns): 15 | """ 16 | Calculate cumulative returns 17 | returns = series of returns 18 | """ 19 | return (1 + returns).cumprod() - 1 20 | 21 | 22 | def aggregate_returns(returns, frequency): 23 | """ 24 | Aggregate returns by day, month, etc. 25 | returns = series of returns 26 | frequency = frequency for aggregation 27 | (daily, monthly, yearly) 28 | """ 29 | dates = returns.index 30 | 31 | if frequency == 'yearly': 32 | to_groupby = [dates.year] 33 | 34 | elif frequency == 'monthly': 35 | to_groupby = [dates.year, dates.month] 36 | 37 | elif frequency == 'daily': 38 | to_groupby = [dates.year, dates.month, dates.day] 39 | 40 | elif frequency == 'weekly': 41 | to_groupby = [dates.year, dates.weekofyear] 42 | 43 | else: 44 | raise ValueError( 45 | 'Frequency "{}" not valid or supported'.format(frequency)) 46 | 47 | return returns.groupby(to_groupby).apply(total_return) 48 | 49 | 50 | def total_return(returns): 51 | """ 52 | Calculate overall return 53 | returns = series of returns 54 | """ 55 | return cumulate_returns(returns)[-1] 56 | 57 | 58 | def annualised_return(returns, frequency): 59 | """ 60 | Calculate annualised return 61 | returns = series of returns 62 | frequency = frequency of returns 63 | e.g. daily, monthly 64 | """ 65 | nperiods = ANNUALISATION_FACTORS[frequency] 66 | return nperiods * returns.mean() 67 | 68 | 69 | def annualised_volatility(returns, frequency): 70 | """ 71 | Calculate annualised volatility 72 | returns = series of returns 73 | frequency = frequency of returns 74 | e.g. daily, monthly 75 | """ 76 | nperiods = ANNUALISATION_FACTORS[frequency] 77 | return math.sqrt(nperiods) * returns.std() 78 | 79 | 80 | def sharpe_ratio(returns, frequency, rfrate): 81 | """ 82 | Calculate annualised Sharpe ratio 83 | returns = series of returns 84 | frequency = frequency of returns 85 | e.g. daily, monthly 86 | rfrate = risk free rate 87 | """ 88 | mu = annualised_return(returns, frequency) 89 | sigma = annualised_volatility(returns, frequency) 90 | 91 | return (mu - rfrate) / sigma 92 | 93 | 94 | def rolling_sharpe(returns, frequency, rfrate, window): 95 | """ 96 | Calculate rolling Sharpe ratio 97 | returns = series of returns 98 | frequency = frequency of returns 99 | e.g. 
daily, monthly
100 |     rfrate = risk free rate
101 |     window = number of periods in rolling window
102 |     """
103 |     nperiods = ANNUALISATION_FACTORS[frequency]
104 | 
105 |     mu = nperiods * pd.rolling_mean(returns, window)
106 |     sigma = math.sqrt(nperiods) * pd.rolling_std(returns, window)
107 | 
108 |     return (mu - rfrate) / sigma
109 | 
110 | 
111 | def information_ratio(returns, bmreturns, frequency):
112 |     """
113 |     Calculate annualised information ratio
114 |     returns = series of portfolio returns
115 |     bmreturns = series of benchmark returns
116 |     frequency = frequency of returns
117 |                 e.g. daily, monthly
118 |     """
119 |     nperiods = ANNUALISATION_FACTORS[frequency]
120 | 
121 |     # Excess returns over benchmark
122 |     excreturns = returns - bmreturns
123 | 
124 |     # Annualised mean excess return and volatility
125 |     mu = nperiods * excreturns.mean()
126 |     sigma = math.sqrt(nperiods) * excreturns.std()
127 | 
128 |     return mu / sigma
129 | 
130 | 
131 | def alpha_beta(returns, bmreturns):
132 |     """
133 |     Calculate portfolio alpha and beta
134 |     returns = series of portfolio returns
135 |     bmreturns = series of benchmark returns
136 |     """
137 |     # Regress portfolio returns against market returns
138 |     ols_res = pd.ols(y=returns, x=bmreturns)
139 | 
140 |     # alpha is intercept, beta is gradient
141 |     beta, alpha = ols_res.beta
142 | 
143 |     return alpha, beta
144 | 
145 | 
146 | def rolling_max(series):
147 |     """
148 |     Calculate rolling maximum of a series
149 |     """
150 |     return series.cummax()
151 | 
152 | 
153 | def rolling_drawdown(cumrets):
154 |     """
155 |     Calculate rolling drawdown
156 |     cumrets = series of cumulative returns
157 |     dd = series of drawdowns (positive)
158 |     """
159 |     cummax = rolling_max(cumrets)
160 |     dd = (1 + cummax) / (1 + cumrets) - 1
161 | 
162 |     return dd
163 | 
164 | 
165 | def rolling_drawdown_duration(cumrets):
166 |     """
167 |     Calculate rolling drawdown duration
168 |     cumrets = series of cumulative returns
169 |     ddd = series of drawdown durations
170 |     """
171 |     drawdown = rolling_drawdown(cumrets)
172 |     drawdownduration = pd.Series(index=cumrets.index)
173 | 
174 |     prev_date = None
175 |     for i, (date, ret) in enumerate(cumrets.iteritems()):
176 | 
177 |         if i == 0:
178 |             drawdownduration[date] = 0
179 |             # fall through so prev_date gets set below
180 | 
181 |         else:
182 |             dd = drawdown[date]
183 |             ddd = drawdownduration[prev_date] + 1 if dd > 0 else 0
184 |             drawdownduration[date] = ddd
185 | 
186 |         prev_date = date
187 | 
188 |     return drawdownduration
189 | 
190 | 
191 | def max_drawdown(cumrets):
192 |     """
193 |     Calculate maximum drawdown size
194 |     cumrets = series of cumulative returns
195 |     """
196 |     return rolling_drawdown(cumrets).max()
197 | 
198 | 
199 | def max_drawdown_duration(cumrets):
200 |     """
201 |     Calculate maximum drawdown duration
202 |     cumrets = series of cumulative returns
203 |     """
204 |     return rolling_drawdown_duration(cumrets).max()
205 | 
206 | 
207 | def sort_drawdowns(cumrets):
208 |     """
209 |     Return drawdown periods ordered by
210 |     drawdown size and drawdown duration
211 |     cumrets = series of cumulative returns
212 |     dd_info = list of tuples
213 |               (start_date, end_date, dd)
214 |               ordered by drawdown size
215 |     ddd_info = list of tuples
216 |                (start_date, end_date, ddd)
217 |                ordered by drawdown duration
218 |     """
219 |     dd_info, ddd_info = [], []
220 | 
221 |     dd = rolling_drawdown(cumrets)
222 |     ddd = rolling_drawdown_duration(cumrets)
223 | 
224 |     # Holders
225 |     start_date, end_date = None, None
226 |     dd_vec, ddd_vec = [], []
227 | 
228 |     for t, date in enumerate(cumrets.index):
229 |         if np.isnan(cumrets[date]):
230 |             continue
231 | 
232 |         # No drawdown
233 |         if ddd[date] == 0:
234 |             # End of drawdown
235 |             if start_date is not None:
236 |                 end_date = date
237 |                 dd_info.append((start_date, end_date, max(dd_vec)))
238 |                 ddd_info.append((start_date, end_date, max(ddd_vec)))
239 | 
240 |                 start_date, end_date = None, None
241 |                 dd_vec, ddd_vec = [], []
242 | 
243 |         # Continuation or start of drawdown
244 |         else:
245 |             # Start of drawdown
246 |             if start_date is None:
247 |                 start_date = date
248 | 
249 |             dd_vec.append(dd[date])
250 |             ddd_vec.append(ddd[date])
251 | 
252 |     for info in (dd_info, ddd_info):
253 |         info.sort(key=lambda tup: tup[2], reverse=True)
254 | 
255 |     return dd_info, ddd_info
256 | 
257 | 
--------------------------------------------------------------------------------
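For reference, a short sketch of how the utils/timeseries helpers above compose; it assumes the legacy-pandas / Python 2 environment this repository targets, and the returns series is made-up sample data rather than anything produced by the backtester.

```python
# Hypothetical usage of utils/timeseries -- not part of the repository
from core.stack import *          # pandas / numpy / matplotlib, as in core/stack.py
from utils import timeseries

# A toy series of daily strategy returns
returns = pd.Series([0.010, -0.005, 0.002, 0.007, -0.001],
                    index=pd.date_range('2015-01-01', periods=5))

cumrets = timeseries.cumulate_returns(returns)             # (1 + r).cumprod() - 1
sharpe = timeseries.sharpe_ratio(returns, 'daily', 0.04)   # annualised, rf = 4%
max_dd = timeseries.max_drawdown(cumrets)                  # size as a positive fraction
max_ddd = timeseries.max_drawdown_duration(cumrets)        # duration in periods
```

These are the same calls PerformanceAnalyser makes in analysers/performance.py.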