├── README.md
├── pairtrade.py
├── OLMAR_universe.py
└── OLMAR.py

/README.md:
--------------------------------------------------------------------------------
quantopian-algos
================

Library of algorithm scripts for Quantopian
--------------------------------------------------------------------------------
/pairtrade.py:
--------------------------------------------------------------------------------
"""
Ports Thomas Wiecki's pairtrade algorithm from PyData 2012.

Original source:
https://github.com/quantopian/zipline/blob/master/zipline/examples/pairtrade.py
"""

import numpy as np
from statsmodels import api as sm


@batch_transform(refresh_period=100, days=100)
def ols_transform(datapanel, sid1, sid2):
    """
    Computes the regression coefficients (slope and intercept)
    via Ordinary Least Squares between two SIDs.
    """
    p0 = datapanel.price[sid1]
    p1 = sm.add_constant(datapanel.price[sid2])
    slope, intercept = sm.OLS(p0, p1).fit().params

    return slope, intercept

"""Pairtrading relies on the cointegration of two stocks.

The expectation is that once the two stocks have drifted apart
(i.e. a spread has opened up), they will eventually revert again.
Thus, if we short the upward-drifting stock and long the
downward-drifting stock (in short, we buy the spread) once the
spread has widened, we can sell the spread at a profit once the
two prices converge again. A nice property of this algorithm is
that we enter the market in a neutral position.

This specific algorithm tries to exploit the cointegration of
Pepsi and Coca-Cola by estimating the linear relationship (slope
and intercept) between the two via OLS. Divergence of the spread
is evaluated by z-scoring.
"""

def initialize(context):
    context.spreads = []
    context.zscores = []
    context.invested = False
    context.window_length = 100


def handle_data(context, data):
    ######################################################
    # 1. Compute regression coefficients between PEP and KO
    params = ols_transform(data, sid(5885), sid(4283))
    if params is None:
        return
    slope, intercept = params

    ######################################################
    # 2. Compute spread and zscore
    zscore = compute_zscore(context, data, slope, intercept)
    context.zscores.append(zscore)

    ######################################################
    # 3. Place orders
    place_orders(context, data, zscore)


def compute_zscore(context, data, slope, intercept):
    """1. Compute the spread given slope and intercept.
    2. Z-score the spread against its trailing window.
    """
    spread = (data[sid(5885)].price -
              (slope * data[sid(4283)].price + intercept))
    context.spreads.append(spread)
    spread_wind = context.spreads[-context.window_length:]
    zscore = (spread - np.mean(spread_wind)) / np.std(spread_wind)
    return zscore

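
# --- Illustrative helper (added; not part of the original algorithm) ---------
# The sketch below reproduces, on plain numpy arrays, the spread/z-score
# calculation that compute_zscore performs incrementally inside handle_data:
# spread_t = pep_t - (slope * ko_t + intercept), z-scored against its trailing
# window. The function name and arguments are hypothetical and exist only so
# the calculation can be inspected outside of Quantopian.
def example_spread_zscore(pep_prices, ko_prices, slope, intercept, window=100):
    """Return the z-score of the latest spread relative to its trailing window."""
    pep_prices = np.asarray(pep_prices, dtype=float)
    ko_prices = np.asarray(ko_prices, dtype=float)
    spreads = pep_prices - (slope * ko_prices + intercept)
    spread_wind = spreads[-window:]
    return (spreads[-1] - np.mean(spread_wind)) / np.std(spread_wind)
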
def place_orders(context, data, zscore):
    """Buy the spread if the zscore is above 2 or below -2;
    unwind the position once abs(zscore) falls below .5.
    """
    if zscore >= 2.0 and not context.invested:
        log.info("zscore above 2: long PEP, short KO")
        order(sid(5885), int(100000 / data[sid(5885)].price))
        order(sid(4283), -int(100000 / data[sid(4283)].price))
        context.invested = True
    elif zscore <= -2.0 and not context.invested:
        log.info("zscore below -2: short PEP, long KO")
        order(sid(5885), -int(100000 / data[sid(5885)].price))
        order(sid(4283), int(100000 / data[sid(4283)].price))
        context.invested = True
    elif abs(zscore) < .5 and context.invested:
        sell_spread(context)
        context.invested = False


def sell_spread(context):
    """
    Decrease exposure, regardless of position long/short:
    buy back the short leg, sell off the long leg.
    """
    ko_amount = context.portfolio.positions[sid(4283)].amount
    order(sid(4283), -1 * ko_amount)
    pep_amount = context.portfolio.positions[sid(5885)].amount
    order(sid(5885), -1 * pep_amount)
--------------------------------------------------------------------------------
/OLMAR_universe.py:
--------------------------------------------------------------------------------
import numpy as np
import datetime

def initialize(context):
    context.eps = 2  # change epsilon here
    context.init = False
    context.counter = 0
    context.stocks = []
    set_slippage(slippage.VolumeShareSlippage(volume_limit=0.25, price_impact=0, delay=datetime.timedelta(minutes=0)))
    set_commission(commission.PerShare(cost=0))
    set_universe(universe.DollarVolumeUniverse(floor_percentile=98.0, ceiling_percentile=100.0))

def handle_data(context, data):
    context.counter += 1
    if context.counter <= 5:
        return

    context.stocks = [sid for sid in data]
    m = len(context.stocks)

    if not context.init:
        context.b_t = np.ones(m) / m
        rebalance_portfolio(context, data, context.b_t)
        context.init = True
        return

    if len(context.b_t) > m:
        # need to shrink the portfolio vector (securities left the universe)
        context.b_t = context.b_t[:m]
    elif len(context.b_t) < m:
        # need to grow the portfolio vector (new securities entered the universe)
        len_bt = len(context.b_t)
        context.b_t = np.concatenate([context.b_t, np.ones(m - len_bt) / m])

    assert len(context.b_t) == m
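
    # --- Note (added for clarity; not part of the original port) -------------
    # The block below is the OLMAR update (Li & Hoi, "On-Line Portfolio
    # Selection with Moving Average Reversion", ICML 2012, Algorithm 2).
    # For each security it forms the price relative
    #     x_tilde_i = mavg_5(p_i) / p_i,
    # i.e. the return the security would earn if its price reverted to its
    # 5-day moving average. The weights are then updated as
    #     b_{t+1} = b_t + step_size * (x_tilde - mean(x_tilde)),
    # where
    #     step_size = max(0, (eps - b_t . x_tilde) / ||x_tilde - mean(x_tilde)||^2),
    # and finally projected back onto the simplex so that the weights stay
    # non-negative and sum to one.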

    x_tilde = np.zeros(m)

    b = np.zeros(m)

    # find the relative moving average price for each security
    for i, stock in enumerate(context.stocks):
        price = data[stock].price
        x_tilde[i] = data[stock].mavg(5) / price

    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()

    # market relative deviation
    mark_rel_dev = x_tilde - x_bar

    # Expected return with current portfolio
    exp_return = np.dot(context.b_t, x_tilde)
    log.debug("Expected Return: {exp_return}".format(exp_return=exp_return))
    weight = context.eps - exp_return
    log.debug("Weight: {weight}".format(weight=weight))
    variability = (np.linalg.norm(mark_rel_dev)) ** 2
    log.debug("Variability: {norm}".format(norm=variability))
    # guard against the divide-by-zero case
    if variability == 0.0:
        step_size = 0  # no portfolio update
    else:
        step_size = max(0, weight / variability)
    log.debug("Step-size: {size}".format(size=step_size))
    log.debug("Market relative deviation:")
    log.debug(mark_rel_dev)
    log.debug("Weighted market relative deviation:")
    log.debug(step_size * mark_rel_dev)
    b = context.b_t + step_size * mark_rel_dev
    b_norm = simplex_projection(b)
    #np.testing.assert_almost_equal(b_norm.sum(), 1)

    rebalance_portfolio(context, data, b_norm)

    # Predicted return with new portfolio
    pred_return = np.dot(b_norm, x_tilde)
    log.debug("Predicted return: {pred_return}".format(pred_return=pred_return))

    # Make sure that we actually optimized our objective
    #assert exp_return - .001 <= pred_return, "{new} <= {old}".format(new=pred_return, old=exp_return)
    # update portfolio
    context.b_t = b_norm

def rebalance_portfolio(context, data, desired_port):
    log.debug('desired portfolio: {0}'.format(desired_port))
    desired_amount = np.zeros_like(desired_port)
    current_amount = np.zeros_like(desired_port)
    prices = np.zeros_like(desired_port)

    if context.init:
        positions_value = context.portfolio.starting_cash
    else:
        positions_value = context.portfolio.positions_value + context.portfolio.cash

    for i, stock in enumerate(context.stocks):
        current_amount[i] = context.portfolio.positions[stock].amount
        prices[i] = data[stock].price

    desired_amount = np.round(desired_port * positions_value / prices)
    diff_amount = desired_amount - current_amount
    for i, stock in enumerate(context.stocks):
        order(stock, diff_amount[i])  # order_stock

def simplex_projection(v, b=1):
    """Project a vector onto the simplex domain.

    Implemented according to the paper: Efficient projections onto the
    l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
    Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
    Optimization Problem: min_{w} \| w - v \|_{2}^{2}
    s.t. \sum_{i=1}^{m} w_{i} = b, w_{i} \geq 0

    Input: A vector v \in R^{m}, and a scalar b > 0 (default=1)
    Output: Projection vector w

    :Example:
    >>> proj = simplex_projection([.4, .3, -.4, .5])
    >>> print proj
    array([ 0.33333333,  0.23333333,  0.        ,  0.43333333])
    >>> print proj.sum()
    1.0

    Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu)
    Python-port: Copyright 2012 by Thomas Wiecki (thomas.wiecki@gmail.com).
    """
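
    # --- Note (added for clarity; not part of the original port) -------------
    # Outline of the steps below:
    #   1. negative entries of v are clipped to zero,
    #   2. the clipped vector is sorted in descending order into u and its
    #      cumulative sum sv is computed,
    #   3. rho is the last index at which u > (sv - b) / (index + 1), and
    #      theta = max(0, (sv[rho] - b) / (rho + 1)) is the threshold to
    #      subtract,
    #   4. the projection is w = max(v - theta, 0): every entry is shifted
    #      down by theta and clipped at zero.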

    v = np.asarray(v)
    p = len(v)

    # Clip v at zero and sort it into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])
    w = (v - theta)
    w[w < 0] = 0
    return w
--------------------------------------------------------------------------------
/OLMAR.py:
--------------------------------------------------------------------------------
import numpy as np
import datetime

def initialize(context):
    # http://money.usnews.com/funds/etfs/rankings/small-cap-funds
    #context.stocks = [sid(27796),sid(33412),sid(38902),sid(21508),sid(39458),sid(25899),sid(40143),sid(21519),sid(39143),sid(26449)]
    # http://www.minyanville.com/sectors/technology/articles/facebook-broadcom-ezchip-among-top-tech/1/23/2013/id/47014?page=full
    #context.stocks = [sid(26578),sid(42950),sid(19831),sid(18529),sid(4507),sid(32724),sid(16453),sid(20387),sid(20866),sid(23821)]
    #['AMD', 'CERN', 'COST', 'DELL', 'GPS', 'INTC', 'MMM']
    context.stocks = [sid(351), sid(1419), sid(1787), sid(25317), sid(3321), sid(3951), sid(4922)]
    context.m = len(context.stocks)
    context.b_t = np.ones(context.m) / context.m
    context.eps = 1  # change epsilon here
    context.init = False
    context.counter = 0

    set_slippage(slippage.VolumeShareSlippage(volume_limit=0.25, price_impact=0, delay=datetime.timedelta(minutes=0)))
    set_commission(commission.PerShare(cost=0))

def handle_data(context, data):
    context.counter += 1
    if context.counter <= 5:
        return

    if not context.init:
        rebalance_portfolio(context, data, context.b_t)
        context.init = True
        return

    m = context.m

    x_tilde = np.zeros(m)

    b = np.zeros(m)

    # find the relative moving average price for each security
    for i, stock in enumerate(context.stocks):
        price = data[stock].price
        x_tilde[i] = data[stock].mavg(5) / price

    ###########################
    # Inside of OLMAR (algo 2)
    x_bar = x_tilde.mean()

    # market relative deviation
    mark_rel_dev = x_tilde - x_bar

    # Expected return with current portfolio
    exp_return = np.dot(context.b_t, x_tilde)
    log.debug("Expected Return: {exp_return}".format(exp_return=exp_return))
    weight = context.eps - exp_return
    log.debug("Weight: {weight}".format(weight=weight))
    variability = (np.linalg.norm(mark_rel_dev)) ** 2
    log.debug("Variability: {norm}".format(norm=variability))
    # guard against the divide-by-zero case
    if variability == 0.0:
        step_size = 0  # no portfolio update
    else:
        step_size = max(0, weight / variability)
    log.debug("Step-size: {size}".format(size=step_size))
    log.debug("Market relative deviation:")
    log.debug(mark_rel_dev)
    log.debug("Weighted market relative deviation:")
    log.debug(step_size * mark_rel_dev)
    b = context.b_t + step_size * mark_rel_dev
    b_norm = simplex_projection(b)
    np.testing.assert_almost_equal(b_norm.sum(), 1)

    rebalance_portfolio(context, data, b_norm)

    # Predicted return with new portfolio
    pred_return = np.dot(b_norm, x_tilde)
    log.debug("Predicted return: {pred_return}".format(pred_return=pred_return))

    # Make sure that we actually optimized our objective
    assert exp_return - .001 <= pred_return, "{new} <= {old}".format(new=pred_return, old=exp_return)
    # update portfolio
    context.b_t = b_norm

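# --- Note (added for clarity; not part of the original port) -----------------
# rebalance_portfolio below translates the target weight vector into share
# orders: desired_shares_i = round(weight_i * portfolio_value / price_i), and
# the order placed is the difference between desired and currently-held
# shares. For example, with weights [0.5, 0.3, 0.2], a $100,000 portfolio and
# prices [$50, $30, $20], the desired holdings are [1000, 1000, 1000] shares;
# if the account currently holds [800, 1000, 1200] shares, the orders are
# [+200, 0, -200].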
def rebalance_portfolio(context, data, desired_port):
    log.debug('desired portfolio: {0}'.format(desired_port))
    desired_amount = np.zeros_like(desired_port)
    current_amount = np.zeros_like(desired_port)
    prices = np.zeros_like(desired_port)

    if context.init:
        positions_value = context.portfolio.starting_cash
    else:
        positions_value = context.portfolio.positions_value + context.portfolio.cash

    for i, stock in enumerate(context.stocks):
        current_amount[i] = context.portfolio.positions[stock].amount
        prices[i] = data[stock].price

    desired_amount = np.round(desired_port * positions_value / prices)
    diff_amount = desired_amount - current_amount
    for i, stock in enumerate(context.stocks):
        order(stock, diff_amount[i])  # order_stock

def simplex_projection(v, b=1):
    """Project a vector onto the simplex domain.

    Implemented according to the paper: Efficient projections onto the
    l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
    Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
    Optimization Problem: min_{w} \| w - v \|_{2}^{2}
    s.t. \sum_{i=1}^{m} w_{i} = b, w_{i} \geq 0

    Input: A vector v \in R^{m}, and a scalar b > 0 (default=1)
    Output: Projection vector w

    :Example:
    >>> proj = simplex_projection([.4, .3, -.4, .5])
    >>> print proj
    array([ 0.33333333,  0.23333333,  0.        ,  0.43333333])
    >>> print proj.sum()
    1.0

    Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu)
    Python-port: Copyright 2012 by Thomas Wiecki (thomas.wiecki@gmail.com).
    """

    v = np.asarray(v)
    p = len(v)

    # Clip v at zero and sort it into u in descending order
    v = (v > 0) * v
    u = np.sort(v)[::-1]
    sv = np.cumsum(u)

    rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
    theta = np.max([0, (sv[rho] - b) / (rho + 1)])
    w = (v - theta)
    w[w < 0] = 0
    return w
--------------------------------------------------------------------------------