├── lucas_trades.json ├── eval.py ├── train_reinforce.py ├── get_data.py ├── agent.py ├── env.py ├── README.md └── trader.py /lucas_trades.json: -------------------------------------------------------------------------------- 1 | { 2 | "username": "lucas", 3 | "balance": 10000, 4 | "stocks": { 5 | } 6 | } -------------------------------------------------------------------------------- /eval.py: -------------------------------------------------------------------------------- 1 | import trader 2 | from env import Environment 3 | from agent import Agent 4 | import datetime 5 | import sys 6 | 7 | WINDOW_SIZE = 10 8 | EPOCHS = 200 9 | BATCH_SIZE = 95 10 | MODEL_NAME = 'model_ep199' 11 | 12 | stock_name = sys.argv[1] 13 | 14 | 15 | def run_trader(): 16 | 17 | p = trader.Portfolio('lucas') 18 | a = Agent(WINDOW_SIZE, is_eval=True, model_name=MODEL_NAME) 19 | e = Environment(WINDOW_SIZE, EPOCHS, BATCH_SIZE, stock_name, train=False) 20 | 21 | 22 | t_330pm = datetime.time(hour=15, minute=30) 23 | 24 | 25 | while True: 26 | now = datetime.datetime.now() 27 | now = now.time() 28 | 29 | if now == t_330pm and trader.is_market_open(): # Perform the action near the end of the day 30 | 31 | state = e.get_state() 32 | action = a.act(state) 33 | 34 | if action == 1: # BUY 35 | p.place_buy_order('AMD', e.stock.vec[-1]) 36 | elif action == 2: # SELL 37 | p.place_sell_order('AMD', e.stock.vec[-1]) 38 | 39 | 40 | 41 | # Main program that will use the trained model to trade 42 | if __name__ == "__main__": 43 | 44 | run_trader() 45 | -------------------------------------------------------------------------------- /train_reinforce.py: -------------------------------------------------------------------------------- 1 | from agent import Agent 2 | from env import Environment 3 | import sys 4 | 5 | 6 | WINDOW_SIZE = 10 7 | EPOCHS = 200 8 | BATCH_SIZE = 30 9 | stock_symbol = sys.argv[1] 10 | 11 | def train_stock_model(agent, stockenv): 12 | 13 | for e in range(EPOCHS + 1): 14 | 15 | print("Episode " + str(e) + "/" + str(EPOCHS)) 16 | state = stockenv.get_state(t=WINDOW_SIZE + 1) 17 | 18 | agent.inventory = [] 19 | stockenv.reset_params() 20 | 21 | 22 | for t in range(WINDOW_SIZE, stockenv.data_len-1): 23 | 24 | action = agent.act(state) 25 | next_state = stockenv.get_state(t = t + 1) 26 | reward = stockenv.step(agent, action, t) 27 | 28 | 29 | done = True if t == stockenv.data_len - 1 else False 30 | agent.memorize(state, action, reward, next_state, done) 31 | state = next_state 32 | 33 | if len(agent.memory) > BATCH_SIZE: 34 | agent.replay(BATCH_SIZE) 35 | 36 | dir = stock_symbol +'_models' 37 | agent.model.save(dir+ "/model_ep" + str(e)) 38 | 39 | 40 | price = stockenv.stock.vec[len(stockenv.stock.vec) - 1] 41 | book, net = stockenv.p.get_net_worth(stockenv.symbol, price) 42 | print(' Net Profit : ' + str(net - 100000)) 43 | print('History is : ', stockenv.history) 44 | print('Buys : ' + str(stockenv.buy_count) +' Sells: ' + str(stockenv.sell_count) + '\n') 45 | 46 | 47 | 48 | 49 | env = Environment(WINDOW_SIZE, EPOCHS, BATCH_SIZE, stock_symbol) 50 | agent = Agent(WINDOW_SIZE, stock_symbol) 51 | 52 | train_stock_model(agent, env) 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /get_data.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import pandas as pd 3 | import yfinance as yf 4 | from finta import TA 5 | 6 | 7 | 8 | class GetData: 9 | """ 10 | Class to retrieve the training data for a given stock 11 | """ 12 | NUM_DAYS = 365 13 | INDICATORS = ['EMA', 'RSI', 'MACD'] 14 | NOTUSED_STATE = ['high', 'low', 'open', 'Adj Close', 'volume'] 15 | 16 | def __init__(self, stock, train): 17 | """ 18 | Function to get the past 5 days of data for a stock, minute by minute for training 19 | For live data, just need todays 1m interval data 20 | :param stock: symbol of the stock 21 | """ 22 | if train: 23 | start = (datetime.date.today() - datetime.timedelta( self.NUM_DAYS ) ) 24 | end = datetime.datetime.today() 25 | self.data = yf.download(stock, start=start, end=end, interval='1d') 26 | self.data.rename(columns={"Close": 'close', "High": 'high', "Low": 'low', 'Volume': 'volume', 'Open': 'open'}, inplace=True) 27 | print(self.data) 28 | else: 29 | start = datetime.date.today() 30 | end = datetime.datetime.today() + datetime.timedelta( 1 ) 31 | self.data = yf.download(stock, start=start, end=end, interval='1d') 32 | self.data.rename(columns={"Close": 'close', "High": 'high', "Low": 'low', 'Volume': 'volume', 'Open': 'open'}, inplace=True) 33 | 34 | 35 | def get_indicator_data(self): 36 | """ 37 | Function that adds the indicators to the data table used in analysis 38 | Can add whichever indicators you would need 39 | :return: 40 | """ 41 | 42 | for indicator in self.INDICATORS: 43 | ind_data = eval('TA.' + indicator + '(self.data)') 44 | if not isinstance(ind_data, pd.DataFrame): 45 | ind_data = ind_data.to_frame() 46 | self.data = self.data.merge(ind_data, left_index=True, right_index=True) 47 | 48 | def update_data(self, symbol): 49 | start = datetime.date.today() 50 | end = datetime.datetime.today() + datetime.timedelta(1) 51 | self.data = yf.download(symbol, start=start, end=end, interval='1m') 52 | self.data.rename(columns={"Close": 'close', "High": 'high', "Low": 'low', 'Volume': 'volume', 'Open': 'open'},inplace=True) 53 | 54 | return self.format_data() 55 | 56 | def format_data(self): 57 | """ 58 | Return the data in a form that can be passed into the neural net (numpy array) 59 | :return: 60 | """ 61 | 62 | # Filter out the other columns and transform into a np array 63 | state = self.data.drop( self.NOTUSED_STATE, axis=1 ) 64 | self.vec = state.values.flatten() 65 | return self.vec 66 | 67 | 68 | -------------------------------------------------------------------------------- /agent.py: -------------------------------------------------------------------------------- 1 | from keras.models import Sequential 2 | from keras.models import load_model 3 | from keras.layers import Dense 4 | from keras.optimizers import Adam 5 | 6 | import numpy as np 7 | import random 8 | from collections import deque 9 | 10 | class Agent: 11 | def __init__(self, state_size, is_eval=False, model_name=""): 12 | """ 13 | Initialization of the Agent 14 | 15 | :param state_size: WINDOW_LENGTH, used for the input dimension of the model 16 | :param is_eval: bool variable to determine if we are using a saved model or not 17 | :param model_name: name of the model in the directory models/ 18 | """ 19 | self.state_size = state_size # normalized previous days 20 | self.action_size = 3 # sit, buy, sell 21 | self.memory = deque(maxlen=1000) 22 | self.inventory = [] 23 | self.model_name = model_name 24 | self.is_eval = is_eval 25 | 26 | # RL variables 27 | self.gamma = 0.95 28 | self.epsilon = 1.0 29 | self.epsilon_min = 0.01 30 | self.epsilon_decay = 0.995 31 | 32 | self.model = load_model("amd_models/" + model_name) if is_eval else self.model() 33 | 34 | def model(self): 35 | """ 36 | Creation of the ANN using Tensorflow 37 | """ 38 | 39 | model = Sequential() 40 | model.add(Dense(units=64, input_dim=self.state_size, activation="relu")) 41 | model.add(Dense(units=32, activation="relu")) 42 | model.add(Dense(units=8, activation="relu")) 43 | model.add(Dense(self.action_size, activation="linear")) 44 | model.compile(loss="mse", optimizer=Adam(lr=0.001)) 45 | 46 | return model 47 | 48 | def act(self, state): 49 | """ 50 | Function where the model outputs a prediction given the state 51 | :param state: numpy array representation of the current stae to be fed into the network 52 | :return: Value of 0 : Hold 1 : Buy 2 : Sell 53 | """ 54 | 55 | if not self.is_eval and np.random.rand() <= self.epsilon: 56 | return random.randrange(self.action_size) 57 | 58 | options = self.model.predict(state) 59 | return np.argmax(options[0]) 60 | 61 | 62 | def replay(self, batch_size): 63 | """ 64 | Function where the training takes place 65 | Uses accumulated data from the memory to fit the agent's model 66 | 67 | :param batch_size: 68 | """ 69 | 70 | mini_batch = [] 71 | l = len(self.memory) 72 | 73 | for i in range(l - batch_size + 1, l): 74 | mini_batch.append(self.memory[i]) 75 | 76 | for state, action, reward, next_state, done in mini_batch: 77 | target = reward 78 | if not done: 79 | target = reward + self.gamma * np.amax(self.model.predict(next_state)[0]) 80 | 81 | target_f = self.model.predict(state) 82 | target_f[0][action] = target 83 | self.model.fit(state, target_f, epochs=1, verbose=0) 84 | 85 | if self.epsilon > self.epsilon_min: 86 | self.epsilon *= self.epsilon_decay 87 | 88 | def memorize(self, state, action, reward, next_state, done): 89 | self.memory.append((state, action, reward, next_state, done)) -------------------------------------------------------------------------------- /env.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | from get_data import GetData 4 | from trader import Portfolio 5 | 6 | 7 | 8 | class Environment: 9 | 10 | def __init__(self, WINDOW_LENGTH, EPOCHS, BATCH_SIZE, symbol, train=True): 11 | 12 | """ 13 | Constants used for things like 14 | - WINDOW_LENGTH : how many elements of data are in a state 15 | - EPOCHS : number of episodes the model is trained for 16 | - BATCH_SIZE : how many cycles we go until we fit the agent's model 17 | """ 18 | self.WINDOW_LENGTH = WINDOW_LENGTH 19 | self.EPOCHS = EPOCHS 20 | self.BATCH_SIZE = BATCH_SIZE 21 | self.AMPLIFIER = 1000 22 | 23 | """ 24 | Variables for the environment data (prices of the stock) 25 | Train variable to determine if were being run in training mode or trading mode 26 | """ 27 | self.stock = GetData(symbol, train) 28 | self.data = self.stock.format_data() 29 | self.symbol = symbol 30 | self.data_len = len(self.data) 31 | self.train = train 32 | self.p = Portfolio('lucas') 33 | 34 | """ 35 | Parameters that are subject to reset after every training episode 36 | """ 37 | self.buy_count = 0 38 | self.sell_count = 0 39 | self.active_positions = 0 40 | self.history = [] 41 | 42 | 43 | 44 | def get_state(self, t=0): 45 | """ 46 | Function to break the data up into window sized chunks 47 | Returns an n sized array up until t 48 | 49 | If we are in train mode, we already have all of the data, so we use the t iterator to determine where we want the state to end 50 | Otherwise, we need to pull the next minute of data, and retrieve the last WINDOW_LENGTH elements 51 | 52 | :return: a numpy array of length WINDOW_SIZE with the sigmoid transformed data 53 | """ 54 | 55 | def sigmoid(x): 56 | return 1 / (1 + math.exp(-x)) 57 | 58 | n = self.WINDOW_LENGTH + 1 59 | 60 | if self.train: # A length check for when we run the live trader 61 | d = t - n 62 | block = self.data[d:t + 1] if d >= 0 else np.append(-d * [self.data[0]], self.data[0:t + 1]) # pad with t0 63 | 64 | else: # If we are not training, we just need to grab the last WINDOW_SIZE + 1 # of elements 65 | self.data = self.stock.update_data(self.symbol) # Get the updated minute-by-minute data 66 | block = self.data[ self.data_len - n : self.data_len + 1 ] 67 | 68 | res = [] 69 | for i in range(n - 1): 70 | res.append(sigmoid(block[i + 1] - block[i])) 71 | 72 | return np.array([res]) 73 | 74 | 75 | def step(self, agent, action, t): 76 | """ 77 | Function that will determine the reward for the agent depending on the action 78 | *** Only used during training *** 79 | 80 | -For a buy, we check how many better options there were (where the price was cheaper) 81 | -For a sell, we check if there was a better time to sell (where the price was more expensive 82 | 83 | :param agent: 84 | :param action: 85 | :param t: index of data 86 | :return: reward value, the more positive -> better 87 | """ 88 | 89 | if action == 0: # Hold 90 | 91 | self.history.append('H') 92 | return 0 93 | 94 | if action == 1: # Buy 95 | 96 | self.buy_count += 1 97 | self.history.append('B') 98 | 99 | buy_price = self.stock.vec[t] 100 | self.p.place_buy_order(self.symbol, buy_price) 101 | diff = self.p.get_avg_price(self.symbol) - buy_price 102 | # If we are buying at a lower price than our avg price, give it a reward 103 | return (max(diff, 0) + 1) * self.AMPLIFIER * 3 104 | 105 | if action == 2: # Sell 106 | 107 | if not self.p.have_stock(self.symbol): 108 | return 0 109 | 110 | self.sell_count += 1 111 | self.history.append('S') 112 | 113 | sell_price = self.stock.vec[t] 114 | self.p.place_sell_order(self.symbol, sell_price) 115 | diff = sell_price - self.p.get_avg_price(self.symbol) 116 | 117 | # If we are selling at a gain, give it a reward 118 | return max(diff, 0) * self.AMPLIFIER 119 | 120 | 121 | def reset_params(self): 122 | """ 123 | Function to reset some parameters at the beginning of every episode 124 | :return: 125 | """ 126 | 127 | self.buy_count = 0 128 | self.sell_count = 0 129 | self.history = [] 130 | self.p.reset_info() 131 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # algorithmicTrader 2 | This is my first project on algorithmic trading with the use of reinforcement learning. I've designed this as an automated function that could be run on the cloud and simulate actual trading. Though this project doesn't account for transaction costs and price increases/decrases when buying/selling, it does a decent job at approximating real time trading. 3 | 4 | 5 | The general flow of this program is as follows. 6 | First you will want to train a model with a particular stock. For example, AMD. This will begin the training process of the model. It may take some time. The environment settings are in the train_reinforce.py file. So if you want, you can change things like, WINDOW_SIZE (amount of data in a state), EPOCHS (or episode count, how many iterations the training process will do), and BATCH_SIZE (how many states pass before the model is fitted). 7 | 8 | ``` 9 | python train_reinforce.py AMD 10 | ``` 11 | Models are saved in a directory specified by the stock name you give (ex : amd_models). 12 | 13 | Transactions are done through the trader.py class called Portfolio. There are functions that place buy and sell orders based on the current value of cash or book value of stock (ex : buying AMD shares that evaluate to 1% of our current cash or selling 1% of AMD shares based on how much we already own). You can change the amount of stock you buy with the parameter, self.PCT_OF_MAX. 14 | 15 | One episode should produce something similar to the following 16 | ``` 17 | Episode 0/200 18 | Net Profit : 28433.811742782593 19 | History is : ['H', 'H', 'B', 'H', 'S', 'H', 'H', 'H', 'S', 'S', 'S', 'H', 'B', 'B', 'S', 'H', 'H', 'B', 'H', 'S', 'H', 'H', 'S', 'H', 'H', 'S', 'B', 'S', 'B', 'H', 'S', 'B', 'S', 'H', 'B', 'S', 'H', 'S', 'B', 'B', 'B', 'B', 'H', 'B', 'H', 'S', 'S', 'S', 'S', 'S', 'B', 'H', 'B', 'B', 'B', 'S', 'S', 'H', 'H', 'S', 'B', 'B', 'S', 'H', 'S', 'S', 'H', 'H', 'H', 'H', 'H', 'B', 'S', 'S', 'H', 'S', 'B', 'S', 'B', 'S', 'S', 'B', 'H', 'S', 'S', 'S', 'B', 'B', 'S', 'B', 'S', 'S', 'B', 'H', 'S', 'S', 'S', 'S', 'S', 'S', 'H', 'B', 'H', 'B', 'S', 'B', 'H', 'B', 'S', 'B', 'B', 'B', 'H', 'S', 'B', 'S', 'B', 'B', 'B', 'B', 'S', 'S', 'S', 'B', 'B', 'B', 'B', 'B', 'S', 'S', 'B', 'H', 'B', 'B', 'B', 'H', 'B', 'H', 'S', 'S', 'S', 'S', 'S', 'H', 'S', 'S', 'S', 'S', 'H', 'S', 'B', 'S', 'B', 'B', 'S', 'S', 'S', 'S', 'B', 'H', 'S', 'B', 'S', 'H', 'S', 'S', 'H', 'S', 'S', 'S', 'B', 'S', 'B', 'S', 'S', 'H', 'H', 'B', 'S', 'B', 'B', 'S', 'S', 'S', 'B', 'B', 'H', 'H', 'B', 'B', 'S', 'B', 'B', 'B', 'H', 'B', 'H', 'S', 'B', 'S', 'S', 'H', 'S', 'B', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'S', 'B', 'H', 'H', 'S', 'S', 'S', 'B', 'S', 'S', 'S', 'H', 'S', 'S', 'B', 'B', 'S', 'B', 'S', 'S', 'S', 'S', 'B', 'S', 'S'] 20 | Buys : 75 Sells: 112 21 | ``` 22 | This indicates that on the 0th episode, we made ~ $28,433, given a starting portfolio value of $100,000 (not bad). We also see the history of actions the model took as well as the buy and sell count. 23 | 24 | Throughout the training process you will have some models that produce very good results (for AMD I had a model that gave me almost an 80% return over 1 year). You will also see some models that dont perform well, some not even buying or selling. I'm not sure why that is, as this is my first project using reinforcmenet learning so my understanding on the subject matter is limited. However, as the episodes go on, you will notice incrementally better results - an indication that the training process is working. 25 | 26 | 27 | The next part of the porject is actually using one of the models to run this on real, live data. The models produced seem to work well in the short term (buying and selling after a few days) so it's a little hard to show those results. But the way to deploy the model in real-time is by doing. 28 | ``` 29 | python eval.py AMD 30 | ``` 31 | The following code segment is essentially how the real-time trader works. Since during training we only buy, sell, or hold in one day, we need to reflect that here. A loop continues to run until the current time is 3:30 pm, and the market is open. At that time, we get the current state, and perform the corresponding action. 32 | 33 | ``` 34 | t_330pm = datetime.time(hour=15, minute=30) 35 | 36 | 37 | while True: 38 | now = datetime.datetime.now() 39 | now = now.time() 40 | 41 | if now == t_330pm and trader.is_market_open(): # Perform the action near the end of the day 42 | 43 | state = e.get_state() 44 | action = a.act(state) 45 | 46 | if action == 1: # BUY 47 | p.place_buy_order('AMD', e.stock.vec[-1]) 48 | elif action == 2: # SELL 49 | p.place_sell_order('AMD', e.stock.vec[-1]) 50 | ``` 51 | Make sure to reset the json file that hold your portfolio into before running in real time. That is also where you should see all your orders being filled. 52 | -------------------------------------------------------------------------------- /trader.py: -------------------------------------------------------------------------------- 1 | # """" 2 | # This file will be the main point of control for the trader 3 | # This will call upon the models build in trading.py to generate predictions and place buy and sell orders 4 | # """" 5 | 6 | import datetime 7 | import json 8 | import pandas as pd 9 | import pytz, holidays 10 | import yfinance as yf 11 | import time 12 | import os 13 | 14 | 15 | # Global data and DS, dict to keep stocks and their current bullish or bearish status 16 | # True to indicate a bullish status, False to indicate a bearish one 17 | _stock_list = {'AMD':True, 'ATZ.TO':True, 'DOO.TO':True, 'QSR.TO':True} 18 | 19 | 20 | def is_market_open(now = None): 21 | tz = pytz.timezone('US/Eastern') 22 | us_holidays = holidays.US() 23 | if not now: 24 | now = datetime.datetime.now(tz) 25 | openTime = datetime.time(hour = 9, minute = 30, second = 0) 26 | closeTime = datetime.time(hour = 16, minute = 0, second = 0) 27 | # If a holiday 28 | if now.strftime('%Y-%m-%d') in us_holidays: 29 | return False 30 | # If before 09:30 or after 16:00 31 | if (now.time() < openTime) or (now.time() > closeTime): 32 | return False 33 | # If it's a weekend 34 | if now.date().weekday() > 4: 35 | return False 36 | return True 37 | 38 | class Portfolio: 39 | 40 | """ 41 | The class that represents the users trading portfolio 42 | Contains information about their balance and functions to process trades 43 | 44 | To retain information about the portfolio when the program stops, it saves all info to a json 45 | """ 46 | 47 | def __init__(self, username): 48 | """ 49 | Constructor that checks if the username already has an 'account' (json file with their info). If they do 50 | it pulls the info from there to be used in this class 51 | 52 | Otherwise, we create a new user with a default balance of $10,000 to be traded with 53 | 54 | :param username: the username you would like your json file to be called 55 | """ 56 | 57 | self.filename = username + "_trades.json" 58 | self.PCT_OF_MAX = 0.14 59 | self.STOP_LOSS = 0.92 60 | 61 | if os.path.exists(self.filename): 62 | print("File exists for " + username) 63 | 64 | with open(self.filename, 'r') as f: 65 | json_obj = json.load(f) 66 | 67 | self.username = username 68 | self.balance = json_obj['balance'] 69 | self.stocks = json_obj['stocks'] 70 | 71 | else: 72 | print("User doesn't exist, creating a new portfolio") 73 | 74 | self.balance = 10_000 # Initial balance to trade with 75 | self.username = username 76 | self.stocks = {} 77 | 78 | self.write_to_json() 79 | 80 | 81 | def place_buy_order(self, symbol, price): 82 | """ 83 | Function that takes the steps to process a buy 84 | - remove the amount of: amt = quantity * price, from the users balance 85 | - add 'quantity' number of shares of 'symbol' (new dictionary key-value to self.stocks) 86 | - add the total book value (amt) of that specific stock 87 | TODO : Add in support that indicates stop loss point 88 | 89 | :param symbol: symbol of the stock being bought 90 | :param quantity: number of shares of the stock 91 | :param price: the price the stock was bought at 92 | 93 | :return: no return value, function will modify the data and rewrite json to store the info 94 | """ 95 | 96 | # Determine the number of shares, 97 | max_possible = int(self.balance / price) 98 | quantity = int(self.PCT_OF_MAX * max_possible) # Only allow 5% of the max possible shares to be bought at a time 99 | amt = price * quantity 100 | 101 | if self.balance >= amt: 102 | self.balance -= amt 103 | if self.have_stock(symbol): 104 | # We have the stock, just add it to our current balance 105 | self.stocks[symbol]['num_shares'] += quantity 106 | self.stocks[symbol]['book_value'] += amt 107 | else: # We don't currently own the stock, so we need to add it 108 | self.stocks[symbol] = {'num_shares' : quantity, 'book_value' : amt} 109 | self.write_to_json() 110 | else: 111 | #print("Insufficient funds to buy " + str(quantity) + " shares of " + str(symbol) + " at " + str(price)) 112 | pass 113 | 114 | def place_sell_order(self, symbol, price): 115 | 116 | # First make sure we have a sufficient number of shares 117 | if self.have_stock(symbol): 118 | 119 | # Determine the number of shares, 120 | quantity = int(self.PCT_OF_MAX * self.stocks[symbol]['num_shares']) # Only allow 10% of the max possible shares to be sold at a time 121 | amt = price * quantity 122 | 123 | curr_avg = (self.stocks[symbol]['book_value'] / self.stocks[symbol]['num_shares']) 124 | diff = (price - curr_avg) / curr_avg 125 | 126 | if self.stocks[symbol]['num_shares'] >= quantity: 127 | 128 | self.stocks[symbol]['book_value'] -= curr_avg * quantity 129 | self.stocks[symbol]['num_shares'] -= quantity 130 | 131 | amt = quantity * price # Get the amount to return to the account 132 | self.balance += amt 133 | self.write_to_json() 134 | else: 135 | #print("We dont have the stock or we tried selling more shares than we own") 136 | pass 137 | 138 | def write_to_json(self): # We write to the json after we buy and sell, so we can also update the running stop loss 139 | 140 | 141 | f = open(self.filename, "w") 142 | 143 | user_info = {'username': self.username, 144 | 'balance': self.balance, 145 | 'stocks': self.stocks} 146 | 147 | json_obj = json.dumps(user_info, indent=4) 148 | f.write(json_obj) 149 | f.close() 150 | 151 | def get_avg_price(self, symbol): 152 | if self.have_stock(symbol): 153 | return (self.stocks[symbol]['book_value'] / self.stocks[symbol]['num_shares']) 154 | else: 155 | return 0 156 | 157 | def get_stop_loss_point(self, symbol): 158 | if self.have_stock(symbol): 159 | return self.get_avg_price(symbol) * self.STOP_LOSS 160 | 161 | def have_stock(self, symbol): 162 | # We want to check our stocks to see if we have one we are either buying or selling 163 | if symbol in self.stocks.keys(): 164 | return True 165 | return False 166 | 167 | def get_net_worth(self, symbol, price): 168 | 169 | if self.have_stock(symbol): 170 | total_bookval = self.stocks[symbol]['book_value'] # The current price of the stock, times the number of shares, gives the actual value of all the shares 171 | total_worth = self.stocks[symbol]['num_shares'] * price 172 | return self.balance + total_bookval, self.balance + total_worth 173 | 174 | def get_balance(self): 175 | return self.balance 176 | 177 | def reset_info(self): 178 | self.balance = 100000 179 | self.stocks = {} 180 | self.write_to_json() 181 | 182 | def 183 | 184 | def filter_for_day_trading(path): 185 | """ 186 | Filter out any stocks that may not be a good fit for day trading, such as low volume, low price fluctuation, and high price per share 187 | 188 | Parameters 189 | ---------- 190 | path : `str` 191 | The path to the csv that contains stock symbol data 192 | 193 | Returns 194 | ------- 195 | None 196 | Generates a new csv with the updated symbols, does not return a value 197 | """ 198 | symbols = pd.read_csv(path) 199 | 200 | # Get the string values for today's date and tomorrow's date 201 | today = datetime.datetime.today() 202 | last_week = today - datetime.timedelta(days=7) 203 | 204 | today = today.strftime('%Y-%m-%d') 205 | last_week = last_week.strftime('%Y-%m-%d') 206 | 207 | symbol_lst = [] 208 | for index, row in symbols.iterrows(): 209 | symbol = row['Symbol'] 210 | 211 | data = yf.download(symbol, start=last_week, end=today, interval='1m') 212 | index = data.index 213 | 214 | if len(index) > 1400: # Need to filter out data where there isn't a lot of information 215 | # For a week (5 days) that has every minute documented, it accounts for about 1900 entries 216 | min = data['Close'].min() 217 | max = data['Close'].max() 218 | avg = (min + max) / 2 219 | percent_diff = (max - min) / avg * 100 220 | 221 | vol_mean = data['Volume'].mean() 222 | 223 | if vol_mean > 15000 and percent_diff > 5.0: # Candidate for day trading 224 | if max < 80: 225 | symbol_lst.append(symbol) 226 | print(percent_diff, vol_mean, max, symbol) 227 | 228 | df = pd.DataFrame(symbol_lst, columns=['Symbol']) 229 | df.to_csv('filtered_symbols.csv', index=False, header=True) 230 | 231 | 232 | 233 | 234 | 235 | 236 | 237 | 238 | 239 | 240 | 241 | 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | 252 | --------------------------------------------------------------------------------