├── .ipynb_checkpoints
│   └── ARIMA modelling -checkpoint.ipynb
├── Images
│   ├── 20_day_ma_plot.png
│   ├── 5_SMA.png
│   ├── LSTM.png
│   ├── LSTM_20.png
│   ├── SPY_train_valid_test_plot.png
│   ├── Season_Trend_Decomposition.png
│   ├── acf_plot.png
│   ├── arima_autocorrelation.png
│   ├── arima_error_plot.png
│   ├── arima_pacf.png
│   ├── arima_predictions.png
│   ├── arima_predictions_zoom.png
│   ├── arima_stationary_test.png
│   ├── arima_stationary_test_differenced.png
│   ├── cnn_preprocess_rnn_model.png
│   ├── cnn_preprocess_rnn_model_30.png
│   ├── dense_forecast.png
│   ├── download.png
│   ├── full_cnn_wavenet.png
│   ├── linear_model.png
│   ├── lstm_30day_window.png
│   ├── model_results.png
│   ├── naive_forecast_plot.png
│   ├── naive_forecast_plot_zoom.png
│   ├── pacf_plot.png
│   ├── rnn_forecast.png
│   ├── seqtoseq_rnn.png
│   ├── seqtovec_rnn.png
│   ├── spy_plot.png
│   └── underfit_rnn.png
├── LICENSE
├── MSFT.CSV
├── Models
│   ├── arima_111.pkl
│   └── my_checkpoint.h5
├── Notebooks
│   ├── .ipynb_checkpoints
│   │   ├── 1. Time Series Forecasting with Naive, Moving Averages, and ARIMA-checkpoint.ipynb
│   │   ├── 2. Linear_Model_Forecast-checkpoint.ipynb
│   │   ├── 3. Dense_Forecast-checkpoint.ipynb
│   │   └── 5. LSTM_Model-checkpoint.ipynb
│   ├── 1. Time Series Forecasting with Naive, Moving Averages, and ARIMA.ipynb
│   ├── 2. Linear_Model_Forecast.ipynb
│   ├── 3. Dense_Forecast.ipynb
│   ├── 4. RNN_seqtovec_seqtoseq.ipynb
│   ├── 5. LSTM_Model.ipynb
│   ├── 6.Preprocess_CNN.ipynb
│   ├── 7. Full_CNN_Wavenet.ipynb
│   ├── SPY.csv
│   ├── __pycache__
│   │   └── formulas.cpython-37.pyc
│   ├── arima_111.pkl
│   ├── formulas.py
│   └── my_checkpoint
│       ├── saved_model.pb
│       └── variables
│           ├── variables.data-00000-of-00001
│           └── variables.index
├── README.md
├── SPY and DIA ETF Analysis.ipynb
├── SPY.csv
├── TSLA.CSV
├── Time Series Modeling Presentation.pdf
├── my_checkpoint
│   ├── saved_model.pb
│   └── variables
│       ├── variables.data-00000-of-00001
│       └── variables.index
└── pyrobot
    ├── indicators.py
    ├── portfolio.py
    ├── robot.py
    ├── stock_frame.py
    └── trades.py
/Images/20_day_ma_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/20_day_ma_plot.png -------------------------------------------------------------------------------- /Images/5_SMA.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/5_SMA.png -------------------------------------------------------------------------------- /Images/LSTM.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/LSTM.png -------------------------------------------------------------------------------- /Images/LSTM_20.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/LSTM_20.png -------------------------------------------------------------------------------- /Images/SPY_train_valid_test_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/SPY_train_valid_test_plot.png
-------------------------------------------------------------------------------- /Images/Season_Trend_Decomposition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/Season_Trend_Decomposition.png -------------------------------------------------------------------------------- /Images/acf_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/acf_plot.png -------------------------------------------------------------------------------- /Images/arima_autocorrelation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/arima_autocorrelation.png -------------------------------------------------------------------------------- /Images/arima_error_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/arima_error_plot.png -------------------------------------------------------------------------------- /Images/arima_pacf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/arima_pacf.png -------------------------------------------------------------------------------- /Images/arima_predictions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/arima_predictions.png -------------------------------------------------------------------------------- /Images/arima_predictions_zoom.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/arima_predictions_zoom.png -------------------------------------------------------------------------------- /Images/arima_stationary_test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/arima_stationary_test.png -------------------------------------------------------------------------------- /Images/arima_stationary_test_differenced.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/arima_stationary_test_differenced.png -------------------------------------------------------------------------------- /Images/cnn_preprocess_rnn_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/cnn_preprocess_rnn_model.png 
-------------------------------------------------------------------------------- /Images/cnn_preprocess_rnn_model_30.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/cnn_preprocess_rnn_model_30.png -------------------------------------------------------------------------------- /Images/dense_forecast.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/dense_forecast.png -------------------------------------------------------------------------------- /Images/download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/download.png -------------------------------------------------------------------------------- /Images/full_cnn_wavenet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/full_cnn_wavenet.png -------------------------------------------------------------------------------- /Images/linear_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/linear_model.png -------------------------------------------------------------------------------- /Images/lstm_30day_window.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/lstm_30day_window.png -------------------------------------------------------------------------------- /Images/model_results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/model_results.png -------------------------------------------------------------------------------- /Images/naive_forecast_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/naive_forecast_plot.png -------------------------------------------------------------------------------- /Images/naive_forecast_plot_zoom.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/naive_forecast_plot_zoom.png -------------------------------------------------------------------------------- /Images/pacf_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/pacf_plot.png -------------------------------------------------------------------------------- /Images/rnn_forecast.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/rnn_forecast.png -------------------------------------------------------------------------------- /Images/seqtoseq_rnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/seqtoseq_rnn.png -------------------------------------------------------------------------------- /Images/seqtovec_rnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/seqtovec_rnn.png -------------------------------------------------------------------------------- /Images/spy_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/spy_plot.png -------------------------------------------------------------------------------- /Images/underfit_rnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Images/underfit_rnn.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Pranjal Bhardwaj 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Models/arima_111.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Models/arima_111.pkl -------------------------------------------------------------------------------- /Models/my_checkpoint.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Models/my_checkpoint.h5 -------------------------------------------------------------------------------- /Notebooks/4. RNN_seqtovec_seqtoseq.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "colab_type": "text", 7 | "id": "w1kHW6x-_b4d" 8 | }, 9 | "source": [ 10 | "# What is a Recurrent Neural Network?\n", 11 | "\n", 12 | "A recurrent layer is just a single memory cell. In diagrams, you may see it represented as many cells, but it is one cell applied repeatedly over a set number of time steps. The cell computes Y_0 from X_0, then moves on to computing Y_1 from X_1, and so on. However, in moving from X_0 to X_1 the memory cell also produces a state vector. This state vector is fed into the next time step as an additional input. This recurrence is why it is called a recurrent neural network: just as x += 1 adds 1 to x on every pass, every time step receives a state vector from the previous time step.\n", 13 | "\n", 14 | "This architecture lets you use any sequence length, because the same parameters are shared across all time steps. An RNN can be thought of as reading a sentence with word context. As you read a sentence you may only be focusing on one word; however, if you encounter a homograph such as \"lead\" you still understand it from context. You can immediately tell whether \"lead\" is the verb meaning \"to guide\" or the noun for the soft metal." 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "metadata": { 20 | "colab_type": "text", 21 | "id": "vidayERjaO5q" 22 | }, 23 | "source": [ 24 | "## Setup" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 1, 30 | "metadata": { 31 | "colab": { 32 | "base_uri": "https://localhost:8080/", 33 | "height": 50 34 | }, 35 | "colab_type": "code", 36 | "id": "gqWabzlJ63nL", 37 | "outputId": "579a6636-417a-4bee-fc59-228021c05566" 38 | }, 39 | "outputs": [ 40 | { 41 | "name": "stderr", 42 | "output_type": "stream", 43 | "text": [ 44 | "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. 
Use the functions in the public API at pandas.testing instead.\n", 45 | " import pandas.util.testing as tm\n" 46 | ] 47 | } 48 | ], 49 | "source": [ 50 | "import numpy as np\n", 51 | "import pandas as pd\n", 52 | "import matplotlib.pyplot as plt\n", 53 | "import tensorflow as tf\n", 54 | "import seaborn as sns\n", 55 | "\n", 56 | "from sklearn.preprocessing import MinMaxScaler\n", 57 | "\n", 58 | "keras = tf.keras\n", 59 | "\n", 60 | "# set style of charts\n", 61 | "sns.set(style=\"darkgrid\")\n", 62 | "plt.rcParams['figure.figsize'] = [10, 10]" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": 2, 68 | "metadata": { 69 | "colab": {}, 70 | "colab_type": "code", 71 | "id": "cg1hfKCPldZG" 72 | }, 73 | "outputs": [], 74 | "source": [ 75 | "def plot_series(time, series, format=\"-\", start=0, end=None, label=None):\n", 76 | " plt.plot(time[start:end], series[start:end], format, label=label)\n", 77 | " plt.xlabel(\"Time\")\n", 78 | " plt.ylabel(\"Value\")\n", 79 | " if label:\n", 80 | " plt.legend(fontsize=14)\n", 81 | " plt.grid(True)\n", 82 | " \n", 83 | " \n", 84 | "def window_dataset(series, window_size, batch_size=128,\n", 85 | " shuffle_buffer=1000):\n", 86 | " dataset = tf.data.Dataset.from_tensor_slices(series)\n", 87 | " dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)\n", 88 | " dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))\n", 89 | " dataset = dataset.shuffle(shuffle_buffer)\n", 90 | " dataset = dataset.map(lambda window: (window[:-1], window[-1]))\n", 91 | " dataset = dataset.batch(batch_size).prefetch(1)\n", 92 | " return dataset\n", 93 | " \n", 94 | "def model_forecast(model, series, window_size):\n", 95 | " ds = tf.data.Dataset.from_tensor_slices(series)\n", 96 | " ds = ds.window(window_size, shift=1, drop_remainder=True)\n", 97 | " ds = ds.flat_map(lambda w: w.batch(window_size))\n", 98 | " ds = ds.batch(32).prefetch(1)\n", 99 | " forecast = model.predict(ds)\n", 100 | " return forecast" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": 3, 106 | "metadata": { 107 | "colab": {}, 108 | "colab_type": "code", 109 | "id": "iL2DDjV3lel6" 110 | }, 111 | "outputs": [], 112 | "source": [ 113 | "# Read in data\n", 114 | "spy = pd.read_csv('SPY.csv')\n", 115 | "\n", 116 | "# Convert series into datetime type\n", 117 | "spy['Date'] = pd.to_datetime(spy['Date'])\n", 118 | "\n", 119 | "# Save target series\n", 120 | "series = spy['Close']\n", 121 | "\n", 122 | "# Create train data set\n", 123 | "train_split_date = '2014-12-31'\n", 124 | "train_split_index = np.where(spy.Date == train_split_date)[0][0]\n", 125 | "x_train = spy.loc[spy['Date'] <= train_split_date]['Close']\n", 126 | "\n", 127 | "# Create test data set\n", 128 | "test_split_date = '2019-01-02'\n", 129 | "test_split_index = np.where(spy.Date == test_split_date)[0][0]\n", 130 | "x_test = spy.loc[spy['Date'] >= test_split_date]['Close']\n", 131 | "\n", 132 | "# Create valid data set\n", 133 | "valid_split_index = (train_split_index.max(),test_split_index.min())\n", 134 | "x_valid = spy.loc[(spy['Date'] < test_split_date) & (spy['Date'] > train_split_date)]['Close']" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": 4, 140 | "metadata": { 141 | "colab": { 142 | "base_uri": "https://localhost:8080/", 143 | "height": 285 144 | }, 145 | "colab_type": "code", 146 | "id": "Zmp1JXKxk9Vb", 147 | "outputId": "6130497e-14f5-4804-bc5b-1fc1e8fb55df" 148 | }, 149 | "outputs": [ 150 | { 151 | "name": "stdout", 152 | 
"output_type": "stream", 153 | "text": [ 154 | "5521 5522 6527 6528 6949\n" 155 | ] 156 | }, 157 | { 158 | "data": { 159 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXwAAAD7CAYAAABpJS8eAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nO3dd3hUVfrA8e+UNEhCCaFKLwekV+kC9gJrL7tgQQV7d22r6+q6Yl11RUVWfiK6rquuwIqiuyoCgiJIEZVDDR0SQktC2szc3x8zmcwkM8nMZGryfp6Hh7nn3DvzzmV458y5555jMgwDIYQQ9Z851gEIIYSIDkn4QgjRQEjCF0KIBkISvhBCNBCS8IUQooGwxjoAP1KAocB+wB7jWIQQIlFYgDbAD0Bp1cp4TfhDgWWxDkIIIRLUGGB51cJ4Tfj7AY4cKcLhCP4+gaysdPLzC8MeVKRIvJGTSLGCxBtp9T1es9lEs2aNwZVDq4rXhG8HcDiMkBJ+xbGJROKNnESKFSTeSGsg8frsCpeLtkII0UBIwhdCiAYiXrt0fDIMgyNH8igrKwH8/8zJzTXjcDiiF1gdRT9eE8nJqTRrlo3JZIri6wohYimhEn5h4TFMJhOtWp2EyeT/x4nVasZmS5yEH+14DcPB0aOHKCw8RkZG06i9rhAithKqS6e4uJCMjKY1JntRO5PJTEZGM4qLE2e0ghCi7hIqczocdiyWhPpRErcsFisOh9zTJkRDklAJH5A+5zCR8yhE/Fmf9zMzVr2Iw4hMF680l0N0ww1XU15ejs1Wzu7du+jcuSsAPXooHnroj7UeP3/+h5SWlnL55b+LdKhCiASxr/AAuwv3YRgGRKBNFlDCV0rNBzoDDqAQuE1rvU4plQOUuP4A3K+1/tx1zHBgFpAG5ACTtda54Qw+lmbPngvA/v37uP76Kbz11j+86m02G1ar/9N7wQWXRDQ+IUTicRjOblZzhK5TBtrCv1prfQxAKfUbYA4wyFV3idZ6o+fOSikz8A5wjdZ6uVLqD8AMYGp4wo5Pl1wykdNOO5Mff/yBLl26MW3azTz22MMUFRVRVlbGyJGjuPnmOwB4881ZFBcXc+utd/LJJwv5/PPPyMjIZPv2bWRkpPPnPz9DVlaLGL8jIUQ02Q0HZpM5Yl2uASX8imTv0gRnS78mg4ESrXXF5D2v42zlhzXhf/vTfpZvqD5lhMkEdV2qd3S/Nozq2ybo44qKipg9+20ASktLefrpv9KoUSNsNht3330r3323guHDR1Y77tdff2Hu3Pdo1ao1Tz/9Zz788H2mT7+lbm9CCJFQfj28OWL99xBEH75S6u/AmTh7ls72qHpXKWXCOTPbQ1rro0AHYGfFDlrrQ0ops1Kqudb6cKCvmZWV7rWdm2vGaq38qWOxmPD3RVjXL0iLxeT1Wv73MwOV+55//vnux+Xl8MorL/PTT+sxDIPDh/PZvn0Lo0ePxmw2YTZXHte/f3/atWsLQN++/Vi16ruAXr8uzGYz2dkZIR0b6nGxkEixgsQbafEc766CPYB3jOGMN+CEr7W+HkApNQV4FjgXGKO13q2USgFeBF4BJocruPz8Qq+JgxwOh9cNSsNPbs3wk1tXOy5cNzIF8hx2uwMw3PsmJ6e6H7/77jyOHz/GrFlvkZKSwtNPP0lxcQk2m8M9MVzFvklJyR6vZ8Jms0X8ZiyHw0FeXkHQx2VnZ4R0XCwkUqwg8UZavMfbMaM9Owt2u2MMNl6z2VStoexVH2xAWut5wHilVJbWererrBR4FRjl2m0X0LHiGKVUC8ARTOu+PigoKCArqwUpKSnk5eWyfPk3sQ5JCBHHmqRk0i49+K7kQNWa8JVS6Uqp9h7bE4HDQIlSqomrzARcAaxz7bYGSFNKjXZt3wh8EM7AE8Gll17BTz+tZ8qUy3jqqScYPHhorEMSQsSxckc5VnPkRsubjFqubiqlWgELgMY451g+DNwLHAU+wrmklgX4Bbhda73fddxInMMyU6kclnkwwLg6ATuqdukcOLCT1q07+j2ogsylE5hAz2dV8f6z2FMixQoSb6TFe7wvrHkNs8nEnYNuBOrUpdMZZ971UutXiStJD/dTPbCG41YAfQOKUgghBOWOMjKSI3dROeGmVhBCiPpqV8Fe8osjd6lTEr4QQsSBMnsZAAdORG5CAkn4QggRZSW2EnYe3+1VtrfQ57rjYSUJXwghouyepY/yzOq/Uepq1QN8t381AJnShy+EEPVPub3c/TjNmgZAx8z2/navM0n4QggRI4bH2twVN1xd0PWciL2eJPwQ3XPP7cyf/6FXmWEYXHrpb1i7do3PY5588jE++uh9wDkf/vvvv+tzv08//Q9/+MPva41h6dIl/PLLxlr3E0LEj2Olx92PPSdKK7E7Z5lPszaK2GtLwg/ReedN4tNPP/EqW7t2DWaziQEDBvk5qtIFF1xS58VPli1bwq+//lyn5xBCRNff1s12P/ZM+OtynY23FEtSxF47oVe8Kt/8LeV6abVyk8lEbXcQ1yZJjSWpxyi/9WPGnMrzzz9FTs4OOnXqDMCiRQs566xzueWWGygpKaasrIxJky7ksst+W+14z/nwy8vLefbZGfz442qaNGlK9+7Kvd+2bVt5/vkZ1Z7v++9Xsnz5UlavXsV//rOAyy//Leeccz6fffYJ//73B9jtdtLT07n33gfo0KFTnc6FECJ8mqY0YX+Rc9IBzy6dTUe2AJBsSY7Yayd0wo+lpKQkzjjjHD79dCE333wHJ04UsWzZN8yb9z6TJ19DcnIyJ06cYNq0qxk2bIT7S8GXjz/+iP379/HOOx9gs9m45ZYbaNPG2Z/Xpk0bXnzx1WrPd8opIxg9eiw9e/bi4osvB2D9+rV89dV/mTlzNsnJyaxc+S1PPfU4r702JyrnRAhRu18Pb3Y/dvhomEZqtStI8ISf1GOUz1Z4tOamOe+8Sdx7721Mn34rX375X/r27U9SUhIzZjzB1q2bMZnMHDqUx9atm2tM+GvW/MA555yP1WrFarVy1lnnsGGDcx66kpISXnllRkDP9+23S9m6dQvTpl0DOK8pFBQcr7afECI+OAwHhmHwzd4VUXm9hE74sda9ew+ysrL57rsVfPrpQi699LfMmjWT5s2zmDPnXaxWK3fddQtlZWW1P5kfwTyfYTi/hK6//saQX08IET0GBluP7uCDzQui8npy0baOzjtvEnPmvMHu3bsYM+ZUCgsLaNmyFVarle3bt7J+/bpan2PIkKEsXvwpNpuN0tIS/vvfxe66mp6vcePGFBYWurdHjRrD4sWLyM119g/a7XY2bfo1jO9WCFEXJbZSr23DcHhduG2a0iSiry8t/Do644yzmTnzJSZNupCkpCSuvvo6nnjiURYtWkD79h0YMM
DvhKJuF1xwMZs3b2Hy5Etp0qQpPXv25siRfIAan++ss87lySf/xNdff+m+aDtt2s088MDd2O0ObLZyxo8/nZ49e0Xs/QshAldQVui17TAMLGaLe/u09mMi+vq1zocfI52Q+fAjTubDjz8Sb2TFOt4dx3by3JqZDG7ZnzW563lw6J2U2st44cdXAejetIt7LnwI/3z40qUjhBBRUtHCz27UAgAH3l06jZIid9MVSMIXQoioKSwvAionSDMMw2ss/jmdTovo60sfvhBCRMm7m5zTsaS7WvIltlJ2Fexx16cnNY7o60sLXwghosxicl6ofXPjO15dOmaTxd8hYSEJXwghosBzhI7dleSLbCdoltrUXW41RzbhB9Slo5Saj/OqrwMoBG7TWq9TSvUA5gJZQD5wldZ6i+sYv3VCCNHQbD+W435sMpncj20Om/uxJYLTKkDgffhXa62PASilfgPMAQYBrwMztdbvKKUmA7OACa5jaqpLeDfccDXl5eXYbOXs3r2Lzp27AtCjh+Khh/4Y0HP8+ONqbDYbI0eOjGSoQog4UFhW5H5swk/CN0f2smpAz16R7F2aAA6lVEucSf8MV/l7wCtKqWzA5K9Oa50XlshjbPbsuQDs37+P66+fwltv/SPo51i7dg3FxcWS8IVoAD7L+RKAl8b9hY2HKu+Aj8cWPkqpvwNn4kzmZwPtgb1aazuA1tqulNrnKjfVUFcvEr4vK1cu5+2351BaWkZSUhK33XY3ffr0ZdeuHJ588k+UlJTgcNg555yJnHLKCBYs+DcOh4M1a1YxYcKZTJlyTazfghAiwqxmK3h16djdjyM5UyYEkfC11tcDKKWmAM8Cj0QqqAquO8bccnPNWK2VJ2TlvtWs2LsqIq89st0wRrQdUut+FosZMHHgwF7mzn2Tl16aSePG6Wzfvo277rqNBQs+Zf78jxg79lSuvnoqAMePHyczM5MLL7yY4uJibr/9roi8h9qYzWays0NbMDnU42IhkWIFiTfSYhXvkdKj7tfPLE11l6c0qrxQ6yu2cMYbdIeR1nqeUuoNYA/QTillcbXgLUBbYDfOFr6/uoBVnVrB4XB4TUHgsBv4mhnCZMJneTAcdiOg6Q7sdgdgsGLFCvbs2cP06de762w2G7m5efTrN4BXX32ZEyeKGTRoCIMGDcFmc+BwGO73F4upFRwOR0i3mcf69vRgJFKsIPFGWrTjPVF+Apthd99olWS2kpdXwPFjxe59jhecAOCFU/9cLbY6TK3gU60JXymVDjTTWu92bU8EDgO5wDrgSuAd199rK/rolVJ+68LllDaDOaXN4GrlsZibxjAMTjllBI888ni1unHjTqNPn36sWvUd77zzFosWLeTRR5+IanxCiOi7b9ljXtv9s/sA4NkeXbzzKwBSIrjSVYVAOowaAx8opX5yJfG7gIlaawO4EbhNKbUZuM21XaGmunpn2LDhfP/9SrZv3+Yuq1hvds+e3TRvnsW5507k2mtv4JdfnOWNGzemqKjQ5/MJIRLbvsID1cpWH6x9uvRIqrWFr7U+CAz3U7cJOCXYuvqoffsOPProE8yY8QSlpaXYbOX07dufXr1689VX/+WLLxaTlGTFZDJxxx33ADB27Hgeeug+pky5Qi7aClHPVEyj4Oni7hMB6NbUuWJdVmpz8ksORy0mmUunjtq0acuiRc7hVsOGDWfYsOrfjVddNZWrrpparbxt23a89dY/Em46ZyFE7XxNPd8psz0AGcnpJJuToprsQaZWEEKIiNhfVL1LJ9VSOTrH827baJEWvhBChNHugn3M+OFFn3XJHhdmPe+2jRZp4QshRBj5S/bgPTlaLFr4CZfw43RJxoQj51GI6EuRFn7grNZkioqOS7KqI8MwKCo6jtUa+XG/QtRXBW9cQ8GbN3iV1ZSbzu98FmnWNPe29OHXolmzbI4cyaOw8GiN+5nNZhyOxBn1Eot4rdZkmjXLjuprClFflG/+1vnAXu5VXmIv8XvMOZ29ly+MRQs/oRK+xWKlRYs2te4nt3sLISKpZMlsn+WHio8E/ByeCf++IbfWOaZAJFSXjhBCxBvPbpyqF2zP6DAOgNsGeHf9gHeXTseM9pEJroqEauELIUSsGbayKgV2MPlOpb2a9+CCbuf6rPNs4UerP19a+EII4YPhcFC6Zj5Gifd8VyXL5nrvaLfhj6WGNWplWKYQQsQJ+671lK2ZT8l3//Qqt2351mu7/NdvANh+bGe15zAM/4Mx5KKtEELECaPcNWe9w38LHqD0u/cwZWbz/PZ/Vqsrr+HYihZ+pFe58iQtfCGE8MGoGHJpTqosK61ciNzauXJFvJIvXvZK3L2a93Duj/9x+RUt/FPbRW9Na0n4Qgjhg+PwHueD0kKM8hIMh43CubcAkHTyaWBJ8tp/eOvKL4BTT3Im8Xbp/oeRV8yUGc2+fEn4Qgjhg233T86/d66l6F8P4ji8111n7dDPayFygBX7netr39L/Ovq2OJmZE56haUqTWl9HEr4QQsSYteMA92Oj6IhXdw5JqX4Xzj45SwX1OuYopmFJ+EII4UP5z//z2i5e9Kz7sSkplaQeo8LyOtLCF0KIWKs2vt7wemxp17tyV9ffE9qPCfplNh/ZVvtOYSIJXwghfDC37OK3ztKik1fLfFnTRgCU2sv8HeJXzvFdwQcXIkn4Qgjhg3Es12d5o4sfr1a2uEU6AN/u+z6iMdVVrTdeKaWygHlAV6AM2AJM11rnKaUM4Ceg4nayKVrrn1zHTQSedb3GGuBarfWJ8L8FIYQIr3K9DKO0sFp58sCJWLI6+D2uc6b/ungQSAvfAJ7RWiutdV9gGzDDo36k1nqA609Fsk8HZgMTtdbdgALg3jDHLoQQEWHb96vPcqPce7775KEXAzDusHMEzx0Dp0c2sDqqNeFrrQ9rrZd4FH0HdKzlsHOA1VrrLa7t14HLQ4pQCCGizLZlhe+KKtMgpAycSMqIK9nayLl6nNUc37PVBBWdUsoM3AQs9CheopSyAp8Bj2mtS4EOgOdMQruA6Ez4LIQQEZLc5/RqZabkRuxJdd51G8oQy4yk9DrHFahgv47+BhQCr7i2O2itdyulMnH28z8C/CFcwWVlhX4isrMzwhVGVEi8kZNIsYLEG2mBxOtv/blWXaqP3Ck40JjMnXaOWy0hnQuLxVzjceE8vwEnfKXUc0B3nP3yDgCt9W7X38eVUn8H7nbtvgsY73F4B2B3sMHl5xficAS/YHmiLRko8UZOIsUKEm+kBRKvUeacJdPaZRi27avc5ZYO/X0eW15UTla5newmHUI7F4bJ73HBnl+z2VRjQzmgYZlKqb8Ag4ELXF02KKWaKaXSXI+twCXAOtchi4GhSqnuru0bgX8FHLUQQsRI4Vs3OR9YvSdHSx17re8DTGZKTSZSq0ymFqgmKZkhHReKQIZl9gYeBDYDK5RSADuAZ4BZrqGZScAKnF06aK0LlFLTgE+UUhZgLXBHRN6BEEKEiefyhZasjqRMvhRMZsxp/pOyyWxlX2oSRUUHQ3rN3/W8JKTjQlFrwtda/wx+l2bpV
8NxC4AFIcYlhBBRV7b+U/fjJDUGU3Ja7Qe5ljE8Zi8O6TUbWQN4jTCRO22FEMKlbI2zjdr4imcCS/bgXMS8DqI5eVp8DxoVQogosrTpgaPwMObMloEfFPy4Ei/RXNtWEr4QQgBGSSH2/Tr4A10t9MZGaB0m0sIXQogoMgwH5a4hmOam/pcl9Mlk4sEdh0hre3JQh5lNZhyGQ1r4QggRTYWzp7ofp46fFuTRJprYHViC7Nqxmq2U2cuimvDloq0QokGzH9jitW3KaBHcE1R0yfhZ8tDvYdUeRJ4kfCFEg2WUFXNi4ZNeZebUIKcycE+oFmzCd2Z6s7TwhRAi8kq+nee1bW52UvBPEmILv7JpL334QggRcbbtP7gfp515B5aO/YN+DnOWcyLg5D5nBnVcFAfnuEnCF0I0KIZhYNu+ClNKY7CXA5A26SGsrXuE9Hzm1Awypr0VwpEVGb+OA/mDIAlfCNGg2Pf+TMmXr3mVhZrs6yL66V768IUQDYxt14ZYhwBApybO9W8tpuilYWnhCyEaFPven722086NzXLb1/WezMETuaRaU6P2mtLCF0I0LFVa1NaT+sQkjFRrCh0zo7vyq7TwhRANitl1Y1XKyN85L9w2IJLwhRANhr24ENvOtQBY2/aMcTTRJ106QogGI3fBS7EOIaakhS+EaBCKv5qFbduPAKSMmhLjaGJDWvhCiAbBtnWl+3FSr/ExjCR2JOELIRoEa+chACT1GofJ3DBTX8N810KIBse2YzWYLaSOuSbWocRMrX34SqksYB7QFSgDtgDTtdZ5SqnhwCwgDcgBJmutc13H+a0TQohoMmxlzgeOui04nugCaeEbwDNaa6W17gtsA2YopczAO8AtWusewFJgBkBNdUIIEQ2Gw0HBG9dQ+I97sO3ZGOtw4kKtCV9rfVhrvcSj6DugIzAYKNFaL3eVvw5c5npcU50QQkRc4d+dyxYahfmUfPEyAK0uuT+WIcVcUH34rpb7TcBCoAOws6JOa30IMCulmtdSJ4QQEWWUFfssT23fK8qRxJdgx+H/DSgEXgEuDH843rKy0kM+Njs7yGXKYkzijZxEihUk3nAoyz1CoY9yc1pjshsl1liVcJ7fgBO+Uuo5oDswUWvtUErtwtm1U1HfAnBorQ/XVBdMcPn5hTgcwc8WnZ2dQV5eQdDHxYrEGzmJFCtIvOFSttG5klXygPMoW7fIXW4ymeMyXn+CPb9ms6nGhnJAX3VKqb/g7Je/QGtd6ipeA6QppUa7tm8EPgigTgghIsa252dKV/4DAGvnoaRf82qMI4ofgQzL7A08CGwGViilAHZorS9USk0BZimlUnENvQRw/QLwWSeEEMEq37EaAON4HtYuQzBnZOMoPg4OO6ZGTTC5pjwufO9ejIJDHkcamJIbYe040D1pWkNWa8LXWv+Mn2XVtdYrgL7B1gkhRCCM0iKMkgJK/vuKu6z0+/dJnzqLonm3u8sypr2FYRhVkj2Ys5yrSqWeeVt0Ao5zMnmaECIuFX/5GrZt3/usO/Hxn7y2yzcvB7Ol2n4mV5kpissIxjNJ+EKIuLNxRz4d/SR7AFOK94XJkiV/j3RI9YJ87Qkh4srC5Tt44f31lJj9r0Zlad2jSonPXmdRhSR8IUTcOFFiY/7yHbzU/G1SHUXu8rRz78Xcsot7235gs/eBliT3w4xpb0U6zIQlXTpCiLix95Cv26WcC41b2vbCcewAJz54uHrCt5d5bTa64BEcBfmRCjNhSQtfCBE3ftrufW+mKa0J6de85nxstmBp1s6rPvX0WwAwZ7X3Kre07EpS12ERjDQxSQtfCBFzJ0rKmffFZg4cPuFVbs7uhCk5ze9x1s5DMDc/CUeh84si9dTrIhpnopOEL4SIuVtfXOazPLnXuBqPM5lMOI7sB8M5z7398B6SajyiYZMuHSFEzGzfd5ypM76qVn7f4SsxX/Is1o4Da38So3JRE2u7hj0bZm2khS+EiJk/v726WtnIPq0Z2ac1jZsHP5u6pX3/cIRVb0kLXwgRE0cLS32Wt2+ZzsmdQls6w2SS8fg1kYQvhIiJRSt2em03aZwMQEpy9SkSRHhIwhdCxERSkjP9ZDZyXmZtk9XIWW4JLC2lnXM34BypIwIjffhCiKhyGAZb9xxj8fe7AHhs6jA2bMtn+Yb9AFgsNXfLpJ5+C+aMFliyOzu3x91A4Y7q1wJEdZLwhRBRYxgGD85aSd7REndZ0/QUxvZvyzfr9gFQXu6o8TmSugz12jYlpZDU72yZETMAkvCFEFGzJ6/IK9l7uu/KAXyxajcj+rQO+nlTh19R19AaBEn4QoiIMwyD657+ulr5nAcmuB+nJluZNLpzNMNqcOQ3kBAi4nYd9D0pmoguSfhCiIhb9N3OamXnj+wYg0gaNunSEUJE1Na9x1i9Kde9bTLBm/dPqOEIESnSwhdCRNRf5q3x2jaMGAUiAmvhK6WeAy4GOgF9tdYbXeU5QInrD8D9WuvPXXXDgVlAGpADTNZa5yKEaDAMj+zeoWU6u3ILSbJKOzNWAj3z84GxQPWOOLhEaz3A9aci2ZuBd4BbtNY9gKXAjHAELIRIHPnHK4dg3n3FAACG9WoZq3AavIBa+Frr5QBKqUCfdzBQUnEc8DrOVv7UIOMTQiSocpudmR9vBOCy8d3IbJTMn6YOc0+hIKIvHBdt31VKmYDlwENa66NABzx+DWitDymlzEqp5lrrw/6eqKqsrPSQg8rOzgj52FiQeCMnkWKF+hHvtj1H+deXm9l5oACAgb1akZ2dERfvLR5iCEY4461rwh+jtd6tlEoBXgReASbXPSyn/PxCHI7gr/BkZ2eQl1cQrjAiTuKNnESKFepHvA7D4M6/fuNVZi+zxcX7qg/ntyZms6nGhnKdrp5orXe7/i4FXgVGuap2Ae5BtkqpFoAjmNa9ECLxlJTZ2O3jJqvW0o0TF0Ju4SulGgNWrfUxV5fOFcA6V/UaIE0pNdrVj38j8EGdoxVCxLWbX1jqs9wsC5PEhUCHZb4MXAS0Bv6nlMoHJgIfKaUsgAX4BbgZQGvtUEpNAWYppVJxDcsMf/hCiHhhs/ue5fKJ64ZFORLhT6CjdG4HbvdR5XeFYa31CqBviHEJIRLM+q351cpOyk6nXXbogy9EeMnUCkKIsCgutXlte86EKeKD3PImhKizwuJy5nz6a6zDELWQhC+EqLPfv7bCa7tzm8Qa695QSJeOEKJOHA6DkjK7e3v278dhQkblxCNJ+EKIOln+03734wHdWmAxS8dBvJKEL4QImWEYvPXZJgDu/+1AVIdmMY5I1ES+ioUQIXvlg/Xux80yU2MYiQiEJHwhRMg857tvlp4Sw0hEIKRLRwgRsHKbndn/+QXDgOm/6c1/V+0CZMx9opCEL4QI2OpNeazWeQA8+bZz6cLkJOkoSBTyLyWECNjnP+xyP9550Dltb+9OzWMVjgiSJHwhImDDtnzWuFrCiep4URlzF29yr0lxosTGLh9TH1stkkYShfxLCREmh44Vc8tfv+FESTkvfrCemR//xLa9x2IdVsju/Ntyvlm3
j+uf+ZoTJTaWb9jnc7/fnt49ypGJUEnCFyIM5n2h+f1rKykutfPGf35xlz85b01CJ/0K67ce4p9fbQXgsWuHusufv2MsTWR0TsKQhC9EHW3dc4yvf9zr3t6wzXua4CfnraHcZq96WFyruJmqwuxPKr/EOrTKYGz/tgCkN0qKalyibiThC1FHf3lnTa37bN0TP6384lIbO/Yfx2EY1aY0rrB0ve/umwq/O6M7D00ZTNsWMtd9IpFhmULUgcPjxqOa2ByV+5WU2UhOskR92T+Hw8BhGDz73lpyDlQujH3TBX0Y2rOle7vgRJn78QO/G8SMd390b199tgIgyWqhW7smUYhahJO08IUIgd3hYOqMr7jz5eUATBjUjjfvH+93/4qW9MYd+dz8wlIWf7/L776R8sx7a5n27BKvZA/w2vyNXtt3uN7T6L5t6NG+qVfdqQPaRTZIEVGS8IUI0vETZdzwzBLAufAHQO/OzTF5tNhP7uQ9idjrC34G4IX3nXPPfLhkm9e0BNGweffRGuvLbXb3EEyAy0/r5lX/4ORBEYlLRI8kfCGC4DAMd6veU/9uLQCYNKoTANec3ZNHrhuE8xsAABW+SURBVB7itY9n1whAWbnvRb8jYdHKHL91mY2TAZj+3Ddc/8zX7vLGqc4LsvdcMYCMRkmcJGvTJjxJ+MLtWGEpRwpKvcqi3QqNdzd4JERPFf3xk0Z15pkbR9CiaRqd22R6zTFTtYX9c87hyAVaxUffbPfa7tsly/24caq1xn/n3p2a89LtY0hLkUt+ia7Wf0Gl1HPAxUAnoK/WeqOrvAcwF8gC8oGrtNZbaqsT8euuV74F4M37x1NcamfRyhw++34XndtkVmutNkSrN+VSkRevOacnX/24h10HC72Sp9lsokXTtICe79PvdjKoR3YkQvWyN6/63bG3XtSX6c8tAcBuN6r16//ujB4Rj0tEXyBf2fOBl4BlVcpfB2Zqrd9RSk0GZgETAqgTcW7J2r3M+2Kze7tiCF+0R5XEm1c9Lm6O6deGU3q14tedRxjQvUVQz3P12Yq5izXb9x1n6oyveGracFo1bxTucN0efXMV4BxxYzaZaJxmJclqZs4DE5jz6a8s37CfJ+au9jpGVblYK+qHWrt0tNbLtda7PcuUUi2BQcB7rqL3gEFKqeya6sIXtggXh8Pg7c+1192gnsm+Qomf8doNxfGiyqGKs38/DpPJREqyJaBkb7V4f1FWHeny4BvfhSdIH95evImKzpoe7ZvS7aQmtMlq7K739+9a0a8v6pdQO+XaA3u11nYArbVdKbXPVW6qoS6o2aSyskK/SJSdnRHysbEQq3jf/69mydq9LFm7t8b9bn1xGX+6YQSDXOO1E+n8BhtrwYkypj/1JQUnynjr0TNpmpHK1BkLAejZsRmtWwU3/vz/HjmLT1fs4L0vtN94PMuCjdcwDCbd64zvP8//xqtuyTrnDVRN01N8Pu9qHxO8dW6bSddOWdXK/UmkzwI07Hjj+ipMfn6h1zCxQGVnZ5CXV1D7jnEimvF6ds1s3XuMdxZvquWISn+cvZI5D0xIqPMbTKx78grZk1fIGwsrpxH41xeaIwUl7u17Lu8f0ns/bUBb1ulcfndGD5/Hr9qwl85tMkM6t3s8+uj3Hzjmnr3yqx/3uMuPFpYG/LzNM1IC3jeRPgtQ/+M1m001NpRDHaWzG2inlLIAuP5u6yqvqU7E0LtfbOb6p7/muOtOyr/Mq3lKgMvGd+P+3w70Ktux/zg//HKg3o3e2Z9fxKNvrvJK9uC8sLry54MAXHdeLyzm0P7LmM0m7rtyIG1bOLtTKv6u8HUtv7BqUtFHD3hNX/yOR9fcn6YOC+i5br6gD9ee0yvkWER8C+nTq7XOBdYBV7qKrgTWaq3zaqqra7AidDa7gy9dLb6XP9zAMY8+aX/OPqUDqoP3DURPzF3N429+z4+b688/54Zt+Tw8+3uvsgvHdqm238g+rcP2muMHevfjL9+wP+jnWLp+H5+v8r5j919fOQfD5R+r/FXy5v3jad8ysO7RIT1b0ig1rn/4izoIZFjmy8BFQGvgf0qpfK11b+BGYK5S6lHgCHCVx2E11YkoOlZY6h5uWWH7vuPc9bfqNw95qu2uyk9W7GSwalnjPvFs6oyvABg3oC26yvj42y/ux4DuLfh4aeXY9VMHtPW6k7auJgxqR8+OzXj5w/XkHS2p/YAqDMOoNqMlwOY9x3h9wUZW/ZrrLqsp7gcnD+Kpd370Wy/ql1oTvtb6duB2H+WbgFP8HOO3TkTX3MW6xvqszBQevWaoe/6Ux64dSpusxiRZK3/8JVvNlNm87wrdebAAwzB4Yu5qRvRuzRlD24c/+DApLbez6peDjOrXBrPJxFyP6xYVFzUBnrj+FNpmNXInyD9eM5Q/vfUDt1zYJ+xfbiaTiXYtGjOsVysWrdwZ1LHlNjvTn/vGq6xru0y27T0O4JXs/3rrqBqfq/tJTZnzwARWb8qlgY+6bRDkt1s9ZrM7WLf1UI37TDlLkdEomcevG8ahoyV0aFV9RMDLd4zhaGEpD8yqHD5oMZsoLrWTc6CAnAMFnD7kpLC2gMNh4fIdzF++w72de7TYb3JNTbbQrkq/esfWGV53ykbCb0Z3DirhHy8q404fv85uvbBvtV9yQMCLkwzpmbi/1kTgZGqFeqq0zM76WpI9wO5c50W+k7LT/Y4pT06y0LKZ941BdofBrS8udW8XuCYRixc/bMr1SvZAtcR656X93I9bBnh3bLhZzMF9ST46Z5XPcll1SgRCEn49dPxEGTe98A0zP668MzQlyUKbrEbceWk/r9v5Tzm5VcDPe+el/ejVsZnvupeXsz+/KPSgw6zqlL+eOrRKZ+ZdY+nXtQU3X9AHgIevGhyt0LxU/Crq2aH2O1s3bs/3ugHsuZtHAtCqWWy+rETikS6demhNlQFRl43vxtmndHBv9+vagsf+bxW7DhbSokngyaJf1xb069qCz37YzQdfVp8a6eHZ30e8CwScQ0MrpgK4+YI+9O2SxScrcxg/sB3NM1NZ9etB9763XjqAnL1HMZtg4bc5ANx2UT/3RGBDeraMSsw1OSm7cUATk1WdAK15Zip3XtqfTm2c3XCv3DnW/atr2qST6dAysW4wEpEnCb8emve594Xas4ZVv6D62LXDQh5Lf/7oLu6En5WZQv7xyhk2v1m3l1MHtGPlxgP8nHOY80Z05OHZ3/PwVYPp2jY8KyR5zvviOb9N1S6bS8Z15azhHcnLa45hGCz8NoezhrUnq0lqWOIIF6vFjL2WGwwLTpSx82DlDTiTz3RObtava+UdsY1Srdx2UV+27j3G8JPDN4RU1B+S8BOcYRgUl9rdY6dzDhz3qr/23J5+L6aGepG1UZXW6E0X9HF3ocxdrFn580H3VMArNh4A4Mm314SlJb3zQOB3HTbPrOzXNplMMW/J+1NabmfXwZrf1x0ec/A/f8sommX47rMf2CObgVGYgVMkJunDT3AffrONW19c6r7R5vG3Klu/cx6YwJh+bcP+mqkeCX/S6M4M7dmSl24f7S7
zt7LSwioXUUNRMYf8OcM71LIn9Org+3pDvNmff4KjhWUBnR8T+E32QtRGEn6C++w7552Wa7fE5s7Xin7ijEa1z644f/kOlqwLfQqBX3MO8+GSbQBccmpXbru4L1NcXRuPe0wdMOveU5nzwISEG7lSdVRRhZKyyhktZ903LkrRiPpIunQS0K6DBTz2fz94lf3jf1uYMPgk93agt9LXlec0ukNUtnv2xWYZKWSkJbEr13vxjbcXa8YFuRB2WbmdJ+etcQ8hBWcXzcDuzq6L8YOc7/u2i/uyO7eQJKslpPcSjyruCAa44fyT3ROjCREK+fQkoKrJvsIWj66UP14zNKIxvHHfOJ67eaRX94Jnou3SJpPfuyZemz6pt9exdkdwa7ne+Pw3Xsn+lTvH+txvYPdsJo3qHNRzx5uJ9yzgf6ud8wxW/TXUNF3mqBd1Iy38BFNYww1OT/9jLQBXnaUwB3lDT7CsFjPNM71Hu5SW292PLzq1C41Sk9wXSnOPnODjZc4uixU/HWBM/5qvLWzZc9TnHC+PXjOk3k/u9fGy7Zw+pD1vV5kWo3FaUowiEvWFtPATzBsLfwbgwjGdaennhps+XZpHMyS3Pa5W+HXn9fJaVQlg4qjOtMt2lgWyeLevZH/npf3p1DozDJHGN7vdOUSz6t2/soi4qCv5BCWYjTucyXLcwHZMdHVf7M8v8preN5ibqSKhasu/ws0X9OHh2d+z6tdcfjO6qNqXQk3idUhlJJTZHLz7xWZyjxYDzl81Ow8UkB2j6R9E/SEt/ATiOQe956iYRqnx8VO/d2fnLwvP8e+ePBPWw7O/Z8O2fAqLy/nXV1s5dKzY3SX05iLnIiRWi5npk3pz9+X9Ixx5bN19WfX396XHalWdWmdWWwdXiFBICz/O/WdFDna7wz0tAMCzN4302ifYCbgi5YrTujOiT2taVZlorULVESY/bs7jxQ/WA7B41S4yGyfz4m2j+fYn581aJ3dqFtRcP4mqhbTcRZRIwo9TKzbup7Tc4bUIR4WqUwMkeSTSM2M4L32S1Uy3djVPn2Axm9zTCCxdv8+r7nhRGY95zAZZdXRPfdW6eSNaNksj90hxtbpnbhoRg4hEfSUJPw5t2JbP3z/5NeD9U5ItPHbtUFo1a0RKcnyPQT9jSHsWV1mWz1PFuP3MxskN6iJlx1YZ5B4p5uLx3fjo663u8iw/10OECIX04cehim4OX6ae63uB6Q6tMuI+2QNeK2lVuPjU6uvHPjVteDTCiRsVE9l1a+89TXK8LSojEpsk/DjzTx/TDoOzK2TOAxMY3a9NlCMKL1/567wRnUhJqvyyatEktUG17gF3V1hrj5FLtXWPCREsSfhxpmJOnA4t07nvyoFMm3gyAA/Usqh4ojD7abG+fEfl5GsTBp3kc5/67Iyh7Xlq+nC6ndSU011TZPRoX/uiKEIEo87NKKVUDlDi+gNwv9b6c6XUcGAWkAbkAJO11rm+nkM4fbIih7yjztP4x2uHYjKZMAyDXp2a06Rx/bitvqur1XrHJf146cMN7vIkq4WZd411LmQyqOENQTSZTO7RTU1cUyh43rksRDiE63fzJVpr90oUSikz8A5wjdZ6uVLqD8AMYGqYXq/ecBgGE+9Z4FXWu3Nzd9+tyWSqN8kenO/tr7eNpknjZM4d3tHrbuG0FCuXjusWw+jiQ8U9FkkyUZoIs0h1lA4GSrTWFas2vI6zlS8Jv4o3P/mlWtk9lw+IQSTRU/EFdsm4rjGOJD6N6deG5CQzQ1TLWIci6plwJfx3lVImYDnwENABcK83p7U+pJQyK6Waa61rn0ilAcg/VsLLH23wmgVSCHD+qpMlCkUkhCPhj9Fa71ZKpQAvAq8AH4fhecnKCn1O9+zs+F7A+fG5q30m+wHds+M+doj/8+spkWIFiTfSGnK8dU74Wuvdrr9LlVKvAguBl4COFfsopVoAjmBb9/n5hThqWdzZl+zsDPLyAl/7NNqWbdhHzv7KtWf/9ZfzWLZ6F68t2MgN5/eK69gh/s+vp0SKFSTeSKvv8ZrNphobynW6KqSUaqyUauJ6bAKuANYBa4A0pVTFWLsbgQ/q8lqJrNzmcN9Ys0bn8X+fbgJgUI9snrj+FNJSrAzskc0b941vcOPPhRDRU9fs0gr4SCllASzAL8DNWmuHUmoKMEsplYprWGYdXythGIbBLzlH6HZSE/KOFPOox/wwFdJSLNxyYR+5k1IIETV1Svha6+3AQD91K4C+dXn+RPXDplxeX/BzjfvMvOvUKEUjhBBO0n8QRsWlNm7569Jq5VVnQnz6RpkBUQgRfZLww6CwuJxfcg5Xa9XfdVl/zCYTvTs3J+9oMfOX7eC683v5nV5ACCEiSRJ+HZSU2Vi/NZ9ZC70TfVqKhcennuI1b3120zRucM2LI4QQsSAJP0Trth7iZY+5YAA6ts7g4SmDq63sJIQQ8UASfpAMw+C1BT+zelPlPHAXje3CuSM6SleNECKuScIPwsYd+bzwfuXiJN3aNeGhKYNjGJEQQgSuwSZ8h8PgvS+3kHukmFsv6uu1ElNxqY05i34lMz2ZTTuPMG1ib178cD3HCsvc+7x4+2gyG9WfWSyFEPVfvUz4ZeV2ikttpKVYOV5Uhs3uYOeBAv7275987j/9uSU1Pt+f3vrB/fiK07rHdKFwIYQIVb1L+F+s2sU/v9pa+461GN67Fd/9fNC93aJJKjNuHCH99EKIhFXvEv5Xa/f6LO/SNpMpZypaNkurNl/Nhm2H+CXnCKcOaMu+QycY1KMFJpOJaRN7RyNkIYSIinqX8GdMH0FyWjJlxWXYHQ4s5tqHSPbr2oJ+XVsA0MZjEWkhhKhP6uWA8SbpKQABJXshhGgoJCMKIUQDIQlfCCEaCEn4QgjRQEjCF0KIBkISvhBCNBCS8IUQooGI13H4FnCuwB6quhwbCxJv5CRSrCDxRlp9jtdjX4uvepNhGGEIKexGA8tiHYQQQiSoMcDyqoXxmvBTgKHAfsAe41iEECJRWIA2wA9AadXKeE34Qgghwkwu2gohRAMhCV8IIRoISfhCCNFASMIXQogGQhK+EEI0EJLwhRCigZCEL4QQDUS8Tq0QEqVUD2AukAXkA1dprbdEOYbngIuBTkBfrfXG2mILtS4MsWYB84CuQBmwBZiutc5TSg0HZgFpQA4wWWud6zoupLowxTwf6Aw4gELgNq31ung8v1Xi/iPwGK7PRByf3xygxPUH4H6t9efxGK9SKhX4K3C6K96VWutp8fhZUEp1AuZ7FDUFMrXWzaMZb31r4b8OzNRa9wBm4vygRdt8YCyws0p5TbGFWldXBvCM1lpprfsC24AZSikz8A5wi+t1lwIzAEKtC6Ortdb9tdYDgeeAOa7yeDy/ACilBgHDcX0m4vz8AlyitR7g+vN5HMf7DM5E38P1+X3EVR53nwWtdY7HOR2AM0/8I9rx1puEr5RqCQwC3nMVvQcMUkplRzMOrfVyrfXuQGMLtS5MsR7WWi/xKPoO6AgMBkq01hVzcbwOXOZ6HGpdWGitj3lsNgEc8Xp+AZRSKTj/M9
7kURy359ePuItXKZUOXAU8orU2ALTWB+P5s+ARezLwO2BOtOOtNwkfaA/s1VrbAVx/73OVx1pNsYVaF1au1thNwEKgAx6/ULTWhwCzUqp5HerCGevflVK7gCeBq4nv8/s48I7WOsejLK7PL/CuUmqDUupVpVTTOI23K85ujD8qpVYrpZYopUYT35+FCpNcr/VjtOOtTwlf1M3fcPaJvxLrQGqjtb5ea90BeAh4Ntbx+KOUGgEMAV6NdSxBGKO17o9z8kIT8ft5sABdgLVa6yHA/cC/gfSYRhWYqVR2RUZVfUr4u4F2SikLgOvvtq7yWKsptlDrwsZ1obk7cLnW2gHswtm1U1HfAnBorQ/XoS7stNbzgPHAHuLz/J4K9AJ2uC6GngR8DnQjTs9vRXek1roU5xfVqDrEFMl4dwE2XF0aWuvvgUNAMfH5WcD1vO1wfi7edRVFNTfUm4TvuvK/DrjSVXQlzm//vNhF5VRTbKHWhSs2pdRfcPa1XuD6Tw6wBkhz/UQGuBH4oI514Yg1XSnV3mN7InAYiMvzq7WeobVuq7XupLXuhPOL6Sycv0ri8fw2Vko1cT02AVfgPD9x93lwdQ99DZzhircH0BLYTBx+FjxcDSzSWue73kdUP7v1anpkpVRPnMOUmgFHcA5T0lGO4WXgIqA1zhZHvta6d02xhVoXhlh7Axtx/icpdhXv0FpfqJQaifOqfyqVw+kOuo4LqS4M8bYCFgCNca6TcBi4V2v9YzyeXx/x5wDna+ewzHg8v12Aj3B2l1iAX4Dbtdb74zjeOTiHJZYDD2utP4vnz4JSajPOc7rYoyxq8darhC+EEMK/etOlI4QQomaS8IUQooGQhC+EEA2EJHwhhGggJOELIUQDIQlfCCEaCEn4QgjRQEjCF0KIBuL/Af94erlV74ofAAAAAElFTkSuQmCC\n", 160 | "text/plain": [ 161 | "
" 162 | ] 163 | }, 164 | "metadata": { 165 | "needs_background": "light", 166 | "tags": [] 167 | }, 168 | "output_type": "display_data" 169 | } 170 | ], 171 | "source": [ 172 | "# Plot all lines on one chart to see where one segment starts and another ends\n", 173 | "plt.plot(x_train, label = 'Train')\n", 174 | "plt.plot(x_valid, label = 'Validate')\n", 175 | "plt.plot(x_test, label = 'Test')\n", 176 | "plt.legend()\n", 177 | "print(x_train.index.max(),x_valid.index.min(),x_valid.index.max(),x_test.index.min(),x_test.index.max())" 178 | ] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": 5, 183 | "metadata": { 184 | "colab": {}, 185 | "colab_type": "code", 186 | "id": "D-8aiQRzmG7O" 187 | }, 188 | "outputs": [], 189 | "source": [ 190 | "# Reshape values\n", 191 | "x_train_values = x_train.values.reshape(-1, 1)\n", 192 | "x_valid_values = x_valid.values.reshape(-1, 1)\n", 193 | "x_test_values = x_test.values.reshape(-1, 1)\n", 194 | "\n", 195 | "# Create Scaler Object\n", 196 | "x_train_scaler = MinMaxScaler(feature_range=(0, 1))\n", 197 | "\n", 198 | "# Fit x_train values\n", 199 | "normalized_x_train = x_train_scaler.fit_transform(x_train_values)\n", 200 | "\n", 201 | "# Fit x_valid values\n", 202 | "normalized_x_valid = x_train_scaler.transform(x_valid_values)\n", 203 | "\n", 204 | "# Fit x_test values\n", 205 | "normalized_x_test = x_train_scaler.transform(x_test_values)\n", 206 | "\n", 207 | "# All values normalized to training data\n", 208 | "spy_normalized_to_traindata = x_train_scaler.transform(series.values.reshape(-1, 1))\n", 209 | "\n", 210 | "# Example of how to iverse\n", 211 | "# inversed = scaler.inverse_transform(normalized_x_train).flatten()" 212 | ] 213 | }, 214 | { 215 | "cell_type": "markdown", 216 | "metadata": { 217 | "colab_type": "text", 218 | "id": "vDs_w3kZ8OIw" 219 | }, 220 | "source": [ 221 | "## Simple RNN Forecasting" 222 | ] 223 | }, 224 | { 225 | "cell_type": "markdown", 226 | "metadata": { 227 | "colab_type": "text", 228 | "id": "-rKEOyuFHN0N" 229 | }, 230 | "source": [ 231 | "The input for the memory cell at each time step is the batch size by our feature dimensionality (1). The output is thiese same two dimensions times the number of units in the memory cell. Our memory cell is comprised of 100 units in both layers. So the output of our RNN layer is batch(128), window_size(30), and number of units (100) which is obviously 3 dimensional. The output Y_0 is the state vector which is used when calculating Y_1 at the next time step. \n", 232 | "\n", 233 | "In this instance we are doing sequence to vector which means we ignore all outputs except for the one at the very last time step. This is the default behavior of all reccurent layers in Keras unless return_sequences = True is selected. This sequence to vector takes in a batch (128) of time windows and outputs the next time step of window of values. This one at a time output proves to be very slow when training.\n", 234 | "\n", 235 | "For a faster training convergence, we use a sequence to sequence RNN. Compared to the sequence to vector which adjusted the gradient of the loss from the very end of the model all the at layer 2 unit 100, the sequence to sequence RNN calculates the loss at each time step and backpropagates the loss from there. This provides much more gradients and speeds up training. It is important to note we still ignore all outputs besides the last one. We just calculate all intermediate values to update the gradient more quickly." 
236 | ] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": 1, 241 | "metadata": { 242 | "colab": { 243 | "base_uri": "https://localhost:8080/", 244 | "height": 33 245 | }, 246 | "colab_type": "code", 247 | "id": "vbmXjeXSZr96", 248 | "outputId": "4f39e4e3-1cd6-4bc2-d3f0-19be3cb632b9" 249 | }, 250 | "outputs": [ 251 | { 252 | "data": { 253 | "text/plain": [ 254 | "3" 255 | ] 256 | }, 257 | "execution_count": 1, 258 | "metadata": { 259 | "tags": [] 260 | }, 261 | "output_type": "execute_result" 262 | } 263 | ], 264 | "source": [] 265 | }, 266 | { 267 | "cell_type": "markdown", 268 | "metadata": { 269 | "colab_type": "text", 270 | "id": "f9L64bJOiZhK" 271 | }, 272 | "source": [] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": null, 277 | "metadata": { 278 | "colab": {}, 279 | "colab_type": "code", 280 | "id": "YU4xRp9G8OIx" 281 | }, 282 | "outputs": [], 283 | "source": [ 284 | "# Clear any back end stored data due to multiple iterations\n", 285 | "keras.backend.clear_session()\n", 286 | "tf.random.set_seed(42)\n", 287 | "np.random.seed(42)\n", 288 | "\n", 289 | "# Set window size\n", 290 | "window_size = 30\n", 291 | "\n", 292 | "# Create 2D batches of batch size and features (1 feature = 1 time step in window)\n", 293 | "train_set = window_dataset(normalized_x_train, window_size, batch_size=128)\n", 294 | "\n", 295 | "# Establish Model\n", 296 | "model = keras.models.Sequential([\n", 297 | " keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1), # Add a 3rd dimension (feature dimensionality which in this case in univariate)\n", 298 | " input_shape=[None]), # 1st dimension is batch size, input shape = None allows windows of any size to be used\n", 299 | " keras.layers.SimpleRNN(100, return_sequences=True), # takes in sequence (batch size, time, dimensionality per time step (univariate))\n", 300 | " keras.layers.SimpleRNN(100), # produces a single vector\n", 301 | " keras.layers.Dense(1), # produces 1 output \n", 302 | "])\n", 303 | "\n", 304 | "# create standard learning rate scheduler\n", 305 | "lr_schedule = keras.callbacks.LearningRateScheduler(\n", 306 | " lambda epoch: 1e-5 * 10**(epoch / 20))\n", 307 | "\n", 308 | "# establish optimizer\n", 309 | "optimizer = keras.optimizers.Nadam(lr=1e-7)\n", 310 | "\n", 311 | "# Put model all together\n", 312 | "model.compile(loss=keras.losses.Huber(),\n", 313 | " optimizer=optimizer,\n", 314 | " metrics=[\"mae\"])\n", 315 | "history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])" 316 | ] 317 | }, 318 | { 319 | "cell_type": "code", 320 | "execution_count": null, 321 | "metadata": { 322 | "colab": {}, 323 | "colab_type": "code", 324 | "id": "YJTlFAXF8OIy" 325 | }, 326 | "outputs": [], 327 | "source": [ 328 | "plt.semilogx(history.history[\"lr\"], history.history[\"loss\"])\n", 329 | "plt.axis([1e-5, 1, 0, .1])" 330 | ] 331 | }, 332 | { 333 | "cell_type": "code", 334 | "execution_count": null, 335 | "metadata": { 336 | "colab": {}, 337 | "colab_type": "code", 338 | "id": "T3yNjxWE8OIz" 339 | }, 340 | "outputs": [], 341 | "source": [ 342 | "keras.backend.clear_session()\n", 343 | "tf.random.set_seed(42)\n", 344 | "np.random.seed(42)\n", 345 | "\n", 346 | "window_size = 30\n", 347 | "train_set = window_dataset(normalized_x_train, window_size, batch_size=128)\n", 348 | "valid_set = window_dataset(normalized_x_valid, window_size, batch_size=128)\n", 349 | "\n", 350 | "model = keras.models.Sequential([\n", 351 | " keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),\n", 352 | " 
input_shape=[None]),\n", 353 | " keras.layers.SimpleRNN(100, return_sequences=True),\n", 354 | " keras.layers.SimpleRNN(100),\n", 355 | " keras.layers.Dense(1),\n", 356 | "])\n", 357 | "optimizer = keras.optimizers.Nadam(lr=5e-5)\n", 358 | "\n", 359 | "model.compile(loss=keras.losses.Huber(),\n", 360 | " optimizer=optimizer,\n", 361 | " metrics=[\"mae\"])\n", 362 | "\n", 363 | "# Set early stopping to prevent over fitting\n", 364 | "early_stopping = keras.callbacks.EarlyStopping(patience=50)\n", 365 | "\n", 366 | "# save best model to load later\n", 367 | "model_checkpoint = keras.callbacks.ModelCheckpoint(\n", 368 | " \"my_checkpoint\", save_best_only=True)\n", 369 | "\n", 370 | "# compile model\n", 371 | "model.fit(train_set, epochs=500,\n", 372 | " validation_data=valid_set,\n", 373 | " callbacks=[early_stopping, model_checkpoint])" 374 | ] 375 | }, 376 | { 377 | "cell_type": "code", 378 | "execution_count": null, 379 | "metadata": { 380 | "colab": {}, 381 | "colab_type": "code", 382 | "id": "4KuPtKFe8OI0" 383 | }, 384 | "outputs": [], 385 | "source": [ 386 | "model = keras.models.load_model(\"my_checkpoint\")" 387 | ] 388 | }, 389 | { 390 | "cell_type": "code", 391 | "execution_count": null, 392 | "metadata": { 393 | "colab": {}, 394 | "colab_type": "code", 395 | "id": "cxq09qOg8OI1" 396 | }, 397 | "outputs": [], 398 | "source": [ 399 | "rnn_forecast = model_forecast(\n", 400 | " model,\n", 401 | " spy_normalized_to_traindata[x_test.index.min() - window_size:-1],\n", 402 | " window_size)[:, 0]" 403 | ] 404 | }, 405 | { 406 | "cell_type": "code", 407 | "execution_count": null, 408 | "metadata": { 409 | "colab": { 410 | "base_uri": "https://localhost:8080/", 411 | "height": 33 412 | }, 413 | "colab_type": "code", 414 | "id": "p-ddUraXnXJo", 415 | "outputId": "110e2be8-b894-4b8e-ea29-bedde6f1a657" 416 | }, 417 | "outputs": [ 418 | { 419 | "data": { 420 | "text/plain": [ 421 | "(422,)" 422 | ] 423 | }, 424 | "execution_count": 12, 425 | "metadata": { 426 | "tags": [] 427 | }, 428 | "output_type": "execute_result" 429 | } 430 | ], 431 | "source": [ 432 | "rnn_unscaled_forecast = x_train_scaler.inverse_transform(rnn_forecast.reshape(-1,1)).flatten()\n", 433 | "rnn_unscaled_forecast.shape" 434 | ] 435 | }, 436 | { 437 | "cell_type": "code", 438 | "execution_count": null, 439 | "metadata": { 440 | "colab": {}, 441 | "colab_type": "code", 442 | "id": "PkC_JssS8OI2" 443 | }, 444 | "outputs": [], 445 | "source": [ 446 | "plt.figure(figsize=(10,6))\n", 447 | "\n", 448 | "plt.title('SeqtoVec RNN Forecast')\n", 449 | "plt.ylabel('Dollars $')\n", 450 | "plt.xlabel('Timestep in Days')\n", 451 | "plot_series(x_test.index, x_test)\n", 452 | "plot_series(x_test.index, rnn_unscaled_forecast)" 453 | ] 454 | }, 455 | { 456 | "cell_type": "code", 457 | "execution_count": null, 458 | "metadata": { 459 | "colab": {}, 460 | "colab_type": "code", 461 | "id": "1mwfgEK08OI3" 462 | }, 463 | "outputs": [], 464 | "source": [ 465 | "keras.metrics.mean_absolute_error(x_test, rnn_unscaled_forecast).numpy()" 466 | ] 467 | }, 468 | { 469 | "cell_type": "markdown", 470 | "metadata": { 471 | "colab_type": "text", 472 | "id": "KNG7s8jt8OI4" 473 | }, 474 | "source": [ 475 | "## Sequence-to-Sequence Forecasting" 476 | ] 477 | }, 478 | { 479 | "cell_type": "code", 480 | "execution_count": null, 481 | "metadata": { 482 | "colab": {}, 483 | "colab_type": "code", 484 | "id": "bsKGxfiE8OI4" 485 | }, 486 | "outputs": [], 487 | "source": [ 488 | "def seq2seq_window_dataset(series, window_size, batch_size=128,\n", 489 | " 
shuffle_buffer=1000):\n", 490 | " series = tf.expand_dims(series, axis=-1)\n", 491 | " ds = tf.data.Dataset.from_tensor_slices(series)\n", 492 | " ds = ds.window(window_size + 1, shift=1, drop_remainder=True)\n", 493 | " ds = ds.flat_map(lambda w: w.batch(window_size + 1))\n", 494 | " ds = ds.shuffle(shuffle_buffer)\n", 495 | " ds = ds.map(lambda w: (w[:-1], w[1:]))\n", 496 | " return ds.batch(batch_size).prefetch(1)" 497 | ] 498 | }, 499 | { 500 | "cell_type": "markdown", 501 | "metadata": { 502 | "colab_type": "text", 503 | "id": "EmbEO5kkcUnS" 504 | }, 505 | "source": [ 506 | "The cell below illustrates exactly what the function above is doing. the cell above creates batches and laebls for those batches. The point Y[0] is the label for X[0] to try to calculate." 507 | ] 508 | }, 509 | { 510 | "cell_type": "code", 511 | "execution_count": 2, 512 | "metadata": { 513 | "colab": { 514 | "base_uri": "https://localhost:8080/", 515 | "height": 212 516 | }, 517 | "colab_type": "code", 518 | "id": "5Nk2C7WP8OI5", 519 | "outputId": "bd1382ca-6f4a-4dda-8378-72c8e3f711d2" 520 | }, 521 | "outputs": [ 522 | { 523 | "ename": "NameError", 524 | "evalue": "ignored", 525 | "output_type": "error", 526 | "traceback": [ 527 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 528 | "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", 529 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m for X_batch, Y_batch in seq2seq_window_dataset(tf.range(10), 3,\n\u001b[0m\u001b[1;32m 2\u001b[0m batch_size=1):\n\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"X:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_batch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Y:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mY_batch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 530 | "\u001b[0;31mNameError\u001b[0m: name 'seq2seq_window_dataset' is not defined" 531 | ] 532 | } 533 | ], 534 | "source": [ 535 | "# Show example of what seq-seq looks like\n", 536 | "for X_batch, Y_batch in seq2seq_window_dataset(tf.range(10), 3, batch_size=1):\n", 537 | " print(\"X:\", X_batch.numpy())\n", 538 | " print(\"Y:\", Y_batch.numpy())" 539 | ] 540 | }, 541 | { 542 | "cell_type": "code", 543 | "execution_count": null, 544 | "metadata": { 545 | "colab": {}, 546 | "colab_type": "code", 547 | "id": "4JSc-Btk8OI7" 548 | }, 549 | "outputs": [], 550 | "source": [ 551 | "keras.backend.clear_session()\n", 552 | "tf.random.set_seed(42)\n", 553 | "np.random.seed(42)\n", 554 | "\n", 555 | "window_size = 30\n", 556 | "\n", 557 | "# in the seq2seq_window we removed the need for the lambda layer to expand dimensions as it is already 3D \n", 558 | "train_set = seq2seq_window_dataset(normalized_x_train.flatten(), window_size,\n", 559 | " batch_size=128)\n", 560 | "\n", 561 | "# Create model\n", 562 | "model = keras.models.Sequential([\n", 563 | " keras.layers.SimpleRNN(100, return_sequences=True,\n", 564 | " input_shape=[None, 1]),\n", 565 | " keras.layers.SimpleRNN(100, return_sequences=True),\n", 566 | " keras.layers.Dense(1), # now dense layer is applied at every time step\n", 567 | "])\n", 568 
| "\n", 569 | "lr_schedule = keras.callbacks.LearningRateScheduler(\n", 570 | " lambda epoch: 1e-5 * 10**(epoch / 30))\n", 571 | "\n", 572 | "# choose optimizer\n", 573 | "optimizer = keras.optimizers.Nadam(lr=1e-5)\n", 574 | "\n", 575 | "# compile model\n", 576 | "model.compile(loss=keras.losses.Huber(),\n", 577 | " optimizer=optimizer,\n", 578 | " metrics=[\"mae\"])\n", 579 | "\n", 580 | "# create history callback from fit\n", 581 | "history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])" 582 | ] 583 | }, 584 | { 585 | "cell_type": "code", 586 | "execution_count": null, 587 | "metadata": { 588 | "colab": {}, 589 | "colab_type": "code", 590 | "id": "YGNsWceq8OI8" 591 | }, 592 | "outputs": [], 593 | "source": [ 594 | "plt.semilogx(history.history[\"lr\"], history.history[\"loss\"])\n", 595 | "plt.axis([1e-5, 1, 0, .1])" 596 | ] 597 | }, 598 | { 599 | "cell_type": "code", 600 | "execution_count": null, 601 | "metadata": { 602 | "colab": {}, 603 | "colab_type": "code", 604 | "id": "G9lDnb0X8OI9" 605 | }, 606 | "outputs": [], 607 | "source": [ 608 | "keras.backend.clear_session()\n", 609 | "tf.random.set_seed(42)\n", 610 | "np.random.seed(42)\n", 611 | "\n", 612 | "# set window size and create input batch sequences\n", 613 | "window_size = 20\n", 614 | "train_set = seq2seq_window_dataset(normalized_x_train.flatten(), window_size,\n", 615 | " batch_size=128)\n", 616 | "valid_set = seq2seq_window_dataset(normalized_x_valid.flatten(), window_size,\n", 617 | " batch_size=128)\n", 618 | "\n", 619 | "# Create model for seq:seq RNN\n", 620 | "model = keras.models.Sequential([\n", 621 | " keras.layers.SimpleRNN(100, return_sequences=True,\n", 622 | " input_shape=[None, 1]),\n", 623 | " keras.layers.SimpleRNN(100, return_sequences=True),\n", 624 | " keras.layers.Dense(1),\n", 625 | "])\n", 626 | "\n", 627 | "# choose optimizer and LR\n", 628 | "optimizer = keras.optimizers.Nadam(lr=1e-3)\n", 629 | "\n", 630 | "# set model params\n", 631 | "model.compile(loss=keras.losses.Huber(),\n", 632 | " optimizer=optimizer,\n", 633 | " metrics=[\"mae\"])\n", 634 | "\n", 635 | "# set early stopping\n", 636 | "early_stopping = keras.callbacks.EarlyStopping(patience=20)\n", 637 | "\n", 638 | "# set model checkpoint to save best model\n", 639 | "model_checkpoint = keras.callbacks.ModelCheckpoint(\n", 640 | " \"my_checkpoint\", save_best_only=True)\n", 641 | "\n", 642 | "# fit model\n", 643 | "model.fit(train_set, epochs=500,\n", 644 | " validation_data=valid_set,\n", 645 | " callbacks=[early_stopping, model_checkpoint])" 646 | ] 647 | }, 648 | { 649 | "cell_type": "code", 650 | "execution_count": null, 651 | "metadata": { 652 | "colab": {}, 653 | "colab_type": "code", 654 | "id": "iGlOWOtPz3fs" 655 | }, 656 | "outputs": [], 657 | "source": [ 658 | "# recall best model\n", 659 | "model = keras.models.load_model(\"my_checkpoint\")" 660 | ] 661 | }, 662 | { 663 | "cell_type": "code", 664 | "execution_count": null, 665 | "metadata": { 666 | "colab": {}, 667 | "colab_type": "code", 668 | "id": "4mglBRex8OI_" 669 | }, 670 | "outputs": [], 671 | "source": [ 672 | "# create forecast and clip to only show test values\n", 673 | "rnn_forecast = model_forecast(model, spy_normalized_to_traindata.flatten()[..., np.newaxis], window_size)\n", 674 | "rnn_forecast = rnn_forecast[x_test.index.min() - window_size:-1, -1, 0]" 675 | ] 676 | }, 677 | { 678 | "cell_type": "code", 679 | "execution_count": null, 680 | "metadata": { 681 | "colab": { 682 | "base_uri": "https://localhost:8080/", 683 | "height": 33 684 | }, 685 | 
"colab_type": "code", 686 | "id": "W3QO5IfmxmSD", 687 | "outputId": "b40ee48f-2c98-45f3-c4e6-791b8af628a3" 688 | }, 689 | "outputs": [ 690 | { 691 | "data": { 692 | "text/plain": [ 693 | "(422,)" 694 | ] 695 | }, 696 | "execution_count": 29, 697 | "metadata": { 698 | "tags": [] 699 | }, 700 | "output_type": "execute_result" 701 | } 702 | ], 703 | "source": [ 704 | "# Get data back to normal scale\n", 705 | "rnn_unscaled_forecast = x_train_scaler.inverse_transform(rnn_forecast.reshape(-1,1)).flatten()\n", 706 | "rnn_unscaled_forecast.shape" 707 | ] 708 | }, 709 | { 710 | "cell_type": "code", 711 | "execution_count": null, 712 | "metadata": { 713 | "colab": {}, 714 | "colab_type": "code", 715 | "id": "Zl_FkcdI8OJA" 716 | }, 717 | "outputs": [], 718 | "source": [ 719 | "# Plot results\n", 720 | "plt.figure(figsize=(10, 6))\n", 721 | "plt.title('SeqtoSeq RNN')\n", 722 | "plt.ylabel('Dollars $')\n", 723 | "plt.xlabel('Timestep in Days')\n", 724 | "plot_series(x_test.index, x_test)\n", 725 | "plot_series(x_test.index, rnn_unscaled_forecast)" 726 | ] 727 | }, 728 | { 729 | "cell_type": "code", 730 | "execution_count": null, 731 | "metadata": { 732 | "colab": {}, 733 | "colab_type": "code", 734 | "id": "cznEtSVK8OJB" 735 | }, 736 | "outputs": [], 737 | "source": [ 738 | "keras.metrics.mean_absolute_error(x_test, rnn_unscaled_forecast).numpy()" 739 | ] 740 | }, 741 | { 742 | "cell_type": "code", 743 | "execution_count": null, 744 | "metadata": { 745 | "colab": {}, 746 | "colab_type": "code", 747 | "id": "Bnb6Ngxcx8t-" 748 | }, 749 | "outputs": [], 750 | "source": [] 751 | } 752 | ], 753 | "metadata": { 754 | "colab": { 755 | "collapsed_sections": [ 756 | "vidayERjaO5q" 757 | ], 758 | "name": "RNN_seqtovec_seqtoseq.ipynb", 759 | "provenance": [], 760 | "toc_visible": true 761 | }, 762 | "kernelspec": { 763 | "display_name": "Python 3", 764 | "language": "python", 765 | "name": "python3" 766 | }, 767 | "language_info": { 768 | "codemirror_mode": { 769 | "name": "ipython", 770 | "version": 3 771 | }, 772 | "file_extension": ".py", 773 | "mimetype": "text/x-python", 774 | "name": "python", 775 | "nbconvert_exporter": "python", 776 | "pygments_lexer": "ipython3", 777 | "version": "3.7.1" 778 | } 779 | }, 780 | "nbformat": 4, 781 | "nbformat_minor": 1 782 | } 783 | -------------------------------------------------------------------------------- /Notebooks/__pycache__/formulas.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Notebooks/__pycache__/formulas.cpython-37.pyc -------------------------------------------------------------------------------- /Notebooks/arima_111.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Notebooks/arima_111.pkl -------------------------------------------------------------------------------- /Notebooks/formulas.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from scipy import stats 5 | from statsmodels.tsa.seasonal import STL 6 | from statsmodels.tsa.stattools import adfuller 7 | import tensorflow as tf 8 | 9 | 10 | def seq2seq_window_dataset(series, window_size, batch_size=32, 11 | shuffle_buffer=1000): 12 | series = 
tf.expand_dims(series, axis=-1) 13 | ds = tf.data.Dataset.from_tensor_slices(series) 14 | ds = ds.window(window_size + 1, shift=1, drop_remainder=True) 15 | ds = ds.flat_map(lambda w: w.batch(window_size + 1)) 16 | ds = ds.shuffle(shuffle_buffer) 17 | ds = ds.map(lambda w: (w[:-1], w[1:])) 18 | return ds.batch(batch_size).prefetch(1) 19 | 20 | def window_dataset(series, window_size, batch_size=32, 21 | shuffle_buffer=1000): 22 | dataset = tf.data.Dataset.from_tensor_slices(series) 23 | dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True) 24 | dataset = dataset.flat_map(lambda window: window.batch(window_size + 1)) 25 | dataset = dataset.shuffle(shuffle_buffer) 26 | dataset = dataset.map(lambda window: (window[:-1], window[-1])) 27 | dataset = dataset.batch(batch_size).prefetch(1) 28 | return dataset 29 | 30 | 31 | def model_forecast(model, series, window_size): 32 | ds = tf.data.Dataset.from_tensor_slices(series) 33 | ds = ds.window(window_size, shift=1, drop_remainder=True) 34 | ds = ds.flat_map(lambda w: w.batch(window_size)) 35 | ds = ds.batch(32).prefetch(1) 36 | forecast = model.predict(ds) 37 | return forecast 38 | 39 | def test_stationarity(timeseries, window = 12, cutoff = 0.01): 40 | 41 | #Determing rolling statistics 42 | rolmean = timeseries.rolling(window).mean() 43 | rolstd = timeseries.rolling(window).std() 44 | 45 | #Plot rolling statistics: 46 | fig = plt.figure(figsize=(12, 8)) 47 | orig = plt.plot(timeseries, color='blue',label='Original') 48 | mean = plt.plot(rolmean, color='red', label='Rolling Mean') 49 | std = plt.plot(rolstd, color='black', label = 'Rolling Std') 50 | plt.legend(loc='best') 51 | plt.title('Rolling Mean & Standard Deviation') 52 | plt.show() 53 | 54 | #Perform Dickey-Fuller test: 55 | print('Results of Dickey-Fuller Test:') 56 | dftest = adfuller(timeseries, autolag='AIC', maxlag = 20 ) 57 | dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used']) 58 | for key,value in dftest[4].items(): 59 | dfoutput['Critical Value (%s)'%key] = value 60 | pvalue = dftest[1] 61 | if pvalue < cutoff: 62 | print('p-value = %.4f. The series is likely stationary.' % pvalue) 63 | else: 64 | print('p-value = %.4f. The series is likely non-stationary.' % pvalue) 65 | 66 | print(dfoutput) 67 | 68 | def plot_series(time, series, format="-", start=0, end=None, label=None): 69 | """[Plot the series data over a time range] 70 | 71 | Args: 72 | time (data range): [The entire time span of the data in range format] 73 | series ([integers]): [Series value corresponding to its point on the time axis] 74 | format (str, optional): [Graph type]. Defaults to "-". 75 | start (int, optional): [Time to start time series data]. Defaults to 0. 76 | end ([type], optional): [Where to stop time data]. Defaults to None. 77 | label ([str], optional): [Label name of series]. Defaults to None. 78 | """ 79 | plt.plot(time[start:end], series[start:end], format, label=label) 80 | plt.xlabel("Time") 81 | plt.ylabel("Value") 82 | if label: 83 | plt.legend(fontsize=14) 84 | plt.grid(True) 85 | 86 | 87 | def moving_average_forecast(series, window_size): 88 | """Forecasts the mean of the last few values. 
89 | If window_size=1, then this is equivalent to naive forecast 90 | This implementation is *much* faster than the previous one""" 91 | mov = np.cumsum(series) 92 | mov[window_size:] = mov[window_size:] - mov[:-window_size] 93 | return mov[window_size - 1:-1] / window_size 94 | 95 | 96 | def sequential_window_dataset(series, window_size): 97 | ds = tf.data.Dataset.from_tensor_slices(series) 98 | ds = ds.window(window_size + 1, shift=window_size, drop_remainder=True) 99 | ds = ds.flat_map(lambda window: window.batch(window_size + 1)) 100 | ds = ds.map(lambda window: (window[:-1], window[1:])) 101 | return ds.batch(1).prefetch(1) 102 | 103 | 104 | 105 | 106 | def calculate_returns(close): 107 | """ 108 | Compute returns for each ticker and date in close. 109 | 110 | Parameters 111 | ---------- 112 | close : DataFrame 113 | Close prices for each ticker and date 114 | 115 | Returns 116 | ------- 117 | returns : DataFrame 118 | Returns for each ticker and date 119 | """ 120 | # TODO: Implement Function 121 | 122 | return (close - close.shift(1))/close.shift(1) 123 | 124 | 125 | def resample_prices(close_prices, freq='M'): 126 | """ 127 | Resample close prices for each ticker at specified frequency. 128 | 129 | Parameters 130 | ---------- 131 | close_prices : DataFrame 132 | Close prices for each ticker and date 133 | freq : str 134 | What frequency to sample at 135 | For valid freq choices, see http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases 136 | 137 | Returns 138 | ------- 139 | prices_resampled : DataFrame 140 | Resampled prices for each ticker and date 141 | """ 142 | 143 | return close_prices.resample(freq).last() 144 | 145 | def compute_log_returns(prices): 146 | """ 147 | Compute log returns for each ticker. 148 | 149 | Parameters 150 | ---------- 151 | prices : DataFrame 152 | Prices for each ticker and date 153 | 154 | Returns 155 | ------- 156 | log_returns : DataFrame 157 | Log returns for each ticker and date 158 | """ 159 | r_t = np.log(prices) - np.log(prices.shift(1)) 160 | return r_t 161 | 162 | 163 | def portfolio_returns(df_long, df_short, lookahead_returns, n_stocks): 164 | """ 165 | Compute expected returns for the portfolio, assuming equal investment in each long/short stock. 
166 | 167 | Parameters 168 | ---------- 169 | df_long : DataFrame 170 | Top stocks for each ticker and date marked with a 1 171 | df_short : DataFrame 172 | Bottom stocks for each ticker and date marked with a 1 173 | lookahead_returns : DataFrame 174 | Lookahead returns for each ticker and date 175 | n_stocks: int 176 | The number number of stocks chosen for each month 177 | 178 | Returns 179 | ------- 180 | portfolio_returns : DataFrame 181 | Expected portfolio returns for each ticker and date 182 | """ 183 | 184 | 185 | return (lookahead_returns*(df_long - df_short)) / n_stocks 186 | 187 | 188 | def get_top_n(prev_returns, top_n): 189 | """ 190 | Select the top performing stocks 191 | 192 | Parameters 193 | ---------- 194 | prev_returns : DataFrame 195 | Previous shifted returns for each ticker and date 196 | top_n : int 197 | The number of top performing stocks to get 198 | 199 | Returns 200 | ------- 201 | top_stocks : DataFrame 202 | Top stocks for each ticker and date marked with a 1 203 | """ 204 | # TODO: Implement Function 205 | top_stocks = prev_returns.apply(lambda x: x.nlargest(top_n), axis=1) 206 | top_stocks = top_stocks.applymap(lambda x: 0 if pd.isna(x) else 1) 207 | top_stocks = top_stocks.astype(int) 208 | 209 | 210 | return top_stocks 211 | 212 | def analyze_alpha(expected_portfolio_returns_by_date): 213 | """ 214 | Perform a t-test with the null hypothesis being that the expected mean return is zero. 215 | 216 | Parameters 217 | ---------- 218 | expected_portfolio_returns_by_date : Pandas Series 219 | Expected portfolio returns for each date 220 | 221 | Returns 222 | ------- 223 | t_value 224 | T-statistic from t-test 225 | p_value 226 | Corresponding p-value 227 | """ 228 | 229 | t_statistic,p_value = stats.ttest_1samp(expected_portfolio_returns_by_date, 0) 230 | return t_statistic,p_value/2 231 | 232 | def seasonal_trend_decomp_plot(dataframe,target_series, freq, seasonal_smoother, period): 233 | """[summary] 234 | 235 | Args: 236 | dataframe ([pandas dataframe]): [dataframe holding all of your data] 237 | target_series ([series]): [Name of column in data frame you want to build plot from like 'Adj Close'] 238 | freq ([int]): [How do you want to resample data - 'D', 'W','M'] 239 | seasonal_smoother ([type]): [Length of smoother in whatever units as defined by freq] 240 | period ([type]): [Periodicity of the sequence (for monthly = 12/year)] 241 | 242 | Returns: 243 | [STL plot]: [Seasonal-Trend decomposition using LOESS (STL)] 244 | """ 245 | 246 | df = dataframe.set_index('Date') 247 | df = df.resample(freq).last() 248 | target = df[target_series] 249 | stl = STL(target, seasonal = seasonal_smoother) 250 | res = stl.fit() 251 | res.plot() 252 | return 253 | 254 | def resample_series(dataframe, target,freq): 255 | df = dataframe.set_index('Date') 256 | df = df.resample(freq).last() 257 | target = df[target] 258 | return target -------------------------------------------------------------------------------- /Notebooks/my_checkpoint/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Notebooks/my_checkpoint/saved_model.pb -------------------------------------------------------------------------------- /Notebooks/my_checkpoint/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Notebooks/my_checkpoint/variables/variables.data-00000-of-00001 -------------------------------------------------------------------------------- /Notebooks/my_checkpoint/variables/variables.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Notebooks/my_checkpoint/variables/variables.index -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ARIMA model for Stock Prediction 2 | 3 | ## Intro 4 | To begin my journey into algorithmic trading, I have made multiple models of varying complexity, ranging from naive predictions to a fully convolutional neural network. The purpose of building all these models is to create a framework to build future models on, and eventually a whole training system. In addition, this was an experiment designed to see whether complex models such as deep neural networks outperform simple ones. 5 |
6 |
7 | ![Spy Chart](/Images/spy_plot.png) 8 | 9 | 10 | ## Relevant Files 11 | 1. [SPY.csv](https://github.com/bsamaha/Python-Trading-Robot/blob/master/SPY.csv) - This file contains the pulled data on the SPY ETF from its inception until 8/31/2020. 12 | 2. [SPY Time Series Forecasting.ipynb](https://github.com/bsamaha/Python-Trading-Robot/blob/master/Notebooks/1.%20Time%20Series%20Forecasting%20with%20Naive%2C%20Moving%20Averages%2C%20and%20ARIMA.ipynb) - This notebook contains code showing how to update SPY.csv to the present day, and it also contains a naive model, a 5-day moving average, a 20-day moving average, an ARIMA model, and a recurrent neural network model. 13 | 3. [Linear Model Forecast](https://github.com/bsamaha/Python-Trading-Robot/blob/master/Notebooks/2.%20Linear_Model_Forecast.ipynb) - This notebook was created in Google Colab and may need to be loaded into Colab to run, as may all notebooks succeeding this one. This is a linear model with a single dense neuron. 14 | 4. [Dense Forecasting](https://github.com/bsamaha/Python-Trading-Robot/blob/master/Notebooks/3.%20Dense_Forecast.ipynb) - This was created in Colab and is a model of two dense layers containing 10 units each. 15 | 5. [RNN Notebook](https://github.com/bsamaha/Python-Trading-Robot/blob/master/Notebooks/4.%20RNN_seqtovec_seqtoseq.ipynb) - This notebook was created in Colab and shows how I ran this model using RNNs. 16 | 6. [LSTM.ipynb](https://github.com/bsamaha/Python-Trading-Robot/blob/master/Notebooks/5.%20LSTM_Model.ipynb) - This notebook contains a model built using Long Short-Term Memory cells in the recurrent neural network. It was built in Google Colab and is intended to be run in Google Colab for GPU purposes. 17 | 7. [Preprocessing with CNN](https://github.com/bsamaha/Python-Trading-Robot/blob/master/Notebooks/6.Preprocess_CNN.ipynb) - In Colab, this notebook shows how to use a 1D conv net to preprocess data for an RNN. 18 | 8. [Full CNN - WaveNet](https://github.com/bsamaha/Python-Trading-Robot/blob/master/Notebooks/7.%20Full_CNN_Wavenet.ipynb) - This notebook, created in Colab, shows how to create a full CNN for time series analysis using a WaveNet-like architecture. 19 | 9. [Formulas.py](https://github.com/bsamaha/Python-Trading-Robot/blob/master/Notebooks/formulas.py) - This .py file contains a miscellaneous group of functions I thought I would be using throughout this project. It is imported only into the "Time Series Forecasting.ipynb" notebook; in the notebooks authored in Colab, the relevant functions are defined inside the notebook itself. 20 | 21 | ## Project Summary 22 | The resulting error from all models built and tested is shown in the bar graph below. As you can see, the simplest model, a naive forecast, outperformed many of the complex deep learning algorithms in terms of error. Surprisingly to me, the LSTM with a 30-day rolling window outperformed all the other models. 23 | 24 | ![Spy Chart](/Images/model_results.png) 25 | 26 | 27 | 28 | ### Data 29 | The data was pulled using the yfinance API. It covers the entire existence of the SPY ETF, from its inception in January 1993 until today's date of 9/1/2020. To update this data, simply uncomment all the code in the "update data" cell and rerun. If enough time has passed, you may want to alter the train, test, and validate data splits. 30 | 31 | This project was a univariate time series analysis focused on predicting the next day's close price through various methods. The data is in daily time steps. As you can see, the data is just shy of 7,000 data points.
The graph below shows the entire data range and how it is broken up into the train,validate,test segments. 32 | 33 | ![Spy Chart](/Images/SPY_train_valid_test_plot.png) 34 | 35 | ### Model Results 36 | 37 | #### 1. ***Naive Forecast Model***
38 | Naive models are naive because they don't actually "predict". The naive model uses the previous day's price as its prediction of tomorrow's price. Since there is usually not a large change from day to day in the stock market, this model performs really well. 39 | 40 | 41 | ![Naive Model](/Images/naive_forecast_plot.png) 42 | This shows a full view of the entire training period. However, since the predicted and the actual prices are so close, it is hard to see the differences here. 43 | 44 | ![Naive Model Zoom](/Images/naive_forecast_plot_zoom.png) 45 | This is a zoomed-in view of the same model focusing on only the last 10 data points. Here you can easily see the Forecast values mimic the Actual values with a 1-day lag. 46 | 47 | #### 2. ***Moving Average Models*** 48 | Simple Moving Averages (SMA) are a way of smoothing out the noise in the data to get a better idea of which way the signal is trending. These are not good predictive models, but I wanted to showcase them because they are often used in conjunction with other models to generate trading signals. The naive forecast is actually the same thing as a 1-day moving average. 49 | 50 | There are a variety of moving average types, most commonly simple and exponential. Simple moving averages take the unweighted average of the price over a certain span of time, while exponential moving averages apply a weighting factor that gives more weight to recent prices and less to older ones. A short pandas sketch of both is shown after the EMA formula below. 51 | 52 | **EMA=Price(t)×k+EMA(y)×(1−k)<br>
53 | *where:* 54 | t=today
55 | y=yesterday
56 | N=number of days in EMA
57 | k=2÷(N+1)** 58 |
59 |
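To make the moving-average math concrete, here is a minimal pandas sketch. The `Date` and `Close` column names are assumptions about how SPY.csv is laid out (swap in `Adj Close` if you prefer adjusted prices); note that pandas' `ewm(span=N)` uses exactly the k = 2 ÷ (N + 1) weighting shown above.

```python
import pandas as pd

# Assumed layout: SPY.csv with 'Date' and 'Close' columns, as pulled via yfinance.
spy = pd.read_csv('SPY.csv', parse_dates=['Date'], index_col='Date')

# 20-day simple moving average: equal weight on each of the last 20 closes.
spy['sma_20'] = spy['Close'].rolling(window=20).mean()

# 20-day exponential moving average: weight k = 2 / (20 + 1) on the newest close.
spy['ema_20'] = spy['Close'].ewm(span=20, adjust=False).mean()

print(spy[['Close', 'sma_20', 'ema_20']].tail())
```

Swapping in `window=5` / `span=5` gives the one-trading-week versions discussed below.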
60 | 61 | ![20 SMA](/Images/20_day_ma_plot.png) 62 | 63 | Here we can see that the 20-day moving average is not a good predictor, but it is indicative of a trend. 20 days may sound like an arbitrary number, but it is important to remember there are only 5 trading days in a week, not 7. This means 20 days is a full trading month. 64 | 65 | ![5 SMA](/Images/5_SMA.png) 66 | 67 | The 5 SMA follows the actual values much more closely than the 20 SMA, as expected. Just as 20 days is a trading month, 5 days is a full trading week. 68 | 69 | #### 3. ***ARIMA Model*** 70 | There is a lot of information about the ARIMA model in the notebook, so I will not go into too much detail here. ARIMA stands for AutoRegressive Integrated Moving Average, which means it has 3 main inputs. 71 | 72 | For more information on ARIMA models, check out my blog post [Build an ARIMA Model to Predict a Stock’s Price](https://levelup.gitconnected.com/build-an-arima-model-to-predict-a-stocks-price-c9e1e49367d3). 73 | 74 | - The 1st input (p) uses the dependent relationship between an observation and some number of lagged observations. For example, movie theaters typically sell the most tickets on Fridays, so there is a correlation with the spike in ticket sales every 7 days. 75 | - The 2nd input (d) is the amount of differencing required to make the data stationary. Stationary is just a fancy way of saying the mean of the data does not change over time. The difference is simply Day(t) - Day(t-1). 76 | - The 3rd input (q) is the size of the moving average window. 77 | 78 | An important factor in analyzing time series data is breaking down the seasonality and trend. Here is a season-trend decomposition plot of our data. 79 | 80 | ![Trend Decomp](/Images/Season_Trend_Decomposition.png) 81 | 82 | 83 | The ARIMA model used was a (p=1, d=1, q=1) model, as this was the quickest to train and the difference in performance versus other candidate orders was minute. For more information on how I came to decide on this model, please examine the SPY Time Series Forecasting.ipynb; a rough sketch of fitting such a model is shown below.<br>
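As a rough illustration only (not the notebook's exact code), fitting a (1,1,1) model with a recent version of statsmodels looks like the sketch below; the `Date`/`Close` column names are assumptions about the SPY.csv layout.

```python
import pandas as pd
from statsmodels.tsa.arima.model import ARIMA

# Assumed layout: SPY.csv with 'Date' and 'Close' columns.
spy = pd.read_csv('SPY.csv', parse_dates=['Date'], index_col='Date')
close = spy['Close']

# p=1 autoregressive lag, d=1 order of differencing, q=1 moving-average term.
model = ARIMA(close, order=(1, 1, 1))
fitted = model.fit()
print(fitted.summary())

# One-step-ahead forecast of the next trading day's close.
print(fitted.forecast(steps=1))
```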
84 | ![ARIMA Prediction](/Images/arima_predictions.png) 85 | 86 | This is a zoomed-in image of the same model pictured above. 87 | ![ARIMA Prediction Zoom](/Images/arima_predictions_zoom.png) 88 | 89 | #### 4. ***Linear and Dense Model using Keras/Tensorflow*** 90 | 91 | Output = activation(dot(input, kernel) + bias) 92 | That looks familiar, doesn't it? It is almost identical to y = mx + b. The dot product is the sum of the products of two sequences, and if the two sequences each have a length of 1, it is just the product of those two numbers. This simplifies down to the all-too-familiar y = mx + b. 93 | 94 | A dense layer is just a regular layer of neurons in a neural network. Each neuron receives input from all the neurons in the previous layer, hence "densely connected". The layer has a weight matrix W, a bias vector b, and the activations of the previous layer a; the expression above is how the Keras documentation describes what the Dense layer computes. 95 | 96 | The first model is a linear model using only one dense layer with a single neuron. 97 | 98 | ![Linear Model](/Images/linear_model.png) 99 | 100 | 101 | The second model is a dense model consisting of 2 layers with 10 neurons each. 102 | 103 | ![Dense Forecast](/Images/dense_forecast.png) 104 | 105 | #### 5. ***Recurrent Neural Network*** 106 | 107 | Using Keras, this model was built with 2 SimpleRNN layers of 100 neurons each. One model was constructed with a sequence-to-vector framework and another with a sequence-to-sequence framework. The RNNs are where our models begin to get much more complicated: our input features from here on are 3-dimensional, the dimensions being batch size, number of time steps, and number of input features. Since we are only using the closing price, the number of input features is 1, also known as univariate. 108 | 109 | ![RNN Prediction](/Images/rnn_forecast.png) 110 | 111 | #### 6. ***LSTM Model*** 112 | 113 | LSTM stands for Long Short-Term Memory. The cell has a memory and is therefore much better at remembering patterns and making predictions based on them. This was the best performing model I built in this project. It is interesting how poorly the 20-day window did, while the 30-day window was the best model by far. 114 | 115 | ![LSTM Prediction](/Images/LSTM_20.png) 116 | ![LSTM Prediction](/Images/lstm_30day_window.png) 117 | 118 | 119 | #### 7. ***CNN preprocessing for RNN and Full CNN with WaveNet-like architecture*** 120 | 121 | I was surprised at how poorly the CNN preprocessing model performed. It was the worst performing model by far. The model seems to underpredict both upward and downward moves, because CNNs have a moving-average-like trait, and it clearly shows in this model. 122 | ![CNN Preprocess](/Images/cnn_preprocess_rnn_model.png) 123 | 124 | Finally, the last model I created was a full CNN with a WaveNet-like architecture, which is explained further in the notebook; a rough sketch of the layer stack is shown below. This model performed relatively well, as it had the 2nd lowest MAE, but it seemed to consistently predict a higher price than the actual, no matter which direction the general trend was moving.
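For reference, a WaveNet-like stack in Keras looks roughly like the sketch below; the filter counts, dilation rates, and optimizer are illustrative rather than the notebook's exact settings. The idea is a pile of causal 1D convolutions whose dilation rate doubles at each layer, so the receptive field grows exponentially, finished with a 1×1 convolution that emits a forecast at every time step.

```python
from tensorflow import keras

model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))  # univariate series, any length

# Causal convolutions with doubling dilation rates: each layer looks further back in time.
for dilation_rate in (1, 2, 4, 8, 16, 32):
    model.add(keras.layers.Conv1D(filters=32, kernel_size=2, padding="causal",
                                  activation="relu", dilation_rate=dilation_rate))

# A 1x1 convolution maps the final feature map to one forecast value per time step.
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))

model.compile(loss=keras.losses.Huber(), optimizer="adam", metrics=["mae"])
model.summary()
```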
125 | 126 | ![CNN Preprocess](/Images/full_cnn_wavenet.png) 127 | -------------------------------------------------------------------------------- /Time Series Modeling Presentation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/Time Series Modeling Presentation.pdf -------------------------------------------------------------------------------- /my_checkpoint/saved_model.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/my_checkpoint/saved_model.pb -------------------------------------------------------------------------------- /my_checkpoint/variables/variables.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/my_checkpoint/variables/variables.data-00000-of-00001 -------------------------------------------------------------------------------- /my_checkpoint/variables/variables.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xpranjal/Stock-Prediction-using-different-models/86b123b6354c936de18cf96d456dd382b3fed35e/my_checkpoint/variables/variables.index -------------------------------------------------------------------------------- /pyrobot/indicators.py: -------------------------------------------------------------------------------- 1 | import operator 2 | import numpy as np 3 | import pandas as pd 4 | 5 | from typing import Any 6 | from typing import List 7 | from typing import Dict 8 | from typing import Tuple 9 | from typing import Union 10 | from typing import Optional 11 | from typing import Iterable 12 | 13 | from pyrobot.stock_frame import StockFrame 14 | 15 | 16 | class Indicators(): 17 | 18 | """ 19 | Represents an Indicator Object which can be used 20 | to easily add technical indicators to a StockFrame. 21 | """ 22 | 23 | def __init__(self, price_data_frame: StockFrame) -> None: 24 | """Initalizes the Indicator Client. 25 | 26 | Arguments: 27 | ---- 28 | price_data_frame {pyrobot.StockFrame} -- The price data frame which is used to add indicators to. 29 | At a minimum this data frame must have the following columns: `['timestamp','close','open','high','low']`. 30 | 31 | Usage: 32 | ---- 33 | >>> historical_prices_df = trading_robot.grab_historical_prices( 34 | start=start_date, 35 | end=end_date, 36 | bar_size=1, 37 | bar_type='minute' 38 | ) 39 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 40 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 41 | >>> indicator_client.price_data_frame 42 | """ 43 | 44 | self._stock_frame: StockFrame = price_data_frame 45 | self._price_groups = price_data_frame.symbol_groups 46 | self._current_indicators = {} 47 | self._indicator_signals = {} 48 | self._frame = self._stock_frame.frame 49 | 50 | if self.is_multi_index: 51 | True 52 | 53 | def get_indicator_signal(self, indicator: Optional[str]= None) -> Dict: 54 | """Return the raw Pandas Dataframe Object. 55 | 56 | Arguments: 57 | ---- 58 | indicator {Optional[str]} -- The indicator key, for example `ema` or `sma`. 
59 | 60 | Returns: 61 | ---- 62 | {dict} -- Either all of the indicators or the specified indicator. 63 | """ 64 | 65 | if indicator and indicator in self._indicator_signals: 66 | return self._indicator_signals[indicator] 67 | else: 68 | return self._indicator_signals 69 | 70 | 71 | def set_indicator_signal(self, indicator: str, buy: float, sell: float, condition_buy: Any, condition_sell: Any, 72 | buy_max: float = None, sell_max: float = None, condition_buy_max: Any = None, condition_sell_max: Any = None) -> None: 73 | """Return the raw Pandas Dataframe Object. 74 | 75 | Arguments: 76 | ---- 77 | indicator {str} -- The indicator key, for example `ema` or `sma`. 78 | 79 | buy {float} -- The buy signal threshold for the indicator. 80 | 81 | sell {float} -- The sell signal threshold for the indicator. 82 | 83 | condition_buy {str} -- The operator which is used to evaluate the `buy` condition. For example, `">"` would 84 | represent greater than or from the `operator` module it would represent `operator.gt`. 85 | 86 | condition_sell {str} -- The operator which is used to evaluate the `sell` condition. For example, `">"` would 87 | represent greater than or from the `operator` module it would represent `operator.gt`. 88 | 89 | buy_max {float} -- If the buy threshold has a maximum value that needs to be set, then set the `buy_max` threshold. 90 | This means if the signal exceeds this amount it WILL NOT PURCHASE THE INSTRUMENT. (defaults to None). 91 | 92 | sell_max {float} -- If the sell threshold has a maximum value that needs to be set, then set the `buy_max` threshold. 93 | This means if the signal exceeds this amount it WILL NOT SELL THE INSTRUMENT. (defaults to None). 94 | 95 | condition_buy_max {str} -- The operator which is used to evaluate the `buy_max` condition. For example, `">"` would 96 | represent greater than or from the `operator` module it would represent `operator.gt`. (defaults to None). 97 | 98 | condition_sell_max {str} -- The operator which is used to evaluate the `sell_max` condition. For example, `">"` would 99 | represent greater than or from the `operator` module it would represent `operator.gt`. (defaults to None). 100 | """ 101 | 102 | # Add the key if it doesn't exist. 103 | if indicator not in self._indicator_signals: 104 | self._indicator_signals[indicator] = {} 105 | 106 | # Add the signals. 107 | self._indicator_signals[indicator]['buy'] = buy 108 | self._indicator_signals[indicator]['sell'] = sell 109 | self._indicator_signals[indicator]['buy_operator'] = condition_buy 110 | self._indicator_signals[indicator]['sell_operator'] = condition_sell 111 | 112 | # Add the max signals 113 | self._indicator_signals[indicator]['buy_max'] = buy_max 114 | self._indicator_signals[indicator]['sell_max'] = sell_max 115 | self._indicator_signals[indicator]['buy_operator_max'] = condition_buy_max 116 | self._indicator_signals[indicator]['sell_operator_max'] = condition_sell_max 117 | 118 | @property 119 | def price_data_frame(self) -> pd.DataFrame: 120 | """Return the raw Pandas Dataframe Object. 121 | 122 | Returns: 123 | ---- 124 | {pd.DataFrame} -- A multi-index data frame. 125 | """ 126 | 127 | return self._frame 128 | 129 | @price_data_frame.setter 130 | def price_data_frame(self, price_data_frame: pd.DataFrame) -> None: 131 | """Sets the price data frame. 132 | 133 | Arguments: 134 | ---- 135 | price_data_frame {pd.DataFrame} -- A multi-index data frame. 
136 | """ 137 | 138 | self._frame = price_data_frame 139 | 140 | @property 141 | def is_multi_index(self) -> bool: 142 | """Specifies whether the data frame is a multi-index dataframe. 143 | 144 | Returns: 145 | ---- 146 | {bool} -- `True` if the data frame is a `pd.MultiIndex` object. `False` otherwise. 147 | """ 148 | 149 | if isinstance(self._frame.index, pd.MultiIndex): 150 | return True 151 | else: 152 | return False 153 | 154 | def change_in_price(self) -> pd.DataFrame: 155 | """Calculates the Change in Price. 156 | 157 | Returns: 158 | ---- 159 | {pd.DataFrame} -- A data frame with the Change in Price included. 160 | """ 161 | 162 | locals_data = locals() 163 | del locals_data['self'] 164 | 165 | column_name = 'change_in_price' 166 | self._current_indicators[column_name] = {} 167 | self._current_indicators[column_name]['args'] = locals_data 168 | self._current_indicators[column_name]['func'] = self.change_in_price 169 | 170 | self._frame[column_name] = self._price_groups['close'].transform( 171 | lambda x: x.diff() 172 | ) 173 | 174 | return self._frame 175 | 176 | def rsi(self, period: int, method: str = 'wilders') -> pd.DataFrame: 177 | """Calculates the Relative Strength Index (RSI). 178 | 179 | Arguments: 180 | ---- 181 | period {int} -- The number of periods to use to calculate the RSI. 182 | 183 | Keyword Arguments: 184 | ---- 185 | method {str} -- The calculation methodology. (default: {'wilders'}) 186 | 187 | Returns: 188 | ---- 189 | {pd.DataFrame} -- A Pandas data frame with the RSI indicator included. 190 | 191 | Usage: 192 | ---- 193 | >>> historical_prices_df = trading_robot.grab_historical_prices( 194 | start=start_date, 195 | end=end_date, 196 | bar_size=1, 197 | bar_type='minute' 198 | ) 199 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 200 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 201 | >>> indicator_client.rsi(period=14) 202 | >>> price_data_frame = inidcator_client.price_data_frame 203 | """ 204 | 205 | locals_data = locals() 206 | del locals_data['self'] 207 | 208 | column_name = 'rsi' 209 | self._current_indicators[column_name] = {} 210 | self._current_indicators[column_name]['args'] = locals_data 211 | self._current_indicators[column_name]['func'] = self.rsi 212 | 213 | # First calculate the Change in Price. 214 | if 'change_in_price' not in self._frame.columns: 215 | self.change_in_price() 216 | 217 | # Define the up days. 218 | self._frame['up_day'] = self._price_groups['change_in_price'].transform( 219 | lambda x : np.where(x >= 0, x, 0) 220 | ) 221 | 222 | # Define the down days. 223 | self._frame['down_day'] = self._price_groups['change_in_price'].transform( 224 | lambda x : np.where(x < 0, x.abs(), 0) 225 | ) 226 | 227 | # Calculate the EWMA for the Up days. 228 | self._frame['ewma_up'] = self._price_groups['up_day'].transform( 229 | lambda x: x.ewm(span = period).mean() 230 | ) 231 | 232 | # Calculate the EWMA for the Down days. 233 | self._frame['ewma_down'] = self._price_groups['down_day'].transform( 234 | lambda x: x.ewm(span = period).mean() 235 | ) 236 | 237 | # Calculate the Relative Strength 238 | relative_strength = self._frame['ewma_up'] / self._frame['ewma_down'] 239 | 240 | # Calculate the Relative Strength Index 241 | relative_strength_index = 100.0 - (100.0 / (1.0 + relative_strength)) 242 | 243 | # Add the info to the data frame. 244 | self._frame['rsi'] = np.where(relative_strength_index == 0, 100, 100 - (100 / (1 + relative_strength_index))) 245 | 246 | # Clean up before sending back. 
247 | self._frame.drop( 248 | labels=['ewma_up', 'ewma_down', 'down_day', 'up_day', 'change_in_price'], 249 | axis=1, 250 | inplace=True 251 | ) 252 | 253 | return self._frame 254 | 255 | def sma(self, period: int) -> pd.DataFrame: 256 | """Calculates the Simple Moving Average (SMA). 257 | 258 | Arguments: 259 | ---- 260 | period {int} -- The number of periods to use when calculating the SMA. 261 | 262 | Returns: 263 | ---- 264 | {pd.DataFrame} -- A Pandas data frame with the SMA indicator included. 265 | 266 | Usage: 267 | ---- 268 | >>> historical_prices_df = trading_robot.grab_historical_prices( 269 | start=start_date, 270 | end=end_date, 271 | bar_size=1, 272 | bar_type='minute' 273 | ) 274 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 275 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 276 | >>> indicator_client.sma(period=100) 277 | """ 278 | 279 | locals_data = locals() 280 | del locals_data['self'] 281 | 282 | column_name = 'sma' 283 | self._current_indicators[column_name] = {} 284 | self._current_indicators[column_name]['args'] = locals_data 285 | self._current_indicators[column_name]['func'] = self.sma 286 | 287 | # Add the SMA 288 | self._frame[column_name] = self._price_groups['close'].transform( 289 | lambda x: x.rolling(window=period).mean() 290 | ) 291 | 292 | return self._frame 293 | 294 | def ema(self, period: int, alpha: float = 0.0) -> pd.DataFrame: 295 | """Calculates the Exponential Moving Average (EMA). 296 | 297 | Arguments: 298 | ---- 299 | period {int} -- The number of periods to use when calculating the EMA. 300 | 301 | alpha {float} -- The alpha weight used in the calculation. (default: {0.0}) 302 | 303 | Returns: 304 | ---- 305 | {pd.DataFrame} -- A Pandas data frame with the EMA indicator included. 306 | 307 | Usage: 308 | ---- 309 | >>> historical_prices_df = trading_robot.grab_historical_prices( 310 | start=start_date, 311 | end=end_date, 312 | bar_size=1, 313 | bar_type='minute' 314 | ) 315 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 316 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 317 | >>> indicator_client.ema(period=50, alpha=1/50) 318 | """ 319 | 320 | locals_data = locals() 321 | del locals_data['self'] 322 | 323 | column_name = 'ema' 324 | self._current_indicators[column_name] = {} 325 | self._current_indicators[column_name]['args'] = locals_data 326 | self._current_indicators[column_name]['func'] = self.ema 327 | 328 | # Add the EMA 329 | self._frame[column_name] = self._price_groups['close'].transform( 330 | lambda x: x.ewm(span=period).mean() 331 | ) 332 | 333 | return self._frame 334 | 335 | def rate_of_change(self, period: int = 1) -> pd.DataFrame: 336 | """Calculates the Rate of Change (ROC). 337 | 338 | Arguments: 339 | ---- 340 | period {int} -- The number of periods to use when calculating 341 | the ROC. (default: {1}) 342 | 343 | Returns: 344 | ---- 345 | {pd.DataFrame} -- A Pandas data frame with the ROC indicator included. 
346 | 347 | Usage: 348 | ---- 349 | >>> historical_prices_df = trading_robot.grab_historical_prices( 350 | start=start_date, 351 | end=end_date, 352 | bar_size=1, 353 | bar_type='minute' 354 | ) 355 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 356 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 357 | >>> indicator_client.rate_of_change() 358 | """ 359 | locals_data = locals() 360 | del locals_data['self'] 361 | 362 | column_name = 'rate_of_change' 363 | self._current_indicators[column_name] = {} 364 | self._current_indicators[column_name]['args'] = locals_data 365 | self._current_indicators[column_name]['func'] = self.rate_of_change 366 | 367 | # Add the Momentum indicator. 368 | self._frame[column_name] = self._price_groups['close'].transform( 369 | lambda x: x.pct_change(periods=period) 370 | ) 371 | 372 | return self._frame 373 | 374 | def bollinger_bands(self, period: int = 20) -> pd.DataFrame: 375 | """Calculates the Bollinger Bands. 376 | 377 | Arguments: 378 | ---- 379 | period {int} -- The number of periods to use when calculating 380 | the Bollinger Bands. (default: {20}) 381 | 382 | Returns: 383 | ---- 384 | {pd.DataFrame} -- A Pandas data frame with the Lower and Upper band 385 | indicator included. 386 | 387 | Usage: 388 | ---- 389 | >>> historical_prices_df = trading_robot.grab_historical_prices( 390 | start=start_date, 391 | end=end_date, 392 | bar_size=1, 393 | bar_type='minute' 394 | ) 395 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 396 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 397 | >>> indicator_client.bollinger_bands() 398 | """ 399 | locals_data = locals() 400 | del locals_data['self'] 401 | 402 | column_name = 'bollinger_bands' 403 | self._current_indicators[column_name] = {} 404 | self._current_indicators[column_name]['args'] = locals_data 405 | self._current_indicators[column_name]['func'] = self.bollinger_bands 406 | 407 | # Define the Moving Avg. 408 | self._frame['moving_avg'] = self._price_groups['close'].transform( 409 | lambda x : x.rolling(window=period).mean() 410 | ) 411 | 412 | # Define Moving Std. 413 | self._frame['moving_std'] = self._price_groups['close'].transform( 414 | lambda x : x.rolling(window=period).std() 415 | ) 416 | 417 | # Define the Upper Band. 418 | self._frame['band_upper'] = 4 * (self._frame['moving_std'] / self._frame['moving_avg']) 419 | 420 | # Define the lower band 421 | self._frame['band_lower'] = ( 422 | (self._frame['close'] - self._frame['moving_avg']) + 423 | (2 * self._frame['moving_std']) / 424 | (4 * self._frame['moving_std']) 425 | ) 426 | 427 | # Clean up before sending back. 428 | self._frame.drop( 429 | labels=['moving_avg', 'moving_std'], 430 | axis=1, 431 | inplace=True 432 | ) 433 | 434 | return self._frame 435 | 436 | def average_true_range(self, period: int = 14) -> pd.DataFrame: 437 | """Calculates the Average True Range (ATR). 438 | 439 | Arguments: 440 | ---- 441 | period {int} -- The number of periods to use when calculating 442 | the ATR. (default: {14}) 443 | 444 | Returns: 445 | ---- 446 | {pd.DataFrame} -- A Pandas data frame with the ATR included. 
447 | 448 | Usage: 449 | ---- 450 | >>> historical_prices_df = trading_robot.grab_historical_prices( 451 | start=start_date, 452 | end=end_date, 453 | bar_size=1, 454 | bar_type='minute' 455 | ) 456 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 457 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 458 | >>> indicator_client.average_true_range() 459 | """ 460 | 461 | locals_data = locals() 462 | del locals_data['self'] 463 | 464 | column_name = 'average_true_range' 465 | self._current_indicators[column_name] = {} 466 | self._current_indicators[column_name]['args'] = locals_data 467 | self._current_indicators[column_name]['func'] = self.average_true_range 468 | 469 | 470 | # Calculate the different parts of True Range. 471 | self._frame['true_range_0'] = abs(self._frame['high'] - self._frame['low']) 472 | self._frame['true_range_1'] = abs(self._frame['high'] - self._frame['close'].shift()) 473 | self._frame['true_range_2'] = abs(self._frame['low'] - self._frame['close'].shift()) 474 | 475 | # Grab the Max. 476 | self._frame['true_range'] = self._frame[['true_range_0', 'true_range_1', 'true_range_2']].max(axis=1) 477 | 478 | # Calculate the Average True Range. 479 | self._frame['average_true_range'] = self._frame['true_range'].transform( 480 | lambda x: x.ewm(span = period, min_periods = period).mean() 481 | ) 482 | 483 | # Clean up before sending back. 484 | self._frame.drop( 485 | labels=['true_range_0', 'true_range_1', 'true_range_2', 'true_range'], 486 | axis=1, 487 | inplace=True 488 | ) 489 | 490 | return self._frame 491 | 492 | def stochastic_oscillator(self) -> pd.DataFrame: 493 | """Calculates the Stochastic Oscillator. 494 | 495 | Returns: 496 | ---- 497 | {pd.DataFrame} -- A Pandas data frame with the Stochastic Oscillator included. 498 | 499 | Usage: 500 | ---- 501 | >>> historical_prices_df = trading_robot.grab_historical_prices( 502 | start=start_date, 503 | end=end_date, 504 | bar_size=1, 505 | bar_type='minute' 506 | ) 507 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 508 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 509 | >>> indicator_client.stochastic_oscillator() 510 | """ 511 | 512 | locals_data = locals() 513 | del locals_data['self'] 514 | 515 | column_name = 'stochastic_oscillator' 516 | self._current_indicators[column_name] = {} 517 | self._current_indicators[column_name]['args'] = locals_data 518 | self._current_indicators[column_name]['func'] = self.stochastic_oscillator 519 | 520 | # Calculate the stochastic_oscillator. 521 | self._frame['stochastic_oscillator'] = ( 522 | self._frame['close'] - self._frame['low'] / 523 | self._frame['high'] - self._frame['low'] 524 | ) 525 | 526 | return self._frame 527 | 528 | def macd(self, fast_period: int = 12, slow_period: int = 26) -> pd.DataFrame: 529 | """Calculates the Moving Average Convergence Divergence (MACD). 530 | 531 | Arguments: 532 | ---- 533 | fast_period {int} -- The number of periods to use when calculating 534 | the fast moving MACD. (default: {12}) 535 | 536 | slow_period {int} -- The number of periods to use when calculating 537 | the slow moving MACD. (default: {26}) 538 | 539 | Returns: 540 | ---- 541 | {pd.DataFrame} -- A Pandas data frame with the MACD included. 
542 | 543 | Usage: 544 | ---- 545 | >>> historical_prices_df = trading_robot.grab_historical_prices( 546 | start=start_date, 547 | end=end_date, 548 | bar_size=1, 549 | bar_type='minute' 550 | ) 551 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 552 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 553 | >>> indicator_client.macd(fast_period=12, slow_period=26) 554 | """ 555 | 556 | locals_data = locals() 557 | del locals_data['self'] 558 | 559 | column_name = 'macd' 560 | self._current_indicators[column_name] = {} 561 | self._current_indicators[column_name]['args'] = locals_data 562 | self._current_indicators[column_name]['func'] = self.macd 563 | 564 | # Calculate the Fast Moving MACD. 565 | self._frame['macd_fast'] = self._frame['close'].transform( 566 | lambda x: x.ewm(span = fast_period, min_periods = fast_period).mean() 567 | ) 568 | 569 | # Calculate the Slow Moving MACD. 570 | self._frame['macd_slow'] = self._frame['close'].transform( 571 | lambda x: x.ewm(span = slow_period, min_periods = slow_period).mean() 572 | ) 573 | 574 | # Calculate the difference between the fast and the slow. 575 | self._frame['macd_diff'] = self._frame['macd_fast'] - self._frame['macd_slow'] 576 | 577 | # Calculate the Exponential moving average of the fast. 578 | self._frame['macd'] = self._frame['macd_diff'].transform( 579 | lambda x: x.ewm(span = 9, min_periods = 8).mean() 580 | ) 581 | 582 | return self._frame 583 | 584 | def mass_index(self, period: int = 9) -> pd.DataFrame: 585 | """Calculates the Mass Index indicator. 586 | 587 | Arguments: 588 | ---- 589 | period {int} -- The number of periods to use when calculating 590 | the mass index. (default: {9}) 591 | 592 | Returns: 593 | ---- 594 | {pd.DataFrame} -- A Pandas data frame with the Mass Index included. 595 | 596 | Usage: 597 | ---- 598 | >>> historical_prices_df = trading_robot.grab_historical_prices( 599 | start=start_date, 600 | end=end_date, 601 | bar_size=1, 602 | bar_type='minute' 603 | ) 604 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 605 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 606 | >>> indicator_client.mass_index(period=9) 607 | """ 608 | 609 | locals_data = locals() 610 | del locals_data['self'] 611 | 612 | column_name = 'mass_index' 613 | self._current_indicators[column_name] = {} 614 | self._current_indicators[column_name]['args'] = locals_data 615 | self._current_indicators[column_name]['func'] = self.mass_index 616 | 617 | # Calculate the Diff. 618 | self._frame['diff'] = self._frame['high'] - self._frame['low'] 619 | 620 | # Calculate Mass Index 1 621 | self._frame['mass_index_1'] = self._frame['diff'].transform( 622 | lambda x: x.ewm(span = period, min_periods = period - 1).mean() 623 | ) 624 | 625 | # Calculate Mass Index 2 626 | self._frame['mass_index_2'] = self._frame['mass_index_1'].transform( 627 | lambda x: x.ewm(span = period, min_periods = period - 1).mean() 628 | ) 629 | 630 | # Grab the raw index. 631 | self._frame['mass_index_raw'] = self._frame['mass_index_1'] / self._frame['mass_index_2'] 632 | 633 | # Calculate the Mass Index. 634 | self._frame['mass_index'] = self._frame['mass_index_raw'].transform( 635 | lambda x: x.rolling(window=25).sum() 636 | ) 637 | 638 | # Clean up before sending back. 
639 | self._frame.drop( 640 | labels=['diff', 'mass_index_1', 'mass_index_2', 'mass_index_raw'], 641 | axis=1, 642 | inplace=True 643 | ) 644 | 645 | return self._frame 646 | 647 | def force_index(self, period: int) -> pd.DataFrame: 648 | """Calculates the Force Index. 649 | 650 | Arguments: 651 | ---- 652 | period {int} -- The number of periods to use when calculating 653 | the force index. 654 | 655 | Returns: 656 | ---- 657 | {pd.DataFrame} -- A Pandas data frame with the force index included. 658 | 659 | Usage: 660 | ---- 661 | >>> historical_prices_df = trading_robot.grab_historical_prices( 662 | start=start_date, 663 | end=end_date, 664 | bar_size=1, 665 | bar_type='minute' 666 | ) 667 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 668 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 669 | >>> indicator_client.force_index(period=9) 670 | """ 671 | 672 | locals_data = locals() 673 | del locals_data['self'] 674 | 675 | column_name = 'force_index' 676 | self._current_indicators[column_name] = {} 677 | self._current_indicators[column_name]['args'] = locals_data 678 | self._current_indicators[column_name]['func'] = self.force_index 679 | 680 | # Calculate the Force Index. 681 | self._frame[column_name] = self._frame['close'].diff(period) * self._frame['volume'].diff(period) 682 | 683 | return self._frame 684 | 685 | def ease_of_movement(self, period: int) -> pd.DataFrame: 686 | """Calculates the Ease of Movement. 687 | 688 | Arguments: 689 | ---- 690 | period {int} -- The number of periods to use when calculating 691 | the Ease of Movement. 692 | 693 | Returns: 694 | ---- 695 | {pd.DataFrame} -- A Pandas data frame with the Ease of Movement included. 696 | 697 | Usage: 698 | ---- 699 | >>> historical_prices_df = trading_robot.grab_historical_prices( 700 | start=start_date, 701 | end=end_date, 702 | bar_size=1, 703 | bar_type='minute' 704 | ) 705 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 706 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 707 | >>> indicator_client.ease_of_movement(period=9) 708 | """ 709 | 710 | locals_data = locals() 711 | del locals_data['self'] 712 | 713 | column_name = 'ease_of_movement' 714 | self._current_indicators[column_name] = {} 715 | self._current_indicators[column_name]['args'] = locals_data 716 | self._current_indicators[column_name]['func'] = self.ease_of_movement 717 | 718 | # Calculate the ease of movement. 719 | high_plus_low = (self._frame['high'].diff(1) + self._frame['low'].diff(1)) 720 | diff_divi_vol = (self._frame['high'] - self._frame['low']) / (2 * self._frame['volume']) 721 | self._frame['ease_of_movement_raw'] = high_plus_low * diff_divi_vol 722 | 723 | # Calculate the Rolling Average of the Ease of Movement. 724 | self._frame['ease_of_movement'] = self._frame['ease_of_movement_raw'].transform( 725 | lambda x: x.rolling(window=period).mean() 726 | ) 727 | 728 | # Clean up before sending back. 729 | self._frame.drop( 730 | labels=['ease_of_movement_raw'], 731 | axis=1, 732 | inplace=True 733 | ) 734 | 735 | return self._frame 736 | 737 | def commodity_channel_index(self, period: int) -> pd.DataFrame: 738 | """Calculates the Commodity Channel Index. 739 | 740 | Arguments: 741 | ---- 742 | period {int} -- The number of periods to use when calculating 743 | the Commodity Channel Index. 744 | 745 | Returns: 746 | ---- 747 | {pd.DataFrame} -- A Pandas data frame with the Commodity Channel Index included. 
748 | 749 | Usage: 750 | ---- 751 | >>> historical_prices_df = trading_robot.grab_historical_prices( 752 | start=start_date, 753 | end=end_date, 754 | bar_size=1, 755 | bar_type='minute' 756 | ) 757 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 758 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 759 | >>> indicator_client.commodity_channel_index(period=9) 760 | """ 761 | 762 | locals_data = locals() 763 | del locals_data['self'] 764 | 765 | column_name = 'commodity_channel_index' 766 | self._current_indicators[column_name] = {} 767 | self._current_indicators[column_name]['args'] = locals_data 768 | self._current_indicators[column_name]['func'] = self.commodity_channel_index 769 | 770 | # Calculate the Typical Price. 771 | self._frame['typical_price'] = (self._frame['high'] + self._frame['low'] + self._frame['close']) / 3 772 | 773 | # Calculate the Rolling Average of the Typical Price. 774 | self._frame['typical_price_mean'] = self._frame['pp'].transform( 775 | lambda x: x.rolling(window=period).mean() 776 | ) 777 | 778 | # Calculate the Rolling Standard Deviation of the Typical Price. 779 | self._frame['typical_price_std'] = self._frame['pp'].transform( 780 | lambda x: x.rolling(window=period).std() 781 | ) 782 | 783 | # Calculate the Commodity Channel Index. 784 | self._frame[column_name] = self._frame['typical_price_mean'] / self._frame['typical_price_std'] 785 | 786 | # Clean up before sending back. 787 | self._frame.drop( 788 | labels=['typical_price', 'typical_price_mean', 'typical_price_std'], 789 | axis=1, 790 | inplace=True 791 | ) 792 | 793 | return self._frame 794 | 795 | def standard_deviation(self, period: int) -> pd.DataFrame: 796 | """Calculates the Standard Deviation. 797 | 798 | Arguments: 799 | ---- 800 | period {int} -- The number of periods to use when calculating 801 | the standard deviation. 802 | 803 | Returns: 804 | ---- 805 | {pd.DataFrame} -- A Pandas data frame with the Standard Deviation included. 806 | 807 | Usage: 808 | ---- 809 | >>> historical_prices_df = trading_robot.grab_historical_prices( 810 | start=start_date, 811 | end=end_date, 812 | bar_size=1, 813 | bar_type='minute' 814 | ) 815 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 816 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 817 | >>> indicator_client.standard_deviation(period=9) 818 | """ 819 | 820 | locals_data = locals() 821 | del locals_data['self'] 822 | 823 | column_name = 'standard_deviation' 824 | self._current_indicators[column_name] = {} 825 | self._current_indicators[column_name]['args'] = locals_data 826 | self._current_indicators[column_name]['func'] = self.standard_deviation 827 | 828 | # Calculate the Standard Deviation. 829 | self._frame[column_name] = self._frame['close'].transform( 830 | lambda x: x.ewm(span=period).std() 831 | ) 832 | 833 | return self._frame 834 | 835 | def chaikin_oscillator(self, period: int) -> pd.DataFrame: 836 | """Calculates the Chaikin Oscillator. 837 | 838 | Arguments: 839 | ---- 840 | period {int} -- The number of periods to use when calculating 841 | the Chaikin Oscillator. 842 | 843 | Returns: 844 | ---- 845 | {pd.DataFrame} -- A Pandas data frame with the Chaikin Oscillator included. 
846 | 847 | Usage: 848 | ---- 849 | >>> historical_prices_df = trading_robot.grab_historical_prices( 850 | start=start_date, 851 | end=end_date, 852 | bar_size=1, 853 | bar_type='minute' 854 | ) 855 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 856 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 857 | >>> indicator_client.chaikin_oscillator(period=9) 858 | """ 859 | 860 | locals_data = locals() 861 | del locals_data['self'] 862 | 863 | column_name = 'chaikin_oscillator' 864 | self._current_indicators[column_name] = {} 865 | self._current_indicators[column_name]['args'] = locals_data 866 | self._current_indicators[column_name]['func'] = self.chaikin_oscillator 867 | 868 | # Calculate the Money Flow Multiplier. 869 | money_flow_multiplier_top = 2 * (self._frame['close'] - self._frame['high'] - self._frame['low']) 870 | money_flow_multiplier_bot = (self._frame['high'] - self._frame['low']) 871 | 872 | # Calculate Money Flow Volume 873 | self._frame['money_flow_volume'] = (money_flow_multiplier_top / money_flow_multiplier_bot) * self._frame['volume'] 874 | 875 | # Calculate the 3-Day moving average of the Money Flow Volume. 876 | self._frame['money_flow_volume_3'] = self._frame['money_flow_volume'].transform( 877 | lambda x: x.ewm(span=3, min_periods=2).mean() 878 | ) 879 | 880 | # Calculate the 10-Day moving average of the Money Flow Volume. 881 | self._frame['money_flow_volume_10'] = self._frame['money_flow_volume'].transform( 882 | lambda x: x.ewm(span=10, min_periods=9).mean() 883 | ) 884 | 885 | # Calculate the Chaikin Oscillator. 886 | self._frame[column_name] = self._frame['money_flow_volume_3'] - self._frame['money_flow_volume_10'] 887 | 888 | # Clean up before sending back. 889 | self._frame.drop( 890 | labels=['money_flow_volume_3', 'money_flow_volume_10', 'money_flow_volume'], 891 | axis=1, 892 | inplace=True 893 | ) 894 | 895 | return self._frame 896 | 897 | def kst_oscillator(self, r1: int, r2: int, r3: int, r4: int, n1: int, n2: int, n3: int, n4: int) -> pd.DataFrame: 898 | """Calculates the Mass Index indicator. 899 | 900 | Arguments: 901 | ---- 902 | period {int} -- The number of periods to use when calculating 903 | the mass index. (default: {9}) 904 | 905 | Returns: 906 | ---- 907 | {pd.DataFrame} -- A Pandas data frame with the Mass Index included. 908 | 909 | Usage: 910 | ---- 911 | >>> historical_prices_df = trading_robot.grab_historical_prices( 912 | start=start_date, 913 | end=end_date, 914 | bar_size=1, 915 | bar_type='minute' 916 | ) 917 | >>> price_data_frame = pd.DataFrame(data=historical_prices) 918 | >>> indicator_client = Indicators(price_data_frame=price_data_frame) 919 | >>> indicator_client.mass_index(period=9) 920 | """ 921 | 922 | locals_data = locals() 923 | del locals_data['self'] 924 | 925 | column_name = 'kst_oscillator' 926 | self._current_indicators[column_name] = {} 927 | self._current_indicators[column_name]['args'] = locals_data 928 | self._current_indicators[column_name]['func'] = self.kst_oscillator 929 | 930 | # Calculate the ROC 1. 931 | self._frame['roc_1'] = self._frame['close'].diff(r1 - 1) / self._frame['close'].shift(r1 - 1) 932 | 933 | # Calculate the ROC 2. 934 | self._frame['roc_2'] = self._frame['close'].diff(r2 - 1) / self._frame['close'].shift(r2 - 1) 935 | 936 | # Calculate the ROC 3. 937 | self._frame['roc_3'] = self._frame['close'].diff(r3 - 1) / self._frame['close'].shift(r3 - 1) 938 | 939 | # Calculate the ROC 4. 
940 | self._frame['roc_4'] = self._frame['close'].diff(r4 - 1) / self._frame['close'].shift(r4 - 1) 941 | 942 | 943 | # Calculate the Mass Index. 944 | self._frame['roc_1_n'] = self._frame['roc_1'].transform( 945 | lambda x: x.rolling(window=n1).sum() 946 | ) 947 | 948 | # Calculate the Mass Index. 949 | self._frame['roc_2_n'] = self._frame['roc_2'].transform( 950 | lambda x: x.rolling(window=n2).sum() 951 | ) 952 | 953 | # Calculate the Mass Index. 954 | self._frame['roc_3_n'] = self._frame['roc_3'].transform( 955 | lambda x: x.rolling(window=n3).sum() 956 | ) 957 | 958 | # Calculate the Mass Index. 959 | self._frame['roc_4_n'] = self._frame['roc_4'].transform( 960 | lambda x: x.rolling(window=n4).sum() 961 | ) 962 | 963 | self._frame[column_name] = 100 * (self._frame['roc_1_n'] + 2 * self._frame['roc_2_n'] + 3 * self._frame['roc_3_n'] + 4 * self._frame['roc_4_n']) 964 | self._frame[column_name + "_signal"] = self._frame['column_name'].transform( 965 | lambda x: x.rolling().mean() 966 | ) 967 | 968 | # Clean up before sending back. 969 | self._frame.drop( 970 | labels=['roc_1', 'roc_2', 'roc_3', 'roc_4', 'roc_1_n', 'roc_2_n', 'roc_3_n', 'roc_4_n'], 971 | axis=1, 972 | inplace=True 973 | ) 974 | 975 | return self._frame 976 | 977 | 978 | # #KST Oscillator 979 | # def KST(df, r1, r2, r3, r4, n1, n2, n3, n4): 980 | # M = df['Close'].diff(r1 - 1) 981 | # N = df['Close'].shift(r1 - 1) 982 | # ROC1 = M / N 983 | # M = df['Close'].diff(r2 - 1) 984 | # N = df['Close'].shift(r2 - 1) 985 | # ROC2 = M / N 986 | # M = df['Close'].diff(r3 - 1) 987 | # N = df['Close'].shift(r3 - 1) 988 | # ROC3 = M / N 989 | # M = df['Close'].diff(r4 - 1) 990 | # N = df['Close'].shift(r4 - 1) 991 | # ROC4 = M / N 992 | # KST = pd.Series(pd.rolling_sum(ROC1, n1) + pd.rolling_sum(ROC2, n2) * 2 + pd.rolling_sum(ROC3, n3) * 3 + pd.rolling_sum(ROC4, n4) * 4, name = 'KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4)) 993 | # df = df.join(KST) 994 | # return df 995 | 996 | def refresh(self): 997 | """Updates the Indicator columns after adding the new rows.""" 998 | 999 | # First update the groups since, we have new rows. 1000 | self._price_groups = self._stock_frame.symbol_groups 1001 | 1002 | # Grab all the details of the indicators so far. 1003 | for indicator in self._current_indicators: 1004 | 1005 | # Grab the function. 1006 | indicator_argument = self._current_indicators[indicator]['args'] 1007 | 1008 | # Grab the arguments. 1009 | indicator_function = self._current_indicators[indicator]['func'] 1010 | 1011 | # Update the function. 1012 | indicator_function(**indicator_argument) 1013 | 1014 | def check_signals(self) -> Union[pd.DataFrame, None]: 1015 | """Checks to see if any signals have been generated. 1016 | 1017 | Returns: 1018 | ---- 1019 | {Union[pd.DataFrame, None]} -- If signals are generated then a pandas.DataFrame 1020 | is returned otherwise nothing is returned. 
1021 | """ 1022 | 1023 | signals_df = self._stock_frame._check_signals(indicators=self._indicator_signals) 1024 | 1025 | return signals_df 1026 | 1027 | -------------------------------------------------------------------------------- /pyrobot/portfolio.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from pandas import DataFrame 4 | from typing import Tuple 5 | from typing import List 6 | from typing import Optional 7 | from typing import Iterable 8 | 9 | 10 | from pyrobot.stock_frame import StockFrame 11 | from td.client import TDClient 12 | 13 | 14 | class Portfolio(): 15 | 16 | def __init__(self, account_number: Optional[str] = None) -> None: 17 | """Initalizes a new instance of the Portfolio object. 18 | 19 | Keyword Arguments: 20 | ---- 21 | account_number {str} -- An accout number to associate with the Portfolio. (default: {None}) 22 | """ 23 | 24 | self.positions = {} 25 | self.positions_count = 0 26 | 27 | self.profit_loss = 0.00 28 | self.market_value = 0.00 29 | self.risk_tolerance = 0.00 30 | self.account_number = account_number 31 | 32 | self._historical_prices = [] 33 | 34 | self._td_client: TDClient = None 35 | self._stock_frame: StockFrame = None 36 | self._stock_frame_daily: StockFrame = None 37 | 38 | def add_positions(self, positions: List[dict]) -> dict: 39 | """Add Multiple positions to the portfolio at once. 40 | 41 | This method will take an iterable containing the values 42 | normally passed through in the `add_position` endpoint and 43 | then adds each position to the portfolio. 44 | 45 | Arguments: 46 | ---- 47 | positions {list[dict]} -- Multiple positions with the required arguments to be added. 48 | 49 | Returns: 50 | ---- 51 | {dict} -- The current positions in the portfolio. 52 | 53 | Usage: 54 | ---- 55 | >>> # Define mutliple positions to add. 56 | >>> multi_position = [ 57 | { 58 | 'asset_type': 'equity', 59 | 'quantity': 2, 60 | 'purchase_price': 4.00, 61 | 'symbol': 'TSLA', 62 | 'purchase_date': '2020-01-31' 63 | }, 64 | { 65 | 'asset_type': 'equity', 66 | 'quantity': 2,0 67 | 'purchase_price': 4.00, 68 | 'symbol': 'SQ', 69 | 'purchase_date': '2020-01-31' 70 | } 71 | ] 72 | >>> new_positions = trading_robot.portfolio.add_positions(positions=multi_position) 73 | { 74 | 'SQ': { 75 | 'asset_type': 'equity', 76 | 'purchase_date': '2020-01-31', 77 | 'purchase_price': 4.00, 78 | 'quantity': 2, 79 | 'symbol': 'SQ' 80 | }, 81 | 'TSLA': { 82 | 'asset_type': 'equity', 83 | 'purchase_date': '2020-01-31', 84 | 'purchase_price': 4.00, 85 | 'quantity': 2, 86 | 'symbol': 'TSLA' 87 | } 88 | } 89 | """ 90 | 91 | if isinstance(positions, list): 92 | 93 | # Loop through each position. 94 | for position in positions: 95 | 96 | # Add the position. 97 | self.add_position( 98 | symbol=position['symbol'], 99 | asset_type=position['asset_type'], 100 | quantity=position.get('quantity', 0), 101 | purchase_price=position.get('purchase_price', 0.0), 102 | purchase_date=position.get('purchase_date', None) 103 | ) 104 | 105 | return self.positions 106 | 107 | else: 108 | raise TypeError('Positions must be a list of dictionaries.') 109 | 110 | def add_position(self, symbol: str, asset_type: str, purchase_date: Optional[str] = None, quantity: int = 0, purchase_price: float = 0.0) -> dict: 111 | """Adds a single new position to the the portfolio. 112 | 113 | Arguments: 114 | ---- 115 | symbol {str} -- The Symbol of the Financial Instrument. 
Example: 'AAPL' or '/ES' 116 | 117 | asset_type {str} -- The type of the financial instrument to be added. For example, 118 | 'equity', 'forex', 'option', 'futures' 119 | 120 | Keyword Arguments: 121 | ---- 122 | quantity {int} -- The number of shares or contracts you own. (default: {0}) 123 | 124 | purchase_price {float} -- The price at which the position was purchased. (default: {0.00}) 125 | 126 | purchase_date {str} -- The date which the asset was purchased. Must be ISO Format "YYYY-MM-DD" 127 | For example, "2020-04-01" (default: {None}) 128 | 129 | Returns: 130 | ---- 131 | {dict} -- A dictionary object that represents a position in the portfolio. 132 | 133 | Usage: 134 | ---- 135 | >>> portfolio = Portfolio() 136 | >>> new_position = Portfolio.add_position(symbol='MSFT', 137 | asset_type='equity', 138 | quantity=2, 139 | purchase_price=4.00, 140 | purchase_date="2020-01-31" 141 | ) 142 | >>> new_position 143 | { 144 | 'asset_type': 'equity', 145 | 'quantity': 2, 146 | 'purchase_price': 4.00, 147 | 'symbol': 'MSFT', 148 | 'purchase_date': '2020-01-31' 149 | } 150 | """ 151 | 152 | self.positions[symbol] = {} 153 | self.positions[symbol]['symbol'] = symbol 154 | self.positions[symbol]['quantity'] = quantity 155 | self.positions[symbol]['purchase_price'] = purchase_price 156 | self.positions[symbol]['purchase_date'] = purchase_date 157 | self.positions[symbol]['asset_type'] = asset_type 158 | 159 | if purchase_date: 160 | self.positions[symbol]['ownership_status'] = True 161 | else: 162 | self.positions[symbol]['ownership_status'] = False 163 | 164 | return self.positions[symbol] 165 | 166 | def remove_position(self, symbol: str) -> Tuple[bool, str]: 167 | """Deletes a single position from the portfolio. 168 | 169 | Arguments: 170 | ---- 171 | symbol {str} -- The symbol of the instrument to be deleted. Example: 'AAPL' or '/ES' 172 | 173 | Returns: 174 | ---- 175 | {Tuple[bool, str]} -- Returns `True` if successfully deleted, `False` otherwise 176 | along with a message. 
177 | 
178 | Usage:
179 | ----
180 | >>> portfolio = Portfolio()
181 | 
182 | >>> new_position = portfolio.add_position(
183 | symbol='MSFT',
184 | asset_type='equity',
185 | quantity=2,
186 | purchase_price=4.00,
187 | purchase_date="2020-01-31"
188 | )
189 | >>> delete_status = portfolio.remove_position(symbol='MSFT')
190 | >>> delete_status
191 | (True, 'MSFT was successfully removed.')
192 | 
193 | >>> delete_status = portfolio.remove_position(symbol='AAPL')
194 | >>> delete_status
195 | (False, 'AAPL did not exist in the portfolio.')
196 | """
197 | 
198 | if symbol in self.positions:
199 | del self.positions[symbol]
200 | return (True, "{symbol} was successfully removed.".format(symbol=symbol))
201 | else:
202 | return (False, "{symbol} did not exist in the portfolio.".format(symbol=symbol))
203 | 
204 | def total_allocation(self) -> dict:
205 | """Returns a summary of the portfolio by asset allocation."""
206 | 
207 | total_allocation = {
208 | 'stocks': [],
209 | 'fixed_income': [],
210 | 'options': [],
211 | 'futures': [],
212 | 'forex': []
213 | }
214 | 
215 | if len(self.positions.keys()) > 0:
216 | for symbol in self.positions:
217 | total_allocation[self.positions[symbol]['asset_type']].append(self.positions[symbol])
218 | return total_allocation
219 | def portfolio_variance(self, weights: dict, covariance_matrix: DataFrame) -> float:
220 | """Calculates the portfolio variance: the symbol-sorted weight vector times the covariance matrix times the weight vector."""
221 | sorted_keys = list(weights.keys())
222 | sorted_keys.sort()
223 | 
224 | sorted_weights = np.array([weights[symbol] for symbol in sorted_keys])
225 | portfolio_variance = np.dot(
226 | sorted_weights.T,
227 | np.dot(covariance_matrix, sorted_weights)
228 | )
229 | 
230 | return portfolio_variance
231 | 
232 | def portfolio_metrics(self) -> dict:
233 | """Calculates different portfolio risk metrics using daily data.
234 | 
235 | Overview:
236 | ----
237 | To build an effective summary of our portfolio we will need to
238 | calculate different metrics that help represent the risk of our
239 | portfolio and its performance. The following metrics will be calculated
240 | in this method:
241 | 
242 | 1. Standard Deviation of Percent Returns.
243 | 2. Covariance of Percent Returns.
244 | 3. Variance of Percent Returns.
245 | 4. Average Percent Return.
246 | 5. Weighted Average Percent Return.
247 | 6. Portfolio Variance.
248 | 
249 | Returns:
250 | ----
251 | dict -- Per-symbol risk metrics plus the overall portfolio variance.
252 | """
253 | 
254 | if not self._stock_frame_daily:
255 | self._grab_daily_historical_prices()
256 | 
257 | # Calculate the weights.
258 | porftolio_weights = self.portfolio_weights()
259 | 
260 | # Calculate the Daily Returns (%)
261 | self._stock_frame_daily.frame['daily_returns_pct'] = self._stock_frame_daily.symbol_groups['close'].transform(
262 | lambda x: x.pct_change()
263 | )
264 | 
265 | # Calculate the Daily Returns (Mean)
266 | self._stock_frame_daily.frame['daily_returns_avg'] = self._stock_frame_daily.symbol_groups['daily_returns_pct'].transform(
267 | lambda x: x.mean()
268 | )
269 | 
270 | # Calculate the Daily Returns (Standard Deviation)
271 | self._stock_frame_daily.frame['daily_returns_std'] = self._stock_frame_daily.symbol_groups['daily_returns_pct'].transform(
272 | lambda x: x.std()
273 | )
274 | 
275 | # Calculate the Covariance.
276 | returns_cov = self._stock_frame_daily.frame.unstack(
277 | level=0)['daily_returns_pct'].cov()
278 | 
279 | # Take the other columns and get ready to add them to our dictionary.
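# (For context: the covariance matrix built above is exactly what `portfolio_variance` consumes.
# That method returns the scalar w' * Cov * w, i.e. the sum over all symbol pairs of
# weight_i * weight_j * Cov(return_i, return_j), which is reported further below as
# metrics_dict['portfolio']['variance'].)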
280 | returns_avg = self._stock_frame_daily.symbol_groups['daily_returns_avg'].tail( 281 | n=1 282 | ).to_dict() 283 | 284 | returns_std = self._stock_frame_daily.symbol_groups['daily_returns_std'].tail( 285 | n=1 286 | ).to_dict() 287 | 288 | metrics_dict = {} 289 | 290 | portfolio_variance = self.portfolio_variance( 291 | weights=porftolio_weights, 292 | covariance_matrix=returns_cov 293 | ) 294 | 295 | for index_tuple in returns_std: 296 | 297 | symbol = index_tuple[0] 298 | metrics_dict[symbol] = {} 299 | metrics_dict[symbol]['weight'] = porftolio_weights[symbol] 300 | metrics_dict[symbol]['average_returns'] = returns_avg[index_tuple] 301 | metrics_dict[symbol]['weighted_returns'] = returns_avg[index_tuple] * \ 302 | metrics_dict[symbol]['weight'] 303 | metrics_dict[symbol]['standard_deviation_of_returns'] = returns_std[index_tuple] 304 | metrics_dict[symbol]['variance_of_returns'] = returns_std[index_tuple] ** 2 305 | metrics_dict[symbol]['covariance_of_returns'] = returns_cov.loc[[ 306 | symbol]].to_dict() 307 | 308 | metrics_dict['portfolio'] = {} 309 | metrics_dict['portfolio']['variance'] = portfolio_variance 310 | 311 | return metrics_dict 312 | 313 | def portfolio_weights(self) -> dict: 314 | """Calculate the weights for each position in the portfolio 315 | 316 | Returns: 317 | ---- 318 | {dict} -- Each symbol with their designated weights. 319 | """ 320 | 321 | weights = {} 322 | 323 | # First grab all the symbols. 324 | symbols = self.positions.keys() 325 | 326 | # Grab the quotes. 327 | quotes = self.td_client.get_quotes(instruments=list(symbols)) 328 | 329 | # Grab the projected market value. 330 | projected_market_value_dict = self.projected_market_value( 331 | current_prices=quotes 332 | ) 333 | 334 | # Loop through each symbol. 335 | for symbol in projected_market_value_dict: 336 | 337 | # Calculate the weights. 338 | if symbol != 'total': 339 | weights[symbol] = projected_market_value_dict[symbol]['total_market_value'] / \ 340 | projected_market_value_dict['total']['total_market_value'] 341 | 342 | return weights 343 | 344 | def portfolio_summary(self): 345 | """Generates a summary of our portfolio.""" 346 | 347 | # First grab all the symbols. 348 | symbols = self.positions.keys() 349 | 350 | # Grab the quotes. 351 | quotes = self.td_client.get_quotes(instruments=list(symbols)) 352 | 353 | portfolio_summary_dict = {} 354 | portfolio_summary_dict['projected_market_value'] = self.projected_market_value( 355 | current_prices=quotes 356 | ) 357 | portfolio_summary_dict['portfolio_weights'] = self.portfolio_weights() 358 | portfolio_summary_dict['portfolio_risk'] = "" 359 | 360 | return portfolio_summary_dict 361 | 362 | def in_portfolio(self, symbol: str) -> bool: 363 | """checks if the symbol is in the portfolio. 364 | 365 | Arguments: 366 | ---- 367 | symbol {str} -- The symbol of the instrument to be deleted. Example: 'AAPL' or '/ES' 368 | 369 | Returns: 370 | ---- 371 | bool -- `True` if the position is in the portfolio, `False` otherwise. 372 | 373 | Usage: 374 | ---- 375 | >>> portfolio = Portfolio() 376 | >>> new_position = Portfolio.add_position( 377 | symbol='MSFT', 378 | asset_type='equity' 379 | ) 380 | >>> in_position_flag = Portfolio.in_portfolio(symbol='MSFT') 381 | >>> in_position_flag 382 | True 383 | """ 384 | 385 | if symbol in self.positions: 386 | return True 387 | else: 388 | return False 389 | 390 | def get_ownership_status(self, symbol: str) -> bool: 391 | """Gets the ownership status for a position in the portfolio. 
392 | 393 | Arguments: 394 | ---- 395 | symbol {str} -- The symbol you want to grab the ownership status for. 396 | 397 | Returns: 398 | ---- 399 | {bool} -- `True` if the we own the position, `False` if we do not own it. 400 | """ 401 | 402 | if self.in_portfolio(symbol=symbol) and self.positions[symbol]['ownership_status']: 403 | return self.positions[symbol]['ownership_status'] 404 | else: 405 | return False 406 | 407 | def set_ownership_status(self, symbol: str, ownership: bool) -> None: 408 | """Sets the ownership status for a position in the portfolio. 409 | 410 | Arguments: 411 | ---- 412 | symbol {str} -- The symbol you want to change the ownership status for. 413 | 414 | ownership {bool} -- The ownership status you want the symbol to have. Can either 415 | be `True` or `False`. 416 | 417 | Raises: 418 | ---- 419 | KeyError: If the symbol does not exist in the portfolio it will return an error. 420 | """ 421 | 422 | if self.in_portfolio(symbol=symbol) and self.positions[symbol]['ownership_status']: 423 | self.positions[symbol]['ownership_status'] = ownership 424 | else: 425 | raise KeyError( 426 | "Can't set ownership status, as you do not have the symbol in your portfolio." 427 | ) 428 | 429 | def is_profitable(self, symbol: str, current_price: float) -> bool: 430 | """Specifies whether a position is profitable. 431 | 432 | Arguments: 433 | ---- 434 | symbol {str} -- The symbol of the instrument, to check profitability. 435 | 436 | current_price {float} -- The current trading price of the instrument. 437 | 438 | Returns: 439 | ---- 440 | {bool} -- Specifies whether the position is profitable or flat `True` or not 441 | profitable `False`. 442 | 443 | Raises: 444 | ---- 445 | KeyError: If the Symbol does not exist it will return a key error. 446 | 447 | Usage: 448 | ---- 449 | >>> portfolio = Portfolio() 450 | >>> new_position = Portfolio.add_position( 451 | symbol='MSFT', 452 | asset_type='equity', 453 | purchase_price=4.00, 454 | purchase_date="2020-01-31" 455 | ) 456 | >>> is_profitable_flag = Portfolio.is_profitable( 457 | symbol='MSFT', 458 | current_price=7.00 459 | ) 460 | >>> is_profitable_flag 461 | True 462 | """ 463 | 464 | # Grab the purchase price, if it exists. 465 | if self.in_portfolio(symbol=symbol): 466 | purchase_price = self.positions[symbol]['purchase_price'] 467 | else: 468 | raise KeyError("The Symbol you tried to request does not exist.") 469 | 470 | if (purchase_price <= current_price): 471 | return True 472 | elif (purchase_price > current_price): 473 | return False 474 | 475 | def projected_market_value(self, current_prices: dict) -> dict: 476 | """Returns the Projected market value for all the positions in the portfolio. 477 | 478 | Arguments: 479 | ---- 480 | current_prices {dict} -- A dictionary of current quotes for each of the symbols 481 | in the portfolio. 482 | 483 | Returns: 484 | ---- 485 | dict -- A summarized version of the portfolio with each position, purchase price, current price, 486 | and projected values. 
487 | 488 | Usage: 489 | ---- 490 | >>> portfolio = Portfolio() 491 | >>> new_position = portfolio.add_position( 492 | symbol='MSFT', 493 | asset_type='equity', 494 | purchase_price=4.00, 495 | purchase_date="2020-01-31" 496 | ) 497 | >>> portfolio_summary = portfolio.projected_market_value(current_prices={'MSFT':{'lastPrice': 8.00, 'openPrice': 7.50}}) 498 | """ 499 | 500 | projected_value = {} 501 | total_value = 0.0 502 | total_invested_capital = 0.0 503 | total_profit_or_loss = 0.0 504 | 505 | position_count_profitable = 0 506 | position_count_not_profitable = 0 507 | position_count_break_even = 0 508 | 509 | for symbol in current_prices: 510 | 511 | if self.in_portfolio(symbol=symbol): 512 | 513 | projected_value[symbol] = {} 514 | current_quantity = self.positions[symbol]['quantity'] 515 | purchase_price = self.positions[symbol]['purchase_price'] 516 | current_price = current_prices[symbol]['lastPrice'] 517 | is_profitable = self.is_profitable( 518 | symbol=symbol, current_price=current_price) 519 | 520 | projected_value[symbol]['purchase_price'] = purchase_price 521 | projected_value[symbol]['current_price'] = current_prices[symbol]['lastPrice'] 522 | projected_value[symbol]['quantity'] = current_quantity 523 | projected_value[symbol]['is_profitable'] = is_profitable 524 | 525 | # Calculate total market value. 526 | projected_value[symbol]['total_market_value'] = ( 527 | current_price * current_quantity 528 | ) 529 | 530 | # Calculate total invested capital. 531 | projected_value[symbol]['total_invested_capital'] = ( 532 | current_quantity * purchase_price 533 | ) 534 | 535 | projected_value[symbol]['total_loss_or_gain_$'] = ((current_price - purchase_price) * current_quantity) 536 | projected_value[symbol]['total_loss_or_gain_%'] = round(((current_price - purchase_price) / purchase_price), 4) 537 | 538 | total_value += projected_value[symbol]['total_market_value'] 539 | total_profit_or_loss += projected_value[symbol]['total_loss_or_gain_$'] 540 | total_invested_capital += projected_value[symbol]['total_invested_capital'] 541 | 542 | if projected_value[symbol]['total_loss_or_gain_$'] > 0: 543 | position_count_profitable += 1 544 | elif projected_value[symbol]['total_loss_or_gain_$'] < 0: 545 | position_count_not_profitable += 1 546 | else: 547 | position_count_break_even += 1 548 | 549 | projected_value['total'] = {} 550 | projected_value['total']['total_positions'] = len(self.positions) 551 | projected_value['total']['total_market_value'] = total_value 552 | projected_value['total']['total_invested_capital'] = total_invested_capital 553 | projected_value['total']['total_profit_or_loss'] = total_profit_or_loss 554 | projected_value['total']['number_of_profitable_positions'] = position_count_profitable 555 | projected_value['total']['number_of_non_profitable_positions'] = position_count_not_profitable 556 | projected_value['total']['number_of_breakeven_positions'] = position_count_break_even 557 | 558 | return projected_value 559 | 560 | @property 561 | def historical_prices(self) -> List[dict]: 562 | """Gets the historical prices for the Portfolio 563 | 564 | Returns: 565 | ---- 566 | List[dict] -- A list of historical candle prices. 567 | """ 568 | 569 | return self._historical_prices 570 | 571 | @historical_prices.setter 572 | def historical_prices(self, historical_prices: List[dict]) -> None: 573 | """Sets the historical prices for the Portfolio 574 | 575 | Arguments: 576 | ---- 577 | historical_prices {List[dict]} -- A list of historical candle prices. 
578 | """ 579 | 580 | self._historical_prices = historical_prices 581 | 582 | @property 583 | def stock_frame(self) -> StockFrame: 584 | """Gets the StockFrame object for the Portfolio 585 | 586 | Returns: 587 | ---- 588 | {StockFrame} -- A StockFrame object with symbol groups, and rolling windows. 589 | """ 590 | 591 | return self._stock_frame 592 | 593 | @stock_frame.setter 594 | def stock_frame(self, stock_frame: StockFrame) -> None: 595 | """Sets the StockFrame object for the Portfolio 596 | 597 | Arguments: 598 | ---- 599 | stock_frame {StockFrame} -- A StockFrame object with symbol groups, and rolling windows. 600 | """ 601 | 602 | self._stock_frame = stock_frame 603 | 604 | @property 605 | def td_client(self) -> TDClient: 606 | """Gets the TDClient object for the Portfolio 607 | 608 | Returns: 609 | ---- 610 | {TDClient} -- An authenticated session with the TD API. 611 | """ 612 | 613 | return self._td_client 614 | 615 | @td_client.setter 616 | def td_client(self, td_client: TDClient) -> None: 617 | """Sets the TDClient object for the Portfolio 618 | 619 | Arguments: 620 | ---- 621 | td_client {TDClient} -- An authenticated session with the TD API. 622 | """ 623 | 624 | self._td_client: TDClient = td_client 625 | 626 | def _grab_daily_historical_prices(self) -> StockFrame: 627 | """Grabs the daily historical prices for each position. 628 | 629 | Returns: 630 | ---- 631 | {StockFrame} -- A StockFrame object with data organized, grouped, and sorted. 632 | """ 633 | 634 | new_prices = [] 635 | 636 | # Loop through each position. 637 | for symbol in self.positions: 638 | 639 | # Grab the historical prices. 640 | historical_prices_response = self.td_client.get_price_history( 641 | symbol=symbol, 642 | period_type='year', 643 | period=1, 644 | frequency_type='daily', 645 | frequency=1, 646 | extended_hours=True 647 | ) 648 | 649 | # Loop through the chandles. 
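# For reference, each entry in historical_prices_response['candles'] is assumed to be a flat
# dictionary carrying at least the keys read in the loop below, with 'datetime' given as an
# epoch timestamp in milliseconds, e.g. roughly:
#     {'open': 280.1, 'high': 282.0, 'low': 279.9, 'close': 281.4, 'volume': 3405112, 'datetime': 1586390396000}
# Only these keys are copied into the daily StockFrame.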
650 | for candle in historical_prices_response['candles']: 651 | 652 | new_price_mini_dict = {} 653 | new_price_mini_dict['symbol'] = symbol 654 | new_price_mini_dict['open'] = candle['open'] 655 | new_price_mini_dict['close'] = candle['close'] 656 | new_price_mini_dict['high'] = candle['high'] 657 | new_price_mini_dict['low'] = candle['low'] 658 | new_price_mini_dict['volume'] = candle['volume'] 659 | new_price_mini_dict['datetime'] = candle['datetime'] 660 | new_prices.append(new_price_mini_dict) 661 | 662 | # Create and set the StockFrame 663 | self._stock_frame_daily = StockFrame(data=new_prices) 664 | self._stock_frame_daily.create_frame() 665 | 666 | return self._stock_frame_daily 667 | -------------------------------------------------------------------------------- /pyrobot/robot.py: -------------------------------------------------------------------------------- 1 | import json 2 | import time as time_true 3 | import pprint 4 | import pathlib 5 | import pandas as pd 6 | import pkg_resources 7 | 8 | from datetime import time 9 | from datetime import datetime 10 | from datetime import timezone 11 | from datetime import timedelta 12 | 13 | from typing import List 14 | from typing import Dict 15 | from typing import Union 16 | from typing import Optional 17 | 18 | from pyrobot.trades import Trade 19 | from pyrobot.portfolio import Portfolio 20 | from pyrobot.stock_frame import StockFrame 21 | 22 | current_td_version = pkg_resources.get_distribution('td-ameritrade-python-api').version 23 | 24 | from td.client import TDClient 25 | 26 | if current_td_version == '0.3.0': 27 | from td.utils import TDUtilities 28 | milliseconds_since_epoch = TDUtilities().milliseconds_since_epoch 29 | else: 30 | from td.utils import milliseconds_since_epoch 31 | 32 | 33 | class PyRobot(): 34 | 35 | def __init__(self, client_id: str, redirect_uri: str, paper_trading: bool = True, credentials_path: Optional[str] = None, trading_account: Optional[str] = None) -> None: 36 | """Initalizes a new instance of the robot and logs into the API platform specified. 37 | 38 | Arguments: 39 | ---- 40 | client_id {str} -- The Consumer ID assigned to you during the App registration. 41 | This can be found at the app registration portal. 42 | 43 | redirect_uri {str} -- This is the redirect URL that you specified when you created your 44 | TD Ameritrade Application. 45 | 46 | Keyword Arguments: 47 | ---- 48 | credentials_path {str} -- The path to the session state file used to prevent a full 49 | OAuth workflow. (default: {None}) 50 | 51 | trading_account {str} -- Your TD Ameritrade account number. (default: {None}) 52 | 53 | """ 54 | 55 | # Set the attirbutes 56 | self.trading_account = trading_account 57 | self.client_id = client_id 58 | self.redirect_uri = redirect_uri 59 | self.credentials_path = credentials_path 60 | self.session: TDClient = self._create_session() 61 | self.trades = {} 62 | self.historical_prices = {} 63 | self.stock_frame: StockFrame = None 64 | self.paper_trading = paper_trading 65 | 66 | self._bar_size = None 67 | self._bar_type = None 68 | 69 | def _create_session(self) -> TDClient: 70 | """Start a new session. 71 | 72 | Creates a new session with the TD Ameritrade API and logs the user into 73 | the new session. 74 | 75 | Returns: 76 | ---- 77 | TDClient -- A TDClient object with an authenticated sessions. 
78 | 79 | """ 80 | 81 | # Create a new instance of the client 82 | td_client = TDClient( 83 | client_id=self.client_id, 84 | redirect_uri=self.redirect_uri, 85 | credentials_path=self.credentials_path 86 | ) 87 | 88 | # log the client into the new session 89 | td_client.login() 90 | 91 | return td_client 92 | 93 | @property 94 | def pre_market_open(self) -> bool: 95 | """Checks if pre-market is open. 96 | 97 | Uses the datetime module to create US Pre-Market Equity hours in 98 | UTC time. 99 | 100 | Usage: 101 | ---- 102 | >>> trading_robot = PyRobot( 103 | client_id=CLIENT_ID, 104 | redirect_uri=REDIRECT_URI, 105 | credentials_path=CREDENTIALS_PATH 106 | ) 107 | >>> pre_market_open_flag = trading_robot.pre_market_open 108 | >>> pre_market_open_flag 109 | True 110 | 111 | Returns: 112 | ---- 113 | bool -- True if pre-market is open, False otherwise. 114 | 115 | """ 116 | 117 | pre_market_start_time = datetime.utcnow().replace( 118 | hour=8, 119 | minute=00, 120 | second=00 121 | ).timestamp() 122 | 123 | market_start_time = datetime.utcnow().replace( 124 | hour=13, 125 | minute=30, 126 | second=00 127 | ).timestamp() 128 | 129 | right_now = datetime.utcnow().timestamp() 130 | 131 | if market_start_time >= right_now >= pre_market_start_time: 132 | return True 133 | else: 134 | return False 135 | 136 | @property 137 | def post_market_open(self): 138 | """Checks if post-market is open. 139 | 140 | Uses the datetime module to create US Post-Market Equity hours in 141 | UTC time. 142 | 143 | Usage: 144 | ---- 145 | >>> trading_robot = PyRobot( 146 | client_id=CLIENT_ID, 147 | redirect_uri=REDIRECT_URI, 148 | credentials_path=CREDENTIALS_PATH 149 | ) 150 | >>> post_market_open_flag = trading_robot.post_market_open 151 | >>> post_market_open_flag 152 | True 153 | 154 | Returns: 155 | ---- 156 | bool -- True if post-market is open, False otherwise. 157 | 158 | """ 159 | 160 | post_market_end_time = datetime.utcnow().replace( 161 | hour=00, 162 | minute=00, 163 | second=00 164 | ).timestamp() 165 | 166 | market_end_time = datetime.utcnow().replace( 167 | hour=20, 168 | minute=00, 169 | second=00 170 | ).timestamp() 171 | 172 | right_now = datetime.utcnow().timestamp() 173 | 174 | if post_market_end_time >= right_now >= market_end_time: 175 | return True 176 | else: 177 | return False 178 | 179 | @property 180 | def regular_market_open(self): 181 | """Checks if regular market is open. 182 | 183 | Uses the datetime module to create US Regular Market Equity hours in 184 | UTC time. 185 | 186 | Usage: 187 | ---- 188 | >>> trading_robot = PyRobot( 189 | client_id=CLIENT_ID, 190 | redirect_uri=REDIRECT_URI, 191 | credentials_path=CREDENTIALS_PATH 192 | ) 193 | >>> market_open_flag = trading_robot.market_open 194 | >>> market_open_flag 195 | True 196 | 197 | Returns: 198 | ---- 199 | bool -- True if post-market is open, False otherwise. 200 | 201 | """ 202 | 203 | market_start_time = datetime.utcnow().replace( 204 | hour=13, 205 | minute=30, 206 | second=00 207 | ).timestamp() 208 | 209 | market_end_time = datetime.utcnow().replace( 210 | hour=20, 211 | minute=00, 212 | second=00 213 | ).timestamp() 214 | 215 | right_now = datetime.utcnow().timestamp() 216 | 217 | if market_end_time >= right_now >= market_start_time: 218 | return True 219 | else: 220 | return False 221 | 222 | def create_portfolio(self) -> Portfolio: 223 | """Create a new portfolio. 224 | 225 | Creates a Portfolio Object to help store and organize positions 226 | as they are added and removed during trading. 
227 | 228 | Usage: 229 | ---- 230 | >>> trading_robot = PyRobot( 231 | client_id=CLIENT_ID, 232 | redirect_uri=REDIRECT_URI, 233 | credentials_path=CREDENTIALS_PATH 234 | ) 235 | >>> portfolio = trading_robot.create_portfolio() 236 | >>> portfolio 237 | 238 | 239 | Returns: 240 | ---- 241 | Portfolio -- A pyrobot.Portfolio object with no positions. 242 | """ 243 | 244 | # Initalize the portfolio. 245 | self.portfolio = Portfolio(account_number=self.trading_account) 246 | 247 | # Assign the Client 248 | self.portfolio.td_client = self.session 249 | 250 | return self.portfolio 251 | 252 | def create_trade(self, trade_id: str, enter_or_exit: str, long_or_short: str, order_type: str = 'mkt', price: float = 0.0, stop_limit_price=0.0) -> Trade: 253 | """Initalizes a new instance of a Trade Object. 254 | 255 | This helps simplify the process of building an order by using pre-built templates that can be 256 | easily modified to incorporate more complex strategies. 257 | 258 | Arguments: 259 | ---- 260 | trade_id {str} -- The ID associated with the trade, this can then be used to access the trade during runtime. 261 | 262 | enter_or_exit {str} -- Defines whether this trade will be used to enter or exit a position. 263 | If used to enter, specify `enter`. If used to exit, speicfy `exit`. 264 | 265 | long_or_short {str} -- Defines whether this trade will be used to go long or short a position. 266 | If used to go long, specify `long`. If used to go short, speicfy `short`. 267 | 268 | Keyword Arguments: 269 | ---- 270 | order_type {str} -- Defines the type of order to initalize. Possible values 271 | are `'mkt', 'lmt', 'stop', 'stop-lmt', 'trailign-stop'` (default: {'mkt'}) 272 | 273 | price {float} -- The Price to be associate with the order. If the order type is `stop` or `stop-lmt` then 274 | it is the stop price, if it is a `lmt` order then it is the limit price, and `mkt` is the market 275 | price.(default: {0.0}) 276 | 277 | stop_limit_price {float} -- Only used if the order is a `stop-lmt` and represents the limit price of 278 | the `stop-lmt` order. (default: {0.0}) 279 | 280 | Usage: 281 | ---- 282 | >>> trading_robot = PyRobot( 283 | client_id=CLIENT_ID, 284 | redirect_uri=REDIRECT_URI, 285 | credentials_path=CREDENTIALS_PATH 286 | ) 287 | >>> new_trade = trading_robot_portfolio.create_trade( 288 | trade_id='long_1', 289 | enter_or_exit='enter', 290 | long_or_short='long', 291 | order_type='mkt' 292 | ) 293 | >>> new_trade 294 | 295 | >>> new_market_trade = trading_robot_portfolio.create_trade( 296 | trade_id='long_2', 297 | enter_or_exit='enter', 298 | long_or_short='long', 299 | order_type='mkt', 300 | price=12.00 301 | ) 302 | >>> new_market_trade 303 | 304 | >>> new_stop_trade = trading_robot_portfolio.create_trade( 305 | trade_id='long_3', 306 | enter_or_exit='enter', 307 | long_or_short='long', 308 | order_type='stop', 309 | price=2.00 310 | ) 311 | >>> new_stop_trade 312 | 313 | >>> new_stop_limit_trade = trading_robot_portfolio.create_trade( 314 | trade_id='long_4', 315 | enter_or_exit='enter', 316 | long_or_short='long', 317 | order_type='stop-lmt', 318 | price=2.00, 319 | stop_limit_price=1.90 320 | ) 321 | >>> new_stop_limit_trade 322 | 323 | Returns: 324 | ---- 325 | Trade -- A pyrobot.Trade object with the specified template. 326 | """ 327 | 328 | # Initalize a new trade object. 329 | trade = Trade() 330 | 331 | # Create a new trade. 
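# The call below hands this method's own arguments straight through to the Trade template.
# Mirroring the stop-limit case from the docstring above, a call such as
#     trading_robot.create_trade(trade_id='long_4', enter_or_exit='enter', long_or_short='long', order_type='stop-lmt', price=2.00, stop_limit_price=1.90)
# arrives here as new_trade(trade_id='long_4', order_type='stop-lmt', side='long', enter_or_exit='enter', price=2.00, stop_limit_price=1.90).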
332 | trade.new_trade( 333 | trade_id=trade_id, 334 | order_type=order_type, 335 | side=long_or_short, 336 | enter_or_exit=enter_or_exit, 337 | price=price, 338 | stop_limit_price=stop_limit_price 339 | ) 340 | 341 | self.trades[trade_id] = trade 342 | 343 | return trade 344 | 345 | def delete_trade(self, index: int) -> None: 346 | """Deletes an exisiting trade from the `trades` collection. 347 | 348 | Arguments: 349 | ---- 350 | index {int} -- The index of the order. 351 | 352 | Usage: 353 | ---- 354 | >>> trading_robot = PyRobot( 355 | client_id=CLIENT_ID, 356 | redirect_uri=REDIRECT_URI, 357 | credentials_path=CREDENTIALS_PATH 358 | ) 359 | >>> new_trade = trading_robot_portfolio.create_trade( 360 | enter_or_exit='enter', 361 | long_or_short='long', 362 | order_type='mkt' 363 | ) 364 | >>> trading_robot.delete_trade(index=1) 365 | """ 366 | 367 | if index in self.trades: 368 | del self.trades[index] 369 | 370 | def grab_current_quotes(self) -> dict: 371 | """Grabs the current quotes for all positions in the portfolio. 372 | 373 | Makes a call to the TD Ameritrade Get Quotes endpoint with all 374 | the positions in the portfolio. If only one position exist it will 375 | return a single dicitionary, otherwise a nested dictionary. 376 | 377 | Usage: 378 | ---- 379 | >>> trading_robot = PyRobot( 380 | client_id=CLIENT_ID, 381 | redirect_uri=REDIRECT_URI, 382 | credentials_path=CREDENTIALS_PATH 383 | ) 384 | >>> trading_robot_portfolio.add_position( 385 | symbol='MSFT', 386 | asset_type='equity' 387 | ) 388 | >>> current_quote = trading_robot.grab_current_quotes() 389 | >>> current_quote 390 | { 391 | "MSFT": { 392 | "assetType": "EQUITY", 393 | "assetMainType": "EQUITY", 394 | "cusip": "594918104", 395 | ... 396 | "regularMarketPercentChangeInDouble": 0, 397 | "delayed": true 398 | } 399 | } 400 | 401 | >>> trading_robot = PyRobot( 402 | client_id=CLIENT_ID, 403 | redirect_uri=REDIRECT_URI, 404 | credentials_path=CREDENTIALS_PATH 405 | ) 406 | >>> trading_robot_portfolio.add_position( 407 | symbol='MSFT', 408 | asset_type='equity' 409 | ) 410 | >>> trading_robot_portfolio.add_position( 411 | symbol='AAPL', 412 | asset_type='equity' 413 | ) 414 | >>> current_quote = trading_robot.grab_current_quotes() 415 | >>> current_quote 416 | 417 | { 418 | "MSFT": { 419 | "assetType": "EQUITY", 420 | "assetMainType": "EQUITY", 421 | "cusip": "594918104", 422 | ... 423 | "regularMarketPercentChangeInDouble": 0, 424 | "delayed": False 425 | }, 426 | "AAPL": { 427 | "assetType": "EQUITY", 428 | "assetMainType": "EQUITY", 429 | "cusip": "037833100", 430 | ... 431 | "regularMarketPercentChangeInDouble": 0, 432 | "delayed": False 433 | } 434 | } 435 | 436 | Returns: 437 | ---- 438 | dict -- A dictionary containing all the quotes for each position. 439 | 440 | """ 441 | 442 | # First grab all the symbols. 443 | symbols = self.portfolio.positions.keys() 444 | 445 | # Grab the quotes. 446 | quotes = self.session.get_quotes(instruments=list(symbols)) 447 | 448 | return quotes 449 | 450 | def grab_historical_prices(self, start: datetime, end: datetime, bar_size: int = 1, 451 | bar_type: str = 'minute', symbols: Optional[List[str]] = None) -> List[dict]: 452 | """Grabs the historical prices for all the postions in a portfolio. 453 | 454 | Overview: 455 | ---- 456 | Any of the historical price data returned will include extended hours 457 | price data by default. 458 | 459 | Arguments: 460 | ---- 461 | start {datetime} -- Defines the start date for the historical prices. 
462 | 463 | end {datetime} -- Defines the end date for the historical prices. 464 | 465 | Keyword Arguments: 466 | ---- 467 | bar_size {int} -- Defines the size of each bar. (default: {1}) 468 | 469 | bar_type {str} -- Defines the bar type, can be one of the following: 470 | `['minute', 'week', 'month', 'year']` (default: {'minute'}) 471 | 472 | symbols {List[str]} -- A list of ticker symbols to pull. (default: None) 473 | 474 | Returns: 475 | ---- 476 | {List[Dict]} -- The historical price candles. 477 | 478 | Usage: 479 | ---- 480 | >>> trading_robot = PyRobot( 481 | client_id=CLIENT_ID, 482 | redirect_uri=REDIRECT_URI, 483 | credentials_path=CREDENTIALS_PATH 484 | ) 485 | >>> start_date = datetime.today() 486 | >>> end_date = start_date - timedelta(days=30) 487 | >>> historical_prices = trading_robot.grab_historical_prices( 488 | start=end_date, 489 | end=start_date, 490 | bar_size=1, 491 | bar_type='minute' 492 | ) 493 | """ 494 | 495 | self._bar_size = bar_size 496 | self._bar_type = bar_type 497 | 498 | start = str(milliseconds_since_epoch(dt_object=start)) 499 | end = str(milliseconds_since_epoch(dt_object=end)) 500 | 501 | new_prices = [] 502 | 503 | if not symbols: 504 | symbols = self.portfolio.positions 505 | 506 | for symbol in symbols: 507 | 508 | historical_prices_response = self.session.get_price_history( 509 | symbol=symbol, 510 | period_type='day', 511 | start_date=start, 512 | end_date=end, 513 | frequency_type=bar_type, 514 | frequency=bar_size, 515 | extended_hours=True 516 | ) 517 | 518 | self.historical_prices[symbol] = {} 519 | self.historical_prices[symbol]['candles'] = historical_prices_response['candles'] 520 | 521 | for candle in historical_prices_response['candles']: 522 | 523 | new_price_mini_dict = {} 524 | new_price_mini_dict['symbol'] = symbol 525 | new_price_mini_dict['open'] = candle['open'] 526 | new_price_mini_dict['close'] = candle['close'] 527 | new_price_mini_dict['high'] = candle['high'] 528 | new_price_mini_dict['low'] = candle['low'] 529 | new_price_mini_dict['volume'] = candle['volume'] 530 | new_price_mini_dict['datetime'] = candle['datetime'] 531 | new_prices.append(new_price_mini_dict) 532 | 533 | self.historical_prices['aggregated'] = new_prices 534 | 535 | return self.historical_prices 536 | 537 | def get_latest_bar(self) -> List[dict]: 538 | """Returns the latest bar for each symbol in the portfolio. 539 | 540 | Returns: 541 | --- 542 | {List[dict]} -- A simplified quote list. 543 | 544 | Usage: 545 | ---- 546 | >>> trading_robot = PyRobot( 547 | client_id=CLIENT_ID, 548 | redirect_uri=REDIRECT_URI, 549 | credentials_path=CREDENTIALS_PATH 550 | ) 551 | >>> latest_bars = trading_robot.get_latest_bar() 552 | >>> latest_bars 553 | """ 554 | 555 | # Grab the info from the last quest. 556 | bar_size = self._bar_size 557 | bar_type = self._bar_type 558 | 559 | # Define the start and end date. 560 | start_date = datetime.today() 561 | end_date = start_date - timedelta(minutes=bar_size * 15) 562 | start = str(milliseconds_since_epoch(dt_object=start_date)) 563 | end = str(milliseconds_since_epoch(dt_object=end_date)) 564 | 565 | latest_prices = [] 566 | 567 | # Loop through each symbol. 568 | for symbol in self.portfolio.positions: 569 | 570 | # Grab the request. 
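# The price-history response is assumed to be a dict that either contains a 'candles' list or an
# 'error' key (e.g. when the request is throttled); the block below retries exactly once after a
# two-second pause. A hypothetical, more defensive variant could cap repeated attempts instead:
#     for _attempt in range(3):
#         historical_prices_response = self.session.get_price_history(symbol=symbol, period_type='day', start_date=start, end_date=end, frequency_type=bar_type, frequency=bar_size, extended_hours=True)
#         if 'error' not in historical_prices_response:
#             break
#         time_true.sleep(2)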
571 | historical_prices_response = self.session.get_price_history( 572 | symbol=symbol, 573 | period_type='day', 574 | start_date=start, 575 | end_date=end, 576 | frequency_type=bar_type, 577 | frequency=bar_size, 578 | extended_hours=True 579 | ) 580 | 581 | if 'error' in historical_prices_response: 582 | 583 | time_true.sleep(2) 584 | 585 | # Grab the request. 586 | historical_prices_response = self.session.get_price_history( 587 | symbol=symbol, 588 | period_type='day', 589 | start_date=start, 590 | end_date=end, 591 | frequency_type=bar_type, 592 | frequency=bar_size, 593 | extended_hours=True 594 | ) 595 | 596 | # parse the candles. 597 | for candle in historical_prices_response['candles'][-1:]: 598 | 599 | new_price_mini_dict = {} 600 | new_price_mini_dict['symbol'] = symbol 601 | new_price_mini_dict['open'] = candle['open'] 602 | new_price_mini_dict['close'] = candle['close'] 603 | new_price_mini_dict['high'] = candle['high'] 604 | new_price_mini_dict['low'] = candle['low'] 605 | new_price_mini_dict['volume'] = candle['volume'] 606 | new_price_mini_dict['datetime'] = candle['datetime'] 607 | latest_prices.append(new_price_mini_dict) 608 | 609 | return latest_prices 610 | 611 | def wait_till_next_bar(self, last_bar_timestamp: pd.DatetimeIndex) -> None: 612 | """Waits the number of seconds till the next bar is released. 613 | 614 | Arguments: 615 | ---- 616 | last_bar_timestamp {pd.DatetimeIndex} -- The last bar's timestamp. 617 | """ 618 | 619 | last_bar_time = last_bar_timestamp.to_pydatetime()[ 620 | 0].replace(tzinfo=timezone.utc) 621 | next_bar_time = last_bar_time + timedelta(seconds=60) 622 | curr_bar_time = datetime.now(tz=timezone.utc) 623 | 624 | last_bar_timestamp = int(last_bar_time.timestamp()) 625 | next_bar_timestamp = int(next_bar_time.timestamp()) 626 | curr_bar_timestamp = int(curr_bar_time.timestamp()) 627 | 628 | _time_to_wait_bar = next_bar_timestamp - last_bar_timestamp 629 | time_to_wait_now = next_bar_timestamp - curr_bar_timestamp 630 | 631 | if time_to_wait_now < 0: 632 | time_to_wait_now = 0 633 | 634 | print("="*80) 635 | print("Pausing for the next bar") 636 | print("-"*80) 637 | print("Curr Time: {time_curr}".format( 638 | time_curr=curr_bar_time.strftime("%Y-%m-%d %H:%M:%S") 639 | ) 640 | ) 641 | print("Next Time: {time_next}".format( 642 | time_next=next_bar_time.strftime("%Y-%m-%d %H:%M:%S") 643 | ) 644 | ) 645 | print("Sleep Time: {seconds}".format(seconds=time_to_wait_now)) 646 | print("-"*80) 647 | print('') 648 | 649 | time_true.sleep(time_to_wait_now) 650 | 651 | def create_stock_frame(self, data: List[dict]) -> StockFrame: 652 | """Generates a new StockFrame Object. 653 | 654 | Arguments: 655 | ---- 656 | data {List[dict]} -- The data to add to the StockFrame object. 657 | 658 | Returns: 659 | ---- 660 | StockFrame -- A multi-index pandas data frame built for trading. 661 | """ 662 | 663 | # Create the Frame. 664 | self.stock_frame = StockFrame(data=data) 665 | 666 | return self.stock_frame 667 | 668 | def execute_signals(self, signals: List[pd.Series], trades_to_execute: dict) -> List[dict]: 669 | """Executes the specified trades for each signal. 670 | 671 | Arguments: 672 | ---- 673 | signals {list} -- A pandas.Series object representing the buy signals and sell signals. 674 | Will check if series is empty before making any trades. 675 | 676 | Trades: 677 | ---- 678 | trades_to_execute {dict} -- the trades you want to execute if signals are found. 679 | 680 | Returns: 681 | ---- 682 | {List[dict]} -- Returns all order responses. 
683 | 684 | Usage: 685 | ---- 686 | >>> trades_dict = { 687 | 'MSFT': { 688 | 'trade_func': trading_robot.trades['long_msft'], 689 | 'trade_id': trading_robot.trades['long_msft'].trade_id 690 | } 691 | } 692 | >>> signals = indicator_client.check_signals() 693 | >>> trading_robot.execute_signals( 694 | signals=signals, 695 | trades_to_execute=trades_dict 696 | ) 697 | """ 698 | 699 | buys: pd.Series = signals[0][1] 700 | sells: pd.Series = signals[1][1] 701 | 702 | order_responses = [] 703 | 704 | # If we have buys or sells continue. 705 | if not buys.empty: 706 | 707 | # Grab the buy Symbols. 708 | symbols_list = buys.index.get_level_values(0).to_list() 709 | 710 | # Loop through each symbol. 711 | for symbol in symbols_list: 712 | 713 | # Check to see if there is a Trade object. 714 | if symbol in trades_to_execute: 715 | 716 | if self.portfolio.in_portfolio(symbol=symbol): 717 | self.portfolio.set_ownership_status( 718 | symbol=symbol, 719 | ownership=True 720 | ) 721 | 722 | # Set the Execution Flag. 723 | trades_to_execute[symbol]['has_executed'] = True 724 | trade_obj: Trade = trades_to_execute[symbol]['trade_func'] 725 | 726 | if not self.paper_trading: 727 | 728 | # Execute the order. 729 | order_response = self.execute_orders( 730 | trade_obj=trade_obj 731 | ) 732 | 733 | order_response = { 734 | 'order_id': order_response['order_id'], 735 | 'request_body': order_response['request_body'], 736 | 'timestamp': datetime.now().isoformat() 737 | } 738 | 739 | order_responses.append(order_response) 740 | 741 | else: 742 | 743 | order_response = { 744 | 'order_id': trade_obj._generate_order_id(), 745 | 'request_body': trade_obj.order, 746 | 'timestamp': datetime.now().isoformat() 747 | } 748 | 749 | order_responses.append(order_response) 750 | 751 | elif not sells.empty: 752 | 753 | # Grab the buy Symbols. 754 | symbols_list = sells.index.get_level_values(0).to_list() 755 | 756 | # Loop through each symbol. 757 | for symbol in symbols_list: 758 | 759 | # Check to see if there is a Trade object. 760 | if symbol in trades_to_execute: 761 | 762 | # Set the Execution Flag. 763 | trades_to_execute[symbol]['has_executed'] = True 764 | 765 | if self.portfolio.in_portfolio(symbol=symbol): 766 | self.portfolio.set_ownership_status( 767 | symbol=symbol, 768 | ownership=False 769 | ) 770 | 771 | trade_obj: Trade = trades_to_execute[symbol]['trade_func'] 772 | 773 | if not self.paper_trading: 774 | 775 | # Execute the order. 776 | order_response = self.execute_orders( 777 | trade_obj=trade_obj 778 | ) 779 | 780 | order_response = { 781 | 'order_id': order_response['order_id'], 782 | 'request_body': order_response['request_body'], 783 | 'timestamp': datetime.now().isoformat() 784 | } 785 | 786 | order_responses.append(order_response) 787 | 788 | else: 789 | 790 | order_response = { 791 | 'order_id': trade_obj._generate_order_id(), 792 | 'request_body': trade_obj.order, 793 | 'timestamp': datetime.now().isoformat() 794 | } 795 | 796 | order_responses.append(order_response) 797 | 798 | # Save the response. 799 | self.save_orders(order_response_dict=order_responses) 800 | 801 | return order_responses 802 | 803 | def execute_orders(self, trade_obj: Trade) -> dict: 804 | """Executes a Trade Object. 805 | 806 | Overview: 807 | ---- 808 | The `execute_orders` method will execute trades as they're signaled. When executed, 809 | the `Trade` object will have the order response saved to it, and the order response will 810 | be saved to a JSON file for further analysis. 
811 | 812 | Arguments: 813 | ---- 814 | trade_obj {Trade} -- A trade object with the `order` property filled out. 815 | 816 | Returns: 817 | ---- 818 | {dict} -- An order response dicitonary. 819 | """ 820 | 821 | # Execute the order. 822 | order_dict = self.session.place_order( 823 | account=self.trading_account, 824 | order=trade_obj.order 825 | ) 826 | 827 | return order_dict 828 | 829 | def save_orders(self, order_response_dict: dict) -> bool: 830 | """Saves the order to a JSON file for further review. 831 | 832 | Arguments: 833 | ---- 834 | order_response {dict} -- A single order response. 835 | 836 | Returns: 837 | ---- 838 | {bool} -- `True` if the orders were successfully saved. 839 | """ 840 | 841 | # Define the folder. 842 | folder: pathlib.PurePath = pathlib.Path( 843 | __file__).parents[1].joinpath("data") 844 | 845 | # See if it exist, if not create it. 846 | if not folder.exists(): 847 | folder.mkdir() 848 | 849 | # Define the file path. 850 | file_path = folder.joinpath('orders.json') 851 | 852 | # First check if the file alread exists. 853 | if file_path.exists(): 854 | with open('data/orders.json', 'r') as order_json: 855 | orders_list = json.load(order_json) 856 | else: 857 | orders_list = [] 858 | 859 | # Combine both lists. 860 | orders_list = orders_list + order_response_dict 861 | 862 | # Write the new data back. 863 | with open(file='data/orders.json', mode='w+') as order_json: 864 | json.dump(obj=orders_list, fp=order_json, indent=4) 865 | 866 | return True 867 | 868 | def get_accounts(self, account_number: str = None, all_accounts: bool = False) -> dict: 869 | """Returns all the account balances for a specified account. 870 | 871 | Keyword Arguments: 872 | ---- 873 | account_number {str} -- The account number you want to query. (default: {None}) 874 | 875 | all_accounts {bool} -- Specifies whether you want to grab all accounts `True` or not 876 | `False`. (default: {False}) 877 | 878 | Returns: 879 | ---- 880 | Dict -- A dictionary containing all the information in your account. 881 | 882 | Usage: 883 | ---- 884 | 885 | >>> trading_robot = PyRobot( 886 | client_id=CLIENT_ID, 887 | redirect_uri=REDIRECT_URI, 888 | credentials_path=CREDENTIALS_PATH 889 | ) 890 | >>> trading_robot_accounts = tradeconsole_session.get_accounts( 891 | account_number="" 892 | ) 893 | >>> trading_robot_accounts 894 | [ 895 | { 896 | 'account_number': 'ACCOUNT_ID', 897 | 'account_type': 'CASH', 898 | 'available_funds': 0.0, 899 | 'buying_power': 0.0, 900 | 'cash_available_for_trading': 0.0, 901 | 'cash_available_for_withdrawl': 0.0, 902 | 'cash_balance': 0.0, 903 | 'day_trading_buying_power': 0.0, 904 | 'long_market_value': 0.0, 905 | 'maintenance_call': 0.0, 906 | 'maintenance_requirement': 0.0, 907 | 'short_balance': 0.0, 908 | 'short_margin_value': 0.0, 909 | 'short_market_value': 0.0 910 | } 911 | ] 912 | """ 913 | 914 | # Depending on how the client was initalized, either use the state account 915 | # or the one passed through the function. 916 | if all_accounts: 917 | account = 'all' 918 | elif self.trading_account: 919 | account = self.trading_account 920 | else: 921 | account = account_number 922 | 923 | # Grab the accounts. 924 | accounts = self.session.get_accounts( 925 | account=account 926 | ) 927 | 928 | # Parse the account info. 
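# _parse_account_balances accepts either a single account dictionary or a list of them (the two
# shapes the get_accounts call can return) and always hands back a list of flattened dictionaries
# like the Usage example in this docstring.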
929 | accounts_parsed = self._parse_account_balances( 930 | accounts_response=accounts 931 | ) 932 | 933 | return accounts_parsed 934 | 935 | def _parse_account_balances(self, accounts_response: Union[Dict, List]) -> List[Dict]: 936 | """Parses an Account response into a more simplified dictionary. 937 | 938 | Arguments: 939 | ---- 940 | accounts_response {Union[Dict, List]} -- A response from the `get_accounts` call. 941 | 942 | Returns: 943 | ---- 944 | List[Dict] -- A list of simplified account dictionaries. 945 | """ 946 | 947 | account_lists = [] 948 | 949 | if isinstance(accounts_response, dict): 950 | 951 | account_dict = {} 952 | 953 | for account_type_key in accounts_response: 954 | 955 | account_info = accounts_response[account_type_key] 956 | 957 | account_id = account_info['accountId'] 958 | account_type = account_info['type'] 959 | account_current_balances = account_info['currentBalances'] 960 | # account_inital_balances = account_info['initialBalances'] 961 | 962 | account_dict['account_number'] = account_id 963 | account_dict['account_type'] = account_type 964 | account_dict['cash_balance'] = account_current_balances['cashBalance'] 965 | account_dict['long_market_value'] = account_current_balances['longMarketValue'] 966 | 967 | account_dict['cash_available_for_trading'] = account_current_balances.get( 968 | 'cashAvailableForTrading', 0.0 969 | ) 970 | account_dict['cash_available_for_withdrawl'] = account_current_balances.get( 971 | 'cashAvailableForWithDrawal', 0.0 972 | ) 973 | account_dict['available_funds'] = account_current_balances.get( 974 | 'availableFunds', 0.0 975 | ) 976 | account_dict['buying_power'] = account_current_balances.get( 977 | 'buyingPower', 0.0 978 | ) 979 | account_dict['day_trading_buying_power'] = account_current_balances.get( 980 | 'dayTradingBuyingPower', 0.0 981 | ) 982 | account_dict['maintenance_call'] = account_current_balances.get( 983 | 'maintenanceCall', 0.0 984 | ) 985 | account_dict['maintenance_requirement'] = account_current_balances.get( 986 | 'maintenanceRequirement', 0.0 987 | ) 988 | 989 | account_dict['short_balance'] = account_current_balances.get( 990 | 'shortBalance', 0.0 991 | ) 992 | account_dict['short_market_value'] = account_current_balances.get( 993 | 'shortMarketValue', 0.0 994 | ) 995 | account_dict['short_margin_value'] = account_current_balances.get( 996 | 'shortMarginValue', 0.0 997 | ) 998 | 999 | account_lists.append(account_dict) 1000 | 1001 | elif isinstance(accounts_response, list): 1002 | 1003 | for account in accounts_response: 1004 | 1005 | account_dict = {} 1006 | 1007 | for account_type_key in account: 1008 | 1009 | account_info = account[account_type_key] 1010 | 1011 | account_id = account_info['accountId'] 1012 | account_type = account_info['type'] 1013 | account_current_balances = account_info['currentBalances'] 1014 | # account_inital_balances = account_info['initialBalances'] 1015 | 1016 | account_dict['account_number'] = account_id 1017 | account_dict['account_type'] = account_type 1018 | account_dict['cash_balance'] = account_current_balances['cashBalance'] 1019 | account_dict['long_market_value'] = account_current_balances['longMarketValue'] 1020 | 1021 | account_dict['cash_available_for_trading'] = account_current_balances.get( 1022 | 'cashAvailableForTrading', 0.0 1023 | ) 1024 | account_dict['cash_available_for_withdrawl'] = account_current_balances.get( 1025 | 'cashAvailableForWithDrawal', 0.0 1026 | ) 1027 | account_dict['available_funds'] = account_current_balances.get( 1028 | 
'availableFunds', 0.0 1029 | ) 1030 | account_dict['buying_power'] = account_current_balances.get( 1031 | 'buyingPower', 0.0 1032 | ) 1033 | account_dict['day_trading_buying_power'] = account_current_balances.get( 1034 | 'dayTradingBuyingPower', 0.0 1035 | ) 1036 | account_dict['maintenance_call'] = account_current_balances.get( 1037 | 'maintenanceCall', 0.0 1038 | ) 1039 | account_dict['maintenance_requirement'] = account_current_balances.get( 1040 | 'maintenanceRequirement', 0.0 1041 | ) 1042 | account_dict['short_balance'] = account_current_balances.get( 1043 | 'shortBalance', 0.0 1044 | ) 1045 | account_dict['short_market_value'] = account_current_balances.get( 1046 | 'shortMarketValue', 0.0 1047 | ) 1048 | account_dict['short_margin_value'] = account_current_balances.get( 1049 | 'shortMarginValue', 0.0 1050 | ) 1051 | 1052 | account_lists.append(account_dict) 1053 | 1054 | return account_lists 1055 | 1056 | def get_positions(self, account_number: str = None, all_accounts: bool = False) -> List[Dict]: 1057 | """Gets all the positions for a specified account number. 1058 | 1059 | Arguments: 1060 | ---- 1061 | account_number (str, optional): The account number of the account you want 1062 | to pull positions for. Defaults to None. 1063 | 1064 | all_accounts (bool, optional): If you want to return all the positions for every 1065 | account then set to `True`. Defaults to False. 1066 | 1067 | Returns: 1068 | ---- 1069 | List[Dict]: A list of Position objects. 1070 | 1071 | Usage: 1072 | ---- 1073 | 1074 | >>> trading_robot = PyRobot( 1075 | client_id=CLIENT_ID, 1076 | redirect_uri=REDIRECT_URI, 1077 | credentials_path=CREDENTIALS_PATH 1078 | ) 1079 | >>> trading_robot_positions = tradeconsole_session.get_positions( 1080 | account_number="" 1081 | ) 1082 | >>> trading_robot_positions 1083 | [ 1084 | { 1085 | 'account_number': '111111111', 1086 | 'asset_type': 'EQUITY', 1087 | 'average_price': 0.00, 1088 | 'current_day_profit_loss': -0.96, 1089 | 'current_day_profit_loss_percentage': -5.64, 1090 | 'cusip': '565849106', 1091 | 'description': '', 1092 | 'long_quantity': 3.0, 1093 | 'market_value': 16.05, 1094 | 'settled_long_quantity': 3.0, 1095 | 'settled_short_quantity': 0.0, 1096 | 'short_quantity': 0.0, 1097 | 'sub_asset_type': '', 1098 | 'symbol': 'MRO', 1099 | 'type': '' 1100 | }, 1101 | { 1102 | 'account_number': '111111111', 1103 | 'asset_type': 'EQUITY', 1104 | 'average_price': 5.60667, 1105 | 'current_day_profit_loss': -0.96, 1106 | 'current_day_profit_loss_percentage': -5.64, 1107 | 'cusip': '565849106', 1108 | 'description': '', 1109 | 'long_quantity': 3.0, 1110 | 'market_value': 16.05, 1111 | 'settled_long_quantity': 3.0, 1112 | 'settled_short_quantity': 0.0, 1113 | 'short_quantity': 0.0, 1114 | 'sub_asset_type': '', 1115 | 'symbol': 'MRO', 1116 | 'type': '' 1117 | } 1118 | ] 1119 | """ 1120 | 1121 | if all_accounts: 1122 | account = 'all' 1123 | elif self.trading_account and account_number is None: 1124 | account = self.trading_account 1125 | else: 1126 | account = account_number 1127 | 1128 | # Grab the positions. 1129 | positions = self.session.get_accounts( 1130 | account=account, 1131 | fields=['positions'] 1132 | ) 1133 | 1134 | # Parse the positions. 1135 | positions_parsed = self._parse_account_positions( 1136 | positions_response=positions 1137 | ) 1138 | 1139 | return positions_parsed 1140 | 1141 | def _parse_account_positions(self, positions_response: Union[List, Dict]) -> List[Dict]: 1142 | """Parses the response from the `get_positions` into a more simplified list. 
1143 | 1144 | Arguments: 1145 | ---- 1146 | positions_response {Union[List, Dict]} -- Either a list or a dictionary that represents a position. 1147 | 1148 | Returns: 1149 | ---- 1150 | List[Dict] -- A more simplified list of positions. 1151 | """ 1152 | 1153 | positions_lists = [] 1154 | 1155 | if isinstance(positions_response, dict): 1156 | 1157 | position_dict = {} 1158 | 1159 | for account_type_key in positions_response: 1160 | 1161 | account_info = positions_response[account_type_key] 1162 | 1163 | account_id = account_info['accountId'] 1164 | positions = account_info['positions'] 1165 | 1166 | for position in positions: 1167 | position_dict['account_number'] = account_id 1168 | position_dict['average_price'] = position['averagePrice'] 1169 | position_dict['market_value'] = position['marketValue'] 1170 | position_dict['current_day_profit_loss_percentage'] = position['currentDayProfitLossPercentage'] 1171 | position_dict['current_day_profit_loss'] = position['currentDayProfitLoss'] 1172 | position_dict['long_quantity'] = position['longQuantity'] 1173 | position_dict['short_quantity'] = position['shortQuantity'] 1174 | position_dict['settled_long_quantity'] = position['settledLongQuantity'] 1175 | position_dict['settled_short_quantity'] = position['settledShortQuantity'] 1176 | 1177 | position_dict['symbol'] = position['instrument']['symbol'] 1178 | position_dict['cusip'] = position['instrument']['cusip'] 1179 | position_dict['asset_type'] = position['instrument']['assetType'] 1180 | position_dict['sub_asset_type'] = position['instrument'].get( 1181 | 'subAssetType', "" 1182 | ) 1183 | position_dict['description'] = position['instrument'].get( 1184 | 'description', "" 1185 | ) 1186 | position_dict['type'] = position['instrument'].get( 1187 | 'type', "" 1188 | ) 1189 | 1190 | positions_lists.append(position_dict) 1191 | 1192 | elif isinstance(positions_response, list): 1193 | 1194 | for account in positions_response: 1195 | 1196 | position_dict = {} 1197 | 1198 | for account_type_key in account: 1199 | 1200 | account_info = account[account_type_key] 1201 | 1202 | account_id = account_info['accountId'] 1203 | positions = account_info['positions'] 1204 | 1205 | for position in positions: 1206 | position_dict['account_number'] = account_id 1207 | position_dict['average_price'] = position['averagePrice'] 1208 | position_dict['market_value'] = position['marketValue'] 1209 | position_dict['current_day_profit_loss_percentage'] = position['currentDayProfitLossPercentage'] 1210 | position_dict['current_day_profit_loss'] = position['currentDayProfitLoss'] 1211 | position_dict['long_quantity'] = position['longQuantity'] 1212 | position_dict['short_quantity'] = position['shortQuantity'] 1213 | position_dict['settled_long_quantity'] = position['settledLongQuantity'] 1214 | position_dict['settled_short_quantity'] = position['settledShortQuantity'] 1215 | 1216 | position_dict['symbol'] = position['instrument']['symbol'] 1217 | position_dict['cusip'] = position['instrument']['cusip'] 1218 | position_dict['asset_type'] = position['instrument']['assetType'] 1219 | position_dict['sub_asset_type'] = position['instrument'].get( 1220 | 'subAssetType', "" 1221 | ) 1222 | position_dict['description'] = position['instrument'].get( 1223 | 'description', "" 1224 | ) 1225 | position_dict['type'] = position['instrument'].get( 1226 | 'type', "" 1227 | ) 1228 | 1229 | positions_lists.append(position_dict) 1230 | 1231 | return positions_lists 1232 | 
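# -----------------------------------------------------------------------------------------------
# Hypothetical end-to-end sketch (not part of the original module): one way the pieces in this
# package are assumed to fit together, pieced from the Usage examples in the docstrings above.
# CLIENT_ID, REDIRECT_URI, and CREDENTIALS_PATH are placeholders you would supply yourself, and
# Indicators is assumed to accept the StockFrame built by create_stock_frame.
#
#     from datetime import datetime, timedelta
#     from pyrobot.robot import PyRobot
#     from pyrobot.indicators import Indicators
#
#     trading_robot = PyRobot(
#         client_id=CLIENT_ID,
#         redirect_uri=REDIRECT_URI,
#         credentials_path=CREDENTIALS_PATH,
#         paper_trading=True
#     )
#     portfolio = trading_robot.create_portfolio()
#     portfolio.add_position(symbol='MSFT', asset_type='equity')
#
#     end_date = datetime.today()
#     start_date = end_date - timedelta(days=30)
#     historical_prices = trading_robot.grab_historical_prices(
#         start=start_date, end=end_date, bar_size=1, bar_type='minute'
#     )
#     stock_frame = trading_robot.create_stock_frame(data=historical_prices['aggregated'])
#
#     indicator_client = Indicators(price_data_frame=stock_frame)
#     indicator_client.chaikin_oscillator(period=9)
#     signals = indicator_client.check_signals()
# -----------------------------------------------------------------------------------------------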
-------------------------------------------------------------------------------- /pyrobot/stock_frame.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | from datetime import time 5 | from datetime import datetime 6 | from datetime import timezone 7 | 8 | from typing import List 9 | from typing import Dict 10 | from typing import Union 11 | 12 | from pandas.core.groupby import DataFrameGroupBy 13 | from pandas.core.window import RollingGroupby 14 | from pandas.core.window import Window 15 | 16 | 17 | class StockFrame(): 18 | 19 | def __init__(self, data: List[Dict]) -> None: 20 | """Initalizes the Stock Data Frame Object. 21 | 22 | Arguments: 23 | ---- 24 | data {List[Dict]} -- The data to convert to a frame. Normally, this is 25 | returned from the historical prices endpoint. 26 | """ 27 | 28 | self._data = data 29 | self._frame: pd.DataFrame = self.create_frame() 30 | self._symbol_groups = None 31 | self._symbol_rolling_groups = None 32 | 33 | @property 34 | def frame(self) -> pd.DataFrame: 35 | """The frame object. 36 | 37 | Returns: 38 | ---- 39 | pd.DataFrame -- A pandas data frame with the price data. 40 | """ 41 | return self._frame 42 | 43 | @property 44 | def symbol_groups(self) -> DataFrameGroupBy: 45 | """Returns the Groups in the StockFrame. 46 | 47 | Overview: 48 | ---- 49 | Often we will want to apply operations to a each symbol. The 50 | `symbols_groups` property will return the dataframe grouped by 51 | each symbol. 52 | 53 | Returns: 54 | ---- 55 | {DataFrameGroupBy} -- A `pandas.core.groupby.GroupBy` object with each symbol. 56 | """ 57 | 58 | # Group by Symbol. 59 | self._symbol_groups: DataFrameGroupBy = self._frame.groupby( 60 | by='symbol', 61 | as_index=False, 62 | sort=True 63 | ) 64 | 65 | return self._symbol_groups 66 | 67 | def symbol_rolling_groups(self, size: int) -> RollingGroupby: 68 | """Grabs the windows for each group. 69 | 70 | Arguments: 71 | ---- 72 | size {int} -- The size of the window. 73 | 74 | Returns: 75 | ---- 76 | {RollingGroupby} -- A `pandas.core.window.RollingGroupby` object. 77 | """ 78 | 79 | # If we don't a symbols group, then create it. 80 | if not self._symbol_groups: 81 | self.symbol_groups 82 | 83 | self._symbol_rolling_groups: RollingGroupby = self._symbol_groups.rolling(size) 84 | 85 | return self._symbol_rolling_groups 86 | 87 | 88 | def create_frame(self) -> pd.DataFrame: 89 | """Creates a new data frame with the data passed through. 90 | 91 | Returns: 92 | ---- 93 | {pd.DataFrame} -- A pandas dataframe. 94 | """ 95 | 96 | # Make a data frame. 97 | price_df = pd.DataFrame(data=self._data) 98 | price_df = self._parse_datetime_column(price_df=price_df) 99 | price_df = self._set_multi_index(price_df=price_df) 100 | 101 | return price_df 102 | 103 | def _parse_datetime_column(self, price_df: pd.DataFrame) -> pd.DataFrame: 104 | """Parses the datetime column passed through. 105 | 106 | Arguments: 107 | ---- 108 | price_df {pd.DataFrame} -- The price data frame with a 109 | datetime column. 110 | 111 | Returns: 112 | ---- 113 | {pd.DataFrame} -- A pandas dataframe. 114 | """ 115 | 116 | price_df['datetime'] = pd.to_datetime(price_df['datetime'], unit='ms', origin='unix') 117 | 118 | return price_df 119 | 120 | def _set_multi_index(self, price_df: pd.DataFrame) -> pd.DataFrame: 121 | """Converts the dataframe to a multi-index data frame. 122 | 123 | Arguments: 124 | ---- 125 | price_df {pd.DataFrame} -- The price data frame. 
126 | 127 | Returns: 128 | ---- 129 | pd.DataFrame -- A pandas dataframe. 130 | """ 131 | 132 | price_df = price_df.set_index(keys=['symbol','datetime']) 133 | 134 | return price_df 135 | 136 | def add_rows(self, data: Dict) -> None: 137 | """Adds a new row to our StockFrame. 138 | 139 | Arguments: 140 | ---- 141 | data {Dict} -- A list of quotes. 142 | 143 | Usage: 144 | ---- 145 | >>> # Create a StockFrame object. 146 | >>> stock_frame = trading_robot.create_stock_frame( 147 | data=historical_prices['aggregated'] 148 | ) 149 | >>> fake_data = { 150 | "datetime": 1586390396750, 151 | "symbol": "MSFT", 152 | "close": 165.7, 153 | "open": 165.67, 154 | "high": 166.67, 155 | "low": 163.5, 156 | "volume": 48318234 157 | } 158 | >>> # Add to the Stock Frame. 159 | >>> stock_frame.add_rows(data=fake_data) 160 | """ 161 | 162 | column_names = ['open', 'close', 'high', 'low', 'volume'] 163 | 164 | for quote in data: 165 | 166 | # Parse the Timestamp. 167 | time_stamp = pd.to_datetime( 168 | quote['datetime'], 169 | unit='ms', 170 | origin='unix' 171 | ) 172 | 173 | # Define the Index Tuple. 174 | row_id = (quote['symbol'], time_stamp) 175 | 176 | # Define the values. 177 | row_values = [ 178 | quote['open'], 179 | quote['close'], 180 | quote['high'], 181 | quote['low'], 182 | quote['volume'] 183 | ] 184 | 185 | # Create a new row. 186 | new_row = pd.Series(data=row_values) 187 | 188 | # Add the row. 189 | self.frame.loc[row_id, column_names] = new_row.values 190 | 191 | self.frame.sort_index(inplace=True) 192 | 193 | def do_indicator_exist(self, column_names: List[str]) -> bool: 194 | """Checks to see if the indicator columns specified exist. 195 | 196 | Overview: 197 | ---- 198 | The user can add multiple indicator columns to their StockFrame object 199 | and in some cases we will need to modify those columns before making trades. 200 | In those situations, this method, will help us check if those columns exist 201 | before proceeding on in the code. 202 | 203 | Arguments: 204 | ---- 205 | column_names {List[str]} -- A list of column names that will be checked. 206 | 207 | Raises: 208 | ---- 209 | KeyError: If a column is not found in the StockFrame, a KeyError will be raised. 210 | 211 | Returns: 212 | ---- 213 | bool -- `True` if all the columns exist. 214 | """ 215 | 216 | if set(column_names).issubset(self._frame.columns): 217 | return True 218 | else: 219 | raise KeyError("The following indicator columns are missing from the StockFrame: {missing_columns}".format( 220 | missing_columns=set(column_names).difference(self._frame.columns) 221 | )) 222 | 223 | def _check_signals(self, indicators: dict) -> Union[pd.DataFrame, None]: 224 | """Returns the last row of the StockFrame if conditions are met. 225 | 226 | Overview: 227 | ---- 228 | Before a trade is executed, we must check to make sure if the 229 | conditions that warrant a `buy` or `sell` signal are met. This 230 | method will take last row for each symbol in the StockFrame and 231 | compare the indicator column values with the conditions specified 232 | by the user. 233 | 234 | If the conditions are met the row will be returned back to the user. 235 | 236 | Arguments: 237 | ---- 238 | indicators {dict} -- A dictionary containing all the indicators to be checked 239 | along with their buy and sell criteria. 240 | 241 | Returns: 242 | ---- 243 | {Union[pd.DataFrame, None]} -- If signals are generated then, a pandas.DataFrame object 244 | will be returned. If no signals are found then nothing will be returned. 
245 | """ 246 | 247 | # Grab the last rows. 248 | last_rows = self._symbol_groups.tail(1) 249 | 250 | conditions = [] 251 | 252 | # Check to see if all the columns exist. 253 | if self.do_indicator_exist(column_names=indicators.keys()): 254 | 255 | for indicator in indicators: 256 | 257 | column = last_rows[indicator] 258 | 259 | buy_condition_target = indicators[indicator]['buy'] 260 | sell_condition_target = indicators[indicator]['sell'] 261 | 262 | buy_condition_operator = indicators[indicator]['buy_operator'] 263 | sell_condition_operator = indicators[indicator]['sell_operator'] 264 | 265 | condition_1: pd.Series = buy_condition_operator(column, buy_condition_target) 266 | condition_2: pd.Series = sell_condition_operator(column, sell_condition_target) 267 | 268 | condition_1 = condition_1.where(lambda x : x == True).dropna() 269 | condition_2 = condition_2.where(lambda x : x == True).dropna() 270 | 271 | conditions.append(('buys', condition_1)) 272 | conditions.append(('sells', condition_2)) 273 | 274 | return conditions -------------------------------------------------------------------------------- /pyrobot/trades.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | from typing import List 4 | from typing import Union 5 | from typing import Optional 6 | 7 | class Trade(): 8 | 9 | """ 10 | Object Type: 11 | ---- 12 | `pyrobot.Trade` 13 | 14 | Overview: 15 | ---- 16 | Reprsents the Trade Object which is used to create new trades, 17 | add customizations to them, and easily modify existing content. 18 | """ 19 | 20 | def __init__(self): 21 | """Initalizes a new order.""" 22 | 23 | self.order = {} 24 | self.trade_id = "" 25 | 26 | self.side = "" 27 | self.side_opposite = "" 28 | self.enter_or_exit = "" 29 | self.enter_or_exit_opposite = "" 30 | 31 | self._order_response = {} 32 | self._triggered_added = False 33 | self._multi_leg = False 34 | 35 | def new_trade(self, trade_id: str, order_type: str, side: str, enter_or_exit: str, price: float = 0.00, stop_limit_price: float = 0.00) -> dict: 36 | """Creates a new Trade object template. 37 | 38 | A trade object is a template that can be used to help build complex trades 39 | that normally are prone to errors when writing the JSON. Additionally, it 40 | will help the process of storing trades easier. 41 | 42 | Arguments: 43 | ---- 44 | order_type {str} -- The type of order you would like to create. Can be 45 | one of the following: ['mkt', 'lmt', 'stop', 'stop_lmt', 'trailing_stop'] 46 | 47 | side {str} -- The side the trade will take, can be one of the 48 | following: ['long', 'short'] 49 | 50 | enter_or_exit {str} -- Specifices whether this trade will enter a new position 51 | or exit an existing position. If used to enter then specify, 'enter'. If 52 | used to exit a trade specify, 'exit'. 
53 | 54 | Returns: 55 | ---- 56 | {dict} -- [description] 57 | """ 58 | 59 | self.trade_id = trade_id 60 | 61 | self.order_types = { 62 | 'mkt':'MARKET', 63 | 'lmt':'LIMIT', 64 | 'stop':'STOP', 65 | 'stop_lmt':'STOP_LIMIT', 66 | 'trailing_stop':'TRAILING_STOP' 67 | } 68 | 69 | self.order_instructions = { 70 | 'enter':{ 71 | 'long':'BUY', 72 | 'short':'SELL_SHORT' 73 | }, 74 | 'exit':{ 75 | 'long':'SELL', 76 | 'short':'BUY_TO_COVER' 77 | } 78 | } 79 | 80 | self.order = { 81 | "orderStrategyType": "SINGLE", 82 | "orderType": self.order_types[order_type], 83 | "session": "NORMAL", 84 | "duration": "DAY", 85 | "orderLegCollection": [ 86 | { 87 | "instruction": self.order_instructions[enter_or_exit][side], 88 | "quantity": 0, 89 | "instrument": { 90 | "symbol": None, 91 | "assetType": None 92 | } 93 | } 94 | ] 95 | } 96 | 97 | if self.order['orderType'] == 'STOP': 98 | self.order['stopPrice'] = price 99 | 100 | elif self.order['orderType'] == 'LIMIT': 101 | self.order['price'] = price 102 | 103 | elif self.order['orderType'] == 'STOP_LIMIT': 104 | self.order['price'] = stop_limit_price 105 | self.order['stopPrice'] = price 106 | 107 | elif self.order['orderType'] == 'TRAILING_STOP': 108 | self.order['stopPriceLinkBasis'] = "" 109 | self.order['stopPriceLinkType'] = "" 110 | self.order['stopPriceOffset'] = 0.00 111 | self.order['stopType'] = 'STANDARD' 112 | 113 | # Make a refrence to the side we take, useful when adding other components. 114 | self.enter_or_exit = enter_or_exit 115 | self.side = side 116 | self.order_type = order_type 117 | self.price = price 118 | 119 | # If it's a stop limit order or stop order, set the stop price. 120 | if self.is_stop_order or self.is_stop_limit_order: 121 | self.stop_price = price 122 | else: 123 | self.stop_price = 0.0 124 | 125 | # If it's a stop limit order set the stop limit price. 126 | if self.is_stop_limit_order: 127 | self.stop_limit_price = stop_limit_price 128 | else: 129 | self.stop_limit_price = 0.0 130 | 131 | # If it's a limit price set the limit price. 132 | if self.is_limit_order: 133 | self.limit_price = price 134 | else: 135 | self.limit_price = 0.0 136 | 137 | # Set the enter or exit state. 138 | if self.enter_or_exit == 'enter': 139 | self.enter_or_exit_opposite = 'exit' 140 | if self.enter_or_exit == 'exit': 141 | self.enter_or_exit_opposite = 'enter' 142 | 143 | # Set the side state. 144 | if self.side == 'long': 145 | self.side_opposite = 'short' 146 | if self.side == 'short': 147 | self.side_opposite = 'long' 148 | 149 | return self.order 150 | 151 | def instrument(self, symbol: str, quantity: int, asset_type: str, sub_asset_type: str = None, order_leg_id: int = 0) -> dict: 152 | """Adds an instrument to a trade. 153 | 154 | Arguments: 155 | ---- 156 | symbol {str} -- The instrument ticker symbol. 157 | 158 | quantity {int} -- The quantity of shares to be purchased. 159 | 160 | asset_type {str} -- The instrument asset type. For example, `EQUITY`. 161 | 162 | Keyword Arguments: 163 | ---- 164 | sub_asset_type {str} -- The instrument sub-asset type, not always needed. For example, `ETF`. (default: {None}) 165 | 166 | Returns: 167 | ---- 168 | {dict} -- A dictionary with the instrument. 
169 | """ 170 | 171 | leg = self.order['orderLegCollection'][order_leg_id] 172 | 173 | leg['instrument']['symbol'] = symbol 174 | leg['instrument']['assetType'] = asset_type 175 | leg['quantity'] = quantity 176 | 177 | self.order_size = quantity 178 | self.symbol = symbol 179 | self.asset_type = asset_type 180 | 181 | return leg 182 | 183 | def add_option_instrument(self, symbol: str, quantity: int, order_leg_id: int = 0) -> dict: 184 | """Adds an Option instrument to the Trade object. 185 | 186 | Args: 187 | ---- 188 | symbol (str): The option symbol to be added. 189 | 190 | quantity (int): The number of option contracts to purchase or sell. 191 | 192 | order_leg_id (int, optional): The position of the instrument within the 193 | the Order Leg Collection.. Defaults to 0. 194 | 195 | Returns: 196 | ---- 197 | dict: The order leg containing the option contract. 198 | """ 199 | 200 | self.instrument( 201 | symbol=symbol, 202 | quantity=quantity, 203 | asset_type='OPTION', 204 | order_leg_id=order_leg_id 205 | ) 206 | 207 | leg = self.order['orderLegCollection'][order_leg_id] 208 | 209 | return leg 210 | 211 | def good_till_cancel(self, cancel_time: datetime) -> None: 212 | """Converts an order to a `Good Till Cancel` order. 213 | 214 | Arguments: 215 | ---- 216 | cancel_time {datetime.datetime} -- A datetime object representing the 217 | cancel time of the order. 218 | """ 219 | 220 | self.order['duration'] = 'GOOD_TILL_CANCEL' 221 | self.order['cancelTime'] = cancel_time.isoformat() 222 | 223 | def modify_side(self, side: Optional[str] , leg_id: int = 0) -> None: 224 | """Modifies the Side the order takes. 225 | 226 | Arguments: 227 | ---- 228 | side {str} -- The side to be set. Can be one of the following: 229 | `['buy', 'sell', 'sell_short', 'buy_to_cover']`. 230 | 231 | Keyword Arguments: 232 | ---- 233 | leg_id {int} -- The leg you want to adjust. (default: {0}) 234 | 235 | Raises: 236 | ---- 237 | ValueError -- If the `side` argument does not match one of the valid sides, 238 | then a ValueError will be raised. 239 | """ 240 | 241 | # Validate the Side. 242 | if side and side not in ['buy', 'sell', 'sell_short', 'buy_to_cover', 'sell_to_close', 'buy_to_open']: 243 | raise ValueError( 244 | "The side you have specified is not valid. Please choose a valid side: ['buy', 'sell', 'sell_short', 'buy_to_cover','sell_to_close', 'buy_to_open']" 245 | ) 246 | 247 | # Set the Order. 248 | if side: 249 | self.order['orderLegCollection'][leg_id]['instruction'] = side.upper() 250 | else: 251 | self.order['orderLegCollection'][leg_id]['instruction'] = self.order_instructions[self.enter_or_exit][self.side_opposite] 252 | 253 | def add_box_range(self, profit_size: float = 0.00, percentage: bool = False, stop_limit: bool = False): 254 | """Adds a Stop Loss(or Stop-Limit order), and a limit Order 255 | 256 | Arguments: 257 | ---- 258 | profit_size {float} -- The size of desired profit. For example, `0.10`. 259 | 260 | percentage {float} -- Specifies whether the `profit_size` is in absolute dollars `False` or 261 | in percentage terms `True`. 262 | 263 | Keyword Arguments: 264 | ---- 265 | stop_limit {bool} -- If `True` makes the stop-loss a stop-limit. (default: {False}) 266 | """ 267 | 268 | if not self._triggered_added: 269 | self._convert_to_trigger() 270 | 271 | # Add a take profit Limit order. 272 | self.add_take_profit(profit_size=profit_size, percentage=percentage) 273 | 274 | # Add a stop Loss Order. 
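        # Note: as written, no stop-limit child order is built in this method; when
        # `stop_limit` is True the stop leg below is skipped entirely, so
        # `add_stop_limit` would need to be called separately. The stop loss also
        # reuses `profit_size` as its stop distance, giving a symmetric bracket
        # around the entry price.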
275 | if not stop_limit: 276 | self.add_stop_loss(stop_size=profit_size, percentage=percentage) 277 | 278 | def add_stop_loss(self, stop_size: float, percentage: bool = False) -> bool: 279 | """Add's a stop loss order to exit the position when a certain loss is reached. 280 | 281 | Arguments: 282 | ---- 283 | stop_size {float} -- The size of the stop from the current trading price. For example, `0.10`. 284 | 285 | Keyword Arguments: 286 | ---- 287 | percentage {bool} -- Specifies whether the `stop_size` adjustment is a 288 | `percentage` or an `absolute dollar amount`. If `True` will calculate the 289 | stop size as a percentage of the current price. (default: {False}) 290 | 291 | Returns: 292 | ---- 293 | {bool} -- `True` if the order was added. 294 | """ 295 | 296 | if not self._triggered_added: 297 | self._convert_to_trigger() 298 | 299 | if self.order_type == 'mkt': 300 | # Have to make a call to Get Quotes. 301 | price = self.price 302 | elif self.order_type == 'lmt': 303 | price = self.price 304 | 305 | if percentage: 306 | adjustment = 1.0 - stop_size 307 | new_price = self._calculate_new_price(price=price, adjustment=adjustment, percentage=True) 308 | else: 309 | adjustment = -stop_size 310 | new_price = self._calculate_new_price(price=price, adjustment=adjustment, percentage=False) 311 | 312 | stop_loss_order = { 313 | "orderType": "STOP", 314 | "session": "NORMAL", 315 | "duration": "DAY", 316 | "stopPrice": new_price, 317 | "orderStrategyType": "SINGLE", 318 | "orderLegCollection": [ 319 | { 320 | "instruction": self.order_instructions[self.enter_or_exit_opposite][self.side], 321 | "quantity": self.order_size, 322 | "instrument": { 323 | "symbol": self.symbol, 324 | "assetType": self.asset_type 325 | } 326 | } 327 | ] 328 | } 329 | 330 | self.stop_loss_order = stop_loss_order 331 | self.order['childOrderStrategies'].append(self.stop_loss_order) 332 | 333 | return True 334 | 335 | def add_stop_limit(self, stop_size: float, limit_size: float, stop_percentage: bool = False, limit_percentage: bool = False): 336 | """Add's a Stop Limit Order to exit a trade when a stop price is reached but does not exceed the limit. 337 | 338 | Arguments: 339 | ---- 340 | stop_size {float} -- The size of the stop from the current trading price. For example, `0.10`. 341 | 342 | limit_size {float} -- The size of the limit from the current stop price. For example, `0.10`. 343 | 344 | Keyword Arguments: 345 | ---- 346 | stop_percentage {bool} -- Specifies whether the `stop_size` adjustment is a 347 | `percentage` or an `absolute dollar amount`. If `True` will calculate the 348 | stop size as a percentage of the current price. (default: {False}) 349 | 350 | limit_percentage {bool} -- Specifies whether the `limit_size` adjustment is a 351 | `percentage` or an `absolute dollar amount`. If `True` will calculate the 352 | limit size as a percentage of the current stop price. (default: {False}) 353 | 354 | Returns: 355 | ---- 356 | {bool} -- `True` if the order was added. 357 | """ 358 | 359 | # Check to see if there is a trigger. 360 | if not self._triggered_added: 361 | self._convert_to_trigger() 362 | 363 | # Grab the price. 364 | if self.order_type == 'mkt': 365 | # Have to make a call to Get Quotes. 366 | price = self.price 367 | 368 | elif self.order_type == 'lmt': 369 | price = self.price 370 | 371 | # Calculate the Stop Price. 
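        # With `stop_percentage=True` the stop sits at price * (1 - stop_size);
        # otherwise it sits at price - stop_size. Both paths go through
        # `_calculate_new_price`, which rounds to 4 decimal places below $1.00
        # and 2 decimal places above it. For example, price=165.70 with
        # stop_size=0.10 gives 149.13 as a percentage stop or 165.60 as an
        # absolute stop.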
372 | if stop_percentage: 373 | adjustment = 1.0 - stop_size 374 | stop_price = self._calculate_new_price( 375 | price=price, 376 | adjustment=adjustment, 377 | percentage=True 378 | ) 379 | else: 380 | adjustment = -stop_size 381 | stop_price = self._calculate_new_price( 382 | price=price, 383 | adjustment=adjustment, 384 | percentage=False 385 | ) 386 | 387 | # Calculate the Limit Price. 388 | if limit_percentage: 389 | adjustment = 1.0 - limit_size 390 | limit_price = self._calculate_new_price( 391 | price=price, 392 | adjustment=adjustment, 393 | percentage=True 394 | ) 395 | else: 396 | adjustment = -limit_size 397 | limit_price = self._calculate_new_price( 398 | price=price, 399 | adjustment=adjustment, 400 | percentage=False 401 | ) 402 | 403 | # Add the order. 404 | stop_limit_order = { 405 | "orderType": "STOP_LIMIT", 406 | "session": "NORMAL", 407 | "duration": "DAY", 408 | "price":limit_price, 409 | "stopPrice": stop_price, 410 | "orderStrategyType": "SINGLE", 411 | "orderLegCollection": [ 412 | { 413 | "instruction": self.order_instructions[self.enter_or_exit_opposite][self.side], 414 | "quantity": self.order_size, 415 | "instrument": { 416 | "symbol": self.symbol, 417 | "assetType": self.asset_type 418 | } 419 | } 420 | ] 421 | } 422 | 423 | self.stop_limit_order = stop_limit_order 424 | self.order['childOrderStrategies'].append(self.stop_limit_order) 425 | 426 | return True 427 | 428 | def _calculate_new_price(self, price: float, adjustment: float, percentage: bool) -> float: 429 | """Calculates an adjusted price given an old price. 430 | 431 | Arguments: 432 | ---- 433 | price {float} -- The original price. 434 | 435 | adjustment {float} -- The adjustment to be made to the new price. 436 | 437 | percentage {bool} -- Specifies whether the adjustment is a percentage adjustment `True` or 438 | an absolute dollar adjustment `False`. 439 | 440 | Returns: 441 | ---- 442 | {float} -- The new price after the adjustment has been made. 443 | """ 444 | 445 | if percentage: 446 | new_price = price * adjustment 447 | else: 448 | new_price = price + adjustment 449 | 450 | # For orders below $1.00, can only have 4 decimal places. 451 | if new_price < 1: 452 | new_price = round(new_price,4) 453 | 454 | # For orders above $1.00, can only have 2 decimal places. 455 | else: 456 | new_price = round(new_price, 2) 457 | 458 | return new_price 459 | 460 | def add_take_profit(self, profit_size: float, percentage: bool = False) -> bool: 461 | """Add's a Limit Order to exit a trade when a profit threshold is reached. 462 | 463 | Arguments: 464 | ---- 465 | profit_size {float} -- The size of the profit you want to make. For example, `0.10`. 466 | 467 | Keyword Arguments: 468 | ---- 469 | percentage {bool} -- Specifies whether the `profit_size` passed through is a 470 | `percentage` or an `absolute dollar amount`. If `True` will calculate the 471 | profit as a percentage of the current price. (default: {False}) 472 | 473 | Returns: 474 | ---- 475 | {bool} -- `True` if the order was added. 476 | """ 477 | 478 | # Check to see if we have a trigger order. 479 | if not self._triggered_added: 480 | self._convert_to_trigger() 481 | 482 | # We need to basis to calculate off of. Use the price. 483 | if self.order_type == 'mkt': 484 | # Have to make a call to Get Quotes. 485 | price = self.price 486 | elif self.order_type == 'lmt': 487 | price = self.price 488 | 489 | # Calculate the new price. 
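        # The take-profit limit is placed above the reference price: with
        # `percentage=True` it becomes price * (1 + profit_size), otherwise
        # price + profit_size. Note that for market orders the reference is
        # still `self.price`; the "call to Get Quotes" mentioned above is not
        # implemented here.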
490 | if percentage: 491 | adjustment = 1.0 + profit_size 492 | new_price = self._calculate_new_price( 493 | price=price, 494 | adjustment=adjustment, 495 | percentage=True 496 | ) 497 | else: 498 | adjustment = profit_size 499 | new_price = self._calculate_new_price( 500 | price=price, 501 | adjustment=adjustment, 502 | percentage=False 503 | ) 504 | 505 | # Build the order. 506 | take_profit_order = { 507 | "orderType": "LIMIT", 508 | "session": "NORMAL", 509 | "price": new_price, 510 | "duration": "DAY", 511 | "orderStrategyType": "SINGLE", 512 | "orderLegCollection": [ 513 | { 514 | "instruction": self.order_instructions[self.enter_or_exit_opposite][self.side], 515 | "quantity": self.order_size, 516 | "instrument": { 517 | "symbol": self.symbol, 518 | "assetType": self.asset_type 519 | } 520 | } 521 | ] 522 | } 523 | 524 | # Add the order. 525 | self.take_profit_order = take_profit_order 526 | self.order['childOrderStrategies'].append(self.take_profit_order) 527 | 528 | return True 529 | 530 | def _convert_to_trigger(self): 531 | """Converts a regular order to a trigger order. 532 | 533 | Overview: 534 | ---- 535 | Trigger orders can be used to have a stop loss orders, or take profit orders 536 | placed right after the main order has been placed. This helps protect the order 537 | when possible and take profit when thresholds are reached. 538 | """ 539 | 540 | # Only convert to a trigger order, if it already isn't one. 541 | if self.order and self._triggered_added == False: 542 | self.order['orderStrategyType'] = 'TRIGGER' 543 | 544 | # Trigger orders will have child strategies, so initalize that list. 545 | self.order['childOrderStrategies'] = [] 546 | 547 | # Update the state. 548 | self._triggered_added = True 549 | 550 | def modify_session(self, session: str) -> None: 551 | """Changes which session the order is for. 552 | 553 | Description 554 | ---- 555 | Orders are able to be active during different trading sessions. 556 | If you would like the order to be active during a different session, 557 | then choose one of the following: 558 | 559 | 1. 'am' - This is for pre-market hours. 560 | 2. 'pm' - This is for post-market hours. 561 | 3. 'normal' - This is for normal market hours. 562 | 4. 'seamless' - This makes the order active all of the sessions. 563 | 564 | Arguments: 565 | ---- 566 | session {str} -- The session you want the order to be active. Possible values 567 | are ['am', 'pm', 'normal', 'seamless'] 568 | """ 569 | 570 | if session in ['am', 'pm', 'normal', 'seamless']: 571 | self.order['session'] = session.upper() 572 | else: 573 | raise ValueError('Invalid session, choose either am, pm, normal, or seamless') 574 | 575 | @property 576 | def order_response(self) -> dict: 577 | """Returns the order response from submitting an order. 578 | 579 | Returns: 580 | ---- 581 | {dict} -- The order response dictionary. 582 | """ 583 | 584 | return self._order_response 585 | 586 | @order_response.setter 587 | def order_response(self, order_response_dict: dict) -> None: 588 | """Sets the order response from submitting an order. 589 | 590 | Arguments: 591 | ---- 592 | order_response_dict {dict} -- The order response dictionary. 593 | """ 594 | 595 | self._order_response = order_response_dict 596 | 597 | def _generate_order_id(self) -> str: 598 | """Generates an ID that can be used to identify the order. 599 | 600 | Returns: 601 | ---- 602 | {str} -- The order ID that was generated. 603 | """ 604 | 605 | # If we have an order, then generate it. 
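        # The generated ID concatenates the symbol, side, enter/exit state, and
        # the current UNIX timestamp, e.g. "MSFT_long_enter_1586390396.75", so
        # it is unique per trade for practical purposes but not guaranteed to be.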
606 | if self.order: 607 | 608 | order_id = "{symbol}_{side}_{enter_or_exit}_{timestamp}" 609 | 610 | order_id = order_id.format( 611 | symbol=self.symbol, 612 | side=self.side, 613 | enter_or_exit=self.enter_or_exit, 614 | timestamp=datetime.now().timestamp() 615 | ) 616 | 617 | return order_id 618 | 619 | else: 620 | return "" 621 | 622 | def add_leg(self, order_leg_id: int, symbol: str, quantity: int, asset_type: str, sub_asset_type: str = None) -> List[dict]: 623 | """Adds an instrument to a trade. 624 | 625 | Arguments: 626 | ---- 627 | order_leg_id {int} -- The position you want the new leg to be in the leg collection. 628 | 629 | symbol {str} -- The instrument ticker symbol. 630 | 631 | quantity {int} -- The quantity of shares to be purchased. 632 | 633 | asset_type {str} -- The instrument asset type. For example, `EQUITY`. 634 | 635 | Keyword Arguments: 636 | ---- 637 | sub_asset_type {str} -- The instrument sub-asset type, not always needed. For example, `ETF`. (default: {None}) 638 | 639 | Returns: 640 | ---- 641 | {dict} -- The order's order leg collection. 642 | """ 643 | 644 | # Define the leg. 645 | leg = {} 646 | leg['instrument']['symbol'] = symbol 647 | leg['instrument']['assetType'] = asset_type 648 | leg['quantity'] = quantity 649 | 650 | if sub_asset_type: 651 | leg['instrument']['subAssetType'] = sub_asset_type 652 | 653 | 654 | # If 0, call instrument. 655 | if order_leg_id == 0: 656 | self.instrument( 657 | symbol=symbol, 658 | asset_type=asset_type, 659 | quantity=quantity, 660 | sub_asset_type=sub_asset_type, 661 | order_leg_id=0 662 | ) 663 | else: 664 | # Insert it. 665 | order_leg_colleciton: list = self.order['orderLegCollection'] 666 | order_leg_colleciton.insert(order_leg_id, leg) 667 | 668 | return self.order['orderLegCollection'] 669 | 670 | @property 671 | def number_of_legs(self) -> int: 672 | """Returns the number of legs in the Order Leg Collection. 673 | 674 | Returns: 675 | ---- 676 | int: The count of legs in the collection. 677 | """ 678 | 679 | return len(self.order['orderLegCollection']) 680 | 681 | def modify_price(self, new_price: float, price_type: str) -> None: 682 | """Used to change the price that is specified. 683 | 684 | Arguments: 685 | ---- 686 | new_price (float): The new price to be set. 687 | 688 | price_type (str): The type of price that should be modified. Can 689 | be one of the following: [ 690 | 'price', 691 | 'stop-price', 692 | 'limit-price', 693 | 'stop-limit-stop-price', 694 | 'stop-limit-limit-price' 695 | ] 696 | """ 697 | 698 | if price_type == 'price': 699 | self.order['price'] = new_price 700 | elif price_type == 'stop-price' and self.is_stop_order: 701 | self.order['stopPrice'] = new_price 702 | self.stop_price = new_price 703 | elif price_type == 'limit-price' and self.is_limit_order: 704 | self.order['price'] = new_price 705 | self.price = new_price 706 | elif price_type == 'stop-limit-limit-price' and self.is_stop_limit_order: 707 | self.order['price'] = new_price 708 | self.stop_limit_price = new_price 709 | elif price_type == 'stop-limit-stop-price' and self.is_stop_limit_order: 710 | self.order['stopPrice'] = new_price 711 | self.stop_price = new_price 712 | 713 | @property 714 | def is_stop_order(self) -> bool: 715 | """Specifies whether the order is a Stop Loss Order. 716 | 717 | Returns: 718 | ---- 719 | bool: `True` if the order is a Stop order, `False` otherwise. 
720 | """ 721 | 722 | if self.order_type != 'stop': 723 | return False 724 | else: 725 | return True 726 | 727 | @property 728 | def is_stop_limit_order(self) -> bool: 729 | """Specifies whether the order is a Stop Limit Order. 730 | 731 | Returns: 732 | ---- 733 | bool: `True` if the order is a Stop Limit order, `False` otherwise. 734 | """ 735 | 736 | if self.order_type != 'stop_lmt': 737 | return False 738 | else: 739 | return True 740 | 741 | @property 742 | def is_limit_order(self) -> bool: 743 | """Specifies whether the order is a Limit Order. 744 | 745 | Returns: 746 | ---- 747 | bool: `True` if the order is a Limit order, `False` otherwise. 748 | """ 749 | 750 | if self.order_type != 'lmt': 751 | return False 752 | else: 753 | return True --------------------------------------------------------------------------------
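End to end, a bracketed equity trade built with the `Trade` object might look like the sketch below. The ticker, quantity, and profit/stop sizes are illustrative, and actually submitting `trade.order` to the broker happens outside this file:

from pyrobot.trades import Trade

# Build a long entry as a limit order at $165.70.
trade = Trade()
trade.new_trade(
    trade_id='msft-long-entry',
    order_type='lmt',
    side='long',
    enter_or_exit='enter',
    price=165.70
)

# Attach the instrument being traded.
trade.instrument(symbol='MSFT', quantity=10, asset_type='EQUITY')

# Bracket the position: take profit 2% above entry, stop loss $1.50 below.
trade.add_take_profit(profit_size=0.02, percentage=True)
trade.add_stop_loss(stop_size=1.50, percentage=False)

# `trade.order` now holds a TRIGGER order with two child order strategies
# (the limit exit and the stop exit), ready to be serialized to JSON.

Because `add_take_profit` and `add_stop_loss` both call `_convert_to_trigger` on first use, the parent order is promoted to a TRIGGER strategy automatically and the exit legs ride along as child order strategies.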