├── .gitignore ├── README.md ├── agent_baseline.py ├── agent_baseline_backtest.py ├── agent_dqn.py ├── agent_keras_rl.py ├── agent_qlearn.py ├── ctc_executioner ├── action.py ├── action_space.py ├── action_space_env.py ├── action_state.py ├── agent_utils │ ├── action_plot_callback.py │ ├── action_reward_log.py │ ├── live_plot_callback.py │ └── ui.py ├── feature_generator.py ├── feature_type.py ├── match_engine.py ├── order.py ├── order_side.py ├── order_type.py ├── orderbook.py ├── orderbook_bitfinex_btcusd_view.tsv ├── qlearn.py ├── test │ ├── test.py │ ├── test_action.py │ └── test_qlearn.py └── trade.py ├── data ├── events.js ├── events.ts ├── events │ ├── ob-test.tsv │ └── ob-train.tsv ├── example-ob-test.tsv ├── example-ob-train.tsv ├── trades │ ├── query_result_test_15m.tsv │ └── query_result_train_15m.tsv └── tsconfig.json ├── docs ├── Poster.odg ├── Poster.pdf ├── Poster2.odg ├── Poster2.pdf ├── final-presentation.pdf ├── final-presentation_notes.pdf ├── presentation-midterm.pdf └── report.pdf ├── gym_ctc_executioner ├── __init__.py └── envs │ ├── __init__.py │ └── execution_env.py ├── gym_ctc_marketmaker ├── __init__.py └── envs │ ├── __init__.py │ └── marketmaker_env.py ├── images ├── analysis-limit-impatient.png ├── analysis-limit-wide-spread-buy.png ├── analysis-limit-wide-spread-sell.png ├── ba-ob-min.png ├── behaviour-100s-buy.png ├── behaviour-100s-sell.png ├── behaviour-10s-buy.png ├── behaviour-10s-sell.png ├── behaviour-30s-buy.png ├── behaviour-30s-sell.png ├── behaviour-60s-buy.png ├── behaviour-60s-sell.png ├── behaviour-price.png ├── behaviour-up-100s-buy.png ├── behaviour-up-100s-sell.png ├── behaviour-up-10s-buy.png ├── behaviour-up-10s-sell.png ├── behaviour-up-30s-buy.png ├── behaviour-up-30s-sell.png ├── behaviour-up-60s-buy.png ├── behaviour-up-60s-sell.png ├── bidask-price-correlation.png ├── bidask-price-entropy.png ├── bidask-size-correlation.png ├── bidask-size-entropy.png ├── cnn_1_buy_mean_actions.png ├── cnn_1_buy_rewards.png ├── cnn_1_buy_trades_mean_actions.png ├── cnn_1_buy_trades_rewards.png ├── cnn_1_sell_mean_actions.png ├── cnn_1_sell_rewards.png ├── cnn_1_sell_trades_mean_actions.png ├── cnn_1_sell_trades_rewards.png ├── cnn_2_buy_mean_actions.png ├── cnn_2_buy_rewards.png ├── cnn_2_buy_trades_mean_actions.png ├── cnn_2_buy_trades_rewards.png ├── cnn_2_sell_mean_actions.png ├── cnn_2_sell_rewards.png ├── cnn_2_sell_trades_mean_actions.png ├── cnn_2_sell_trades_rewards.png ├── cnn_nn_1_buy_bidask_mean_actions.png ├── cnn_nn_1_buy_bidask_rewards.png ├── cnn_nn_1_buy_trades_mean_actions.png ├── cnn_nn_1_buy_trades_rewards.png ├── cnn_nn_1_sell_bidask_mean_actions.png ├── cnn_nn_1_sell_bidask_rewards.png ├── cnn_nn_1_sell_trades_mean_actions.png ├── cnn_nn_1_sell_trades_rewards.png ├── cnn_nn_2_buy_bidask_mean_actions.png ├── cnn_nn_2_buy_bidask_rewards.png ├── cnn_nn_2_buy_trades_mean_actions.png ├── cnn_nn_2_buy_trades_rewards.png ├── cnn_nn_2_sell_bidask_mean_actions.png ├── cnn_nn_2_sell_bidask_rewards.png ├── cnn_nn_2_sell_trades_mean_actions.png ├── cnn_nn_2_sell_trades_rewards.png ├── data-pipeline.png ├── data-trade-volume.png ├── data-volmap-cancelled.png ├── data-volmap-created.png ├── data-volmap-traded.png ├── discuss-rl-sv.png ├── dqn_hyperparameters.png ├── drl-pipeline.png ├── drl-qvalues.png ├── eval-limit-down.png ├── eval-limit-sine.png ├── evaluation-orderbook.png ├── features-bidask.png ├── kearns-frontier.png ├── kearns-return.png ├── kearns-std.png ├── lob-simple.png ├── ml-rl.png ├── ob-ba-max.png ├── ob-ba-min.png ├── 
ob-ba-pie-all.png ├── ob-ba-pie-cancelled.png ├── ob-ba-pie-created.png ├── ob-ba-pie-trades.png ├── ob-price-bars-rejected.png ├── ob-price-bars.png ├── ob-price.png ├── orderbook-gdax.png ├── q_1_10000_BUY_acc_rewards.png ├── q_1_10000_BUY_mean_actions.png ├── q_1_10000_BUY_mean_backtest.png ├── q_1_10000_BUY_mean_rewards.png ├── q_1_10000_BUY_rewards.png ├── q_1_10000_SELL_acc_rewards.png ├── q_1_10000_SELL_mean_actions.png ├── q_1_10000_SELL_mean_backtest.png ├── q_1_10000_SELL_mean_rewards.png ├── q_1_10000_SELL_rewards.png ├── q_2_10000_BUY_acc_rewards.png ├── q_2_10000_BUY_mean_actions.png ├── q_2_10000_BUY_mean_backtest.png ├── q_2_10000_BUY_mean_rewards.png ├── q_2_10000_BUY_rewards.png ├── q_2_10000_SELL_acc_rewards.png ├── q_2_10000_SELL_mean_actions.png ├── q_2_10000_SELL_mean_backtest.png ├── q_2_10000_SELL_mean_rewards.png ├── q_2_10000_SELL_rewards.png ├── rl-dqn-agent.png ├── rl-dqn.png ├── rl-env-overview.png ├── rl-environment.png ├── rl-overview.png ├── rl-pipeline.png ├── sample-down-price.png ├── sample-up-price.png ├── setup-actions.png ├── setup-cnn-output.png ├── setup-inventory.png ├── setup-limit-levels.png └── setup-time-horizon.png ├── notebooks ├── .DS_Store ├── analysis_average_price.ipynb ├── analysis_dqn_order_placement.ipynb ├── analysis_profit_vs_reward.ipynb ├── autoencoder.ipynb ├── img │ └── price-return.png ├── order_execution_behaviour.ipynb ├── order_execution_behaviour_artificial.ipynb ├── orderbook_features.ipynb └── understanding_events.ipynb ├── report ├── .DS_Store ├── README.txt ├── abstract.tex ├── chapter-1.tex ├── chapter-2.tex ├── chapter-3.tex ├── chapter-4.tex ├── chapter-5.tex ├── chapter-6.tex ├── chapter-7.tex ├── cover.jpg ├── cover.tex ├── cover │ ├── back.pdf │ ├── front.pdf │ ├── logo.pdf │ ├── logo_black.pdf │ └── logo_white.pdf ├── fonts │ ├── Apache_License.txt │ ├── Arial.afm │ ├── Arial.pfa │ ├── Arial.tfm │ ├── Arial.ttf │ ├── Arial_Bold.ttf │ ├── Arial_Bold_Italic.ttf │ ├── Arial_Italic.ttf │ ├── BookmanOldStyle-Bold.ttf │ ├── BookmanOldStyle-BoldItalic.ttf │ ├── BookmanOldStyle-Italic.ttf │ ├── BookmanOldStyle.ttf │ ├── CambriaMath.ttf │ ├── CourierNewPS-BoldItalicMT.ttf │ ├── CourierNewPS-BoldMT.ttf │ ├── CourierNewPS-ItalicMT.ttf │ ├── CourierNewPSMT.ttf │ ├── Georgia.ttf │ ├── Georgia_Bold.ttf │ ├── Georgia_Bold_Italic.ttf │ ├── Georgia_Italic.ttf │ ├── TUDelft-UltraLight.afm │ ├── TUDelft-UltraLight.pfa │ ├── TUDelft-UltraLight.tfm │ ├── TUDelft-UltraLight.ttf │ ├── Tahoma-Bold.ttf │ └── Tahoma.ttf ├── preface.tex ├── report.bib ├── report.tex ├── roboto.sty ├── tank.jpg ├── title.tex ├── tudelft-report.bst └── tudelft-report.cls ├── requirements.txt ├── setup.py └── strategy.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | notebooks/.ipynb_checkpoints 73 | 74 | # pyenv 75 | .python-version 76 | 77 | # celery beat schedule file 78 | celerybeat-schedule 79 | 80 | # SageMath parsed files 81 | *.sage.py 82 | 83 | # dotenv 84 | .env 85 | 86 | # virtualenv 87 | .venv 88 | venv/ 89 | ENV/ 90 | 91 | # Spyder project settings 92 | .spyderproject 93 | .spyproject 94 | 95 | # Rope project settings 96 | .ropeproject 97 | 98 | # mkdocs documentation 99 | /site 100 | 101 | # mypy 102 | .mypy_cache/ 103 | 104 | # project related 105 | node_modules 106 | models 107 | monitor 108 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Order placement with Reinforcement Learning 2 | 3 | CTC-Executioner is a tool that provides an on-demand execution/placement strategy for limit orders on cryptocurrency markets using Reinforcement Learning techniques. The underlying framework provides functionality to analyse order book data and derive features from it. These findings can then be used to dynamically update the decision-making process of the execution strategy. 4 | 5 | The methods used are based on a research project (master thesis) currently being conducted at TU Delft.
6 | 7 | ## Documentation 8 | 9 | Comprehensive documentation and concepts are explained in the [academic report](https://github.com/backender/ctc-executioner/blob/master/docs/report.pdf). 10 | 11 | For hands-on documentation and examples, see the [Wiki](https://github.com/backender/ctc-executioner/wiki). 12 | 13 | ## Usage 14 | 15 | Load orderbooks 16 | 17 | ```python 18 | orderbook = Orderbook() 19 | orderbook.loadFromEvents('data/example-ob-train.tsv') 20 | orderbook.summary() 21 | orderbook.plot(show_bidask=True) 22 | 23 | orderbook_test = Orderbook() 24 | orderbook_test.loadFromEvents('data/example-ob-test.tsv') 25 | orderbook_test.summary() 26 | ``` 27 | 28 | Create and configure environments 29 | 30 | ```python 31 | import gym_ctc_executioner 32 | env = gym.make("ctc-executioner-v0") 33 | env.setOrderbook(orderbook) 34 | 35 | env_test = gym.make("ctc-executioner-v0") 36 | env_test.setOrderbook(orderbook_test) 37 | ``` 38 | -------------------------------------------------------------------------------- /agent_baseline.py: -------------------------------------------------------------------------------- 1 | import gym 2 | from ctc_executioner.orderbook import Orderbook 3 | from baselines import deepq 4 | import gym_ctc_executioner 5 | import gym_ctc_marketmaker 6 | import numpy as np 7 | from ctc_executioner.agent_utils.action_plot_callback import ActionPlotCallback 8 | from ctc_executioner.agent_utils.live_plot_callback import LivePlotCallback 9 | from ctc_executioner.agent_utils.action_reward_log import ActionRewardLog 10 | from ctc_executioner.order_side import OrderSide 11 | from ctc_executioner.feature_type import FeatureType 12 | 13 | def main(): 14 | side = OrderSide.SELL 15 | dataset = "1" 16 | file_name_prefix = "cnn_"+str(dataset)+"_"+str(side) 17 | # Load orderbook 18 | orderbook = Orderbook() 19 | orderbook.loadFromEvents('data/events/ob-'+dataset+'-small-train.tsv') 20 | 21 | # import datetime 22 | # orderbook = Orderbook() 23 | # config = { 24 | # 'startPrice': 10000.0, 25 | # # 'endPrice': 9940.0, 26 | # 'priceFunction': lambda p0, s, samples: p0 + 10 * np.sin(2*np.pi*10 * (s/samples)), 27 | # 'levels': 50, 28 | # 'qtyPosition': 0.1, 29 | # 'startTime': datetime.datetime.now(), 30 | # 'duration': datetime.timedelta(seconds=1000), 31 | # 'interval': datetime.timedelta(seconds=1) 32 | # } 33 | # orderbook.createArtificial(config) 34 | # orderbook.summary() 35 | #orderbook.plot(show_bidask=True) 36 | 37 | env = gym.make("ctc-executioner-v0") 38 | #env = gym.make("ctc-marketmaker-v0") 39 | 40 | #liveplot = LivePlotCallback(nb_episodes=10000, avgwindow=10) 41 | #liveplot.plot() 42 | 43 | actionRewardLog = ActionRewardLog(file_name_prefix=file_name_prefix) 44 | 45 | env._configure( 46 | orderbook=orderbook, 47 | callbacks=[actionRewardLog],#liveplot, 48 | side=side, 49 | featureType=FeatureType.ORDERS 50 | ) 51 | print(env.observation_space.shape) 52 | model = deepq.models.cnn_to_mlp(convs=[(int(env.observation_space.shape[1]/2), int(env.observation_space.shape[1]/2), env.observation_space.shape[0])], hiddens=[200]) 53 | act = deepq.learn( 54 | env, 55 | q_func=model, 56 | lr=1e-3, 57 | max_timesteps=50000, 58 | buffer_size=50000, 59 | exploration_fraction=0.1, 60 | exploration_final_eps=0.05, 61 | #target_network_update_freq=1, 62 | print_freq=10, 63 | #callback=liveplot.baseline_callback 64 | ) 65 | print("Saving model as "+file_name_prefix+".pkl") 66 | act.save("models/"+file_name_prefix+".pkl") 67 | 68 | 69 | if __name__ == '__main__': 70 | main() 71 | 
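# A rough sketch (not part of the original script) of the exploration schedule
# implied by the deepq.learn() arguments above, assuming OpenAI baselines
# anneals epsilon linearly over exploration_fraction * max_timesteps steps
# (its LinearSchedule behaviour): epsilon starts at 1.0 and reaches
# exploration_final_eps after the first 5000 of the 50000 timesteps,
# then stays there.
def epsilon_at(step, max_timesteps=50000, exploration_fraction=0.1, final_eps=0.05):
    schedule_steps = int(exploration_fraction * max_timesteps)
    fraction = min(float(step) / schedule_steps, 1.0)
    return 1.0 + fraction * (final_eps - 1.0)
# e.g. epsilon_at(0) == 1.0, epsilon_at(2500) == 0.525, epsilon_at(5000) == 0.05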
-------------------------------------------------------------------------------- /agent_baseline_backtest.py: -------------------------------------------------------------------------------- 1 | import gym 2 | from ctc_executioner.orderbook import Orderbook 3 | from baselines import deepq 4 | import gym_ctc_executioner 5 | import gym_ctc_marketmaker 6 | import numpy as np 7 | from ctc_executioner.agent_utils.action_plot_callback import ActionPlotCallback 8 | from ctc_executioner.agent_utils.live_plot_callback import LivePlotCallback 9 | from ctc_executioner.agent_utils.action_reward_log import ActionRewardLog 10 | from ctc_executioner.order_side import OrderSide 11 | 12 | def main(): 13 | epochs = 1000 14 | side = OrderSide.SELL 15 | dataset = "2" 16 | file_name_prefix = "cnn_"+str(dataset)+"_"+str(side) 17 | # Load orderbook 18 | orderbook = Orderbook() 19 | orderbook.loadFromEvents('data/events/ob-'+dataset+'-small-test.tsv') 20 | 21 | env = gym.make("ctc-executioner-v0") 22 | #env = gym.make("ctc-marketmaker-v0") 23 | 24 | #liveplot = LivePlotCallback(nb_episodes=10000, avgwindow=10) 25 | #liveplot.plot() 26 | #actionRewardLog = ActionRewardLog(file_name_prefix=file_name_prefix+'_backtest') 27 | 28 | env._configure( 29 | orderbook=orderbook, 30 | callbacks=[],#[liveplot],#[actionRewardLog], 31 | side=side 32 | ) 33 | 34 | act = deepq.load("models/"+file_name_prefix+".pkl") 35 | rewards = [] 36 | episode = 0 37 | for _ in range(epochs): 38 | episode += 1 39 | obs, done = env.reset(), False 40 | episode_rew = 0 41 | while not done: 42 | env.render() 43 | obs, rew, done, _ = env.step(act(obs[None])[0]) 44 | episode_rew += rew 45 | print("Episode "+str(episode)+" reward", episode_rew) 46 | rewards.append(episode_rew) 47 | print(rewards) 48 | print("Mean reward: " + str(np.mean(rewards))) 49 | 50 | if __name__ == '__main__': 51 | main() 52 | -------------------------------------------------------------------------------- /agent_dqn.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import numpy as np 3 | from ctc_executioner.order_side import OrderSide 4 | from ctc_executioner.orderbook import Orderbook 5 | from ctc_executioner.agent_utils.ui import UI 6 | from keras.models import Sequential 7 | from keras.layers import Dense, Activation, Flatten 8 | from keras import optimizers 9 | import random 10 | from collections import deque 11 | import gym 12 | #logging.basicConfig(level=logging.DEBUG) 13 | 14 | class AgentDQN: 15 | def __init__(self, env): #, state_size, action_size): 16 | # self.state_size = state_size 17 | self.env = env 18 | self.actions = env.levels 19 | self.action_size = len(env.levels) 20 | self.memory = deque(maxlen=2000) 21 | self.gamma = 0.95 # discount rate 22 | self.epsilon = 1.0 # exploration rate 23 | self.epsilon_min = 0.01 24 | self.epsilon_decay = 0.995 25 | self.learning_rate = 0.001 26 | self.model = self._build_model() 27 | self.batch_size = 32 #len(self.env.T) * (len(self.env.I) - 1) 28 | 29 | def _build_model(self): 30 | # Neural Net for Deep-Q learning Model 31 | model = Sequential() 32 | model.add(Flatten(input_shape=self.env.observation_space.shape)) 33 | model.add(Dense(self.env.bookSize)) 34 | model.add(Dense(self.action_size)) 35 | model.compile(optimizers.SGD(lr=.1), "mae") 36 | model.summary() 37 | return model 38 | 39 | def remember(self, state, action, reward, next_state, done): 40 | self.memory.append((state, action, reward, next_state, done)) 41 | 42 | def act(self, state): 43 | if 
np.random.rand() <= self.epsilon: 44 | return random.choice(range(self.action_size)) 45 | return self.guess(state) 46 | 47 | def guess(self, state): 48 | act_values = self.model.predict(state) 49 | # print(act_values) 50 | action = np.argmax(act_values[0]) 51 | return action 52 | 53 | def replay(self): 54 | minibatch = random.sample(self.memory, self.batch_size) 55 | for state, action, reward, next_state, done in minibatch: 56 | target = reward 57 | #print("reward: " + str(reward)) 58 | if not done: 59 | #print("not done") 60 | #rewards_next = self.model.predict(next_state) 61 | #print("state_next: " + str(next_state)) 62 | #print('rewards_next ' + str(rewards_next)) 63 | #print('reward_next ' + str(np.amax(self.model.predict(next_state)[0]))) 64 | target = reward + self.gamma * \ 65 | np.amax(self.model.predict(next_state)[0]) 66 | 67 | target_f = self.model.predict(state) 68 | #action_index = self.actions[action] 69 | target_f[0][action] = target 70 | history = self.model.fit(state, target_f, epochs=1, verbose=0) 71 | print('loss: ' + str(history.history['loss'])) 72 | if self.epsilon > self.epsilon_min: 73 | self.epsilon *= self.epsilon_decay 74 | 75 | def train(self, episodes=1, force_execution=False): 76 | for episode in range(int(episodes)): 77 | for t in self.env.T: 78 | logging.info("\n"+"t=="+str(t)) 79 | for i in self.env.I[1:]: 80 | logging.info(" i=="+str(i)) 81 | #print("Action run " + str((t, i))) 82 | state = self.env._reset(t, i) 83 | action = self.act(state) 84 | state_next, reward, done, _ = self.env.step(action) 85 | self.remember(state, action, reward, state_next, done) 86 | while not done: 87 | #print("Action update") 88 | state = state_next 89 | action = self.act(state) 90 | state_next, reward, done, _ = self.env.step(action) 91 | self.remember(state, action, reward, state_next, done) 92 | 93 | # train the agent with the experience of the episode 94 | print("\nREPLAY\n") 95 | self.replay() 96 | 97 | 98 | def backtest(self, episodes=1, fixed_a=None): 99 | Ms = [] 100 | t = self.env.T[-1] 101 | i = self.env.I[-1] 102 | for episode in range(int(episodes)): 103 | actions = [] 104 | state = self.env._reset(t, i) 105 | action = self.guess(state) 106 | state_next, reward, done, _ = self.env.step(action) 107 | actions.append(action) 108 | midPrice = self.env.execution.getReferencePrice() 109 | while not done: 110 | action_next = self.guess(state_next) 111 | # print("Q action for next state " + str(state_next) + ": " + str(a_next)) 112 | i_next = self.env.actionState.getI() 113 | t_next = self.env.actionState.getT() 114 | print("t: " + str(t_next)) 115 | print("i: " + str(i_next)) 116 | print("Action: " + str(action_next)) 117 | actions.append(action_next) 118 | #print("Action transition " + str((t, i)) + " -> " + str(aiState_next) + " with " + str(runtime_next) + "s runtime.") 119 | state_next, reward, done, _ = self.env.step(action_next) 120 | #print(action) 121 | 122 | price = self.env.execution.getAvgPrice() 123 | if self.env.execution.getOrder().getSide() == OrderSide.BUY: 124 | profit = midPrice - price 125 | else: 126 | profit = price - midPrice 127 | Ms.append([state, midPrice, actions, price, profit]) 128 | return Ms 129 | 130 | def run(self, epochs_train=1, epochs_test=10): 131 | if epochs_train > 0: 132 | agent.train(episodes=epochs_train) 133 | M = agent.backtest(episodes=epochs_test) 134 | M = np.array(M) 135 | return np.mean(M[0:, 4]) 136 | 137 | def simulate(self, epochs_train=1, epochs_test=10, interval=100): 138 | UI.animate(lambda : self.run(epochs_train, 
epochs_test), interval=interval) 139 | 140 | 141 | # Load orderbook 142 | orderbook = Orderbook() 143 | orderbook.loadFromEvents('data/events/ob-1.tsv') 144 | orderbook_test = orderbook 145 | #orderbook.plot() 146 | 147 | import gym_ctc_executioner 148 | env = gym.make("ctc-executioner-v0") 149 | env.configure(orderbook) 150 | 151 | agent = AgentDQN(env=env) 152 | agent.simulate() 153 | #agent.train(10) 154 | -------------------------------------------------------------------------------- /agent_keras_rl.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import numpy as np 3 | 4 | from rl.agents.dqn import DQNAgent 5 | from rl.policy import EpsGreedyQPolicy 6 | from rl.memory import SequentialMemory 7 | 8 | from ctc_executioner.order_side import OrderSide 9 | from ctc_executioner.orderbook import Orderbook 10 | from ctc_executioner.agent_utils.action_plot_callback import ActionPlotCallback 11 | from ctc_executioner.agent_utils.live_plot_callback import LivePlotCallback 12 | 13 | from keras.models import Sequential 14 | from keras.layers import Dense, Activation, Flatten, LSTM, Reshape 15 | from keras.optimizers import Adam, SGD 16 | from keras import regularizers 17 | from keras import optimizers 18 | from collections import deque 19 | import gym 20 | 21 | #logging.basicConfig(level=logging.INFO) 22 | 23 | from rl.callbacks import Callback 24 | class EpsDecayCallback(Callback): 25 | def __init__(self, eps_policy, decay_rate=0.95): 26 | self.eps_policy = eps_policy 27 | self.decay_rate = decay_rate 28 | def on_episode_begin(self, episode, logs={}): 29 | self.eps_policy.eps *= self.decay_rate 30 | print('eps = %s' % self.eps_policy.eps) 31 | 32 | def createModel(): 33 | # Neural Net for Deep-Q learning Model 34 | model = Sequential() 35 | model.add(Reshape((env.observation_space.shape[0], env.observation_space.shape[1]*2), input_shape=(1, 1)+env.observation_space.shape)) 36 | #model.add(Flatten(input_shape=(env.observation_space.shape[0], env.observation_space.shape[1], env.observation_space.shape[2]))) 37 | model.add(LSTM(512, activation='tanh', recurrent_activation='tanh')) 38 | #model.add(Dense(4*env.bookSize*env.lookback)) 39 | #model.add(Dense(env.bookSize*env.lookback))#, kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))) 40 | #model.add(Dense(4*env.bookSize)) 41 | #model.add(Activation('relu')) 42 | model.add(Dense(len(env.levels))) 43 | model.add(Activation('linear')) 44 | #model.compile(optimizers.SGD(lr=.1), "mae") 45 | model.summary() 46 | return model 47 | 48 | def loadModel(name): 49 | # load json and create model 50 | from keras.models import model_from_json 51 | json_file = open(name + '.json', 'r') 52 | loaded_model_json = json_file.read() 53 | json_file.close() 54 | model = model_from_json(loaded_model_json) 55 | model.load_weights(name + '.h5') 56 | print('Loaded model "' + name + '" from disk') 57 | return model 58 | 59 | def saveModel(model, name): 60 | # serialize model to JSON 61 | model_json = model.to_json() 62 | with open(name + '.json', "w") as json_file: 63 | json_file.write(model_json) 64 | # serialize weights to HDF5 65 | model.save_weights(name + '.h5') 66 | print('Saved model "' + name + '" to disk') 67 | 68 | 69 | 70 | # # Load orderbook 71 | # orderbook = Orderbook() 72 | # orderbook.loadFromEvents('data/events/ob-train.tsv') 73 | # orderbook_test = orderbook 74 | # orderbook.summary() 75 | 76 | import datetime 77 | orderbook = Orderbook() 78 | config = { 79 | 
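# A sine-wave price path for the artificial order book: judging from the
# priceFunction lambda below, the mid price oscillates with amplitude 10 around
# startPrice and completes 10 full periods over the 1000 one-second samples.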
'startPrice': 10000.0, 80 | # 'endPrice': 9940.0, 81 | 'priceFunction': lambda p0, s, samples: p0 + 10 * np.sin(2*np.pi*10 * (s/samples)), 82 | 'levels': 50, 83 | 'qtyPosition': 0.1, 84 | 'startTime': datetime.datetime.now(), 85 | 'duration': datetime.timedelta(seconds=1000), 86 | 'interval': datetime.timedelta(seconds=1) 87 | } 88 | orderbook.createArtificial(config) 89 | orderbook.summary() 90 | #orderbook.plot(show_bidask=True) 91 | 92 | 93 | import gym_ctc_executioner 94 | env = gym.make("ctc-executioner-v0") 95 | import gym_ctc_marketmaker 96 | #env = gym.make("ctc-marketmaker-v0") 97 | env.setOrderbook(orderbook) 98 | 99 | #model = loadModel(name='model-sell-artificial-2') 100 | model = loadModel(name='model-sell-artificial-sine') 101 | model = createModel() 102 | nrTrain = 100000 103 | nrTest = 10 104 | 105 | policy = EpsGreedyQPolicy() 106 | memory = SequentialMemory(limit=5000, window_length=1) 107 | # nb_steps_warmup: the default value for that in the DQN OpenAI baselines implementation is 1000 108 | dqn = DQNAgent(model=model, nb_actions=len(env.levels), memory=memory, nb_steps_warmup=100, target_model_update=1e-2, policy=policy) 109 | dqn.compile(Adam(lr=1e-3), metrics=['mae']) 110 | 111 | # cbs_train = [] 112 | # cbs_train = [LivePlotCallback(nb_episodes=20000, avgwindow=20)] 113 | # dqn.fit(env, nb_steps=nrTrain, visualize=True, verbose=2, callbacks=cbs_train) 114 | # saveModel(model=model, name='model-sell-artificial-sine') 115 | 116 | cbs_train = [] 117 | cbs_test = [] 118 | cbs_test = [ActionPlotCallback(nb_episodes=nrTest)] 119 | dqn.test(env, nb_episodes=nrTest, visualize=True, verbose=2, callbacks=cbs_test) 120 | -------------------------------------------------------------------------------- /agent_qlearn.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import logging 3 | import numpy as np 4 | from ctc_executioner.action_space_env import ActionSpace 5 | from ctc_executioner.action_state import ActionState 6 | from ctc_executioner.order_side import OrderSide 7 | from ctc_executioner.qlearn import QLearn 8 | from ctc_executioner.orderbook import Orderbook 9 | from ctc_executioner.agent_utils.ui import UI 10 | 11 | class AgentQlearn: 12 | def __init__(self, env): 13 | self.env = env 14 | self.levels = levels 15 | self.ai = QLearn(self.levels) 16 | self.orderbookIndex = None 17 | self.logRewards = [] 18 | self.logActions = [] 19 | 20 | def update(self, t, i, force_execution=False): 21 | aiState = ActionState(t, i) 22 | a = self.ai.chooseAction(aiState) 23 | self.logActions.append(a) 24 | # print('Random action: ' + str(level) + ' for state: ' + str(aiState)) 25 | action = self.env.createAction(level=a, state=aiState, force_execution=force_execution, orderbookIndex=self.orderbookIndex) 26 | action.run(self.env.orderbook) 27 | i_next = self.env.determineNextInventory(action) 28 | t_next = self.env.determineNextTime(t) 29 | reward = action.getReward() 30 | self.logRewards.append(reward) 31 | state_next = ActionState(action.getState().getT(), action.getState().getI(), action.getState().getMarket()) 32 | state_next.setT(t_next) 33 | state_next.setI(i_next) 34 | #print("Reward " + str(reward) + ": " + str(action.getState()) + " with " + str(action.getA()) + " -> " + str(state_next)) 35 | self.ai.learn( 36 | state1=action.getState(), 37 | action1=action.getA(), 38 | reward=reward, 39 | state2=state_next 40 | ) 41 | return (t_next, i_next) 42 | 43 | 44 | def train(self, episodes=1, force_execution=False): 45 | 
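# One training episode sweeps the whole (time, inventory) grid: for every
# horizon t in T and every inventory i in I an action is run, and the
# remaining inventory is re-run until it reaches zero.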
self.logRewards = [] 46 | self.logActions = [] 47 | for episode in range(int(episodes)): 48 | _, self.orderbookIndex = self.env.getRandomOrderbookState() 49 | for t in self.env.T: 50 | logging.info("\n"+"t=="+str(t)) 51 | for i in self.env.I: 52 | self.orderbookIndex = self.orderbookIndex + 1 53 | logging.info(" i=="+str(i)) 54 | logging.info("Action run " + str((t, i))) 55 | (t_next, i_next) = self.update(t, i, force_execution) 56 | while i_next != 0: 57 | if force_execution: 58 | raise Exception("Enforced execution left " + str(i_next) + " unexecuted.") 59 | logging.info("Action transition " + str((t, i)) + " -> " + str((t_next, i_next))) 60 | (t_next, i_next) = self.update(t_next, i_next, force_execution) 61 | 62 | def backtest(self, q=None, episodes=10, average=False, fixed_a=None): 63 | Ms = [] 64 | for _ in range(episodes): 65 | actions = [] 66 | t = self.env.T[-1] 67 | i = self.env.I[-1] 68 | state = ActionState(t, i, {}) 69 | #print(state) 70 | if fixed_a is not None: 71 | a = fixed_a 72 | else: 73 | a = self.ai.getQAction(state, 0) 74 | 75 | actions.append(a) 76 | action = self.env.createAction(level=a, state=state, force_execution=True) 77 | midPrice = action.getReferencePrice() 78 | 79 | #print("before...") 80 | #print(action) 81 | action.run(self.env.orderbook) 82 | #print("after...") 83 | #print(action) 84 | i_next = self.env.determineNextInventory(action) 85 | t_next = self.env.determineNextTime(t) 86 | # print("i_next: " + str(i_next)) 87 | while i_next != 0: 88 | state_next = ActionState(t_next, i_next, {}) 89 | if fixed_a is not None: 90 | a_next = fixed_a 91 | else: 92 | a_next = self.ai.getQAction(state_next, 0) 93 | 94 | actions.append(a_next) 95 | #print("Action transition " + str((t, i)) + " -> " + str(aiState_next) + " with " + str(runtime_next) + "s runtime.") 96 | 97 | runtime_next = self.env.determineRuntime(t_next) 98 | action.setState(state_next) 99 | action.update(a_next, runtime_next) 100 | action.run(self.env.orderbook) 101 | #print(action) 102 | i_next = self.env.determineNextInventory(action) 103 | t_next = self.env.determineNextTime(t_next) 104 | 105 | price = action.getAvgPrice() 106 | if action.getOrder().getSide() == OrderSide.BUY: 107 | profit = midPrice - price 108 | else: 109 | profit = price - midPrice 110 | Ms.append([state, midPrice, actions, price, profit]) 111 | if not average: 112 | return Ms 113 | return self.averageBacktest(Ms) 114 | 115 | def averageBacktest(self, M): 116 | # Average states within M 117 | N = [] 118 | observed = [] 119 | for x in M: 120 | state = x[0] 121 | if state in observed: 122 | continue 123 | observed.append(state) 124 | paid = [] 125 | reward = [] 126 | for y in M: 127 | if y[0] == state: 128 | paid.append(y[3]) 129 | reward.append(y[4]) 130 | N.append([state, x[1], x[2], np.average(paid), np.average(reward)]) 131 | return N 132 | 133 | def run(self, epochs_train=1, epochs_test=10): 134 | if epochs_train > 0: 135 | agent.train(episodes=epochs_train) 136 | rewards = agent.logRewards 137 | actions = agent.logActions 138 | #print(actions) 139 | return np.mean(rewards) 140 | 141 | if epochs_test > 0: 142 | M = agent.backtest(episodes=epochs_test, average=False) 143 | M = np.array(M) 144 | return np.mean(M[0:, 4]) 145 | 146 | def simulate(self, epochs_train=1, epochs_test=10, interval=100): 147 | UI.animate(lambda : self.run(epochs_train, epochs_test), interval=interval, title="Mean backtest reward") 148 | 149 | 150 | def _generate_Sequence(min, max, step): 151 | """ Generate sequence (that unlike xrange supports float) 
152 | 153 | max: defines the sequence maximum 154 | step: defines the interval 155 | """ 156 | i = min 157 | I = [] 158 | while i <= max: 159 | I.append(i) 160 | i = i + step 161 | return I 162 | 163 | 164 | side = OrderSide.SELL 165 | dataset = "2" 166 | name = "experiments/q_"+dataset+"_10000_" + str(side) 167 | levels = _generate_Sequence(min=-50, max=50, step=1) 168 | ai = None 169 | T = _generate_Sequence(min=0, max=100, step=10) 170 | T_test = _generate_Sequence(min=0, max=100, step=10) 171 | I = _generate_Sequence(min=0, max=1, step=0.1) 172 | 173 | # Load orderbook 174 | cols = ["ts", "seq", "size", "price", "is_bid", "is_trade", "ttype"] 175 | import pandas as pd 176 | events = pd.read_table('data/events/ob-'+dataset+'-small-train.tsv', sep='\t', names=cols, index_col="seq") 177 | d = Orderbook.generateDictFromEvents(events) 178 | orderbook = Orderbook() 179 | orderbook.loadFromDict(d) 180 | # clean first n states (due to lack of bids and asks) 181 | print("#States: " + str(len(orderbook.states))) 182 | 183 | 184 | events_test = pd.read_table('data/events/ob-'+dataset+'-small-test.tsv', sep='\t', names=cols, index_col="seq") 185 | d_test = Orderbook.generateDictFromEvents(events_test) 186 | orderbook_test = Orderbook() 187 | orderbook_test.loadFromDict(d_test) 188 | 189 | for i in range(25): 190 | orderbook.states.pop(0) 191 | orderbook_test.states.pop(0) 192 | del d[list(d.keys())[0]] 193 | del d_test[list(d_test.keys())[0]] 194 | 195 | #orderbook.plot() 196 | #orderbook_test.plot() 197 | 198 | actionSpace = ActionSpace(orderbook, side, T, I, levels=levels) 199 | actionSpace_test = ActionSpace(orderbook_test, side, T_test, I, levels=levels) 200 | agent = AgentQlearn(actionSpace) 201 | 202 | # TRAIN 203 | # actions = [] 204 | # rewards = [] 205 | # print("Learn " + name) 206 | # for i in range(6000): 207 | # print("Epoch: " + str(i)) 208 | # try: 209 | # agent.train(episodes=1) 210 | # actions.append(agent.logActions) 211 | # rewards.append(agent.logRewards) 212 | # except: 213 | # print("Index error") 214 | # 215 | # np.save(name+'.npy', agent.ai.q) 216 | # 217 | # with open(name + '_actions', 'wb') as fp: 218 | # pickle.dump(actions, fp) 219 | # with open(name + '_rewards', 'wb') as fp: 220 | # pickle.dump(rewards, fp) 221 | 222 | 223 | #agent.simulate(epochs_train=1, epochs_test=0) 224 | 225 | # TEST 226 | agent_test = AgentQlearn(actionSpace_test) 227 | q = np.load(name+'.npy').item() 228 | agent_test.ai.q = q 229 | 230 | print("Test " + name) 231 | backtest = [] 232 | for i in range(1000): 233 | print("Test: " + str(i)) 234 | #try: 235 | M = agent.backtest(episodes=1, average=False, fixed_a=0) 236 | M = np.array(M) 237 | reward = np.mean(M[0:, 4]) 238 | #print(reward) 239 | backtest.append(reward) 240 | #except: 241 | # print("Index error") 242 | 243 | print(np.mean(backtest)) 244 | with open(name + '_backtest', 'wb') as fp: 245 | pickle.dump(backtest, fp) 246 | 247 | #agent_test.simulate(epochs_train=0, epochs_test=10) 248 | -------------------------------------------------------------------------------- /ctc_executioner/action.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from ctc_executioner.order_side import OrderSide 3 | from ctc_executioner.order_type import OrderType 4 | from ctc_executioner.match_engine import MatchEngine 5 | import numpy as np 6 | 7 | class Action(object): 8 | 9 | def __init__(self, a, runtime): 10 | self.a = a 11 | self.runtime = runtime 12 | self.order = None 13 | self.trades = [] # filled 
order 14 | self.orderbookState = None 15 | self.orderbookIndex = None 16 | self.state = None 17 | self.referencePrice = None 18 | 19 | def __str__(self): 20 | s = '----------ACTION----------\n' 21 | s = s + 'Level: ' + str(self.a) + '\n' 22 | s = s + 'Runtime: ' + str(self.runtime) + '\n' 23 | s = s + 'State: ' + str(self.state) + '\n' 24 | s = s + 'Order: ' + str(self.order) + '\n' 25 | s = s + 'Reference Price: ' + str(self.referencePrice) + '\n' 26 | s = s + 'Book index: ' + str(self.orderbookIndex) + '\n' 27 | s = s + 'Book state: \n' + str(self.orderbookState) + '\n' 28 | s = s + '----------ACTION----------\n' 29 | return s 30 | 31 | def __repr__(self): 32 | return self.__str__() 33 | 34 | def getA(self): 35 | return self.a 36 | 37 | def setA(self, a): 38 | self.a = a 39 | 40 | def getRuntime(self): 41 | return self.runtime 42 | 43 | def setRuntime(self, runtime): 44 | self.runtime = runtime 45 | 46 | def getState(self): 47 | return self.state 48 | 49 | def setState(self, state): 50 | self.state = state 51 | 52 | def setOrderbookState(self, state): 53 | self.orderbookState = state 54 | 55 | def getOrderbookState(self): 56 | return self.orderbookState 57 | 58 | def setOrderbookIndex(self, index): 59 | self.orderbookIndex = index 60 | 61 | def getOrderbookIndex(self): 62 | return self.orderbookIndex 63 | 64 | def getReferencePrice(self): 65 | return self.referencePrice 66 | 67 | def setReferencePrice(self, referencePrice): 68 | self.referencePrice = referencePrice 69 | 70 | def getOrder(self): 71 | return self.order 72 | 73 | def setOrder(self, order): 74 | self.order = order 75 | 76 | def getTrades(self): 77 | return self.trades 78 | 79 | def setTrades(self, trades): 80 | self.trades = trades 81 | 82 | def getAvgPrice(self): 83 | return self.calculateAvgPrice(self.getTrades()) 84 | 85 | def calculateAvgPrice(self, trades): 86 | """Returns the average price paid for the executed order.""" 87 | if self.calculateQtyExecuted(trades) == 0: 88 | return 0.0 89 | 90 | price = 0.0 91 | for trade in trades: 92 | price = price + trade.getCty() * trade.getPrice() 93 | return price / self.calculateQtyExecuted(trades) 94 | 95 | def getQtyExecuted(self): 96 | return self.calculateQtyExecuted(self.getTrades()) 97 | 98 | def calculateQtyExecuted(self, trades): 99 | qty = 0.0 100 | for trade in trades: 101 | qty = qty + trade.getCty() 102 | return qty 103 | 104 | def getQtyNotExecuted(self): 105 | return self.getOrder().getCty() - self.getQtyExecuted() 106 | 107 | def isFilled(self): 108 | return self.getQtyExecuted() == self.order.getCty() 109 | 110 | def getTotalPaidReceived(self): 111 | return self.getAvgPrice() * self.getQtyExecuted() 112 | 113 | def getReward(self): 114 | return self.calculateReward(self.getTrades()) 115 | 116 | @DeprecationWarning 117 | def getValueAvg(self): 118 | return self.getReward() 119 | 120 | def calculateReward(self, trades): 121 | """Retuns difference of the average paid price to bid/ask-mid price. 
122 | The higher, the better. 123 | For BUY: total paid at mid price - total paid 124 | For SELL: total received - total received at mid price 125 | """ 126 | # In case of no executed trade, the reward is zero 127 | if self.calculateQtyExecuted(trades) == 0.0: 128 | return 0.0 129 | 130 | if self.getOrder().getSide() == OrderSide.BUY: 131 | reward = self.getReferencePrice() - self.calculateAvgPrice(trades) 132 | else: 133 | reward = self.calculateAvgPrice(trades) - self.getReferencePrice() 134 | 135 | return reward 136 | 137 | def calculateRewardWeighted(self, trades, inventory): 138 | reward = self.calculateReward(trades) 139 | if reward == 0.0: 140 | return reward, 0.0 141 | 142 | volumeExecuted = self.calculateQtyExecuted(trades) 143 | volumeRatio = volumeExecuted / inventory 144 | rewardWeighted = reward * volumeRatio 145 | return rewardWeighted, volumeRatio 146 | 147 | def getPcFilled(self): 148 | return 100 * (self.getQtyExecuted() / self.getOrder().getCty()) 149 | 150 | def update(self, a, runtime): 151 | """Updates an action to be ready for the next run.""" 152 | if runtime <= 0.0: 153 | price = None 154 | self.getOrder().setType(OrderType.MARKET) 155 | else: 156 | price = self.getOrderbookState().getPriceAtLevel(self.getOrder().getSide(), a) 157 | 158 | self.getOrder().setPrice(price) 159 | self.getOrder().setCty(self.getQtyNotExecuted()) 160 | self.setRuntime(runtime) 161 | return self 162 | 163 | def getMatchEngine(self, orderbook): 164 | return MatchEngine(orderbook, self.getOrderbookIndex()) 165 | 166 | def run(self, orderbook): 167 | """Runs action using match engine. 168 | The orderbook is provided and used in the match engine along with 169 | the previously determined index where the action should start matching. 170 | The matching process returns the trades and the remaining quantity 171 | along with the index at which the matching stopped. 172 | The action gets updated with those values accordingly such that it can 173 | be evaluated or run over again (e.g. with a new runtime). 174 | """ 175 | matchEngine = self.getMatchEngine(orderbook) 176 | counterTrades, qtyRemain, index = matchEngine.matchOrder(self.getOrder(), self.getRuntime()) 177 | self.setTrades(self.getTrades() + counterTrades) # appends trades! 178 | #self.setTrades(counterTrades) # only current trades! 179 | self.setOrderbookIndex(index=index) 180 | self.setOrderbookState(orderbook.getState(index)) 181 | return self, counterTrades 182 | -------------------------------------------------------------------------------- /ctc_executioner/action_space.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import numpy as np 3 | from ctc_executioner.action import Action 4 | from ctc_executioner.order import Order 5 | from ctc_executioner.order_type import OrderType 6 | from ctc_executioner.order_side import OrderSide 7 | from ctc_executioner.qlearn import QLearn 8 | from ctc_executioner.action_state import ActionState 9 | 10 | 11 | class ActionSpace(object): 12 | """DEPRECATED: use ctc-executioner-v0 instead. 13 | 14 | This class still contains some logic which was moved to the agent. 
15 | """ 16 | 17 | def __init__(self, orderbook, side, T, I, ai=None, levels=None): 18 | self.orderbook = orderbook 19 | self.side = side 20 | self.levels = levels 21 | if not ai: 22 | ai = QLearn(self.levels) # levels are our qlearn actions 23 | self.ai = ai 24 | self.T = T 25 | self.I = I 26 | 27 | def getRandomOrderbookState(self): 28 | return self.orderbook.getRandomState(max(self.T)) 29 | 30 | def createAction(self, level, state, orderbookIndex=None, force_execution=False): 31 | # Determines whether to run and force execution of given t, or if 32 | # segmentation of t into multiple runtimes is allowed. 33 | if force_execution: 34 | runtime = state.getT() 35 | ot = OrderType.LIMIT_T_MARKET 36 | else: 37 | runtime = self.determineRuntime(state.getT()) 38 | ot = OrderType.LIMIT 39 | 40 | if orderbookIndex is None: 41 | orderbookState, orderbookIndex = self.getRandomOrderbookState() 42 | else: 43 | orderbookState = self.orderbook.getState(orderbookIndex) 44 | 45 | if runtime <= 0.0 or level is None: 46 | price = None 47 | ot = OrderType.MARKET 48 | else: 49 | price = orderbookState.getPriceAtLevel(self.side, level) 50 | 51 | order = Order( 52 | orderType=ot, 53 | orderSide=self.side, 54 | cty=state.getI(), 55 | price=price 56 | ) 57 | action = Action(a=level, runtime=runtime) 58 | action.setState(state) 59 | action.setOrder(order) 60 | action.setOrderbookState(orderbookState) 61 | action.setOrderbookIndex(orderbookIndex) 62 | action.setReferencePrice(orderbookState.getBestAsk()) 63 | return action 64 | 65 | def updateAction(self, action, level, state, orderbookIndex=None, force_execution=False): 66 | if force_execution: 67 | runtime = state.getT() 68 | ot = OrderType.LIMIT_T_MARKET 69 | else: 70 | runtime = self.determineRuntime(state.getT()) 71 | ot = OrderType.LIMIT 72 | 73 | if orderbookIndex is not None: 74 | orderbookState = self.orderbook.getState(orderbookIndex) 75 | action.setOrderbookState(orderbookState) 76 | action.setOrderbookIndex(orderbookIndex) 77 | 78 | if runtime <= 0.0 or level is None: 79 | price = None 80 | ot = OrderType.MARKET 81 | else: 82 | price = action.getOrderbookState().getPriceAtLevel(self.side, level) 83 | 84 | order = Order( 85 | orderType=ot, 86 | orderSide=self.side, 87 | cty=state.getI(), 88 | price=price 89 | ) 90 | action.setState(state) 91 | action.setOrder(order) 92 | return action 93 | 94 | def createActions(self, runtime, qty, force_execution=False): 95 | actions = [] 96 | for level in self.levels: 97 | actions.append(self.createAction(level, runtime, qty, force_execution)) 98 | return actions 99 | 100 | def determineBestAction(self, actions): 101 | bestAction = None 102 | for action in actions: 103 | if not bestAction: 104 | bestAction = action 105 | continue 106 | if action.getReward() < bestAction.getReward(): 107 | bestAction = action 108 | return bestAction 109 | 110 | def determineRuntime(self, t): 111 | if t != 0: 112 | T_index = self.T.index(t) 113 | runtime = self.T[T_index] - self.T[T_index - 1] 114 | else: 115 | runtime = t 116 | return runtime 117 | 118 | def determineNextTime(self, t): 119 | if t > 0: 120 | t_next = self.T[self.T.index(t) - 1] 121 | else: 122 | t_next = t 123 | 124 | logging.info('Next timestep for action: ' + str(t_next)) 125 | return t_next 126 | 127 | def determineNextInventory(self, action): 128 | qty_remaining = action.getQtyNotExecuted() 129 | 130 | # TODO: Working with floats requires such an ugly threshold 131 | if qty_remaining > 0.0000001: 132 | # Approximate next closest inventory given remaining and I 133 | 
i_next = min([0.0] + self.I, key=lambda x: abs(x - qty_remaining)) 134 | logging.info('Qty remain: ' + str(qty_remaining) 135 | + ' -> inventory: ' + str(qty_remaining) 136 | + ' -> next i: ' + str(i_next)) 137 | else: 138 | i_next = 0.0 139 | 140 | logging.info('Next inventory for action: ' + str(i_next)) 141 | return i_next 142 | 143 | def update(self, t, i, force_execution=False): 144 | aiState = ActionState(t, i) 145 | a = self.ai.chooseAction(aiState) 146 | # print('Random action: ' + str(level) + ' for state: ' + str(aiState)) 147 | action = self.createAction(level=a, state=aiState, force_execution=force_execution) 148 | action, counterTrades = action.run(self.orderbook) 149 | i_next = self.determineNextInventory(action) 150 | t_next = self.determineNextTime(t) 151 | reward = action.getReward() 152 | # reward = action.getValueExecuted() 153 | # reward = action.getTestReward() 154 | state_next = ActionState(action.getState().getT(), action.getState().getI(), action.getState().getMarket()) 155 | state_next.setT(t_next) 156 | state_next.setI(i_next) 157 | #print("Reward " + str(reward) + ": " + str(action.getState()) + " with " + str(action.getA()) + " -> " + str(state_next)) 158 | self.ai.learn( 159 | state1=action.getState(), 160 | action1=action.getA(), 161 | reward=reward, 162 | state2=state_next 163 | ) 164 | return (t_next, i_next) 165 | 166 | 167 | def train(self, episodes=1, force_execution=False): 168 | for episode in range(int(episodes)): 169 | for t in self.T: 170 | logging.info("\n"+"t=="+str(t)) 171 | for i in self.I: 172 | logging.info(" i=="+str(i)) 173 | logging.info("Action run " + str((t, i))) 174 | (t_next, i_next) = self.update(t, i, force_execution) 175 | while i_next != 0: 176 | if force_execution: 177 | raise Exception("Enforced execution left " + str(i_next) + " unexecuted.") 178 | logging.info("Action transition " + str((t, i)) + " -> " + str((t_next, i_next))) 179 | (t_next, i_next) = self.update(t_next, i_next, force_execution) 180 | 181 | 182 | def backtest(self, q=None, episodes=10, average=False, fixed_a=None): 183 | if q is None: 184 | q = self.ai.q 185 | else: 186 | self.ai.q = q 187 | 188 | if not q: 189 | raise Exception('Q-Table is empty, please train first.') 190 | 191 | Ms = [] 192 | #T = self.T[1:len(self.T)] 193 | for t in [self.T[-1]]: 194 | logging.info("\n"+"t=="+str(t)) 195 | for i in [self.I[-1]]: 196 | logging.info(" i=="+str(i)) 197 | actions = [] 198 | state = ActionState(t, i, {}) 199 | #print(state) 200 | if fixed_a is not None: 201 | a = fixed_a 202 | else: 203 | try: 204 | a = self.ai.getQAction(state, 0) 205 | # print("Q action for state " + str(state) + ": " + str(a)) 206 | except: 207 | # State might not be in Q-Table yet, more training requried. 
208 | logging.info("State " + str(state) + " not in Q-Table.") 209 | break 210 | actions.append(a) 211 | action = self.createAction(level=a, state=state, force_execution=False) 212 | midPrice = action.getReferencePrice() 213 | 214 | #print("before...") 215 | #print(action) 216 | action.run(self.orderbook) 217 | #print("after...") 218 | #print(action) 219 | i_next = self.determineNextInventory(action) 220 | t_next = self.determineNextTime(t) 221 | # print("i_next: " + str(i_next)) 222 | while i_next != 0: 223 | state_next = ActionState(t_next, i_next, {}) 224 | if fixed_a is not None: 225 | a_next = fixed_a 226 | else: 227 | try: 228 | a_next = self.ai.getQAction(state_next, 0) 229 | # print("t: " + str(t_next)) 230 | # print("i: " + str(i_next)) 231 | # print("Action: " + str(a_next)) 232 | # print("Q action for next state " + str(state_next) + ": " + str(a_next)) 233 | except: 234 | # State might not be in Q-Table yet, more training requried. 235 | # print("State " + str(state_next) + " not in Q-Table.") 236 | break 237 | actions.append(a_next) 238 | #print("Action transition " + str((t, i)) + " -> " + str(aiState_next) + " with " + str(runtime_next) + "s runtime.") 239 | 240 | runtime_next = self.determineRuntime(t_next) 241 | action.setState(state_next) 242 | action.update(a_next, runtime_next) 243 | action.run(self.orderbook) 244 | #print(action) 245 | i_next = self.determineNextInventory(action) 246 | t_next = self.determineNextTime(t_next) 247 | 248 | price = action.getAvgPrice() 249 | # TODO: last column is for for the BUY scenario only 250 | if action.getOrder().getSide() == OrderSide.BUY: 251 | profit = midPrice - price 252 | else: 253 | profit = price - midPrice 254 | Ms.append([state, midPrice, actions, price, profit]) 255 | if not average: 256 | return Ms 257 | return self.averageBacktest(Ms) 258 | 259 | def averageBacktest(self, M): 260 | # Average states within M 261 | N = [] 262 | observed = [] 263 | for x in M: 264 | state = x[0] 265 | if state in observed: 266 | continue 267 | observed.append(state) 268 | paid = [] 269 | reward = [] 270 | for y in M: 271 | if y[0] == state: 272 | paid.append(y[3]) 273 | reward.append(y[4]) 274 | N.append([state, x[1], x[2], np.average(paid), np.average(reward)]) 275 | return N 276 | -------------------------------------------------------------------------------- /ctc_executioner/action_space_env.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from ctc_executioner.action import Action 3 | from ctc_executioner.order import Order 4 | from ctc_executioner.order_type import OrderType 5 | 6 | class ActionSpace(object): 7 | 8 | def __init__(self, orderbook, side, T, I, ai=None, levels=None): 9 | self.orderbook = orderbook 10 | self.side = side 11 | self.levels = levels 12 | self.T = T 13 | self.I = I 14 | 15 | def getRandomOrderbookState(self): 16 | return self.orderbook.getRandomState(max(self.T)) 17 | 18 | def createAction(self, level, state, orderbookIndex=None, force_execution=False): 19 | # Determines whether to run and force execution of given t, or if 20 | # segmentation of t into multiple runtimes is allowed. 
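# With force_execution the order runs for the whole remaining horizon t as
# LIMIT_T_MARKET (a limit order that, as the name suggests, falls back to a
# market order once time runs out); otherwise the runtime is a single interval
# of T and a plain LIMIT order is placed.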
21 | if force_execution: 22 | runtime = state.getT() 23 | ot = OrderType.LIMIT_T_MARKET 24 | else: 25 | runtime = self.determineRuntime(state.getT()) 26 | ot = OrderType.LIMIT 27 | 28 | if orderbookIndex is None: 29 | orderbookState, orderbookIndex = self.getRandomOrderbookState() 30 | else: 31 | orderbookState = self.orderbook.getState(orderbookIndex) 32 | 33 | if runtime <= 0.0 or level is None: 34 | price = None 35 | ot = OrderType.MARKET 36 | else: 37 | price = orderbookState.getPriceAtLevel(self.side, level) 38 | 39 | order = Order( 40 | orderType=ot, 41 | orderSide=self.side, 42 | cty=state.getI(), 43 | price=price 44 | ) 45 | action = Action(a=level, runtime=runtime) 46 | action.setState(state) 47 | action.setOrder(order) 48 | action.setOrderbookState(orderbookState) 49 | action.setOrderbookIndex(orderbookIndex) 50 | action.setReferencePrice(orderbookState.getBestAsk()) 51 | return action 52 | 53 | def updateAction(self, action, level, state, orderbookIndex=None, force_execution=False): 54 | if force_execution: 55 | runtime = state.getT() 56 | ot = OrderType.LIMIT_T_MARKET 57 | else: 58 | runtime = self.determineRuntime(state.getT()) 59 | ot = OrderType.LIMIT 60 | 61 | if orderbookIndex is not None: 62 | orderbookState = self.orderbook.getState(orderbookIndex) 63 | action.setOrderbookState(orderbookState) 64 | action.setOrderbookIndex(orderbookIndex) 65 | 66 | if runtime <= 0 or level is None: 67 | price = None 68 | ot = OrderType.MARKET 69 | else: 70 | price = action.getOrderbookState().getPriceAtLevel(self.side, level) 71 | 72 | order = Order( 73 | orderType=ot, 74 | orderSide=self.side, 75 | cty=state.getI(), 76 | price=price 77 | ) 78 | action.setState(state) 79 | action.setOrder(order) 80 | return action 81 | 82 | def createActions(self, runtime, qty, force_execution=False): 83 | actions = [] 84 | for level in self.levels: 85 | actions.append(self.createAction(level, runtime, qty, force_execution)) 86 | return actions 87 | 88 | def determineBestAction(self, actions): 89 | bestAction = None 90 | for action in actions: 91 | if not bestAction: 92 | bestAction = action 93 | continue 94 | if action.getValueAvg() < bestAction.getValueAvg(): 95 | bestAction = action 96 | return bestAction 97 | 98 | def determineRuntime(self, t): 99 | if t != 0: 100 | T_index = self.T.index(t) 101 | runtime = self.T[T_index] - self.T[T_index - 1] 102 | else: 103 | runtime = t 104 | return runtime 105 | 106 | def determineNextTime(self, t): 107 | if t > 0: 108 | t_next = self.T[self.T.index(t) - 1] 109 | else: 110 | t_next = t 111 | 112 | logging.info('Next timestep for action: ' + str(t_next)) 113 | return t_next 114 | 115 | def determineNextInventory(self, action): 116 | qty_remaining = action.getQtyNotExecuted() 117 | 118 | # TODO: Working with floats requires such an ugly threshold 119 | if qty_remaining > 0.0000001: 120 | # Approximate next closest inventory given remaining and I 121 | i_next = min([0.0] + self.I, key=lambda x: abs(x - qty_remaining)) 122 | logging.info('Qty remain: ' + str(qty_remaining) 123 | + ' -> inventory: ' + str(qty_remaining) 124 | + ' -> next i: ' + str(i_next)) 125 | else: 126 | i_next = 0.0 127 | 128 | logging.info('Next inventory for action: ' + str(i_next)) 129 | return i_next 130 | -------------------------------------------------------------------------------- /ctc_executioner/action_state.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ctc_executioner.feature_type import FeatureType 3 | 4 | class 
ActionState(object): 5 | 6 | def __init__(self, t, i, market={}): 7 | self.t = t 8 | self.i = i 9 | self.market = market 10 | 11 | def __hash__(self): 12 | return hash((self.t, self.i, frozenset(self.market.items()))) 13 | 14 | def __eq__(self, other): 15 | return (self.t, self.i, frozenset(self.market.items())) == (other.t, other.i, frozenset(other.market.items())) 16 | 17 | def __ne__(self, other): 18 | # Not strictly necessary, but to avoid having both x==y and x!=y 19 | # True at the same time 20 | return not(self == other) 21 | 22 | def __str__(self): 23 | return str((self.t, self.i, str(self.market))) 24 | 25 | def __repr__(self): 26 | return self.__str__() 27 | 28 | def toArray(self): 29 | if FeatureType.ORDERS.value in self.market: 30 | # arr = [np.array([self.getT()]), np.array([self.getI()])] 31 | # for k, v in self.getMarket().items(): 32 | # arr.append(v) 33 | # return np.array([arr]) 34 | features = self.market[FeatureType.ORDERS.value] 35 | arr = np.zeros(shape=(1,features.shape[1],2), dtype=float) 36 | arr[0,0] = np.array([self.t, self.i]) 37 | features = np.vstack((arr, features)) 38 | #return features.reshape((1, features.shape[0], features.shape[1], features.shape[2])) # required for custom DQN 39 | return features.reshape((features.shape[0], features.shape[1], features.shape[2])) # required for baseline DQN 40 | #return features # (2*lookback, levels, count(features)) 41 | elif FeatureType.TRADES.value in self.market: 42 | features = self.market[FeatureType.TRADES.value] 43 | features = np.vstack((np.array([self.t, self.i, 0]), features)) 44 | return features 45 | else: 46 | raise Exception("Feature not known to ActionState.") 47 | 48 | def getT(self): 49 | return self.t 50 | 51 | def setT(self, t): 52 | self.t = t 53 | 54 | def getI(self): 55 | return self.i 56 | 57 | def setI(self, i): 58 | self.i = i 59 | 60 | def getMarket(self): 61 | return self.market 62 | -------------------------------------------------------------------------------- /ctc_executioner/agent_utils/action_plot_callback.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | from rl.callbacks import Callback 4 | from ctc_executioner.order_side import OrderSide 5 | 6 | class ActionPlotCallback(Callback): 7 | def __init__(self, nb_episodes=10): 8 | self.nb_episodes = nb_episodes 9 | self.episodes = {} 10 | self.episode = {} 11 | self.step = None 12 | self.plt = None 13 | 14 | def on_episode_begin(self, episode, logs): 15 | self.episode = {'episode': episode, 'steps': {}} 16 | 17 | def on_episode_end(self, episode, logs): 18 | if episode == 0: 19 | self.plt = self.env.orderbook.plot(show_bidask=True, max_level=0, show=False) 20 | self.plot(self.episode) 21 | if episode == (self.nb_episodes - 1): 22 | self.plt.show() 23 | self.episodes[episode] = self.episode 24 | 25 | def on_step_begin(self, step, logs): 26 | self.step = {} 27 | 28 | def on_step_end(self, step, logs): 29 | self.step['reward'] = logs['reward'] 30 | self.episode['steps'][step] = self.step 31 | 32 | def on_action_begin(self, action, logs): 33 | self.step['action'] = action 34 | self.step['index'] = self.env.orderbookIndex 35 | self.step['t'] = self.env.actionState.getT() 36 | self.step['i'] = self.env.actionState.getI() 37 | 38 | def plot(self, episode): 39 | indices, times, actions, prices, order_prices, runtimes, inventories, rewards = [], [], [], [], [], [], [], [] 40 | for key, value in episode['steps'].items(): 41 | index = value['index'] 42 | 
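# collect one series per step (orderbook index, runtime t, inventory i,
# reward, action) used below to annotate order placements on the price plot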
indices.append(index) 43 | runtimes.append(value['t']) 44 | inventories.append(value['i']) 45 | rewards.append(value['reward']) 46 | actions.append(value['action']) 47 | state = self.env.orderbook.getState(index) 48 | prices.append(state.getBidAskMid()) 49 | times.append(state.getTimestamp()) 50 | action_delta = 0.1*self.env.levels[value['action']] 51 | if self.env.side == OrderSide.BUY: 52 | order_prices.append(state.getBidAskMid() + action_delta) 53 | else: 54 | order_prices.append(state.getBidAskMid() - action_delta) 55 | 56 | # order placement 57 | #plt.scatter(times, prices, s=60) 58 | self.plt.scatter(times, order_prices, s=20) 59 | 60 | for i, time in enumerate(times): 61 | if i == 0 or i == len(times)-1: 62 | style = 'k-' 63 | else: 64 | style = 'k--' 65 | # line at order placement 66 | self.plt.plot([time, time], [prices[i]-0.005*prices[i], prices[i]+0.005*prices[i]], style, lw=1) 67 | 68 | for i, action in enumerate(actions): 69 | # action, resulted reward 70 | txt = 'a='+str(self.env.levels[action]) + '\nr=' + str(round(rewards[i], 2)) 71 | self.plt.annotate(txt, (times[i],prices[i])) 72 | # runtime, inventory 73 | txt = 't=' + str(runtimes[i]) + '\ni='+ str(round(inventories[i], 2)) 74 | self.plt.annotate(txt, (times[i], prices[i]-0.005*prices[i])) 75 | -------------------------------------------------------------------------------- /ctc_executioner/agent_utils/action_reward_log.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from rl.callbacks import Callback 4 | 5 | class ActionRewardLog(Callback): 6 | def __init__(self, file_name_prefix, nb_episodes=10000, avgwindow=20): 7 | self.rewards = np.zeros(nb_episodes) - 1000.0 8 | self.X = np.arange(1, nb_episodes+1) 9 | self.avgrewards = np.zeros(nb_episodes) - 1000.0 10 | self.avgwindow = avgwindow 11 | self.rewardbuf = [] 12 | self.episode = 0 13 | self.nb_episodes = nb_episodes 14 | self.file_actions = file_name_prefix + "_actions.py" 15 | self.file_rewards = file_name_prefix + "_rewards.py" 16 | self.file_rewards_mean = file_name_prefix + "_rewards_mean.py" 17 | 18 | def on_episode_end(self, episode, logs): 19 | if self.episode >= self.nb_episodes: 20 | return 21 | rw = logs['episode_reward'] 22 | actions = logs['episode_actions'] 23 | self.rewardbuf.append(rw) 24 | if len(self.rewardbuf) > self.avgwindow: 25 | del self.rewardbuf[0] 26 | self.rewards[self.episode] = rw 27 | rw_avg = np.mean(self.rewardbuf) 28 | self.avgrewards[self.episode] = rw_avg 29 | self.episode += 1 30 | self.write(self.file_actions, actions) 31 | self.write(self.file_rewards, rw) 32 | self.write(self.file_rewards_mean, rw_avg) 33 | 34 | def write(self, file, value): 35 | f = open(file, "a") 36 | f.write(str(value) + ',\n') 37 | -------------------------------------------------------------------------------- /ctc_executioner/agent_utils/live_plot_callback.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from rl.callbacks import Callback 4 | 5 | class LivePlotCallback(Callback): 6 | def __init__(self, nb_episodes=4000, avgwindow=20): 7 | self.rewards = np.zeros(nb_episodes) - 1000.0 8 | self.X = np.arange(1, nb_episodes+1) 9 | self.avgrewards = np.zeros(nb_episodes) - 1000.0 10 | self.avgwindow = avgwindow 11 | self.rewardbuf = [] 12 | self.episode = 0 13 | self.nb_episodes = nb_episodes 14 | self.filename = "liveplot" 15 | plt.ion() 16 | self.fig = 
plt.figure() 17 | self.grphinst = plt.plot(self.X, self.rewards, color='b')[0] 18 | self.grphavg = plt.plot(self.X, self.avgrewards, color='r')[0] 19 | plt.ylim([-450.0, 350.0]) 20 | plt.xlabel('Episodes') 21 | plt.legend([self.grphinst, self.grphavg], ['Episode rewards', '20-episode-average-rewards']) 22 | plt.grid(b=True, which='major', color='k', linestyle='-') 23 | plt.minorticks_on() 24 | plt.grid(b=True, which='minor', color='k', linestyle='--') 25 | 26 | def __del__(self): 27 | self.fig.savefig('monitor/plot.png') 28 | 29 | def on_episode_end(self, episode, logs): 30 | if self.episode >= self.nb_episodes: 31 | return 32 | rw = logs['episode_reward'] 33 | self.rewardbuf.append(rw) 34 | if len(self.rewardbuf) > self.avgwindow: 35 | del self.rewardbuf[0] 36 | self.rewards[self.episode] = rw 37 | self.avgrewards[self.episode] = np.mean(self.rewardbuf) 38 | self.plot() 39 | self.episode += 1 40 | 41 | def plot(self): 42 | self.grphinst.set_ydata(self.rewards) 43 | self.grphavg.set_ydata(self.avgrewards) 44 | plt.draw() 45 | #if self.episode == 0: 46 | plt.pause(0.01) 47 | -------------------------------------------------------------------------------- /ctc_executioner/agent_utils/ui.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import matplotlib.animation as animation 3 | 4 | class UI: 5 | 6 | @staticmethod 7 | def animate(f, interval=5000, axis=[0, 100, -50, 50], frames=None, title=""): 8 | fig = plt.figure()#(figsize=(24, 18)) 9 | ax1 = fig.add_subplot(1, 1, 1) 10 | ax1.tick_params(axis='both', which='major', labelsize=25) 11 | 12 | ax1.tick_params(axis='both', which='minor', labelsize=25) 13 | ax1.axis(axis) 14 | ax1.autoscale(True) 15 | xs = [] 16 | ys = [] 17 | 18 | def do_animate(i, f, ax1, xs, ys): 19 | y = f() 20 | if len(xs) == 0: 21 | xs.append(0) 22 | else: 23 | xs.append(xs[-1] + 1) 24 | ys.append(y) 25 | ax1.clear() 26 | ax1.plot(xs, ys) 27 | ax1.grid(linestyle='-', linewidth=2) 28 | ax1.legend([title], prop={'size': 30}) 29 | 30 | ani = animation.FuncAnimation( 31 | fig, 32 | lambda i: do_animate(i, f, ax1, xs, ys), 33 | interval=interval, 34 | frames=frames 35 | ) 36 | # from IPython.display import HTML 37 | # HTML(ani.to_jshtml()) 38 | plt.show() 39 | -------------------------------------------------------------------------------- /ctc_executioner/feature_generator.py: -------------------------------------------------------------------------------- 1 | from ctc_executioner.orderbook import Orderbook 2 | import numpy as np 3 | import pandas as pd 4 | import os 5 | 6 | book = 'data/trades/query_result_test.tsv' 7 | tmp='feature.tsv' 8 | orderbook = Orderbook() 9 | orderbook.loadFromFile(book) 10 | states = orderbook.getStates() 11 | 12 | 13 | def stateDiff(start, end): 14 | """Calculate time difference between two states.""" 15 | consumed = (end.getTimestamp() - start.getTimestamp()).total_seconds() 16 | return consumed 17 | 18 | 19 | def getPastState(i, t): 20 | """Find state at index i - time t.""" 21 | endState = states[i] 22 | state = endState 23 | while(stateDiff(state, endState) < t): 24 | i = i - 1 25 | if i < 0: 26 | raise Exception("Not enough states available for diff.") 27 | state = states[i] 28 | return i 29 | 30 | 31 | def traverse(f, g, default=0.0, t=60): 32 | """Traverse states and apply g(i, f(i-t, i)) for states at time e.g. 
i.""" 33 | startState = states[0] 34 | consumed = 0.0 35 | for i in range(len(states)): 36 | state = states[i] 37 | consumed = stateDiff(startState, state) 38 | # print("consumed: " + str(consumed) + " at i=" + str(i)) 39 | if consumed < t: 40 | g(i, default) 41 | else: 42 | pastState = getPastState(i, t) 43 | g(i, f(pastState, i+1)) 44 | 45 | 46 | def calcVolume(start, end): 47 | """Calculate volume for range of states.""" 48 | vol = 0.0 49 | for j in range(start, end): 50 | tempState = states[j] 51 | vol = vol + tempState.getVolume() 52 | return vol 53 | 54 | 55 | def calcStdPrice(start, end): 56 | """Calculate standard deviation for prices in range of states.""" 57 | prices = map(lambda x: states[x].getTradePrice(), range(start, end)) 58 | return np.std(list(prices)) 59 | 60 | 61 | def calcMeanPrice(start, end): 62 | """Calculate mean for prices in range of states.""" 63 | prices = map(lambda x: states[x].getTradePrice(), range(start, end)) 64 | return np.mean(list(prices)) 65 | 66 | 67 | def toFile(i, x): 68 | """Write feature to temp file.""" 69 | output = str(x) + '\n' 70 | with open(tmp, 'a') as fa: 71 | fa.write(output) 72 | 73 | 74 | def concatFeature(f, default=0.0, t=60): 75 | """Appends feature f as a new column to orderbook.""" 76 | traverse(f, toFile, default, t) 77 | # Append feature column 78 | df1 = pd.read_csv(book, sep='\t') 79 | df2 = pd.read_csv(tmp, sep='\t') 80 | concat_df = pd.concat([df1, df2], axis=1) 81 | # Overwrite book 82 | concat_df.to_csv(book, sep='\t', header=False, index=False, float_format='%.8f') 83 | # Cleanup tmp 84 | os.remove(tmp) 85 | 86 | 87 | def printFeature(f, default=0.0, t=60): 88 | """Prints feature f.""" 89 | traverse(f, lambda i, x: print(str((i, x))), default, t) 90 | 91 | 92 | # printFeature(calcMeanPrice) 93 | # concatFeature(calcMeanPrice) 94 | # concatFeature(calcVolume) 95 | concatFeature(calcStdPrice) 96 | -------------------------------------------------------------------------------- /ctc_executioner/feature_type.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | class FeatureType(Enum): 4 | ORDERS = 'bidask' 5 | TRADES = 'trades' 6 | -------------------------------------------------------------------------------- /ctc_executioner/match_engine.py: -------------------------------------------------------------------------------- 1 | from ctc_executioner.trade import Trade 2 | from ctc_executioner.order import Order 3 | from ctc_executioner.order_type import OrderType 4 | from ctc_executioner.order_side import OrderSide 5 | import copy 6 | import logging 7 | import numpy as np 8 | 9 | class MatchEngine(object): 10 | 11 | def __init__(self, orderbook, index=0, maxRuntime=100): 12 | self.orderbook = orderbook 13 | self.index = index 14 | self.maxRuntime = maxRuntime 15 | self.matches = set() 16 | self.recordMatches = False 17 | 18 | def _removePosition(self, side, price, qty): 19 | if self.recordMatches == True: 20 | self.matches.add((side, price, qty)) 21 | 22 | def _isRemoved(self, side, price, qty): 23 | return (side, price, qty) in self.matches 24 | 25 | def setIndex(self, index): 26 | self.index = index 27 | 28 | def matchLimitOrder(self, order, orderbookState): 29 | """ 30 | Attempts to match a limit Order in an order book state. 31 | 32 | Parameters 33 | ---------- 34 | order : Order 35 | Order defines the will to buy or sell under certain conditions. 
36 | orderbookState : OrderbookState 37 | The state of the order book to attempt matching the provided order 38 | 39 | Returns 40 | ------- 41 | [Trades] 42 | A list of the trades that resulted during the matching process. 43 | """ 44 | if order.getSide() == OrderSide.BUY: 45 | bookSide = orderbookState.getSellers() 46 | else: 47 | bookSide = orderbookState.getBuyers() 48 | 49 | def isMatchingPosition(p): 50 | if order.getSide() == OrderSide.BUY: 51 | return bookSide[p].getPrice() <= order.getPrice() 52 | else: 53 | return bookSide[p].getPrice() >= order.getPrice() 54 | 55 | partialTrades = [] 56 | remaining = order.getCty() 57 | sidePosition = 0 58 | while len(bookSide) > sidePosition and isMatchingPosition(sidePosition) and remaining > 0.0: 59 | p = bookSide[sidePosition] 60 | price = p.getPrice() 61 | qty = p.getQty() 62 | 63 | # skip if position was already matched 64 | if self._isRemoved(side=order.getSide(), price=price, qty=qty): 65 | sidePosition = sidePosition + 1; continue # advance before continuing, otherwise this would loop forever 66 | 67 | if not partialTrades and qty >= order.getCty(): 68 | logging.debug("Full execution: " + str(qty) + " pcs available") 69 | t = Trade(orderSide=order.getSide(), orderType=OrderType.LIMIT, cty=remaining, price=price, timestamp=orderbookState.getTimestamp()) 70 | #self._removePosition(side=order.getSide(), price=price, qty=qty) 71 | return [t] 72 | else: 73 | logging.debug("Partial execution: " + str(qty) + " pcs available") 74 | t = Trade(orderSide=order.getSide(), orderType=OrderType.LIMIT, cty=min(qty, remaining), price=price, timestamp=orderbookState.getTimestamp()) 75 | partialTrades.append(t) 76 | #self._removePosition(side=order.getSide(), price=price, qty=qty) 77 | sidePosition = sidePosition + 1 78 | remaining = remaining - qty 79 | 80 | if sidePosition == len(bookSide) - 1: 81 | # At this point there is no more liquidity in this state of the order 82 | # book (data) but the order price might actually be still higher than 83 | # what was available. For convenience's sake we assume that there would 84 | # be liquidity in the subsequent levels above. 85 | # Therefore we linearly interpolate and place fake orders from 86 | # imaginary traders in the book with an increased price (according to 87 | # derivative) and similar qty. 88 | average_qty = np.mean([x.getCty() for x in partialTrades]) 89 | logging.debug("On average executed qty: " + str(average_qty)) 90 | if average_qty == 0.0: 91 | average_qty = 0.5 92 | logging.debug("Since no trades were executed (e.g. true average executed qty == 0.0), default is chosen: " + str(average_qty)) 93 | derivative_price = abs(np.mean(np.gradient([x.getPrice() for x in partialTrades]))) 94 | logging.debug("Derivative of price from executed trades: " + str(derivative_price)) 95 | if derivative_price == 0.0: 96 | derivative_price = 10.0 97 | logging.debug("Since no trades were executed (e.g. 
derivative executed price == 0.0), default is chosen: " + str(derivative_price)) 98 | while remaining > 0.0: 99 | if order.getSide() == OrderSide.BUY: 100 | price = price + derivative_price 101 | if price > order.getPrice(): 102 | break 103 | elif order.getSide() == OrderSide.SELL: 104 | price = price - derivative_price 105 | if price < order.getPrice(): 106 | break 107 | 108 | qty = min(average_qty, remaining) 109 | logging.debug("Partial execution: assume " + str(qty) + " available") 110 | partialTrades.append(Trade(orderSide=order.getSide(), orderType=OrderType.LIMIT, cty=qty, price=price, timestamp=orderbookState.getTimestamp())) 111 | remaining = remaining - qty 112 | 113 | return partialTrades 114 | 115 | def matchMarketOrder(self, order, orderbookState): 116 | """ 117 | Matches a market Order within an order book state. 118 | 119 | Parameters 120 | ---------- 121 | order : Order 122 | Order defines the will to buy or sell under certain conditions. 123 | orderbookState : OrderbookState 124 | The state of the order book to attempt matching the provided order 125 | 126 | Returns 127 | ------- 128 | [Trades] 129 | A list of the trades that resulted during the matching process. 130 | """ 131 | if order.getSide() == OrderSide.BUY: 132 | bookSide = orderbookState.getSellers() 133 | else: 134 | bookSide = orderbookState.getBuyers() 135 | 136 | partialTrades = [] 137 | remaining = order.getCty() 138 | sidePosition = 0 139 | price = 0.0 140 | while len(bookSide) > sidePosition and remaining > 0.0: 141 | p = bookSide[sidePosition] 142 | derivative_price = p.getPrice() - price 143 | price = p.getPrice() 144 | qty = p.getQty() 145 | if not partialTrades and qty >= order.getCty(): 146 | logging.debug("Full execution: " + str(qty) + " pcs available") 147 | return [Trade(orderSide=order.getSide(), orderType=OrderType.MARKET, cty=remaining, price=price, timestamp=orderbookState.getTimestamp())] 148 | else: 149 | logging.debug("Partial execution: " + str(qty) + " pcs available") 150 | qtyExecute = min(qty, remaining) 151 | partialTrades.append(Trade(orderSide=order.getSide(), orderType=OrderType.MARKET, cty=qtyExecute, price=price, timestamp=orderbookState.getTimestamp())) 152 | sidePosition = sidePosition + 1 153 | remaining = remaining - qtyExecute 154 | logging.debug("Remaining: " + str(remaining)) 155 | 156 | return partialTrades 157 | 158 | def matchOrder(self, order, seconds=None): 159 | """ 160 | Matches an Order according to its type. 161 | 162 | This function serves as the main interface for Order matching. 163 | Orders are being matched differently according to their OrderType. 164 | In addition, an optional time interval can be defined for how long the 165 | matching process should run and therefore simulates what is generally 166 | known as *Good Till Time (GTT)*. 167 | After the time is consumed, the order is either removed (e.g. neglected) 168 | in case of a standard OrderType.LIMIT or a matching on market follows in 169 | case OrderType.LIMIT_T_MARKET was defined. 170 | 171 | Parameters 172 | ---------- 173 | order : Order 174 | Order defines the will to buy or sell under certain conditions. 175 | seconds : int 176 | Good Till Time (GTT) 177 | 178 | Returns 179 | ------- 180 | [Trades] 181 | A list of the trades that resulted during the matching process. 182 | float 183 | Quantity of unexecuted assets. 184 | int 185 | Index of order book where matching stopped. 186 | """ 187 | order = copy.deepcopy(order) # Do not modify original order! 
188 | i = self.index 189 | remaining = order.getCty() 190 | trades = [] 191 | 192 | while len(self.orderbook.getStates()) - 1 > i and remaining > 0: 193 | orderbookState = self.orderbook.getState(i) 194 | logging.debug("Evaluate state " + str(i) + ":\n" + str(orderbookState)) 195 | 196 | # Stop matching process after defined seconds are consumed 197 | if seconds is not None: 198 | t_start = self.orderbook.getState(self.index).getTimestamp() 199 | t_now = orderbookState.getTimestamp() 200 | t_delta = (t_now - t_start).total_seconds() 201 | logging.debug(str(t_delta) + " of " + str(seconds) + " consumed.") 202 | if t_delta >= seconds: 203 | logging.debug("Time delta consumed, stop matching.\n") 204 | break 205 | 206 | if order.getType() == OrderType.LIMIT: 207 | counterTrades = self.matchLimitOrder(order, orderbookState) 208 | elif order.getType() == OrderType.MARKET: 209 | counterTrades = self.matchMarketOrder(order, orderbookState) 210 | elif order.getType() == OrderType.LIMIT_T_MARKET: 211 | if seconds is None: 212 | raise Exception(str(OrderType.LIMIT_T_MARKET) + ' requires a time limit.') 213 | counterTrades = self.matchLimitOrder(order, orderbookState) 214 | else: 215 | raise Exception('Order type not known or not implemented yet.') 216 | 217 | if counterTrades: 218 | trades = trades + counterTrades 219 | logging.debug("Trades executed:") 220 | for counterTrade in counterTrades: 221 | logging.debug(counterTrade) 222 | remaining = remaining - counterTrade.getCty() 223 | order.setCty(remaining) 224 | logging.debug("Remaining: " + str(remaining) + "\n") 225 | else: 226 | logging.debug("No orders matched.\n") 227 | i = i + 1 228 | 229 | # Execute remaining qty as market if LIMIT_T_MARKET 230 | if remaining > 0.0 and (order.getType() == OrderType.LIMIT_T_MARKET or order.getType() == OrderType.MARKET): 231 | logging.debug('Execute remaining as MARKET order.') 232 | #i = i - 1 # back to previous state 233 | if not len(self.orderbook.getStates()) > i: 234 | raise Exception('Not enough data for following market order.') 235 | 236 | orderbookState = self.orderbook.getState(i) 237 | logging.debug("Evaluate state " + str(i) + ":\n" + str(orderbookState)) 238 | counterTrades = self.matchMarketOrder(order, orderbookState) 239 | if not counterTrades: 240 | raise Exception('Remaining market order matching failed.') 241 | trades = trades + counterTrades 242 | logging.debug("Trades executed:") 243 | for counterTrade in counterTrades: 244 | logging.debug(counterTrade) 245 | remaining = remaining - counterTrade.getCty() 246 | order.setCty(remaining) 247 | logging.debug("Remaining: " + str(remaining) + "\n") 248 | 249 | logging.debug("Total number of trades: " + str(len(trades))) 250 | logging.debug("Remaining qty of order: " + str(remaining)) 251 | logging.debug("Index at end of match period: " + str(i)) 252 | return trades, remaining, i-1 253 | 254 | 255 | # logging.basicConfig(level=logging.DEBUG) 256 | # from orderbook import Orderbook 257 | # orderbook = Orderbook(extraFeatures=False) 258 | # orderbook.loadFromFile('query_result_small.tsv') 259 | # engine = MatchEngine(orderbook, index=0) 260 | # 261 | # #order = Order(orderType=OrderType.LIMIT, orderSide=OrderSide.BUY, cty=11.0, price=16559.0) 262 | # #order = Order(orderType=OrderType.MARKET, orderSide=OrderSide.BUY, cty=25.5, price=None) 263 | # order = Order(orderType=OrderType.LIMIT_T_MARKET, orderSide=OrderSide.SELL, cty=1.0, price=16559.0) 264 | # trades, remaining, i = engine.matchOrder(order, seconds=1.0) 265 | # c = 0.0 266 | # for trade in 
trades: 267 | # c = c + trade.getCty() 268 | # print(c) 269 | -------------------------------------------------------------------------------- /ctc_executioner/order.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from ctc_executioner.order_type import OrderType 3 | 4 | 5 | class Order: 6 | """ 7 | An order indicates the purchases to be made. 8 | 9 | The MatchEngine will try to execute an order given orders from other 10 | parties. Therefore, an order may result in one or many Trades. 11 | """ 12 | 13 | def __init__(self, orderType, orderSide, cty, price=None, timestamp=None): 14 | self.timestamp = timestamp 15 | if not self.timestamp: 16 | self.timestamp = str(datetime.now()).split('.')[0] 17 | self.orderType = orderType 18 | self.orderSide = orderSide 19 | self.cty = cty 20 | self.price = price # None for OrderType.MARKET 21 | # timestamp is already set above; reassigning it here would discard the default 22 | if self.orderType == OrderType.MARKET and self.price is not None: 23 | raise Exception('Market order must not have a price.') 24 | if self.orderType == OrderType.LIMIT and self.price is None: 25 | raise Exception('Limit order must have a price.') 26 | 27 | def __str__(self): 28 | return (str(self.timestamp) + ',' + 29 | str(self.getType()) + ',' + 30 | str(self.getCty()) + ',' + 31 | str(self.getPrice())) 32 | 33 | def __repr__(self): 34 | return str(self) 35 | 36 | def getType(self): 37 | return self.orderType 38 | 39 | def setType(self, type): 40 | self.orderType = type 41 | 42 | def getSide(self): 43 | return self.orderSide 44 | 45 | def getCty(self): 46 | return self.cty 47 | 48 | def setCty(self, cty): 49 | self.cty = cty 50 | 51 | def getPrice(self): 52 | return self.price 53 | 54 | def setPrice(self, price): 55 | self.price = price 56 | 57 | def getTimeStamp(self): 58 | return self.timestamp 59 | -------------------------------------------------------------------------------- /ctc_executioner/order_side.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class OrderSide(Enum): 5 | BUY = 'buy' 6 | SELL = 'sell' 7 | 8 | def opposite(self): 9 | if self == OrderSide.BUY: 10 | return OrderSide.SELL 11 | elif self == OrderSide.SELL: 12 | return OrderSide.BUY 13 | -------------------------------------------------------------------------------- /ctc_executioner/order_type.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class OrderType(Enum): 5 | MARKET = 'market' 6 | LIMIT = 'limit' 7 | LIMIT_T_MARKET = 'limit_t_market' 8 | CANCEL = 'cancel' 9 | -------------------------------------------------------------------------------- /ctc_executioner/qlearn.py: -------------------------------------------------------------------------------- 1 | import random 2 | from ctc_executioner.action_state import ActionState 3 | 4 | class QLearn: 5 | """Q-learner.""" 6 | 7 | def __init__(self, actions, epsilon=0.1, alpha=0.1, gamma=0.1, exploration_decay=1.000001): 8 | """Initialize Q-table and assign parameters.""" 9 | self.q = {} 10 | self.epsilon = epsilon 11 | self.alpha = alpha 12 | self.gamma = gamma 13 | self.exploration_decay = exploration_decay 14 | self.actions = actions 15 | 16 | def getQ(self, state, action, default=0.0): 17 | """Q-value lookup for state and action, or else returns default.""" 18 | return self.q.get((state, action), default) 19 | 20 | def getQAction(self, state, default=0.0): 21 | """Best action based on 
Q-Table for given state.""" 22 | values = [] 23 | for x in list(reversed(self.actions)): 24 | q_value = self.q.get((state, x), 0.0) 25 | #if q_value is not 0.0: 26 | values.append(q_value) 27 | # else: 28 | # raise Exception("Q-Table does not contain: " + str((state, x))) 29 | 30 | if len(values) == 0: 31 | return default 32 | 33 | maxQ = max(values) 34 | a = list(reversed(self.actions))[values.index(maxQ)] 35 | return a 36 | 37 | def learnQ(self, state, action, reward, value): 38 | oldv = self.q.get((state, action), 0.0) 39 | if oldv == 0.0: # '==' rather than 'is': identity comparison of floats is unreliable 40 | self.q[(state, action)] = reward 41 | else: 42 | self.q[(state, action)] = oldv + self.alpha * (value - oldv) 43 | 44 | def learn(self, state1, action1, reward, state2): 45 | maxqnew = max([self.getQ(state2, a) for a in self.actions]) 46 | self.learnQ(state1, action1, reward, reward + self.gamma * maxqnew) 47 | 48 | def chooseAction(self, state, return_q=False): 49 | """Chooses the most rewarding action with probability epsilon, a random one otherwise.""" 50 | self.epsilon = self.exploration_decay * self.epsilon # epsilon (the exploitation probability) grows slowly over time 51 | 52 | if random.random() > self.epsilon: # explore 53 | action = random.choice(self.actions) 54 | else: 55 | q = [self.getQ(state, a) for a in self.actions] 56 | maxQ = max(q) 57 | count = q.count(maxQ) 58 | if count > 1: 59 | best = [i for i in range(len(self.actions)) if q[i] == maxQ] 60 | i = random.choice(best) 61 | else: 62 | i = q.index(maxQ) 63 | 64 | action = self.actions[i] 65 | return action 66 | -------------------------------------------------------------------------------- /ctc_executioner/test/test.py: -------------------------------------------------------------------------------- 1 | from order_side import OrderSide 2 | from orderbook import Orderbook 3 | 4 | orderbook = Orderbook() 5 | orderbook.loadFromEvents('ob-1-small.tsv') 6 | orderbook_test = orderbook 7 | orderbook.summary() 8 | 9 | side = OrderSide.SELL 10 | levels = list(range(-20,21)) 11 | 12 | episode = {'episode': 9, 'steps': { 13 | 0: {'action': 2, 'index': 167, 't': 100, 'i': 0.9999999999999999, 'reward': -6.224232140000822}, 14 | 1: {'action': 39, 'index': 173, 't': 90, 'i': 0.9999999999999999, 'reward': 1.9899999999997817}, 15 | 2: {'action': 21, 'index': 179, 't': 80, 'i': 0.7, 'reward': -6.224232140000822}, 16 | 3: {'action': 39, 'index': 185, 't': 70, 'i': 0.7, 'reward': -0.18051749500045844}, 17 | 4: {'action': 39, 'index': 193, 't': 60, 'i': 0.7, 'reward': -0.3610349900009169} 18 | } 19 | } 20 | 21 | import matplotlib.pyplot as plt 22 | 23 | indices = [] 24 | times = [] 25 | actions = [] 26 | prices = [] 27 | order_prices = [] 28 | runtimes = [] 29 | inventories = [] 30 | rewards = [] 31 | for key, value in episode['steps'].items(): 32 | index = value['index'] 33 | indices.append(index) 34 | runtimes.append(value['t']) 35 | inventories.append(value['i']) 36 | rewards.append(value['reward']) 37 | actions.append(value['action']) 38 | state = orderbook.getState(index) 39 | prices.append(state.getBidAskMid()) 40 | action_delta = 0.1*levels[value['action']] 41 | if side == OrderSide.BUY: 42 | order_prices.append(state.getBidAskMid() + action_delta) 43 | else: 44 | order_prices.append(state.getBidAskMid() - action_delta) 45 | times.append(state.getTimestamp()) 46 | 47 | # price chart 48 | ps = [x.getBidAskMid() for x in orderbook.getStates()] 49 | ts = [x.getTimestamp() for x in orderbook.getStates()] 50 | plt.plot(ts, ps) 51 | # if show_bidask: 52 | max_level = 0 53 | buyer = [x.getBuyers()[max_level].getPrice() for x in orderbook.getStates()] 54 | seller = [x.getSellers()[max_level].getPrice() for x in 
orderbook.getStates()] 55 | plt.plot(ts, buyer) 56 | plt.plot(ts, seller) 57 | 58 | # order placement 59 | #plt.scatter(times, prices, s=60) 60 | plt.scatter(times, order_prices, s=60) 61 | 62 | for i, time in enumerate(times): 63 | if i == 0 or i == len(times)-1: 64 | style = 'k-' 65 | else: 66 | style = 'k--' 67 | # line at order placement 68 | plt.plot([time, time], [prices[i]-0.005*prices[i], prices[i]+0.005*prices[i]], style, lw=1) 69 | 70 | 71 | for i, action in enumerate(actions): 72 | # action, resulted reward 73 | txt = 'a='+str(levels[action]) + '\nr=' + str(round(rewards[i], 2)) 74 | plt.annotate(txt, (times[i],prices[i])) 75 | # runtime, inventory 76 | txt = 't=' + str(runtimes[i]) + '\ni='+ str(round(inventories[i], 2)) 77 | plt.annotate(txt, (times[i], prices[i]-0.005*prices[i])) 78 | 79 | plt.show() 80 | -------------------------------------------------------------------------------- /ctc_executioner/test/test_action.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from action import Action 3 | from orderbook import Orderbook 4 | from order import Order 5 | from order_type import OrderType 6 | from order_side import OrderSide 7 | 8 | orderbook = Orderbook(extraFeatures=False) 9 | orderbook.loadFromFile('test_orderbook_10s.tsv') 10 | 11 | class MatchEngineMock(): 12 | 13 | def matchOrder(self, order, seconds=None): 14 | trades = [] 15 | qtyRemain = 0 16 | index = 1 17 | return trades, qtyRemain, index 18 | 19 | 20 | class ActionMock(Action): 21 | 22 | def __init__(self, a, runtime): 23 | Action.__init__(self, a, runtime) 24 | 25 | def getMatchEngine(self, orderbook): 26 | return MatchEngineMock() 27 | 28 | 29 | class ActionTest(unittest.TestCase): 30 | 31 | def testRun(self): 32 | a = 1 33 | i = 1.0 34 | t = 10.0 35 | orderbookIndex = 0 36 | orderbookState = orderbook.getState(orderbookIndex) 37 | orderSide = OrderSide.BUY 38 | orderType = OrderType.LIMIT 39 | price = orderbookState.getPriceAtLevel(orderSide, a) 40 | order = Order( 41 | orderType=orderType, 42 | orderSide=orderSide, 43 | cty=i, 44 | price=price 45 | ) 46 | action = ActionMock(a=a, runtime=t) 47 | action.setOrder(order) 48 | action.setOrderbookState(orderbookState) 49 | action.setOrderbookIndex(orderbookIndex) 50 | action.run(orderbook) 51 | -------------------------------------------------------------------------------- /ctc_executioner/test/test_qlearn.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from qlearn import QLearn 3 | from action_state import ActionState 4 | import numpy as np 5 | 6 | 7 | class QlearnTest(unittest.TestCase): 8 | def testStateEquality(self): 9 | ai = QLearn([-1, 0, 1]) 10 | a1 = ActionState(1.0, 1.0, {'vol60': 1}) 11 | a2 = ActionState(1.0, 1.0, {'vol60': 1}) 12 | ai.learn(a1, 1, 1.0, a2) 13 | self.assertEqual(ai.getQAction(a2), 1) 14 | 15 | #def testQTableLookup(self): 16 | actions = [5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -7, -10, -15, -20] 17 | ai = QLearn(actions) 18 | ai.q = np.load('test_q.npy').item() 19 | ai.q 20 | state = ActionState(30, 0.9, {}) 21 | ai.q.get((state, -10)) 22 | print(ai.getQAction(state)) 23 | -------------------------------------------------------------------------------- /ctc_executioner/trade.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | """ A trade is an extended version of a Position, indicating the purchases made """ 4 | class Trade: 5 | def 
__init__(self, orderSide, orderType, cty, price, fee=0.0, timestamp=None): 6 | self.orderSide = orderSide 7 | self.orderType = orderType 8 | self.cty = cty 9 | self.price = price 10 | self.fee = fee 11 | self.timestamp = timestamp if timestamp is not None else str(datetime.now()).split('.')[0] # a default in the signature would be evaluated only once, at import time 12 | 13 | def __str__(self): 14 | return (str(self.timestamp) + ',' + 15 | str(self.getSide()) + ',' + 16 | str(self.getType()) + ',' + 17 | str(self.getCty()) + ',' + 18 | str(self.getPrice()) + ',' + 19 | str(self.getFee())) 20 | 21 | def __repr__(self): 22 | return str(self) 23 | 24 | def getSide(self): 25 | return self.orderSide 26 | 27 | def getType(self): 28 | return self.orderType 29 | 30 | def getCty(self): 31 | return self.cty 32 | 33 | def setCty(self, cty): 34 | self.cty = cty 35 | 36 | def getPrice(self): 37 | return self.price 38 | 39 | def getFee(self): 40 | return self.fee 41 | 42 | def getTimeStamp(self): 43 | return self.timestamp 44 | -------------------------------------------------------------------------------- /data/events.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | exports.__esModule = true; 3 | var bittrex = require("node-bittrex-api"); 4 | bittrex.options({ 5 | 'apikey': 'API_KEY', 6 | 'apisecret': 'API_SECRET' 7 | }); 8 | // Formats a JSON object into a DBUpdate object 9 | function formatUpdate(v) { 10 | var updates = []; 11 | var pair = (v.MarketName); 12 | var seq = v.Nounce; 13 | var timestamp = Date.now() / 1000; 14 | v.Buys.forEach(function (buy) { 15 | updates.push({ 16 | pair: pair, 17 | seq: seq, 18 | is_trade: false, 19 | is_bid: true, 20 | price: buy.Rate, 21 | size: buy.Quantity, 22 | timestamp: timestamp, 23 | type: buy.Type 24 | }); 25 | }); 26 | v.Sells.forEach(function (sell) { 27 | updates.push({ 28 | pair: pair, 29 | seq: seq, 30 | is_trade: false, 31 | is_bid: false, 32 | price: sell.Rate, 33 | size: sell.Quantity, 34 | timestamp: timestamp, 35 | type: sell.Type 36 | }); 37 | }); 38 | v.Fills.forEach(function (fill) { 39 | updates.push({ 40 | pair: pair, 41 | seq: seq, 42 | is_trade: true, 43 | is_bid: fill.OrderType === "BUY", 44 | price: fill.Rate, 45 | size: fill.Quantity, 46 | timestamp: (new Date(fill.TimeStamp)).getTime() / 1000, 47 | type: null 48 | }); 49 | }); 50 | return updates; 51 | } 52 | function watch() { 53 | try { 54 | //let mkts = await allMarkets() 55 | var mkts = ['USDT-BTC']; 56 | bittrex.websockets.subscribe(mkts, function (data, client) { 57 | if (data.M === 'updateExchangeState') { 58 | var state = data; 59 | state.A.forEach(function (v) { 60 | var updates = formatUpdate(v); 61 | updates.forEach(function (u) { 62 | //console.log(u) 63 | console.log(//u.pair + '\t' + 64 | u.timestamp + '\t' + 65 | u.seq + '\t' + 66 | u.size + '\t' + 67 | u.price + '\t' + 68 | String(+u.is_bid) + '\t' + 69 | String(+u.is_trade) + '\t' + 70 | u.type); 71 | }); 72 | }); 73 | } 74 | }); 75 | } 76 | catch (e) { 77 | console.log(e); 78 | throw e; 79 | } 80 | } 81 | var main = watch; 82 | main(); 83 | -------------------------------------------------------------------------------- /data/events.ts: -------------------------------------------------------------------------------- 1 | import bittrex = require('node-bittrex-api'); 2 | bittrex.options({ 3 | 'apikey' : 'API_KEY', 4 | 'apisecret' : 'API_SECRET', 5 | }); 6 | 7 | export interface ExchangeState { 8 | H: string, // Hub 9 | M: "updateExchangeState", 10 | A: [ExchangeStateUpdate] 11 | } 12 | 13 | export type Side = "SELL" | "BUY"; 14 | export type UpdateType = 0 // new order 
entries at matching price, add to orderbook 15 | | 1 // cancelled / filled order entries at matching price, delete from orderbook 16 | | 2 // changed order entries at matching price (partial fills, cancellations), edit in orderbook 17 | ; 18 | 19 | export interface ExchangeStateUpdate { 20 | MarketName: string, 21 | Nounce: number, 22 | Buys: [Buy], 23 | Sells: [Sell], 24 | Fills: [Fill] 25 | } 26 | 27 | export type Sell = Buy; 28 | 29 | export interface Buy { 30 | Type: UpdateType, 31 | Rate: number, 32 | Quantity: number 33 | } 34 | 35 | export interface Fill { 36 | OrderType: Side, 37 | Rate: number, 38 | Quantity: number, 39 | TimeStamp: string, 40 | } 41 | 42 | //================================ 43 | 44 | export interface SummaryState { 45 | H: string, 46 | M: "updateSummaryState", 47 | A: [SummaryStateUpdate] 48 | } 49 | 50 | export interface SummaryStateUpdate { 51 | Nounce: number, 52 | Deltas: [PairUpdate] 53 | } 54 | 55 | export interface PairUpdate { 56 | MarketName: string, 57 | High: number 58 | Low: number, 59 | Volume: number, 60 | Last: number, 61 | BaseVolume: number, 62 | TimeStamp: string, 63 | Bid: number, 64 | Ask: number, 65 | OpenBuyOrders: number, 66 | OpenSellOrders: number, 67 | PrevDay: number, 68 | Created: string 69 | } 70 | 71 | //================================ 72 | 73 | export interface UnhandledData { 74 | unhandled_data: { 75 | R: boolean, // true, 76 | I: string, // '1' 77 | } 78 | } 79 | 80 | //================================ 81 | //callbacks 82 | 83 | export type ExchangeCallback = (value: ExchangeStateUpdate, index?: number, array?: ExchangeStateUpdate[]) => void 84 | export type SummaryCallback = (value: PairUpdate, index?: number, array?: PairUpdate[]) => void 85 | 86 | 87 | //================================ 88 | //db updates 89 | 90 | export interface DBUpdate { 91 | pair: string, 92 | seq: number, 93 | is_trade: boolean, 94 | is_bid: boolean, 95 | price: number, 96 | size: number, 97 | timestamp: number, 98 | type: number 99 | } 100 | 101 | 102 | // Formats a JSON object into a DBUpdate object 103 | function formatUpdate(v : ExchangeStateUpdate) : DBUpdate[] { 104 | let updates : DBUpdate[] = []; 105 | 106 | const pair = (v.MarketName); 107 | const seq = v.Nounce; 108 | const timestamp = Date.now() / 1000; 109 | 110 | v.Buys.forEach(buy => { 111 | updates.push( 112 | { 113 | pair, 114 | seq, 115 | is_trade: false, 116 | is_bid: true, 117 | price: buy.Rate, 118 | size: buy.Quantity, 119 | timestamp, 120 | type: buy.Type 121 | } 122 | ); 123 | }); 124 | 125 | v.Sells.forEach(sell => { 126 | updates.push( 127 | { 128 | pair, 129 | seq, 130 | is_trade: false, 131 | is_bid: false, 132 | price: sell.Rate, 133 | size: sell.Quantity, 134 | timestamp, 135 | type: sell.Type 136 | } 137 | ); 138 | }); 139 | 140 | v.Fills.forEach(fill => { 141 | updates.push( 142 | { 143 | pair, 144 | seq, 145 | is_trade: true, 146 | is_bid: fill.OrderType === "BUY", 147 | price: fill.Rate, 148 | size: fill.Quantity, 149 | timestamp: (new Date(fill.TimeStamp)).getTime() / 1000, 150 | type: null 151 | } 152 | ); 153 | }) 154 | 155 | return updates; 156 | } 157 | 158 | function watch() { 159 | try { 160 | //let mkts = await allMarkets() 161 | let mkts = ['USDT-BTC'] 162 | bittrex.websockets.subscribe(mkts, function(data, client) { 163 | if (data.M === 'updateExchangeState') { 164 | const state = data; 165 | state.A.forEach(v => { 166 | let updates : DBUpdate[] = formatUpdate(v); 167 | updates.forEach(u => { 168 | //console.log(u) 169 | console.log(//u.pair + '\t' + 170 | 
u.timestamp + '\t' + 171 | u.seq + '\t' + 172 | u.size + '\t' + 173 | u.price + '\t' + 174 | String(+ u.is_bid) + '\t' + 175 | String(+ u.is_trade) + '\t' + 176 | u.type 177 | ) 178 | }); 179 | }) 180 | } 181 | }); 182 | } catch (e) { 183 | console.log(e); 184 | throw e; 185 | } 186 | } 187 | 188 | let main = watch; 189 | 190 | main(); 191 | -------------------------------------------------------------------------------- /data/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es5", 4 | "module": "commonjs", 5 | "sourceMap": true 6 | } 7 | } -------------------------------------------------------------------------------- /docs/Poster.odg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/docs/Poster.odg -------------------------------------------------------------------------------- /docs/Poster.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/docs/Poster.pdf -------------------------------------------------------------------------------- /docs/Poster2.odg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/docs/Poster2.odg -------------------------------------------------------------------------------- /docs/Poster2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/docs/Poster2.pdf -------------------------------------------------------------------------------- /docs/final-presentation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/docs/final-presentation.pdf -------------------------------------------------------------------------------- /docs/final-presentation_notes.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/docs/final-presentation_notes.pdf -------------------------------------------------------------------------------- /docs/presentation-midterm.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/docs/presentation-midterm.pdf -------------------------------------------------------------------------------- /docs/report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/docs/report.pdf -------------------------------------------------------------------------------- /gym_ctc_executioner/__init__.py: -------------------------------------------------------------------------------- 1 | from gym.envs.registration import register 2 | 3 | register( 4 | id='ctc-executioner-v0', 5 | entry_point='gym_ctc_executioner.envs:ExecutionEnv' 6 | ) 7 | -------------------------------------------------------------------------------- 
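Note on wiring: the register() call above makes the execution environment constructible through Gym's factory under the id 'ctc-executioner-v0'; the commented-out snippet at the bottom of execution_env.py below hints at the same usage. A minimal sketch of how the pieces are meant to fit together (the event-file path is an assumption; any of the order book event files under data/events/ should do, and setOrderbook() is needed because _configure() defaults to orderbook=None):

import gym
import gym_ctc_executioner  # importing the package runs the register() call above
from ctc_executioner.orderbook import Orderbook

orderbook = Orderbook()
orderbook.loadFromEvents('data/events/ob-train.tsv')  # assumed sample data file
env = gym.make('ctc-executioner-v0')
env.setOrderbook(orderbook)  # must be set before reset(); see _configure() below
state = env.reset()
state, reward, done, info = env.step(env.action_space.sample())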
/gym_ctc_executioner/envs/__init__.py: -------------------------------------------------------------------------------- 1 | from gym_ctc_executioner.envs.execution_env import ExecutionEnv 2 | -------------------------------------------------------------------------------- /gym_ctc_executioner/envs/execution_env.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import copy 3 | import random 4 | import numpy as np 5 | import gym 6 | from gym import error, spaces, utils 7 | from gym.utils import seeding 8 | from ctc_executioner.action import Action 9 | from ctc_executioner.action_state import ActionState 10 | from ctc_executioner.order import Order 11 | from ctc_executioner.order_type import OrderType 12 | from ctc_executioner.order_side import OrderSide 13 | from ctc_executioner.feature_type import FeatureType 14 | from ctc_executioner.agent_utils.live_plot_callback import LivePlotCallback 15 | 16 | #logging.basicConfig(level=logging.INFO) 17 | 18 | class ExecutionEnv(gym.Env): 19 | 20 | def __init__(self): 21 | self.orderbookIndex = None 22 | self.actionState = None 23 | self.execution = None 24 | self.episode = 0 25 | self._configure() 26 | 27 | def _generate_Sequence(self, min, max, step): 28 | """ Generate sequence (that unlike xrange supports float) 29 | 30 | max: defines the sequence maximum 31 | step: defines the interval 32 | """ 33 | i = min 34 | I = [] 35 | while i <= max: 36 | I.append(i) 37 | i = i + step 38 | return I 39 | 40 | def _configure(self, 41 | orderbook=None, 42 | side=OrderSide.SELL, 43 | levels=(-50, 50, 1), 44 | T=(0, 100, 10), 45 | I=(0, 1, 0.1), 46 | lookback=25, 47 | bookSize=10, 48 | featureType=FeatureType.ORDERS, 49 | callbacks = [] 50 | ): 51 | self.orderbook = orderbook 52 | self.side = side 53 | self.levels = self._generate_Sequence(min=levels[0], max=levels[1], step=levels[2]) 54 | self.action_space = spaces.Discrete(len(self.levels)) 55 | self.T = self._generate_Sequence(min=T[0], max=T[1], step=T[2]) 56 | self.I = self._generate_Sequence(min=I[0], max=I[1], step=I[2]) 57 | self.lookback = lookback # results in (bid|size, ask|size) -> 4*5 58 | self.bookSize = bookSize 59 | self.featureType = featureType 60 | if self.featureType == FeatureType.ORDERS: 61 | self.observation_space = spaces.Box(low=0.0, high=10.0, shape=(2*self.lookback+1, self.bookSize, 2)) 62 | else: 63 | self.observation_space = spaces.Box(low=0.0, high=100.0, shape=(self.lookback+1, 3)) 64 | self.callbacks = callbacks 65 | self.episodeActions = [] 66 | 67 | def setOrderbook(self, orderbook): 68 | self.orderbook = orderbook 69 | 70 | def setSide(self, side): 71 | self.side = side 72 | 73 | def setLevels(self, min, max, step): 74 | self.levels = self._generate_Sequence(min=min, max=max, step=step) 75 | self.action_space = spaces.Discrete(len(self.levels)) 76 | 77 | def setT(self, min, max, step): 78 | self.T = self._generate_Sequence(min=min, max=max, step=step) 79 | 80 | def setI(self, min, max, step): 81 | self.I = self._generate_Sequence(min=min, max=max, step=step) 82 | 83 | def setLookback(self, lookback): 84 | self.lookback = lookback 85 | if self.bookSize is not None: 86 | self.observation_space = spaces.Box(low=0.0, high=10.0, shape=(2*self.lookback, self.bookSize, 2)) 87 | 88 | def setBookSize(self, bookSize): 89 | self.bookSize = bookSize 90 | if self.lookback is not None: 91 | self.observation_space = spaces.Box(low=0.0, high=10.0, shape=(2*self.lookback, self.bookSize, 2)) 92 | 93 | 94 | 95 | def 
_determine_next_inventory(self, execution): 96 | qty_remaining = execution.getQtyNotExecuted() 97 | # TODO: Working with floats requires such an ugly threshold 98 | if qty_remaining > 0.0000001: 99 | # Approximate next closest inventory given remaining and I 100 | i_next = min([0.0] + self.I, key=lambda x: abs(x - qty_remaining)) 101 | logging.info('Qty remain: ' + str(qty_remaining) 102 | + ' -> inventory: ' + str(qty_remaining) 103 | + ' -> next i: ' + str(i_next)) 104 | else: 105 | i_next = 0.0 106 | 107 | logging.info('Next inventory for execution: ' + str(i_next)) 108 | return i_next 109 | 110 | def _determine_next_time(self, t): 111 | if t > 0: 112 | t_next = self.T[self.T.index(t) - 1] 113 | else: 114 | t_next = t 115 | 116 | logging.info('Next timestep for execution: ' + str(t_next)) 117 | return t_next 118 | 119 | def _determine_runtime(self, t): 120 | if t != 0: 121 | T_index = self.T.index(t) 122 | runtime = self.T[T_index] - self.T[T_index - 1] 123 | else: 124 | runtime = t 125 | return runtime 126 | 127 | def _get_random_orderbook_state(self): 128 | return self.orderbook.getRandomState(runtime=max(self.T), min_head=self.lookback) 129 | 130 | def _create_execution(self, a): 131 | runtime = self._determine_runtime(self.actionState.getT()) 132 | orderbookState = self.orderbook.getState(self.orderbookIndex) 133 | 134 | if runtime <= 0.0 or a is None: 135 | price = None 136 | ot = OrderType.MARKET 137 | else: 138 | price = orderbookState.getPriceAtLevel(self.side, a) 139 | ot = OrderType.LIMIT 140 | 141 | order = Order( 142 | orderType=ot, 143 | orderSide=self.side, 144 | cty=self.actionState.getI(), 145 | price=price 146 | ) 147 | execution = Action(a=a, runtime=runtime) 148 | execution.setState(self.actionState) 149 | execution.setOrder(order) 150 | execution.setOrderbookState(orderbookState) 151 | execution.setOrderbookIndex(self.orderbookIndex) 152 | execution.setReferencePrice(orderbookState.getBestAsk()) 153 | return execution 154 | 155 | def _update_execution(self, execution, a): 156 | runtime = self._determine_runtime(self.actionState.getT()) 157 | orderbookState = self.orderbook.getState(self.orderbookIndex) 158 | 159 | if runtime <= 0.0 or a is None: 160 | price = None 161 | ot = OrderType.MARKET 162 | else: 163 | price = execution.getOrderbookState().getPriceAtLevel(self.side, a) 164 | ot = OrderType.LIMIT 165 | 166 | order = Order( 167 | orderType=ot, 168 | orderSide=self.side, 169 | cty=self.actionState.getI(), 170 | price=price 171 | ) 172 | execution.setRuntime(runtime) 173 | execution.setState(self.actionState) 174 | execution.setOrder(order) 175 | execution.setOrderbookState(orderbookState) 176 | execution.setOrderbookIndex(self.orderbookIndex) 177 | return execution 178 | 179 | def _makeFeature(self, orderbookIndex, qty): 180 | if self.featureType == FeatureType.ORDERS: 181 | return self.orderbook.getBidAskFeatures( 182 | state_index=orderbookIndex, 183 | lookback=self.lookback, 184 | qty=self.I[-1],#i_next+0.0001, 185 | normalize=True, 186 | price=True, 187 | size=True, 188 | levels = self.bookSize 189 | ) 190 | else: 191 | state = self.orderbook.getState(orderbookIndex) 192 | return self.orderbook.getHistTradesFeature( 193 | ts=state.getUnixTimestamp(), 194 | lookback=self.lookback, 195 | normalize=False, 196 | norm_size=qty, 197 | norm_price=state.getBidAskMid() 198 | ) 199 | 200 | def step(self, action): 201 | self.episode += 1 202 | action = self.levels[action] 203 | self.episodeActions.append(action) 204 | if self.execution is None: 205 | self.execution = 
self._create_execution(action) 206 | else: 207 | self.execution = self._update_execution(self.execution, action) 208 | 209 | logging.info( 210 | 'Created/Updated execution.' + 211 | '\nAction: ' + str(action) + ' (' + str(self.execution.getOrder().getType()) + ')' + 212 | '\nt: ' + str(self.actionState.getT()) + 213 | '\nruntime: ' + str(self.execution.getRuntime()) + 214 | '\ni: ' + str(self.actionState.getI()) 215 | ) 216 | self.execution, counterTrades = self.execution.run(self.orderbook) 217 | 218 | i_next = self._determine_next_inventory(self.execution) 219 | t_next = self._determine_next_time(self.execution.getState().getT()) 220 | 221 | feature = self._makeFeature(orderbookIndex=self.execution.getOrderbookIndex(), qty=i_next) 222 | state_next = ActionState(t_next, i_next, {self.featureType.value: feature}) 223 | done = self.execution.isFilled() or state_next.getI() == 0 224 | if done: 225 | reward = self.execution.getReward() 226 | volumeRatio = 1.0 227 | if self.callbacks: # truthiness check; 'self.callbacks is not []' compared identity and was always True 228 | for cb in self.callbacks: 229 | cb.on_episode_end(self.episode, {'episode_reward': reward, 'episode_actions': self.episodeActions}) 230 | self.episodeActions = [] 231 | else: 232 | reward, volumeRatio = self.execution.calculateRewardWeighted(counterTrades, self.I[-1]) 233 | 234 | logging.info( 235 | 'Run execution.' + 236 | '\nTrades: ' + str(len(counterTrades)) + 237 | '\nReward: ' + str(reward) + ' (Ratio: ' + str(volumeRatio) + ')' + 238 | '\nDone: ' + str(done) 239 | ) 240 | self.orderbookIndex = self.execution.getOrderbookIndex() 241 | self.actionState = state_next 242 | return state_next.toArray(), reward, done, {} 243 | 244 | def reset(self): 245 | return self._reset(t=self.T[-1], i=self.I[-1]) 246 | 247 | def _reset(self, t, i): 248 | orderbookState, orderbookIndex = self._get_random_orderbook_state() 249 | feature = self._makeFeature(orderbookIndex=orderbookIndex, qty=i) 250 | state = ActionState(t, i, {self.featureType.value: feature}) #np.array([[t, i]]) 251 | self.execution = None 252 | self.orderbookIndex = orderbookIndex 253 | self.actionState = state 254 | return state.toArray() 255 | 256 | def render(self, mode='human', close=False): 257 | pass 258 | 259 | def seed(self, seed): 260 | pass 261 | 262 | 263 | # import gym_ctc_executioner 264 | # env = gym.make("ctc-executioner-v0") 265 | # env.reset() 266 | -------------------------------------------------------------------------------- /gym_ctc_marketmaker/__init__.py: -------------------------------------------------------------------------------- 1 | from gym.envs.registration import register 2 | 3 | register( 4 | id='ctc-marketmaker-v0', 5 | entry_point='gym_ctc_marketmaker.envs:MarketMakerEnv' 6 | ) 7 | -------------------------------------------------------------------------------- /gym_ctc_marketmaker/envs/__init__.py: -------------------------------------------------------------------------------- 1 | from gym_ctc_marketmaker.envs.marketmaker_env import MarketMakerEnv 2 | -------------------------------------------------------------------------------- /gym_ctc_marketmaker/envs/marketmaker_env.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import itertools 3 | from gym import spaces 4 | from ctc_executioner.action import Action 5 | from ctc_executioner.action_state import ActionState 6 | from ctc_executioner.order import Order 7 | from ctc_executioner.order_type import OrderType 8 | from ctc_executioner.order_side import OrderSide 9 | import 
gym_ctc_executioner.envs.execution_env as execution_env 10 | #logging.basicConfig(level=logging.INFO) 11 | 12 | class MarketMakerEnv(execution_env.ExecutionEnv): 13 | 14 | def __init__(self): 15 | self.orderbookIndexBuy = None 16 | self.orderbookIndexSell = None 17 | self.actionStateBuy = None 18 | self.actionStateSell = None 19 | self.executionBuy = None 20 | self.executionSell = None 21 | self._configure() 22 | 23 | def _configure(self, 24 | orderbook=None, 25 | levels=(-50, 50, 1), 26 | T=(0, 100, 10), 27 | I=(0, 1, 0.1), 28 | lookback=25, 29 | bookSize=10 30 | ): 31 | self.orderbook = orderbook 32 | self.levels = self._generate_Sequence(min=levels[0], max=levels[1], step=levels[2]) 33 | self.levels = list(itertools.product(self.levels, self.levels)) 34 | self.T = self._generate_Sequence(min=T[0], max=T[1], step=T[2]) 35 | self.I = self._generate_Sequence(min=I[0], max=I[1], step=I[2]) 36 | self.lookback = lookback # results in (bid|size, ask|size) -> 4*5 37 | self.bookSize = bookSize 38 | self.action_space = spaces.Discrete(len(self.levels)) 39 | self.observation_space = spaces.Box(low=0.0, high=10.0, shape=(2*self.lookback, self.bookSize, 2)) 40 | 41 | def setSide(self, side): 42 | pass 43 | 44 | def _create_execution(self, a, actionState, orderbookIndex, side): 45 | runtime = self._determine_runtime(actionState.getT()) 46 | orderbookState = self.orderbook.getState(orderbookIndex) 47 | 48 | if runtime <= 0.0 or a is None: 49 | price = None 50 | ot = OrderType.MARKET 51 | else: 52 | price = orderbookState.getPriceAtLevel(side, a) 53 | ot = OrderType.LIMIT 54 | 55 | order = Order( 56 | orderType=ot, 57 | orderSide=side, 58 | cty=actionState.getI(), 59 | price=price 60 | ) 61 | execution = Action(a=a, runtime=runtime) 62 | execution.setState(actionState) 63 | execution.setOrder(order) 64 | execution.setOrderbookState(orderbookState) 65 | execution.setOrderbookIndex(orderbookIndex) 66 | execution.setReferencePrice(orderbookState.getBestAsk()) 67 | return execution 68 | 69 | def _update_execution(self, execution, a, actionState, orderbookIndex, side): 70 | runtime = self._determine_runtime(actionState.getT()) 71 | orderbookState = self.orderbook.getState(orderbookIndex) 72 | 73 | if runtime <= 0.0 or a is None: 74 | price = None 75 | ot = OrderType.MARKET 76 | else: 77 | price = execution.getOrderbookState().getPriceAtLevel(side, a) 78 | ot = OrderType.LIMIT 79 | 80 | order = Order( 81 | orderType=ot, 82 | orderSide=side, 83 | cty=actionState.getI(), 84 | price=price 85 | ) 86 | execution.setRuntime(runtime) 87 | execution.setState(actionState) 88 | execution.setOrder(order) 89 | execution.setOrderbookState(orderbookState) 90 | execution.setOrderbookIndex(orderbookIndex) 91 | return execution 92 | 93 | def step(self, action): 94 | # print('action') 95 | # print(action) 96 | actionBuy = self.levels[action][0] 97 | actionSell = self.levels[action][1] 98 | 99 | if self.executionBuy is None or self.executionSell is None: 100 | self.executionBuy = self._create_execution(a=actionBuy, actionState=self.actionStateBuy, orderbookIndex=self.orderbookIndexBuy, side=OrderSide.BUY) 101 | self.executionSell = self._create_execution(a=actionSell, actionState=self.actionStateSell, orderbookIndex=self.orderbookIndexSell, side=OrderSide.SELL) 102 | else: 103 | if not self.executionBuy.isFilled(): 104 | self.executionBuy = self._update_execution(execution=self.executionBuy, a=actionBuy, actionState=self.actionStateBuy, orderbookIndex=self.orderbookIndexBuy, side=OrderSide.BUY) 105 | if not 
self.executionSell.isFilled(): 106 | self.executionSell = self._update_execution(execution=self.executionSell, a=actionSell, actionState=self.actionStateSell, orderbookIndex=self.orderbookIndexSell, side=OrderSide.SELL) 107 | 108 | # logging.info( 109 | # 'Created/Updated execution.' + 110 | # '\nAction: ' + str(action) + ' (' + str(self.execution.getOrder().getType()) + ')' + 111 | # '\nt: ' + str(self.actionState.getT()) + 112 | # '\nruntime: ' + str(self.execution.getRuntime()) + 113 | # '\ni: ' + str(self.actionState.getI()) 114 | # ) 115 | if not self.executionBuy.isFilled(): 116 | self.executionBuy, counterTradesBuy = self.executionBuy.run(self.orderbook) 117 | i_next_buy = self._determine_next_inventory(self.executionBuy) 118 | t_next_buy = self._determine_next_time(self.executionBuy.getState().getT()) 119 | bidAskFeatureBuy = self._makeFeature(orderbookIndex=self.executionBuy.getOrderbookIndex(), qty=i_next_buy) # qty is a required parameter of ExecutionEnv._makeFeature 120 | self.actionStateBuy = ActionState(t_next_buy, i_next_buy, {'bidask': bidAskFeatureBuy}) 121 | self.orderbookIndexBuy = self.executionBuy.getOrderbookIndex() 122 | price_buy = self.executionBuy.calculateAvgPrice(counterTradesBuy) 123 | else: 124 | price_buy = self.executionBuy.getAvgPrice() 125 | 126 | if not self.executionSell.isFilled(): 127 | self.executionSell, counterTradesSell = self.executionSell.run(self.orderbook) 128 | i_next_sell = self._determine_next_inventory(self.executionSell) 129 | t_next_sell = self._determine_next_time(self.executionSell.getState().getT()) 130 | bidAskFeatureSell = self._makeFeature(orderbookIndex=self.executionSell.getOrderbookIndex(), qty=i_next_sell) # qty is a required parameter of ExecutionEnv._makeFeature 131 | self.actionStateSell = ActionState(t_next_sell, i_next_sell, {'bidask': bidAskFeatureSell}) 132 | self.orderbookIndexSell = self.executionSell.getOrderbookIndex() 133 | price_sell = self.executionSell.calculateAvgPrice(counterTradesSell) 134 | else: 135 | price_sell = self.executionSell.getAvgPrice() 136 | 137 | 138 | done_buy = self.executionBuy.isFilled() or self.actionStateBuy.getI() == 0 139 | done_sell = self.executionSell.isFilled() or self.actionStateSell.getI() == 0 140 | 141 | print('price buy: ' + str(price_buy)) 142 | print('price sell: ' + str(price_sell)) 143 | if price_buy == 0 or price_sell == 0: 144 | reward = 0.0 145 | else: 146 | reward = price_sell - price_buy 147 | print('reward: ' + str(reward)) 148 | 149 | # logging.info( 150 | # 'Run execution.' 
+ 151 | # '\nTrades: ' + str(len(counterTrades)) + 152 | # '\nReward: ' + str(reward) + ' (Ratio: ' + str(volumeRatio) + ')' + 153 | # '\nDone: ' + str(done) 154 | # ) 155 | 156 | if self.orderbookIndexBuy >= self.orderbookIndexSell: 157 | state_next = self.actionStateBuy 158 | else: 159 | state_next = self.actionStateSell 160 | return state_next.toArray(), reward, (done_buy and done_sell), {} 161 | 162 | def reset(self): 163 | return self._reset(t=self.T[-1], i=self.I[-1]) 164 | 165 | def _reset(self, t, i): 166 | #self.orderbook = copy.deepcopy(self.orderbookOriginal) # TODO: Slow but currently required to reset after every episode due to change of order book states during matching 167 | orderbookState, orderbookIndex = self._get_random_orderbook_state() 168 | bidAskFeature = self._makeFeature(orderbookIndex=orderbookIndex, qty=i) # qty is a required parameter of ExecutionEnv._makeFeature; this mirrors ExecutionEnv._reset 169 | state = ActionState(t, i, {'bidask': bidAskFeature}) #np.array([[t, i]]) 170 | 171 | self.executionBuy = None 172 | self.executionSell = None 173 | 174 | self.orderbookIndexBuy = orderbookIndex 175 | self.orderbookIndexSell = orderbookIndex 176 | 177 | self.actionStateBuy = state 178 | self.actionStateSell = state 179 | 180 | return state.toArray() 181 | 182 | def render(self, mode='human', close=False): 183 | pass 184 | 185 | def seed(self, seed): 186 | pass 187 | -------------------------------------------------------------------------------- /images/analysis-limit-impatient.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/analysis-limit-impatient.png -------------------------------------------------------------------------------- /images/analysis-limit-wide-spread-buy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/analysis-limit-wide-spread-buy.png -------------------------------------------------------------------------------- /images/analysis-limit-wide-spread-sell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/analysis-limit-wide-spread-sell.png -------------------------------------------------------------------------------- /images/ba-ob-min.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ba-ob-min.png -------------------------------------------------------------------------------- /images/behaviour-100s-buy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-100s-buy.png -------------------------------------------------------------------------------- /images/behaviour-100s-sell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-100s-sell.png -------------------------------------------------------------------------------- /images/behaviour-10s-buy.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-10s-buy.png -------------------------------------------------------------------------------- /images/behaviour-10s-sell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-10s-sell.png -------------------------------------------------------------------------------- /images/behaviour-30s-buy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-30s-buy.png -------------------------------------------------------------------------------- /images/behaviour-30s-sell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-30s-sell.png -------------------------------------------------------------------------------- /images/behaviour-60s-buy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-60s-buy.png -------------------------------------------------------------------------------- /images/behaviour-60s-sell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-60s-sell.png -------------------------------------------------------------------------------- /images/behaviour-price.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-price.png -------------------------------------------------------------------------------- /images/behaviour-up-100s-buy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-up-100s-buy.png -------------------------------------------------------------------------------- /images/behaviour-up-100s-sell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-up-100s-sell.png -------------------------------------------------------------------------------- /images/behaviour-up-10s-buy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-up-10s-buy.png -------------------------------------------------------------------------------- /images/behaviour-up-10s-sell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-up-10s-sell.png -------------------------------------------------------------------------------- /images/behaviour-up-30s-buy.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-up-30s-buy.png -------------------------------------------------------------------------------- /images/behaviour-up-30s-sell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-up-30s-sell.png -------------------------------------------------------------------------------- /images/behaviour-up-60s-buy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-up-60s-buy.png -------------------------------------------------------------------------------- /images/behaviour-up-60s-sell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/behaviour-up-60s-sell.png -------------------------------------------------------------------------------- /images/bidask-price-correlation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/bidask-price-correlation.png -------------------------------------------------------------------------------- /images/bidask-price-entropy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/bidask-price-entropy.png -------------------------------------------------------------------------------- /images/bidask-size-correlation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/bidask-size-correlation.png -------------------------------------------------------------------------------- /images/bidask-size-entropy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/bidask-size-entropy.png -------------------------------------------------------------------------------- /images/cnn_1_buy_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_1_buy_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_1_buy_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_1_buy_rewards.png -------------------------------------------------------------------------------- /images/cnn_1_buy_trades_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_1_buy_trades_mean_actions.png 
-------------------------------------------------------------------------------- /images/cnn_1_buy_trades_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_1_buy_trades_rewards.png -------------------------------------------------------------------------------- /images/cnn_1_sell_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_1_sell_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_1_sell_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_1_sell_rewards.png -------------------------------------------------------------------------------- /images/cnn_1_sell_trades_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_1_sell_trades_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_1_sell_trades_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_1_sell_trades_rewards.png -------------------------------------------------------------------------------- /images/cnn_2_buy_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_2_buy_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_2_buy_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_2_buy_rewards.png -------------------------------------------------------------------------------- /images/cnn_2_buy_trades_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_2_buy_trades_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_2_buy_trades_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_2_buy_trades_rewards.png -------------------------------------------------------------------------------- /images/cnn_2_sell_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_2_sell_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_2_sell_rewards.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_2_sell_rewards.png -------------------------------------------------------------------------------- /images/cnn_2_sell_trades_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_2_sell_trades_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_2_sell_trades_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_2_sell_trades_rewards.png -------------------------------------------------------------------------------- /images/cnn_nn_1_buy_bidask_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_1_buy_bidask_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_nn_1_buy_bidask_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_1_buy_bidask_rewards.png -------------------------------------------------------------------------------- /images/cnn_nn_1_buy_trades_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_1_buy_trades_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_nn_1_buy_trades_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_1_buy_trades_rewards.png -------------------------------------------------------------------------------- /images/cnn_nn_1_sell_bidask_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_1_sell_bidask_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_nn_1_sell_bidask_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_1_sell_bidask_rewards.png -------------------------------------------------------------------------------- /images/cnn_nn_1_sell_trades_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_1_sell_trades_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_nn_1_sell_trades_rewards.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_1_sell_trades_rewards.png -------------------------------------------------------------------------------- /images/cnn_nn_2_buy_bidask_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_2_buy_bidask_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_nn_2_buy_bidask_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_2_buy_bidask_rewards.png -------------------------------------------------------------------------------- /images/cnn_nn_2_buy_trades_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_2_buy_trades_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_nn_2_buy_trades_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_2_buy_trades_rewards.png -------------------------------------------------------------------------------- /images/cnn_nn_2_sell_bidask_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_2_sell_bidask_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_nn_2_sell_bidask_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_2_sell_bidask_rewards.png -------------------------------------------------------------------------------- /images/cnn_nn_2_sell_trades_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_2_sell_trades_mean_actions.png -------------------------------------------------------------------------------- /images/cnn_nn_2_sell_trades_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/cnn_nn_2_sell_trades_rewards.png -------------------------------------------------------------------------------- /images/data-pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/data-pipeline.png -------------------------------------------------------------------------------- /images/data-trade-volume.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/data-trade-volume.png 
-------------------------------------------------------------------------------- /images/data-volmap-cancelled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/data-volmap-cancelled.png -------------------------------------------------------------------------------- /images/data-volmap-created.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/data-volmap-created.png -------------------------------------------------------------------------------- /images/data-volmap-traded.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/data-volmap-traded.png -------------------------------------------------------------------------------- /images/discuss-rl-sv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/discuss-rl-sv.png -------------------------------------------------------------------------------- /images/dqn_hyperparameters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/dqn_hyperparameters.png -------------------------------------------------------------------------------- /images/drl-pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/drl-pipeline.png -------------------------------------------------------------------------------- /images/drl-qvalues.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/drl-qvalues.png -------------------------------------------------------------------------------- /images/eval-limit-down.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/eval-limit-down.png -------------------------------------------------------------------------------- /images/eval-limit-sine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/eval-limit-sine.png -------------------------------------------------------------------------------- /images/evaluation-orderbook.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/evaluation-orderbook.png -------------------------------------------------------------------------------- /images/features-bidask.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/features-bidask.png 
-------------------------------------------------------------------------------- /images/kearns-frontier.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/kearns-frontier.png -------------------------------------------------------------------------------- /images/kearns-return.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/kearns-return.png -------------------------------------------------------------------------------- /images/kearns-std.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/kearns-std.png -------------------------------------------------------------------------------- /images/lob-simple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/lob-simple.png -------------------------------------------------------------------------------- /images/ml-rl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ml-rl.png -------------------------------------------------------------------------------- /images/ob-ba-max.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ob-ba-max.png -------------------------------------------------------------------------------- /images/ob-ba-min.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ob-ba-min.png -------------------------------------------------------------------------------- /images/ob-ba-pie-all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ob-ba-pie-all.png -------------------------------------------------------------------------------- /images/ob-ba-pie-cancelled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ob-ba-pie-cancelled.png -------------------------------------------------------------------------------- /images/ob-ba-pie-created.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ob-ba-pie-created.png -------------------------------------------------------------------------------- /images/ob-ba-pie-trades.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ob-ba-pie-trades.png -------------------------------------------------------------------------------- /images/ob-price-bars-rejected.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ob-price-bars-rejected.png -------------------------------------------------------------------------------- /images/ob-price-bars.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ob-price-bars.png -------------------------------------------------------------------------------- /images/ob-price.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/ob-price.png -------------------------------------------------------------------------------- /images/orderbook-gdax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/orderbook-gdax.png -------------------------------------------------------------------------------- /images/q_1_10000_BUY_acc_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_BUY_acc_rewards.png -------------------------------------------------------------------------------- /images/q_1_10000_BUY_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_BUY_mean_actions.png -------------------------------------------------------------------------------- /images/q_1_10000_BUY_mean_backtest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_BUY_mean_backtest.png -------------------------------------------------------------------------------- /images/q_1_10000_BUY_mean_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_BUY_mean_rewards.png -------------------------------------------------------------------------------- /images/q_1_10000_BUY_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_BUY_rewards.png -------------------------------------------------------------------------------- /images/q_1_10000_SELL_acc_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_SELL_acc_rewards.png -------------------------------------------------------------------------------- /images/q_1_10000_SELL_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_SELL_mean_actions.png 
-------------------------------------------------------------------------------- /images/q_1_10000_SELL_mean_backtest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_SELL_mean_backtest.png -------------------------------------------------------------------------------- /images/q_1_10000_SELL_mean_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_SELL_mean_rewards.png -------------------------------------------------------------------------------- /images/q_1_10000_SELL_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_1_10000_SELL_rewards.png -------------------------------------------------------------------------------- /images/q_2_10000_BUY_acc_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_BUY_acc_rewards.png -------------------------------------------------------------------------------- /images/q_2_10000_BUY_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_BUY_mean_actions.png -------------------------------------------------------------------------------- /images/q_2_10000_BUY_mean_backtest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_BUY_mean_backtest.png -------------------------------------------------------------------------------- /images/q_2_10000_BUY_mean_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_BUY_mean_rewards.png -------------------------------------------------------------------------------- /images/q_2_10000_BUY_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_BUY_rewards.png -------------------------------------------------------------------------------- /images/q_2_10000_SELL_acc_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_SELL_acc_rewards.png -------------------------------------------------------------------------------- /images/q_2_10000_SELL_mean_actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_SELL_mean_actions.png -------------------------------------------------------------------------------- /images/q_2_10000_SELL_mean_backtest.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_SELL_mean_backtest.png -------------------------------------------------------------------------------- /images/q_2_10000_SELL_mean_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_SELL_mean_rewards.png -------------------------------------------------------------------------------- /images/q_2_10000_SELL_rewards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/q_2_10000_SELL_rewards.png -------------------------------------------------------------------------------- /images/rl-dqn-agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/rl-dqn-agent.png -------------------------------------------------------------------------------- /images/rl-dqn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/rl-dqn.png -------------------------------------------------------------------------------- /images/rl-env-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/rl-env-overview.png -------------------------------------------------------------------------------- /images/rl-environment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/rl-environment.png -------------------------------------------------------------------------------- /images/rl-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/rl-overview.png -------------------------------------------------------------------------------- /images/rl-pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/rl-pipeline.png -------------------------------------------------------------------------------- /images/sample-down-price.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/sample-down-price.png -------------------------------------------------------------------------------- /images/sample-up-price.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/sample-up-price.png -------------------------------------------------------------------------------- /images/setup-actions.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/setup-actions.png -------------------------------------------------------------------------------- /images/setup-cnn-output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/setup-cnn-output.png -------------------------------------------------------------------------------- /images/setup-inventory.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/setup-inventory.png -------------------------------------------------------------------------------- /images/setup-limit-levels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/setup-limit-levels.png -------------------------------------------------------------------------------- /images/setup-time-horizon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/images/setup-time-horizon.png -------------------------------------------------------------------------------- /notebooks/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/notebooks/.DS_Store -------------------------------------------------------------------------------- /notebooks/img/price-return.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/notebooks/img/price-return.png -------------------------------------------------------------------------------- /report/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/.DS_Store -------------------------------------------------------------------------------- /report/README.txt: -------------------------------------------------------------------------------- 1 | Using the template 2 | ------------------ 3 | 4 | This is the TU Delft LaTeX template for reports and theses. It is designed to 5 | work with all versions of LaTeX, but if you want to adhere to the TU Delft house 6 | style, you need to use XeLaTeX, as it supports TrueType and OpenType fonts. The 7 | document can be compiled with 8 | 9 | xelatex report 10 | bibtex report 11 | xelatex report 12 | xelatex report 13 | 14 | This is equivalent to selecting 'XeLaTeX+BibTeX' or similar in your favorite TeX 15 | editing program. 16 | 17 | A sample document, as well as documentation for template options, can be found 18 | in example.pdf. An example with the native LaTeX fonts, compiled using the 19 | 'nativefonts' option (or with pdflatex), can be found in 20 | example-nativefonts.pdf. 21 | 22 | A separate example document is available which generates a cover image (front, 23 | back and spine). 
This document can be generated with
24 | 
25 | xelatex cover
26 | xelatex cover
27 | 
28 | or simply with the 'XeLaTeX' option in TeXworks or an equivalent program.
29 | 
30 | 
31 | 
32 | Installation on Windows
33 | -----------------------
34 | 
35 | The TU Delft LaTeX template has been tested to work with the most recent version
36 | of MiKTeX at the time of this writing (2.9). The following packages are required
37 | on top of a basic MiKTeX installation to make full use of the template:
38 | 
39 | caption, fancyhdr, filehook, footmisc, fourier, l3kernel, l3packages,
40 | metalogo, mptopdf, ms, natbib, pgf, realscripts, tipa, titlesec, tocbibind,
41 | unicode-math, url, xcolor, xetex-def
42 | 
43 | Note that MiKTeX will generally automatically install these packages if they are
44 | missing from your installation.
45 | 
46 | 
47 | 
48 | Installation on Linux (Debian/Ubuntu)
49 | -------------------------------------
50 | 
51 | Recent versions of Debian, and derived distributions such as Ubuntu, use the TeX
52 | Live system. Install the following packages to make full use of this
53 | template:
54 | 
55 | texlive, texlive-fonts-extra, texlive-math-extra, texlive-lang-dutch,
56 | texlive-lang-english, texlive-latex-extra, texlive-xetex
57 | 
58 | 
--------------------------------------------------------------------------------
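As a convenience, the xelatex/bibtex cycle described in the README above can also be driven by latexmk, which reruns the tools until cross-references stabilise. latexmk is a standard TeX Live/MiKTeX utility, not part of this template:

latexmk -xelatex report
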
/report/abstract.tex:
--------------------------------------------------------------------------------
1 | \chapter*{Abstract}
2 | \setheader{Abstract}
3 | 
4 | \begin{abstract}
5 | For various reasons, financial institutions often make use of high-level trading strategies when buying and selling assets.
6 | Many individuals, irrespective of their level of prior trading knowledge, have recently entered the field of trading due to the increasing popularity of cryptocurrencies, which offer a low entry barrier for trading.
7 | Regardless of the intention or trading strategy of these traders, the invariable outcome is their attempt to buy or sell assets.
8 | However, in such a competitive field, experienced market participants seek to exploit any advantage over those who are less experienced, for financial gain.
9 | Therefore, this work aims to make a contribution to the important issue of how to optimize the process of buying and selling assets on exchanges, and to do so in a form that is accessible to other traders.
10 | 
11 | This research concerns the optimization of limit order placement within a given time horizon of 100 seconds and how to transpose this process into an end-to-end learning pipeline in the context of reinforcement learning.
12 | Features were constructed from raw market event data that related to movements of the Bitcoin/USD trading pair on the Bittrex cryptocurrency exchange. These features were then used by deep reinforcement learning agents in order to learn a limit order placement policy.
13 | To facilitate the implementation of this process, a reinforcement learning environment that emulates a local broker was developed as part of this work.
14 | Furthermore, we defined an evaluation procedure which can determine the capabilities and limitations of the policies learned by the reinforcement learning agents and ultimately provides a means to quantify the optimization achieved with our approach.
15 | 
16 | Our analysis of the results of this work includes the identification of patterns in cryptocurrency trading that were formed by market participants who posted orders, and a conceptual framework to construct data features containing these patterns.
17 | We developed a fully-functioning reinforcement learning environment that emulates a local broker and, by means of this process, we identified which components are essential.
18 | With the use of this environment, we were able to train and test multiple reinforcement learning agents whose aims were to optimize the placement of buy and sell limit orders.
19 | During the evaluation, we were able to improve the parameter settings of the constructed reinforcement learning environment and therefore improve the policy learned by the agents.
20 | Ultimately, we achieved a significant improvement in limit order placement with the application of a state-of-the-art deep Q-network agent and were able to simulate purchases and sales of 1.0 BTC at a price that was up to \$33.89 better than the market price.
21 | 
22 | We have made use of the OpenAI Gym\footnote{https://github.com/openai/gym} library and contributed our work to the community\footnote{https://github.com/backender/ctc-executioner} to enable further investigations to be carried out.
23 | The work done in this thesis can be used as a framework to (1) build a component that acts as an intermediary between trader and exchange and (2) enable exchanges to provide a new order type to be used by traders.
24 | \end{abstract}
--------------------------------------------------------------------------------
/report/cover.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/cover.jpg
--------------------------------------------------------------------------------
/report/cover.tex:
--------------------------------------------------------------------------------
1 | \documentclass{tudelft-report}
2 | 
3 | %% We need to increase the paper size to slightly larger than twice A4 to make
4 | %% room for a front and back cover, including the spine.
5 | \geometry{papersize={1226bp,851bp}}
6 | 
7 | \begin{document}
8 | 
9 | \title{Title}
10 | \subtitle{Optional subtitle}
11 | \author{J.\ Random Author}
12 | \affiliation{Technische Universiteit Delft}
13 | \coverimage{cover.jpg}
14 | \covertext{
15 | \textbf{Cover Text} \\
16 | possibly \\
17 | spanning \\
18 | multiple \\
19 | lines
20 | \vfill
21 | ISBN 000-00-0000-000-0
22 | }
23 | 
24 | %% The 'back' option enables the generation of the back cover.
25 | \makecover[back,whitelogo] 26 | 27 | \end{document} 28 | 29 | -------------------------------------------------------------------------------- /report/cover/back.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/cover/back.pdf -------------------------------------------------------------------------------- /report/cover/front.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/cover/front.pdf -------------------------------------------------------------------------------- /report/cover/logo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/cover/logo.pdf -------------------------------------------------------------------------------- /report/cover/logo_black.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/cover/logo_black.pdf -------------------------------------------------------------------------------- /report/cover/logo_white.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/cover/logo_white.pdf -------------------------------------------------------------------------------- /report/fonts/Apache_License.txt: -------------------------------------------------------------------------------- 1 | Font data copyright Google 2012 2 | 3 | Apache License 4 | Version 2.0, January 2004 5 | http://www.apache.org/licenses/ 6 | 7 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 8 | 9 | 1. Definitions. 10 | 11 | "License" shall mean the terms and conditions for use, reproduction, 12 | and distribution as defined by Sections 1 through 9 of this document. 13 | 14 | "Licensor" shall mean the copyright owner or entity authorized by 15 | the copyright owner that is granting the License. 16 | 17 | "Legal Entity" shall mean the union of the acting entity and all 18 | other entities that control, are controlled by, or are under common 19 | control with that entity. For the purposes of this definition, 20 | "control" means (i) the power, direct or indirect, to cause the 21 | direction or management of such entity, whether by contract or 22 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 23 | outstanding shares, or (iii) beneficial ownership of such entity. 24 | 25 | "You" (or "Your") shall mean an individual or Legal Entity 26 | exercising permissions granted by this License. 27 | 28 | "Source" form shall mean the preferred form for making modifications, 29 | including but not limited to software source code, documentation 30 | source, and configuration files. 31 | 32 | "Object" form shall mean any form resulting from mechanical 33 | transformation or translation of a Source form, including but 34 | not limited to compiled object code, generated documentation, 35 | and conversions to other media types. 
36 | 37 | "Work" shall mean the work of authorship, whether in Source or 38 | Object form, made available under the License, as indicated by a 39 | copyright notice that is included in or attached to the work 40 | (an example is provided in the Appendix below). 41 | 42 | "Derivative Works" shall mean any work, whether in Source or Object 43 | form, that is based on (or derived from) the Work and for which the 44 | editorial revisions, annotations, elaborations, or other modifications 45 | represent, as a whole, an original work of authorship. For the purposes 46 | of this License, Derivative Works shall not include works that remain 47 | separable from, or merely link (or bind by name) to the interfaces of, 48 | the Work and Derivative Works thereof. 49 | 50 | "Contribution" shall mean any work of authorship, including 51 | the original version of the Work and any modifications or additions 52 | to that Work or Derivative Works thereof, that is intentionally 53 | submitted to Licensor for inclusion in the Work by the copyright owner 54 | or by an individual or Legal Entity authorized to submit on behalf of 55 | the copyright owner. For the purposes of this definition, "submitted" 56 | means any form of electronic, verbal, or written communication sent 57 | to the Licensor or its representatives, including but not limited to 58 | communication on electronic mailing lists, source code control systems, 59 | and issue tracking systems that are managed by, or on behalf of, the 60 | Licensor for the purpose of discussing and improving the Work, but 61 | excluding communication that is conspicuously marked or otherwise 62 | designated in writing by the copyright owner as "Not a Contribution." 63 | 64 | "Contributor" shall mean Licensor and any individual or Legal Entity 65 | on behalf of whom a Contribution has been received by Licensor and 66 | subsequently incorporated within the Work. 67 | 68 | 2. Grant of Copyright License. Subject to the terms and conditions of 69 | this License, each Contributor hereby grants to You a perpetual, 70 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 71 | copyright license to reproduce, prepare Derivative Works of, 72 | publicly display, publicly perform, sublicense, and distribute the 73 | Work and such Derivative Works in Source or Object form. 74 | 75 | 3. Grant of Patent License. Subject to the terms and conditions of 76 | this License, each Contributor hereby grants to You a perpetual, 77 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 78 | (except as stated in this section) patent license to make, have made, 79 | use, offer to sell, sell, import, and otherwise transfer the Work, 80 | where such license applies only to those patent claims licensable 81 | by such Contributor that are necessarily infringed by their 82 | Contribution(s) alone or by combination of their Contribution(s) 83 | with the Work to which such Contribution(s) was submitted. If You 84 | institute patent litigation against any entity (including a 85 | cross-claim or counterclaim in a lawsuit) alleging that the Work 86 | or a Contribution incorporated within the Work constitutes direct 87 | or contributory patent infringement, then any patent licenses 88 | granted to You under this License for that Work shall terminate 89 | as of the date such litigation is filed. 90 | 91 | 4. Redistribution. 
You may reproduce and distribute copies of the 92 | Work or Derivative Works thereof in any medium, with or without 93 | modifications, and in Source or Object form, provided that You 94 | meet the following conditions: 95 | 96 | (a) You must give any other recipients of the Work or 97 | Derivative Works a copy of this License; and 98 | 99 | (b) You must cause any modified files to carry prominent notices 100 | stating that You changed the files; and 101 | 102 | (c) You must retain, in the Source form of any Derivative Works 103 | that You distribute, all copyright, patent, trademark, and 104 | attribution notices from the Source form of the Work, 105 | excluding those notices that do not pertain to any part of 106 | the Derivative Works; and 107 | 108 | (d) If the Work includes a "NOTICE" text file as part of its 109 | distribution, then any Derivative Works that You distribute must 110 | include a readable copy of the attribution notices contained 111 | within such NOTICE file, excluding those notices that do not 112 | pertain to any part of the Derivative Works, in at least one 113 | of the following places: within a NOTICE text file distributed 114 | as part of the Derivative Works; within the Source form or 115 | documentation, if provided along with the Derivative Works; or, 116 | within a display generated by the Derivative Works, if and 117 | wherever such third-party notices normally appear. The contents 118 | of the NOTICE file are for informational purposes only and 119 | do not modify the License. You may add Your own attribution 120 | notices within Derivative Works that You distribute, alongside 121 | or as an addendum to the NOTICE text from the Work, provided 122 | that such additional attribution notices cannot be construed 123 | as modifying the License. 124 | 125 | You may add Your own copyright statement to Your modifications and 126 | may provide additional or different license terms and conditions 127 | for use, reproduction, or distribution of Your modifications, or 128 | for any such Derivative Works as a whole, provided Your use, 129 | reproduction, and distribution of the Work otherwise complies with 130 | the conditions stated in this License. 131 | 132 | 5. Submission of Contributions. Unless You explicitly state otherwise, 133 | any Contribution intentionally submitted for inclusion in the Work 134 | by You to the Licensor shall be under the terms and conditions of 135 | this License, without any additional terms or conditions. 136 | Notwithstanding the above, nothing herein shall supersede or modify 137 | the terms of any separate license agreement you may have executed 138 | with Licensor regarding such Contributions. 139 | 140 | 6. Trademarks. This License does not grant permission to use the trade 141 | names, trademarks, service marks, or product names of the Licensor, 142 | except as required for reasonable and customary use in describing the 143 | origin of the Work and reproducing the content of the NOTICE file. 144 | 145 | 7. Disclaimer of Warranty. Unless required by applicable law or 146 | agreed to in writing, Licensor provides the Work (and each 147 | Contributor provides its Contributions) on an "AS IS" BASIS, 148 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 149 | implied, including, without limitation, any warranties or conditions 150 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 151 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 152 | appropriateness of using or redistributing the Work and assume any 153 | risks associated with Your exercise of permissions under this License. 154 | 155 | 8. Limitation of Liability. In no event and under no legal theory, 156 | whether in tort (including negligence), contract, or otherwise, 157 | unless required by applicable law (such as deliberate and grossly 158 | negligent acts) or agreed to in writing, shall any Contributor be 159 | liable to You for damages, including any direct, indirect, special, 160 | incidental, or consequential damages of any character arising as a 161 | result of this License or out of the use or inability to use the 162 | Work (including but not limited to damages for loss of goodwill, 163 | work stoppage, computer failure or malfunction, or any and all 164 | other commercial damages or losses), even if such Contributor 165 | has been advised of the possibility of such damages. 166 | 167 | 9. Accepting Warranty or Additional Liability. While redistributing 168 | the Work or Derivative Works thereof, You may choose to offer, 169 | and charge a fee for, acceptance of support, warranty, indemnity, 170 | or other liability obligations and/or rights consistent with this 171 | License. However, in accepting such obligations, You may act only 172 | on Your own behalf and on Your sole responsibility, not on behalf 173 | of any other Contributor, and only if You agree to indemnify, 174 | defend, and hold each Contributor harmless for any liability 175 | incurred by, or claims asserted against, such Contributor by reason 176 | of your accepting any such warranty or additional liability. 177 | 178 | END OF TERMS AND CONDITIONS 179 | 180 | APPENDIX: How to apply the Apache License to your work. 181 | 182 | To apply the Apache License to your work, attach the following 183 | boilerplate notice, with the fields enclosed by brackets "[]" 184 | replaced with your own identifying information. (Don't include 185 | the brackets!) The text should be enclosed in the appropriate 186 | comment syntax for the file format. We also recommend that a 187 | file or class name and description of purpose be included on the 188 | same "printed page" as the copyright notice for easier 189 | identification within third-party archives. 190 | 191 | Copyright [yyyy] [name of copyright owner] 192 | 193 | Licensed under the Apache License, Version 2.0 (the "License"); 194 | you may not use this file except in compliance with the License. 195 | You may obtain a copy of the License at 196 | 197 | http://www.apache.org/licenses/LICENSE-2.0 198 | 199 | Unless required by applicable law or agreed to in writing, software 200 | distributed under the License is distributed on an "AS IS" BASIS, 201 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 202 | See the License for the specific language governing permissions and 203 | limitations under the License. 
-------------------------------------------------------------------------------- /report/fonts/Arial.afm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Arial.afm -------------------------------------------------------------------------------- /report/fonts/Arial.pfa: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Arial.pfa -------------------------------------------------------------------------------- /report/fonts/Arial.tfm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Arial.tfm -------------------------------------------------------------------------------- /report/fonts/Arial.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Arial.ttf -------------------------------------------------------------------------------- /report/fonts/Arial_Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Arial_Bold.ttf -------------------------------------------------------------------------------- /report/fonts/Arial_Bold_Italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Arial_Bold_Italic.ttf -------------------------------------------------------------------------------- /report/fonts/Arial_Italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Arial_Italic.ttf -------------------------------------------------------------------------------- /report/fonts/BookmanOldStyle-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/BookmanOldStyle-Bold.ttf -------------------------------------------------------------------------------- /report/fonts/BookmanOldStyle-BoldItalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/BookmanOldStyle-BoldItalic.ttf -------------------------------------------------------------------------------- /report/fonts/BookmanOldStyle-Italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/BookmanOldStyle-Italic.ttf -------------------------------------------------------------------------------- /report/fonts/BookmanOldStyle.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/BookmanOldStyle.ttf -------------------------------------------------------------------------------- /report/fonts/CambriaMath.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/CambriaMath.ttf -------------------------------------------------------------------------------- /report/fonts/CourierNewPS-BoldItalicMT.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/CourierNewPS-BoldItalicMT.ttf -------------------------------------------------------------------------------- /report/fonts/CourierNewPS-BoldMT.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/CourierNewPS-BoldMT.ttf -------------------------------------------------------------------------------- /report/fonts/CourierNewPS-ItalicMT.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/CourierNewPS-ItalicMT.ttf -------------------------------------------------------------------------------- /report/fonts/CourierNewPSMT.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/CourierNewPSMT.ttf -------------------------------------------------------------------------------- /report/fonts/Georgia.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Georgia.ttf -------------------------------------------------------------------------------- /report/fonts/Georgia_Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Georgia_Bold.ttf -------------------------------------------------------------------------------- /report/fonts/Georgia_Bold_Italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Georgia_Bold_Italic.ttf -------------------------------------------------------------------------------- /report/fonts/Georgia_Italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Georgia_Italic.ttf -------------------------------------------------------------------------------- /report/fonts/TUDelft-UltraLight.afm: -------------------------------------------------------------------------------- 1 | StartFontMetrics 4.1 2 | FontName TUDelft-UltraLight 3 | FullName TU Delft-UltraLight 4 | Notice Copyright 2015 TU Delft. All rights reserved. 
5 | EncodingScheme FontSpecific 6 | FamilyName TU Delft-UltraLight 7 | Weight UltraLight 8 | Version Version 1.000; ttfautohint [v0.96] -l 8 -r 50 -G 200 -x 14 -w "G" -W -p 9 | Characters 219 10 | ItalicAngle 0.0 11 | Ascender 941 12 | Descender -211 13 | UnderlineThickness 50 14 | UnderlinePosition -75 15 | IsFixedPitch false 16 | FontBBox -68 -211 1013 941 17 | StartCharMetrics 256 18 | C 0 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 19 | C 1 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 20 | C 2 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 21 | C 3 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 22 | C 4 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 23 | C 5 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 24 | C 6 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 25 | C 7 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 26 | C 8 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 27 | C 9 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 28 | C 10 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 29 | C 11 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 30 | C 12 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 31 | C 13 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 32 | C 14 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 33 | C 15 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 34 | C 16 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 35 | C 17 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 36 | C 18 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 37 | C 19 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 38 | C 20 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 39 | C 21 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 40 | C 22 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 41 | C 23 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 42 | C 24 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 43 | C 25 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 44 | C 26 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 45 | C 27 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 46 | C 28 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 47 | C 29 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 48 | C 30 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 49 | C 31 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 50 | C 32 ; WX 278 ; N .null ; B 84 0 104 714 ; 51 | C 33 ; WX 188 ; N exclam ; B 84 0 104 714 ; 52 | C 34 ; WX 288 ; N quotedbl ; B 84 483 204 714 ; 53 | C 35 ; WX 524 ; N numbersign ; B 19 34 505 714 ; 54 | C 36 ; WX 524 ; N dollar ; B 25 -95 499 755 ; 55 | C 37 ; WX 711 ; N percent ; B 28 -12 683 731 ; 56 | C 38 ; WX 607 ; N ampersand ; B 18 -12 595 732 ; 57 | C 39 ; WX 188 ; N quotesingle ; B 84 483 104 714 ; 58 | C 40 ; WX 184 ; N parenleft ; B 30 -199 172 731 ; 59 | C 41 ; WX 184 ; N parenright ; B 12 -199 154 731 ; 60 | C 42 ; WX 348 ; N asterisk ; B 32 438 316 714 ; 61 | C 43 ; WX 524 ; N plus ; B 26 116 498 589 ; 62 | C 44 ; WX 188 ; N comma ; B 48 -124 104 107 ; 63 | C 45 ; WX 319 ; N hyphen ; B 72 258 247 278 ; 64 | C 46 ; WX 188 ; N period ; B 84 0 104 108 ; 65 | C 47 ; WX 333 ; N slash ; B -7 -17 340 731 ; 66 | C 48 ; WX 524 ; N zero ; B 30 -12 494 714 ; 67 | C 49 ; WX 524 ; N one ; B 115 0 308 704 ; 68 | C 50 ; WX 524 ; N two ; B 30 0 493 714 ; 69 | C 51 ; WX 524 ; N three ; B 25 -12 499 714 ; 70 | C 52 ; WX 524 ; N four ; B 18 0 506 704 ; 71 | C 53 ; WX 524 ; N five ; B 32 -12 494 699 ; 72 | C 54 ; WX 524 ; N six ; B 30 -12 504 714 ; 73 | C 55 ; WX 464 ; N seven ; B 0 0 464 700 ; 74 | C 56 ; WX 524 ; N eight ; B 25 -12 499 714 ; 75 | C 57 ; WX 524 ; N nine ; B 21 -13 495 713 ; 76 | C 58 ; WX 188 ; N colon ; B 84 0 104 514 ; 77 | C 59 ; WX 190 ; N semicolon ; B 48 -124 106 514 ; 78 | C 60 ; WX 524 ; N less ; B 25 116 499 589 ; 79 | C 61 ; WX 524 ; N equal ; B 26 247 498 461 ; 80 | C 62 ; WX 524 ; N greater ; B 25 116 499 589 ; 81 | C 63 ; WX 479 ; N question ; B 18 0 
449 731 ; 82 | C 64 ; WX 800 ; N at ; B 57 -17 743 714 ; 83 | C 65 ; WX 598 ; N A ; B 0 0 598 714 ; 84 | C 66 ; WX 572 ; N B ; B 60 0 542 714 ; 85 | C 67 ; WX 636 ; N C ; B 30 -17 612 731 ; 86 | C 68 ; WX 616 ; N D ; B 45 0 571 714 ; 87 | C 69 ; WX 521 ; N E ; B 60 0 509 714 ; 88 | C 70 ; WX 524 ; N F ; B 60 0 489 714 ; 89 | C 71 ; WX 654 ; N G ; B 30 -17 606 731 ; 90 | C 72 ; WX 619 ; N H ; B 60 0 559 714 ; 91 | C 73 ; WX 140 ; N I ; B 60 0 80 714 ; 92 | C 74 ; WX 467 ; N J ; B 12 -17 407 714 ; 93 | C 75 ; WX 566 ; N K ; B 60 0 566 714 ; 94 | C 76 ; WX 505 ; N L ; B 60 0 485 714 ; 95 | C 77 ; WX 786 ; N M ; B 60 0 726 714 ; 96 | C 78 ; WX 617 ; N N ; B 60 0 557 714 ; 97 | C 79 ; WX 679 ; N O ; B 30 -17 649 731 ; 98 | C 80 ; WX 570 ; N P ; B 60 0 546 714 ; 99 | C 81 ; WX 679 ; N Q ; B 30 -42 667 731 ; 100 | C 82 ; WX 607 ; N R ; B 60 0 597 714 ; 101 | C 83 ; WX 588 ; N S ; B 30 -17 558 731 ; 102 | C 84 ; WX 532 ; N T ; B 9 0 523 714 ; 103 | C 85 ; WX 612 ; N U ; B 60 -17 552 714 ; 104 | C 86 ; WX 564 ; N V ; B 0 0 564 714 ; 105 | C 87 ; WX 880 ; N W ; B 1 1 879 715 ; 106 | C 88 ; WX 561 ; N X ; B 7 0 558 714 ; 107 | C 89 ; WX 574 ; N Y ; B 0 0 569 714 ; 108 | C 90 ; WX 516 ; N Z ; B 8 0 503 714 ; 109 | C 91 ; WX 185 ; N bracketleft ; B 60 -199 173 714 ; 110 | C 92 ; WX 333 ; N backslash ; B 23 -12 252 731 ; 111 | C 93 ; WX 185 ; N bracketright ; B 12 -199 125 714 ; 112 | C 94 ; WX 524 ; N asciicircum ; B 85 335 440 727 ; 113 | C 95 ; WX 500 ; N underscore ; B -2 -162 502 -142 ; 114 | C 96 ; WX 130 ; N grave ; B -25 583 111 720 ; 115 | C 97 ; WX 488 ; N a ; B 24 -12 452 526 ; 116 | C 98 ; WX 535 ; N b ; B 60 -12 507 714 ; 117 | C 99 ; WX 483 ; N c ; B 28 -13 465 526 ; 118 | C 100 ; WX 534 ; N d ; B 28 -12 475 714 ; 119 | C 101 ; WX 491 ; N e ; B 28 -12 466 527 ; 120 | C 102 ; WX 247 ; N f ; B 4 0 266 722 ; 121 | C 103 ; WX 530 ; N g ; B 28 -199 470 526 ; 122 | C 104 ; WX 510 ; N h ; B 60 0 450 714 ; 123 | C 105 ; WX 140 ; N i ; B 60 0 80 714 ; 124 | C 106 ; WX 140 ; N j ; B -66 -177 80 714 ; 125 | C 107 ; WX 439 ; N k ; B 55 0 436 714 ; 126 | C 108 ; WX 140 ; N l ; B 60 0 80 714 ; 127 | C 109 ; WX 763 ; N m ; B 61 0 703 526 ; 128 | C 110 ; WX 510 ; N n ; B 60 0 450 526 ; 129 | C 111 ; WX 514 ; N o ; B 28 -12 486 526 ; 130 | C 112 ; WX 533 ; N p ; B 58 -187 505 526 ; 131 | C 113 ; WX 535 ; N q ; B 28 -187 475 526 ; 132 | C 114 ; WX 300 ; N r ; B 60 0 288 526 ; 133 | C 115 ; WX 460 ; N s ; B 15 -12 424 526 ; 134 | C 116 ; WX 286 ; N t ; B 13 -8 275 675 ; 135 | C 117 ; WX 510 ; N u ; B 60 -12 450 514 ; 136 | C 118 ; WX 426 ; N v ; B 8 0 421 514 ; 137 | C 119 ; WX 668 ; N w ; B 5 0 663 514 ; 138 | C 120 ; WX 427 ; N x ; B 5 0 422 514 ; 139 | C 121 ; WX 420 ; N y ; B 8 -199 416 514 ; 140 | C 122 ; WX 412 ; N z ; B 11 0 399 514 ; 141 | C 123 ; WX 231 ; N braceleft ; B 4 -199 219 714 ; 142 | C 124 ; WX 222 ; N bar ; B 101 -211 121 728 ; 143 | C 125 ; WX 231 ; N braceright ; B 12 -199 227 714 ; 144 | C 126 ; WX 524 ; N asciitilde ; B 26 310 498 397 ; 145 | C 127 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 146 | C 128 ; WX 524 ; N Euro ; B 0 -13 501 714 ; 147 | C 129 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 148 | C 130 ; WX 188 ; N quotesinglbase ; B 48 -124 104 107 ; 149 | C 131 ; WX 524 ; N florin ; B 24 -177 500 731 ; 150 | C 132 ; WX 288 ; N quotedblbase ; B 48 -124 204 107 ; 151 | C 133 ; WX 1000 ; N ellipsis ; B 157 0 843 107 ; 152 | C 134 ; WX 524 ; N dagger ; B 22 -187 501 700 ; 153 | C 135 ; WX 524 ; N daggerdbl ; B 21 -187 501 700 ; 154 | C 136 ; WX 130 ; N circumflex ; B -56 583 186 720 ; 155 | 
C 137 ; WX 1041 ; N perthousand ; B 28 -12 1013 731 ; 156 | C 138 ; WX 588 ; N Scaron ; B 30 -17 558 920 ; 157 | C 139 ; WX 249 ; N guilsinglleft ; B 36 35 213 480 ; 158 | C 140 ; WX 946 ; N OE ; B 30 -19 934 731 ; 159 | C 141 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 160 | C 142 ; WX 516 ; N Zcaron ; B 8 0 503 920 ; 161 | C 143 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 162 | C 144 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 163 | C 145 ; WX 167 ; N quoteleft ; B 53 483 109 714 ; 164 | C 146 ; WX 188 ; N quoteright ; B 48 483 104 714 ; 165 | C 147 ; WX 288 ; N quotedblleft ; B 84 483 240 714 ; 166 | C 148 ; WX 288 ; N quotedblright ; B 48 483 204 714 ; 167 | C 149 ; WX 500 ; N bullet ; B 72 178 429 535 ; 168 | C 150 ; WX 524 ; N endash ; B 0 258 524 278 ; 169 | C 151 ; WX 1000 ; N emdash ; B 0 258 1000 278 ; 170 | C 152 ; WX 130 ; N tilde ; B -68 613 197 691 ; 171 | C 153 ; WX 847 ; N trademark ; B 24 318 763 714 ; 172 | C 154 ; WX 460 ; N scaron ; B 18 -12 424 720 ; 173 | C 155 ; WX 249 ; N guilsinglright ; B 36 35 213 480 ; 174 | C 156 ; WX 878 ; N oe ; B 28 -13 854 526 ; 175 | C 157 ; WX 500 ; N .notdef ; B 0 0 500 700 ; 176 | C 158 ; WX 410 ; N zcaron ; B 11 0 399 720 ; 177 | C 159 ; WX 576 ; N Ydieresis ; B 1 0 570 890 ; 178 | C 160 ; WX 278 ; N nbspace ; B 84 -187 104 514 ; 179 | C 161 ; WX 188 ; N exclamdown ; B 84 -187 104 514 ; 180 | C 162 ; WX 524 ; N cent ; B 55 -109 470 602 ; 181 | C 163 ; WX 524 ; N sterling ; B 12 -12 507 714 ; 182 | C 164 ; WX 524 ; N currency ; B 51 143 474 566 ; 183 | C 165 ; WX 524 ; N yen ; B 25 0 499 701 ; 184 | C 166 ; WX 222 ; N brokenbar ; B 101 -210 121 727 ; 185 | C 167 ; WX 524 ; N section ; B 41 -189 483 731 ; 186 | C 168 ; WX 130 ; N dieresis ; B -23 609 154 690 ; 187 | C 169 ; WX 800 ; N copyright ; B 26 -17 774 731 ; 188 | C 170 ; WX 364 ; N ordfeminine ; B 39 344 335 714 ; 189 | C 171 ; WX 353 ; N guillemotleft ; B 36 35 317 480 ; 190 | C 172 ; WX 524 ; N logicalnot ; B 26 194 498 461 ; 191 | C 173 ; WX 319 ; N sfthyphen ; B 72 258 247 278 ; 192 | C 174 ; WX 800 ; N registered ; B 26 -17 774 731 ; 193 | C 175 ; WX 130 ; N macron ; B -51 632 181 652 ; 194 | C 176 ; WX 310 ; N degree ; B 12 426 298 714 ; 195 | C 177 ; WX 524 ; N plusminus ; B 26 0 498 589 ; 196 | C 178 ; WX 333 ; N twosuperior ; B 18 347 315 714 ; 197 | C 179 ; WX 333 ; N threesuperior ; B 18 337 315 713 ; 198 | C 180 ; WX 130 ; N acute ; B 19 583 155 720 ; 199 | C 181 ; WX 524 ; N mu ; B 68 -177 457 514 ; 200 | C 182 ; WX 524 ; N paragraph ; B 18 -200 494 714 ; 201 | C 183 ; WX 278 ; N periodcentered ; B 107 308 171 372 ; 202 | C 184 ; WX 130 ; N cedilla ; B -16 -188 145 3 ; 203 | C 185 ; WX 333 ; N onesuperior ; B 62 333 201 700 ; 204 | C 186 ; WX 363 ; N ordmasculine ; B 24 346 339 714 ; 205 | C 187 ; WX 353 ; N guillemotright ; B 36 35 317 480 ; 206 | C 188 ; WX 720 ; N onequarter ; B 62 -28 699 731 ; 207 | C 189 ; WX 720 ; N onehalf ; B 62 -28 690 731 ; 208 | C 190 ; WX 720 ; N threequarters ; B 17 -28 698 731 ; 209 | C 191 ; WX 479 ; N questiondown ; B 18 -200 449 514 ; 210 | C 192 ; WX 598 ; N Agrave ; B 0 0 598 920 ; 211 | C 193 ; WX 598 ; N Aacute ; B 0 0 598 920 ; 212 | C 194 ; WX 599 ; N Acircumflex ; B 0 0 598 920 ; 213 | C 195 ; WX 598 ; N Atilde ; B 0 0 598 891 ; 214 | C 196 ; WX 598 ; N Adieresis ; B 0 0 598 890 ; 215 | C 197 ; WX 599 ; N Aring ; B 0 0 598 941 ; 216 | C 198 ; WX 854 ; N AE ; B -1 0 846 714 ; 217 | C 199 ; WX 636 ; N Ccedilla ; B 27 -188 609 731 ; 218 | C 200 ; WX 521 ; N Egrave ; B 60 0 509 920 ; 219 | C 201 ; WX 521 ; N Eacute ; B 60 0 509 920 ; 220 | C 
202 ; WX 521 ; N Ecircumflex ; B 60 0 509 920 ; 221 | C 203 ; WX 521 ; N Edieresis ; B 60 0 509 890 ; 222 | C 204 ; WX 140 ; N Igrave ; B -18 0 118 920 ; 223 | C 205 ; WX 140 ; N Iacute ; B 22 0 158 920 ; 224 | C 206 ; WX 140 ; N Icircumflex ; B -51 0 191 920 ; 225 | C 207 ; WX 140 ; N Idieresis ; B -19 0 158 890 ; 226 | C 208 ; WX 616 ; N Eth ; B -10 0 586 714 ; 227 | C 209 ; WX 617 ; N Ntilde ; B 60 0 557 891 ; 228 | C 210 ; WX 679 ; N Ograve ; B 30 -17 649 920 ; 229 | C 211 ; WX 679 ; N Oacute ; B 30 -17 649 920 ; 230 | C 212 ; WX 679 ; N Ocircumflex ; B 30 -17 649 920 ; 231 | C 213 ; WX 679 ; N Otilde ; B 30 -17 649 891 ; 232 | C 214 ; WX 679 ; N Odieresis ; B 30 -17 649 890 ; 233 | C 215 ; WX 524 ; N multiply ; B 26 117 498 588 ; 234 | C 216 ; WX 679 ; N Oslash ; B 30 -29 649 743 ; 235 | C 217 ; WX 612 ; N Ugrave ; B 60 -17 552 920 ; 236 | C 218 ; WX 612 ; N Uacute ; B 60 -17 552 920 ; 237 | C 219 ; WX 612 ; N Ucircumflex ; B 60 -17 552 920 ; 238 | C 220 ; WX 612 ; N Udieresis ; B 60 -17 552 890 ; 239 | C 221 ; WX 573 ; N Yacute ; B 0 0 569 920 ; 240 | C 222 ; WX 570 ; N Thorn ; B 60 0 546 714 ; 241 | C 223 ; WX 537 ; N germandbls ; B 60 -12 519 726 ; 242 | C 224 ; WX 488 ; N agrave ; B 24 -12 452 720 ; 243 | C 225 ; WX 488 ; N aacute ; B 24 -12 452 720 ; 244 | C 226 ; WX 488 ; N acircumflex ; B 24 -12 452 720 ; 245 | C 227 ; WX 488 ; N atilde ; B 24 -12 452 691 ; 246 | C 228 ; WX 488 ; N adieresis ; B 24 -12 452 690 ; 247 | C 229 ; WX 488 ; N aring ; B 24 -12 452 751 ; 248 | C 230 ; WX 845 ; N ae ; B 24 -12 821 527 ; 249 | C 231 ; WX 485 ; N ccedilla ; B 28 -188 465 526 ; 250 | C 232 ; WX 490 ; N egrave ; B 28 -12 466 709 ; 251 | C 233 ; WX 491 ; N eacute ; B 28 -12 466 720 ; 252 | C 234 ; WX 491 ; N ecircumflex ; B 28 -12 466 720 ; 253 | C 235 ; WX 491 ; N edieresis ; B 28 -12 466 690 ; 254 | C 236 ; WX 140 ; N igrave ; B -18 0 118 720 ; 255 | C 237 ; WX 140 ; N iacute ; B 22 0 158 720 ; 256 | C 238 ; WX 140 ; N icircumflex ; B -51 0 191 720 ; 257 | C 239 ; WX 140 ; N idieresis ; B -19 0 158 690 ; 258 | C 240 ; WX 519 ; N eth ; B 28 -11 491 727 ; 259 | C 241 ; WX 510 ; N ntilde ; B 60 0 450 691 ; 260 | C 242 ; WX 514 ; N ograve ; B 28 -12 486 720 ; 261 | C 243 ; WX 514 ; N oacute ; B 28 -12 486 720 ; 262 | C 244 ; WX 514 ; N ocircumflex ; B 28 -12 486 720 ; 263 | C 245 ; WX 514 ; N otilde ; B 28 -12 486 691 ; 264 | C 246 ; WX 514 ; N odieresis ; B 28 -12 486 690 ; 265 | C 247 ; WX 524 ; N divide ; B 26 116 498 590 ; 266 | C 248 ; WX 514 ; N oslash ; B 28 -26 486 539 ; 267 | C 249 ; WX 510 ; N ugrave ; B 60 -12 450 720 ; 268 | C 250 ; WX 510 ; N uacute ; B 60 -12 450 720 ; 269 | C 251 ; WX 510 ; N ucircumflex ; B 60 -12 450 720 ; 270 | C 252 ; WX 510 ; N udieresis ; B 60 -12 450 690 ; 271 | C 253 ; WX 384 ; N yacute ; B 8 -199 416 720 ; 272 | C 254 ; WX 533 ; N thorn ; B 55 -199 502 714 ; 273 | C 255 ; WX 431 ; N ydieresis ; B 8 -199 416 690 ; 274 | EndCharMetrics 275 | EndFontMetrics 276 | -------------------------------------------------------------------------------- /report/fonts/TUDelft-UltraLight.tfm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/TUDelft-UltraLight.tfm -------------------------------------------------------------------------------- /report/fonts/TUDelft-UltraLight.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/TUDelft-UltraLight.ttf -------------------------------------------------------------------------------- /report/fonts/Tahoma-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Tahoma-Bold.ttf -------------------------------------------------------------------------------- /report/fonts/Tahoma.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/fonts/Tahoma.ttf -------------------------------------------------------------------------------- /report/preface.tex: -------------------------------------------------------------------------------- 1 | \chapter*{Preface} 2 | \setheader{Preface} 3 | This document contains the work done for my thesis towards the completion of the Master of Science in Computer Science (Data Science track). 4 | The work was conducted and completed at the Pattern Recognition Laboratory (PRLab) at Delft University of Technology. 5 | This research stemmed from my interest in cryptocurrencies, for which I am grateful in many ways. 6 | My passion for software engineering and finance, as well as my fascination with machine learning, which I developed during my studies at TU Delft, drew me toward advances in automated cryptocurrency trading. 7 | I gained an understanding of some of the many concepts present in order-driven financial markets. 8 | Soon I realized that market orders were the limiting factor preventing my strategies from becoming profitable, and that the appropriate placement of limit orders would solve this problem. 9 | But how? 10 | This thesis attempts to provide answers to this question. 11 | The work mainly concerns the transposition of a financial problem into the reinforcement learning context and the explorations that were necessary thereafter. 12 | My efforts included work that is well summarized in the following quote from Marcos Lopez de Prado: 13 | \begin{quote} 14 | It takes almost as much effort to produce one true investment strategy as to produce a hundred, and the complexities are overwhelming: data curation and processing, HPC infrastructure, software development, feature analysis, execution simulators, backtesting, etc. \cite{de2018advances} 15 | \end{quote} 16 | \noindent 17 | Many times during this project, I was reminded how important software engineering skills are, how much time a small mistake can cost, and how crucial it is to pay attention to details. 18 | I was able to strengthen my skills and expand my knowledge in mathematics and machine learning. 19 | \\ 20 | \\ 21 | I am grateful for this journey and would like to thank my supervisor Marco Loog for his guidance and advice during this research. 22 | Our meetings and the reviews he provided were always a great help and urged me to take the extra step and think outside the box. 23 | I would also like to thank Johan Pouwelse, my co-supervisor, for his equally valuable advice and perspective from an applied point of view. 24 | Our meetings were always inspiring and pointed towards the practical applications of my research, which without doubt made it an even more enjoyable project.
25 | My thanks also go to Mateusz Garbacz, with whom I had many discussions during our "hacker-nights" and who contributed his perspective and creativity to this project. 26 | I should also mention Satoshi Nakamoto, whoever that may be, who is not only the father of Bitcoin but has also created a new field of research. 27 | Last but not least, I would like to thank my friends and family for their moral support. 28 | 29 | \begin{flushright} 30 | {\makeatletter\itshape 31 | \@author \\ 32 | Delft, July 10, 2018 33 | \makeatother} 34 | \end{flushright} 35 | 36 | -------------------------------------------------------------------------------- /report/report.tex: -------------------------------------------------------------------------------- 1 | \documentclass[whitelogo]{tudelft-report} 2 | \usepackage{natbib} 3 | \usepackage{changes} 4 | \usepackage{float} 5 | \graphicspath{{../images/}} 6 | \usepackage{graphicx} 7 | \usepackage{subcaption} 8 | \usepackage{algorithm} 9 | \usepackage[noend]{algpseudocode} 10 | \usepackage{todonotes} 11 | \usepackage[normalem]{ulem} 12 | \useunder{\uline}{\ul}{} 13 | 14 | \begin{document} 15 | 16 | %% Use Roman numerals for the page numbers of the title pages and table of 17 | %% contents. 18 | \frontmatter 19 | 20 | %% Uncomment following 19 lines for a cover with a picture on the lower half only 21 | %\title[tudelft-white]{Title} 22 | % \subtitle[tudelft-cyan]{Optional subtitle} 23 | % \author[tudelft-white]{J.\ Random Author} 24 | % \affiliation{Technische Universiteit Delft} 25 | % \coverimage{cover.jpg} 26 | % \titleoffsetx{10cm} 27 | % \titleoffsety{10cm} 28 | % \afiloffsetx{1cm} 29 | % \afiloffsety{18cm} 30 | % \covertext[tudelft-white]{ 31 | % \textbf{Cover Text} \\ 32 | % possibly \\ 33 | % spanning 34 | % multiple 35 | % lines 36 | % \vfill 37 | % ISBN 000-00-0000-000-0 38 | % } 39 | % \makecover 40 | 41 | %% The following 16 lines produce a cover with a picture on the lower half only 42 | \title[tudelft-white]{Limit order placement optimization with Deep Reinforcement Learning} 43 | \subtitle[tudelft-black]{Learning from patterns in cryptocurrency market data} 44 | \author[tudelft-white]{Marc B. Juchli} 45 | \affiliation{Technische Universiteit Delft} 46 | \coverimage{tank.jpg} 47 | \covertext[tudelft-white]{ 48 | \textbf{Cover Text} \\ 49 | possibly \\ 50 | spanning 51 | multiple 52 | lines 53 | \vfill 54 | } 55 | %\setpagecolor{tudelft-cyan} 56 | %\makecover[split] 57 | 58 | 59 | %% Include an optional title page. 60 | \input{title} 61 | 62 | \input{abstract} 63 | 64 | \input{preface} 65 | 66 | \tableofcontents 67 | 68 | %% Use Arabic numerals for the page numbers of the chapters. 69 | \mainmatter 70 | 71 | \input{chapter-1} 72 | \input{chapter-2} 73 | \input{chapter-3} 74 | \input{chapter-4} 75 | \input{chapter-5} 76 | \input{chapter-6} 77 | \input{chapter-7} 78 | 79 | %% Use letters for the chapter numbers of the appendices. 80 | \appendix 81 | 82 | %\input{appendix-a} 83 | 84 | \bibliography{report} 85 | 86 | \end{document} 87 | 88 | -------------------------------------------------------------------------------- /report/roboto.sty: -------------------------------------------------------------------------------- 1 | \NeedsTeXFormat{LaTeX2e} 2 | \ProvidesPackage{roboto} 3 | [2015/04/16 (Bob Tennent) Supports Roboto fonts for all LaTeX engines.]
4 | 5 | \RequirePackage{ifxetex,ifluatex,xkeyval,textcomp} 6 | 7 | \newif\ifroboto@otf 8 | \ifxetex 9 | \roboto@otftrue 10 | \else\ifluatex 11 | \roboto@otftrue 12 | \else % [pdf]LaTeX 13 | \roboto@otffalse 14 | \fi\fi 15 | 16 | \newif\ifroboto@lining \roboto@liningfalse 17 | \newif\ifroboto@tabular \roboto@tabularfalse 18 | \newif\ifroboto@default \roboto@defaultfalse 19 | 20 | \newcommand*{\Roboto@scale}{1} 21 | \newcommand*{\RobotoCondensed@scale}{1} 22 | \DeclareOptionX{scaled}{\renewcommand*{\Roboto@scale}{#1}\renewcommand*{\RobotoCondensed@scale}{#1}} 23 | \DeclareOptionX{scale}{\renewcommand*{\Roboto@scale}{#1}\renewcommand*{\RobotoCondensed@scale}{#1}} 24 | 25 | \newif\ifroboto@light \roboto@lightfalse 26 | \newif\ifroboto@thin \roboto@thinfalse 27 | \newif\ifroboto@medium \roboto@mediumfalse 28 | \newif\ifroboto@black \roboto@blackfalse 29 | \newif\ifroboto@condensed \roboto@condensedfalse 30 | \newif\ifroboto@slab \roboto@slabfalse 31 | 32 | \DeclareOptionX{default}{\roboto@defaulttrue} 33 | \DeclareOptionX{sfdefault}{\roboto@defaulttrue} 34 | \DeclareOptionX{type1}{\roboto@otffalse} 35 | \DeclareOptionX{medium}{\roboto@mediumtrue\roboto@blackfalse} 36 | \DeclareOptionX{bold}{\roboto@blackfalse\roboto@mediumfalse} 37 | \DeclareOptionX{black}{\roboto@blacktrue\roboto@mediumfalse} 38 | \DeclareOptionX{thin}{\roboto@thintrue\roboto@lightfalse} 39 | \DeclareOptionX{light}{\roboto@lighttrue\roboto@thinfalse} 40 | \DeclareOptionX{regular}{\roboto@lightfalse\roboto@thinfalse} 41 | \DeclareOptionX{condensed}{\roboto@condensedtrue\roboto@thinfalse} 42 | \DeclareOptionX{rm}{\roboto@slabtrue} 43 | \DeclareOptionX{lining}{\roboto@liningtrue} 44 | \DeclareOptionX{nf}{\roboto@liningtrue} 45 | \DeclareOptionX{lf}{\roboto@liningtrue} 46 | \DeclareOptionX{oldstyle}{\roboto@liningfalse} 47 | \DeclareOptionX{osf}{\roboto@liningfalse} 48 | \DeclareOptionX{tabular}{\roboto@tabulartrue} 49 | \DeclareOptionX{t}{\roboto@tabulartrue} 50 | \DeclareOptionX{proportional}{\roboto@tabularfalse} 51 | \DeclareOptionX{p}{\roboto@tabularfalse} 52 | 53 | 54 | \ExecuteOptionsX{bold,regular,lining,proportional} 55 | \ProcessOptionsX\relax 56 | 57 | \ifroboto@otf 58 | \def\roboto@boldstyle{Bold} 59 | \ifroboto@medium\def\roboto@boldstyle{Medium}\fi 60 | \ifroboto@black\def\roboto@boldstyle{Black}\fi 61 | \def\roboto@regstyle{Regular} 62 | \ifroboto@light\def\roboto@regstyle{Light}\fi 63 | \ifroboto@thin\def\roboto@regstyle{Thin}\fi 64 | 65 | \else % type1 66 | 67 | \def\bfseries@sf{b} 68 | \ifroboto@medium\def\bfseries@sf{mb}\fi 69 | \ifroboto@black\def\bfseries@sf{k}\fi 70 | \def\mdseries@sf{m} 71 | \ifroboto@thin\def\mdseries@sf{t} 72 | \ifroboto@slab\def\mdseries@rm{t}\fi\fi 73 | \ifroboto@light\def\mdseries@sf{l} 74 | \ifroboto@slab\def\mdseries@rm{l}\fi\fi 75 | 76 | \fi 77 | 78 | \ifroboto@otf 79 | \ifroboto@lining 80 | \def\roboto@figurestyle{Lining} 81 | \else 82 | \def\roboto@figurestyle{OldStyle} 83 | \fi 84 | \ifroboto@tabular 85 | \def\roboto@figurealign{Monospaced} 86 | \else 87 | \def\roboto@figurealign{Proportional} 88 | \fi 89 | \else % type1 90 | \ifroboto@lining 91 | \def\roboto@figurestyle{LF} 92 | \else 93 | \def\roboto@figurestyle{OsF} 94 | \fi 95 | \ifroboto@tabular 96 | \def\roboto@figurealign{T} 97 | \else 98 | \def\roboto@figurealign{} 99 | \fi 100 | \fi 101 | 102 | 103 | \ifroboto@otf 104 | \RequirePackage{fontspec} 105 | \else 106 | \RequirePackage{fontenc,fontaxes,mweights} 107 | \fi 108 | 109 | \ifroboto@otf 110 | \ifxetex\XeTeXtracingfonts=1\fi 111 | \defaultfontfeatures{ 112 | Ligatures 
= TeX , 113 | Scale = \Roboto@scale , 114 | Extension = .ttf } 115 | \ifroboto@condensed 116 | \setsansfont 117 | [ Numbers = {\roboto@figurealign,\roboto@figurestyle}, 118 | UprightFont = *-\roboto@regstyle , 119 | ItalicFont = *-\roboto@regstyle Italic , 120 | BoldFont = *-Bold , 121 | BoldItalicFont = *-BoldItalic ] 122 | {RobotoCondensed} 123 | \else 124 | \setsansfont 125 | [ Numbers = {\roboto@figurealign,\roboto@figurestyle}, 126 | UprightFont = *-\roboto@regstyle , 127 | ItalicFont = *-\roboto@regstyle Italic , 128 | BoldFont = *-\roboto@boldstyle , 129 | BoldItalicFont = *-\roboto@boldstyle Italic ] 130 | {Roboto} 131 | \fi 132 | % grab current family in case of subsequent change: 133 | \let\robotofamily\sfdefault 134 | \ifroboto@slab 135 | \setmainfont 136 | [ UprightFont = *-\roboto@regstyle , 137 | BoldFont = *-Bold ] 138 | {RobotoSlab} 139 | \fi 140 | \ifroboto@default\renewcommand*\familydefault{\robotofamily}\fi 141 | \newfontfamily\roboto 142 | [ Numbers = {\roboto@figurealign,\roboto@figurestyle}, 143 | UprightFont = *-\roboto@regstyle , 144 | ItalicFont = *-\roboto@regstyle Italic , 145 | BoldFont = *-\roboto@boldstyle , 146 | BoldItalicFont = *-\roboto@boldstyle Italic ] 147 | {Roboto} 148 | \newfontfamily\robotocondensed 149 | [ Numbers = {\roboto@figurealign,\roboto@figurestyle}, 150 | UprightFont = *-\roboto@regstyle , 151 | ItalicFont = *-\roboto@regstyle Italic , 152 | BoldFont = *-Bold , 153 | BoldItalicFont = *-BoldItalic ] 154 | {RobotoCondensed} 155 | \newfontfamily\robotoslab 156 | [ UprightFont = *-\roboto@regstyle , 157 | BoldFont = *-Bold ] 158 | {RobotoSlab} 159 | \newfontfamily\robotoOsF 160 | [ Numbers = {\roboto@figurealign,OldStyle}, 161 | UprightFont = *-\roboto@regstyle , 162 | ItalicFont = *-\roboto@regstyle Italic , 163 | BoldFont = *-\roboto@boldstyle , 164 | BoldItalicFont = *-\roboto@boldstyle Italic ] 165 | {Roboto} 166 | \newfontfamily\robotoLF 167 | [ Numbers = {\roboto@figurealign,Lining}, 168 | UprightFont = *-\roboto@regstyle , 169 | ItalicFont = *-\roboto@regstyle Italic , 170 | BoldFont = *-\roboto@boldstyle , 171 | BoldItalicFont = *-\roboto@boldstyle Italic ] 172 | {Roboto} 173 | \DeclareTextFontCommand{\oldstylenums}{\robotoOsF} 174 | \DeclareTextFontCommand{\liningnums}{\robotoLF} 175 | 176 | \else % type1 177 | \def\robotofamily{Roboto-\roboto@figurealign\roboto@figurestyle} 178 | \def\robotocondensedfamily{RobotoCondensed-\roboto@figurealign\roboto@figurestyle} 179 | \def\robotoslabfamily{RobotoSlab-LF} 180 | \newcommand*\roboto{\fontfamily{\robotofamily}\selectfont} 181 | \newcommand*\robotocondensed{\fontfamily{\robotocondensedfamily}\selectfont} 182 | \newcommand*\robotoslab{\fontfamily{\robotoslabfamily}\selectfont} 183 | \def\sfdefault{\robotofamily} 184 | \ifroboto@condensed\def\sfdefault{\robotocondensedfamily}\fi 185 | \ifroboto@default\edef\familydefault{\sfdefault}\edef\seriesdefault{\mdseries@sf}\fi 186 | \ifroboto@slab\edef\rmdefault{\robotoslabfamily}\fi 187 | 188 | \DeclareTextFontCommand{\oldstylenums}{\fontfamily{Roboto-\roboto@figurealign OsF}\selectfont} 189 | \DeclareTextFontCommand{\liningnums}{\fontfamily{Roboto-\roboto@figurealign LF}\selectfont} 190 | 191 | \fi 192 | 193 | \ifroboto@otf 194 | % turn off defaults in case other fonts are selected: 195 | \defaultfontfeatures{} 196 | \fi 197 | 198 | \endinput -------------------------------------------------------------------------------- /report/tank.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mjuchli/ctc-executioner/d07a05f002b4ef99c04ad627c00b1214cc9bcdc0/report/tank.jpg -------------------------------------------------------------------------------- /report/title.tex: -------------------------------------------------------------------------------- 1 | \begin{titlepage} 2 | 3 | 4 | \begin{center} 5 | 6 | %% Insert the TU Delft logo at the bottom of the page. 7 | 8 | %% Print the title in cyan. 9 | {\makeatletter 10 | \largetitlestyle\fontsize{42}{94}\selectfont\@title 11 | %\largetitlestyle\color{tudelft-cyan}\Huge\@title 12 | \makeatother} 13 | 14 | %% Print the optional subtitle in black. 15 | {\makeatletter 16 | \ifx\@subtitle\undefined\else 17 | \bigskip 18 | {\tudsffamily\fontsize{18}{32}\selectfont\@subtitle} 19 | %\titlefont\titleshape\LARGE\@subtitle 20 | \fi 21 | \makeatother} 22 | 23 | \bigskip 24 | \bigskip 25 | 26 | by 27 | %door 28 | 29 | \bigskip 30 | \bigskip 31 | 32 | %% Print the name of the author. 33 | {\makeatletter 34 | %\largetitlefont\Large\bfseries\@author 35 | \largetitlestyle\fontsize{26}{26}\selectfont\@author 36 | \makeatother} 37 | 38 | \bigskip 39 | \bigskip 40 | 41 | to obtain the degree of Master of Science 42 | %ter verkrijging van de graad van Master of Science 43 | 44 | at the Delft University of Technology, 45 | %aan de Technische Universiteit Delft, 46 | 47 | to be defended publicly on Thursday July 19, 2018 at 09:00 AM. 48 | %in het openbaar de verdedigen op dinsdag 1 januari om 10:00 uur. 49 | 50 | \vfill 51 | 52 | \begin{tabular}{lll} 53 | Student number: & 4634845 \\ 54 | Project duration: & \multicolumn{2}{l}{November 1, 2017 -- July 19, 2018} \\ 55 | Thesis committee: & Dr.\ M.\ Loog, & TU Delft, supervisor \\ 56 | & Dr.\ J.\ Pouwelse, & TU Delft, co-supervisor \\ 57 | & Prof.\ dr.\ ir. M.J.T. Reinders, & TU Delft 58 | \end{tabular} 59 | %% Only include the following lines if confidentiality is applicable. 60 | 61 | \bigskip 62 | \bigskip 63 | \emph{This thesis is confidential and cannot be made public until July 12, 2018.} 64 | %\emph{Op dit verslag is geheimhouding van toepassing tot en met 31 december 2013.} 65 | 66 | \bigskip 67 | \bigskip 68 | An electronic version of this thesis is available at \url{http://repository.tudelft.nl/}. 
69 | %\\[1cm] 70 | 71 | %\centering{\includegraphics{cover/logo_black}} 72 | 73 | 74 | \end{center} 75 | 76 | \begin{tikzpicture}[remember picture, overlay] 77 | \node at (current page.south)[anchor=south,inner sep=0pt]{ 78 | \includegraphics{cover/logo_black} 79 | }; 80 | \end{tikzpicture} 81 | 82 | \end{titlepage} 83 | 84 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.14.2 2 | diskcache==3.0.1 3 | keras_rl==0.4.0 4 | pandas==0.22.0 5 | setuptools==38.5.2 6 | baselines==0.1.5 7 | tensorflow==1.4.1 8 | seaborn==0.8.1 9 | Keras==2.1.4 10 | matplotlib==2.1.0 11 | gym==0.7.4 12 | python_dateutil==2.7.2 13 | rl==2.4 14 | scikit_learn==0.19.1 15 | -------------------------------------------------------------------------------- /setup.py: --------------------------------------------------------------------------------

from setuptools import setup

# Note: setup() is called twice, once per bundled Gym wrapper package;
# both calls run in sequence when this script is executed.
setup(name='gym_ctc_executioner',
      packages=['gym_ctc_executioner'],
      version='0.0.1',
      install_requires=['gym']
)

setup(name='gym_ctc_marketmaker',
      packages=['gym_ctc_marketmaker'],
      version='0.0.1',
      install_requires=['gym']
)
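Usage sketch: installing the project (e.g. with pip install -e .) runs both setup() calls above and makes the two Gym wrapper packages importable. The environment ID used below is a hypothetical placeholder for illustration only; the actual ID is registered in gym_ctc_executioner/__init__.py, which is not reproduced here.

import gym
import gym_ctc_executioner  # importing the package is assumed to register the environment with gym

env = gym.make('ctc-executioner-v0')  # hypothetical ID; check gym_ctc_executioner/__init__.py
state = env.reset()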
-------------------------------------------------------------------------------- /strategy.py: --------------------------------------------------------------------------------

import logging
import numpy as np
import matplotlib.pyplot as plt  # needed by hist_profit() and priceReturnCurve() below
import pandas as pd
from ctc_executioner.action_space import ActionSpace
from ctc_executioner.qlearn import QLearn
from ctc_executioner.order_side import OrderSide
from ctc_executioner.orderbook import Orderbook
from ctc_executioner.action_state import ActionState
from ctc_executioner.agent_utils.ui import UI
import pprint
import datetime
import seaborn as sns
sns.set(color_codes=True)


def getAvgPriceDiffForInventory(M, inventory_observe):
    ms = [x for x in M if x[0][0] != 0 and x[0][1] == inventory_observe]  # filter market orders (t==0)
    price_diffs = [x[4] for x in ms]
    return np.mean(price_diffs)


def getBestTimeForInventory(M, inventory_observe):
    ms = [x for x in M if x[0][1] == inventory_observe]
    # difference of the price to what it was bought or sold for
    price_diffs = [x[4] for x in ms]
    if side == OrderSide.BUY:
        best_price = max(price_diffs)
    else:
        best_price = min(price_diffs)
    i = price_diffs.index(best_price)
    return ms[i]


def train(episodes=100):
    if not orderbook.getStates():
        orderbook.loadFromFile(trainBook)

    for episode in range(episodes):
        # pp.pprint("Episode " + str(episode))
        actionSpace.train(episodes=1, force_execution=False)
        np.save('q.npy', actionSpace.ai.q)
        # pp.pprint(actionSpace.ai.q)
    return actionSpace.ai.q


def test(episodes=100, average=True, fixed_a=None):
    if not orderbook_test.getStates():
        orderbook_test.loadFromFile(testBook)

    q = np.load('q.npy').item()
    # M <- [t, i, Price, A, Paid, Diff]
    M = actionSpace_test.backtest(q, episodes, average=average, fixed_a=fixed_a)
    return M


def run_profit(epochs_train=10, epochs_test=5, fixed_a=None):
    if epochs_train > 0:
        q = train(epochs_train)
    M = test(epochs_test, average=False, fixed_a=fixed_a)
    M = np.array(M)
    # print(M)
    return np.mean(M[0:, 4])


def calculate_profits(epochs, fixed_a=None):
    profits = []
    for i in range(epochs):
        M = test(1, average=False, fixed_a=fixed_a)
        M = np.array(M)
        # print(M)
        profits.append(np.sum(M[0:, 4]))
    return profits


def hist_profit(episodes, fixed_a=None):
    x = calculate_profits(episodes, fixed_a=fixed_a)
    sns.distplot(x)
    plt.show()


def run_q_reward():
    q = train(1)
    reward = np.mean(list(q.values()))
    print("Cumulative reward: " + str(reward))
    return reward


def evaluateReturns(levels=range(-100, 101), crossval=10, force_execution=True, trade_log=False):
    t = T[-1]
    i = I[-1]
    ys = []
    ys2 = []
    for level in levels:
        profit = []
        profit2 = []
        a = level
        for _ in range(crossval):
            action = actionSpace.createAction(a, t, i, force_execution=force_execution)
            refBefore = action.getReferencePrice()
            if trade_log:
                print("\nLEVEL: " + str(level))
                print("-----------")
                print("Reference price: " + str(refBefore) + " ("+str(action.getOrderbookState().getTimestamp())+", index="+str(action.getOrderbookIndex())+")")
            action.run(actionSpace.orderbook)
            refAfter = action.getOrderbookState().getTradePrice()
            paid = action.getAvgPrice()
            if trade_log:
                print("Order: " + str(action.getOrder()))
                print("Trades:")
                print(action.getTrades())
            if paid == 0.0:
                assert force_execution == False
                continue
            elif action.getOrder().getSide() == OrderSide.BUY:
                profit.append(refBefore - paid)
                profit2.append(refAfter - paid)
            else:
                profit.append(paid - refBefore)
                profit2.append(paid - refAfter)

        ys.append(profit)
        ys2.append(profit2)
    x = levels
    return (x, ys, ys2)


def reject_outliers(data, m=1.5):
    return data[abs(data - np.mean(data)) < m * np.std(data)]


def priceReturnCurve(enable_after_exec_return=True, levels=range(-100, 101), crossval=10, force_execution=True, filter_outliers=False, trade_log=False):
    (x, ys, ys2) = evaluateReturns(levels, crossval, force_execution, trade_log)
    if filter_outliers:
        y = [np.mean(reject_outliers(np.array(x))) for x in ys]
        y2 = [np.mean(reject_outliers(np.array(x))) for x in ys2]
    else:
        y = [np.mean(np.array(x)) for x in ys]
        y2 = [np.mean(np.array(x)) for x in ys2]

    plt.plot(x, y, 'r-')
    if enable_after_exec_return:
        plt.plot(x, y2, 'g-')
    plt.grid(linestyle='-', linewidth=2)
    plt.show()

# logging.basicConfig(level=logging.DEBUG)

side = OrderSide.BUY
levels = [5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7, -10, -12, -15]
ai = QLearn(actions=levels, epsilon=0.4, alpha=0.3, gamma=0.8)

# trainBook/testBook are only needed by train()/test() when the orderbook
# has not been preloaded below:
#trainBook = 'query_result_train_15m.tsv'
#testBook = 'query_result_train_15m.tsv'

# orderbook = Orderbook(extraFeatures=False)
# orderbook.loadFromBitfinexFile('orderbook_bitfinex_btcusd_view.tsv')
# orderbook_test = Orderbook(extraFeatures=False)
# orderbook_test.loadFromBitfinexFile('orderbook_bitfinex_btcusd_view.tsv')

# Load orderbook (pandas is imported at the top of the file)
cols = ["ts", "seq", "size", "price", "is_bid", "is_trade", "ttype"]
events = pd.read_table('data/events/ob-1-small.tsv', sep='\t', names=cols, index_col="seq")
d = Orderbook.generateDictFromEvents(events)
orderbook = Orderbook()
orderbook.loadFromDict(d)
# clean first n states (due to lack of bids and asks)
print("#States: " + str(len(orderbook.states)))
for i in range(100):
    orderbook.states.pop(0)
    del d[list(d.keys())[0]]
orderbook_test = orderbook
#orderbook.plot()


T = [0, 10, 20, 40, 60, 80, 100]  # , 120, 240
T_test = [0, 10, 20, 40, 60, 80, 100]  # , 120, 240

I = [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
actionSpace = ActionSpace(orderbook, side, T, I, ai, levels)
actionSpace_test = ActionSpace(orderbook_test, side, T_test, I, ai, levels)

#priceReturnCurve(crossval=1)


UI.animate(run_profit, interval=100)
# UI.animate(run_q_reward, interval=1000)
--------------------------------------------------------------------------------
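As a standalone sanity check of the reject_outliers helper defined in strategy.py above, the following minimal example uses made-up numbers (not project data) to show the mean +/- m*std filter in action:

import numpy as np

def reject_outliers(data, m=1.5):
    # keep only values within m standard deviations of the mean
    return data[abs(data - np.mean(data)) < m * np.std(data)]

profits = np.array([0.5, 0.7, 0.6, 0.4, 12.0])  # 12.0 is a deliberate outlier
print(reject_outliers(profits))  # -> [0.5 0.7 0.6 0.4]; the 12.0 entry is dropped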