├── .gitignore
├── LICENSE.md
├── README.md
├── app
│   ├── __init__.py
│   ├── btcchina.py
│   ├── collect_data
│   │   ├── collect_btcc_books.py
│   │   ├── collect_btcc_ticks.py
│   │   ├── crawl_btcc_trades_history.py
│   │   └── run_collect_scripts.sh
│   ├── create_live_features.py
│   ├── model
│   │   ├── __init__.py
│   │   ├── add_final_to_features.py
│   │   ├── add_midx_to_features.py
│   │   ├── features.py
│   │   ├── features_parallel.py
│   │   ├── full_create_features_and_train_script.sh
│   │   ├── model.py
│   │   ├── strategy.py
│   │   └── strategy_multiple.py
│   └── predict.py
└── images
    └── bot.png
/.gitignore: -------------------------------------------------------------------------------- 1 | data/ 2 | .idea 3 | *.pyc 4 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner.
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CryptoBot v1.0.0 2 | 3 | 5 | 6 | ## About 7 | CryptoBot is an automated, high(ish)-frequency trading bot for cryptocurrencies. It uses **Machine Learning** to decide when to trade. Currently it supports only Bitcoin, trading on the BTCC exchange. In the future I intend to add other currencies and exchanges. 8 | 9 | This project is a very-hard-fork of Christopher Bynum's BitPredict, which can be seen at: https://github.com/cbyn/bitpredict. The base code and the idea are modeled on the BitPredict project, so credit is due. However, CryptoBot has evolved immensely, looks very different from BitPredict, and has been taken several steps further. 10 | 11 | The project is written entirely in **Python**, with the exception of some shell scripts. 12 | 13 | ## Details 14 | Data is collected from BTCC using their JSON RPC API, and stored in MongoDB by the scripts located in the app/collect_data folder: 15 | - A book snapshot is collected every second 16 | - The latest trades are collected every second 17 | - Ticks are collected every second 18 | - The run_collect_scripts.sh script can be used to launch all of them 19 | 20 | Features are created and saved to disk using the create_live_features.py script. The **Machine Learning features** include: 21 | - Width 22 | - Power Imbalance 23 | - Power Adjusted Price 24 | - Trade Count 25 | - Trade Average 26 | - Aggressor 27 | - Trend 28 | - These features were adapted from Christopher Bynum's BitPredict project. More details at: https://github.com/cbyn/bitpredict/ (a simplified sketch of a few of them follows below) 29 | - Please feel free to suggest others!
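To make a few of these concrete, here is a simplified, illustrative sketch of how the Width, midpoint, and Power Imbalance features can be computed for a single order-book snapshot. It is adapted loosely from the pandas-based logic in app/model/features.py (not the exact code), and the `bids`/`asks` lists of (price, amount) pairs are hypothetical inputs:

```python
# Illustrative only: a stripped-down version of the feature logic in
# app/model/features.py, operating on plain lists instead of DataFrames.
def book_features(bids, asks, n=10, power=2):
    # bids: (price, amount) pairs, best (highest) bid first
    # asks: (price, amount) pairs, best (lowest) ask first
    best_bid, best_ask = bids[0][0], asks[0][0]
    width = best_ask - best_bid        # spread of the best market
    mid = (best_bid + best_ask) / 2.0  # midpoint price

    def power_weight(price, amount):
        # Volume weighted by inverse distance from the mid, so orders far
        # from the midpoint contribute less; guard against price == mid.
        if price == mid:
            return 0.0
        return amount * (0.5 * width / (price - mid)) ** power

    bid_power = sum(power_weight(p, a) for p, a in bids[:n])
    ask_power = sum(power_weight(p, a) for p, a in asks[:n])
    imbalance = bid_power - ask_power  # > 0 hints at buying pressure
    return width, mid, imbalance

# e.g. book_features([(100.0, 1.2), (99.5, 3.0)], [(100.5, 0.8), (101.0, 2.5)])
```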
30 | 31 | A target (named "final") is created from the future midpoint prices, i.e. the midpoint between the best bid and ask in the books at 5, 10, 15, 20, 25, and 30 seconds in the future: 32 | - -1 means the average of the future midpoints dropped below a certain threshold percentage after 15 seconds 33 | - +1 means the average of the future midpoints rose above a certain threshold percentage after 15 seconds 34 | - 0 means the average price moved neither above nor below the threshold percentage after 15 seconds (a minimal sketch of this labeling rule appears under Other Notes, below) 35 | 36 | Using the features, we train a Machine Learning **classifier** model (using the strategy.py script) against the target value to give us one of three options: 37 | - -1 means the price is predicted to go down, so trade accordingly 38 | - +1 means the price is predicted to go up, so trade accordingly 39 | - 0 means don't make a trade 40 | 41 | I have tried it using the following classifiers: 42 | - **XGBClassifier** from the XGBoost project: https://xgboost.readthedocs.io/en/latest/ 43 | - **RandomForestClassifier** from the scikit-learn library: http://scikit-learn.org/ 44 | - **GradientBoostingClassifier** from the scikit-learn library 45 | - YMMV; try each to see which one gives you the best results (strategy.py will back-test and create the model) 46 | 47 | predict.py is used to do the live trading. 48 | 49 | ## Inner Workings 50 | 51 | - A trade is made (a position is taken) and then reversed after 15 seconds 52 | - The balance is kept in a 50/50 split, with 50% as bitcoin and 50% as cash (fiat) 53 | - When the price is predicted to go down, bitcoins are traded for cash, and then bought back at a (hopefully) lower price, yielding a profit in the bitcoin balance. 54 | - When the price is predicted to go up, cash is traded for bitcoins, and then the bitcoins are sold back at a (hopefully) higher price, yielding a profit in the cash balance. 55 | - Keep in mind that the orders you place never execute immediately (if ever), which is why we average the price midpoints around the 15-second mark: in case the trade actually takes place, and reverses, somewhere within that window. 56 | - You will probably need several weeks of data before you can train a classifier that gets you any meaningful results. 57 | 58 | ## Disclaimer 59 | The bot is fully functional. However, this was more of an exercise to teach myself Machine Learning. I have not been able to make a consistent profit, and neither should you expect to. Please be very careful in using this bot, and assume all responsibility yourself. Never use it to trade more than you're willing to lose (i.e. use it for fun only). 60 | 61 | ## Other Notes 62 | This is very much a work in progress. Contributions are welcome. If you can bring it to consistent profitability, do share!
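As a concrete reference for the target described above, here is a minimal sketch of the labeling rule, assuming `future_mids` holds the future-midpoint percent changes for one row and using the 0.05% threshold from app/model/add_final_to_features.py:

```python
# Simplified restatement of the rule in app/model/add_final_to_features.py.
THRESHOLD = 0.05 / 100  # 0.05%, expressed as a fraction

def label(future_mids):
    # future_mids: percent changes of the midpoint at several future offsets
    average = sum(future_mids) / float(len(future_mids))
    if average > THRESHOLD:
        return 1    # expected up-move: buy, then sell back
    elif average < -THRESHOLD:
        return -1   # expected down-move: sell, then buy back
    return 0        # inside the noise band: stay out of the market
```

This is the same three-way signal the classifier is trained to reproduce, and that predict.py acts on during live trading.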
63 | 64 | ## License 65 | Licensed under the [Apache License](LICENSE.md) -------------------------------------------------------------------------------- /app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdeelMufti/CryptoBot/6cbdfea43af9690d289f92db6b1b3b371abbd2fb/app/__init__.py -------------------------------------------------------------------------------- /app/btcchina.py: -------------------------------------------------------------------------------- 1 | import time 2 | import re 3 | import hmac 4 | import hashlib 5 | import base64 6 | import httplib 7 | import json 8 | 9 | 10 | class BTCChina(): 11 | def __init__(self, access=None, secret=None): 12 | self.access_key = access 13 | self.secret_key = secret 14 | self.conn = None 15 | self._make_connection() 16 | 17 | def _make_connection(self): 18 | if self.conn: 19 | self.conn.close() 20 | self.conn = httplib.HTTPSConnection("api.btcchina.com") 21 | 22 | def _get_tonce(self): 23 | return int(time.time() * 1000000) 24 | 25 | def _get_params_hash(self, pdict): 26 | pstring = "" 27 | # The order of params is critical for calculating a correct hash 28 | fields = ['tonce', 'accesskey', 'requestmethod', 'id', 'method', 'params'] 29 | for f in fields: 30 | if pdict[f]: 31 | if f == 'params': 32 | # Convert list to string, then strip brackets and spaces 33 | # probably a cleaner way to do this 34 | param_string = re.sub("[\[\] ]", "", str(pdict[f])) 35 | param_string = re.sub("'", '', param_string) 36 | param_string = re.sub("True", '1', param_string) 37 | param_string = re.sub("False", '', param_string) 38 | param_string = re.sub("None", '', param_string) 39 | pstring += f + '=' + param_string + '&' 40 | else: 41 | pstring += f + '=' + str(pdict[f]) + '&' 42 | else: 43 | pstring += f + '=&' 44 | pstring = pstring.strip('&') 45 | 46 | # now with correctly ordered param string, calculate hash 47 | phash = hmac.new(self.secret_key, pstring, hashlib.sha1).hexdigest() 48 | return phash 49 | 50 | def _private_request(self, post_data): 51 | # fill in common post_data parameters 52 | tonce = self._get_tonce() 53 | post_data['tonce'] = tonce 54 | post_data['accesskey'] = self.access_key 55 | post_data['requestmethod'] = 'post' 56 | 57 | # If ID is not passed as a key of post_data, just use tonce 58 | if not 'id' in post_data: 59 | post_data['id'] = tonce 60 | 61 | pd_hash = self._get_params_hash(post_data) 62 | 63 | # must use b64 encode 64 | auth_string = 'Basic ' + base64.b64encode(self.access_key + ':' + pd_hash) 65 | headers = {'Authorization': auth_string, 'Json-Rpc-Tonce': tonce} 66 | 67 | # post_data dictionary passed as JSON 68 | try: 69 | self.conn.request("POST", '/api_trade_v1.php', json.dumps(post_data), headers) 70 | response = self.conn.getresponse() 71 | except Exception as e: 72 | print "[btcchina.py] ***!!! Exception with httplib. Will reconnect." 
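            # Reset the HTTPS connection so the next request starts from a fresh socket, then re-raise to the caller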
73 | self._make_connection() 74 | raise 75 | else: 76 | # check response code, ID, and existence of 'result' or 'error' 77 | # before passing a dict of results 78 | if response.status == 200: 79 | # this might fail if non-json data is returned 80 | resp_dict = json.loads(response.read()) 81 | 82 | # The id's may need to be used by the calling application, 83 | # but for now, check and discard from the return dict 84 | if str(resp_dict['id']) == str(post_data['id']): 85 | if 'result' in resp_dict: 86 | return resp_dict['result'] 87 | elif 'error' in resp_dict: 88 | return resp_dict 89 | else: 90 | # not great error handling.... 91 | print "status:", response.status 92 | print "reason:", response.reason 93 | return None 94 | 95 | def get_account_info(self, post_data={}): 96 | post_data['method'] = 'getAccountInfo' 97 | post_data['params'] = [] 98 | return self._private_request(post_data) 99 | 100 | def get_market_depth2(self, limit=10, market="btccny", post_data={}): 101 | post_data['method'] = 'getMarketDepth2' 102 | post_data['params'] = [limit, market] 103 | return self._private_request(post_data) 104 | 105 | def buy(self, price, amount, market="btccny", post_data={}): 106 | amountStr = "{0:.4f}".format(round(amount, 4)) 107 | post_data['method'] = 'buyOrder2' 108 | if price == None: 109 | priceStr = None 110 | else: 111 | priceStr = "{0:.4f}".format(round(price, 4)) 112 | post_data['params'] = [priceStr, amountStr, market] 113 | return self._private_request(post_data) 114 | 115 | def sell(self, price, amount, market="btccny", post_data={}): 116 | amountStr = "{0:.4f}".format(round(amount, 4)) 117 | post_data['method'] = 'sellOrder2' 118 | if price == None: 119 | priceStr = None 120 | else: 121 | priceStr = "{0:.4f}".format(round(price, 4)) 122 | post_data['params'] = [priceStr, amountStr, market] 123 | return self._private_request(post_data) 124 | 125 | def cancel(self, order_id, market="btccny", post_data={}): 126 | post_data['method'] = 'cancelOrder' 127 | post_data['params'] = [order_id, market] 128 | return self._private_request(post_data) 129 | 130 | def request_withdrawal(self, currency, amount, post_data={}): 131 | post_data['method'] = 'requestWithdrawal' 132 | post_data['params'] = [currency, amount] 133 | return self._private_request(post_data) 134 | 135 | def get_deposits(self, currency='BTC', pending=True, post_data={}): 136 | post_data['method'] = 'getDeposits' 137 | post_data['params'] = [currency, pending] 138 | return self._private_request(post_data) 139 | 140 | def get_orders(self, id=None, open_only=True, market="btccny", details=True, post_data={}): 141 | # this combines getOrder and getOrders 142 | if id is None: 143 | post_data['method'] = 'getOrders' 144 | post_data['params'] = [open_only, market] 145 | else: 146 | post_data['method'] = 'getOrder' 147 | post_data['params'] = [id, market, details] 148 | return self._private_request(post_data) 149 | 150 | def get_withdrawals(self, id='BTC', pending=True, post_data={}): 151 | # this combines getWithdrawal and getWithdrawals 152 | try: 153 | id = int(id) 154 | post_data['method'] = 'getWithdrawal' 155 | post_data['params'] = [id] 156 | except: 157 | post_data['method'] = 'getWithdrawals' 158 | post_data['params'] = [id, pending] 159 | return self._private_request(post_data) 160 | 161 | def get_transactions(self, trans_type='all', limit=10, post_data={}): 162 | post_data['method'] = 'getTransactions' 163 | post_data['params'] = [trans_type, limit] 164 | return self._private_request(post_data) 165 | 166 | def 
get_archived_order(self, id, market='btccny', withdetail=False, post_data={}): 167 | post_data['method'] = 'getArchivedOrder' 168 | post_data['params'] = [id, market, withdetail] 169 | return self._private_request(post_data) 170 | 171 | def get_archived_orders(self, market='btccny', limit=200, less_than_order_id=0, withdetail=False, post_data={}): 172 | post_data['method'] = 'getArchivedOrders' 173 | post_data['params'] = [market, limit, less_than_order_id, withdetail] 174 | return self._private_request(post_data) -------------------------------------------------------------------------------- /app/collect_data/collect_btcc_books.py: -------------------------------------------------------------------------------- 1 | #python collect_btcc_books.py 2 | from urllib2 import urlopen 3 | import time 4 | import json 5 | from pymongo import MongoClient 6 | import sys 7 | import datetime 8 | 9 | api = 'http://data.btcchina.com' 10 | book_url = '{0}/data/orderbook?market=btccny&limit=10'.format(api) 11 | client = MongoClient() 12 | db = client['cryptobot'] 13 | books_collection = db['btcc_btccny_books'] 14 | sleep_between_requests_secs = 1.0 15 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 16 | 17 | def get_formatted_time_string(this_time): 18 | return datetime.datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 19 | 20 | def format_book_entry(entry): 21 | ''' 22 | Converts book data to float 23 | ''' 24 | bids = entry['bids'] 25 | new_bids = [] 26 | for row in bids: 27 | new_row = {} 28 | new_row['price'] = float(row[0]) 29 | new_row['amount'] = float(row[1]) 30 | new_row['timestamp'] = float(entry['date']) 31 | new_bids.append(new_row) 32 | entry['bids'] = new_bids 33 | 34 | asks = entry['asks'] 35 | new_asks = [] 36 | for row in asks: 37 | new_row = {} 38 | new_row['price'] = float(row[0]) 39 | new_row['amount'] = float(row[1]) 40 | new_row['timestamp'] = float(entry['date']) 41 | new_asks.append(new_row) 42 | entry['asks'] = new_asks 43 | 44 | return entry 45 | 46 | 47 | def get_json(url): 48 | ''' 49 | Gets json from the API 50 | ''' 51 | resp = urlopen(url,timeout=5) 52 | return json.load(resp, object_hook=format_book_entry), resp.getcode() 53 | 54 | 55 | print 'Running...' 
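# Main loop: snapshot the order book once per second, keyed by the collection timestamp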
56 | while True: 57 | start = time.time() 58 | print '*** Getting books at',get_formatted_time_string(start),start 59 | try: 60 | book, code = get_json(book_url) 61 | except Exception as e: 62 | print e 63 | sys.exc_clear() 64 | else: 65 | if code != 200: 66 | print code 67 | else: 68 | book.pop('date') 69 | book['_id'] = time.time() 70 | books_collection.insert_one(book) 71 | time_delta = time.time()-start 72 | if time_delta < sleep_between_requests_secs: 73 | time.sleep(sleep_between_requests_secs-time_delta) 74 | -------------------------------------------------------------------------------- /app/collect_data/collect_btcc_ticks.py: -------------------------------------------------------------------------------- 1 | #python collect_btcc_ticks.py 2 | from urllib2 import urlopen 3 | import time 4 | import json 5 | from pymongo import MongoClient 6 | import sys 7 | import datetime 8 | 9 | api = 'http://data.btcchina.com' 10 | tick_url = '{0}/data/ticker?market=btccny'.format(api) 11 | client = MongoClient() 12 | db = client['cryptobot'] 13 | ticks_collection = db['btcc_btccny_ticks'] 14 | sleep_between_requests_secs = 1.0 15 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 16 | 17 | def get_formatted_time_string(this_time): 18 | return datetime.datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 19 | 20 | def get_json(url): 21 | ''' 22 | Gets json from the API 23 | ''' 24 | resp = urlopen(url,timeout=5) 25 | entry = json.load(resp)['ticker'] 26 | entry['_id'] = entry.pop('date') 27 | for key in entry: 28 | entry[key] = float(entry[key]) 29 | # tick = {} 30 | # tick['_id'] = float(entry['date']) 31 | # tick['high'] = float(entry['high']) 32 | # tick['low'] = float(entry['low']) 33 | # tick['buy'] = float(entry['buy']) 34 | # tick['sell'] = float(entry['sell']) 35 | return entry, resp.getcode() 36 | 37 | print 'Running...' 
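# Main loop: poll the ticker once per second; the upsert below stores each exchange timestamp only once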
38 | while True: 39 | start = time.time() 40 | print '*** Getting tick at',get_formatted_time_string(start),start,'.', 41 | try: 42 | tick, code = get_json(tick_url) 43 | except Exception as e: 44 | print e 45 | sys.exc_clear() 46 | else: 47 | if code != 200: 48 | print code 49 | else: 50 | print 'Gotten it for',get_formatted_time_string(tick['_id']),tick['_id'] 51 | ticks_collection.update_one({'_id': tick['_id']}, 52 | {'$setOnInsert': tick}, upsert=True) 53 | time_delta = time.time()-start 54 | if time_delta < sleep_between_requests_secs: 55 | time.sleep(sleep_between_requests_secs-time_delta) 56 | -------------------------------------------------------------------------------- /app/collect_data/crawl_btcc_trades_history.py: -------------------------------------------------------------------------------- 1 | #python crawl_btcc_trades_history.py 2 | from urllib2 import urlopen 3 | import sys 4 | import json 5 | import time 6 | from pymongo import MongoClient 7 | import datetime 8 | 9 | api = 'http://data.btcchina.com' 10 | trades_history_url = '{0}/data/historydata?since={1}&limit=5000&sincetype=time'; 11 | client = MongoClient() 12 | db = client['cryptobot'] 13 | trades_collection = db['btcc_btccny_trades'] 14 | sleep_between_requests_secs = 1.0 15 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 16 | 17 | def get_formatted_time_string(this_time): 18 | return datetime.datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 19 | 20 | def format_trade(trade): 21 | ''' 22 | Formats trade data 23 | ''' 24 | if all(key in trade for key in ('tid', 'amount', 'price', 'date')): 25 | trade['_id'] = int(trade.pop('tid')) 26 | trade['amount'] = float(trade['amount']) 27 | trade['price'] = float(trade['price']) 28 | trade['timestamp'] = float(trade.pop('date')) 29 | return trade 30 | 31 | 32 | def get_json(url): 33 | ''' 34 | Gets json from the API 35 | ''' 36 | resp = urlopen(url,timeout=5) 37 | return json.load(resp, object_hook=format_trade), resp.getcode() 38 | 39 | def get_latest_time(time_to_fetch): 40 | cursor = trades_collection.find().sort("$natural", -1).limit(1) 41 | for document in cursor: 42 | return document['timestamp'] 43 | return time_to_fetch 44 | 45 | continuous = False 46 | if len(sys.argv) == 2: 47 | if sys.argv[1] == 'true': 48 | continuous = True 49 | start_time = 1476093600 50 | time_to_fetch = get_latest_time(start_time) 51 | trades_count = 1 52 | while continuous or trades_count > 0: 53 | start = time.time() 54 | url = trades_history_url.format(api, int(time_to_fetch)) 55 | print '*** Getting trades at',get_formatted_time_string(time_to_fetch),time_to_fetch,'.', 56 | try: 57 | trades, code = get_json(url) 58 | except Exception as e: 59 | print e 60 | sys.exc_clear() 61 | else: 62 | if code != 200: 63 | print code 64 | else: 65 | for trade in trades: 66 | trades_collection.update_one({'_id': trade['_id']}, 67 | {'$setOnInsert': trade}, upsert=True) 68 | time_to_fetch = get_latest_time(time_to_fetch) 69 | #time_to_fetch = trades[len(trades)-1]['timestamp'] 70 | trades_count = len(trades) 71 | print 'Got',trades_count,'trades.' 
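    # Throttle so the history endpoint is hit at most once per second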
72 | time_delta = time.time() - start 73 | if time_delta < sleep_between_requests_secs: 74 | time.sleep(sleep_between_requests_secs - time_delta) -------------------------------------------------------------------------------- /app/collect_data/run_collect_scripts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | nohup python /home/ec2-user/cryptobot/app/collect_data/collect_btcc_books.py >> /dev/null 2>&1 & 3 | nohup python /home/ec2-user/cryptobot/app/collect_data/collect_btcc_ticks.py >> /dev/null 2>&1 & 4 | nohup python /home/ec2-user/cryptobot/app/collect_data/crawl_btcc_trades_history.py true >> /dev/null 2>&1 & 5 | cd /home/ec2-user/cryptobot/app/ 6 | nohup python /home/ec2-user/cryptobot/app/create_live_features.py >> /dev/null 2>&1 & -------------------------------------------------------------------------------- /app/create_live_features.py: -------------------------------------------------------------------------------- 1 | from model import features as f 2 | import pymongo 3 | import time 4 | from datetime import datetime 5 | 6 | client = pymongo.MongoClient() 7 | db = client['cryptobot'] 8 | books_db = db['btcc_btccny_books'] 9 | 10 | def append_df_to_csv(df, csvFilePath, sep=","): 11 | import os 12 | if not os.path.isfile(csvFilePath): 13 | df.to_csv(csvFilePath, mode='a', index=True, sep=sep) 14 | else: 15 | df.to_csv(csvFilePath, mode='a', index=True, sep=sep, header=False) 16 | 17 | def get_current_time_seconds_utc(): 18 | return (datetime.utcnow()-datetime(1970,1,1)).total_seconds() 19 | 20 | def get_latest_book_timestamp(): 21 | book = books_db.find({},{'_id': 1}).sort('_id', -1).limit(1).next() 22 | return book['_id'] 23 | 24 | last_data_timestamp = 0 25 | 26 | while True: 27 | start = get_current_time_seconds_utc() 28 | 29 | this_data_timestamp = get_latest_book_timestamp() 30 | if this_data_timestamp < (start-3): 31 | # print "Data hasn't been updated in less than 3 seconds, skipping...",this_data_timestamp,start-3 32 | None 33 | elif last_data_timestamp != 0 and last_data_timestamp == this_data_timestamp: 34 | # print "Last data timestamp is equal to this data timestamp, skipping..." 
35 | None 36 | else: 37 | last_data_timestamp = this_data_timestamp 38 | 39 | data = f.make_features(1, 40 | [], 41 | [10, 15, 30, 45, 60, 75, 90, 105, 120, 135, 150, 165, 180], 42 | [2, 4, 8], 43 | True) 44 | 45 | append_df_to_csv(data, 'data/data_live.tsv', '\t') 46 | 47 | time_delta = get_current_time_seconds_utc()-start 48 | if time_delta < 1.0: 49 | time.sleep(1-time_delta) -------------------------------------------------------------------------------- /app/model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdeelMufti/CryptoBot/6cbdfea43af9690d289f92db6b1b3b371abbd2fb/app/model/__init__.py -------------------------------------------------------------------------------- /app/model/add_final_to_features.py: -------------------------------------------------------------------------------- 1 | #python add_final_to_features.py ../data/data_live.with_midx.tsv 2 | import sys 3 | import pandas as pd 4 | from datetime import datetime 5 | import numpy as np 6 | import multiprocessing 7 | 8 | mids = [0, 2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25, 27.5, 30] 9 | threshold_percent = 0.05 10 | threshold = threshold_percent/100 11 | input_filename = sys.argv[1] 12 | cpu_count = multiprocessing.cpu_count() 13 | 14 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 15 | 16 | def get_formatted_time_string(this_time): 17 | return datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 18 | 19 | def get_current_time_seconds_utc(): 20 | return (datetime.utcnow()-datetime(1970,1,1)).total_seconds() 21 | 22 | def all_signs_equal(data, mids): 23 | mids_as_list = [] 24 | for mid in mids: 25 | mids_as_list.append(data["mid%s"%mid]) 26 | mid_signs = np.sign(mids_as_list) 27 | all_signs_equal = True 28 | sign = mid_signs[0] 29 | for mid_sign in mid_signs: 30 | if sign != mid_sign: 31 | all_signs_equal = False 32 | break 33 | return all_signs_equal 34 | 35 | def average_all_mids(data, mids): 36 | mids_as_list = [] 37 | for mid in mids: 38 | mids_as_list.append(data["mid%s"%mid]) 39 | average = np.asarray(mids_as_list).mean() 40 | return average 41 | 42 | def get_final(books): 43 | def final(book): 44 | # if not all_signs_equal(book,mids): 45 | # return 0 46 | # else: 47 | average = average_all_mids(book,mids) 48 | 49 | if average > threshold: 50 | return 1 51 | elif average < -threshold: 52 | return -1 53 | else: 54 | return 0 55 | 56 | return books.apply(final,axis=1) 57 | 58 | def worker(params): 59 | num = params[0] 60 | data = params[1] 61 | split_interval = params[2] 62 | split_start = num*split_interval 63 | split_end = ((num+1)*split_interval)+5 64 | print "%s - Worker %s starting at %s, ending at %s" % (get_formatted_time_string(get_current_time_seconds_utc()), num, split_start, split_end) 65 | this_data = data.iloc[split_start:split_end].copy() 66 | this_data['final'] = get_final(this_data) 67 | return this_data 68 | 69 | def handler(data, split_interval): 70 | splits = range(0, cpu_count) 71 | parallel_arguments = [] 72 | for split in splits: 73 | parallel_arguments.append([split, data, split_interval]) 74 | pool = multiprocessing.Pool(cpu_count) 75 | data_array = pool.map(worker, parallel_arguments) 76 | pool.close() 77 | pool.join() 78 | final_data = pd.concat(data_array) 79 | final_data = final_data.groupby(final_data.index).max() 80 | #final_data = final_data[~final_data.index.duplicated(keep='first')] 81 | return final_data.sort_index() 82 | 83 | if __name__ == '__main__': 84 | print "%s - Reading data" % 
(get_formatted_time_string(get_current_time_seconds_utc())) 85 | data = pd.DataFrame.from_csv(input_filename, sep='\t') 86 | #data = data.groupby(data.index).first() 87 | #data = data.dropna(0) 88 | data_count = len(data) 89 | split_interval = data_count / cpu_count 90 | print "%s - Data length %s, cpu count %s, therefore split interval %s" % ( 91 | get_formatted_time_string(get_current_time_seconds_utc()), data_count, cpu_count, split_interval) 92 | 93 | final_data = handler(data, split_interval) 94 | 95 | base_filename = '.'.join(input_filename.split('.')[:-1]) if '.' in input_filename else input_filename 96 | dump_filename = base_filename+".with_final.tsv" 97 | print "%s - Dumping %s records to %s" % (get_formatted_time_string(get_current_time_seconds_utc()), len(final_data), dump_filename) 98 | final_data.to_csv(dump_filename, sep='\t') 99 | 100 | print "%s - Done" % (get_formatted_time_string(get_current_time_seconds_utc())) -------------------------------------------------------------------------------- /app/model/add_midx_to_features.py: -------------------------------------------------------------------------------- 1 | #Can add midx to features dump, and then can use that in strategy.py for testing/creating a model from that 2 | #python add_midx_to_features.py ../data/data_live.tsv 3 | from math import log 4 | import sys 5 | import pandas as pd 6 | from datetime import datetime 7 | import multiprocessing 8 | 9 | mids_to_add = [0, 2.5, 5, 7.5, 10, 12.5, 15, 17.5, 20, 22.5, 25, 27.5, 30] 10 | input_filename = sys.argv[1] 11 | cpu_count = multiprocessing.cpu_count() 12 | 13 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 14 | 15 | def get_formatted_time_string(this_time): 16 | return datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 17 | 18 | def get_current_time_seconds_utc(): 19 | return (datetime.utcnow()-datetime(1970,1,1)).total_seconds() 20 | 21 | def get_future_mid(books, offset, sensitivity): 22 | ''' 23 | Returns percent change of future midpoints for each data point in DataFrame 24 | of book data 25 | ''' 26 | def future(timestamp): 27 | i = books.index.get_loc(timestamp+offset, method='nearest') 28 | if abs(books.index[i] - (timestamp+offset)) < sensitivity: 29 | return books.mid.iloc[i] 30 | return (books.index.map(future)/books.mid).apply(log) 31 | 32 | def worker(params): 33 | num = params[0] 34 | data = params[1] 35 | split_interval = params[2] 36 | split_start = num*split_interval 37 | split_end = ((num+1)*split_interval)-1 + mids_to_add[-1]+5 38 | print "%s - Worker %s starting at %s, ending at %s" % (get_formatted_time_string(get_current_time_seconds_utc()), num, split_start, split_end) 39 | this_data = data.iloc[split_start:split_end].copy() 40 | for mid in mids_to_add: 41 | print "%s - Worker %s getting mid%s" % (get_formatted_time_string(get_current_time_seconds_utc()),num,mid) 42 | this_data["mid%s"%mid] = get_future_mid(this_data, mid, sensitivity=5) 43 | return this_data 44 | 45 | def handler(data, split_interval): 46 | splits = range(0, cpu_count) 47 | parallel_arguments = [] 48 | for split in splits: 49 | parallel_arguments.append([split, data, split_interval]) 50 | pool = multiprocessing.Pool(cpu_count) 51 | data_array = pool.map(worker, parallel_arguments) 52 | pool.close() 53 | pool.join() 54 | final_data = pd.concat(data_array) 55 | final_data = final_data.groupby(final_data.index).max() 56 | # final_data = final_data[~final_data.index.duplicated(keep='first')] 57 | subset = [] 58 | for mid in mids_to_add: 59 | subset.append("mid%s"%(mid)) 60 | 
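    # Drop rows where any future mid could not be matched within the sensitivity window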
final_data = final_data.dropna(axis=0, subset=subset) 61 | return final_data.sort_index() 62 | 63 | if __name__ == '__main__': 64 | print "%s - Reading data" % (get_formatted_time_string(get_current_time_seconds_utc())) 65 | data = pd.DataFrame.from_csv(input_filename, sep='\t') 66 | data = data.groupby(data.index).first() 67 | data_count = len(data) 68 | split_interval = data_count / cpu_count 69 | print "%s - Data length %s, cpu count %s, therefore split interval %s" % ( 70 | get_formatted_time_string(get_current_time_seconds_utc()), data_count, cpu_count, split_interval) 71 | 72 | final_data = handler(data, split_interval) 73 | 74 | base_filename = '.'.join(input_filename.split('.')[:-1]) if '.' in input_filename else input_filename 75 | dump_filename = base_filename+".with_midx.tsv" 76 | print "%s - Dumping %s records to %s" % (get_formatted_time_string(get_current_time_seconds_utc()), len(final_data), dump_filename) 77 | final_data.to_csv(dump_filename, sep='\t') 78 | 79 | print "%s - Done" % (get_formatted_time_string(get_current_time_seconds_utc())) -------------------------------------------------------------------------------- /app/model/features.py: -------------------------------------------------------------------------------- 1 | #python features.py .pkl 2 | #python -W ignore -u features.py 0 ../data/data.pkl 3 | #python -W ignore -u features.py 1035650 ../data/data.pkl 4 | import datetime 5 | import pymongo 6 | import pandas as pd 7 | from math import log 8 | from time import time 9 | import sys 10 | from scipy.stats import linregress 11 | import pickle 12 | import numpy as np 13 | 14 | client = pymongo.MongoClient() 15 | db = client['cryptobot'] 16 | books_db = db['btcc_btccny_books'] 17 | trades_db = db['btcc_btccny_trades'] 18 | # ticks_db = db['btcc_btccny_ticks'] 19 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 20 | 21 | 22 | def get_formatted_time_string(this_time): 23 | return datetime.datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 24 | 25 | def get_book_df(limit, convert_timestamps=False, skip=0, live=False, theoretical_order=None): 26 | ''' 27 | Returns a DataFrame of book data 28 | ''' 29 | if live: 30 | cursor = books_db.find().sort('_id', -1).limit(limit) 31 | else: 32 | cursor = books_db.find().sort('_id', 1).skip(skip).limit(limit) 33 | books = pd.DataFrame(list(cursor)) 34 | 35 | books = books.set_index('_id') 36 | if convert_timestamps: 37 | books.index = pd.to_datetime(books.index, unit='s') 38 | 39 | if live and theoretical_order: 40 | order_type = theoretical_order[0] 41 | price = theoretical_order[1] 42 | amount_order_in_btc = theoretical_order[2] 43 | new_order = {} 44 | new_order['timestamp'] = books.iloc[0].asks[0]['timestamp'] 45 | new_order['price'] = price 46 | new_order['amount'] = amount_order_in_btc 47 | if order_type == 'bid': 48 | books.iloc[0].bids = books.iloc[0].bids[:9] 49 | bids = [new_order] 50 | for order in books.iloc[0].bids: 51 | bids.append(order) 52 | books.iloc[0].bids = bids 53 | elif order_type == 'ask': 54 | books.iloc[0].asks = books.iloc[0].asks[1:] 55 | asks = [] 56 | for order in books.iloc[0].asks: 57 | asks.append(order) 58 | asks.append(new_order) 59 | books.iloc[0].asks = asks 60 | 61 | def to_df(x): 62 | return pd.DataFrame(x[:10]) 63 | 64 | return books.applymap(to_df).sort_index() 65 | 66 | 67 | def get_width_and_mid(books): 68 | ''' 69 | Returns width of best market and midpoint for each data point in DataFrame 70 | of book data 71 | ''' 72 | best_bid = books.bids.apply(lambda x: x.price[0]) 73 | 
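    # Note: asks appear to be stored best-last (descending price) in these books, hence the last row is taken below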
best_ask = books.asks.apply(lambda x: x.price[len(x.price)-1]) 74 | return best_ask-best_bid, (best_bid + best_ask)/2 75 | 76 | #Since asks/bids seem to be repeating in books for a while, at most (observed so far) every 15 seconds, we want to get the future mid within plus/minus 25 seconds 77 | def get_future_mid(books, offset, sensitivity=1): 78 | ''' 79 | Returns percent change of future midpoints for each data point in DataFrame 80 | of book data 81 | ''' 82 | 83 | def future(timestamp): 84 | i = books.index.get_loc(timestamp+offset, method='nearest') 85 | if abs(books.index[i] - (timestamp+offset)) < sensitivity: 86 | return books.mid.iloc[i] 87 | return (books.index.map(future)/books.mid).apply(log) 88 | 89 | 90 | def get_power_imbalance(books, n=10, power=2): 91 | ''' 92 | Returns a measure of the imbalance between bids and offers for each data 93 | point in DataFrame of book data 94 | ''' 95 | 96 | def calc_imbalance(book): 97 | def calc(x): 98 | return 0 if x.price-book.mid==0 else x.amount*(.5*book.width/(x.price-book.mid))**power 99 | bid_imbalance = book.bids.iloc[:n].apply(calc, axis=1) 100 | ask_imbalance = book.asks.iloc[:n].apply(calc, axis=1) 101 | return (bid_imbalance-ask_imbalance).sum() 102 | imbalance = books.apply(calc_imbalance, axis=1) 103 | return imbalance 104 | 105 | 106 | def get_power_adjusted_price(books, n=10, power=2): 107 | ''' 108 | Returns the percent change of an average of order prices weighted by inverse 109 | distance-weighted volume for each data point in DataFrame of book data 110 | ''' 111 | 112 | def calc_adjusted_price(book): 113 | def calc(x): 114 | return 0 if x.price-book.mid==0 else x.amount*(.5*book.width/(x.price-book.mid))**power 115 | bid_inv = 1/book.bids.iloc[:n].apply(calc, axis=1) 116 | ask_inv = 1/book.asks.iloc[:n].apply(calc, axis=1) 117 | bid_price = book.bids.price.iloc[:n] 118 | ask_price = book.asks.price.iloc[:n] 119 | sum_numerator = (bid_price*bid_inv + ask_price*ask_inv).sum() 120 | sum_denominator = (bid_inv + ask_inv).sum() 121 | # if np.isnan(sum_numerator) or np.isinf(sum_numerator) or sum_numerator == 0.0 or np.isnan(sum_denominator) or np.isinf(sum_denominator) or sum_denominator == 0.0: 122 | # return 0 123 | quotient = sum_numerator / sum_denominator 124 | # if quotient < 0.0: 125 | # return 0 126 | return quotient 127 | adjusted = books.apply(calc_adjusted_price, axis=1) 128 | return (adjusted/books.mid).apply(log).fillna(0) 129 | 130 | 131 | def get_trade_df(books, min_ts, max_ts, live, convert_timestamps=False, theoretical_trade=None): 132 | ''' 133 | Returns a DataFrame of trades in time range 134 | ''' 135 | if not live: 136 | query = {'timestamp': {'$gt': min_ts, '$lt': max_ts}} 137 | cursor = trades_db.find(query).sort('_id', pymongo.ASCENDING) 138 | else: 139 | cursor = trades_db.find({}).sort('$natural', pymongo.DESCENDING).limit(3000) 140 | 141 | trades = pd.DataFrame(list(cursor)) 142 | 143 | if live: 144 | trades = trades[trades.timestamp <= max_ts] #The above is not gte or lte because later on we do a pandas search sorted on a much larger range if not live, and that includes equals 145 | trades = trades[trades.timestamp >= min_ts] 146 | trades = trades.sort_values(['timestamp']) 147 | 148 | if live and theoretical_trade: 149 | trade_type = theoretical_trade[0] 150 | price = theoretical_trade[1] 151 | amount_trade_in_btc = theoretical_trade[2] 152 | trade = {} 153 | trade['_id']=trades.iloc[-1]['_id']+1 154 | trade['timestamp']=books.index[0]-8 #So it can be included in the -7.5 offset for trades 155 |
trade['price']=price 156 | trade['amount']=amount_trade_in_btc 157 | trade['type']='buy' if trade_type=='bid' else 'sell' 158 | trades = pd.concat([trades,pd.DataFrame(trade,index=[0])]) 159 | trades = trades.sort_values(['timestamp']) 160 | 161 | #for index, row in trades.iterrows(): 162 | # print row['timestamp'] 163 | 164 | if not trades.empty: 165 | trades = trades.set_index('_id') 166 | if convert_timestamps: 167 | trades.index = pd.to_datetime(trades.index, unit='s') 168 | 169 | # for i in xrange(len(trades)): 170 | # print i, trades.index[i],trades.iloc[i]['amount'],trades.iloc[i]['price'],trades.iloc[i]['timestamp'],trades.iloc[i]['type'] 171 | 172 | return trades 173 | 174 | def get_trades_indexes(books, trades, offset, live=False): 175 | ''' 176 | Returns indexes of trades in offset range for each data point in DataFrame 177 | of book data 178 | ''' 179 | def trades_indexes(ts): 180 | ts = int(ts) 181 | i_0 = trades.timestamp.searchsorted([ts-offset], side='left')[0] 182 | # if live: 183 | # i_n = -1 184 | # else: 185 | # i_n = trades.timestamp.searchsorted([ts-1], side='right')[0] 186 | i_n = trades.timestamp.searchsorted([ts - 7.5], side='right')[0] #because live trades lag behind for about 7-10 seconds 187 | if i_n == len(trades): 188 | i_n = i_n-1 189 | #print offset, ts, len(trades), i_0, i_n, trades.iloc[i_0].timestamp, trades.iloc[i_n].timestamp 190 | return (i_0, i_n) 191 | return books.index.map(trades_indexes) 192 | 193 | def get_trades_count(books, trades): 194 | ''' 195 | Returns a count of trades for each data point in DataFrame of book data 196 | ''' 197 | def count(x): 198 | return len(trades.iloc[x.trades_indexes[0]:x.trades_indexes[1]]) 199 | return books.apply(count, axis=1) 200 | 201 | 202 | def get_trades_average(books, trades): 203 | ''' 204 | Returns the percent change of a volume-weighted average of trades for each 205 | data point in DataFrame of book data 206 | ''' 207 | 208 | def mean_trades(x): 209 | trades_n = trades.iloc[x.trades_indexes[0]:x.trades_indexes[1]] 210 | if not trades_n.empty: 211 | return (trades_n.price*trades_n.amount).sum()/trades_n.amount.sum() 212 | return (books.mid/books.apply(mean_trades, axis=1)).apply(log).fillna(0) 213 | 214 | 215 | def get_aggressor(books, trades): 216 | ''' 217 | Returns a measure of whether trade aggressors were buyers or sellers for 218 | each data point in DataFrame of book data 219 | ''' 220 | 221 | def aggressor(x): 222 | trades_n = trades.iloc[x.trades_indexes[0]:x.trades_indexes[1]] 223 | if trades_n.empty: 224 | return 0 225 | buys = trades_n['type'] == 'buy' 226 | buy_vol = trades_n[buys].amount.sum() 227 | sell_vol = trades_n[~buys].amount.sum() 228 | return buy_vol - sell_vol 229 | return books.apply(aggressor, axis=1) 230 | 231 | 232 | def get_trend(books, trades): 233 | ''' 234 | Returns the linear trend in previous trades for each data point in DataFrame 235 | of book data 236 | ''' 237 | 238 | def trend(x): 239 | trades_n = trades.iloc[x.trades_indexes[0]:x.trades_indexes[1]] 240 | if len(trades_n) < 3: 241 | return 0 242 | else: 243 | return linregress(trades_n.index.values, trades_n.price.values)[0] 244 | return books.apply(trend, axis=1) 245 | 246 | 247 | # def get_tick_df(min_ts, max_ts, live, convert_timestamps=False): 248 | # ''' 249 | # Returns a DataFrame of ticks in time range 250 | # ''' 251 | # if not live: 252 | # query = {'_id': {'$gt': min_ts, '$lt': max_ts}} 253 | # cursor = ticks_db.find(query).sort('_id', pymongo.ASCENDING) 254 | # else: 255 | # cursor = 
ticks_db.find({}).sort('$natural', pymongo.DESCENDING).limit(1) 256 | # 257 | # ticks = pd.DataFrame(list(cursor)) 258 | # 259 | # if not ticks.empty: 260 | # ticks = ticks.set_index('_id') 261 | # if convert_timestamps: 262 | # ticks.index = pd.to_datetime(ticks.index, unit='s') 263 | # return ticks 264 | # 265 | # def get_ticks_indexes(books, ticks): 266 | # ''' 267 | # Returns indexes of ticks closest to each data point in DataFrame 268 | # of book data 269 | # ''' 270 | # def ticks_indexes(ts): 271 | # ts = int(ts) 272 | # return ticks.index.get_loc(ts, method='nearest') 273 | # return books.index.map(ticks_indexes) 274 | # 275 | # def get_buys_from_ticks(books, ticks): 276 | # ''' 277 | # Returns a count of trades for each data point in DataFrame of book data 278 | # ''' 279 | # def get_buy(x): 280 | # return ticks.iloc[x.ticks_indexes].buy 281 | # return books.apply(get_buy, axis=1) 282 | # 283 | # def get_sells_from_ticks(books, ticks): 284 | # ''' 285 | # Returns a count of trades for each data point in DataFrame of book data 286 | # ''' 287 | # def get_sell(x): 288 | # return ticks.iloc[x.ticks_indexes].sell 289 | # return books.apply(get_sell, axis=1) 290 | 291 | def check_times(books): 292 | ''' 293 | Returns list of differences between collection time and max book timestamps 294 | for verification purposes 295 | ''' 296 | time_diff = [] 297 | for i in range(len(books)): 298 | book = books.iloc[i] 299 | ask_ts = max(book.asks.timestamp) 300 | bid_ts = max(book.bids.timestamp) 301 | ts = max(ask_ts, bid_ts) 302 | time_diff.append(book.name-ts) 303 | return time_diff 304 | 305 | 306 | def make_features(limit, mid_offsets, 307 | trades_offsets, powers, live=False, skip=0, 308 | theoretical_order=None, theoretical_trade=None): 309 | ''' 310 | Returns a DataFrame with targets and features 311 | ''' 312 | start = time() 313 | stage = time() 314 | # Book related features: 315 | books = get_book_df(limit,skip=skip,live=live,theoretical_order=theoretical_order) 316 | if not live: 317 | print 'get book data run time:', (time()-stage)/60, 'minutes' 318 | stage = time() 319 | books['width'], books['mid'] = get_width_and_mid(books) 320 | if not live: 321 | print 'width and mid run time:', (time()-stage)/60, 'minutes' 322 | stage = time() 323 | for n in mid_offsets: 324 | books['mid{}'.format(n)] = get_future_mid(books, n) 325 | if not live: 326 | books = books.dropna() 327 | print 'offset mids run time:', (time()-stage)/60, 'minutes' 328 | stage = time() 329 | for p in powers: 330 | books['imbalance{}'.format(p)] = get_power_imbalance(books, 10, p) 331 | books['adj_price{}'.format(p)] = get_power_adjusted_price(books, 10, p) 332 | if not live: 333 | print 'power calcs run time:', (time()-stage)/60, 'minutes' 334 | stage = time() 335 | books = books.drop(['bids', 'asks'], axis=1) 336 | 337 | # Trade related features: 338 | min_ts = books.index.min() - trades_offsets[-1] 339 | max_ts = books.index.max() 340 | if live: 341 | max_ts += 10 342 | #print "Getting trades between '",datetime.datetime.utcfromtimestamp(min_ts).strftime(timestamp_format), "' and '", datetime.datetime.utcfromtimestamp(max_ts).strftime(timestamp_format),"'" 343 | trades = get_trade_df(books, min_ts, max_ts, live, theoretical_trade=theoretical_trade) 344 | for n in trades_offsets: 345 | if trades.empty: 346 | books['trades_indexes'] = 0 347 | books['t{}_count'.format(n)] = 0 348 | books['t{}_av'.format(n)] = 0 349 | books['agg{}'.format(n)] = 0 350 | books['trend{}'.format(n)] = 0 351 | else: 352 | books['trades_indexes'] 
= get_trades_indexes(books, trades, n, live) 353 | books['t{}_count'.format(n)] = get_trades_count(books, trades) 354 | books['t{}_av'.format(n)] = get_trades_average(books, trades) 355 | books['agg{}'.format(n)] = get_aggressor(books, trades) 356 | books['trend{}'.format(n)] = get_trend(books, trades) 357 | if not live: 358 | print 'trade features run time:', (time()-stage)/60, 'minutes' 359 | stage = time() 360 | books = books.drop('trades_indexes', axis=1) 361 | 362 | # # Ticks 363 | # ticks = get_tick_df(min_ts, max_ts, live) 364 | # if ticks.empty: 365 | # books['ticks_indexes'] = 0 366 | # books['tick_buy'] = 0 367 | # books['tick_sell'] = 0 368 | # else: 369 | # books['ticks_indexes'] = get_ticks_indexes(books, ticks) 370 | # books['tick_buy'] = get_buys_from_ticks(books, ticks) 371 | # books['tick_sell'] = get_sells_from_ticks(books, ticks) 372 | # if not live: 373 | # print 'tick features run time:', (time()-stage)/60, 'minutes' 374 | # stage = time() 375 | # books = books.drop('ticks_indexes', axis=1) 376 | 377 | if not live: 378 | print 'make_features run time:', (time() - start) / 60, 'minutes' 379 | 380 | return books 381 | 382 | def make_data(limit, skip=0): 383 | ''' 384 | Convenience function for calling make_features 385 | ''' 386 | # data = make_features(limit=limit, 387 | # mid_offsets=[30], 388 | # trades_offsets=[30, 60, 120, 180], 389 | # powers=[2, 4, 8], 390 | # skip=skip) 391 | data = make_features(limit=limit, 392 | mid_offsets=[5, 10, 15, 20, 25, 30, 35, 40, 45], 393 | trades_offsets=[10, 15, 30, 45, 60, 75, 90, 105, 120, 135, 150, 165, 180], 394 | powers=[2, 4, 8], 395 | skip=skip) 396 | return data 397 | 398 | if __name__ == '__main__' and len(sys.argv) == 3: 399 | print 'Starting at', get_formatted_time_string(time()) 400 | data = make_data(int(sys.argv[1])) 401 | output_filename = sys.argv[2] 402 | base_filename = '.'.join(output_filename.split('.')[:-1]) if '.' in output_filename else output_filename 403 | data.to_csv(base_filename+".tsv", sep='\t') 404 | with open(base_filename+".pkl", 'w+') as file: 405 | pickle.dump(data, file) 406 | file.close() 407 | print 'Ending at', get_formatted_time_string(time()) -------------------------------------------------------------------------------- /app/model/features_parallel.py: -------------------------------------------------------------------------------- 1 | #python -W ignore -u features_parallel.py 2 | #python -W ignore -u features_parallel.py 0 1231625 ./data1.pkl && python -W ignore -u features_parallel.py 1231625-150 1231625 ./data2.pkl (-150 on the second command's split because from the first run the last 150 get dropped! 
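# Note: every worker except the last pulls 150 extra rows, so the tail rows a chunk loses to the future-mid dropna are recomputed by the next worker's overlapping range; duplicate indexes are removed when the chunks are merged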
3 | #python -W ignore -u features_parallel.py 0 0 ../data/data.pkl 4 | import datetime 5 | import multiprocessing 6 | import pymongo 7 | import pickle 8 | import sys 9 | import features 10 | import pandas as pd 11 | from time import time 12 | 13 | 14 | 15 | client = pymongo.MongoClient() 16 | db = client['cryptobot'] 17 | books_db = db['btcc_btccny_books'] 18 | cpu_count = multiprocessing.cpu_count() 19 | initial_skip = int(sys.argv[1]) 20 | limit = int(sys.argv[2]) 21 | if limit == 0: 22 | limit = books_db.find().count() 23 | output_filename = sys.argv[3] 24 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 25 | each_limit = limit/cpu_count 26 | 27 | def get_formatted_time_string(this_time): 28 | return datetime.datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 29 | 30 | def worker(num): 31 | skip = (each_limit * num) + initial_skip 32 | print 'Worker #%s starting at record %s, limiting to %s records' % (num,skip,each_limit) 33 | if num == cpu_count-1: 34 | return features.make_data(each_limit, skip) 35 | else: 36 | return features.make_data(each_limit + 150, skip) 37 | 38 | def handler(): 39 | splits = range(0, cpu_count) 40 | pool = multiprocessing.Pool(cpu_count) 41 | data_array = pool.map(worker, splits) 42 | pool.close() 43 | pool.join() 44 | data = pd.concat(data_array) 45 | data = data[~data.index.duplicated(keep='first')] 46 | #data = data.groupby(data.index).first() 47 | return data.sort_index() 48 | 49 | if __name__ == '__main__': 50 | start = time() 51 | print 'Starting parallel features gen at', get_formatted_time_string(start) 52 | print cpu_count, 'threads will work on', each_limit, 'records each, totalling to', limit, 'records.' 53 | data = handler() 54 | print 'Done generating. Dumping...' 55 | base_filename = '.'.join(output_filename.split('.')[:-1]) if '.' in output_filename else output_filename 56 | data.to_csv(base_filename+".tsv", sep='\t') 57 | with open(base_filename+".pkl", 'w+') as file: 58 | pickle.dump(data, file) 59 | file.close() 60 | print len(data),'Records produced into '+base_filename+".tsv and "+base_filename+".pkl" 61 | print 'Ending parallel features gen at', get_formatted_time_string(time()) 62 | print 'Took', (time() - start) / 60, 'minutes to run.'
63 | -------------------------------------------------------------------------------- /app/model/full_create_features_and_train_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "---------------Starting features create-----------------" 4 | python -W ignore -u features_parallel.py 0 0 ../data/data.pkl 5 | 6 | # https://aws.amazon.com/ec2/pricing/on-demand/ c4.8xlarge = 36 processors, 60gb ram = $1.675/hr 7 | # With 3938164 records, each instance takes memory: virt=9.8gb, res=7.3g =~ 55gb used for 7 in free -g =~ 7.8gb/instance 8 | echo "---------------Starting training-----------------" 9 | nohup python strategy.py ../data/data.tsv 99 5 0.01 true >> ../data/5.txt & 10 | nohup python strategy.py ../data/data.tsv 99 10 0.01 true >> ../data/10.txt & 11 | nohup python strategy.py ../data/data.tsv 99 15 0.01 true >> ../data/15.txt & 12 | nohup python strategy.py ../data/data.tsv 99 20 0.01 true >> ../data/20.txt & 13 | nohup python strategy.py ../data/data.tsv 99 25 0.01 true >> ../data/25.txt & 14 | nohup python strategy.py ../data/data.tsv 99 30 0.01 true >> ../data/30.txt & 15 | nohup python strategy.py ../data/data.tsv 99 35 0.01 true >> ../data/35.txt & 16 | nohup python strategy.py ../data/data.tsv 99 40 0.01 true >> ../data/40.txt & 17 | nohup python strategy.py ../data/data.tsv 99 45 0.01 true >> ../data/45.txt & 18 | 19 | while pgrep &>/dev/null -f python; do sleep 60; done; shutdown -h now -------------------------------------------------------------------------------- /app/model/model.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.ensemble import RandomForestRegressor 3 | from sklearn.ensemble import GradientBoostingRegressor 4 | import pickle 5 | import pandas as pd 6 | 7 | 8 | def cross_validate(X, y, model, window): 9 | ''' 10 | Cross validates time series data using a shifting window where train data is 11 | always before test data 12 | ''' 13 | in_sample_score = [] 14 | out_sample_score = [] 15 | for i in range(1, len(y)/window): 16 | train_index = np.arange(0, i*window) 17 | test_index = np.arange(i*window, (i+1)*window) 18 | print 'Train index',train_index 19 | print 'Test index', test_index 20 | y_train = y.take(train_index) 21 | y_test = y.take(test_index) 22 | X_train = X.take(train_index, axis=0) 23 | X_test = X.take(test_index, axis=0) 24 | model.fit(X_train, y_train) 25 | in_sample_score.append(model.score(X_train, y_train)) 26 | out_sample_score.append(model.score(X_test, y_test)) 27 | print 'Window', i 28 | print 'in-sample score', in_sample_score[-1] 29 | print 'out-sample score:', out_sample_score[-1] 30 | print '---' 31 | return model, np.mean(in_sample_score), np.mean(out_sample_score) 32 | 33 | 34 | def fit_forest(X, y, window=100000, estimators=100, 35 | samples_leaf=250, validate=True): 36 | ''' 37 | Fits Random Forest 38 | ''' 39 | model = RandomForestRegressor(n_estimators=estimators, 40 | min_samples_leaf=samples_leaf, 41 | random_state=42, 42 | n_jobs=-1) 43 | if validate: 44 | return cross_validate(X, y, model, window) 45 | return model.fit(X, y) 46 | 47 | 48 | def fit_boosting(X, y, window=100000, estimators=250, learning=.01, 49 | samples_leaf=500, depth=20, validate=False): 50 | ''' 51 | Fits Gradient Boosting 52 | ''' 53 | model = GradientBoostingRegressor(n_estimators=estimators, 54 | learning_rate=learning, 55 | min_samples_leaf=samples_leaf, 56 | max_depth=depth, 57 | random_state=42) 58 | if validate: 
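        # Walk-forward illustration (hypothetical sizes, not from the repo): with len(y) = 300000
        # and window = 100000, cross_validate() fits fold 1 on rows [0, 100000) and scores rows
        # [100000, 200000), then fits fold 2 on rows [0, 200000) and scores [200000, 300000),
        # so training data always precedes test data in time.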
59 |         return cross_validate(X, y, model, window)
60 |     return model.fit(X, y)
61 | 
62 | 
63 | def grid_search(X, y, split, learn=[.01], samples_leaf=[250, 350, 500],
64 |                 depth=[10, 15]):
65 |     '''
66 |     Runs a grid search for GBM on split data
67 |     '''
68 |     for l in learn:
69 |         for s in samples_leaf:
70 |             for d in depth:
71 |                 model = GradientBoostingRegressor(n_estimators=250,
72 |                                                   learning_rate=l,
73 |                                                   min_samples_leaf=s,
74 |                                                   max_depth=d,
75 |                                                   random_state=42)
76 |                 model.fit(X.values[:split], y.values[:split])
77 |                 in_score = model.score(X.values[:split], y.values[:split])
78 |                 out_score = model.score(X.values[split:], y.values[split:])
79 |                 print 'learning_rate: {}, min_samples_leaf: {}, max_depth: {}'.\
80 |                     format(l, s, d)
81 |                 print 'in-sample score:', in_score
82 |                 print 'out-sample score:', out_score
83 |                 print ''
84 | 
85 | 
86 | def run_models(data, window, model_function, drop_zeros=False):
87 |     '''
88 |     Runs cross-validated models with a range of target offsets and outputs
89 |     results sorted by out-of-sample performance
90 |     '''
91 |     mids = [col for col in data.columns if 'mid' in col]
92 |     prevs = [col for col in data.columns if 'prev' in col]
93 |     in_reg_scores = {}
94 |     out_reg_scores = {}
95 |     for i in range(len(mids)):
96 |         print 'fitting model #{}...'.format(i+1)
97 |         m = mids[i]
98 |         p = prevs[i]
99 |         if drop_zeros:
100 |             y = data[data[m] != 0][m].values
101 |             prev = data[data[m] != 0][p]
102 |             X = data[data[m] != 0].drop(mids+prevs, axis=1)
103 |             X = X.join(prev)
104 |             X = X.values
105 |         else:
106 |             y = data[m].values
107 |             prev = data[p]
108 |             X = data.drop(mids+prevs, axis=1)
109 |             X = X.join(prev)
110 |             X = X.values
111 | 
112 |         _, in_reg_score, out_reg_score = model_function(X, y, window)
113 |         in_reg_scores[m] = in_reg_score
114 |         out_reg_scores[out_reg_score] = m
115 | 
116 |     print '\nmodel r^2 (sorted by out-of-sample score):'
117 |     for score in sorted(out_reg_scores):
118 |         m = out_reg_scores[score]
119 |         print 'out-sample', m, score
120 |         print 'in-sample', m, in_reg_scores[m], '\n'
121 | 
122 | 
123 | def get_feature_importances(fitted_model, labels):
124 |     '''
125 |     Returns labels sorted by feature importance
126 |     '''
127 |     labels = np.array(labels)
128 |     importances = fitted_model.feature_importances_
129 |     indexes = np.argsort(importances)[::-1]
130 |     for i in indexes:
131 |         print '{}: {}'.format(labels[i], importances[i])
132 |     return labels[indexes]
133 | 
134 | 
135 | def get_pickle(filename):
136 |     '''
137 |     Pickle convenience function
138 |     '''
139 |     with open(filename, 'r') as f:
140 |         data = pickle.load(f)
141 |     return data
142 | 
143 | 
144 | def append_data(df1, df2):
145 |     '''
146 |     Append df2 to df1
147 |     '''
148 |     df = pd.concat((df1, df2))
149 |     return df.groupby(df.index).first()
150 | 
--------------------------------------------------------------------------------
/app/model/strategy.py:
--------------------------------------------------------------------------------
1 | #python strategy.py
2 | #python strategy.py ../data/data.pkl 50 30 0.01 true
3 | # https://aws.amazon.com/ec2/pricing/on-demand/ c4.8xlarge = 36 processors, 60gb ram = $1.675/hr
4 | # With 3938164 records, each instance takes memory: virt=9.8gb, res=7.3g =~ 55gb used for 7 in free -g =~ 7.8gb/instance
5 | 
6 | import datetime
7 | import matplotlib.pyplot as plt
8 | from sklearn.ensemble import RandomForestRegressor
9 | from sklearn.ensemble import RandomForestClassifier
10 | from sklearn.ensemble import GradientBoostingRegressor
11 | from sklearn.ensemble import GradientBoostingClassifier
12 | import
numpy as np 13 | import matplotlib.ticker as mtick 14 | import matplotlib.dates as mdates 15 | import pickle 16 | import sys 17 | import model 18 | import pandas as pd 19 | from time import time 20 | import multiprocessing 21 | from os import path 22 | import xgboost as xgb 23 | 24 | do_parallel_search = False 25 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 26 | #input_model_filename = "../data/model_featuresNewAll_1476108655.82-1478514760.62_duration30_times100.pkl" 27 | #input_cols_filename = "../data/cols_featuresNewAll.pkl" 28 | input_model_filename = "" 29 | input_cols_filename = "" 30 | output_cols_filename = "../data/cols_featuresOldAll.pkl" #Todo: create this filename automatically 31 | y_multiplied_by = 100 32 | 33 | def get_output_model_filename(features, train_start, train_end, prediction_duration): 34 | return "../data/model_features%s_%s-%s_duration%s_times%s.pkl"%(features, train_start, train_end, prediction_duration, y_multiplied_by) 35 | 36 | def get_formatted_time_string(this_time): 37 | return datetime.datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 38 | 39 | def fit_and_trade(period, data, features, cols, split, split_percent, prediction_duration, threshold, threshold_percent, do_dump): 40 | ''' 41 | Fits and backtests a theoretical trading strategy 42 | ''' 43 | X = data[cols] 44 | # y = data["mid%s"%prediction_duration] 45 | y = data["final"] 46 | X_train = X.iloc[:split] 47 | X_test = X.iloc[split:] 48 | y_train = y.iloc[:split]*y_multiplied_by 49 | y_test = y.iloc[split:] 50 | 51 | print 'Data split at {}%. {}/{} records will be used for training/fitting. Rest used for testing.'.format(split_percent,split,len(data)) 52 | print 'Train data starts at %s (%s), ends at %s (%s)'%(get_formatted_time_string(X_train.index.values[0]),X_train.index.values[0],get_formatted_time_string(X_train.index.values[-1]),X_train.index.values[-1]) 53 | print 'Test data starts at %s (%s), ends at %s (%s)'%(get_formatted_time_string(X_test.index.values[0]),X_test.index.values[0],get_formatted_time_string(X_test.index.values[-1]),X_test.index.values[-1]) 54 | train_start = X_train.index.values[0] 55 | train_end = X_train.index.values[-1] 56 | 57 | if do_dump == 'true': 58 | print 'Model output file will be:',get_output_model_filename(features, train_start, train_end, prediction_duration) 59 | 60 | # from sklearn.model_selection import GridSearchCV 61 | # param_grid = {'learning_rate': [0.1, 0.05, 0.01, 0.005], 62 | # 'max_depth': [10, 15, 20, 25, 30], 63 | # 'min_samples_leaf': [100, 250, 500, 750, 1000], 64 | # 'max_features': [1.0, 0.75, 0.5, 0.25, 0.1] 65 | # } 66 | # est = GradientBoostingRegressor(n_estimators=125, verbose=100) 67 | # gs_cv = GridSearchCV(est, param_grid, n_jobs=7, verbose=100).fit(X_train, y_train) 68 | # print gs_cv.best_params_ 69 | # exit(0) 70 | 71 | # regressor, mean_in_sample_score, mean_out_sample_score = model.fit_forest(X_train.iloc[0:].values,y_train.iloc[0:].values) 72 | # print 'mean_in_sample_score:',mean_in_sample_score 73 | # print 'mean_out_sample_score:',mean_out_sample_score 74 | # exit(0) 75 | 76 | if path.isfile(input_model_filename): 77 | print "Loading model/regressor from",input_model_filename 78 | with open(input_model_filename, 'r') as file: 79 | regressor = pickle.load(file) 80 | file.close() 81 | else: 82 | 83 | #regressor = RandomForestClassifier(n_estimators=75,min_samples_leaf=750,n_jobs=-1) 84 | 85 | # regressor = RandomForestRegressor(n_estimators=100, 86 | # min_samples_leaf=500, 87 | # random_state=42, 88 | # n_jobs=-1) 
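        # (Illustrative note) Targets are scaled by y_multiplied_by=100 before fitting: the "final"
        # label appears to take values -1/0/+1, so the classifier below is trained on -100/0/+100 and
        # trade() divides predictions by the same factor to recover the -1/0/+1 signal. For a
        # regressor, the same trick would turn a raw move of 0.0005 (0.05%) into a target of 0.05.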
89 | 
90 |         # regressor = GradientBoostingRegressor(n_estimators=250,
91 |         #                                       learning_rate=.01,
92 |         #                                       min_samples_leaf=500,
93 |         #                                       max_depth=20,
94 |         #                                       random_state=42, verbose=100)
95 | 
96 |         # regressor = GradientBoostingRegressor(n_estimators=500,
97 |         #                                       learning_rate=.001,
98 |         #                                       min_samples_leaf=1000,
99 |         #                                       max_depth=30,
100 |         #                                       random_state=42,
101 |         #                                       verbose=100)
102 | 
103 |         # regressor = GradientBoostingClassifier(n_estimators=50,verbose=100,min_samples_leaf=1000,max_depth=3)
104 | 
105 |         # regressor = xgb.XGBRegressor()
106 | 
107 |         regressor = xgb.XGBClassifier(n_estimators=50,max_depth=3,min_child_weight=5)
108 | 
109 |         print 'Training started at %s...'%(get_formatted_time_string(time())),
110 |         regressor.fit(X_train.values, y_train.values)
111 |         print 'Training done at %s'%(get_formatted_time_string(time()))
112 | 
113 |     # print '--Model features importance--'
114 |     # model.get_feature_importances(regressor,cols)
115 |     # print '----'
116 | 
117 |     if do_dump == 'true':
118 |         with open(get_output_model_filename(features, train_start, train_end, prediction_duration), 'w+') as f:
119 |             pickle.dump(regressor, f)
120 |         f.close()
121 | 
122 |     print 'r-squared', regressor.score(X_test.values, y_test.values)
123 | 
124 |     trade(period, X_test.values, y_test.values, X_test.index, regressor, features, cols, split, split_percent, prediction_duration, threshold, threshold_percent, do_dump)
125 | 
126 | def trade(period, X, y, index, model, features, cols, split, split_percent, prediction_duration, threshold, threshold_percent, do_dump):
127 |     '''
128 |     Backtests a theoretical trading strategy
129 |     '''
130 |     preds = model.predict(X)/y_multiplied_by
131 | 
132 |     accurate_count = 0
133 |     negative_trades = 0
134 |     positive_trades = 0
135 |     accurate_trades_count = 0
136 |     for i, pred in enumerate(preds):
137 |         if pred == y[i]:
138 |             accurate_count = accurate_count+1
139 |             if pred != 0:
140 |                 accurate_trades_count = accurate_trades_count+1
141 |         if pred == -1:
142 |             negative_trades = negative_trades+1
143 |         elif pred == 1:
144 |             positive_trades = positive_trades+1
145 |     accurate_percent = float(accurate_count)/float(len(preds))*100
146 |     print "Total predictions count =",len(preds)
147 |     print "Accurate total predictions =",accurate_count
148 |     print "Accurate total predictions percent =",accurate_percent,"%"
149 |     print "-1 trades =",negative_trades
150 |     print "1 trades =",positive_trades
151 |     print "Total trades =", (negative_trades+positive_trades)
152 |     print "Accurate trades =",accurate_trades_count
153 |     print "Accurate trades percent =",(float(accurate_trades_count)/float(negative_trades+positive_trades)*100 if (negative_trades+positive_trades) > 0 else 0.0),"%"
154 |     return # NOTE: stats only; the time-based backtest and plotting below is currently disabled by this early return
155 | 
156 |     index_as_dates = []
157 |     for value in index:
158 |         index_as_dates.append(datetime.datetime.utcfromtimestamp(value))
159 | 
160 |     trades = np.zeros(len(preds))
161 |     trade_at = 0
162 |     active = False
163 |     for i, pred in enumerate(preds):
164 |         if active:
165 |             if (index_as_dates[i]-trade_at).total_seconds() >= prediction_duration:
166 |                 trade_at = 0
167 |                 active = False
168 |                 # print 'Trade expired at', index_as_dates[i]
169 |         elif abs(pred) >= threshold:
170 |             active = True
171 |             trades[i] = np.sign(pred)
172 |             trade_at = index_as_dates[i]
173 |             # print 'Trading at',trade_at
174 | 
175 |     returns = trades*y*100
176 | 
177 |     # if do_dump == 'true':
178 |     #     with open(base_filename+".human_readable_results.txt", "w") as file:
179 |     #         it = np.nditer(preds, flags=['f_index'])
180 |     #         while not it.finished:
181 |     #             file.write(
182 |     #                 "%s (%s):\t%s\t%s\t%s\t%s\n" %
183 |     #                 (index_as_dates[it.index].strftime(timestamp_format), index[it.index], format(preds[it.index],'.10f'), format(y[it.index],'.10f'), trades[it.index], returns[it.index]))
184 |     #             it.iternext()
185 |     #         file.close()
186 | 
187 |     trades_only = returns[trades != 0]
188 |     if len(trades_only) == 0:
189 |         print 'No trades were made.'
190 |         return
191 |     mean_return = trades_only.mean()
192 |     accuracy = sum(trades_only > 0)*1./len(trades_only)
193 |     profit = np.cumsum(returns)
194 | 
195 |     title_text = ('Trading at every {}% prediction. Position held for {} secs.'
196 |                   .format(threshold_percent, prediction_duration))
197 |     return_text = 'Average Return: {:.4f} %'.format(mean_return)
198 |     trades_text = 'Total Trades: {:d}'.format(len(trades_only))
199 |     accuracy_text = 'Accuracy: {:.2f} %'.format(accuracy*100)
200 |     print title_text
201 |     print return_text
202 |     print trades_text
203 |     print accuracy_text
204 | 
205 |     #plt.figure(dpi=100000)
206 |     fig, ax = plt.subplots()
207 |     plt.plot(index_as_dates, profit)
208 |     plt.title(title_text)
209 |     plt.ylabel('Returns')
210 |     plt.xticks(rotation=20)
211 |     x_formatter = mtick.FormatStrFormatter('%.0f%%')
212 |     ax.yaxis.set_major_formatter(x_formatter)
213 |     y_formatter = mdates.DateFormatter("%Y-%m-%d %H:%M")
214 |     ax.xaxis.set_major_formatter(y_formatter)
215 |     plt.text(.05, .85, return_text, transform=ax.transAxes)
216 |     plt.text(.05, .78, trades_text, transform=ax.transAxes)
217 |     plt.text(.05, .71, accuracy_text, transform=ax.transAxes)
218 |     if do_dump == 'true' or do_parallel_search:
219 |         plt.savefig(get_file_name(period, features, split_percent, prediction_duration, threshold_percent))
220 |     if not do_parallel_search:
221 |         plt.show()
222 | 
223 | def get_file_name(period,features,split_percent,prediction_duration,threshold_percent):
224 |     #ToDo make this the same as the model output filename
225 |     return '../data/strategy_period%s_features%s_split%s_duration%s_threshold%s.png' % (period, features, split_percent, prediction_duration, threshold_percent)
226 | 
227 | def parallel(params):
228 |     period = params[0]
229 |     data = params[1]
230 |     features = params[2]
231 |     cols = params[3]
232 |     split = params[4]
233 |     split_percent = params[5]
234 |     prediction_duration = params[6]
235 |     threshold = params[7]
236 |     threshold_percent = params[8]
237 | 
238 |     print '-------'
239 |     print 'Starting iteration. Period:', period, 'Features:', features, 'Split:', split_percent,'Prediction duration:', prediction_duration, 'Threshold:', threshold_percent
240 |     image_filename = get_file_name(period,features,split_percent,prediction_duration,threshold_percent)
241 |     if path.isfile(image_filename):
242 |         print 'Image file already exists, skipping...'
243 |         return
244 | 
245 |     fit_and_trade(period, data, features, cols, split, split_percent, prediction_duration, threshold, threshold_percent, False)
246 | 
247 | 
248 | if __name__ == '__main__' and len(sys.argv) == 6:
249 |     print "Starting at",get_formatted_time_string(time())
250 | 
251 |     filename = sys.argv[1]
252 |     split_percent = int(sys.argv[2])
253 |     prediction_duration = int(sys.argv[3])
254 |     threshold_percent = float(sys.argv[4])
255 |     do_dump = sys.argv[5]
256 | 
257 |     base_filename = '.'.join(filename.split('.')[:-1]) if '.' in filename else filename
258 |     filename_extension = filename.split('.')[-1] if '.'
in filename else filename 259 | 260 | threshold = threshold_percent/100 261 | 262 | print "Reading data from disk" 263 | if filename_extension == 'pkl': 264 | with open(filename, 'r') as file: 265 | data = pickle.load(file) 266 | elif filename_extension == 'tsv': 267 | data = pd.DataFrame.from_csv(filename, sep='\t') 268 | 269 | if do_dump == 'true' and filename_extension != 'tsv': 270 | data.to_csv(base_filename+'.tsv', sep='\t') 271 | 272 | if path.isfile(input_cols_filename): 273 | print "Loading cols from file",input_cols_filename 274 | with open(input_cols_filename, 'r') as file: 275 | cols = pickle.load(file) 276 | file.close() 277 | else: 278 | # Original features, work well when trained with books data that doesn't skip, and is fresh roughly every 1-2 seconds 279 | # cols = [ 280 | # 'width', 281 | # 'imbalance2', 282 | # 'imbalance4', 283 | # 'imbalance8', 284 | # 'adj_price2', 285 | # 'adj_price4', 286 | # 'adj_price8', 287 | # 't30_count', 288 | # 't60_count', 289 | # 't120_count', 290 | # 't180_count', 291 | # 't30_av', 292 | # 't60_av', 293 | # 't120_av', 294 | # 't180_av', 295 | # 'agg30', 296 | # 'agg60', 297 | # 'agg120', 298 | # 'agg180', 299 | # 'trend30', 300 | # 'trend60', 301 | # 'trend120', 302 | # 'trend180' 303 | # ] 304 | 305 | # Features with added windows 306 | cols = [ 307 | 'width', 308 | 309 | 'imbalance2', 310 | 'imbalance4', 311 | 'imbalance8', 312 | 313 | 'adj_price2', 314 | 'adj_price4', 315 | 'adj_price8', 316 | 317 | 't10_count', 318 | 't15_count', 319 | 't30_count', 320 | 't45_count', 321 | 't60_count', 322 | 't75_count', 323 | 't90_count', 324 | 't105_count', 325 | 't120_count', 326 | 't135_count', 327 | 't150_count', 328 | 't165_count', 329 | 't180_count', 330 | 331 | 't10_av', 332 | 't15_av', 333 | 't30_av', 334 | 't45_av', 335 | 't60_av', 336 | 't75_av', 337 | 't90_av', 338 | 't105_av', 339 | 't120_av', 340 | 't135_av', 341 | 't150_av', 342 | 't165_av', 343 | 't180_av', 344 | 345 | 'agg10', 346 | 'agg15', 347 | 'agg30', 348 | 'agg45', 349 | 'agg60', 350 | 'agg75', 351 | 'agg90', 352 | 'agg105', 353 | 'agg120', 354 | 'agg135', 355 | 'agg150', 356 | 'agg165', 357 | 'agg180', 358 | 359 | 'trend10', 360 | 'trend15', 361 | 'trend30', 362 | 'trend45', 363 | 'trend60', 364 | 'trend75', 365 | 'trend90', 366 | 'trend105', 367 | 'trend120', 368 | 'trend135', 369 | 'trend150', 370 | 'trend165', 371 | 'trend180', 372 | ] 373 | if do_dump == 'true': 374 | with open(output_cols_filename, 'w+') as f: 375 | pickle.dump(cols, f) 376 | f.close() 377 | 378 | data = data.fillna(0) 379 | data = data[data.width > 0] 380 | # ToDo if using scikit-learn 381 | #data[cols] = data[cols].astype('float32') 382 | 383 | # Search for optimal fits 384 | if do_parallel_search: 385 | columns = {} 386 | columns['OldAll'] = [ 387 | 'width', 388 | 'imbalance2', 389 | 'imbalance4', 390 | 'imbalance8', 391 | 'adj_price2', 392 | 'adj_price4', 393 | 'adj_price8', 394 | 't30_count', 395 | 't60_count', 396 | 't120_count', 397 | 't180_count', 398 | 't30_av', 399 | 't60_av', 400 | 't120_av', 401 | 't180_av', 402 | 'agg30', 403 | 'agg60', 404 | 'agg120', 405 | 'agg180', 406 | 'trend30', 407 | 'trend60', 408 | 'trend120', 409 | 'trend180' 410 | ] 411 | columns['NewAll'] = [ 412 | 'width', 413 | 'imbalance2', 414 | 'imbalance4', 415 | 'imbalance8', 416 | 'imbalance16', 417 | 'imbalance32', 418 | 'imbalance64', 419 | 'adj_price2', 420 | 'adj_price4', 421 | 'adj_price8', 422 | 'adj_price16', 423 | 'adj_price32', 424 | 'adj_price64', 425 | 't30_count', 426 | 't60_count', 427 | 't120_count', 428 | 
't180_count', 429 | 't90_count', 430 | 't150_count', 431 | 't210_count', 432 | 't240_count', 433 | 't270_count', 434 | 't300_count', 435 | 't330_count', 436 | 't360_count', 437 | 't390_count', 438 | 't420_count', 439 | 't450_count', 440 | 't30_av', 441 | 't60_av', 442 | 't120_av', 443 | 't180_av', 444 | 't90_av', 445 | 't150_av', 446 | 't210_av', 447 | 't240_av', 448 | 't270_av', 449 | 't300_av', 450 | 't330_av', 451 | 't360_av', 452 | 't390_av', 453 | 't420_av', 454 | 't450_av', 455 | 'agg30', 456 | 'agg60', 457 | 'agg120', 458 | 'agg180', 459 | 'agg90', 460 | 'agg150', 461 | 'agg210', 462 | 'agg240', 463 | 'agg270', 464 | 'agg300', 465 | 'agg330', 466 | 'agg360', 467 | 'agg390', 468 | 'agg420', 469 | 'agg450', 470 | 'trend30', 471 | 'trend60', 472 | 'trend120', 473 | 'trend180', 474 | 'trend90', 475 | 'trend150', 476 | 'trend210', 477 | 'trend240', 478 | 'trend270', 479 | 'trend300', 480 | 'trend330', 481 | 'trend360', 482 | 'trend390', 483 | 'trend420', 484 | 'trend450' 485 | ] 486 | parallel_arguments = [] 487 | for period in ['All']: 488 | if period == 'All': 489 | None 490 | for features in columns: 491 | cols = columns[features] 492 | for split_percent in [50]: 493 | for prediction_duration in [30, 60, 90, 120]: 494 | split = int(len(data)*(float(split_percent)/100)) 495 | parallel_arguments.append([period, data, features, cols, split, split_percent, prediction_duration, threshold, threshold_percent]) 496 | print 'Trying', len(parallel_arguments), 'combinations on multiple threads' 497 | pool = multiprocessing.Pool(multiprocessing.cpu_count()-1) #Leave one CPU core free so system doesn't lock up 498 | pool.map(parallel, parallel_arguments) 499 | pool.close() 500 | pool.join() 501 | 502 | if not do_parallel_search: 503 | split = int(len(data)*(float(split_percent)/100)) 504 | fit_and_trade('All', data, 'New', cols, split, split_percent, prediction_duration, threshold, threshold_percent, do_dump) 505 | 506 | print "Done at", get_formatted_time_string(time()) -------------------------------------------------------------------------------- /app/model/strategy_multiple.py: -------------------------------------------------------------------------------- 1 | #python strategy_multiple.gy 2 | #python strategy_multiple.py ../data/data.tsv 1479943156.27 0.01 3 | 4 | import datetime 5 | import matplotlib.pyplot as plt 6 | import numpy as np 7 | import matplotlib.ticker as mtick 8 | import matplotlib.dates as mdates 9 | import pickle 10 | import sys 11 | import pandas as pd 12 | from time import time 13 | import xgboost as xgb 14 | 15 | prediction_duration = 30 16 | prediction_periods = [5, 10, 15, 30, 35, 40, 45] 17 | 18 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 19 | input_cols_filename = "../data/cols_featuresNewAll.pkl" 20 | model_base_filename = "../data/model_featuresNewAll_1476108655.82-1479943156.27_duration%s_times%s.pkl" 21 | y_multiplied_by = 100 22 | models = {} 23 | for prediction_period in prediction_periods: 24 | model_filename = model_base_filename%(prediction_period,y_multiplied_by) 25 | with open(model_filename, 'r') as file: 26 | models[prediction_period] = pickle.load(file) 27 | file.close() 28 | 29 | def get_formatted_time_string(this_time): 30 | return datetime.datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 31 | 32 | def fit_and_trade(data, cols, threshold, threshold_percent): 33 | ''' 34 | Fits and backtests a theoretical trading strategy 35 | ''' 36 | X = data[cols] 37 | y = {} 38 | for prediction_period in prediction_periods: 39 | y[prediction_period] = 
data["mid%s"%prediction_period].values 40 | 41 | print 'Test data starts at %s (%s), ends at %s (%s)'%(get_formatted_time_string(X.index.values[0]),X.index.values[0],get_formatted_time_string(X.index.values[-1]),X.index.values[-1]) 42 | 43 | trade(X.values, y, X.index, threshold, threshold_percent) 44 | 45 | def check_preds(i, preds, threshold): 46 | preds_as_list = [] 47 | for key in preds: 48 | preds_as_list.append(preds[key][i]) 49 | pred_signs = np.sign(preds_as_list) 50 | all_signs_equal = True 51 | sign = pred_signs[0] 52 | for pred_sign in pred_signs: 53 | if sign != pred_sign: 54 | all_signs_equal = False 55 | break 56 | all_preds_above_threshold = True 57 | for key in preds: 58 | if abs(preds[key][i]) < threshold: 59 | all_preds_above_threshold = False 60 | break 61 | if all_signs_equal and all_preds_above_threshold: 62 | return True 63 | else: 64 | return False 65 | 66 | def trade(X, y, index, threshold, threshold_percent): 67 | ''' 68 | Backtests a theoretical trading strategy 69 | ''' 70 | preds = {} 71 | for prediction_period in prediction_periods: 72 | preds[prediction_period] = models[prediction_period].predict(X) / y_multiplied_by 73 | 74 | index_as_dates = [] 75 | for value in index: 76 | index_as_dates.append(datetime.datetime.utcfromtimestamp(value)) 77 | 78 | trades = np.zeros(len(preds[prediction_duration])) 79 | trade_at = 0 80 | active = False 81 | for i in xrange(len(index)): 82 | if active: 83 | if (index_as_dates[i]-trade_at).total_seconds() >= prediction_duration: 84 | trade_at = 0 85 | active = False 86 | elif check_preds(i,preds,threshold): 87 | active = True 88 | trades[i] = np.sign(preds[prediction_duration][i]) 89 | trade_at = index_as_dates[i] 90 | print_string = "%s %s Trading %s at predictions: "%(i,index[i],trades[i]) 91 | for key in preds: 92 | print_string = print_string+ "%s=%s "%(key,format(preds[key][i], '.10f')) 93 | print print_string 94 | print_string = "%s %s Real mids: "%(i,index[i]) 95 | for key in preds: 96 | print_string = print_string + "%s=%s "%(key,format(y[key][i], '.10f')) 97 | print print_string 98 | print "" 99 | 100 | 101 | returns = trades*y[prediction_duration]*100 102 | 103 | for i in xrange(len(trades)): 104 | if trades[i] != 0 and returns[i]>0: 105 | #Check that real future mid (y[x]), x secs later, is in the same direction as pred[x], for x in 5,10,15,30,35,40,45 106 | this_timestamp = index[i] 107 | all_predictions_correct = True 108 | for x in prediction_periods: 109 | for check_timestamp_index in xrange(i,len(index)): 110 | if index[check_timestamp_index]-this_timestamp >= x: 111 | print "For trade made at timestamp",this_timestamp,"period of",x,"is found at",index[check_timestamp_index],"with prediction=",format(preds[x][check_timestamp_index],'.10f'),"real=",format(y[x][check_timestamp_index],'.10f') 112 | if(np.sign(y[x][check_timestamp_index]) != np.sign(preds[x][check_timestamp_index])): 113 | all_predictions_correct = False 114 | break 115 | if not all_predictions_correct: 116 | break 117 | if not all_predictions_correct: 118 | print "***A prediction for this trade was not correct, reversing return" 119 | returns[i] = (returns[i]*-1.0) 120 | else: 121 | print "+++All predictions were correct! This is a definite positive return" 122 | 123 | trades_only = returns[trades != 0] 124 | if len(trades_only) == 0: 125 | print 'No trades were made.' 
126 | return 127 | mean_return = trades_only.mean() 128 | accuracy = sum(trades_only > 0)*1./len(trades_only) 129 | profit = np.cumsum(returns) 130 | 131 | title_text = ('Trading at every {}% prediction. Position held for {} secs.' 132 | .format(threshold_percent, prediction_duration)) 133 | return_text = 'Average Return: {:.4f} %'.format(mean_return) 134 | trades_text = 'Total Trades: {:d}'.format(len(trades_only)) 135 | accuracy_text = 'Accuracy: {:.2f} %'.format(accuracy*100) 136 | print title_text 137 | print return_text 138 | print trades_text 139 | print accuracy_text 140 | 141 | #plt.figure(dpi=100000) 142 | fig, ax = plt.subplots() 143 | plt.plot(index_as_dates, profit) 144 | plt.title(title_text) 145 | plt.ylabel('Returns') 146 | plt.xticks(rotation=20) 147 | x_formatter = mtick.FormatStrFormatter('%.0f%%') 148 | ax.yaxis.set_major_formatter(x_formatter) 149 | y_formatter = mdates.DateFormatter("%Y-%m-%d %H:%M") 150 | ax.xaxis.set_major_formatter(y_formatter) 151 | plt.text(.05, .85, return_text, transform=ax.transAxes) 152 | plt.text(.05, .78, trades_text, transform=ax.transAxes) 153 | plt.text(.05, .71, accuracy_text, transform=ax.transAxes) 154 | plt.show() 155 | 156 | if __name__ == '__main__': 157 | print "Starting at",get_formatted_time_string(time()) 158 | 159 | filename = sys.argv[1] 160 | test_after_timestamp = float(sys.argv[2]) 161 | threshold_percent = float(sys.argv[3]) 162 | 163 | base_filename = '.'.join(filename.split('.')[:-1]) if '.' in filename else filename 164 | filename_extension = filename.split('.')[-1] if '.' in filename else filename 165 | 166 | threshold = threshold_percent/100 167 | 168 | print "Reading data from disk" 169 | if filename_extension == 'pkl': 170 | with open(filename, 'r') as file: 171 | data = pickle.load(file) 172 | elif filename_extension == 'tsv': 173 | data = pd.DataFrame.from_csv(filename, sep='\t') 174 | 175 | data = data.fillna(0) 176 | data = data[data.width > 0] 177 | # ToDo if using scikit-learn 178 | #data[cols] = data[cols].astype('float32') 179 | 180 | data = data[data.index >= test_after_timestamp] 181 | 182 | print "Loading cols from file",input_cols_filename 183 | with open(input_cols_filename, 'r') as file: 184 | cols = pickle.load(file) 185 | file.close() 186 | 187 | fit_and_trade(data, cols, threshold, threshold_percent) 188 | 189 | print "Done at", get_formatted_time_string(time()) -------------------------------------------------------------------------------- /app/predict.py: -------------------------------------------------------------------------------- 1 | #python predict.py 2 | #python predict.py 0.001 3 | #python -W ignore -u predict.py 0.01 >> data/predict.out.txt 2>&1 4 | import math 5 | from model import features as f 6 | import pymongo 7 | import time 8 | import sys 9 | import pickle 10 | import numpy as np 11 | from math import log 12 | import btcchina 13 | import traceback 14 | from datetime import datetime 15 | import pandas as pd 16 | from StringIO import StringIO 17 | import subprocess 18 | 19 | prediction_duration = 15 20 | #prediction_periods = [5, 10, 15, 20, 25, 30] 21 | prediction_periods = [15] 22 | finish_within_one_x_of_duration = 1 23 | periods = 3 # if 3, it means 2 retries and 1 final as market 24 | 25 | header = "_id width mid imbalance2 adj_price2 imbalance4 adj_price4 imbalance8 adj_price8 t10_count t10_av agg10 trend10 t15_count t15_av agg15 trend15 t30_count t30_av agg30 trend30 t45_count t45_av agg45 trend45 t60_count t60_av agg60 trend60 t75_count t75_av agg75 trend75 t90_count t90_av 
agg90 trend90 t105_count t105_av agg105 trend105 t120_count t120_av agg120 trend120 t135_count t135_av agg135 trend135 t150_count t150_av agg150 trend150 t165_count t165_av agg165 trend165 t180_count t180_av agg180 trend180" 26 | y_multiplied_by = 100 27 | #model_base_filename = "data/model_featuresNewAll_1476108655.82-1480731470.85_duration%s_times%s.pkl" 28 | model_base_filename = "data/model_featuresNew_1482247313.0-1482762365.28_duration15_times100.pkl" 29 | cols_filename = "data/cols_featuresNewAll.pkl" 30 | 31 | client = pymongo.MongoClient() 32 | db = client['cryptobot'] 33 | threshold = float(sys.argv[1])/100 34 | # ticks_db = db['btcc_btccny_ticks'] 35 | timestamp_format = "%Y-%m-%d %H:%M:%S.%f" 36 | access_key="" #ADD YOUR ACCESS KEY HERE 37 | secret_key="" #ADD YOUR SECRET HERE 38 | max_api_retry = 2 39 | 40 | trade_api = btcchina.BTCChina(access_key,secret_key) 41 | 42 | with open(cols_filename, 'r') as file: 43 | cols = pickle.load(file) 44 | file.close() 45 | models = {} 46 | for prediction_period in prediction_periods: 47 | #model_filename = model_base_filename%(prediction_period,y_multiplied_by) 48 | model_filename = model_base_filename 49 | with open(model_filename, 'r') as file: 50 | models[prediction_period] = pickle.load(file) 51 | file.close() 52 | 53 | def get_formatted_time_string(this_time): 54 | return datetime.utcfromtimestamp(this_time).strftime(timestamp_format) 55 | 56 | def round_down(value, places): 57 | return math.floor(value * math.pow(10,places)) / math.pow(10,places) 58 | 59 | def call_trade_api_with_retries(trade_api_function): 60 | response = {"error":{"message":"Didn't even try"}} 61 | for trial in xrange(max_api_retry): 62 | if trial > 0: 63 | print_formatted("!!! API request failed (%s). Retrying #%s ..."%(response['error'], trial)) 64 | try: 65 | response = trade_api_function() 66 | except Exception as e: 67 | print_formatted("!!! 
Exception thrown when calling api: %s"%(str(e))) 68 | traceback.print_exc() 69 | sys.exc_clear() 70 | else: 71 | if (not isinstance(response, dict)) or (isinstance(response, dict) and 'error' not in response): 72 | break 73 | else: 74 | None #Todo send email on insufficient balance and when on max_api_retry 75 | return response 76 | 77 | def get_live_balances(): 78 | global balance_btc 79 | global balance_fiat 80 | response = call_trade_api_with_retries(lambda: trade_api.get_account_info()) 81 | if 'balance' in response: 82 | balance_btc = float(response['balance']['btc']['amount']) 83 | balance_fiat = float(response['balance']['cny']['amount']) 84 | return balance_btc, balance_fiat 85 | 86 | # def get_latest_tick(): 87 | # tick = ticks_db.find().sort("$natural", -1).limit(1).next() 88 | # return tick 89 | 90 | def get_current_time_seconds_utc(): 91 | return (datetime.utcnow()-datetime(1970,1,1)).total_seconds() 92 | 93 | def print_formatted(string, beginning_tabs_count=0, print_timestamp=True, timestamp=None, max_chars_per_line=125): 94 | max_chars_count = 0 95 | if not timestamp: 96 | timestamp = get_current_time_seconds_utc() 97 | formatted_string = "" 98 | timestamp_char_count = 0 99 | if print_timestamp: 100 | formatted_string = "%s (%s) - "%(get_formatted_time_string(timestamp),timestamp) 101 | timestamp_char_count = len(formatted_string) 102 | formatted_string = formatted_string + "\t"*beginning_tabs_count 103 | for c in string: 104 | if max_chars_count == max_chars_per_line: 105 | formatted_string = formatted_string + "\n" + " "*timestamp_char_count + "\t"*beginning_tabs_count 106 | max_chars_count = 0 107 | formatted_string = formatted_string + c 108 | max_chars_count+=1 109 | print formatted_string 110 | 111 | def check_preds(preds): 112 | preds_as_list = [] 113 | for key in preds: 114 | preds_as_list.append(preds[key]) 115 | pred_signs = np.sign(preds_as_list) 116 | all_signs_equal = True 117 | sign = pred_signs[0] 118 | for pred_sign in pred_signs: 119 | if sign != pred_sign: 120 | all_signs_equal = False 121 | break 122 | all_preds_above_threshold = True 123 | for key in preds: 124 | if abs(preds[key]) < threshold: 125 | all_preds_above_threshold = False 126 | break 127 | if all_signs_equal and all_preds_above_threshold: 128 | return True 129 | else: 130 | return False 131 | 132 | print_formatted("Running at prediction_duration=%s, threshold=%s ..."%(prediction_duration,threshold*100)) 133 | 134 | data = pd.DataFrame() 135 | index = 0 136 | last_data_timestamp = 0 137 | position = 0 138 | trade_time = 0 139 | change = 0 140 | previous_price = None 141 | last_trade_midpoint_price = 0 142 | current_order_id = 0 143 | trades_count = 0 144 | accurate_trades_count = 0 145 | trade_position_periods_checked = {} 146 | amount_trade_in_fiat = 0 147 | amount_trade_in_btc = 0 148 | 149 | response = call_trade_api_with_retries(lambda: trade_api.get_orders()) 150 | if isinstance(response, dict) and 'order' in response: 151 | orders = response['order'] 152 | for order in orders: 153 | response = call_trade_api_with_retries(lambda: trade_api.cancel(order['id'])) 154 | if response == True or response == 'true' or response == 'True': 155 | print_formatted("Order canceled on startup: %s"%(order)) 156 | #ToDO redistribute account 50/50, and do it periodically 157 | 158 | balance_btc = 0 159 | balance_fiat = 0 160 | balance_btc, balance_fiat = get_live_balances() 161 | balance_btc_initial = balance_btc 162 | balance_fiat_initial = balance_fiat 163 | if balance_btc == 0 or balance_fiat == 0: 164 | 
print_formatted('***Unable to get balances. Or one or both of balances (btc/fiat) are zero.') 165 | exit(0) 166 | print_formatted("Starting balances: balance_btc=%s, balance_fiat=%s"%(balance_btc,balance_fiat)) 167 | 168 | while True: 169 | try: 170 | start = get_current_time_seconds_utc() 171 | 172 | if current_order_id == 0 and position == 0: 173 | line = subprocess.check_output(['tail', '-1', 'data/data_live.tsv']) 174 | data_as_tsv_str = header + "\n" + line 175 | data_as_tsv_StringIO = StringIO(data_as_tsv_str) 176 | data = pd.DataFrame.from_csv(data_as_tsv_StringIO, sep='\t') 177 | data = data.fillna(0) 178 | #ToDo if using scikit-learn 179 | #data[cols] = data[cols].astype('float32') 180 | 181 | preds = {} 182 | 183 | this_data_timestamp = data.index[index] 184 | if this_data_timestamp < (start - 3): 185 | # print "Data hasn't been updated in less than 3 seconds, skipping...",this_data_timestamp,start-3 186 | None 187 | elif last_data_timestamp != 0 and last_data_timestamp == this_data_timestamp: 188 | # print "Last data timestamp is equal to this data timestamp, skipping..." 189 | None 190 | else: 191 | last_data_timestamp = this_data_timestamp 192 | 193 | for prediction_period in prediction_periods: 194 | preds[prediction_period] = models[prediction_period].predict(data[cols].values)[index] / y_multiplied_by 195 | 196 | else: 197 | data = f.get_book_df(1,live=True) 198 | data['width'], data['mid'] = f.get_width_and_mid(data) 199 | preds = {} 200 | # print "In a position or order, skipping features", data.index[index], data.width.iloc[index], data.mid.iloc[index] 201 | 202 | current_midpoint_price = data.mid.iloc[index] 203 | 204 | # If we can execute a new trade, and no pending orders 205 | if data.width.iloc[index] > 0 and current_order_id==0 and position == 0 and len(preds)>0 and check_preds(preds): 206 | position = np.sign(preds[prediction_duration]) 207 | 208 | if position < 0: 209 | price = current_midpoint_price 210 | amount_trade_in_btc = balance_btc 211 | trade_type = 'ask' 212 | elif position > 0: 213 | price = current_midpoint_price 214 | amount_trade_in_fiat = balance_fiat 215 | amount_trade_in_btc = round_down(amount_trade_in_fiat / price, 4) 216 | trade_type = 'bid' 217 | trades_count+=1 218 | print_formatted("-----------Trade #%s----------"%(trades_count)) 219 | trade_time = get_current_time_seconds_utc() 220 | print_string = "1) Trading %s at predictions: "%(position) 221 | for key in preds: 222 | print_string = print_string+"%s=%s "%(key,format(preds[key], '.10f')) 223 | print_formatted(print_string,timestamp=trade_time) 224 | print_formatted("Current midpoint price: %s" % (current_midpoint_price),1) 225 | if position < 0: 226 | print_formatted("Going from btc->fiat: selling %s btc, buying %s fiat." % (amount_trade_in_btc, amount_trade_in_fiat),2) 227 | print_formatted("+Hopefully buyback price will be lower than current sell price, for a profit",2) 228 | response = call_trade_api_with_retries(lambda: trade_api.sell(price, amount_trade_in_btc)) 229 | elif position > 0: 230 | print_formatted("Going from fiat->btc: buying %s btc, selling %s fiat." 
% (amount_trade_in_btc, amount_trade_in_fiat),2) 231 | print_formatted("+Hopefully sellback price will be higher than current buy price, for a profit",2) 232 | response = call_trade_api_with_retries(lambda: trade_api.buy(price, amount_trade_in_btc)) 233 | if isinstance(response, int): 234 | last_trade_midpoint_price = current_midpoint_price 235 | current_order_id = response 236 | trade_position_periods_checked = {} 237 | print_formatted("@@@*** Order successfully made, order ID: %s" % (current_order_id),1) 238 | print_formatted("Holding position for %s seconds"%(prediction_duration)) 239 | else: 240 | print_formatted("@@@!!! Error making order: %s" % (str(response['error'] if isinstance(response, dict) and 'error' in response else response)), 1) 241 | position = 0 242 | last_trade_midpoint_price = 0 243 | amount_trade_in_fiat = 0 244 | amount_trade_in_btc = 0 245 | 246 | # If an open position has expired, and no pending orders 247 | if current_order_id==0 and position != 0 and (start - trade_time) >= prediction_duration+1: 248 | print_formatted("2) Position %s expired after %s secs. " % (position, (start-trade_time))) 249 | trade_time = get_current_time_seconds_utc() 250 | print_formatted("Current midpoint price: %s" % (current_midpoint_price),1) 251 | if position < 0: 252 | price = current_midpoint_price 253 | # amount_trade_in_fiat = balance_fiat-fiat_balance_before_btc_sale 254 | amount_trade_in_fiat = balance_fiat*0.50 255 | amount_trade_in_btc = round_down(amount_trade_in_fiat / price, 4) 256 | print_formatted("Going from fiat->btc: selling %s fiat, buying %s btc" % (amount_trade_in_fiat, amount_trade_in_btc),2) 257 | if price < last_trade_midpoint_price: 258 | print_formatted("+Buyback price did go down, so should be profiting!",2) 259 | accurate_trades_count = accurate_trades_count+1 260 | else: 261 | print_formatted("-Buyback price didn't go down, should be a loss",2) 262 | response = call_trade_api_with_retries(lambda: trade_api.buy(price, amount_trade_in_btc)) 263 | elif position > 0: 264 | price = current_midpoint_price 265 | # amount_trade_in_btc = amount_trade_in_btc 266 | amount_trade_in_btc = balance_btc*0.5 267 | amount_trade_in_fiat = round_down(amount_trade_in_btc * price, 2) 268 | print_formatted("Going from btc->fiat: buying %s fiat, selling %s btc" % (amount_trade_in_fiat, amount_trade_in_btc),2) 269 | if last_trade_midpoint_price < price: 270 | print_formatted("+Sellback price did go up, so should be profiting!",2) 271 | accurate_trades_count = accurate_trades_count + 1 272 | else: 273 | print_formatted("-Sellback price didn't go up, should be a loss",2) 274 | response = call_trade_api_with_retries(lambda: trade_api.sell(price, amount_trade_in_btc)) 275 | if isinstance(response, int): 276 | last_trade_midpoint_price = current_midpoint_price 277 | current_order_id = response 278 | trade_position_periods_checked = {} 279 | print_formatted("@@@*** Order successfully made, order ID: %s" % (current_order_id),1) 280 | else: 281 | print_formatted("@@@!!! 
Error making order: %s" % (str(response['error'] if isinstance(response, dict) and 'error' in response else response)), 1) 282 | last_trade_midpoint_price = 0 283 | amount_trade_in_fiat = 0 284 | amount_trade_in_btc = 0 285 | print_formatted("------------------------------") 286 | position = 0 287 | 288 | if current_order_id != 0: 289 | response = call_trade_api_with_retries(lambda: trade_api.get_orders(current_order_id)) 290 | print_formatted("@@@ Pending order (ID %s) status: "%(current_order_id),1) 291 | print_formatted(str(response).replace("u\"","\"").replace("u\'","\'").replace("\'","\""),7,False,None,75) 292 | 293 | seconds_elapsed_since_trade = start - trade_time 294 | order_status = response['order']['status'] 295 | order_type = response['order']['type'] 296 | order_price = float(response['order']['price']) 297 | amount_unfilled = float(response['order']['amount']) 298 | amount_filled_total = amount_trade_in_btc - amount_unfilled 299 | 300 | if amount_unfilled != 0 or order_status == 'closed': 301 | current_order_id = 0 302 | balance_btc, balance_fiat = get_live_balances() 303 | print_formatted("+++ Order completed after %s s. Current balances: balance_btc=%s, balance_fiat=%s"%(seconds_elapsed_since_trade,balance_btc,balance_fiat), 1) 304 | if position == 0: 305 | # ToDo % change from previous trade. Send email if going too low and kill 306 | btc_percent_comparison = balance_btc / balance_btc_initial * 100 307 | fiat_percent_comparison = balance_fiat / balance_fiat_initial * 100 308 | total_percent_comparison = (btc_percent_comparison-100.0) + (fiat_percent_comparison-100.0) 309 | print_formatted("+++ Current bals in %% comparison w/ initial: btc=%s%%, fiat=%s%%, total=%s%%" % (btc_percent_comparison, fiat_percent_comparison, total_percent_comparison), 1) 310 | print_formatted("+++ Current accuracy percentage: %s%%"%((float(accurate_trades_count)/float(trades_count))*100), 1) 311 | else: 312 | period = 1 313 | period_duration = prediction_duration / finish_within_one_x_of_duration / periods 314 | for key in trade_position_periods_checked: 315 | if key >= period: 316 | period = key+1 317 | this_period_duration = period_duration * period 318 | if seconds_elapsed_since_trade >= this_period_duration and period not in trade_position_periods_checked: 319 | if order_status != 'cancelled': 320 | print_formatted("@@@*** Position %s order still not filled at period %s after %s seconds" % (position, period, seconds_elapsed_since_trade), 1) 321 | response = call_trade_api_with_retries(lambda: trade_api.cancel(current_order_id)) 322 | print_formatted("@@@*** Order canceled.", 1) 323 | else: 324 | trade_position_periods_checked[period] = True 325 | current_order_id = 0 326 | 327 | if period < periods and seconds_elapsed_since_trade < prediction_duration: 328 | amount = amount_unfilled 329 | print_formatted("@@@*** Retry #%s. Will remake order as %s at current midpoint price %s for unfilled btc amount: %s" % (period, order_type, current_midpoint_price, amount), 1) 330 | if order_type == 'bid': 331 | if last_trade_midpoint_price != current_midpoint_price and position != 0: 332 | #Recalculate amount based on new price. 
Only necessary for initial (position take) orders which leave nothing left in fiat balance 333 | balance_btc, balance_fiat = get_live_balances() 334 | balance_fiat_remaining = balance_fiat 335 | amount = round_down(balance_fiat_remaining / current_midpoint_price, 4) 336 | amount_trade_in_btc = amount_filled_total + amount 337 | print_formatted("@@@*** Price changed for initial buy order. Recalc w/ remaining fiat bal: %s, unfilled btc amount: %s"%(balance_fiat_remaining, amount),1) 338 | print_formatted("@@@*** Total trade in btc amount adjusted to: %s" % (amount_trade_in_btc), 1) 339 | response = call_trade_api_with_retries(lambda: trade_api.buy(current_midpoint_price, amount)) 340 | elif order_type == 'ask': 341 | response = call_trade_api_with_retries(lambda: trade_api.sell(current_midpoint_price, amount)) 342 | if isinstance(response, int): 343 | current_order_id = response 344 | last_trade_midpoint_price = current_midpoint_price 345 | print_formatted("@@@*** Order successfully made, order ID: %s" % (current_order_id), 1) 346 | else: 347 | print_formatted("@@@!!! Error making order: %s" % (str(response['error'] if isinstance(response,dict) and 'error' in response else response)),1) 348 | 349 | else: 350 | print_formatted("@@@*** Final period reached. Filled btc amount: %s. Unfilled btc amount: %s"%(amount_filled_total, amount_unfilled),1) 351 | 352 | if position!=0 and amount_filled_total==0: #Initial (position take) trade not filled at all, all good 353 | balance_btc, balance_fiat = get_live_balances() 354 | print_formatted("@@@*** Position take order not filled at all, moving on. Current balances: balance_btc=%s, balance_fiat=%s"%(balance_btc,balance_fiat), 1) 355 | position = 0 356 | trades_count = trades_count - 1 357 | 358 | else: 359 | if position != 0: #Initial (position take) trade 360 | if (amount_filled_total / amount_trade_in_btc) < 0.5: #Less than 50% filled, so reverse it! 361 | reverse = True 362 | amount = amount_filled_total #Reverse the filled amount 363 | if order_type == 'bid': 364 | this_order_type = 'ask' 365 | elif order_type == 'ask': 366 | this_order_type = 'bid' 367 | else: #More than 50% filled, so forge ahead 368 | amount = amount_unfilled 369 | reverse = False 370 | this_order_type = order_type 371 | elif position == 0: 372 | amount = amount_unfilled 373 | reverse = False 374 | this_order_type = order_type 375 | 376 | if not reverse: 377 | print_formatted("@@@*** Will fill remainder as %s at market, the unfilled btc amount: %s" % (this_order_type, amount), 2) 378 | elif reverse: 379 | trades_count = trades_count - 1 380 | print_formatted("@@@*** Position take order filled only < 50 percent. Canceling position / reversing trade as %s at market. Reversing filled btc amount: %s" % (this_order_type, amount), 2) 381 | if this_order_type == 'bid': 382 | response = call_trade_api_with_retries(lambda: trade_api.buy(None, amount)) 383 | elif this_order_type == 'ask': 384 | response = call_trade_api_with_retries(lambda: trade_api.sell(None, amount)) 385 | if isinstance(response, int): 386 | current_order_id = response 387 | print_formatted("@@@*** Order successfully made, order ID: %s" % (current_order_id),1) 388 | else: 389 | print_formatted("@@@!!! 
Error making order: %s" % (str(response['error'] if isinstance(response,dict) and 'error' in response else response)),1) 390 | if position != 0 and reverse: 391 | position = 0 392 | 393 | time_delta = get_current_time_seconds_utc()-start 394 | if time_delta < 1.0: 395 | time.sleep(1-time_delta) 396 | 397 | except Exception as e: 398 | print_formatted("!!! Exception: %s"%str(e)) 399 | traceback.print_exc() 400 | sys.exc_clear() 401 | -------------------------------------------------------------------------------- /images/bot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AdeelMufti/CryptoBot/6cbdfea43af9690d289f92db6b1b3b371abbd2fb/images/bot.png --------------------------------------------------------------------------------