├── LICENSE ├── README.md ├── caffeinated_pandas_utils.py ├── run_dataframe_squeeze.py ├── run_file_storage_tests.py ├── run_multiprocessing_tests.py └── run_sample_demos.py /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Caffeinated Pandas 2 | ## Accelerate your modeling, reporting, and development 3 | #### 89% less RAM | 98% faster disk | 72% less storage | 48–95% faster enhancements| 10x+ faster development 4 | 5 | This repo is a companion to a series of Medium.com articles I published in September 2021. The series' introduction can be found here: [Caffeinated Pandas: accelerate your modeling, reporting, and development](https://medium.com/@scollay/caffeinated-pandas-accelerate-your-modeling-reporting-and-development-e9d41476de3b) 6 | 7 | ## Background 8 | I've long been interested in financial markets, trading, and investing. 9 | 10 | Last year, with a lot more down time and home time than expected, and armed with a few years of casual Python programming under my belt, I decided to build a backtesting and trading system that would suit MY needs. Then, if I happened on an interesting book or white paper with a trading idea, I'd have no limits to testing because I controlled the code, the market universe, and computing resources. 11 | 12 | After months of "organic", ad-hoc data gathering and testing, I realized that I lacked even the most basic data processing capabilities and the infrastructure needed to efficiently test my trading and portfolio building ideas on a broad range of stocks and markets. 13 | 14 | I discovered that there were four productivity killers that were really holding me back during the development and testing phases of my modeling: 15 | 16 | 1. **Running out of memory.** Solution: [Squeezing Pandas - 89% less RAM consumption](https://medium.com/@scollay/squeezing-pandas-89-less-ram-consumption-4d91a0eb9c08) 17 | 2. **Disk reading and writing taking way too long.** Solution: [Storing Pandas - 98% faster disk reads and 72% less space](https://medium.com/@scollay/storing-pandas-98-faster-disk-reads-and-72-less-space-208e2e2be8bb) 18 | 3. **Computer's full processing power not being used.** Solution: [Multiprocessing Pandas - 46 to 95% faster Dataframe enhancements](https://medium.com/@scollay/multiprocessing-pandas-46-95-faster-dataframe-enhancements-c65ef29f03b1) 19 | 4. 
**Taking too long to develop, iterate, and test code.** Solution: [Processing Pandas - 10x+ faster coding and iteration with "rough samples"](https://medium.com/@scollay/processing-pandas-10x-faster-coding-and-iteration-with-rough-samples-78b75b7d5b0b) 20 | 21 | *Note:* After a few articles you may be presented with a Medium.com paywall. A 1-month subscription costs a mere $5.00 and helps to support thousands of writers. 22 | 23 | ## Not just for stocks 24 | It should be noted that while I'm clearly focused on stock quotes in this series of articles, the principles will certainly work on any columnar, Pandas-based data for all types of models and analysis. 25 | 26 | ## Why I've written and published this 27 | It's taken me a long time with many false starts to get to this point where I can confidently process large swaths of data. Going into this I had a notion that I'd like to write about it. Just knowing that others would be reading it, I've dramatically improved my own code and its performance, sometimes by an order of magnitude as I searched for "better" ways. Also, by sharing, I'm hoping others will provide constructive feedback on what I could have done better! 28 | 29 | 30 | ## Requirements 31 | 32 | ### Python version 33 | On Mac I installed with Miniconda: Python 3.9.5 34 | 35 | On Linux/Ubuntu server: Python 3.8.2 36 | 37 | ### Libraries 38 | *apt install python3-pip* 39 | 40 | *pip install pandas* 41 | 42 | *pip install psutil* 43 | 44 | *pip install pyarrow* 45 | 46 | *pip install scipy* 47 | 48 | tables library: *conda install pytables* for Miniconda installation, or *pip install tables* for Ubuntu's Python 49 | 50 | ## Feedback 51 | Please feel free to reach out to me directly at scollay@coldbrew.cc 52 | -------------------------------------------------------------------------------- /caffeinated_pandas_utils.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | 4 | import psutil 5 | import os 6 | import io 7 | import glob 8 | import gc #memory garbage collection 9 | import sqlite3 10 | import hashlib 11 | from datetime import datetime 12 | import multiprocessing 13 | 14 | from pandas.api.types import CategoricalDtype 15 | 16 | import time 17 | 18 | def squeeze_dataframe(df): 19 | 20 | #----- Get columns in dataframe 21 | cols = dict(df.dtypes) 22 | 23 | #----- Check each column's type; downcast or categorize as appropriate 24 | for col, dtype in cols.items(): 25 | if dtype == 'float64': 26 | df[col] = pd.to_numeric(df[col], downcast='float') 27 | elif dtype == 'int64': 28 | df[col] = pd.to_numeric(df[col], downcast='integer') 29 | elif dtype == 'object': 30 | df[col] = df[col].astype(CategoricalDtype(ordered=True)) 31 | 32 | return df 33 | 34 | 35 | def panda_mem_usage(df, detail='full'): 36 | 37 | dtypes = df.dtypes.reset_index(drop=False) 38 | memory = df.memory_usage(deep=True).reset_index(drop=False) 39 | 40 | df1 = pd.merge(dtypes, memory, on='index') 41 | df1 = df1.rename(columns = {'index': 'col', '0_x': 'type', '0_y': 'bytes'}) 42 | total = df1['bytes'].sum() 43 | 44 | objects = df.select_dtypes(include=['object', 'category']) 45 | df_objs = objects.select_dtypes(include=['object', 'category']).describe().T.reset_index() 46 | 47 | if detail == 'full': 48 | print('') 49 | print('{:<15} {:<15} {:>15} {:>8} {:>8}'.format('Column', 'Data Type', 'Bytes', 'MBs', 'GBs')) 50 | print('{} {} {} {} {}'.format('-'*15, '-'*15, '-'*15, '-'*8, '-'*8)) 51 | 52 | for index, row in df1.iterrows(): 53 | 
print('{:<15} {:<15} {:>15,.0f} {:>8,.1f} {:>8,.2f}'.format(row['col'], str(row['type']), row['bytes'], row['bytes']/1024**2, row['bytes']/1024**3)) 54 | 55 | print('\nTotal: {:,.0f} Rows, {:,.0f} Bytes, {:,.1f} MBs, {:,.2f} GBs\n'.format(len(df), total, total/1024**2, total/1024**3)) 56 | 57 | print('{:<15} {:>13} {:>13}'.format('Column', 'Count', 'Unique')) 58 | print('{} {} {}'.format('-'*15, '-'*13, '-'*13)) 59 | for index, row in df_objs.iterrows(): 60 | print('{:<15} {:>13,.0f} {:>13,.0f}'.format(row['index'], row['count'], row['unique'])) 61 | 62 | elif detail == 'return_short': 63 | return len(df), total 64 | 65 | 66 | def write_file(df, fn, compression=''): 67 | 68 | fn_ext = os.path.splitext(fn)[1] 69 | 70 | if fn_ext == '.csv': 71 | df.to_csv(fn, index=False) 72 | 73 | elif fn_ext == '.zip': 74 | df.to_csv(fn, compression=dict(method='zip', archive_name='data'), index=False) 75 | 76 | elif fn_ext == '.parquet': 77 | compression = 'brotli' if compression == '' else compression 78 | df.to_parquet(fn, engine='pyarrow', compression=compression) 79 | 80 | elif fn_ext == '.feather': 81 | compression = 'zstd' if compression == '' else compression 82 | df.to_feather(fn, compression=compression) 83 | 84 | elif fn_ext == '.h5': 85 | compression = 'blosc:lz4' if compression == '' else compression 86 | df.to_hdf(fn, key='data', mode='w', format='table', index=False, complevel=9, complib=compression) 87 | 88 | elif fn_ext == '.pkl': 89 | compression = 'zip' if compression == '' else compression 90 | df.to_pickle(fn, compression=compression) 91 | 92 | elif fn_ext == '.sqlite': 93 | con = sqlite3.connect(fn) 94 | df.to_sql('data', con=con, if_exists='replace', index=False) 95 | con.close() 96 | 97 | elif fn_ext == '.json': 98 | df.to_json(fn, orient='records') 99 | 100 | elif fn_ext == '.xlsx': 101 | writer = pd.ExcelWriter(fn, engine='xlsxwriter') 102 | df.to_excel(writer, sheet_name='quotes', index=False) 103 | #add more sheets by repeating df.to_excel() and change sheet_name 104 | writer.save() 105 | 106 | else: 107 | print('oopsy in write_file()! File extension unknown:', fn_ext) 108 | quit(0) 109 | 110 | return 111 | 112 | 113 | def read_file(fn, compression='', sql=''): 114 | 115 | fn_ext = os.path.splitext(fn)[1] 116 | 117 | if fn_ext == '.csv' or fn_ext == '.zip': 118 | df = pd.read_csv(fn, keep_default_na=False) 119 | 120 | elif fn_ext == '.parquet': 121 | df = pd.read_parquet(fn) 122 | 123 | elif fn_ext == '.feather': 124 | df = pd.read_feather(fn) 125 | 126 | elif fn_ext == '.h5': 127 | df = pd.read_hdf(fn, key='data') 128 | 129 | elif fn_ext == '.pkl': 130 | df = pd.read_pickle(fn, compression=compression).copy() #copy added because of some trouble with categories not fully read by mem util on first pass 131 | 132 | elif fn_ext == '.sqlite': 133 | if sql == '': 134 | sql = 'SELECT * FROM data' 135 | con = sqlite3.connect(fn) 136 | df = pd.read_sql(sql, con) 137 | con.close() 138 | 139 | elif fn_ext == '.json': 140 | df = pd.read_json(fn, convert_dates=False) 141 | 142 | elif fn_ext == '.xlsx': 143 | df = pd.read_excel(fn, sheet_name='quotes', keep_default_na=False) 144 | 145 | else: 146 | print('oopsy in read_file()! 
File extension unknown:', fn_ext) 147 | quit(0) 148 | 149 | return df 150 | 151 | 152 | def sample_id(string, samples=100): 153 | #----- given a string, return a repeatable integer between 0 and samples-1 154 | # don't use it expecting a perfectly even sample 155 | # this is really just a quick way to split up a dataset into roughly equal parts in a way that's repeatable 156 | # from https://stackoverflow.com/questions/16008670/how-to-hash-a-string-into-8-digits 157 | sample = int(hashlib.sha256(string.encode('utf8')).hexdigest(), 16) % samples 158 | 159 | return sample 160 | 161 | 162 | def create_test_dataframe(start_date, end_date, num_symbols, squeeze=True, out=''): 163 | 164 | #----- Create skeleton dataframe for one symbol, 20-years, business days only 165 | print('cp.create_test_dataframe --> create skeleton') 166 | np.random.seed(0) # seed so there's consistency between testing runs 167 | dfs = pd.DataFrame({'date': pd.date_range(start=start_date, end=end_date, freq='B').strftime('%Y-%m-%d'), 168 | 'vendor':'StockDataCo', 'interval':'1day', 'symbol':'BEAN'}) 169 | 170 | 171 | #----- Duplicate skeleton and populate with pseudo-random values 172 | print('cp.create_test_dataframe --> duplicate symbols by', num_symbols) 173 | df = dfs.loc[np.repeat(dfs.index.values, num_symbols)] 174 | print('cp.create_test_dataframe --> created {:,.0f} rows'.format(len(df))) 175 | 176 | 177 | #----- for each duplicate added, create a unique symbol name 178 | print('cp.create_test_dataframe --> make symbol names') 179 | df['dupe_num'] = df.groupby(['date']).cumcount()+1 #assigns a sequence 180 | df['dupe_num'] = df['dupe_num'].astype(str).str.zfill(len(str(num_symbols))) #pad with 0's based on num_symbols length 181 | df['symbol'] = dfs['symbol'].str.cat('.'+df['dupe_num']) 182 | df = df.drop('dupe_num', axis=1).reset_index(drop=True) 183 | 184 | 185 | #----- For each column, populate values based on a random open to demonstrate compression. 186 | # Note that this is not a true depiction of random prices or indicators! 
187 | print('cp.create_test_dataframe --> populate prices, indicators and signals') 188 | df['open'] = [round(np.random.uniform(1,200),2) for k in df.index] 189 | df['high'] = round(df['open'] * 1.11, 2) 190 | df['low'] = round(df['open'] * 0.91, 2) 191 | df['close'] = round(df['open'] * 1.06, 2) 192 | df['volume'] = (df['open'] * 1123211).astype(int) 193 | df['dividend'] = round(df['open'] * 0.021, 2) 194 | df['ind1'] = round(df['open'] * 0.5, 2) 195 | df['ind2'] = round(df['open'] * 1.2, 2) 196 | df['ind3'] = round(df['open'] * 0.9, 2) 197 | df['trend1'] = (df['open'] % 2).astype(int) 198 | df['trend2'] = (df['close'] % 2).astype(int) 199 | df['signal'] = df['open'].apply(lambda x: 'buy' if (int(x) % 2) == 0 else 'sell') 200 | df['sample'] = df['symbol'].apply(lambda x: sample_id(x, samples=100)) 201 | 202 | 203 | #----- Squeeze if specified 204 | print('cp.create_test_dataframe --> squeezing') 205 | if squeeze == True: 206 | df = squeeze_dataframe(df) 207 | 208 | 209 | #----- Write to file if specified 210 | if out != '': 211 | print('cp.create_test_dataframe --> writing to fn=', out) 212 | write_file(df=df, fn=out) 213 | 214 | print('cp.create_test_dataframe --> done') 215 | 216 | return df 217 | 218 | 219 | def multiproc_run_target(*args): 220 | #----- A companion to multiproc_dataframe below - sends the data split to the enhancement function and saves the result to be read by the parent function 221 | # Done this way so the data enhancement function can remain unchanged from one that does a straight apply() 222 | 223 | #----- First argument is filename of data split 224 | function = args[0] 225 | fn = args[1] 226 | 227 | args1 = [] 228 | 229 | #----- read in file from multiproc_dataframe parent function - it is the first df= argument for the enhancement function 230 | df = pd.read_feather(fn) 231 | args1.append(df) 232 | 233 | #----- append any additional parameters that are provided for enhancement function 234 | for arg in args[2:]: 235 | args1.append(arg) 236 | 237 | #----- Run function 238 | df = function(*args1) 239 | 240 | 241 | #----- Save returned dataframe so it can be read by the parent when all worker processes have finished 242 | df.to_feather(fn, compression='lz4') 243 | 244 | del df 245 | gc.collect() 246 | 247 | 248 | 249 | def multiproc_dataframe(**kwargs): 250 | 251 | #----- load required parameter to pass onto the function, and load optionally added parameters to a list 252 | added_args = [] 253 | 254 | for key, val in kwargs.items(): 255 | 256 | if key == 'function': 257 | function = val 258 | 259 | elif key == 'df': 260 | df = val 261 | 262 | elif key == 'procs': 263 | procs = val 264 | 265 | elif key == 'splitby': 266 | splitby = val 267 | 268 | else: 269 | added_args.append(val) 270 | 271 | if 'procs' not in kwargs: 272 | procs = multiprocessing.cpu_count() 273 | 274 | if 'splitby' not in kwargs or splitby == None: 275 | df_splits = np.array_split(df, procs) 276 | 277 | temp_files = [] 278 | for i in range(len(df_splits)): 279 | temp_files.append('temp_file_'+str(i)+'.feather') 280 | dfs = df_splits[i].reset_index(drop=True) 281 | dfs.to_feather(temp_files[i], compression='lz4') 282 | 283 | else: #create splits by splitting symbols and then finding their related starting and ending rows 284 | 285 | temp_files = [] 286 | for i in range(procs): 287 | temp_files.append('temp_file_'+str(i)+'.feather') 288 | 289 | df1 = df[splitby].drop_duplicates(keep='first').reset_index(drop=False) 290 | 291 | df_splits = np.array_split(df1, procs) 292 | 293 | for i in range(len(df_splits)): 294 | 295 | 
start_split = df_splits[i].iloc[0]['index'] 296 | 297 | if i == len(df_splits)-1: 298 | end_split = None 299 | else: 300 | end_split = df_splits[i+1].iloc[0]['index'] 301 | 302 | dfs = df[start_split : end_split].reset_index(drop=True) 303 | dfs.to_feather(temp_files[i], compression='lz4') 304 | del dfs 305 | 306 | del df 307 | gc.collect() 308 | 309 | 310 | #----- Initialize 311 | manager = multiprocessing.Manager() #not used - results are passed back through the temp feather files written by multiproc_run_target 312 | processes = [] 313 | 314 | 315 | #----- Process each split in its own process, then concatenate the results once all processes have finished 316 | for i in range(procs): 317 | 318 | process = multiprocessing.Process(target=multiproc_run_target, args=[function, temp_files[i]]+added_args) 319 | processes.append(process) 320 | 321 | process.start() 322 | 323 | for process in processes: 324 | process.join() 325 | 326 | 327 | dfs = [] 328 | for fn in temp_files: 329 | dfs.append(pd.read_feather(fn)) 330 | os.remove(fn) 331 | 332 | df = pd.concat(dfs, ignore_index=True) 333 | df = df.reset_index(drop=True) 334 | 335 | return df 336 | 337 | 338 | 339 | def del_files(path): 340 | 341 | files = glob.glob(path) 342 | 343 | if len(files) == 0: 344 | print('no files to delete matching "'+path+'"') 345 | return 346 | 347 | for file in files: 348 | print('Deleting', file) 349 | os.remove(file) 350 | 351 | -------------------------------------------------------------------------------- /run_dataframe_squeeze.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from datetime import datetime 4 | 5 | import caffeinated_pandas_utils as cp 6 | 7 | 8 | #----- Create a test Dataframe 9 | fn = 'stock_test.csv' 10 | #num_symbols = 10_000 11 | #num_symbols = 1_000 12 | num_symbols = 100 #try this first and build up bigger 13 | cp.create_test_dataframe(start_date='2000-01-01', end_date='2019-12-31', 14 | num_symbols=num_symbols, 15 | squeeze=False, 16 | out=fn) 17 | df = cp.read_file(fn) 18 | print('\n------- Test Dataframe -------\n', df) 19 | 20 | 21 | #----- Print memory usage of dataframe 22 | cp.panda_mem_usage(df) 23 | 24 | 25 | #----- Squeeze test Dataframe 26 | print('Squeezing Dataframe') 27 | start = datetime.utcnow() 28 | df = cp.squeeze_dataframe(df) 29 | end = datetime.utcnow() 30 | print('\nTime to compress: '+str(end-start)[0:11]) 31 | 32 | #----- Print usage of compressed dataframe 33 | cp.panda_mem_usage(df) 34 | -------------------------------------------------------------------------------- /run_file_storage_tests.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | 4 | from datetime import datetime 5 | import os 6 | import gc #memory garbage collection 7 | import sys 8 | 9 | import caffeinated_pandas_utils as cp 10 | 11 | 12 | def write_read_test(df, fn, compression='', iterations=3): 13 | 14 | gc.collect() 15 | 16 | print('\nFile:', fn) 17 | 18 | timer = [] 19 | for i in range(iterations): 20 | print('writing...', end='', flush=True) 21 | start = datetime.utcnow() 22 | cp.write_file(df=df, fn=fn, compression=compression) 23 | end = datetime.utcnow() 24 | timer.append(end - start) 25 | file_size = os.path.getsize(fn)/(1024**2) 26 | df_size_written = cp.panda_mem_usage(df, detail='return_short') 27 | write_time = np.mean(timer) 28 | print() 29 | 30 | timer = [] 31 | for i in range(iterations): 32 | print('reading...', end='', flush=True) 33 | start = datetime.utcnow() 34 | df1 = cp.read_file(fn=fn, compression=compression) 35 | end = datetime.utcnow() 36 
| timer.append(end - start) 37 | df_size_read = cp.panda_mem_usage(df1, detail='return_short') 38 | read_time = np.mean(timer) 39 | print() 40 | 41 | timer = [] 42 | for i in range(iterations): 43 | print('selecting...', end='', flush=True) 44 | start = datetime.utcnow() 45 | df2 = df1[(df1['symbol'].str.contains('666') | df1['symbol'].str.contains('777')) & (df1['date'].str[8:10] <= '10')].reset_index(drop=True) 46 | df2 = df1[df1['dividend'] > (df1['dividend'].max() * .99)].reset_index(drop=True) #highest dividends paid 47 | df2 = df1[(df1['ind1'] > df1['ind2'].shift()) & (df1['ind2'] > df1['ind3'].shift(3))].reset_index(drop=True) #indicator crossover filter 48 | df2 = df1.pivot(index='date', columns='symbol', values='close') 49 | end = datetime.utcnow() 50 | timer.append(end - start) 51 | select_time = np.mean(timer) 52 | print() 53 | 54 | timer = [] 55 | for i in range(iterations): 56 | print('sorting...', end='', flush=True) 57 | start = datetime.utcnow() 58 | df2 = df1.sort_values(by=['symbol','date'], ascending=[True, False]).reset_index(drop=True) 59 | end = datetime.utcnow() 60 | timer.append(end - start) 61 | sort_time = np.mean(timer) 62 | print() 63 | 64 | 65 | #----- Clean up memory 66 | print('deleting dataframes...', end='', flush=True) 67 | df = pd.DataFrame() 68 | del df 69 | 70 | df1 = pd.DataFrame() 71 | del df1 72 | 73 | df2 = pd.DataFrame() 74 | del df2 75 | 76 | gc.collect() 77 | 78 | print('done', end='', flush=True) 79 | 80 | 81 | #----- Print stats 82 | print('') 83 | print('Time Write to Disk - '+str(write_time)[0:11]) 84 | print('Disk File Size - {:,.2f} MB'.format(file_size)) 85 | 86 | print('Read Time - '+str(read_time)[0:11]) 87 | print('Dataframe After Read - {:,.1f} MB'.format(df_size_read[1]/1024**2)) 88 | 89 | print('Time Selects - '+str(select_time)[0:11]) 90 | print('Time Sorts - '+str(sort_time)[0:11]) 91 | 92 | 93 | 94 | #===== Main program 95 | 96 | #----- Create a test Dataframe 97 | fn = 'stock_test.csv' 98 | #num_symbols = 10_000 99 | #num_symbols = 1_000 100 | num_symbols = 100 #try this first and build up bigger 101 | cp.create_test_dataframe(start_date='2000-01-01', end_date='2019-12-31', num_symbols=num_symbols, squeeze=False, out=fn) #comment out after first run to re-use file 102 | df = cp.read_file(fn) 103 | print('\n------- Test Dataframe -------\n', df) 104 | 105 | #----- Squeeze the dataframe to ideal memory size (see "compressing" Medium article and run_dataframe_squeeze.py for background) 106 | df_size_read = cp.panda_mem_usage(df, detail='return_short') 107 | 108 | print('\nSqueezing Dataframe') 109 | df = cp.squeeze_dataframe(df) 110 | df_size_squeezed = cp.panda_mem_usage(df, detail='return_short') 111 | 112 | print('Dataframe Rows - {:,.0f}'.format(df_size_read[0])) 113 | print('Dataframe Size - {:,.1f} MB'.format(df_size_read[1]/1024**2)) 114 | print('Dataframe Size Squeezed - {:,.1f} MB'.format(df_size_squeezed[1]/1024**2)) 115 | 116 | 117 | #---- Delete previous test files 118 | cp.del_files('test-file*') 119 | 120 | 121 | #----- Test various file formats and compression methods 122 | iterations = 3 123 | 124 | write_read_test(df=df, fn='test-file.csv', iterations=iterations) 125 | write_read_test(df=df, fn='test-file.zip', iterations=iterations) 126 | 127 | write_read_test(df=df, fn='test-file-snappy.parquet', compression='snappy', iterations=iterations) 128 | write_read_test(df=df, fn='test-file-gzip.parquet', compression='gzip', iterations=iterations) 129 | write_read_test(df=df, fn='test-file-brotli.parquet', compression='brotli', 
iterations=iterations) 130 | 131 | write_read_test(df=df, fn='test-file-zstd.feather', compression='zstd', iterations=iterations) 132 | write_read_test(df=df, fn='test-file-lz4.feather', compression='lz4', iterations=iterations) 133 | 134 | write_read_test(df=df, fn='test-file-bzip2.h5', compression='bzip2', iterations=iterations) 135 | write_read_test(df=df, fn='test-file-lzo.h5', compression='lzo', iterations=iterations) 136 | write_read_test(df=df, fn='test-file-zlib.h5', compression='zlib', iterations=iterations) 137 | write_read_test(df=df, fn='test-file-blosc-blosclz.h5', compression='blosc:blosclz', iterations=iterations) 138 | write_read_test(df=df, fn='test-file-blosc-lz4.h5', compression='blosc:lz4', iterations=iterations) 139 | write_read_test(df=df, fn='test-file-blosc-lz4hc.h5', compression='blosc:lz4hc', iterations=iterations) 140 | write_read_test(df=df, fn='test-file-blosc-zlib.h5', compression='blosc:zlib', iterations=iterations) 141 | write_read_test(df=df, fn='test-file-blosc-zstd.h5', compression='blosc:zstd', iterations=iterations) 142 | 143 | write_read_test(df=df, fn='test-file-zip.pkl', compression='zip', iterations=iterations) 144 | write_read_test(df=df, fn='test-file-gzip.pkl', compression='gzip', iterations=iterations) 145 | write_read_test(df=df, fn='test-file-xz.pkl', compression='xz', iterations=iterations) 146 | write_read_test(df=df, fn='test-file-bz2.pkl', compression='bz2', iterations=iterations) 147 | 148 | write_read_test(df=df, fn='test-file.sqlite', iterations=iterations) 149 | 150 | -------------------------------------------------------------------------------- /run_multiprocessing_tests.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from datetime import datetime 4 | import psutil 5 | import gc #memory garbage collection 6 | 7 | import caffeinated_pandas_utils as cp 8 | from scipy import stats as scipy_stats 9 | 10 | import time 11 | 12 | 13 | def add_stats(df, bars1, bars2, bars3): 14 | 15 | #----- Groupby symbol and store into an object which can be used for all calculations 16 | groups = df.groupby('symbol', observed=True)['close'] #observed=True needed for categorical data, otherwise results may be unpredictable 17 | 18 | #----- Rolling Simple Moving Averages 19 | df['sma_1'] = groups.rolling(bars1).mean().reset_index(drop=True) 20 | df['sma_2'] = groups.rolling(bars2).mean().reset_index(drop=True) 21 | df['sma_3'] = groups.rolling(bars3).mean().reset_index(drop=True) 22 | 23 | #----- Rolling Standard Deviation 24 | df['std_1'] = groups.rolling(bars1).std(ddof=0).reset_index(drop=True) 25 | df['std_2'] = groups.rolling(bars2).std(ddof=0).reset_index(drop=True) 26 | df['std_3'] = groups.rolling(bars3).std(ddof=0).reset_index(drop=True) 27 | 28 | #----- Rolling Higher Highs and Lower Lows - Donchian Channels - https://www.investopedia.com/terms/d/donchianchannels.asp 29 | df['hh_1'] = groups.rolling(bars1).max().reset_index(drop=True) 30 | df['hh_2'] = groups.rolling(bars2).max().reset_index(drop=True) 31 | df['hh_3'] = groups.rolling(bars3).max().reset_index(drop=True) 32 | 33 | df['ll_1'] = groups.rolling(bars1).min().reset_index(drop=True) 34 | df['ll_2'] = groups.rolling(bars2).min().reset_index(drop=True) 35 | df['ll_3'] = groups.rolling(bars3).min().reset_index(drop=True) 36 | 37 | #----- Maximum Drawdown - https://www.investopedia.com/terms/m/maximum-drawdown-mdd.asp 38 | df['maxdd_1'] = groups.rolling(bars1).apply(lambda x: np.min(x / 
np.maximum.accumulate(x)) - 1).reset_index(drop=True) 39 | df['maxdd_2'] = groups.rolling(bars2).apply(lambda x: np.min(x / np.maximum.accumulate(x)) - 1).reset_index(drop=True) 40 | df['maxdd_3'] = groups.rolling(bars3).apply(lambda x: np.min(x / np.maximum.accumulate(x)) - 1).reset_index(drop=True) 41 | 42 | #----- R-Value isn't being used here to demonstrate any particular type of technical analysis 43 | #----- Included here because it's a CPU intensive function and good for this demo! 44 | def calc_rvalue(ts): 45 | x = np.arange(len(ts)) 46 | log_ts = np.log(ts) 47 | slope, intercept, rvalue, pvalue, std_err = scipy_stats.linregress(x, log_ts) 48 | return rvalue 49 | 50 | df['rvalue_1'] = groups.rolling(bars1).apply(calc_rvalue).reset_index(drop=True) 51 | df['rvalue_2'] = groups.rolling(bars2).apply(calc_rvalue).reset_index(drop=True) 52 | df['rvalue_3'] = groups.rolling(bars3).apply(calc_rvalue).reset_index(drop=True) 53 | 54 | return df 55 | 56 | 57 | if __name__ == '__main__': #--- required so multiprocessing child processes don't re-run this block when they import the module 58 | 59 | #----- Core check 60 | print('\nPhysical Cores: '+str(psutil.cpu_count(logical=False)), ' / Logical Cores: '+ str(psutil.cpu_count(logical=True)), '\n') 61 | 62 | #----- Create a test Dataframe 63 | fn = 'stock_test.feather' 64 | #num_symbols = 10_000 65 | #num_symbols = 1_000 66 | num_symbols = 100 #try this first and build up bigger 67 | df = cp.create_test_dataframe(start_date='2000-01-01', end_date='2019-12-31', num_symbols=num_symbols, squeeze=True) #comment out after first run to re-use file 68 | 69 | #----- Dataframe needs to be sorted by the splitby parameter in the multiproc_dataframe as well as by intended groupby ordering (e.g., groupby symbol, ordered by date) 70 | df = df.sort_values(by=['symbol','date'], ascending=True).reset_index(drop=True) #comment out after first run to re-use file 71 | 72 | #----- Write to file for next time 73 | cp.write_file(df, fn) #comment out after first run to re-use file 74 | 75 | #----- Read the file 76 | df = cp.read_file(fn) 77 | print('\n------- Test Dataframe -------\n', df) 78 | 79 | #----- Run Single Processor test 80 | start = datetime.utcnow() 81 | df1 = add_stats(df=df, bars1=21, bars2=63, bars3=252) 82 | end = datetime.utcnow() 83 | print('add_stats Single-processor: 1 Time: '+str(end-start)[0:11]) 84 | 85 | print() 86 | 87 | 88 | # for procs in [2,4,8,12,16,20,24,28,32,36]: # test on 32-CPU server 89 | for procs in [2,4,6,8,10,12]: # test for quad-core PC 90 | # for procs in [2,4,6,8]: # test for dual-core PC 91 | start = datetime.utcnow() 92 | df1 = cp.multiproc_dataframe(function=add_stats, df=df, bars1=21, bars2=63, bars3=252, procs=procs, splitby=['symbol']) 93 | end = datetime.utcnow() 94 | print('add_stats Multi-processor: '+str(procs)+' Time: '+str(end-start)[0:11]) 95 | 96 | print() 97 | 98 | 99 | -------------------------------------------------------------------------------- /run_sample_demos.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | 4 | from datetime import datetime 5 | 6 | import caffeinated_pandas_utils as cp 7 | 8 | 9 | #----- Create a test Dataframe 10 | fn = 'stock_test.feather' 11 | #num_symbols = 10_000 12 | num_symbols = 1_000 #try this first and build up bigger 13 | cp.create_test_dataframe(start_date='2000-01-01', end_date='2019-12-31', num_symbols=num_symbols, squeeze=True, out=fn) #comment out after first run to re-use file 14 | df = cp.read_file(fn) 15 | print('\n------- Test Dataframe 
-------\n', df) 16 | 17 | cp.panda_mem_usage(df) 18 | 19 | 20 | #----- Create sample in 1% chunks (range 0 to 99) 21 | print('\n\n======== Applying 1% samples ========') 22 | 23 | print('Number of input rows: {:>13,.0f}'.format(len(df))) 24 | start = datetime.utcnow() 25 | df['sample'] = df['symbol'].apply(lambda x: cp.sample_id(string=x, samples=100)) 26 | end = datetime.utcnow() 27 | print('Time to apply 1% sample: '+str(end-start)[0:12]) 28 | 29 | 30 | #----- Show 1% distributions 31 | start = datetime.utcnow() 32 | total_rows = len(df) 33 | sample_rows = df.groupby(['sample']).size().values 34 | end = datetime.utcnow() 35 | print('Time to summarize: '+str(end-start)[0:12]) 36 | for i, rows in enumerate(sample_rows): 37 | print('{:>3d} {:>10,d} {:>6,.2f}%'.format(i, rows, 100*(rows/total_rows))) 38 | 39 | 40 | #---- Select a "rough" 1% sample of all sample keys equaling 99 41 | start = datetime.utcnow() 42 | df1 = df[df['sample'] == 99].reset_index(drop=True) 43 | end = datetime.utcnow() 44 | print('\n------- 1% sample -------\n', df1) 45 | print('Time to select: '+str(end-start)[0:12]) 46 | 47 | 48 | #---- Select a "rough" 3% sample of all sample keys equaling 0, 1, or 2 49 | start = datetime.utcnow() 50 | df1 = df[df['sample'].isin([0,1,2])].reset_index(drop=True) 51 | end = datetime.utcnow() 52 | print('\n------- 3% sample -------\n', df1) 53 | print('Time to select: '+str(end-start)[0:12]) 54 | 55 | 56 | #---- Select a "rough" 10% sample of all sample keys between and including 10 through 19 57 | start = datetime.utcnow() 58 | df1 = df[(df['sample'] >= 10) & (df['sample'] <= 19)].reset_index(drop=True) 59 | end = datetime.utcnow() 60 | print('\n------- 10% sample -------\n', df1) 61 | print('Time to select: '+str(end-start)[0:12]) 62 | 63 | --------------------------------------------------------------------------------
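For reference, here is a minimal end-to-end sketch of how the utilities above are meant to fit together: build and squeeze a test Dataframe, store and re-load it in a compressed columnar format, then fan an ordinary groupby/rolling enhancement function out across all cores with multiproc_dataframe. The add_indicator function and the demo.feather filename are made-up examples for this sketch only; the cp.* calls are the ones defined in caffeinated_pandas_utils.py.

import caffeinated_pandas_utils as cp

def add_indicator(df, bars):
    #----- hypothetical enhancement function: plain groupby/rolling code, no multiprocessing awareness needed
    df['sma'] = df.groupby('symbol', observed=True)['close'].rolling(bars).mean().reset_index(drop=True)
    return df

if __name__ == '__main__':  #--- guard required for multiprocessing

    #----- build a small test set, squeezed to downcast numerics and categorical strings
    df = cp.create_test_dataframe(start_date='2018-01-01', end_date='2019-12-31', num_symbols=50, squeeze=True)

    #----- write and re-read with a compressed columnar format (write_file defaults .feather to zstd)
    cp.write_file(df=df, fn='demo.feather')
    df = cp.read_file('demo.feather')

    #----- sort by the splitby column (and date) before splitting, then enhance across all cores
    df = df.sort_values(by=['symbol', 'date']).reset_index(drop=True)
    df = cp.multiproc_dataframe(function=add_indicator, df=df, bars=21, splitby=['symbol'])

    #----- report the memory footprint of the enhanced dataframe
    cp.panda_mem_usage(df)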