├── images └── ERD.png ├── ML models └── .ipynb_checkpoints │ └── Untitled-checkpoint.ipynb ├── Imbalanced Strategies └── Readme.md ├── main.py ├── README.md ├── environment_hc.yml ├── model.py └── Feature Engineering Strategies └── XGBoost_Automated Features.ipynb /images/ERD.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ali-ghorbani-k/Credit-Risk-Management/HEAD/images/ERD.png -------------------------------------------------------------------------------- /ML models/.ipynb_checkpoints/Untitled-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [], 3 | "metadata": {}, 4 | "nbformat": 4, 5 | "nbformat_minor": 4 6 | } 7 | -------------------------------------------------------------------------------- /Imbalanced Strategies/Readme.md: -------------------------------------------------------------------------------- 1 | # Different strategies for balancing dataset: 2 | 1. Clustering Undersampling of majority class (HClusteringUnderSampling.ipynb) 3 | 2. Random undersampling of majority class & Oversampling of minority class (RUndersample_SMOTE.ipynb) 4 | 3. 
Clustering undersampling of majority class & Oversampling of minority class (HClustering_USample_SMOTE.ipynb) 5 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from model import classifier 3 | 4 | parser = argparse.ArgumentParser(description='') 5 | parser.add_argument('--use_ftools', dest='use_ftools', default=False, action='store_true', help='use automated feature extraction with ft') 6 | parser.add_argument('--ft_maxdep', dest='ft_maxdep', default=1, type= int, help='Max depth of deep feature synthesis in feature tools') 7 | parser.add_argument('--use_cnnft', dest='use_cnnft', default=False, action='store_true', help='use cnn feature extraction method') 8 | parser.add_argument('--cnn_bsize', dest='cnn_bsize', default=256, type= int, help='batch_size when training cnn for feature extraction') 9 | parser.add_argument('--cnn_epoch', dest='cnn_epoch', default=100, type= int, help='number of epochs when training cnn for feature extraction') 10 | parser.add_argument('--use_rnnft', dest='use_rnnft', default=False, action='store_true', help='use rnn feature extraction method') 11 | parser.add_argument('--rnn_bsize', dest='rnn_bsize', default=256, type= int, help='batch_size when training cnn for feature extraction') 12 | parser.add_argument('--rnn_epoch', dest='rnn_epoch', default=100, type= int, help='number of epochs when training cnn for feature extraction') 13 | parser.add_argument('--resample', dest='resample', default=False, action='store_true', help='resample training dataset to get balanced positive/negative label ratio') 14 | parser.add_argument('--use_hclstr', dest='use_hclstr', default=False, action='store_true', help='use hierarchical clustering (undersampling) of majority class') 15 | parser.add_argument('--use_hclstrsmote', dest='use_hclstrsmote', default=False, action='store_true', help='hierarchical 
clustering (undersampling) of majority class & Oversampling of minority class') 16 | parser.add_argument('--nfolds', dest='nfolds', type=int, default=5, help='# of folds for cross-validation') 17 | parser.add_argument('--test_size', dest='test_size', type=float, default=0.05, help='test to train data ratio') 18 | parser.add_argument('--pca_n', dest='pca_n', type=int, default=150, help='number of pca components considered for training xgb') 19 | parser.add_argument('--lgbm', dest='lgbm', default=True, action='store_true', help='use lightGBM algorithm') 20 | parser.add_argument('--xgb', dest='xgb' , default=False, action='store_true', help='use XGBoost algorithm') 21 | parser.add_argument('--catb', dest='catb', default=False, action='store_true', help='use Catboost algorithm') 22 | parser.add_argument('--fcnn', dest='fcnn', default=False, action='store_true', help='use fully connected neural network') 23 | parser.add_argument('--batch_size', dest='batch_size', type=int, default=256, help='batch size for FCNN algorithm') 24 | parser.add_argument('--epoch', dest='epoch', type=int, default=10, help='number of epochs for FCNN algorithm') 25 | 26 | args = parser.parse_args() 27 | 28 | if __name__ == '__main__': 29 | model = classifier(args) 30 | model.train(args) 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Credit-Risk-Management 2 | Building an end-to-end machine learning model to predict the probability of paying back a loan by an applicant. 3 | 4 | # Problem Statement 5 | This is a supervised binary classification problem since the labels are provided in the application_train table (supervised), and the label is a binary variable with 0 (repaying the loan) and 1 (having difficulty repaying the loan). 6 | 7 | # Setup 8 | This code can be run with the following steps. This setup assumes you already have conda installed. 9 | 1. 
Create the conda environment: conda env create -f environment_hc.yml
2. Activate the environment: conda activate hc

The required files to run this project are main.py and model.py. Jupyter notebooks for each algorithm are provided for reference.

# Data Source
I have collected the data from kaggle that was provided by [Home Credit financial institution]( https://www.kaggle.com/c/home-credit-default-risk/data).

There are two main tables related to the current credit application:

* __application_train__: This table includes the information for each loan application represented by an id of loan (__SK_ID_CURR__). The application_train table includes a TARGET column (1 : client with payment difficulties: he/she had late payment more than X days on at least one of the first Y installments of the loan, 0 : the loan was repaid)

* __application_test__ : This table has the same columns as the application_train, but does not have a TARGET column. The TARGET column will be predicted by the Machine learning model and could be used in the kaggle competition. Application_test is not used in this project.

In addition to application_train that includes the current application information, there are two other sources of data related to each customer's historical transactions and records obtained from 1) Bureau 2) Home Credit, presented in the following tables:
1. __Bureau__:

   1. __Bureau__ : This table includes information for all client's previous credits provided by other financial institutions that were reported to the Credit Bureau. Each credit in the bureau table is represented by a bureau id (__SK_ID_BUREAU__) which is related to one id of loan application (__SK_ID_CURR__). One SK_ID_CURR can have 0, 1, 2 or more related previous credits (SK_ID_BUREAU) in the bureau table showing a one-to-many relationship.
   2. 
__Bureau_balance__ : This table includes information related to the monthly balance of previous credits in Credit Bureau. This table has one row for each month of history of every previous credit reported to Credit Bureau – i.e the table has (#loans in sample, #of relative previous credits, #of months where we have some history observable for the previous credits) rows. 28 | 29 | 2. __Home Credit__: 30 | 31 | 1. __previous_application__: This table includes all previous application at Home Credit which represented by an id of loan (__SK_ID_PREV__). One SK_ID_CURR can have 0,1,2 or more related previous credits (SK_ID_PREV) in previous_application table showing a one-to-many relationship. 32 | 33 | 2. __POS_CASH_BALANCE__: This table includes the monthly balance of previous point of sale (POS) with Home Credit. 34 | 35 | 3. __credit_card_balance__ : This table includes the monthly balance snapshots of previous credit cards that the applicant has with Home Credit 36 | 37 | 4. __installments_payments__ : This table includes repayment history for the previously disbursed credits related to the loans in the Home Credit database. 38 | 39 | Description of all the columns will be found in HomeCredit_columns_description.csv which is available in the provided link. 40 | The following Entity Relation Diagram (ERD) shows how different tables are related: 41 | 42 | ![ERD](images/ERD.png) 43 | 44 | # Preprocessing of data 45 | As shown in the above ERD each SK_ID_CURR is related to multiple SK_ID_BUREAU in Bureau tables and to multiple SK_ID_PREV in Home Credit tables. 46 | In order to develop a machine learning model, first we need to flatten out the database. It means aggregating the information from 2 bureau and 4 Home Credit tables to have one merged table. Each row in the final merged table represents one loan application (SK_ID_CURR). 47 | 48 | Three different strategies have been used in this project to flatten out the database: 49 | 50 | 1. 
__Manual feature engineering__: Manual feature engineering involves leveraging domain knowledge to extract useful features from data. With this strategy, we analyze each customer journey using bureau and Home Credit sources and extract useful information from previous loan life cycles.

2. __Automated feature engineering__: Automated feature engineering uses the [Featuretools](https://community.alteryx.com/t5/Data-Science/Feature-Engineering-Secret-to-Data-Science-Success/ba-p/545041) library to generate hundreds or thousands of new features. We have used one level of depth since this is computationally expensive.

3. __Deep learning__: The deep learning strategy employs a Convolutional Neural Network (CNN) and a Recurrent Neural Network (RNN) to extract new features from the data. The concept of using the power of CNNs for feature engineering is discussed [here](https://towardsdatascience.com/convolutional-neural-network-on-a-structured-bank-customer-data-358e6b8aa759)

# Imbalanced Dataset
The dataset of this problem is highly imbalanced with 91% of the data not defaulted and 9% defaulted. The challenge of working with an imbalanced dataset is that most machine learning algorithms perform poorly on the minority class, which is more important to detect in credit risk management. Two different strategies have been used to balance the positive and negative labels and their model performances are compared:

1. __Hierarchical clustering (undersampling) of majority class__: The idea is to undersample the majority class so that we end up having balanced data. We have ~27k positive training data (minority class) and ~300k negative data (majority class). Undersampling needs to be done in a fashion that the resulting majority class has a similar distribution to the original 300k, therefore, we do not lose information from the data. Hierarchical clustering (Agglomerative Clustering) has been conducted on the majority class with 27k clusters. 
In the end, the resampled dataset had 1:1 data with 27k for the positive and negative classes.

2. __Hierarchical clustering (undersampling) of majority class & Oversampling of minority class__: Combination of undersampling of the majority class (method one) to 50% of the total ratio with oversampling of the minority class (up to 1:1 minority/majority ratio) to get better performance. In this way, the majority class ratio decreases from 91% to 50% whereas the minority class size increases from 9% to 50%. Hierarchical clustering is used for undersampling and SMOTE is used for oversampling.

# Machine Learning Models:
We have tried boosted algorithms (XGBoost, LightGBM, Catboost) and a fully connected neural network (FCNN) in this project.
There are some technical differences in the application of the different algorithms that need to be noticed:

* __Handling missing data__: XGBoost, LightGBM, and Catboost can handle missing data, but for FCNN the missing values need to be imputed. A missing categorical variable is imputed by a new 'Not Available' category and a missing numerical feature is imputed by the average of that column in the training data (to avoid data leakage).

* __Categorical variables__: XGBoost and FCNN cannot handle categorical variables, therefore, one-hot encoding is performed on the categorical features. On the other hand, LightGBM and Catboost can handle categorical features (using the Fisher method), but the categorical features should be given to the algorithm to avoid errors. This is accomplished by encoding each category to a non-negative integer and saving it as type 'category' in [pandas](https://medium.com/swlh/dealing-with-categorical-variables-in-machine-learning-4401b949b093).

The hyperparameters of the boosted algorithms are chosen using Bayesian hyperparameter optimization (Hyperopt). 

# Performance Metric:

* __Precision, Recall, F1-Score__: In credit risk management the cost of misclassifying a 'defaulted applicant' as 'non defaulted' is very high and may cause a significant loss of money for the financial institution. Therefore, we need to significantly reduce the probability of approving a defaulted applicant (False Negative (type 2 error)). Therefore, Recall is the more important metric in this project due to the high risk of losing money. Low precision might result in losing a customer (False Positive), but does not harm the business drastically. I also have presented the F1-score that considers both precision and recall. However, when we are dealing with an imbalanced dataset such as in our case, if we don't balance the positive and negative labels, we need to be careful to choose a proper threshold for the classifier, different from the default 0.5 (0.09 could be a good choice in this problem since the positive/negative label ratio equals 0.09 in the training dataset).

* __Area Under ROC Curve (AUC)__: If we use the original imbalanced dataset, the ROC curve is the appropriate metric to visualize the performance of a binary classifier and the Area under ROC (AUC) is the best way to summarize the classifier performance in just one number. In this way, the True positive rate and False positive rate for different thresholds are presented and the area under this curve is the AUC.

* __Cohen's kappa__: is a more conservative metric that is used in industry as a performance metric of a classifier. Cohen's kappa is a measure of the agreement between two raters and is between 0 and 1. Cohen's kappa of 1 indicates perfect agreement between the raters and 0 indicates that any agreement is totally due to chance. 
80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /environment_hc.yml: -------------------------------------------------------------------------------- 1 | name: hc 2 | channels: 3 | - anaconda 4 | - conda-forge 5 | - defaults 6 | dependencies: 7 | - _anaconda_depends=2020.07=py37_0 8 | - _ipyw_jlab_nb_ext_conf=0.1.0=py37_0 9 | - _py-xgboost-mutex=2.0=cpu_0 10 | - _tflow_select=2.2.0=eigen 11 | - absl-py=0.10.0=py37_0 12 | - aiohttp=3.6.2=py37he774522_0 13 | - alabaster=0.7.12=py37_0 14 | - anaconda=custom=py37_1 15 | - anaconda-client=1.7.2=py37_0 16 | - anaconda-navigator=1.9.12=py37_0 17 | - anaconda-project=0.8.4=py_0 18 | - argh=0.26.2=py37_0 19 | - argon2-cffi=20.1.0=py37he774522_1 20 | - asn1crypto=1.4.0=py_0 21 | - astor=0.8.1=py37_0 22 | - astroid=2.4.2=py37_0 23 | - astropy=4.0.1.post1=py37he774522_1 24 | - async-timeout=3.0.1=py37_0 25 | - async_generator=1.10=py37h28b3542_0 26 | - atomicwrites=1.4.0=py_0 27 | - autopep8=1.5.4=py_0 28 | - babel=2.8.0=py_0 29 | - backcall=0.2.0=py_0 30 | - backports=1.0=py_2 31 | - backports.shutil_get_terminal_size=1.0.0=py37_2 32 | - basemap=1.2.0=py37h4e5d7af_0 33 | - basemap-data-hires=1.2.0=0 34 | - bcrypt=3.2.0=py37he774522_0 35 | - beautifulsoup4=4.9.3=pyhb0f4dca_0 36 | - bitarray=1.5.3=py37he774522_0 37 | - bkcharts=0.2=py37_0 38 | - blas=1.0=mkl 39 | - blinker=1.4=py37_0 40 | - blosc=1.20.0=h7bd577a_0 41 | - bokeh=2.2.1=py37_0 42 | - boto=2.49.0=py37_0 43 | - bottleneck=1.3.2=py37h2a96729_1 44 | - brotlipy=0.7.0=py37he774522_1000 45 | - bzip2=1.0.8=he774522_0 46 | - ca-certificates=2020.6.20=hecda079_0 47 | - cachetools=4.1.1=py_0 48 | - catboost=0.24=py37hc8dfbb8_0 49 | - certifi=2020.6.20=py37hf50a25e_2 50 | - cffi=1.14.3=py37h7a1dbc1_0 51 | - chardet=3.0.4=py37_1003 52 | - click=7.1.2=py_0 53 | - cloudpickle=1.6.0=py_0 54 | - clyent=1.2.2=py37_1 55 | - colorama=0.4.3=py_0 56 | - comtypes=1.1.7=py37_1001 57 | - 
conda=4.8.5=py37hf50a25e_2 58 | - conda-build=3.17.6=py37_0 59 | - conda-env=2.6.0=1 60 | - conda-package-handling=1.6.1=py37h62dcd97_0 61 | - conda-verify=3.1.1=py37_0 62 | - console_shortcut=0.1.1=4 63 | - contextlib2=0.6.0.post1=py_0 64 | - convertdate=2.1.3=py_1000 65 | - cryptography=3.1.1=py37h7a1dbc1_0 66 | - curl=7.71.1=h2a8f88b_1 67 | - cycler=0.10.0=py37_0 68 | - cython=0.29.21=py37ha925a31_0 69 | - cytoolz=0.11.0=py37he774522_0 70 | - dask=2.30.0=py_0 71 | - dask-core=2.30.0=py_0 72 | - decorator=4.4.2=py_0 73 | - defusedxml=0.6.0=py_0 74 | - diff-match-patch=20200713=py_0 75 | - distributed=2.30.0=py37_0 76 | - django=2.2.5=py37_1 77 | - docutils=0.16=py37_1 78 | - entrypoints=0.3=py37_0 79 | - ephem=3.7.7.1=py37he774522_0 80 | - et_xmlfile=1.0.1=py_1001 81 | - fastcache=1.1.0=py37he774522_0 82 | - fbprophet=0.6=py37h6538335_0 83 | - ffmpeg=4.2=ha925a31_0 84 | - filelock=3.0.12=py_0 85 | - findspark=1.3.0=py_1 86 | - flake8=3.8.4=py_0 87 | - flask=1.1.2=py_0 88 | - freetype=2.10.2=hd328e21_0 89 | - fsspec=0.8.0=py_0 90 | - future=0.18.2=py37_1 91 | - gast=0.2.2=py37_0 92 | - geos=3.6.2=h9ef7328_2 93 | - get_terminal_size=1.0.0=h38e98db_0 94 | - gevent=20.9.0=py37he774522_0 95 | - glob2=0.7=py_0 96 | - gmpy2=2.0.8=py37h0964b28_3 97 | - google-auth=1.22.1=py_0 98 | - google-auth-oauthlib=0.4.1=py_2 99 | - google-pasta=0.2.0=py_0 100 | - greenlet=0.4.17=py37he774522_0 101 | - grpcio=1.31.0=py37he7da953_0 102 | - h5py=2.10.0=py37h5e291fa_0 103 | - hdf5=1.10.4=h7ebc959_0 104 | - heapdict=1.0.1=py_0 105 | - holidays=0.10.1=py_0 106 | - html5lib=1.1=py_0 107 | - icc_rt=2019.0.0=h0cc432a_1 108 | - icu=58.2=ha925a31_3 109 | - idna=2.10=py_0 110 | - imageio=2.9.0=py_0 111 | - imagesize=1.2.0=py_0 112 | - imbalanced-learn=0.6.2=py_0 113 | - importlib_metadata=1.7.0=0 114 | - iniconfig=1.0.1=py_0 115 | - intel-openmp=2020.2=254 116 | - intervaltree=3.1.0=py_0 117 | - ipykernel=5.3.4=py37h5ca1d4c_0 118 | - ipython=7.18.1=py37h5ca1d4c_0 119 | - 
ipython_genutils=0.2.0=py37_0 120 | - ipywidgets=7.5.1=py_1 121 | - isort=5.5.4=py37_0 122 | - itsdangerous=1.1.0=py37_0 123 | - jdcal=1.4.1=py_0 124 | - jedi=0.17.1=py37_0 125 | - jinja2=2.11.2=py_0 126 | - joblib=0.17.0=py_0 127 | - jpeg=9b=hb83a4c4_2 128 | - json5=0.9.5=py_0 129 | - jsonschema=3.2.0=py37_1 130 | - jupyter=1.0.0=py37_7 131 | - jupyter_client=6.1.7=py_0 132 | - jupyter_console=6.2.0=py_0 133 | - jupyter_core=4.6.3=py37_0 134 | - jupyterlab=2.2.6=py_0 135 | - jupyterlab_pygments=0.1.2=py_0 136 | - jupyterlab_server=1.2.0=py_0 137 | - keras-applications=1.0.8=py_1 138 | - keras-preprocessing=1.1.0=py_1 139 | - keyring=21.4.0=py37_1 140 | - kiwisolver=1.2.0=py37h74a9793_0 141 | - krb5=1.18.2=hc04afaa_0 142 | - lazy-object-proxy=1.4.3=py37he774522_0 143 | - libarchive=3.4.2=h5e25573_0 144 | - libcurl=7.71.1=h2a8f88b_1 145 | - libiconv=1.15=h1df5818_7 146 | - liblief=0.10.1=ha925a31_0 147 | - libllvm9=9.0.1=h21ff451_0 148 | - libpng=1.6.37=h2a8f88b_0 149 | - libprotobuf=3.13.0=h200bbdf_0 150 | - libpython=2.1=py37_0 151 | - libsodium=1.0.18=h62dcd97_0 152 | - libspatialindex=1.9.3=h33f27b4_0 153 | - libssh2=1.9.0=h7a1dbc1_1 154 | - libtiff=4.1.0=h56a325e_1 155 | - libxcb=1.13=h301d43c_1002 156 | - libxgboost=0.90=1 157 | - libxml2=2.9.10=h464c3ec_1 158 | - libxslt=1.1.34=he774522_0 159 | - lightgbm=3.0.0=py37h1834ac0_1 160 | - llvmlite=0.34.0=py37h1a82afc_4 161 | - locket=0.2.0=py37_1 162 | - lunarcalendar=0.0.9=py_0 163 | - lxml=4.5.2=py37h1350720_0 164 | - lz4-c=1.9.2=h62dcd97_1 165 | - lzo=2.10=he774522_2 166 | - m2w64-binutils=2.25.1=5 167 | - m2w64-bzip2=1.0.6=6 168 | - m2w64-crt-git=5.0.0.4636.2595836=2 169 | - m2w64-gcc=5.3.0=6 170 | - m2w64-gcc-ada=5.3.0=6 171 | - m2w64-gcc-fortran=5.3.0=6 172 | - m2w64-gcc-libgfortran=5.3.0=6 173 | - m2w64-gcc-libs=5.3.0=7 174 | - m2w64-gcc-libs-core=5.3.0=7 175 | - m2w64-gcc-objc=5.3.0=6 176 | - m2w64-gmp=6.1.0=2 177 | - m2w64-headers-git=5.0.0.4636.c0ad18a=2 178 | - m2w64-isl=0.16.1=2 179 | - 
m2w64-libiconv=1.14=6 180 | - m2w64-libmangle-git=5.0.0.4509.2e5a9a2=2 181 | - m2w64-libwinpthread-git=5.0.0.4634.697f757=2 182 | - m2w64-make=4.1.2351.a80a8b8=2 183 | - m2w64-mpc=1.0.3=3 184 | - m2w64-mpfr=3.1.4=4 185 | - m2w64-pkg-config=0.29.1=2 186 | - m2w64-toolchain=5.3.0=7 187 | - m2w64-tools-git=5.0.0.4592.90b8472=2 188 | - m2w64-windows-default-manifest=6.4=3 189 | - m2w64-winpthreads-git=5.0.0.4634.697f757=2 190 | - m2w64-zlib=1.2.8=10 191 | - markdown=3.3=py37_0 192 | - markupsafe=1.1.1=py37hfa6e2cd_1 193 | - matplotlib=3.3.1=0 194 | - matplotlib-base=3.3.1=py37hba9282a_0 195 | - mccabe=0.6.1=py37_1 196 | - menuinst=1.4.16=py37he774522_1 197 | - mistune=0.8.4=py37hfa6e2cd_1001 198 | - mkl=2020.2=256 199 | - mkl-service=2.3.0=py37hb782905_0 200 | - mkl_fft=1.2.0=py37h45dec08_0 201 | - mkl_random=1.1.1=py37h47e9c7a_0 202 | - mock=4.0.2=py_0 203 | - mongodb=4.0.3=he170510_0 204 | - more-itertools=8.5.0=py_0 205 | - mpc=1.1.0=h7edee0f_1 206 | - mpfr=4.0.2=h62dcd97_1 207 | - mpir=3.0.0=hec2e145_1 208 | - mpmath=1.1.0=py37_0 209 | - msgpack-python=1.0.0=py37h74a9793_1 210 | - msys2-conda-epoch=20160418=1 211 | - multidict=4.7.6=py37he774522_1 212 | - multipledispatch=0.6.0=py37_0 213 | - navigator-updater=0.2.1=py37_0 214 | - nbclient=0.5.0=py_0 215 | - nbconvert=6.0.7=py37_0 216 | - nbformat=5.0.7=py_0 217 | - nest-asyncio=1.4.1=py_0 218 | - networkx=2.5=py_0 219 | - nltk=3.5=py_0 220 | - nose=1.3.7=py37_1004 221 | - notebook=6.1.4=py37_0 222 | - numba=0.51.2=py37hf9181ef_1 223 | - numexpr=2.7.1=py37h25d0782_0 224 | - numpy=1.19.1=py37h5510c5b_0 225 | - numpy-base=1.19.1=py37ha3acd2a_0 226 | - numpydoc=1.1.0=py_0 227 | - oauthlib=3.1.0=py_0 228 | - olefile=0.46=py37_0 229 | - openpyxl=3.0.5=py_0 230 | - openssl=1.1.1h=he774522_0 231 | - opt_einsum=3.1.0=py_0 232 | - packaging=20.4=py_0 233 | - pandas=1.1.3=py37ha925a31_0 234 | - pandoc=2.10.1=0 235 | - pandocfilters=1.4.2=py37_1 236 | - paramiko=2.7.2=py_0 237 | - parso=0.7.0=py_0 238 | - partd=1.1.0=py_0 239 
| - path=15.0.0=py37_0 240 | - path.py=12.5.0=0 241 | - pathlib2=2.3.5=py37_1 242 | - pathtools=0.1.2=py_1 243 | - patsy=0.5.1=py37_0 244 | - pep8=1.7.1=py37_0 245 | - pexpect=4.8.0=py37_1 246 | - pickleshare=0.7.5=py37_1001 247 | - pillow=7.2.0=py37hcc1f983_0 248 | - pip=20.2.3=py37_0 249 | - pkginfo=1.5.0.1=py37_0 250 | - plotly=4.5.2=py_0 251 | - pluggy=0.13.1=py37_0 252 | - ply=3.11=py37_0 253 | - powershell_shortcut=0.0.1=3 254 | - proj4=5.2.0=ha925a31_1 255 | - prometheus_client=0.8.0=py_0 256 | - prompt-toolkit=3.0.7=py_0 257 | - prompt_toolkit=3.0.7=0 258 | - protobuf=3.13.0=py37h33f27b4_1 259 | - psutil=5.7.2=py37he774522_0 260 | - pthread-stubs=0.3=h3c9f919_1 261 | - py=1.9.0=py_0 262 | - py-lief=0.10.1=py37ha925a31_0 263 | - py-xgboost=0.90=py37_1 264 | - pyasn1=0.4.8=py_0 265 | - pyasn1-modules=0.2.8=py_0 266 | - pycodestyle=2.6.0=py_0 267 | - pycosat=0.6.3=py37he774522_0 268 | - pycparser=2.20=py_2 269 | - pycrypto=2.6.1=py37he774522_10 270 | - pycurl=7.43.0.5=py37h7a1dbc1_0 271 | - pydocstyle=5.1.1=py_0 272 | - pydotplus=2.0.2=py37_1 273 | - pyflakes=2.2.0=py_0 274 | - pygments=2.7.1=py_0 275 | - pyjwt=1.7.1=py37_0 276 | - pylint=2.6.0=py37_0 277 | - pymongo=3.9.0=py37ha925a31_0 278 | - pynacl=1.4.0=py37h62dcd97_1 279 | - pyodbc=4.0.30=py37ha925a31_0 280 | - pyopenssl=19.1.0=py_1 281 | - pyparsing=2.4.7=py_0 282 | - pyproj=1.9.6=py37h6782396_0 283 | - pyqt=5.9.2=py37h6538335_2 284 | - pyreadline=2.1=py37_1 285 | - pyrsistent=0.17.3=py37he774522_0 286 | - pyshp=2.1.2=py_0 287 | - pysocks=1.7.1=py37_1 288 | - pystan=2.19.0.0=py37hcce6980_0 289 | - pytables=3.6.1=py37h1da0976_0 290 | - pytest=6.1.1=py37_0 291 | - python=3.7.7=h60c2a47_0_cpython 292 | - python-dateutil=2.8.1=py_0 293 | - python-jsonrpc-server=0.4.0=py_0 294 | - python-language-server=0.35.1=py_0 295 | - python-libarchive-c=2.9=py_0 296 | - python_abi=3.7=1_cp37m 297 | - pytz=2019.3=py_0 298 | - pywavelets=1.1.1=py37he774522_2 299 | - pywin32=227=py37he774522_1 300 | - 
pywin32-ctypes=0.2.0=py37_1001 301 | - pywinpty=0.5.7=py37_0 302 | - pyyaml=5.3.1=py37he774522_1 303 | - pyzmq=19.0.2=py37ha925a31_1 304 | - qdarkstyle=2.8.1=py_0 305 | - qt=5.9.7=vc14h73c81de_0 306 | - qtawesome=0.7.2=py_0 307 | - qtconsole=4.7.7=py_0 308 | - qtpy=1.9.0=py_0 309 | - regex=2020.7.14=py37he774522_0 310 | - requests=2.24.0=py_0 311 | - requests-oauthlib=1.3.0=py_0 312 | - retrying=1.3.3=py37_2 313 | - rope=0.17.0=py_0 314 | - rsa=4.6=py_0 315 | - rtree=0.9.4=py37h21ff451_1 316 | - ruamel_yaml=0.15.87=py37he774522_1 317 | - scikit-image=0.16.2=py37h47e9c7a_0 318 | - scikit-learn=0.23.2=py37h47e9c7a_0 319 | - scipy=1.5.2=py37h9439919_0 320 | - seaborn=0.11.0=py_0 321 | - send2trash=1.5.0=py37_0 322 | - setuptools=50.3.0=py37h9490d1a_1 323 | - simplegeneric=0.8.1=py37_2 324 | - singledispatch=3.4.0.3=py37_1000 325 | - sip=4.19.8=py37h6538335_0 326 | - six=1.15.0=py_0 327 | - snappy=1.1.8=h33f27b4_0 328 | - snowballstemmer=2.0.0=py_0 329 | - sortedcollections=1.2.1=py_0 330 | - sortedcontainers=2.2.2=py_0 331 | - soupsieve=2.0.1=py_0 332 | - sphinx=3.2.1=py_0 333 | - sphinxcontrib=1.0=py37_1 334 | - sphinxcontrib-applehelp=1.0.2=py_0 335 | - sphinxcontrib-devhelp=1.0.2=py_0 336 | - sphinxcontrib-htmlhelp=1.0.3=py_0 337 | - sphinxcontrib-jsmath=1.0.1=py_0 338 | - sphinxcontrib-qthelp=1.0.3=py_0 339 | - sphinxcontrib-serializinghtml=1.1.4=py_0 340 | - sphinxcontrib-websupport=1.2.4=py_0 341 | - spyder=4.1.5=py37_0 342 | - spyder-kernels=1.9.4=py37_0 343 | - sqlalchemy=1.3.19=py37he774522_0 344 | - sqlite=3.33.0=h2a8f88b_0 345 | - sqlparse=0.4.1=py_0 346 | - statsmodels=0.11.1=py37he774522_0 347 | - sympy=1.6.2=py37_0 348 | - tbb=2020.3=h74a9793_0 349 | - tblib=1.7.0=py_0 350 | - tensorboard=2.2.1=pyh532a8cf_0 351 | - tensorboard-plugin-wit=1.6.0=py_0 352 | - tensorflow=2.1.0=eigen_py37hd727fc0_0 353 | - tensorflow-base=2.1.0=eigen_py37h49b2757_0 354 | - tensorflow-estimator=2.1.0=pyhd54b08b_0 355 | - termcolor=1.1.0=py37_1 356 | - terminado=0.8.3=py37_0 
357 | - testpath=0.4.4=py_0 358 | - threadpoolctl=2.1.0=pyh5ca1d4c_0 359 | - tk=8.6.10=he774522_0 360 | - toml=0.10.1=py_0 361 | - toolz=0.11.1=py_0 362 | - tornado=6.0.4=py37he774522_1 363 | - tqdm=4.50.2=py_0 364 | - traitlets=5.0.4=py_0 365 | - typed-ast=1.4.1=py37he774522_0 366 | - typing_extensions=3.7.4.3=py_0 367 | - ujson=4.0.0=py37ha925a31_0 368 | - unicodecsv=0.14.1=py37_0 369 | - unidecode=1.1.1=py_0 370 | - urllib3=1.25.10=py_0 371 | - vc=14.1=h0510ff6_4 372 | - vs2015_runtime=14.16.27012=hf0eaf9b_3 373 | - watchdog=0.10.3=py37_0 374 | - wcwidth=0.2.5=py_0 375 | - webencodings=0.5.1=py37_1 376 | - werkzeug=0.16.1=py_0 377 | - wheel=0.35.1=py_0 378 | - widgetsnbextension=3.5.1=py37_0 379 | - win_inet_pton=1.1.0=py37_0 380 | - win_unicode_console=0.5=py37_0 381 | - wincertstore=0.2=py37_0 382 | - winpty=0.4.3=4 383 | - wrapt=1.11.2=py37he774522_0 384 | - xlrd=1.2.0=py37_0 385 | - xlsxwriter=1.3.6=py_0 386 | - xlwings=0.20.7=py37_0 387 | - xlwt=1.3.0=py37_0 388 | - xmltodict=0.12.0=py_0 389 | - xorg-kbproto=1.0.7=h301d43c_1002 390 | - xorg-libice=1.0.10=h301d43c_0 391 | - xorg-libsm=1.2.3=h301d43c_1000 392 | - xorg-libx11=1.6.12=h301d43c_0 393 | - xorg-libxau=1.0.9=h301d43c_0 394 | - xorg-libxdmcp=1.1.3=h301d43c_0 395 | - xorg-libxext=1.3.4=h301d43c_0 396 | - xorg-libxpm=3.5.13=h301d43c_0 397 | - xorg-libxrender=0.9.10=h301d43c_1002 398 | - xorg-libxt=1.1.5=h301d43c_1003 399 | - xorg-renderproto=0.11.1=h301d43c_1002 400 | - xorg-xextproto=7.3.0=h301d43c_1002 401 | - xorg-xproto=7.0.31=h301d43c_1007 402 | - xz=5.2.5=h62dcd97_0 403 | - yaml=0.2.5=he774522_0 404 | - yapf=0.30.0=py_0 405 | - yarl=1.6.0=py37he774522_0 406 | - zeromq=4.3.2=ha925a31_3 407 | - zict=2.0.0=py_0 408 | - zipp=3.3.0=py_0 409 | - zlib=1.2.11=h62dcd97_4 410 | - zope=1.0=py37_1 411 | - zope.event=4.4=py37_0 412 | - zope.interface=5.1.2=py37he774522_0 413 | - zstd=1.4.5=h04227a9_0 414 | - pip: 415 | - appdirs==1.4.4 416 | - attrs==18.1.0 417 | - bleach==3.1.5 418 | - configargparse==1.2.3 
419 | - coursera-dl==0.11.5 420 | - edx-dl==0.1.13 421 | - featuretools==0.16.0 422 | - gower==0.0.5 423 | - hyperopt==0.2.4 424 | - importlib-metadata==2.1.1 425 | - mlxtend==0.17.2 426 | - notebook-as-pdf==0.4.0 427 | - py4j==0.10.9 428 | - pyee==8.1.0 429 | - pynisher==0.5.0 430 | - pypdf2==1.26.0 431 | - pyppeteer==0.2.5 432 | - pyprind==2.11.2 433 | - pyspark==3.0.1 434 | - swifter==0.305 435 | - websockets==8.1 436 | - youtube-dl==2020.3.8 437 | prefix: C:\Users\New User\Anaconda3 438 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | from matplotlib import pyplot as plt 4 | from functools import reduce 5 | import gc 6 | import featuretools as ft 7 | from sklearn.preprocessing import LabelEncoder 8 | from sklearn.model_selection import train_test_split 9 | from sklearn.pipeline import make_pipeline, Pipeline 10 | from sklearn.preprocessing import StandardScaler 11 | from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, cross_val_score 12 | from sklearn.decomposition import PCA 13 | from xgboost import XGBClassifier 14 | from lightgbm import LGBMClassifier 15 | from catboost import CatBoostClassifier 16 | from sklearn.metrics import confusion_matrix, roc_curve, roc_auc_score, precision_recall_curve, cohen_kappa_score 17 | from tensorflow.keras.models import Model, Sequential 18 | from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization, Conv2D, MaxPooling2D, Flatten, LSTM 19 | from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint 20 | from tensorflow.keras.utils import to_categorical 21 | from sklearn.cluster import AgglomerativeClustering 22 | from imblearn.over_sampling import SMOTE 23 | import warnings 24 | from sklearn.exceptions import ConvergenceWarning 25 | warnings.simplefilter(action='ignore', category=ConvergenceWarning) 
26 | from pdb import set_trace as st 27 | 28 | 29 | class classifier: 30 | 31 | def __init__(self, args): 32 | self.ft_maxdep = args.ft_maxdep 33 | self.cnn_bsize = args.cnn_bsize 34 | self.cnn_epoch = args.cnn_epoch 35 | self.rnn_bsize = args.rnn_bsize 36 | self.rnn_epoch = args.rnn_epoch 37 | self.pca_n = args.pca_n 38 | self.nfolds = args.nfolds 39 | self.test_size = args.test_size 40 | self.batch_size = args.batch_size 41 | self.epoch = args.epoch 42 | 43 | if args.use_ftools: 44 | self.data = self.automated_features 45 | elif args.use_cnnft: 46 | self.data = self.bureau_cnn_features() 47 | elif args.use_rnnft: 48 | self.data = self.bureau_rnn_features() 49 | else: 50 | self.data = self.merge_tables() 51 | 52 | def hc_prv_app(self): 53 | """ 54 | This method reads previous_application table that includes the recorded 55 | previous credits at Home Credit financial institution, perform manually feature engineering, 56 | flatten multiple loans, and returns the statistics related to each application SK_ID_CURR. 57 | """ 58 | 59 | print('Processing previous_application table related to Home Credit source...') 60 | prev = pd.read_csv('data/previous_application.csv') 61 | 62 | # ------------------------------Feature Engineering (1): General --------------------------- 63 | # When was the last application applied and contract status? 64 | prev1 = prev.sort_values('DAYS_DECISION', ascending=False). \ 65 | groupby(['SK_ID_CURR']).agg( 66 | {'DAYS_DECISION': 'first', 'NAME_CONTRACT_STATUS': 'first', 'AMT_CREDIT': 'first'}) 67 | # last credit amount, interest rates, ... 
    def hc_prv_app(self):
        """
        Summarise previous_application (prior Home Credit loans) per applicant.

        Reads data/previous_application.csv, builds three hand-crafted
        feature groups, and returns one row of statistics per SK_ID_CURR:
        (1) latest application decision/status/amount plus details of the
        most recent *approved* credit, (2) the per-applicant ratio of
        Approved / Refused / Canceled / Unused-offer applications, and
        (3) the latest approved credit time and amount per contract type.
        """

        print('Processing previous_application table related to Home Credit source...')
        prev = pd.read_csv('data/previous_application.csv')

        # ------------------------------Feature Engineering (1): General ---------------------------
        # Most recent application per applicant: 'first' after sorting
        # DAYS_DECISION descending picks the latest decision.
        prev1 = prev.sort_values('DAYS_DECISION', ascending=False). \
            groupby(['SK_ID_CURR']).agg(
            {'DAYS_DECISION': 'first', 'NAME_CONTRACT_STATUS': 'first', 'AMT_CREDIT': 'first'})
        # Details of the most recent *approved* credit (amounts, yield group,
        # insurance flag, interest rates).
        df = prev[prev['NAME_CONTRACT_STATUS'] == 'Approved'] \
            .sort_values('DAYS_DECISION', ascending=False).groupby('SK_ID_CURR').first()
        df = df[['NAME_CONTRACT_TYPE', 'AMT_APPLICATION', 'AMT_CREDIT', 'AMT_DOWN_PAYMENT',
                 'NAME_YIELD_GROUP', 'NFLAG_INSURED_ON_APPROVAL', 'RATE_INTEREST_PRIMARY', 'RATE_INTEREST_PRIVILEGED']]
        # Gap between what was asked for and what was granted.
        # (Column name keeps the original spelling to stay schema-compatible.)
        df['Diff_applied_apprved'] = df['AMT_APPLICATION'] - df['AMT_CREDIT']
        prev1 = prev1.merge(df, on='SK_ID_CURR', how='outer')

        # ----Feature Engineering (2): Ratios of Approved, Refused, Canceled, and Unused offer for each application-----
        # Count applications per (applicant, status), then normalise within
        # each applicant so the four columns sum to 1.
        df = prev.groupby(['SK_ID_CURR', 'NAME_CONTRACT_STATUS']).agg({'SK_ID_PREV': 'count'})
        df = df.groupby(level='SK_ID_CURR').apply(lambda x: x / x.sum())
        # Slice one status out of the MultiIndex and drop the status level so
        # the four series can be merged back on SK_ID_CURR alone.
        approved = df[df.index.isin(['Approved'], level='NAME_CONTRACT_STATUS')]['SK_ID_PREV']
        approved.index = approved.index.droplevel('NAME_CONTRACT_STATUS')
        refused = df[df.index.isin(['Refused'], level='NAME_CONTRACT_STATUS')]['SK_ID_PREV']
        refused.index = refused.index.droplevel('NAME_CONTRACT_STATUS')
        canceled = df[df.index.isin(['Canceled'], level='NAME_CONTRACT_STATUS')]['SK_ID_PREV']
        canceled.index = canceled.index.droplevel('NAME_CONTRACT_STATUS')
        unused = df[df.index.isin(['Unused offer'], level='NAME_CONTRACT_STATUS')]['SK_ID_PREV']
        unused.index = unused.index.droplevel('NAME_CONTRACT_STATUS')
        data_frames = [approved, refused, canceled, unused]
        # Outer merge keeps applicants that lack one or more statuses (NaN).
        df = reduce(lambda left, right: pd.merge(left, right, on='SK_ID_CURR', how='outer'), data_frames)
        df.columns = ['ratio_approved', 'ratio_refused', 'ratio_canceled', 'ratio_unused']
        prev1 = prev1.merge(df, on='SK_ID_CURR', how='outer')

        # -----Feature Engineering (3): Latest credit time and amount for the approved different NAME_CONTRACT_TYPE ----
        df = prev[prev['NAME_CONTRACT_STATUS'] == 'Approved'].sort_values('DAYS_DECISION', ascending=False). \
            groupby(['SK_ID_CURR', 'NAME_CONTRACT_TYPE']).agg({'DAYS_DECISION': 'first', 'AMT_CREDIT': 'first'})
        # Same slice-and-droplevel pattern, once per contract type.
        consumer_loans = df[df.index.isin(['Consumer loans'], level='NAME_CONTRACT_TYPE')][
            ['DAYS_DECISION', 'AMT_CREDIT']]
        consumer_loans.index = consumer_loans.index.droplevel('NAME_CONTRACT_TYPE')
        cash_loans = df[df.index.isin(['Cash loans'], level='NAME_CONTRACT_TYPE')][['DAYS_DECISION', 'AMT_CREDIT']]
        cash_loans.index = cash_loans.index.droplevel('NAME_CONTRACT_TYPE')
        revolving_loans = df[df.index.isin(['Revolving loans'], level='NAME_CONTRACT_TYPE')][
            ['DAYS_DECISION', 'AMT_CREDIT']]
        revolving_loans.index = revolving_loans.index.droplevel('NAME_CONTRACT_TYPE')
        dfs = [consumer_loans, cash_loans, revolving_loans]
        df = reduce(lambda left, right: pd.merge(left, right, on='SK_ID_CURR', how='outer'), dfs)
        df.columns = ['Days_consumerloans', 'AMT_credit_consumerloans', 'Days_cashloans', 'AMT_credit_cashloans',
                      'Days_revolvingloans', 'AMT_credit_revolvingloans']
        prev1 = prev1.merge(df, on='SK_ID_CURR', how='outer')
        # Free the large raw table promptly.
        del prev, df
        gc.collect()
        return prev1
    def hc_credit_card(self):
        """
        Summarise credit_card_balance (prior Home Credit card history) per applicant.

        Reads data/credit_card_balance.csv, derives over-limit/delinquency
        flags and a month-over-month balance change per card, then returns
        one row per SK_ID_CURR with aggregate counts plus the last four
        monthly snapshots flattened into lagged columns.
        """

        print('Processing credit_card_balance table related to Home Credit source...')
        ccb = pd.read_csv('data/credit_card_balance.csv')
        # 1 when the balance exceeds the card's credit limit that month.
        ccb['Beyond_limit'] = np.where(ccb['AMT_BALANCE'] > ccb['AMT_CREDIT_LIMIT_ACTUAL'], 1, 0)
        # 1 when the payment made fell short of the minimum instalment due.
        ccb['Delinquent'] = np.where(ccb['AMT_PAYMENT_CURRENT'] < ccb['AMT_INST_MIN_REGULARITY'], 1, 0)

        # Percentage change of credit card balance between two successive months.
        # +1 in the denominator avoids division by zero; values are capped at 30
        # to tame extreme jumps from near-zero balances.
        def _pct_diff(group):
            group['balance_pct_change'] = group['AMT_BALANCE'].diff() / (group['AMT_BALANCE'].shift(1) + 1)
            group['balance_pct_change'] = np.where(group['balance_pct_change'] > 30, 30, group['balance_pct_change'])
            return group

        # Sort by month first so diff()/shift() inside each card (SK_ID_PREV)
        # compare consecutive months.
        ccb = ccb.sort_values('MONTHS_BALANCE').groupby('SK_ID_PREV').apply(_pct_diff)
        # How many times the card utilisation reached 80% or more.
        ccb['times_bal80'] = np.where(ccb['AMT_BALANCE'] / ccb['AMT_CREDIT_LIMIT_ACTUAL'] >= 0.8, 1, 0)
        # Cards active in the most recent month (MONTHS_BALANCE == -1) and the
        # outstanding balance carried on them.
        ccb['has_cc_now'] = np.where(ccb['MONTHS_BALANCE'] == -1, 1, 0)
        ccb['balance_now'] = ccb['has_cc_now'] * ccb['AMT_BALANCE']
        stats = ccb.groupby('SK_ID_CURR', as_index=False).agg(
            {'has_cc_now': 'sum', 'balance_now': 'sum', 'times_bal80': 'sum'})
        # Flatten the last four monthly snapshots per applicant into lag columns
        # (nth(0) = most recent month, nth(3) = three months earlier).
        cols = ['MONTHS_BALANCE', 'Beyond_limit', 'Delinquent', 'balance_pct_change']
        cclast = ccb.sort_values('MONTHS_BALANCE', ascending=False).groupby(['SK_ID_CURR'])[cols].nth(0)
        cclag1 = ccb.sort_values('MONTHS_BALANCE', ascending=False).groupby(['SK_ID_CURR'])[cols].nth(1)
        cclag2 = ccb.sort_values('MONTHS_BALANCE', ascending=False).groupby(['SK_ID_CURR'])[cols].nth(2)
        cclag3 = ccb.sort_values('MONTHS_BALANCE', ascending=False).groupby(['SK_ID_CURR'])[cols].nth(3)
        dfs = [cclast, cclag1, cclag2, cclag3]
        # Concatenate side by side on the SK_ID_CURR index.
        lags = reduce(lambda left, right: pd.concat([left, right], axis=1, sort=False), dfs)
        columns = []
        for i in range(4):
            columns += ['MONTHS_BALANCE' + str(i), 'Beyond_limit' + str(i), 'Delinquent' + str(i),
                        'balance_pct_change' + str(i)]
        lags.columns = columns
        stats = stats.merge(lags, on='SK_ID_CURR', how='outer')
        del lags, ccb
        gc.collect()
        return stats
    def hc_installment(self):
        """
        Summarise installments_payments (prior Home Credit instalments) per applicant.

        Flags late payments, computes the shortfall between scheduled and
        actual instalment amounts, and returns aggregate statistics (sum,
        mean, most-recent value) keyed by SK_ID_CURR.
        """

        print('Processing installments table related to Home Credit source...')
        insta = pd.read_csv('data/installments_payments.csv')
        # DAYS_INSTALMENT: due date; DAYS_ENTRY_PAYMENT: actual payment date.
        # Delinquent (1) when the payment landed after the due date.
        insta['insta_delinquency'] = np.where(insta['DAYS_INSTALMENT'] >= insta['DAYS_ENTRY_PAYMENT'], 0, 1)
        # Positive = underpaid that instalment, negative = overpaid.
        insta['insta_debt'] = insta['AMT_INSTALMENT'] - insta['AMT_PAYMENT']
        # Sorting descending first makes 'first' the most recent instalment.
        stats = insta.sort_values('DAYS_INSTALMENT', ascending=False).groupby(['SK_ID_CURR']).agg(
            {'DAYS_INSTALMENT': 'first', 'insta_debt': ['sum', 'mean', 'first'], 'insta_delinquency': ['sum', 'first']})
        # Collapse the (column, agg) MultiIndex into flat names like 'insta_debt_sum'.
        stats.columns = stats.columns.map('_'.join)
        del insta
        gc.collect()
        return stats

    def hc_pos_cash(self):
        """
        Summarise POS_CASH_balance (prior Home Credit point-of-sale/cash loans).

        Flattens the four most recent monthly snapshots (months-balance and
        days-past-due columns) into lagged columns, one row per SK_ID_CURR.
        """

        print('Processing POS_CASH_balance table related to Home Credit source...')
        pc = pd.read_csv('data/POS_CASH_balance.csv')
        # Sort once, descending, so nth(0) is the most recent month.
        pc = pc.sort_values('MONTHS_BALANCE', ascending=False)
        cols = ['MONTHS_BALANCE', 'SK_DPD', 'SK_DPD_DEF']
        pos0 = pc.groupby(['SK_ID_CURR'])[cols].first()
        pos1 = pc.groupby(['SK_ID_CURR'])[cols].nth(1)
        pos2 = pc.groupby(['SK_ID_CURR'])[cols].nth(2)
        pos3 = pc.groupby(['SK_ID_CURR'])[cols].nth(3)
        data_frames = [pos0, pos1, pos2, pos3]
        # Side-by-side concat on the SK_ID_CURR index; applicants with fewer
        # than 4 months get NaN in the missing lag columns.
        poslag = reduce(lambda left, right: pd.concat([left, right], axis=1, sort=False), data_frames)
        columns = []
        for i in range(4):
            columns += ['MONTHS_BALANCE' + str(i), 'SK_DPD' + str(i), 'SK_DPD_DEF' + str(i)]
        poslag.columns = columns
        del pc, pos0, pos1, pos2, pos3
        gc.collect()
        return poslag
print('Processing application_train table for the current loan application') 209 | train = pd.read_csv('data/application_train.csv') 210 | # Delete four applications with XNA CODE_GENDER (train set) 211 | train = train[train['CODE_GENDER'] != 'XNA'] 212 | # Replace DAYS_EMPLOYED = 365243 by nan 213 | train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace=True) 214 | # Feature engineering 215 | train['Days_employed_age'] = train['DAYS_EMPLOYED'] / train['DAYS_BIRTH'] 216 | train['Credit_income_ratio'] = train['AMT_CREDIT'] / train['AMT_INCOME_TOTAL'] 217 | train['Anuity_income_ratio'] = train['AMT_ANNUITY'] / train['AMT_INCOME_TOTAL'] 218 | train['Income_per_person'] = train['AMT_INCOME_TOTAL'] / train['CNT_FAM_MEMBERS'] 219 | # length of the payment in months since the annuity is the monthly amount due 220 | train['Credit_term'] = train['AMT_ANNUITY'] / train['AMT_CREDIT'] 221 | return train 222 | 223 | def bureau(self): 224 | 225 | """ 226 | This method reads bureau and bureau_balance tables that includes the recorded 227 | at Bureau, perform manually feature engineering,flatten transactions, and 228 | returns the statistics related to each application SK_ID_CURR. 
229 | """ 230 | 231 | print('Processing bureau and bureau balance tables...') 232 | bureau = pd.read_csv('data/bureau.csv') 233 | 234 | # ------------------------------------Feature Engineering (1): General --------------------------------------- 235 | bureau['Days_early_paidoff'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT'] 236 | bureau['Duration_real'] = bureau['DAYS_ENDDATE_FACT'] - bureau['DAYS_CREDIT'] 237 | bureau['Duration_planned'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_CREDIT'] 238 | # Replace data with Duration_planned = 0 with 1 to avoide devision by zero 239 | bureau['Duration_planned'].replace({0: 1}, inplace=True) 240 | # Weighted sum of credit a person borrowed (per days) 241 | bureau['AMT_weightdebt_duration'] = bureau['AMT_CREDIT_SUM_DEBT'] / bureau['Duration_planned'] 242 | # 'AMT_CREDIT_SUM_OVERDUE': Current amount overdue on credit 243 | bureau['AMT_Overdue_duration'] = bureau['AMT_CREDIT_SUM_OVERDUE'] / bureau['Duration_planned'] 244 | # Maximal amount overdue so far 245 | bureau['AMT_Maxoverdue_duration'] = bureau['AMT_CREDIT_MAX_OVERDUE'] / bureau['Duration_planned'] 246 | # Defaulted: CREDIT_DAY_OVERDUE > 270 days is considered defaluted 247 | bureau['Defaulted'] = np.where(bureau['CREDIT_DAY_OVERDUE'] > 270, 1, 0) 248 | bureau['AMT_defaulted'] = bureau['Defaulted'] * bureau['AMT_CREDIT_SUM_DEBT'] 249 | # Encoding CREDIT_ACTIVE ('Closed','Active') to (0,1) 250 | mapping = {'Closed': 0, 'Active': 1} 251 | bureau['CREDIT_ACTIVE'] = bureau['CREDIT_ACTIVE'].map(mapping) 252 | # Flatten manual features with aggregations 253 | stats = bureau.sort_values('DAYS_CREDIT', ascending=False).groupby('SK_ID_CURR') \ 254 | .agg({'AMT_CREDIT_SUM_DEBT': ['count', 'sum', 'mean'], 255 | 'AMT_weightdebt_duration': ['sum', 'mean'], 256 | 'AMT_Overdue_duration': ['sum', 'mean'], 257 | 'AMT_Maxoverdue_duration': ['mean'], 258 | 'Days_early_paidoff': ['sum', 'mean'], 259 | 'Defaulted': ['sum', 'mean'], 260 | 'AMT_defaulted': ['sum', 
'mean'], 261 | 'CREDIT_ACTIVE': 'sum'}) 262 | stats.columns = stats.columns.map('_'.join) 263 | # Flatten last four stats for each applicant (nth() method does not work with .agg method) 264 | columns = ['SK_ID_CURR', 'DAYS_CREDIT', 'AMT_CREDIT_SUM_DEBT', 'AMT_weightdebt_duration', 265 | 'AMT_Overdue_duration', \ 266 | 'Days_early_paidoff', 'Defaulted', 'AMT_defaulted'] 267 | stats0 = bureau.sort_values('DAYS_CREDIT', ascending=False)[columns].groupby('SK_ID_CURR').nth(0) 268 | stats1 = bureau.sort_values('DAYS_CREDIT', ascending=False)[columns].groupby('SK_ID_CURR').nth(1) 269 | stats2 = bureau.sort_values('DAYS_CREDIT', ascending=False)[columns].groupby('SK_ID_CURR').nth(2) 270 | stats3 = bureau.sort_values('DAYS_CREDIT', ascending=False)[columns].groupby('SK_ID_CURR').nth(3) 271 | data_frames = [stats0, stats1, stats2, stats3] 272 | lags = reduce(lambda left, right: pd.concat([left, right], axis=1, sort=False), data_frames) 273 | col = [] 274 | for i in range(4): 275 | col += ['DAYS_CREDIT' + str(i), 'AMT_CREDIT_SUM_DEBT' + str(i), 'AMT_weightdebt_duration' + str(i), \ 276 | 'AMT_Overdue_duration' + str(i), 'Days_early_paidoff' + str(i), 'Defaulted' + str(i), 277 | 'AMT_defaulted' + str(i)] 278 | lags.columns = col 279 | stats = pd.merge(stats, lags, on='SK_ID_CURR', how='outer') 280 | 281 | # ------------------------Feature Engineering (2): Loan cycle life for different CREDIT_TYPE-------------------- 282 | # Select 6 categories of loans with the highest frequency 283 | filter = (bureau['CREDIT_TYPE'] == 'Mortgage') | (bureau['CREDIT_TYPE'] == 'Real estate loan') \ 284 | | (bureau['CREDIT_TYPE'] == 'Car loan') \ 285 | | (bureau['CREDIT_TYPE'] == 'Loan for business development') \ 286 | | (bureau['CREDIT_TYPE'] == 'Loan for the purchase of equipment') \ 287 | | (bureau['CREDIT_TYPE'] == 'Cash loan (non-earmarked)') \ 288 | | (bureau['CREDIT_TYPE'] == 'Loan for working capital replenishment') 289 | btype = bureau[filter].copy() 290 | # first(DAYS_CREDIT) is when 
the last credit of each credit type applied and 291 | # last(DAYS_CREDIT) is when the first onces applied. 292 | bt_stats = btype.sort_values('DAYS_CREDIT', ascending=False).groupby(['SK_ID_CURR', 'CREDIT_TYPE']) \ 293 | .agg({'DAYS_CREDIT': ['first', 'last'], 'AMT_CREDIT_SUM_DEBT': ['count', 'sum', 'mean'], \ 294 | 'CREDIT_ACTIVE': 'sum', 'Defaulted': 'sum', 'AMT_defaulted': ['sum']}) 295 | bt_stats.columns = bt_stats.columns.map('_'.join) 296 | mortgage = bt_stats[bt_stats.index.isin(['Mortgage'], level='CREDIT_TYPE')] 297 | mortgage.index = mortgage.index.droplevel('CREDIT_TYPE') 298 | realestate = bt_stats[bt_stats.index.isin(['Real estate loan'], level='CREDIT_TYPE')] 299 | realestate.index = realestate.index.droplevel('CREDIT_TYPE') 300 | carloan = bt_stats[bt_stats.index.isin(['Car loan'], level='CREDIT_TYPE')] 301 | carloan.index = carloan.index.droplevel('CREDIT_TYPE') 302 | loanbusiness = bt_stats[bt_stats.index.isin(['Loan for business development'], level='CREDIT_TYPE')] 303 | loanbusiness.index = loanbusiness.index.droplevel('CREDIT_TYPE') 304 | loanpurchase = bt_stats[bt_stats.index.isin(['Loan for the purchase of equipment'], level='CREDIT_TYPE')] 305 | loanpurchase.index = loanpurchase.index.droplevel('CREDIT_TYPE') 306 | cashloan = bt_stats[bt_stats.index.isin(['Cash loan (non-earmarked)'], level='CREDIT_TYPE')] 307 | cashloan.index = cashloan.index.droplevel('CREDIT_TYPE') 308 | workingloan = bt_stats[bt_stats.index.isin(['Loan for working capital replenishment'], level='CREDIT_TYPE')] 309 | workingloan.index = workingloan.index.droplevel('CREDIT_TYPE') 310 | dataframes = [mortgage, realestate, carloan, loanbusiness, loanpurchase, cashloan, workingloan] 311 | credit_type = reduce(lambda left, right: pd.merge(left, right, on='SK_ID_CURR', how='outer'), dataframes) 312 | types = ['mortgage', 'realestate', 'carloan', 'loanbusiness', 'loanpurchase', 'cashloan', 'workingloan'] 313 | columns = [] 314 | for s in types: 315 | columns += 
['DAYS_CREDIT_first_' + s, 'DAYS_CREDIT_last_' + s, 'AMT_CREDIT_SUM_DEBT_count_' + s, \ 316 | 'AMT_CREDIT_SUM_DEBT_sum_' + s, 'AMT_CREDIT_SUM_DEBT_mean_' + s, \ 317 | 'CREDIT_ACTIVE_sum_x_' + s, 'Defaulted_sum_x_' + s, 'AMT_defaulted_sum_' + s] 318 | credit_type.columns = columns 319 | stats.merge(credit_type, on='SK_ID_CURR', how='outer') 320 | 321 | # -----------------------------Feature Engineering (3): Loan cycle life for Credit Card------------------------- 322 | ccdebt_bureau = bureau[bureau['CREDIT_TYPE'] == 'Credit card'].sort_values('DAYS_CREDIT', ascending=False) \ 323 | .groupby('SK_ID_CURR').agg({'AMT_CREDIT_SUM_DEBT': ['count', 'sum', 'mean', 'first']}) 324 | ccdebt_bureau.columns = ccdebt_bureau.columns.map('_'.join) 325 | stats = stats.merge(ccdebt_bureau, on='SK_ID_CURR', how='outer') 326 | # ------------------------------Feature Engineering (4): Bureau Balance Table----------------------------------- 327 | bureaubal = pd.read_csv('data/bureau_balance.csv') 328 | # When did the credit closed? When was the last delinquency for each bureau credit? 329 | # Last close is obtained by first 0 in MONTHS_BALANCE, last delinquency is obtained from first 1. 330 | bbalance = bureaubal.groupby(['SK_ID_BUREAU', 'STATUS'], as_index=False).first() 331 | left = bbalance[(bbalance['STATUS'] == '0')][['SK_ID_BUREAU', 'MONTHS_BALANCE']] 332 | right = bbalance[(bbalance['STATUS'] == '1')][['SK_ID_BUREAU', 'MONTHS_BALANCE']] 333 | bbalance = pd.merge(left, right, on='SK_ID_BUREAU', how='left') 334 | bbalance.columns = ['SK_ID_BUREAU', 'Months_latest_open', 'Months_latest_delin'] 335 | # Delinquency ratios: how often each bureau delayed? 
336 | delinquency = pd.get_dummies(bureaubal[(bureaubal['STATUS'] != 'X') & (bureaubal['STATUS'] != 'C')]) 337 | delinquency = delinquency.groupby('SK_ID_BUREAU').agg( 338 | {'STATUS_0': 'mean', 'STATUS_1': 'mean', 'STATUS_2': 'mean', 'STATUS_3': 'mean', 339 | 'STATUS_4': 'mean', 'STATUS_5': 'mean'}) 340 | bbalance = bbalance.merge(delinquency, on='SK_ID_BUREAU', how='inner') 341 | # Add SK_ID_CURR to bbalance dataframe 342 | bbalance = bureau[['SK_ID_CURR', 'SK_ID_BUREAU']].merge(bbalance, on='SK_ID_BUREAU', how='inner') 343 | # pick the latest open SK_ID_Bureau 344 | bbalance = bbalance.sort_values('Months_latest_open', ascending=False).groupby('SK_ID_CURR').first() 345 | # merge with stats 346 | stats = stats.merge(bbalance, on='SK_ID_CURR', how='outer') 347 | del bbalance, bureau, bureaubal, bt_stats, ccdebt_bureau 348 | gc.collect() 349 | return stats 350 | 351 | def merge_tables(self): 352 | ''' 353 | This method merges the tables from bureau and home credit sources with the application_train table, 354 | and return one row for each SK_ID_CURR. Automated feature engineering and deep learning feature extraction 355 | are not included in this method. 356 | ''' 357 | 358 | prev_home = self.hc_prv_app() 359 | prev_home = prev_home.merge(self.hc_credit_card(), on='SK_ID_CURR', how='outer') 360 | prev_home = prev_home.merge(self.hc_installment(), on='SK_ID_CURR', how='outer') 361 | prev_home = prev_home.merge(self.hc_pos_cash(), on='SK_ID_CURR', how='outer') 362 | train = self.application_train() 363 | train = train.merge(self.bureau(), on='SK_ID_CURR', how='left') 364 | train = train.merge(prev_home, on='SK_ID_CURR', how='left') 365 | # train.to_csv('merged_tables.csv') 366 | del prev_home 367 | gc.collect() 368 | return train 369 | 370 | def automated_features(self): 371 | ''' 372 | This method performs automated feature engineering using feature tools package and returns a dataframe 373 | with added new features from all the tables. 
    def automated_features(self):
        """
        Automated feature engineering with the featuretools package.

        Loads all seven raw tables, wires them into an EntitySet with the
        natural parent/child relationships, and runs deep feature synthesis
        with depth ``self.ft_maxdep`` (typically 1 or 2). The synthesised
        features are appended to the original application_train columns.

        Returns:
            pd.DataFrame: application_train rows plus synthesised features.
        """

        print('Performing automated feature engineering using feature tools')
        train = pd.read_csv('data/application_train.csv')
        bureau = pd.read_csv('data/bureau.csv')
        bureaubal = pd.read_csv('data/bureau_balance.csv')
        prev = pd.read_csv('data/previous_application.csv')
        ccb = pd.read_csv('data/credit_card_balance.csv')
        insta = pd.read_csv('data/installments_payments.csv')
        pc = pd.read_csv('data/POS_CASH_balance.csv')

        # Shuffle applications, then restrict every child table to the ids
        # present in train (and bureau_balance to the selected bureau ids).
        # NOTE(review): sample(frac=1) keeps ALL rows — the "nrows" wording
        # in the original comment suggests a subsampling step was removed.
        train = train.sample(frac=1)
        ids = train['SK_ID_CURR'].values
        bureau = bureau.loc[bureau['SK_ID_CURR'].isin(ids)]
        idsb = bureau['SK_ID_BUREAU'].values
        bureaubal = bureaubal.loc[bureaubal['SK_ID_BUREAU'].isin(idsb)]
        prev = prev.loc[prev['SK_ID_CURR'].isin(ids)]
        ccb = ccb.loc[ccb['SK_ID_CURR'].isin(ids)]
        insta = insta.loc[insta['SK_ID_CURR'].isin(ids)]
        pc = pc.loc[pc['SK_ID_CURR'].isin(ids)]

        # creating EntitySet (collection of tables)
        # (entity_from_dataframe / ft.Relationship positional API matches the
        # pinned featuretools==0.16.0; newer releases renamed these.)
        es = ft.EntitySet(id='applications')
        # Tables without a natural single-column key get a generated index.
        es = es.entity_from_dataframe(entity_id='train', dataframe=train, index='SK_ID_CURR')
        es = es.entity_from_dataframe(entity_id='bureau', dataframe=bureau, index='SK_ID_BUREAU')
        es = es.entity_from_dataframe(entity_id='bureaubal', dataframe=bureaubal, make_index=True, index='bb_id')
        es = es.entity_from_dataframe(entity_id='prev', dataframe=prev, index='SK_ID_PREV')
        es = es.entity_from_dataframe(entity_id='ccb', dataframe=ccb, make_index=True, index='cc_id')
        es = es.entity_from_dataframe(entity_id='insta', dataframe=insta, make_index=True, index='installment.id')
        es = es.entity_from_dataframe(entity_id='pc', dataframe=pc, make_index=True, index='pos_cash_id')
        # Parent → child relationships mirror the ERD:
        # train ← bureau ← bureaubal, train ← prev ← {ccb, insta, pc}.
        r_applications_bureau = ft.Relationship(es['train']['SK_ID_CURR'], es['bureau']['SK_ID_CURR'])
        es = es.add_relationship(r_applications_bureau)
        r_bureau_bureaubal = ft.Relationship(es['bureau']['SK_ID_BUREAU'], es['bureaubal']['SK_ID_BUREAU'])
        es = es.add_relationship(r_bureau_bureaubal)
        r_app_prev = ft.Relationship(es['train']['SK_ID_CURR'], es['prev']['SK_ID_CURR'])
        es = es.add_relationship(r_app_prev)
        r_prev_cc = ft.Relationship(es['prev']['SK_ID_PREV'], es['ccb']['SK_ID_PREV'])
        es = es.add_relationship(r_prev_cc)
        r_prev_insta = ft.Relationship(es['prev']['SK_ID_PREV'], es['insta']['SK_ID_PREV'])
        es = es.add_relationship(r_prev_insta)
        r_prev_pc2 = ft.Relationship(es['prev']['SK_ID_PREV'], es['pc']['SK_ID_PREV'])
        es = es.add_relationship(r_prev_pc2)

        # Deep feature synthesis: stacks aggregation/transform primitives up to
        # max_depth; the new columns are concatenated to the originals.
        train_ft, feature_names = ft.dfs(entityset=es, target_entity='train', max_depth=self.ft_maxdep)
        train_ft = train_ft.reset_index()

        print('\nTotal number of features after adding automated features: ', train_ft.shape[1])
        del train, bureau, bureaubal, prev, ccb, insta, pc
        gc.collect()
        return train_ft
439 | ''' 440 | 441 | print('Extracting features using Convolutional Neural Network (CNN) ...') 442 | train = pd.read_csv('data/application_train.csv') 443 | idl = train['SK_ID_CURR'].values 444 | bureau = pd.read_csv('data/bureau.csv') 445 | # Imputating the missing data in bureau table 446 | # Missing categorical features are imputed with 'Not_applicable' 447 | # Missing numeric features are imputed with Zero (logical choice for this dataset) 448 | cols = bureau.select_dtypes(include=object).columns 449 | bureau[cols] = bureau[cols].fillna('Not_Applicable') 450 | cols = bureau.select_dtypes(exclude=object).columns 451 | bureau[cols] = bureau[cols].fillna(0) 452 | # One-hot encoding of categorical features 453 | bureau = pd.get_dummies(bureau, drop_first=True) 454 | bureau = bureau.sort_values('DAYS_CREDIT', ascending=False) 455 | lst = bureau['SK_ID_CURR'].values 456 | lst = list(set(lst)) 457 | lst.sort() 458 | 459 | # Making bureau table data structure similar to an image 460 | # Applications are grouoped by SK_ID_CURR and for each SK_ID_CURR, the 5 most recent SK_ID_BUREAU is considered. 461 | # If an SK_ID_CURR did not have 5 records, empty rows added and filled with -99 (to avoid confusion with zero). 
462 | group = bureau.groupby('SK_ID_CURR') 463 | b = [] # b is the reshaped data structure of bureau table, suitable for use in CNN 464 | j = 0 465 | for sk in idl: 466 | if sk in lst: 467 | a = group.get_group(lst[j]) 468 | if a.shape[0] >= 5: 469 | a = a[:5] 470 | else: 471 | # m99 represents rows having value of -99 472 | m99 = np.ones((5 - a.shape[0], a.shape[1])) * -99 473 | m99 = pd.DataFrame(m99, columns=a.columns) 474 | a = a.append(m99) 475 | a = a.drop(['SK_ID_CURR', 'SK_ID_BUREAU'], axis=1) 476 | a = a.values.flatten().tolist() 477 | b.extend(a) 478 | j += 1 479 | else: 480 | m99 = np.ones((5, bureau.shape[1])) * -99 481 | m99 = pd.DataFrame(m99, columns=bureau.columns) 482 | m99 = m99.drop(['SK_ID_CURR', 'SK_ID_BUREAU'], axis=1) 483 | m99 = m99.values.flatten().tolist() 484 | b.extend(m99) 485 | b = np.array(b) 486 | b = np.reshape(b, (idl.shape[0], 5, bureau.shape[1] - 2, 1)) 487 | print('shape of channel(bureau):', b.shape) 488 | y = train['TARGET'] 489 | y = to_categorical(y, 2) 490 | 491 | # Deep CNN implementation 492 | # CNN architecture includes 2 convolution layer followed by two fully connected layer 493 | np.random.seed(5) 494 | model = Sequential() 495 | 496 | # 1st conv layer 497 | model.add(Conv2D(filters=32, kernel_size=(5, 5), padding="same", 498 | input_shape=(b.shape[1], b.shape[2], 1), data_format="channels_last" 499 | )) 500 | model.add(Activation("relu")) 501 | model.add(MaxPooling2D(pool_size=(2, 2))) 502 | # 2nd conv layer 503 | model.add(Conv2D(32, (5, 5), padding="same")) 504 | model.add(Activation("relu")) 505 | model.add(MaxPooling2D(pool_size=(2, 2))) 506 | model.add(Flatten()) 507 | # FC1 508 | model.add(Dense(units=128)) 509 | model.add(Activation("relu")) 510 | model.add(Dropout(0.5)) 511 | # FC2 512 | model.add(Dense(units=100, name='feature_extract')) 513 | model.add(Activation("relu")) 514 | # output FC 515 | model.add(Dense(units=2, activation='sigmoid')) 516 | model.build() 517 | model.compile(optimizer='Adam', 
loss='binary_crossentropy', metrics=['AUC']) 518 | model.summary() 519 | 520 | # Train deep neural network 521 | early_stops = EarlyStopping(patience=5, monitor='val_auc') 522 | mc = ModelCheckpoint('best_model.h5', 523 | monitor='val_loss', 524 | verbose=0, 525 | save_best_only=True) 526 | model.fit(b, y, validation_split=0.05, 527 | callbacks=[early_stops, mc], batch_size=self.cnn_bsize, epochs=self.cnn_epoch, verbose=1) 528 | 529 | # Extract the useful featuer from CNN after training the deep nerual network 530 | intermediate_layer_model = Model(inputs=model.input, 531 | outputs=model.get_layer('feature_extract').output) 532 | intermediate_layer_model.summary() 533 | 534 | # predict to get featured data 535 | feauture_engg_data = intermediate_layer_model.predict(b) 536 | feauture_engg_data = pd.DataFrame(feauture_engg_data) 537 | print('feauture_engg_data shape:', feauture_engg_data.shape) 538 | 539 | # Renaming columns 540 | new_col = [] 541 | for i in range(100): 542 | new_col.append('bfeat_%d' % (i + 1)) 543 | feauture_engg_data.columns = new_col 544 | feauture_engg_data['SK_ID_CURR'] = idl 545 | train = train.merge(feauture_engg_data, on='SK_ID_CURR', how='left') 546 | del feauture_engg_data, bureau 547 | gc.collect() 548 | return train 549 | 550 | def bureau_rnn_features(self): 551 | ''' 552 | Recurrent Neural Network (RNN) is used to extract new feature from sequential data from bureau and 553 | bureaubal tables. The method concatenats new features to applicaiton_train table and returns final dataframe. 
554 | ''' 555 | 556 | print('Extracting features using Recurrent Neural Network (RNN) ...') 557 | train = pd.read_csv('data/application_train.csv') 558 | idl = train['SK_ID_CURR'].values 559 | bureau = pd.read_csv('data/bureau.csv') 560 | # Imputating the missing data in bureau table 561 | # Missing categorical features are imputed with 'Not_applicable' 562 | # Missing numeric features are imputed with Zero (logical choice for this dataset) 563 | cols = bureau.select_dtypes(include=object).columns 564 | bureau[cols] = bureau[cols].fillna('Not_Applicable') 565 | cols = bureau.select_dtypes(exclude=object).columns 566 | bureau[cols] = bureau[cols].fillna(0) 567 | 568 | # One-hot encoding of categorical features 569 | bureau = pd.get_dummies(bureau, drop_first=True) 570 | bureau = bureau.sort_values('DAYS_CREDIT', ascending=False) 571 | lst = bureau['SK_ID_CURR'].values 572 | lst = list(set(lst)) 573 | lst.sort() 574 | 575 | # Making bureau table data structure similar to an image 576 | # Applications are grouoped by SK_ID_CURR and for each SK_ID_CURR, the 5 most recent SK_ID_BUREAU is considered. 577 | # If an SK_ID_CURR did not have 5 records, empty rows added and filled with -99 (to avoid confusion with zero). 
578 | group = bureau.groupby('SK_ID_CURR') 579 | b = [] # b is the reshaped data structure of bureau table, suitable for use in RNN 580 | j = 0 581 | for sk in idl: 582 | if sk in lst: 583 | a = group.get_group(lst[j]) 584 | if a.shape[0] >= 5: 585 | a = a[:5] 586 | else: 587 | # m99 represents rows having value of -99 588 | m99 = np.ones((5 - a.shape[0], a.shape[1])) * -99 589 | m99 = pd.DataFrame(m99, columns=a.columns) 590 | a = a.append(m99) 591 | a = a.drop(['SK_ID_CURR', 'SK_ID_BUREAU'], axis=1) 592 | a = a.values.flatten().tolist() 593 | b.extend(a) 594 | j += 1 595 | else: 596 | m99 = np.ones((5, bureau.shape[1])) * -99 597 | m99 = pd.DataFrame(m99, columns=bureau.columns) 598 | m99 = m99.drop(['SK_ID_CURR', 'SK_ID_BUREAU'], axis=1) 599 | m99 = m99.values.flatten().tolist() 600 | b.extend(m99) 601 | b = np.array(b) 602 | b = np.reshape(b, (idl.shape[0], 5, bureau.shape[1] - 2)) 603 | print('shape of channel(bureau):', b.shape) 604 | y = train['TARGET'] 605 | y = to_categorical(y, 2) 606 | 607 | # Deep RNN implementation 608 | # RNN architecture includes 2 Long Short Term Memory (LSTM) units followed by two fully connected layer 609 | np.random.seed(5) 610 | model = Sequential() 611 | # 1st LSTM layer 612 | model.add(LSTM(units=50, input_shape=(b.shape[1], b.shape[2]), return_sequences=True)) 613 | model.add(BatchNormalization()) 614 | model.add(Activation("relu")) 615 | model.add(Dropout(0.2)) 616 | # 2nd LSTM layer 617 | model.add(LSTM(50, return_sequences=True)) 618 | model.add(BatchNormalization()) 619 | model.add(Activation("relu")) 620 | model.add(Dropout(0.2)) 621 | model.add(Flatten()) 622 | # FC1 623 | model.add(Dense(units=128)) 624 | model.add(BatchNormalization()) 625 | model.add(Activation("relu")) 626 | model.add(Dropout(0.5)) 627 | # FC2 628 | model.add(Dense(units=100, name='RNN_feature_extract')) 629 | model.add(BatchNormalization()) 630 | model.add(Activation("relu")) 631 | # output FC 632 | model.add(Dense(units=2, activation='sigmoid')) 
633 | model.build() 634 | model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['AUC']) 635 | model.summary() 636 | 637 | # Train recurrent neural network 638 | early_stops = EarlyStopping(patience=5, monitor='val_auc') 639 | mc = ModelCheckpoint('best_model.h5', 640 | monitor='val_loss', 641 | verbose=0, 642 | save_best_only=True) 643 | model.fit(b, y, validation_split=0.05, callbacks=[early_stops, mc], batch_size=self.rnn_bsize, 644 | epochs=self.rnn_epoch, 645 | verbose=1) 646 | 647 | # Extract the useful featuer from RNN after training the deep nerual network 648 | intermediate_layer_model = Model(inputs=model.input, 649 | outputs=model.get_layer('RNN_feature_extract').output) 650 | intermediate_layer_model.summary() 651 | # predict to get featured data 652 | feauture_engg_data = intermediate_layer_model.predict(b) 653 | feauture_engg_data = pd.DataFrame(feauture_engg_data) 654 | print('feauture_engg_data shape:', feauture_engg_data.shape) 655 | 656 | # Renaming columns 657 | new_col = [] 658 | for i in range(100): 659 | new_col.append('bfeat_%d' % (i + 1)) 660 | feauture_engg_data.columns = new_col 661 | feauture_engg_data['SK_ID_CURR'] = idl 662 | 663 | # Merge RNN features to application_train dataset 664 | train = train.merge(feauture_engg_data, on='SK_ID_CURR', how='left') 665 | del feauture_engg_data, bureau 666 | gc.collect() 667 | return train 668 | 669 | def XGBoost(self): 670 | ''' 671 | This method train a machine learning model using XGBoost algorithm. Before that, it imputes the empty cells 672 | in self.data table, encodes categorical features using one-hot encoding method, applies PCA transformation 673 | on first self.pca_n principles components. 674 | 675 | Returns: 676 | self.pred_class: Binary class prediction of the target variable. 677 | self.pred: Probability prediction of the target variable. 
678 | self.y_test: y_test in the training dataset 679 | ''' 680 | 681 | print('Preprocessing final table one-hot encoding categorical features...') 682 | # Drop the columns with correlation > 0.98 683 | corr = self.data.corr() 684 | upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool)) 685 | to_drop = [column for column in upper.columns if any(upper[column] > 0.98)] 686 | self.data = self.data.drop(to_drop, axis=1) 687 | 688 | # Imputating the missing data, PCA can not handle missing data 689 | # Missing categorical features are imputed with 'Not_applicable' 690 | # Missing numeric features are imputed with Zero (logical choice for this dataset) 691 | cols = self.data.select_dtypes(include=object).columns 692 | self.data[cols] = self.data[cols].fillna('Not_Applicable') 693 | cols = self.data.select_dtypes(exclude=object).columns 694 | self.data[cols] = self.data[cols].fillna(0) 695 | 696 | # One-hot encoding categorical features for XGBoost algorithm. 697 | self.data = pd.get_dummies(self.data, drop_first=True) 698 | 699 | # Train and test set split 700 | y = self.data['TARGET'] 701 | X = self.data.drop('TARGET', axis=1) 702 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=1234) 703 | self.y_test = y_test 704 | 705 | # First pca_n principle components are used for XGBoost 706 | # PCA requires standardization of features 707 | sc = StandardScaler() 708 | X_pca = sc.fit_transform(X_train) 709 | pca = PCA(n_components=self.pca_n) 710 | X_pca = pca.fit_transform(X_pca) 711 | print('\nRatio of variance explained by {} principal components: '.format(self.pca_n), 712 | sum(pca.explained_variance_ratio_)) 713 | 714 | pipeline = make_pipeline(StandardScaler(), PCA(n_components=self.pca_n), XGBClassifier()) 715 | params = { 716 | 'xgbclassifier__learning_rate': [0.05, 0.1, 0.15, 0.2], 717 | 'xgbclassifier__max_depth': [3, 4, 5, 6, 8, 10], 718 | 'xgbclassifier__min_child_weight': [1, 3, 5, 7], 719 | 
'xgbclassifier__gamma': [0, 0.1, 0.2, 0.3, 0.4], 720 | 'xgbclassifier__colsample_bytree': [0.5, 0.7, 1] 721 | } 722 | print('\nApplying XGBoost classifier... \n') 723 | model = RandomizedSearchCV(pipeline, params, n_iter=1, scoring='roc_auc', cv=self.nfolds, n_jobs=-1, verbose=3) 724 | model.fit(X_train, y_train) 725 | print('\nCross validation best score(AUC) is:', model.best_score_) 726 | # Hyperparameters of the model with the best performance 727 | print('\nModel best hyperparamters are:', model.best_params_) 728 | # Binary class prediction 729 | self.pred_class = model.predict(X_test) 730 | # Probability prediction 731 | self.pred = model.predict_proba(X_test) 732 | self.pred = [p[1] for p in self.pred] 733 | 734 | def lightGBM(self): 735 | ''' 736 | This method trains a machine learning model using LightGBM algorithm. The boosted algorithm hyper parameters 737 | was found using Bayasian optimization. This methods encodes categorical features as integer 738 | and save them as 'category' type for lightGBM algorithms. 739 | 740 | Returns: 741 | self.pred_class: Binary class prediction of the target variable. 742 | self.pred: Probability prediction of the target variable. 743 | self.y_test: y_test in the training dataset 744 | ''' 745 | 746 | print('Preprocessing final table and label encoding categorical features...') 747 | # Drop the columns with correlation > 0.98 748 | corr = self.data.corr() 749 | upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool)) 750 | to_drop = [column for column in upper.columns if any(upper[column] > 0.98)] 751 | self.data = self.data.drop(to_drop, axis=1) 752 | # Encoding categorical features because lightGBM offers good accuracy with integer-encoded categorical features. 
753 | class_le = LabelEncoder() 754 | cols = self.data.select_dtypes(include=object).columns 755 | for col in cols: 756 | self.data[col] = class_le.fit_transform(self.data[col].values.astype(str)) 757 | self.data[col] = self.data[col].astype('category') 758 | print('Applying LightGBM algorithm...') 759 | y = self.data['TARGET'] 760 | X = self.data.drop('TARGET', axis=1) 761 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=1234) 762 | self.y_test = y_test 763 | # Lighgbm parameters was found using Bayesian Optimization 764 | model_params = { 765 | 'colsample_bytree': 0.45544541538547634, 766 | 'learning_rate': 0.09712737568777673, 767 | 'max_depth': 10, 768 | 'min_child_weight': 44.81416318834993, 769 | 'min_split_gain': 0.47913323843650946, 770 | 'num_leaves': 44, 771 | 'reg_alpha': 8.507126649843658, 772 | 'reg_lambda': 2.2113739093853257, 773 | 'subsample': 0.43342993037373423 774 | } 775 | model = make_pipeline(StandardScaler(), LGBMClassifier(**model_params)) 776 | # cross validation scores 777 | scores = cross_val_score(model, X_train, y_train, scoring='roc_auc', cv=self.nfolds, n_jobs=-1, verbose=100) 778 | print('max cross_val AUC: ', np.max(scores)) 779 | model.fit(X_train, y_train) 780 | # Binary class prediction 781 | self.pred_class = model.predict(X_test) 782 | # Probability prediction 783 | self.pred = model.predict_proba(X_test) 784 | self.pred = [p[1] for p in self.pred] 785 | 786 | def Catboost(self): 787 | ''' 788 | This methods trains a machine learning model using Catboost algorithm. 789 | This method encodes categorical features and save them as 'category' type for Catboost 790 | algorithm. 791 | 792 | Returns: 793 | self.pred_class: Binary class prediction of the target variable. 794 | self.pred: Probability prediction of the target variable. 795 | self.y_test: y_test in the training dataset. 
796 | ''' 797 | 798 | print('Preprocessing final table and one-hot encoding categorical features...') 799 | # Drop the columns with correlation > 0.98 800 | corr = self.data.corr() 801 | upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool)) 802 | to_drop = [column for column in upper.columns if any(upper[column] > 0.98)] 803 | self.data = self.data.drop(to_drop, axis=1) 804 | # Encoding categorical features because Catboost offers good accuracy with integer-encoded categorical features. 805 | class_le = LabelEncoder() 806 | cols = self.data.select_dtypes(include=object).columns 807 | for col in cols: 808 | self.data[col] = class_le.fit_transform(self.data[col].values.astype(str)) 809 | self.data[col] = self.data[col].astype('category') 810 | 811 | print('Applying CatBoost algorithm...') 812 | y = self.data['TARGET'] 813 | X = self.data.drop('TARGET', axis=1) 814 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=1234) 815 | self.y_test = y_test 816 | pipeline = Pipeline(steps=[('sc', StandardScaler()), ('catboost', CatBoostClassifier())]) 817 | params = { 818 | "catboost__depth": [5, 6], 819 | "catboost__iterations": [500, 1000], 820 | "catboost__learning_rate": [0.001, 0.01, 0.1], 821 | "catboost__l2_leaf_reg": [5, 100] 822 | } 823 | model = RandomizedSearchCV(pipeline, params, n_iter=4, scoring='roc_auc', cv=self.nfolds, n_jobs=-1, verbose=3) 824 | model.fit(X_train, y_train) 825 | 826 | # Binary class prediction 827 | self.pred_class = model.predict(X_test) 828 | # Probability prediction 829 | self.pred = model.predict_proba(X_test) 830 | self.pred = [p[1] for p in self.pred] 831 | 832 | def FCNN(self): 833 | ''' 834 | This method employs a fully connected neural network as the binary classifier. 835 | It impute the mission data and applies one-hot encoding on categorical features. 836 | 837 | Returns: 838 | self.pred_class: Binary class prediction of the target variable. 
839 | self.pred: Probability prediction of the target variable. 840 | self.y_test: y_test in the training dataset. 841 | ''' 842 | 843 | print('Preprocessing final table, imputing missing values, and One-hot encoding of categorical features...') 844 | # Drop the columns with correlation > 0.98 845 | corr = self.data.corr() 846 | upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool)) 847 | to_drop = [column for column in upper.columns if any(upper[column] > 0.98)] 848 | self.data = self.data.drop(to_drop, axis=1) 849 | 850 | # Missing categorical features are imputed with 'Not_applicable' 851 | # Missing numeric features are imputed with Zero (logical choice for this dataset) 852 | cols = self.data.select_dtypes(include=object).columns 853 | self.data[cols] = self.data[cols].fillna('Not_Applicable') 854 | cols = self.data.select_dtypes(exclude=object).columns 855 | self.data[cols] = self.data[cols].fillna(0) 856 | 857 | # One-hot encoding of categorical features 858 | self.data = pd.get_dummies(self.data, drop_first=True) 859 | 860 | print('Applying Fully Connected Neural Network (FCNN) ...') 861 | y = self.data['TARGET'] 862 | X = self.data.drop('TARGET', axis=1) 863 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=1234) 864 | self.y_test = y_test 865 | sc = StandardScaler() 866 | X_train = sc.fit_transform(X_train) 867 | X_test = sc.fit_transform(X_test) 868 | 869 | # Deep FCNN implementation 870 | # FCNN architecture includes 3 fully connected units having 150, 75, 25 neurons, respectively. 
871 | np.random.seed(5) 872 | # FC1 873 | model = Sequential() 874 | model.add(Dense(input_shape=(X_train.shape[1],), units=150)) 875 | model.add(BatchNormalization()) 876 | model.add(Activation("relu")) 877 | model.add(Dropout(0.2)) 878 | # FC2 879 | model.add(Dense(units=75)) 880 | model.add(BatchNormalization()) 881 | model.add(Activation("relu")) 882 | model.add(Dropout(0.2)) 883 | # FC3 884 | model.add(Dense(units=25)) 885 | model.add(BatchNormalization()) 886 | model.add(Activation("relu")) 887 | model.add(Dropout(0.2)) 888 | # Output layer 889 | model.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid')) 890 | model.build() 891 | model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['AUC']) 892 | model.summary() 893 | 894 | # Train deep neural network 895 | early_stops = EarlyStopping(patience=10, monitor='val_auc') 896 | mc = ModelCheckpoint('best_model.h5', monitor='val_loss', verbose=0, save_best_only=True) 897 | model.fit(X_train, y_train, validation_split=self.test_size, callbacks=[early_stops, mc], 898 | batch_size=self.batch_size, epochs=self.epoch, verbose=1) 899 | # Binary class prediction, Keras predict method always return probability (unlike Sklearn!) 900 | self.pred_class = np.argmax(model.predict(X_test), axis=-1) 901 | # Probability prediction 902 | self.pred = model.predict(X_test) 903 | 904 | def Hclustering(self): 905 | ''' 906 | This method resamples trining data to have balanced positive to negative labels. It undersamples the 907 | majority class (negative) using hierarcical clustering with cluster size equals to the size of positive class. 908 | Then randomly chose a sample from each cluster as the representative of that cluster. 909 | 910 | Returns: 911 | self.pred_class: Binary class prediction of the target variable. 912 | self.pred: Probability prediction of the target variable. 913 | self.y_test: y_test in the training dataset. 
914 | ''' 915 | 916 | print('Preprocessing final table and one-hot encoding categorical features... \n') 917 | # Drop the columns with correlation > 0.98 918 | corr = self.data.corr() 919 | upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool)) 920 | to_drop = [column for column in upper.columns if any(upper[column] > 0.98)] 921 | self.data = self.data.drop(to_drop, axis=1) 922 | # Impute the missing data which is required for calculating euclidean distance in clustering 923 | # Missing value in categorical columns are imputed by NA (not available), 924 | # Missing value in numeric columns are imputed by zero, which most of the time is the case in this dataset. 925 | cols = self.data.select_dtypes(include=object).columns 926 | self.data[cols] = self.data[cols].fillna('NA') 927 | cols = self.data.select_dtypes(exclude=object).columns 928 | self.data[cols] = self.data[cols].fillna(0) 929 | 930 | # One-hot encoding categorical features for clustering (eucliden distance) and XGBoost algorithm. 931 | self.data = pd.get_dummies(self.data, drop_first=True) 932 | del corr 933 | y = self.data['TARGET'] 934 | X = self.data.drop('TARGET', axis=1) 935 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=1234) 936 | self.y_test = y_test 937 | 938 | print('Undersampling the majority class using hierarchical clustering .... 
\n') 939 | # defining positive and negative classes 940 | negative = self.data.loc[self.data['TARGET'] == 0] 941 | positive = self.data.loc[self.data['TARGET'] == 1] 942 | # finding number of clusters 943 | nclusters = np.ceil(len(positive)).astype(int) 944 | # Standardize negative class for the clustering 945 | sc = StandardScaler() 946 | transform = sc.fit_transform(negative) 947 | negative = pd.DataFrame(transform, columns=negative.columns) 948 | # Clustering the majority class using euclidean affinity and ward linkage 949 | # In Ward's linkage, two clusters that lead to the minimum increase of the total within-cluster SSE are merged. 950 | ac = AgglomerativeClustering(n_clusters=nclusters, affinity='euclidean', linkage='ward') 951 | clustering = ac.fit(negative) 952 | # add a new feature for each row to show what cluster they belong 953 | negative['cluster'] = clustering.labels_ 954 | # Randomly choose a sample from each cluster 955 | # A function for choosing one sample from each cluster 956 | def sampling(group): 957 | return group.sample(1, random_state=1) 958 | 959 | # Grouping the train data based on the cluster and select one sample from each cluster 960 | negative = negative.groupby('cluster', as_index=False).apply(sampling) 961 | negative = negative.droplevel(level=1) 962 | negative = negative.drop('cluster', axis=1) 963 | negative = pd.DataFrame(sc.inverse_transform(negative), columns=negative.columns) 964 | 965 | # Merging negative and positive class to form balanced train set 966 | train = pd.concat([negative, positive], axis=0, sort=False) 967 | # train['SK_ID_CURR'] = train['SK_ID_CURR'].astype(int) 968 | train = train.sample(frac=1) 969 | train = train.reset_index(drop=True) 970 | # Return X, y training dataset 971 | y_train = train['TARGET'] 972 | X_train = train.drop('TARGET', axis=1) 973 | X_train = X_train.to_numpy() 974 | 975 | print('Applying XGBoost algorithm on balanced dataset... 
\n') 976 | pipeline = make_pipeline(StandardScaler(), PCA(n_components=self.pca_n), XGBClassifier()) 977 | params = { 978 | 'xgbclassifier__learning_rate': [0.05, 0.1, 0.15, 0.2], 979 | 'xgbclassifier__max_depth': [3, 4, 5, 6, 8, 10], 980 | 'xgbclassifier__min_child_weight': [1, 3, 5, 7], 981 | 'xgbclassifier__gamma': [0, 0.1, 0.2, 0.3, 0.4], 982 | 'xgbclassifier__colsample_bytree': [0.5, 0.7, 1] 983 | } 984 | model = RandomizedSearchCV(pipeline, params, n_iter=1, scoring='roc_auc', cv=self.nfolds, n_jobs=-1, verbose=3) 985 | model.fit(X_train, y_train) 986 | print('\nCross validation best score(AUC) is:', model.best_score_) 987 | # Hyperparameters of the model with the best performance 988 | print('\nModel best hyperparamters are:', model.best_params_) 989 | # Binary class prediction 990 | self.pred_class = model.predict(X_test) 991 | # Probability prediction 992 | self.pred = model.predict_proba(X_test) 993 | self.pred = [p[1] for p in self.pred] 994 | del negative, positive, train, clustering, transform 995 | gc.collect() 996 | 997 | def Hclustering_smote(self): 998 | ''' 999 | This method resamples training data to have balanced positive to negative labels. It undersamples the 1000 | majority class (negative) using hierarcical clustering up to 50% of total data and oversamples the minority 1001 | class up to 50% using SMOTE. 1002 | 1003 | Returns: 1004 | self.pred_class: Binary class prediction of the target variable. 1005 | self.pred: Probability prediction of the target variable. 1006 | self.y_test: y_test in the training dataset. 1007 | ''' 1008 | 1009 | print('Preprocessing final table and one-hot encoding categorical features... 
\n') 1010 | # Drop the columns with correlation > 0.98 1011 | corr = self.data.corr() 1012 | upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool)) 1013 | to_drop = [column for column in upper.columns if any(upper[column] > 0.98)] 1014 | self.data = self.data.drop(to_drop, axis=1) 1015 | # Impute the missing data which is required for calculating euclidean distance in clustering 1016 | # Missing value in categorical columns are imputed by NA (not available), 1017 | # Missing value in numeric columns are imputed by zero, which most of the time is the case in this dataset. 1018 | cols = self.data.select_dtypes(include=object).columns 1019 | self.data[cols] = self.data[cols].fillna('NA') 1020 | cols = self.data.select_dtypes(exclude=object).columns 1021 | self.data[cols] = self.data[cols].fillna(0) 1022 | 1023 | # One-hot encoding categorical features for clustering (eucliden distance) and XGBoost algorithm. 1024 | self.data = pd.get_dummies(self.data, drop_first=True) 1025 | 1026 | # Agglomerative Clustering is computationaly expensive, 1027 | # In this experiment only a fraction of application train file is considered (nrows= 30000). 
1028 | del corr 1029 | y = self.data['TARGET'] 1030 | X = self.data.drop('TARGET', axis=1) 1031 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size, random_state=1234) 1032 | self.y_test = y_test 1033 | 1034 | print('Undersample majority class and oversampling minority class ...\n') 1035 | 1036 | # defining positive and negative classes 1037 | negative = self.data.loc[self.data['TARGET'] == 0] 1038 | positive = self.data.loc[self.data['TARGET'] == 1] 1039 | 1040 | # finding number of clusters (half of the training data) 1041 | nclusters = np.ceil(len(self.data) / 2).astype(int) 1042 | # Standardize negative class for the clustering 1043 | sc = StandardScaler() 1044 | transform = sc.fit_transform(negative) 1045 | negative = pd.DataFrame(transform, columns=negative.columns) 1046 | del transform 1047 | 1048 | # Clustering the majority class using euclidean affinity and ward linkage 1049 | # In Ward's linkage, two clusters that lead to the minimum increase of the total within-cluster SSE are merged. 
1050 | ac = AgglomerativeClustering(n_clusters=nclusters, affinity='euclidean', linkage='ward') 1051 | clustering = ac.fit(negative) 1052 | # add a new feature for each row to show what cluster they belong 1053 | negative['cluster'] = clustering.labels_ 1054 | # Randomly choose a sample from each cluster 1055 | # A function for choosing one sample from each cluster 1056 | def sampling(group): 1057 | return group.sample(1, random_state=1) 1058 | # Grouping the train data based on the cluster and select one sample from each cluster 1059 | negative = negative.groupby('cluster', as_index=False).apply(sampling) 1060 | negative = negative.droplevel(level=1) 1061 | negative = negative.drop('cluster', axis=1) 1062 | negative = pd.DataFrame(sc.inverse_transform(negative), columns=negative.columns) 1063 | 1064 | # Merging negative and positive class to form balanced train set 1065 | train = pd.concat([negative, positive], axis=0, sort=False) 1066 | train = train.sample(frac=1) 1067 | train = train.reset_index(drop=True) 1068 | 1069 | # Return X, y training dataset 1070 | y_train = train['TARGET'] 1071 | X_train = train.drop('TARGET', axis=1) 1072 | 1073 | # SMOTE oversampling of minority class 1074 | oversample = SMOTE() 1075 | X_train, y_train = oversample.fit_resample(X_train, y_train) 1076 | 1077 | print('Applying XGBoost algorithm on the balanced training data... 
\n') 1078 | pipeline = make_pipeline(StandardScaler(), PCA(n_components=self.pca_n), XGBClassifier()) 1079 | params = { 1080 | 'xgbclassifier__learning_rate': [0.05, 0.1, 0.15, 0.2], 1081 | 'xgbclassifier__max_depth': [3, 4, 5, 6, 8, 10], 1082 | 'xgbclassifier__min_child_weight': [1, 3, 5, 7], 1083 | 'xgbclassifier__gamma': [0, 0.1, 0.2, 0.3, 0.4], 1084 | 'xgbclassifier__colsample_bytree': [0.5, 0.7, 1] 1085 | } 1086 | model = RandomizedSearchCV(pipeline, params, n_iter=1, scoring='roc_auc', cv=self.nfolds, n_jobs=-1, verbose=3) 1087 | model.fit(X_train, y_train) 1088 | print('\nCross validation best score(AUC) is:', model.best_score_) 1089 | # Hyperparameters of the model with the best performance 1090 | print('\nModel best hyperparamters are:', model.best_params_) 1091 | # Binary class prediction 1092 | self.pred_class = model.predict(X_test) 1093 | # Probability prediction 1094 | self.pred = model.predict_proba(X_test) 1095 | self.pred = [p[1] for p in self.pred] 1096 | 1097 | def train(self, args): 1098 | ''' 1099 | This method train the model using the selected algorithm (default: lightGBM). 1100 | If resampling method is used, separate methods are implemented due to different pipeline structure. 
1101 | ''' 1102 | 1103 | if args.resample: 1104 | if args.use_hclstr: 1105 | self.Hclustering() 1106 | else: 1107 | self.Hclustering_smote() 1108 | elif args.xgb: 1109 | print('using XGBoost for training ...') 1110 | self.XGBoost() 1111 | elif args.catb: 1112 | print('using Catboost for training ...') 1113 | self.Catboost() 1114 | elif args.fcnn: 1115 | print('Using Fully connected neural network for training ...') 1116 | self.FCNN() 1117 | else: 1118 | print('using LightGBM for training ...') 1119 | self.lightGBM() 1120 | 1121 | # Evaluate ROC_AUC, Precision, Recall, F1-Score, Cohen-Cappa metrics 1122 | self.calculate_metrics() 1123 | # Plot ROC curve 1124 | self.plot_ROC() 1125 | # Plot Precision/R curve 1126 | self.plot_precision_recall() 1127 | 1128 | def calculate_metrics(self): 1129 | ''' 1130 | This method calculates the classification metrics including precision, recall, F1-Score, AUC_ROC, 1131 | and Cohen's kappa coefficient. 1132 | ''' 1133 | 1134 | # ROC_AUC score 1135 | print('ROC_AUC:', roc_auc_score(self.y_test, self.pred)) 1136 | # Precision/Recall (0.1 Threshold) 1137 | pred_class_2 = (np.array(self.pred) > 0.1).astype(int) 1138 | cm = confusion_matrix(self.y_test, pred_class_2) 1139 | print('\nConfusion_metrix (0.1 Threshold): \n', cm) 1140 | # True Negatives (TN) 1141 | tn = cm[0][0] 1142 | # False Positives (FP) 1143 | fp = cm[0][1] 1144 | # False Negatives (FN) 1145 | fn = cm[1][0] 1146 | # True Positives (TP) 1147 | tp = cm[1][1] 1148 | precision = tp / (tp + fp) 1149 | print('Precision (0.1 Threshold): ', precision) 1150 | recall = tp / (tp + fn) 1151 | print('Recall (0.1 Threshold): ', recall) 1152 | print('F1-score ( 0.1 Threshold):', 2 * precision * recall / (precision + recall)) 1153 | cohen_kappa = cohen_kappa_score(self.y_test, pred_class_2) 1154 | print('\nCohen_kappa (0.1 Threshold): ', cohen_kappa) 1155 | 1156 | def plot_ROC(self): 1157 | ''' 1158 | This method plots ROC based on y_test and predicted probability of positive class by 
lightGBM. 1159 | ''' 1160 | 1161 | # Initialize figure 1162 | fig = plt.figure(figsize=(9, 9)) 1163 | plt.title('Receiver Operating Characteristic') 1164 | # Plot ROC curve 1165 | fpr, tpr, thresholds = roc_curve(self.y_test, self.pred) 1166 | plt.plot(fpr, tpr) 1167 | # Diagonal 45 degree line 1168 | plt.plot([0, 1], [0, 1], 'k--') 1169 | # Axes limits and labels 1170 | plt.xlim([-0.1, 1.1]) 1171 | plt.ylim([-0.1, 1.1]) 1172 | plt.ylabel('True Positive Rate') 1173 | plt.xlabel('False Positive Rate') 1174 | plt.show() 1175 | 1176 | def plot_precision_recall(self): 1177 | ''' 1178 | This method plots precision_recall curve based on y_test and predicted probability of positive class. 1179 | ''' 1180 | 1181 | precision, recall, thresholds = precision_recall_curve(self.y_test, self.pred) 1182 | fig = plt.figure(figsize=(9, 9)) 1183 | plt.title('Precision_Recall') 1184 | # Plot Precision-Recall curve 1185 | plt.plot(recall, precision) 1186 | # Axes limits and labels 1187 | plt.xlim([0, 1]) 1188 | plt.ylim([0, 1]) 1189 | plt.ylabel('Precision') 1190 | plt.xlabel('Recall') 1191 | plt.show() 1192 | -------------------------------------------------------------------------------- /Feature Engineering Strategies/XGBoost_Automated Features.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 29, 6 | "metadata": { 7 | "executionInfo": { 8 | "elapsed": 3527, 9 | "status": "ok", 10 | "timestamp": 1601504229624, 11 | "user": { 12 | "displayName": "Ali Ghorbani", 13 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhWTYg2QaNPZd4GGNiDkkHX8r9t7BRVHYGz3JwSKA=s64", 14 | "userId": "10869472433171243113" 15 | }, 16 | "user_tz": 240 17 | }, 18 | "id": "xp4I20KBm54b" 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "''' Import feature tools library and numpy, pandas, sklearn modules and libraries'''\n", 23 | "import pandas as pd\n", 24 | "import numpy as np\n", 25 | "from 
matplotlib import pyplot as plt\n", 26 | "%matplotlib inline\n", 27 | "import seaborn as sns\n", 28 | "from sklearn.model_selection import train_test_split\n", 29 | "from sklearn.pipeline import make_pipeline\n", 30 | "from sklearn.preprocessing import StandardScaler\n", 31 | "from sklearn.model_selection import RandomizedSearchCV, GridSearchCV\n", 32 | "from sklearn.decomposition import PCA \n", 33 | "import featuretools as ft\n", 34 | "from xgboost import XGBClassifier\n", 35 | "# Classification metrics\n", 36 | "from sklearn.metrics import confusion_matrix\n", 37 | "from sklearn.metrics import roc_curve, roc_auc_score\n", 38 | "from sklearn.metrics import precision_recall_curve\n", 39 | "from sklearn.metrics import cohen_kappa_score\n", 40 | "# Ignore ConvergenceWarning messages\n", 41 | "import warnings\n", 42 | "from sklearn.exceptions import ConvergenceWarning\n", 43 | "warnings.simplefilter(action='ignore', category=ConvergenceWarning)" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": 30, 49 | "metadata": { 50 | "executionInfo": { 51 | "elapsed": 3496, 52 | "status": "ok", 53 | "timestamp": 1601504229625, 54 | "user": { 55 | "displayName": "Ali Ghorbani", 56 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhWTYg2QaNPZd4GGNiDkkHX8r9t7BRVHYGz3JwSKA=s64", 57 | "userId": "10869472433171243113" 58 | }, 59 | "user_tz": 240 60 | }, 61 | "id": "mMmpzLtirfYi" 62 | }, 63 | "outputs": [], 64 | "source": [ 65 | "def train_application():\n", 66 | " \n", 67 | " \"\"\"\n", 68 | " This function reads application_train.csv, cleans it and perform manual feature engineering for each\n", 69 | " application (SK_ID_CURR). \n", 70 | " \n", 71 | " Parameters:\n", 72 | " None\n", 73 | "\n", 74 | " Returns:\n", 75 | " train: training dataFrame which includes hand engineered features from \n", 76 | " just application_train table. 
\n", 77 | " \n", 78 | " \"\"\"\n", 79 | " \n", 80 | " print('Processing application_train and application_test tables')\n", 81 | " train = pd.read_csv('application_train.csv')\n", 82 | " # Delete four applications with XNA CODE_GENDER (train set)\n", 83 | " train = train[train['CODE_GENDER'] != 'XNA']\n", 84 | " # Replace DAYS_EMPLOYED = 365243 by nan\n", 85 | " train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True) \n", 86 | " # Feature engineering\n", 87 | " train['Days_employed_age'] = train['DAYS_EMPLOYED'] / train['DAYS_BIRTH']\n", 88 | " train['Credit_income_ratio'] = train['AMT_CREDIT'] / train['AMT_INCOME_TOTAL']\n", 89 | " train['Anuity_income_ratio'] = train['AMT_ANNUITY'] / train['AMT_INCOME_TOTAL'] \n", 90 | " train['Income_per_person'] = train['AMT_INCOME_TOTAL'] / train['CNT_FAM_MEMBERS']\n", 91 | " #length of the payment in months since the annuity is the monthly amount due\n", 92 | " train['Credit_term'] = train['AMT_ANNUITY']/train['AMT_CREDIT'] \n", 93 | " \n", 94 | " return train" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": 31, 100 | "metadata": { 101 | "executionInfo": { 102 | "elapsed": 784, 103 | "status": "ok", 104 | "timestamp": 1601507091521, 105 | "user": { 106 | "displayName": "Ali Ghorbani", 107 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhWTYg2QaNPZd4GGNiDkkHX8r9t7BRVHYGz3JwSKA=s64", 108 | "userId": "10869472433171243113" 109 | }, 110 | "user_tz": 240 111 | }, 112 | "id": "Ow3vmIJZrwr_" 113 | }, 114 | "outputs": [], 115 | "source": [ 116 | "def Automated_features(train, nrows, max_depth):\n", 117 | " \n", 118 | " ''' \n", 119 | " Automated feature engineering is conducted on all the tables using feature tools.\n", 120 | " \n", 121 | " Parameters:\n", 122 | " train: preprocessed training data \n", 123 | " nrows: number of rows considered in train data for the model due to the computational power limitation.\n", 124 | " max_depth: depth of a deep feature is the number of primitives 
required to make the feature.\n", 125 | "\n", 126 | " Returns:\n", 127 | " train_ft: train dataframe with added new features from automated feature engineering. \n", 128 | " \n", 129 | " '''\n", 130 | " bureau = pd.read_csv('bureau.csv')\n", 131 | " bureaubal = pd.read_csv('bureau_balance.csv')\n", 132 | " prev = pd.read_csv('previous_application.csv')\n", 133 | " ccb = pd.read_csv('credit_card_balance.csv')\n", 134 | " insta = pd.read_csv('installments_payments.csv')\n", 135 | " pc = pd.read_csv('POS_CASH_balance.csv')\n", 136 | "\n", 137 | " # Choosing nrows data from all datasets\n", 138 | " train = train.sample(frac=1)\n", 139 | " train = train[:nrows]\n", 140 | " ids = train['SK_ID_CURR'].values\n", 141 | " bureau = bureau.loc[bureau['SK_ID_CURR'].isin(ids)]\n", 142 | " idsb = bureau['SK_ID_BUREAU'].values\n", 143 | " bureaubal = bureaubal.loc[bureaubal['SK_ID_BUREAU'].isin(idsb)]\n", 144 | " prev = prev.loc[prev['SK_ID_CURR'].isin(ids)]\n", 145 | " ccb = ccb.loc[ccb['SK_ID_CURR'].isin(ids)]\n", 146 | " insta = insta.loc[insta['SK_ID_CURR'].isin(ids)]\n", 147 | " pc = pc.loc[pc['SK_ID_CURR'].isin(ids)]\n", 148 | "\n", 149 | " # creating EntitySet (collection of tables)\n", 150 | " es = ft.EntitySet(id = 'applications')\n", 151 | " # adding Entity (table) to EntitySet\n", 152 | " es = es.entity_from_dataframe(entity_id = 'train' , dataframe = train , index= 'SK_ID_CURR')\n", 153 | " es = es.entity_from_dataframe(entity_id = 'bureau' , dataframe = bureau , index= 'SK_ID_BUREAU')\n", 154 | " es = es.entity_from_dataframe(entity_id = 'bureaubal', dataframe = bureaubal, make_index = True, index = 'bb_id')\n", 155 | " es = es.entity_from_dataframe(entity_id = 'prev' , dataframe = prev , index = 'SK_ID_PREV')\n", 156 | " es = es.entity_from_dataframe(entity_id = 'ccb' , dataframe = ccb , make_index = True, index = 'cc_id')\n", 157 | " es = es.entity_from_dataframe(entity_id = 'insta' , dataframe = insta , make_index = True, index = 'installment.id')\n", 158 | " es = 
es.entity_from_dataframe(entity_id = 'pc' , dataframe = pc , make_index = True, index = 'pos_cash_id')\n", 159 | " # Creating relation between Entities\n", 160 | " # Relationship between application training and bureau\n", 161 | " r_applications_bureau = ft.Relationship(es['train']['SK_ID_CURR'], es['bureau']['SK_ID_CURR'])\n", 162 | " es = es.add_relationship(r_applications_bureau)\n", 163 | " # Relationship between bureau and bureau balance\n", 164 | " r_bureau_bureaubal = ft.Relationship(es['bureau']['SK_ID_BUREAU'], es['bureaubal']['SK_ID_BUREAU'])\n", 165 | " es = es.add_relationship(r_bureau_bureaubal)\n", 166 | " # Relationship between application training and previous applications\n", 167 | " r_app_prev = ft.Relationship(es['train']['SK_ID_CURR'], es['prev']['SK_ID_CURR'])\n", 168 | " es = es.add_relationship(r_app_prev)\n", 169 | " # Relationship between previous applications with credit card balance, pos cash, and installments\n", 170 | " r_prev_cc = ft.Relationship(es['prev']['SK_ID_PREV'], es['ccb']['SK_ID_PREV'])\n", 171 | " es = es.add_relationship(r_prev_cc)\n", 172 | " r_prev_insta = ft.Relationship(es['prev']['SK_ID_PREV'], es['insta']['SK_ID_PREV'])\n", 173 | " es = es.add_relationship(r_prev_insta)\n", 174 | " r_prev_pc2 = ft.Relationship(es['prev']['SK_ID_PREV'], es['pc']['SK_ID_PREV'])\n", 175 | " es = es.add_relationship(r_prev_pc2)\n", 176 | " print('EntitySet with Relationships', es)\n", 177 | " # Deep feature synthesis with depth of 2 by stacking feature primitives (agregations and tranformations)\n", 178 | " # Automated features are concatenated to the original features; Therefore, \n", 179 | " train_ft, feature_names = ft.dfs(entityset = es, target_entity = 'train', max_depth = max_depth)\n", 180 | " train_ft = train_ft.reset_index()\n", 181 | " print('\\nTotal number of features after adding automated features: ', train_ft.shape[1])\n", 182 | " return train_ft" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 
32, 188 | "metadata": { 189 | "executionInfo": { 190 | "elapsed": 3447, 191 | "status": "ok", 192 | "timestamp": 1601504229626, 193 | "user": { 194 | "displayName": "Ali Ghorbani", 195 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhWTYg2QaNPZd4GGNiDkkHX8r9t7BRVHYGz3JwSKA=s64", 196 | "userId": "10869472433171243113" 197 | }, 198 | "user_tz": 240 199 | }, 200 | "id": "_NEVlazP1Ebx" 201 | }, 202 | "outputs": [], 203 | "source": [ 204 | "def preprocessing(train):\n", 205 | " '''\n", 206 | " This function calculates the correlation between all features in training data and drops the columns with \n", 207 | " correlation > 0.98. Then, it encodes categorical features and save them as 'category' type for lightGBM algorithms.\n", 208 | "\n", 209 | " Parameters:\n", 210 | " train: trainig sets after with automated features from feature tools.\n", 211 | "\n", 212 | " Returns:\n", 213 | " train: pandas.DataFrame which includes preprocessed training dataset.\n", 214 | " '''\n", 215 | "\n", 216 | " print('Preprocessing final table and one-hot encoding categorical features...')\n", 217 | " # Drop the columns with correlation > 0.98\n", 218 | " corr = train.corr()\n", 219 | " upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool))\n", 220 | " to_drop = [column for column in upper.columns if any(upper[column] > 0.98)]\n", 221 | " train = train.drop(to_drop, axis=1)\n", 222 | " \n", 223 | " # Imputating the missing data, PCA can not handle missing data\n", 224 | " # Missing categorical features are imputed with 'Not_applicable'\n", 225 | " # Missing numeric features are imputed with Zero (logical choice for this dataset)\n", 226 | " cols = train.select_dtypes(include = object).columns\n", 227 | " train[cols] = train[cols].fillna('Not_Applicable')\n", 228 | " cols = train.select_dtypes(exclude = object).columns\n", 229 | " train[cols] = train[cols].fillna(0)\n", 230 | " \n", 231 | " # One-hot encoding categorical features for XGBoost algorithm. 
\n", 232 | " train = pd.get_dummies(train, drop_first=True)\n", 233 | " \n", 234 | " return train" 235 | ] 236 | }, 237 | { 238 | "cell_type": "code", 239 | "execution_count": 33, 240 | "metadata": { 241 | "executionInfo": { 242 | "elapsed": 3423, 243 | "status": "ok", 244 | "timestamp": 1601504229627, 245 | "user": { 246 | "displayName": "Ali Ghorbani", 247 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhWTYg2QaNPZd4GGNiDkkHX8r9t7BRVHYGz3JwSKA=s64", 248 | "userId": "10869472433171243113" 249 | }, 250 | "user_tz": 240 251 | }, 252 | "id": "o7_ludBW1WUN" 253 | }, 254 | "outputs": [], 255 | "source": [ 256 | "def XGBoost(train, num_folds, test_size):\n", 257 | " '''\n", 258 | " This function train a machine learning model using XGBoost algorithm. \n", 259 | "\n", 260 | " Parameters:\n", 261 | " train: preprocessed training data \n", 262 | " num_folds: number of folds for cross-validation (default is 5)\n", 263 | " test_size: ratio of train to test dataset\n", 264 | "\n", 265 | " Returns:\n", 266 | " pred_class: Binary class prediction of the target variable.\n", 267 | " pred: Probability prediction of the target variable.\n", 268 | " y_test: y_test in the trainig dataset\n", 269 | " '''\n", 270 | " \n", 271 | " # Train and test set split\n", 272 | " y = train['TARGET']\n", 273 | " X = train.drop('TARGET', axis=1)\n", 274 | " X_train, X_test, y_train, y_test = train_test_split(X, y , test_size = test_size, random_state=1234)\n", 275 | " \n", 276 | " # First 200 principle components are used for XGBoost \n", 277 | " # PCA requires standardization of featuers\n", 278 | " sc = StandardScaler()\n", 279 | " X_pca = sc.fit_transform(X_train)\n", 280 | " pca = PCA(n_components = 200)\n", 281 | " X_pca = pca.fit_transform(X_pca)\n", 282 | " print('\\nRatio of variance explained by 200 principal components: ', sum(pca.explained_variance_ratio_))\n", 283 | " \n", 284 | " pipeline = make_pipeline(StandardScaler(),PCA(n_components = 200), XGBClassifier())\n", 285 | 
"\n", 286 | " params = {\n", 287 | " 'xgbclassifier__learning_rate': [0.05, 0.1, 0.15, 0.2],\n", 288 | " 'xgbclassifier__max_depth': [3, 4, 5, 6, 8, 10 ],\n", 289 | " 'xgbclassifier__min_child_weight': [1, 3, 5, 7], \n", 290 | " 'xgbclassifier__gamma' : [0, 0.1, 0.2, 0.3, 0.4], \n", 291 | " 'xgbclassifier__colsample_bytree': [ 0.5, 0.7, 1] \n", 292 | " }\n", 293 | " print('\\nApplying XGBoost classifier... \\n')\n", 294 | " \n", 295 | " model = RandomizedSearchCV(pipeline, params, n_iter=4, scoring='roc_auc', cv=5, n_jobs=-1, verbose = 3)\n", 296 | " model.fit(X_train, y_train)\n", 297 | " print('\\nCross validation best score(AUC) is:', model.best_score_)\n", 298 | " # Hyperparameters of the model with the best performance\n", 299 | " print('\\nModel best hyperparamters are:', model.best_params_)\n", 300 | " # Binary class prediction\n", 301 | " pred_class = model.predict(X_test)\n", 302 | " # Probability prediction\n", 303 | " pred = model.predict_proba(X_test)\n", 304 | " pred = [p[1] for p in pred]\n", 305 | " return pred_class, pred, y_test" 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": 34, 311 | "metadata": { 312 | "executionInfo": { 313 | "elapsed": 3406, 314 | "status": "ok", 315 | "timestamp": 1601504229628, 316 | "user": { 317 | "displayName": "Ali Ghorbani", 318 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhWTYg2QaNPZd4GGNiDkkHX8r9t7BRVHYGz3JwSKA=s64", 319 | "userId": "10869472433171243113" 320 | }, 321 | "user_tz": 240 322 | }, 323 | "id": "bI01HDaZeeOS" 324 | }, 325 | "outputs": [], 326 | "source": [ 327 | "def calculate_metrics(pred_class, pred, y_test):\n", 328 | " '''\n", 329 | " This function calculates the classificaiton metrics including precision, recall, F1-Score, AUC_ROC, and cohen-kappa coefficient.\n", 330 | "\n", 331 | " Parameters:\n", 332 | " pred_class: Binary class prediction of the target variable.\n", 333 | " pred: Probability prediction of the target variable.\n", 334 | " y_test: y_test in 
the trainig dataset\n", 335 | "\n", 336 | " Returns:\n", 337 | " None\n", 338 | " '''\n", 339 | "\n", 340 | " # ROC_AUC score\n", 341 | " print('ROC_AUC:', roc_auc_score(y_test, pred))\n", 342 | " # Precision/Recall (0.1 Threshold)\n", 343 | " pred_class_2 = (np.array(pred) > 0.1).astype(int)\n", 344 | " cm = confusion_matrix(y_test, pred_class_2)\n", 345 | " print('\\nConfusion_metrix (0.1 Threshold): \\n', cm)\n", 346 | " # True Negatives (TN)\n", 347 | " tn = cm[0][0]\n", 348 | " # False Positives (FP)\n", 349 | " fp = cm[0][1] \n", 350 | " # False Negatives (FN)\n", 351 | " fn = cm[1][0]\n", 352 | " # True Positives (TP)\n", 353 | " tp = cm[1][1]\n", 354 | " precision = tp / (tp + fp)\n", 355 | " print( 'Precision (0.1 Threshold): ', precision )\n", 356 | " recall = tp / (tp + fn)\n", 357 | " print( 'Recall (0.1 Threshold): ', recall )\n", 358 | " print( 'F1-score ( 0.1 Threshold):', 2*precision*recall/(precision+recall))\n", 359 | " cohen_kappa = cohen_kappa_score(y_test, pred_class_2)\n", 360 | " print( '\\nCohen_kappa (0.1 Threshold): ', cohen_kappa )\n", 361 | "\n", 362 | "def plot_ROC(y_test, pred):\n", 363 | " '''\n", 364 | " This function plots ROC based on y_test and predictied probability of positive class by lightGBM.\n", 365 | "\n", 366 | " Parameters:\n", 367 | " pred: Probability prediction of the target variable.\n", 368 | " y_test: y_test in the trainig dataset\n", 369 | "\n", 370 | " Returns:\n", 371 | " None\n", 372 | " '''\n", 373 | " # Initialize figure\n", 374 | " fig = plt.figure(figsize=(9,9))\n", 375 | " plt.title('Receiver Operating Characteristic')\n", 376 | " # Plot ROC curve\n", 377 | " fpr, tpr, thresholds = roc_curve(y_test, pred)\n", 378 | " plt.plot(fpr, tpr)\n", 379 | " # Diagonal 45 degree line\n", 380 | " plt.plot([0,1],[0,1],'k--')\n", 381 | " # Axes limits and labels\n", 382 | " plt.xlim([-0.1,1.1])\n", 383 | " plt.ylim([-0.1,1.1])\n", 384 | " plt.ylabel('True Positive Rate')\n", 385 | " plt.xlabel('False Positive Rate')\n", 
386 | " plt.show()\n", 387 | "\n", 388 | "def plot_precision_recall(y_test, pred):\n", 389 | " '''\n", 390 | " This function plots precicision_recall curve based on y_test and predictied probability of positive class by lightGBM..\n", 391 | "\n", 392 | " Parameters:\n", 393 | " pred: Probability prediction of the target variable.\n", 394 | " y_test: y_test in the trainig dataset\n", 395 | " \n", 396 | " Returns:\n", 397 | " None\n", 398 | " '''\n", 399 | " precision, recall, thresholds = precision_recall_curve(y_test, pred)\n", 400 | " fig = plt.figure(figsize=(9,9))\n", 401 | " plt.title('Precision_Recall')\n", 402 | " # Plot Precision-Recall curve\n", 403 | " plt.plot(recall, precision)\n", 404 | " # Axes limits and labels\n", 405 | " plt.xlim([0,1])\n", 406 | " plt.ylim([0,1])\n", 407 | " plt.ylabel('Precision')\n", 408 | " plt.xlabel('Recall')\n", 409 | " plt.show()" 410 | ] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": 35, 415 | "metadata": { 416 | "colab": { 417 | "base_uri": "https://localhost:8080/", 418 | "height": 697 419 | }, 420 | "executionInfo": { 421 | "elapsed": 336143, 422 | "status": "error", 423 | "timestamp": 1601507431833, 424 | "user": { 425 | "displayName": "Ali Ghorbani", 426 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhWTYg2QaNPZd4GGNiDkkHX8r9t7BRVHYGz3JwSKA=s64", 427 | "userId": "10869472433171243113" 428 | }, 429 | "user_tz": 240 430 | }, 431 | "id": "I3-Q4ElLlcKl", 432 | "outputId": "3dbe33d1-40ad-42ac-fc56-da6ff8303a2a", 433 | "scrolled": false 434 | }, 435 | "outputs": [ 436 | { 437 | "name": "stdout", 438 | "output_type": "stream", 439 | "text": [ 440 | "Processing application_train and application_test tables\n", 441 | "EntitySet with Relationships Entityset: applications\n", 442 | " Entities:\n", 443 | " train [Rows: 30000, Columns: 127]\n", 444 | " bureau [Rows: 86286, Columns: 17]\n", 445 | " bureaubal [Rows: 911993, Columns: 4]\n", 446 | " prev [Rows: 138560, Columns: 37]\n", 447 | " ccb 
[Rows: 84931, Columns: 24]\n", 448 | " insta [Rows: 1125445, Columns: 9]\n", 449 | " pc [Rows: 86293, Columns: 9]\n", 450 | " Relationships:\n", 451 | " bureau.SK_ID_CURR -> train.SK_ID_CURR\n", 452 | " bureaubal.SK_ID_BUREAU -> bureau.SK_ID_BUREAU\n", 453 | " prev.SK_ID_CURR -> train.SK_ID_CURR\n", 454 | " ccb.SK_ID_PREV -> prev.SK_ID_PREV\n", 455 | " insta.SK_ID_PREV -> prev.SK_ID_PREV\n", 456 | " pc.SK_ID_PREV -> prev.SK_ID_PREV\n", 457 | "\n", 458 | "Total number of features after adding automated features: 2226\n", 459 | "Preprocessing final table and one-hot encoding categorical features...\n", 460 | "\n", 461 | "Ratio of variance explained by 200 principal components: 0.7023963403648208\n", 462 | "\n", 463 | "Applying XGBoost classifier... \n", 464 | "\n", 465 | "Fitting 5 folds for each of 4 candidates, totalling 20 fits\n" 466 | ] 467 | }, 468 | { 469 | "name": "stderr", 470 | "output_type": "stream", 471 | "text": [ 472 | "[Parallel(n_jobs=-1)]: Using backend LokyBackend with 4 concurrent workers.\n", 473 | "[Parallel(n_jobs=-1)]: Done 20 out of 20 | elapsed: 8.2min remaining: 0.0s\n", 474 | "[Parallel(n_jobs=-1)]: Done 20 out of 20 | elapsed: 8.2min finished\n" 475 | ] 476 | }, 477 | { 478 | "name": "stdout", 479 | "output_type": "stream", 480 | "text": [ 481 | "\n", 482 | "Cross validation best score(AUC) is: 0.6735569039652113\n", 483 | "\n", 484 | "Model best hyperparamters are: {'xgbclassifier__min_child_weight': 5, 'xgbclassifier__max_depth': 3, 'xgbclassifier__learning_rate': 0.1, 'xgbclassifier__gamma': 0.4, 'xgbclassifier__colsample_bytree': 0.7}\n", 485 | "ROC_AUC: 0.6674257906376042\n", 486 | "\n", 487 | "Confusion_metrix (0.1 Threshold): \n", 488 | " [[1025 354]\n", 489 | " [ 59 62]]\n", 490 | "Precision (0.1 Threshold): 0.14903846153846154\n", 491 | "Recall (0.1 Threshold): 0.512396694214876\n", 492 | "F1-score ( 0.1 Threshold): 0.23091247672253257\n", 493 | "\n", 494 | "Cohen_kappa (0.1 Threshold): 0.12106215984609003\n" 495 | ] 496 | }, 497 
| { 498 | "data": { 499 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAikAAAImCAYAAACWxRrLAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nOzde5yN5f7/8ddnhiYhh/B1SqnspF0pE0kZZsghoYMcEzntdvRtR+yKdinVVqnItx0dUBH1Q9Q4hIwOcmrHLipqhJCccshx5vr9sda0V9PMWMasda/D+/l4zKNZa92z7vdaM5rPXJ/rui9zziEiIiISaRK8DiAiIiKSFxUpIiIiEpFUpIiIiEhEUpEiIiIiEUlFioiIiEQkFSkiIiISkVSkiHjEzLqa2Xyvc0QSMztgZud5cN5zzcyZWbFwnzsUzOwrM2tSiK/Tz6REFBUpIoCZbTSzQ/5fktvNbIKZlQrlOZ1zbzrnrgvlOQKZ2dVmtsjM9pvZL2Y228zqhOv8eeRZbGa9A+9zzpVyzn0fovP9yczeNrOd/te/xszuNbPEUJyvsPzF0gWn8hzOuYudc4tPcJ4/FGbh/pkUOREVKSL/dYNzrhRQF7gcuN/jPIWS12iAmTUE5gPvAlWBmsBq4JNQjFxE2oiEmZ0PLAM2A5c458oAHYBkoHQRn8uz1x5p77vIqVKRIpKLc247MA9fsQKAmSWZ2dNmtsnMfjKzf5lZiYDH25nZF2a2z8y+M7OW/vvLmNkrZrbNzH40s8dy/nI3sx5m9rH/83+Z2dOBOczsXTO71/95VTP7f2b2s5llmtndAcc9bGbvmNkbZrYP6JHHyxoJTHLOPe+c2++c2+2cGwp8Bjzsf54mZrbFzB7wjzZsNLOuwbwHAV87xMy2A6+ZWTkze8+feY//8+r+40cA1wIv+EevXvDf/9sogn80a6yZve8f/VnmLzZy8lxnZt/4R0X+z8wyco/MBHgE+NQ5d69zbpv/+/yNc66Lc25vwHFd/a9vp5k9GHCu+ma21Mz2+r+XL5jZaQGPOzO7y8zWA+v99z1vZpv9PxOrzOzagOMT/e/zd/7XtsrMzjazJf5DVvvfl47+49v4f772mtmnZnZpwHNt9L/va4CDZlbMf1+zgOwr/Tl+MrNR/i/NOdde/7kaBv5M+r/2YjP7wMx2+7/2gXzeX5HQcM7pQx9x/wFsBJr5P68O/Ad4PuDx54BZQHl8f3nPBp7wP1Yf+AVojq/wrwbU9j82E3gJKAlUApYD/fyP9QA+9n/eGN9f+ea/XQ44hG/UIwFYBTwEnAacB3wPtPAf+zBwDGjvP7ZErtd2BpAFNM3jdfcEtvk/bwIcB0YBSUAKcBC4MIj3IOdr/+n/2hLAWcDN/vOXBt4GZgacezHQO1ceB1zg/3wCsNv//hYD3gTe8j9WAdgH3OR/7H/970HvfL6/24GeBXz/z/Wfe7w/+2XAEeAi/+P1gKv85zoXWAfckyv3B/73poT/vm7+96AYMNCf4XT/Y/fh+xm7EDD/+c7K/R74b18B7AAaAInA7fh+XpMCfna/AM4OOPdG/vvzvBS4zf95KeCqXK+5WMC5evDfn8nSwDZ/9tP9txt4/W9VH/H14XkAfegjEj78/1M/AOz3/497IVDW/5jh+2V9fsDxDYFM/+cvAc/m8Zz/4/9FVyLgvs7Ah/7PA38hGLAJaOy/3QdY5P+8AbAp13PfD7zm//xhYEkBr626/zXVzuOxlsAx/+dN8BUaJQMenwYMC+I9aAIczfklnE+OusCegNuLOXGR8nLAY62Br/2fdweWBjxm+Iq8/IqUY0DLArLl/MKuHnDfcqBTPsffA8zIlTv1BD9je4DL/J9/A7TL57jcRcqLwKO5jvkGSAn42b0jj5/nnCJlCb6RpAr5vOb8ipTOwL9D+e9OH/o40Yf6lyL/1d45
t8DMUoDJ+P5a3wtUxDcasMrMco41fH/Vgu8v2PQ8nu8coDiwLeDrEvD9Mv0d55wzs7fw/WJYAnQB3gh4nqpmFtiWSAQ+Crj9h+cMsAfIBqoAX+d6rAqwM/BY59zBgNs/4BvNOdF7APCzc+7wbw+anQE8i68QKue/u7SZJTrnsgrIG2h7wOe/4hsJwJ/pt9fsf/+2FPA8u/C91kKdz8z+hG+EKRnf+1AM3+hWoN99D8xsINDbn9UBZ+L7mQLfz8x3QeQB3/f/djMbEHDfaf7nzfPcufQChgNfm1km8Ihz7r0gznsyGUVCQnNSRHJxzmXg+ys+Z47ITnytl4udc2X9H2Wcb5It+H5BnP/HZ2IzvpGUCgFfd6Zz7uJ8Tj0FuMXMzsE3evL/Ap4nM+A5yjrnSjvnWgfGLuD1HMQ35N8hj4dvxTdqlKOcmZUMuF0D2BrEe5BXhoH42hkNnHNn4mtpga+4KTBzELbhGyHyPaGvcqqe/+EswNd6KqwX8RV4tfyv5QH++zpy/PZ6/PNPhuB7f8s558riawnmfE1+PzN52QyMyPX9P8M5NyWvc+fmnFvvnOuMr934T+Ad//f4RO//yWQUCQkVKSJ5ew5obmZ1nXPZ+OYqPGtmlQDMrJqZtfAf+wrQ08zSzCzB/1ht55ugOR94xszO9D92vn+k5g+cc/8GfgZeBua5/07oXA7s80+OLOGfdPlnM7vyJF7P3/H9NX63mZX2T2p9DF/L5pFcxz5iZqf5f9G2Ad4O4j3IS2l8hc1eMysP/CPX4z/hm19TGO8Dl5hZe/OtaLkLqFzA8f8Arjazp8yssj//BeabbFw2iPOVxjcH5oCZ1QbuDOL44/i+n8XM7CF8Iyk5XgYeNbNa5nOpmZ3lfyz3+zIe+IuZNfAfW9LMrjezoFYlmVk3M6vo/x7m/Exl+bNlk//34D2gspndY75J06XNrEEw5xQpKipSRPLgnPsZmIRvPgb4/ireAHxmvhU0C/CNEuCcW45vAuqz+P5azsA3RA++uROnAWvxtV3eoeC2wxSgGb52U06WLOAGfHM6MvGNarwMlDmJ1/Mx0ALfRNNt+No4lwPXOOfWBxy63Z9zK76Jqn9xzuW0iPJ9D/LxHL5JqDvxrSKam+vx5/GNHO0xs9HBvhb/69mJb2RoJL5WTh1gJb6Rq7yO/w5fQXYu8JWZ/YJvpGolvnlIJzIIXwtuP76iYeoJjp8HzAG+xfdeH+b3LZlR+Ob7zMdX/LyC770C3xyjif6VPLc651bim6P0Ar7vzQbyXsGVn5b4XvMBfO95J+fcYefcr8AIfMvQ95rZVYFf5Jzbj28y+A34fi7WA01P4rwipyxnJYGIxDnzXaH0DedcQW2TiGRmCcAWoKtz7kOv84hI0dBIiohEJTNrYWZlzSyJ/84R+czjWCJShFSkiEi0aohv9clOfC2J9s65Q95GEpGipHaPiIiIRCSNpIiIiEhEUpEiIiIiESnqrjhboUIFd+6553odQ0RERIrAqlWrdjrnKub1WNQVKeeeey4rV670OoaIiIgUATP7Ib/H1O4RERGRiKQiRURERCKSihQRERGJSCpSREREJCKpSBEREZGIpCJFREREIpKKFBEREYlIKlJEREQkIqlIERERkYikIkVEREQikooUERERiUgqUkRERCQiqUgRERGRiKQiRURERCKSihQRERGJSCpSREREJCKpSBEREZGIpCJFREREIpKKFBEREYlIKlJEREQkIqlIERERkYikIkVEREQikooUERERiUgqUkRERCQiqUgRERGRiKQiRURERCKSihQRERGJSCpSREREJCKpSBEREZGIpCJFREREIpKKFBEREYlIKlJEREQkIqlIERERkYikIkVEREQikooUERERiUghK1LM7FUz22FmX+bzuJnZaDPbYGZrzOyKUGURERGR6BPKkZQJQMsCHm8F1PJ/9AVeDGEWERERiTLFQvXEzrklZnZuAYe0AyY55xzwmZmVNbMq
zrltocokIiISLSYv28S7X/xYqK89fvQwf65RgUfaXVLEqcLLyzkp1YDNAbe3+O/7AzPra2YrzWzlzz//HJZwIiIiXnr3ix9Zu23fSX/d0UMHyHjubuaOfzIEqcIrZCMpQbA87nN5HeicGweMA0hOTs7zGBERkVhTp8qZTO3XMOjj9+zZQ4sWLfhl09cMHvlw6IKFiZdFyhbg7IDb1YGtHmURERGJen/9619ZvXo106dP54YbbvA6zinzskiZBfQ3s7eABsAvmo8iIiKx4lTmlACs3baPOlXOPKmvGTVqFL179yYtLa3Q540koVyCPAVYClxoZlvMrJeZ/cXM/uI/JB34HtgAjAf+GqosIiIi4VbYOSU56lQ5k3Z185yq+Ttbt25l8ODBHD9+nCpVqsRMgQKhXd3T+QSPO+CuUJ1fRETEayc7p+Rkbd68mdTUVLZv387tt9/OxRdfHLJzecHLdo+IiEhMyKu1U5h2zcnYuHEjqamp7Nq1i/nz58dcgQK6LL6IiMgpy6u1E2y7pjC+++47UlJS2LNnDwsWLKBhw9CN1nhJIykiIiJFINStnUA7duzAzFi0aBGXX355WM7pBRUpIiISU051VU1hhLq1k2PPnj2UK1eOhg0b8u2333LaaaeF/JxeUrtHRERiyqmuqimMULZ2cqxZs4batWszfvx4gJgvUEAjKSIiEoPC2XoJh88//5zmzZtTokQJUlJSvI4TNhpJERERiWDLly8nLS2N0qVLs2TJEv70pz95HSlsNJIiIiJRL3AeSrjmh4TDjh07aN68ORUqVGDRokWcc845XkcKK42kiIhI1AuchxKO+SHhUqlSJZ5//nmWLFkSdwUKaCRFRERiRCzNQ1mwYAFJSUlce+219OjRw+s4nlGRIiIiUSf3MuNYavHMmTOHG2+8keTkZD766CPMzOtInlG7R0REok7uZcax0uKZNWsW7du35+KLL+bdd9+N6wIFNJIiIiJRKpbaOwDvvPMOnTt35oorrmDevHmULVvW60ie00iKiIhIBJg1axYNGjTggw8+UIHip5EUERGJCrG6zPjIkSMkJSXx6quvcuTIEUqWLOl1pIihkRQREYkKsbjMePz48dStW5effvqJYsWKqUDJRSMpIiISNWJpHsrYsWPp378/rVq1okyZMl7HiUgaSREREQmzUaNG0b9/f9q1a8eMGTM4/fTTvY4UkVSkiIiIhNErr7zCwIEDueWWW3j77bdJSkryOlLEUpEiIiISRm3btuWBBx5gypQpFC9e3Os4EU1zUkREJOLkvqIsRPeKHuccb775JrfeeisVK1ZkxIgRXkeKChpJERGRiJP7irIQvSt6nHMMHjyY2267jQkTJngdJ6poJEVERCJSLKzkcc5xzz33MHr0aO666y569+7tdaSoopEUERGREMjOzuavf/0ro0eP5m9/+xtjxowhIUG/dk+GRlJERMQTec07yRHN809ybNy4kcmTJzNkyBCeeOKJuN8ssDBUpIiIiCdy5p3kVYxE6/wT8I2gJCQkcN555/Hll19SvXp1FSiFpCJFREQ8EwvzTgIdO3aMbt26Ua9ePQYPHszZZ5/tdaSopiJFRETCInd7JxZaOoGOHj1Kp06dmDFjBvXr1/c6TkzQDB4REQmL3MuKo7mlk9vhw4e5+eabmTFjBqNHj2bgwIFeR4oJGkkREZGwibX2DvjmoNx0003MmTOHf/3rX/Tr18/rSDFDRYqIiIRMYIsn1to7ORISEmjfvj233HILd9xxh9dxYoqKFBERCZnAFTyx1N4B2L9/P19++SUNGzakb9++XseJSSpSREQkpGKxxbN3715atWrF2rVryczMpHz58l5HikkqUkRERE7C7t27adGiBatXr2bq1KkqUEJIRYqIiBSpWJ6HsnPnTpo3b87atWuZPn06bdq08TpSTNMSZBERKVKBS41jbR7K2LFj+frrr5k9e7YKlDDQSIqIiBS5WJyHAjB06FBuuukmLrnk
Eq+jxAWNpIiISJGZvGwTyzJ3ex2jSG3evJkWLVqwefNmEhMTVaCEkUZSRESkyOTMRYmVFs/GjRtJTU1l165dbNu2TXvxhJmKFBERKVINapanS4MaXsc4ZRs2bCA1NZUDBw6wcOFCkpOTvY4Ud1SkiIiI5LJ+/XqaNGnC0aNHWbRoEXXr1vU6UlzSnBQREZFczjrrLC655BI+/PBDFSge0kiKiIiI3zfffMM555xD+fLlmTt3rtdx4p5GUkRERIBVq1bRsGFD7r77bq+jiJ+KFBERKRLRvPz4s88+Iy0tjTJlynD//fd7HUf8VKSIiEiRiNblxx9//DHNmzenQoUKZGRkULNmTa8jiZ+KFBERKTLRtvz4yJEjdO3alWrVqrFkyRJq1Iie7PFAE2dFRKRQAjcShOjcTDApKYlZs2ZRuXJl/ud//sfrOJKLRlJERKRQAjcShOjaTDA9PZ0RI0YAcNlll6lAiVAaSRERkUKLxo0EZ86cya233sqll17KwIEDOf30072OJPnQSIqIiMSNt99+mw4dOlCvXj0WLFigAiXCqUgREZGTFo3Ljd988006derEVVddxbx58yhbtqzXkeQEVKSIiMhJi8blxllZWTRt2pQ5c+Zw5pnRNcE3XqlIERGRQomW5cZbtmwBoHv37syfP59SpUp5nEiCpYmzIiKSp9xLjANFy3LjF154gfvuu4+MjAzq169PQoL+No8m+m6JiEieci8xDhQNy41HjRrFgAEDaNGiBZdddpnXcaQQNJIiIiL5isYlxgBPPPEEDzzwAB06dODNN9+kePHiXkeSQlCRIiIivwls8URLSye39PR0HnjgAbp06cLEiRMpVky/6qKV2j0iIvKbwBZPNLR08tKyZUsmTJjApEmTVKBEOX33RETkd6KxxeOc4/HHH6dLly7UrFmT22+/3etIUgQ0kiIiIlHNOcfdd9/N0KFDef31172OI0VIIykiIjGioCXDwYq2eSjZ2dnceeedjBs3joEDBzJs2DCvI0kR0kiKiEiMKGjJcLCiaR5KVlYWvXr1Yty4cTzwwAM89dRTmJnXsaQIaSRFRCSGRON8ksI6dOgQa9eu5eGHH+ahhx5SgRKDVKSIiESxWFgyfLKOHTvGsWPHKFWqFBkZGdrJOIap3SMiEsViYcnwyThy5AgdOnSgffv2ZGVlqUCJcRpJERGJcvHS4jl8+DA333wz6enpjBkzhsTERK8jSYipSBERiSK5V/DES4vn119/pX379ixYsICXXnqJvn37eh1JwkDtHhGRKJJ7BU88tHgAevbsyYIFC3j11VdVoMQRjaSIiESZeGnvBBo6dCg33XQTHTt29DqKhJFGUkREJCLt3buXF198Eeccl1xyiQqUOKSRFBGRCBavc1B2797Nddddx5o1a2jSpAkXXXSR15HEAxpJERGJYPE4B+Xnn3+madOmfPnll8ycOVMFShwL6UiKmbUEngcSgZedc0/merwGMBEo6z/m78659FBmEhGJNvE0B2X79u2kpaWRmZnJ7Nmzad68udeRxEMhK1LMLBEYCzQHtgArzGyWc25twGFDgWnOuRfNrA6QDpwbqkwiIuFQFBv95YiX9k6OFStWsGXLFtLT02nSpInXccRjoRxJqQ9scM59D2BmbwHtgMAixQE5//rKAFtDmEdEJCxyWjRFUVzEQ3sHfJe6L168ODfccAOZmZmUL1/e60gSAUJZpFQDNgfc3gI0yHXMw8B8MxsAlASa5fVEZtYX6AtQo0aNIg8qIlLU4qlFc6oyMzNp0aIFo0aNok2bNipQ5DehnDib13aULtftzsAE51x1oDXwupn9IZNzbpxzLtk5l1yxYsUQRBURES+sX7+exo0bs2vXLqpUqeJ1HIkwoSxStgBnB9yuzh/bOb2AaQDOuaXA6UCFEGYSEQmpycs2sSxzt9cxosK6detISUnh8OHDLFq0iHr16nkdSSJMKIuUFUAtM6tpZqcBnYBZuY7ZBKQBmNlF
+IqUn0OYSUQkpHImzMbDPJJTsXXrVpo0aUJ2djaLFy/msssu8zqSRKCQFSnOueNAf2AesA7fKp6vzGy4mbX1HzYQ6GNmq4EpQA/nXO6WkIhIVGlQszxdGmj+XEGqVKnCnXfeSUZGBhdffLHXcSRCWbTVBMnJyW7lypVexxARyXOpcc6qHk2azdvKlSspXbo0F154oddRJEKY2SrnXHJej+mKsyIihZT7arAQP0uGC2Pp0qWkpaXRu3dvou0PZPGG9u4RETkFGjUJzpIlS7j++uupXLkykydPxiyvBaAiv6ciRUTkBPK7gmy8XQ22sBYuXEjbtm2pUaMGCxcupGrVql5Hkiihdo+IyAnk1dYBtXaC4Zxj5MiRnHfeeSxevFgFipwUjaSIiARBbZ2T55zDzHjnnXc4cuQIFSroMlhycjSSIiIiRW7GjBk0b96cgwcPUrp0aRUoUigaSRER8dPck6Ixbdo0unTpwpVXXsnx48e9jiNRTCMpIiJ+mnty6t544w06d+7M1Vdfzfz58ylTpozXkSSKaSRFRCSA5p4U3uTJk+nevTtNmjRh9uzZlCxZ0utIEuVUpIhIXMivlRNIbZ1Tc8UVV9C1a1deeuklzjjjDK/jSAxQu0dE4kJ+rZxAausUTkZGBs45ateuzeuvv64CRYqMRlJEJG6olVP0nnrqKQYPHszEiRPp3r2713EkxqhIEZGYFdjiUSun6I0YMYKhQ4fSsWNHOnfu7HUciUFq94hIzAps8aiVU3Scc/zjH/9g6NCh3HbbbbzxxhsUL17c61gSgzSSIiIxTS2eordu3Toef/xx7rjjDsaNG0diYqLXkSRGqUgREZGTUqdOHZYuXcoVV1xBQoIG5CV0VKSISNTTlWJDLzs7m7/97W9cc801dOjQgeTkZK8jSRxQCSwiUU9Xig2t7Oxs+vXrx+jRo/n888+9jiNxRCMpIhITNPckNLKysujVqxcTJ05k6NChDB8+3OtIEkdUpIhI1Mnd3lFbJzSysrLo3r07kydPZvjw4QwbNszrSBJn1O4RkaiTu72jtk5oJCQkUK1aNZ588kkVKOIJjaSISFRSeyd0jhw5wpYtWzj//PMZOXKk13EkjmkkRUREfnPo0CFuvPFGrrnmGvbtK3ivI5FQ00iKiESNnLkomoMSGr/++ivt2rVj4cKFjBs3jjPP1Hss3lKRIiJRI7BA0RyUonXgwAHatGnDRx99xGuvvcbtt9/udSQRFSkiEl00FyU0HnnkET7++GPeeOMNbRYoEUNFiohENO1kHB6PPPIILVu2JC0tzesoIr/RxFkRiWjayTh0du3aRZ8+fdi3bx9nnHGGChSJOBpJEZGIpxZP0duxYwfNmzfnm2++oXv37lx77bVeRxL5AxUpIhKRtJIndLZt20azZs3IzMzkvffeU4EiEUtFiohEJK3kCY0ff/yR1NRUfvzxR+bMmUNKSorXkUTypSJFRCKW2jxF78iRIxQrVox58+bRqFEjr+OIFEhFiohIHPjpp5+oWLEi5513HmvWrCExMdHrSCInpCJFRDyXe1dj0HLjovTtt9+SmppKt27dePLJJ1WgSNTQEmQR8VzuXY1By42Lytq1a0lJSeHo0aN06dLF6zgiJ0UjKSISETT/pOitWbOGZs2akZCQwOLFi6lTp47XkUROikZSRMRTk5dtYlnmbq9jxJxDhw7RqlUrihcvTkZGhgoUiUoaSRERT+XMRVFrp2iVKFGCV155hVq1anH++ed7HUekUFSkiIjnGtQsT5cGNbyOERM+/fRTMjMz6dq1Ky1btvQ6jsgpUbtHRDyjVk/RWrJkCddddx2PPfYYR48e9TqOyClTkSIinlGrp+gsXLiQli1bUqNGDRYtWsRpp53mdSSRU6YiRUQ8pVbPqZs7dy5t2rThggsuYPHixVSpUsXrSCJFQkWKiEiUW758ORdddBEffvghlSpV8jqOSJEx55zXGU5KcnKyW7lypdcxRKSQAq8um3NV
WV0fpXAOHDhAqVKlcM5x6NAhzjjjDK8jiZw0M1vlnEvO6zGNpIhIWAVeXVZXlS28t956iwsuuIB169ZhZipQJCZpCbKIhJ1GT07NpEmT6NmzJ9dccw3Vq1f3Oo5IyKhIEZEikdcmgXnRxoGn5pVXXqFPnz6kpqby7rvvUrJkSa8jiYSM2j0iUiTy2iQwL2rxFN77779P7969adGiBbNnz1aBIjFPIykiUmTUxgmttLQ0hg8fzuDBg0lKSvI6jkjIqUgRkULJ3d5RGyd0JkyYQLt27ShXrhzDhg3zOo5I2KjdIyKFkru9ozZOaDz66KP07NmT559/3usoImGnkRQRKTS1d0LHOcdDDz3EY489xm233aYRFIlLGkkREYkwzjn+/ve/89hjj9GrVy9ee+01EhMTvY4lEnYqUkTkpExetomOLy0NaiWPFM6ePXuYNm0ad955J+PGjVOBInFL7R4ROSk5c1E0B6XoZWdn45yjfPnyLF++nAoVKmBmXscS8YyKFBE5aZqLUvSys7Pp168f2dnZjB8/nooVK3odScRzKlJE4lywV4rNoaXGRS8rK4s77riDSZMmMWzYMI2eiPhpTopInAv2SrE51OYpWseOHaNbt25MmjSJRx99lOHDh6tIEfHTSIqIqH3joTvuuIO33nqLkSNHct9993kdRySiqEgREfFQ9+7dqV+/PgMGDPA6ikjEUbtHRCTMDh06RHp6OgDNmzdXgSKSDxUpIiJhdPDgQW644Qbatm3Lhg0bvI4jEtHU7hERCZP9+/fTpk0bPv74YyZMmMAFF1zgdSSRiKYiRSTOaPdib/zyyy+0atWK5cuXM3nyZDp27Oh1JJGIp3aPSJzR7sXemDlzJitXrmTatGkqUESCpJEUkTikJcfh45zDzLj99tu5+uqrqVWrlteRRKKGihSRGKf2jnd++uknbrnlFp577jnq1aunAkXkJKndIxLj1N7xxtatW2nSpAmrVq3il19+8TqOSFTSSIpIHFB7J7w2b95Mamoq27dvZ+7cuTRu3NjrSCJRSUWKiEgR2rp1KykpKezatYt58+Zx9dVXex1JJGqp3SMSwyYv28SyzN1ex4grFSpUoFGjRixYsEAFisgp0kiKSAzLmTCrOSih9+2331KuXDkqVqzI66+/7qOi3r0AACAASURBVHUckZgQ1EiKmZ1mZid9aUQza2lm35jZBjP7ez7H3Gpma83sKzObfLLnEJGCNahZni4NangdI6atXbuWxo0b061bN6+jiMSUE46kmNn1wCjgNKCmmdUF/uGcu/EEX5cIjAWaA1uAFWY2yzm3NuCYWsD9QCPn3B4zq1T4lyISv3IvM86h5caht2bNGpo1a0axYsV47rnnvI4jElOCGUkZDjQA9gI4574AghlVqQ9scM5975w7CrwFtMt1TB9grHNuj/+5dwQbXET+K/cy4xxabhxan3/+OU2bNiUpKYmMjAwuuugiryOJxJRg5qQcc87tNbPA+1wQX1cN2Bxwewu+YifQnwDM7BMgEXjYOTc39xOZWV+gL0CNGhq2FsmLlhmHl3OOv/71r5QqVYoPP/yQ8847z+tIIjEnmCJlnZndCiSYWU3gf4HPgvg6y+O+3MVNMaAW0ASoDnxkZn92zu393Rc5Nw4YB5CcnBxMgSQSF3LaPGrrhJ+Z8c4775CVlcU555zjdRyRmBRMu6c/UA/IBqYDh/EVKieyBTg74HZ1YGsex7zrnDvmnMsEvsFXtIhIEAILFLV1wiMjI4M+ffqQlZVF9erVVaCIhFAwIyktnHNDgCE5d5jZTfgKloKsAGr5R19+BDoBXXIdMxPoDEwwswr42j/fB5ldRFCbJ5wWLFhA27ZtqVmzJnv37uWss87yOpJITAtmJGVoHvc9eKIvcs4dxzcKMw9YB0xzzn1lZsPNrK3/sHnALjNbC3wI3Oec2xVcdBGR8JkzZw5t2rShVq1aLF68WAWKSBjkO5JiZi2AlkA1MxsV8NCZ+Fo/
J+ScSwfSc933UMDnDrjX/yEiQdJclPCaPXs2t9xyC3/+85+ZP3++ChSRMCmo3bMD+BLfHJSvAu7fD+R5YTYRCQ/NRQmvMmXK0KhRI6ZPn07ZsmW9jiMSN/ItUpxz/wb+bWZvOucOhzGTiARBc1FCb8OGDVxwwQU0btyYhQsXkutSDCISYsHMSalmZm+Z2Roz+zbnI+TJROR3Ji/bRMeXltLxpaV5XrhNitbEiROpXbs2M2fOBFCBIuKBYIqUCcBr+K570gqYhu/qsSISRoFXlVWbJ7RefvllevbsSdOmTbnuuuu8jiMSt4JZgnyGc26emT3tnPsOGGpmH4U6mIj8kVo8oTd27Fj69+9Pq1atmD59OqeffrrXkUTiVjBFyhHzjXN+Z2Z/wXfNE20EKCIx54svvqB///60a9eOqVOnkpSU5HUkkbgWTJHyN6AUcDcwAigD3BHKUCLxTDsae6du3brMnDmT1q1bU7x4ca/jiMS9E85Jcc4tc87td85tcs7d5pxrC/wQhmwicUk7GoffP//5T5YuXQpAu3btVKCIRIgCR1LM7Ep8uxl/7JzbaWYX47s8fiq+vXhEJAQ09yQ8nHMMGzaMESNGcNddd9Gwod5zkUiS70iKmT0BvAl0Beaa2YP4Ll2/Gt8eOyIiUcs5x+DBgxkxYgR9+vRh9OjRXkcSkVwKGklpB1zmnDtkZuXx7WB8mXPum/BEExEJDecc99xzD6NHj+auu+5i9OjRJCQEc0UGEQmngv5VHnbOHQJwzu0GvlaBIiKxICsri61bt3LvvfcyZswYFSgiEaqgkZTzzGy6/3MDzg24jXPuppAmE4lxWsUTfllZWezdu5ezzjqLKVOmkJiYqCvJikSwgoqUm3PdfiGUQUTiTX67GGsVT2gcP36cnj17smrVKlasWEHJkiW9jiQiJ1DQBoMLwxlEJB5pFU94HDt2jG7dujFt2jRGjBihAkUkSgRzMTcRkah19OhROnXqxIwZM3j66acZOHCg15FEJEgqUkRCJL85Jzk09yQ8Bg8ezIwZMxg9ejQDBgzwOo6InISgixQzS3LOHQllGJFYkt+ckxyaexIef//736lfvz5dunTxOoqInKQTFilmVh94Bd+ePTXM7DKgt3NOf5KInIDmnHjj4MGDPPvsswwZMoTKlSurQBGJUsGMpIwG2gAzAZxzq82saUhTiUSpwBaP2jne2L9/P9dffz2ffPIJjRs3pnHjxl5HEpFCCuYKRgnOudwbCmaFIoxItAvcHFDtnPD75ZdfuO666/j000+ZMmWKChSRKBfMSMpmf8vHmVkiMAD4NrSxRKKXWjze2L17Ny1atGD16tW8/fbb3HjjjV5HEpFTFEyRcie+lk8N4Cdggf8+kbhwolU6gdTi8U5mZiYbN25k+vTptGnTxus4IlIEgilSjjvnOoU8iUiEOtEqnUBq8YTfoUOHKFGiBPXq1SMzM5NSpUp5HUlEikgwRcoKM/sGmApMd87tD3EmkYijFk5k2rp1K2lpadx5553cfffdKlBEYswJJ846584HHgPqAf8xs5lmppEVEfHU5s2bSUlJYcuWLVx++eVexxGREAjqYm7OuU+BT83sYeA54E3grRDmEgkbXRk2+mzcuJHU1FR27drF/PnzadhQo1wiseiEIylmVsrMuprZbGA58DNwdciTiYRJ4LLhvGieSWQ5cOAAKSkp7Nmzh4ULF6pAEYlhwYykfAnMBkY65z4KcR4RT2jOSfQoVaoUQ4cOJTk5WW0ekRgXTJFynnMuO+RJRMIsp82jdk50+Oqrr9i5cycpKSn06dPH6zgiEgb5Film9oxzbiDw/8zM5X7cOXdTSJOJhFhggaJ2TmRbvXo1zZo1o1y5cqxdu5ZixbSBu0g8KOhf+lT/f18IRxARL6jNE/lWrVpF8+bNKVmyJO+//74KFJE4ku+/dufccv+nFznnfleomFl/YGEog4kUtdyreNTmiXyfffYZLVu2pFy5cixatIia
NWt6HUlEwiiYDQbvyOO+XkUdRCTUcq/iUZsn8k2cOJEKFSqQkZGhAkUkDhU0J6Uj0AmoaWbTAx4qDewNdTCRUFB7JzpkZWWRmJjImDFj2L17N5UqVfI6koh4oKDm7nJgF1AdGBtw/37g36EMJSLx64MPPmDQoEHMmTOHqlWrqkARiWMFzUnJBDLx7XosEpUC56FoDkrkS09P56abbqJ27doUL17c6zgi4rF856SYWYb/v3vMbHfAxx4z2x2+iCKFFzgPRXNQItvMmTNp3749f/7zn1m0aBEVK1b0OpKIeKygdk9T/38rhCOISKhoHkrkmzNnDh06dCA5OZk5c+ZQtmxZryOJSAQoqN2Tc5XZs4GtzrmjZnYNcCnwBpD/ZiciYZbfJoFq8USHK6+8kh49evDMM89w5pn6fomITzBLkGcCzszOByYBFwGTQ5pK5CTlt0mgWjyRbf78+Rw9epQKFSowfvx4FSgi8jvBXLox2zl3zMxuAp5zzo02M63ukYijtk50GTduHP369eOxxx7jwQcf9DqOiESgYEZSjptZB+A24D3/fZp2LyKF9sILL9CvXz+uv/56Bg4c6HUcEYlQwV5xtikw0jn3vZnVBKaENpZI8CYv28SyTC04ixajRo1iwIABtG/fnunTp3P66ad7HUlEItQJixTn3JfA3cBKM6sNbHbOjQh5MpEg5UyY1dyTyLdjxw4effRRbr31VqZNm8Zpp53mdSQRiWAnnJNiZtcCrwM/AgZUNrPbnHOfhDqcSLAa1CxPlwY1vI4hJ1CpUiU+++wzzj//fO1mLCInFMz/JZ4FWjvn1gKY2UX4ipbkUAYTkdjgnOPBBx+kdOnS3H///Vx44YVeRxKRKBHMnJTTcgoUAOfcOkBjtCJyQs45Bg0axBNPPMEPP/yAc87rSCISRYIZSfnczF7CN3oC0BVtMCgiJ5Cdnc3//u//8sILLzBgwACef/55zMzrWCISRYIZSfkL8B0wGBgCfA/0C2UokWBpZU/k6t+/Py+88AIDBw5UgSIihVLgSIqZXQKcD8xwzo0MTySR4GllT+RKTk7mgQce4LHHHlOBIiKFkm+RYmYPAL2Az4ErzWy4c+7VsCUTCZJW9kSO48eP85///IfLL7+cO+64w+s4IhLlCmr3dAUudc51AK4E7gxPJBGJRseOHaNLly5cffXV/PDDD17HEZEYUFC754hz7iCAc+5nMwtm/opIyOS107F2OY4MR44coVOnTsycOZNnnnmGc845x+tIIhIDCipSzjOz6f7PDTg/4DbOuZtCmkwkl5ydjgOLEu1y7L3Dhw9z8803k56ezpgxY+jfv7/XkUQkRhRUpNyc6/YLoQwiEgztdBx5xo0bR3p6Oi+99BJ9+/b1Oo6IxJB8ixTn3MJwBhGBvFs6OdTaiUz9+/enbt26NG7c2OsoIhJjNM9EIkpOSycvau1Ejn379tGlSxd++OEHEhISVKCISEhohy+JOGrpRLa9e/fSsmVLVq1aRadOnTRJVkRCJuiRFDNLCmUQEV09NvLt3r2bZs2a8fnnn/POO+/Qtm1bryOJSAw7YZFiZvXN7D/Aev/ty8xsTMiTSdzR1WMj288//0xqaipffvklM2fOpF27dl5HEpEYF8xIymigDbALwDm3GmgaylASv3T12MhVrFgxSpcuzaxZs2jdurXXcUQkDgQzJyXBOfdDrr03skKUR0QizLZt2yhbtizlypVjyZIl2odHRMImmJGUzWZWH3Bmlmhm9wDfhjiXiESATZs2ce2119KjRw8AFSgiElbBFCl3AvcCNYCfgKvQPj4iMS8zM5PGjRuzc+dO7r33Xq/jiEgcOmG7xzm3A+gUhiwiEiHWr19Pamoqv/76KwsXLqRevXpeRxKROHTCIsXMxgMu9/3OOV3/WiQGZWdnc8stt3D48GEWLVrEZZdd5nUkEYlTwUycXRDw+enAjcDm0MQREa8lJCQwceJEihcvzsUXX+x1HBGJY8G0e6YG
3jaz14EPQpZIRDzxxRdfMG/ePIYMGULdunW9jiMiUqi9e2oCug62SAxZuXIlqampjB07lj179ngdR0QECG5Oyh7+OyclAdgN/D2UoSQ+5N7xWLsce2Pp0qW0bNmS8uXL8+GHH1KuXDmvI4mIACcoUsx3UYTLgJzfJNnOuT9MohUpjJwdj3MKE+1yHH4fffQRrVu3pnLlyixatIizzz7b60giIr8psEhxzjkzm+Gc0/pDCQnteOytLVu2cM455zB//nyqVq3qdRwRkd8JZk7KcjO7ojBPbmYtzewbM9tgZvm2iMzsFjNzZpZcmPNI9Ji8bBMdX1pKx5eWsnbbPq/jxK3du327TXfu3JnPP/9cBYqIRKR8ixQzyxlluQZfofKNmX1uZv82s89P9MRmlgiMBVoBdYDOZlYnj+NKA3cDywrzAiS65LR4QO0dr7z33nuce+65LF68GIDTTjvN20AiIvkoqN2zHLgCaF/I564PbHDOfQ9gZm8B7YC1uY57FBgJDCrkeSTKqMXjnRkzZtCxY0cuu+wyLr30Uq/jiIgUqKAixQCcc98V8rmr8fuLvm0BGvzuBGaXA2c7594zs3yLFDPrC/QFqFGjRiHjiJdyVvJoBY93pk6dSteuXbnyyiuZO3cuZcqU8TqSiEiBCipSKppZvruKOedGneC589ou9beVQWaWADwL9DjB8+CcGweMA0hOTtbqoigUWKCoxRN+K1asoEuXLjRq1Ij333+f0qVLex1JROSECipSEoFS5F1sBGMLELiesTqwNeB2aeDPwGL/9u+VgVlm1tY5t7KQ55QIpjaPd5KTk3n22Wfp1asXJUuW9DqOiEhQCipStjnnhp/Cc68AaplZTXzXWekEdMl50Dn3C1Ah57aZLQYGqUARKToTJ06kUaNGXHDBBdx9991exxEROSkFLUEu7AgKAM6540B/YB6wDpjmnPvKzIabWdtTeW6JLpOXbWJZ5m6vY8SdMWPG0KNHD5566imvo4iIFEpBIylpp/rkzrl0ID3XfQ/lc2yTUz2fRKacS99rLkr4PP3009x3333ceOONjBkzxus4IiKFku9IinNOf/pKkWlQszxdGmhlVjiMGDGC++67j44dOzJ16lRdB0VEotYJNxgUORm5Nw0EbRwYTkeOHOH999/ntttu49VXX6VYMf0TF5Hopf+DSZHK61ooWnYces45jh49SlJSEvPnz6dEiRIkJiZ6HUtE5JSoSJEip6XG4eWcY+DAgXzxxRekp6dTqlQpryOJiBSJYDYYFAmKVvGEX3Z2NgMGDODZZ5/lkksuISkpyetIIiJFRkWKFBmt4gmv7Oxs+vXrx9ixY7nvvvt47rnn8F8YUUQkJqhIkSKlVTzhc9999/Hyyy/z4IMP8s9//lMFiojEHM1JEYlSvXr1okqVKgwapA3ERSQ2qUiRQtFSY28cPXqUqVOn0q1bN+rUqUOdOnW8jiQiEjJq90ih5Cw1DqSlxqF15MgROnToQPfu3fn000+9jiMiEnIaSZFC01Lj8Dl06BA333wzc+bMYezYsTRq1MjrSCIiIaciRU5KTptHrZ3w+fXXX2nXrh0LFy5k/Pjx9O7d2+tIIiJhoSJFTkpggaLWTnh89tlnLFmyhAkTJtC9e3ev44iIhI2KFDlpavOER3Z2NgkJCaSmprJhwwbOPvtsryOJiISVJs6KRKA9e/bQuHFjZs6cCaACRUTikooUCZouex8eu3btIi0tjeXLl+sCbSIS19TukaDpsveht2PHDpo1a8a3337Lu+++S6tWrbyOJCLiGRUpclJ02fvQ2bdvH02bNiUzM5P33nuPZs2aeR1JRMRTavdIUNTqCb3SpUvTtm1b0tPTVaCIiKCRFAmSWj2hs2nTJn799Vdq167NE0884XUcEZGIoSJFgqZWT9H7/vvvSU1NpWTJkqxZs4bExESvI4mIRAy1e+SE1OoJjfXr15OSksL+/ft5/fXXVaCIiOSikRQ5IbV6it66detI
TU0lKyuLDz/8kEsvvdTrSCIiEUdFigRFrZ6i9Y9//AOAxYsXU6dOHY/TiIhEJhUpIh549dVX+emnnzj//PO9jiIiErFUpMjv5OxyHEg7HheNFStWMHz4cKZMmUKpUqUoVaqU15FERCKaJs7K7+TschxIOx6fuk8//ZRmzZrx1VdfsWfPHq/jiIhEBY2kyB9ol+OitWTJElq3bk3VqlVZtGgR1atX9zqSiEhU0EiKSAgtXryYli1bUqNGDTIyMlSgiIicBBUpIiFUrVo1GjduzOLFi6lSpYrXcUREooqKFJEQWLNmDc45atWqxdy5c6lUqZLXkUREoo6KFPmNrixbNKZPn069evV4/vnnvY4iIhLVVKTIb3Rl2VP31ltvceutt1K/fn169uzpdRwRkaimIkV+R1eWLbxJkybRtWtXGjVqxNy5cylTpozXkUREopqKFJEi8OOPP9K3b1+aNm1Keno6pUuX9jqSiEjU03VS4pSuLFu0qlWrxgcffEBycjIlSpTwOo6ISEzQSEqc0pVli8bzzz/PW2+9BcC1116rAkVEpAhpJCWO6cqyp2bkyJEMGTKEjh070rFjR8zM60giIjFFIylxSEuNT92jjz7KkCFD6NSpE2+88YYKFBGREFCREoe01LjwnHMMGzaMhx56iNtuu4033niDYsU0ICkiEgoqUuKUlhoX3vHjx+nVqxevvfYaiYmJXscREYlZ+hMwDuReyaNVPCfPOcePP/5I9erVefzxx3HOkZCgGl9EJJT0f9k4kHslj1bxnJzs7GzuuusurrjiCrZt24aZqUAREQkDjaTECa3kKZysrCz69evHK6+8wpAhQ6hcubLXkURE4ob+HBTJR1ZWFj179uSVV15h2LBhPPHEE1rFIyISRipSYpyWGxfeqFGjeP3113n00UcZPny4ChQRkTBTuyfGablx4fXv35/q1avTuXNnr6OIiMQljaTEAS03Dt6RI0cYMmQIv/zyCyVKlFCBIiLiIRUpMWrysk10fGnpH/bnkfwdOnSI9u3bM3LkSD744AOv44iIxD21e2JUzrJjLTcOzsGDB2nXrh2LFi1i/Pjx3HLLLV5HEhGJeypSYpiWHQdn//79tGnTho8//pgJEybQvXt3ryOJiAgqUkTYs2cPW7Zs4c0336RTp05exxERET8VKRK39u/fT8mSJalRowZr164lKSnJ60giIhJAE2clLu3cuZOUlBTuueceABUoIiIRSEWKxJ0dO3aQmprKunXraN26tddxREQkH2r3SFzZtm0baWlpbNy4kffee4+0tDSvI4mISD5UpEjcyMrKokWLFmzevJm5c+fSuHFjryOJiEgBVKRI3EhMTOSf//wnZcqU4eqrr/Y6joiInICKlBgxedmm3/bpAX67kJvAd999x8qVK+nYsSOtWrXyOo6IiARJE2djRM4VZnPoSrM+33zzDSkpKQwYMIB9+7RFgIhINNFISgzRFWZ/b+3ataSmpuKcY+HChZx5pkaWRESiiUZSJCatWbOGJk2akJCQwOLFi7nkkku8jiQiIidJRUoMmLxsE8syd3sdI6LMnz+fpKQkMjIyuOiii7yOIyIihaAiJQbkTJjVHBQ4evQoAIMGDWL16tXUqlXL40QiIlJYKlJiRIOa5enSoIbXMTz1ySef8Kc//YnVq1cDUL58eY8TiYjIqdDE2Sik5cZ/tHjxYtq0aUO1atWoUKGC13FERKQIaCQlCmm58e8tWLCA1q1bc84555CRkUG1avH7XoiIxBKNpEQpLTf2WbZsGW3atOHCCy9kwYIFVKxY0etIIiJSRFSkRInAFo/aO/9Vt25d7rrrLh544AHOOussr+OIiEgRUrsnSgS2eOK9vQMwZ84cdu3aRVJSEs8884wKFBGRGKSRlCiiFo/P5MmT6d69O3369OHFF1/0Oo6IiISIRlIkqkycOJHbbruNa665hqeeesrrOCIiEkIhLVLMrKWZfWNmG8zs73k8fq+ZrTWzNWa2
0MzOCWUeiW4vv/wyPXv2JDU1lfT0dEqVKuV1JBERCaGQFSlmlgiMBVoBdYDOZlYn12H/BpKdc5cC7wAjQ5VHotuhQ4d48sknadmyJbNnz+aMM87wOpKIiIRYKOek1Ac2OOe+BzCzt4B2wNqcA5xzHwYc/xnQLYR5JEo55yhRogRLlizhrLPOIikpyetIIiISBqEsUqoBmwNubwEaFHB8L2BOXg+YWV+gL0CNGvFz6XctO4Ynn3ySr7/+mldffZWqVat6HUdERMIolHNSLI/7XJ4HmnUDkoE8Z0I658Y555Kdc8nxdLGueF92PHz4cO6//36OHj1Kdna213FERCTMQjmSsgU4O+B2dWBr7oPMrBnwIJDinDsSwjxRKR6XHTvnGDZsGCNGjOD222/nlVdeITEx0etYIiISZqEcSVkB1DKzmmZ2GtAJmBV4gJldDrwEtHXO7QhhFokiDz30ECNGjKB37968+uqrKlBEROJUyEZSnHPHzaw/MA9IBF51zn1lZsOBlc65WfjaO6WAt80MYJNzrm2oMkl0aNKkCQcPHuTpp58mIUGX8hERiVchveKscy4dSM9130MBnzcL5fklemRnZ/PRRx+RkpJCWloaaWlpXkcSERGP6c9U8VxWVha9e/emSZMmrFixwus4IiISIbR3TwTKWXocD8uOjx8/To8ePXjzzTd56KGHSE5O9jqSiIhECBUpESiwQInlZcfHjh2jW7duTJs2jccee4wHH3zQ60giIhJBVKREqHhYevz+++8zbdo0nnrqKQYNGuR1HBERiTAqUiJIPLV5ANq3b8+KFSvU4hERkTxp4mwEiYc2z6FDh7j11lt/myCrAkVERPKjIiXC5LR5ujSIvT2KDh48yPXXX88777zDunXrvI4jIiIRTu0eCYv9+/dz/fXX88knnzBp0iS6ddOG1yIiUjCNpESIycs2sSxzt9cxQmLfvn1cd911fPrpp0yZMkUFioiIBEVFSoR494sfAWJyLkqJEiWoUaMGb7/9NrfeeqvXcUREJEqo3RNBGtQsH1NzUXbu3El2djaVKlVi6tSpXscREZEooyLFY7G67Pinn34iLS2NkiVL8tlnn+HfQFJERCRoKlI8FovLjrdu3UpaWhqbNm1i9uzZKlBERKRQVKREgFi6uuzmzZtJTU1l+/btzJ07l2uvvdbrSCIiEqVUpEiR6tevHz///DMffPABV111lddxREQkiqlIkSL18ssvs337dq644gqvo4iISJTTEmQ5ZV9//TV33303x48fp2rVqipQRESkSKhIkVPy5Zdf0qRJE6ZOncqWLVu8jiMiIjFERYqHov0qs6tXr6Zp06YkJCSQkZHBueee63UkERGJISpSPBTNV5ldtWoVTZs25fTTTycjI4PatWt7HUlERGKMihSPRetVZg8fPkzVqlVZsmQJtWrV8jqOiIjEIK3u8UA0X2V227ZtVKlShUaNGrF69WoSExO9jiQiIjFKIykeiNarzC5evJhatWoxadIkABUoIiISUhpJ8Ui0XWX2gw8+oF27dtSsWZPrrrvO6zgiIhIHNJIiJ5Sens4NN9xArVq1WLx4MZUrV/Y6koiIxAEVKVKgjRs3cuONN3LxxRezaNEiKlas6HUkERGJEypSpEDnnnsur732GgsXLuSss87yOo6IiMQRFSmSpylTpvDRRx8B0KVLF8qWLetxIhERiTcqUuQPJkyYQNeuXXn66ae9jiIiInFMRYr8zrhx4+jZsyfNmjVjypQpXscREZE4piJFfvPCCy/Qr18/rr/+embNmsUZZ5zhdSQREYljKlIEAOccn3zyCe3bt2f69OmcfvrpXkcSEZE4p4u5CQcOHKBUqVK/XUm2ePHiHicSERHRSEpcc87xyCOPkJyczK5duyhevLgKFBERiRgqUuKUc44HH3yQhx9+mIYNG2qJsYiIRBy1e8IoUnY/ds4xaNAgRo0aRd++fXnxxRdJSFC9KiIikUW/mcIoUnY/fuqp
pxg1ahT9+/fnX//6lwoUERGJSBpJCbNI2P24R48eJCQkMHDgQMzM0ywiIiL50Z/QcSIrK4v/+7//49ixY1SqVIlBgwapQBERGk0UAQAADoxJREFUkYimIiUOHD9+nO7du3PXXXcxc+ZMr+OIiIgERe2eGHfs2DG6du3K22+/zeOPP06HDh28jiQiIhIUFSkx7MiRI3Tq1ImZM2fyzDPPcO+993odSUREJGgqUmLYd999x4cffsiYMWPo37+/13FEREROioqUGHT8+HGKFStGnTp1WL9+PRUrVvQ6koiIyEnTxNkYc+DAAZo3b84zzzwDoAJFRESiloqUMJm8bBPLMneH9Bz79u2jZcuWfPTRR1SpUiWk5xIREQk1tXvC5N0vfgQI2ZVm9+7dS8uWLVm1ahVTpkzRKh4REYl6KlLCqEHN8nRpUKPIn/fYsWM0b96c1atX884779CuXbsiP4eIiEi4qUiJAcWLF6dPnz5Ur16d1q1bex1HRESkSKhIiWLbt29n/fr1XHvttfTt29frOCIiIkVKRUqU+vHHH0lLS2P37t1kZmZSsmRJryOJiIgUKa3uCYOiXtmzadMmUlJS2Lp1K9OnT1eBIiIiMUkjKWFQlCt7MjMzSU1NZc+ePcyfP5+rrrrqlJ9TREQkEqlICZOiWtnz4osv8ssvv7Bw4ULq1atXBMlEREQik9o9UcI5B8ATTzzBypUrVaCIiEjMU5ESBb788ksaNWrEli1bSExM5LzzzvM6koiISMip3RPhvvjiC5o1a0ZSUhIHDx70Oo6IiEjYaCQlgq1cuZLU1P/f3v3H3lXXdxx/vgQKdPxwpdOgIF8MCCu2MtYxnHFOyxxC1g7CoAQUCM6VTRt1kAiYTd1IUEdkogyYNKAR7fg1GgbpLNbWEIrU8aNCdHTIECUDJ2NDYVp4749z2L5+/ba93/Z77z23PB/JTe4953O/5913vj9e/XzOPedtzJw5kzVr1nDIIYcMuyRJkgbGmZQ+uvauR7n53u/z4OP/xZx995rSe7/5zW+yYMECZs2axerVqxkbG+tPkZIkdZQzKX00PqBM9ePHBx54IEcffTRr1641oEiSXpKcSemzOfvuxfI/fmPP49evX8/cuXOZNWsWN9xwQx8rkySp25xJ6ZCVK1fy5je/mfPOO2/YpUiSNHSGlD6Z6qXwb7nlFhYuXMihhx7K+eef38fKJEkaDYaUPpnKpfBvuukmTjjhBObNm8ftt9/O7Nmz+12eJEmdZ0jpo14uhf/MM8+wZMkS5s+fz6pVq5g1a9aAqpMkqds8cXbI9thjD1atWsXY2Bh77rnnsMuRJKkznEkZkmXLlnHhhRcCMHfuXAOKJEkTGFKG4IorruCss85i7dq1bNq0adjlSJLUSYaUAbv00ktZsmQJxx13HDfffDM77+yKmyRJkzGkDNDFF1/M0qVLOf7447nxxhvZbbfdhl2SJEmdZUgZoNmzZ7N48WKWL1/OjBkzhl2OJEmdZkjps6rioYceAuD000/n2muvZZdddhlyVZIkdZ8hpQ9evNpsVXH++eczd+5cNmzYAECSIVcnSdJo6GtISXJMku8k2ZjkQ5Ps3zXJ8nb/XUnG+lnPoNx87/epKp5Zs4yLLrqIM888k8MOO2zYZUmSNFL6FlKS7AR8FngHMAc4JcmcCcPOAp6qqoOATwEf71c9g1QvvMCMu67mti9fxdKlS7nssst42cuctJIkaSr6+ZfzSGBjVT1cVT8FvgwsmjBmEXBN+/x6YEF2gPWQR9evYuOaGzj33HO55JJLXOKRJGkb9PMiHa8Gvjfu9WPAb25uTFVtSvI0sA/ww/GDkrwHeA/Aa16z5XvhdMHvLTyBsVe+nI9fuNSAIknSNupnSJnsr3Ntwxiq6krgSoD58+f/wv6u+eiiubBo7rDLkCRppPVzuecxYP9xr/cDfrC5MUl2BvYGftTHmiRJ0ojoZ0i5Gzg4yYFJZgCLgRUTxqwATm+fnwh8tao6
P1MiSZL6r2/LPe05Ju8FVgI7Acuq6oEkHwPWV9UK4CrgC0k20sygLO5XPZIkabT09e52VXUrcOuEbX8+7vlzwB/2swZJkjSavHiHJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqJEOKJEnqpFTVsGuYkiRPAv827Dp6MBv44bCL2IHYz+ljL6eX/Zxe9nP6jEovD6iqX5lsx8iFlFGRZH1VzR92HTsK+zl97OX0sp/Ty35Onx2hly73SJKkTjKkSJKkTjKk9M+Vwy5gB2M/p4+9nF72c3rZz+kz8r30nBRJktRJzqRIkqROMqRspyTHJPlOko1JPjTJ/l2TLG/335VkbPBVjoYeevnBJA8muT/J7UkOGEado2Jr/Rw37sQklWSkPwXQb730M8lJ7ffoA0muHXSNo6KHn/XXJFmd5J725/3YYdQ5KpIsS/JEkm9tZn+SfLrt9/1Jjhh0jdusqnxs4wPYCfhX4LXADOA+YM6EMX8CXN4+XwwsH3bdXXz02Mu3AjPb52fby+3rZztuT2AtsA6YP+y6u/ro8fvzYOAe4Jfb168Ydt1dfPTYyyuBs9vnc4BHhl13lx/AbwNHAN/azP5jgduAAEcBdw275l4fzqRsnyOBjVX1cFX9FPgysGjCmEXANe3z64EFSTLAGkfFVntZVaur6ifty3XAfgOucZT08r0J8JfAJ4DnBlncCOqln38EfLaqngKoqicGXOOo6KWXBezVPt8b+MEA6xs5VbUW+NEWhiwCPl+NdcDLk+w7mOq2jyFl+7wa+N6414+12yYdU1WbgKeBfQZS3WjppZfjnUXzPwNNbqv9TPJrwP5VdcsgCxtRvXx/vg54XZI7kqxLcszAqhstvfTyI8BpSR4DbgXeN5jSdlhT/f3aGTsPu4ARN9mMyMSPS/UyRlPoU5LTgPnAW/pa0WjbYj+TvAz4FHDGoAoacb18f+5Ms+TzOzSzfF9P8vqq+s8+1zZqeunlKcDVVXVxkjcCX2h7+UL/y9shjezfIWdSts9jwP7jXu/HL05L/t+YJDvTTF1uaVrupaqXXpLkaOACYGFV/c+AahtFW+vnnsDrga8leYRmnXqFJ89uVq8/6zdX1c+q6rvAd2hCi35eL708C/h7gKq6E9iN5j402jY9/X7tIkPK9rkbODjJgUlm0JwYu2LCmBXA6e3zE4GvVnsmk37OVnvZLk9cQRNQXO/fsi32s6qerqrZVTVWVWM05/gsrKr1wym383r5Wf8HmpO7STKbZvnn4YFWORp66eWjwAKAJL9KE1KeHGiVO5YVwLvaT/kcBTxdVY8Pu6heuNyzHapqU5L3AitpzlhfVlUPJPkYsL6qVgBX0UxVbqSZQVk8vIq7q8defhLYA7iuPff40apaOLSiO6zHfqpHPfZzJfD2JA8CzwPnVtV/DK/qbuqxl38G/F2SD9AsS5zhf+42L8mXaJYZZ7fn8fwFsAtAVV1Oc17PscBG4CfAmcOpdOq84qwkSeokl3skSVInGVIkSVInGVIkSVInGVIkSVInGVIkSVInGVKkHVyS55PcO+4xtoWxY5u7k+oUj/m19i6397WXiT9kG77GkiTvap+fkeRV4/Z9Lsmcaa7z7iSH9/Ce9yeZub3HlrR1hhRpx/dsVR0+7vHIgI57alW9geYGm5+c6pur6vKq+nz78gzgVeP2vbuqHpyWKv+/zsvorc73A4YUaQAMKdJLUDtj8vUk/9w+fmuSMYcl+UY7+3J/koPb7aeN235Fkp22cri1wEHt
exckuSfJhiTLkuzabr8oyYPtcf663faRJOckOZHmXk1fbI+5ezsDMj/J2Uk+Ma7mM5Jcuo113sm4m64l+dsk65M8kOSj7balNGFpdZLV7ba3J7mz7eN1SfbYynEk9ciQIu34dh+31HNTu+0J4Her6gjgZODTk7xvCfA3VXU4TUh4rL1E+cnAm9rtzwOnbuX4vw9sSLIbcDVwclXNpbni9dlJZgHHA4dV1Tzgr8a/uaquB9bTzHgcXlXPjtt9PXDCuNcnA8u3sc5jaC5t/6ILqmo+MA94S5J5VfVpmnuevLWq3tpe/v7DwNFtL9cDH9zKcST1yMviSzu+Z9s/1OPtAnymPQfjeZr7zEx0J3BBkv2AG6vqoSQLgF8H7m5vTbA7TeCZzBeTPAs8ArwPOAT4blX9S7v/GuBPgc8AzwGfS/KPwC29/sOq6skkD7f3I3moPcYd7dedSp2/RHOJ9iPGbT8pyXtofk/uC8wB7p/w3qPa7Xe0x5lB0zdJ08CQIr00fQD4d+ANNDOqz00cUFXXJrkLOA5YmeTdNLd8v6aqzuvhGKeOv2Fhkn0mG9Tey+VImhvKLQbeC7xtCv+W5cBJwLeBm6qq0iSGnusE7gMuAj4LnJDkQOAc4Deq6qkkV9Pc5G6iAF+pqlOmUK+kHrncI7007Q08XlUvAO+kmUX4OUleCzzcLnGsoFn2uB04Mckr2jGzkhzQ4zG/DYwlOah9/U5gTXsOx95VdSvNSamTfcLmv4E9N/N1bwT+ADiFJrAw1Tqr6mc0yzZHtUtFewE/Bp5O8krgHZupZR3wphf/TUlmJplsVkrSNjCkSC9NlwGnJ1lHs9Tz40nGnAx8K8m9wKHA59tP1HwY+Kck9wNfoVkK2aqqeo7m7qvXJdkAvABcTvMH/5b2662hmeWZ6Grg8hdPnJ3wdZ8CHgQOqKpvtNumXGd7rsvFwDlVdR9wD/AAsIxmCelFVwK3JVldVU/SfPLoS+1x1tH0StI08C7IkiSpk5xJkSRJnWRIkSRJnWRIkSRJnWRIkSRJnWRIkSRJnWRIkSRJnWRIkSRJnWRIkSRJnfS/uiz9ucYBKUYAAAAASUVORK5CYII=\n", 500 | "text/plain": [ 501 | "
" 502 | ] 503 | }, 504 | "metadata": { 505 | "needs_background": "light" 506 | }, 507 | "output_type": "display_data" 508 | }, 509 | { 510 | "data": { 511 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjEAAAImCAYAAACivNvXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nOzdd3Sc5Z3+/+uj3q3uIsuWewMXMMX0HtNJfimQZdkkBLakQLK7CdmUTcgm+aYSNmGzkAYkGwgpdIIxvdlgG4PBcpebJFu9l9GU+/fHjIRsySq2R9Izer/O8YmeZ+6Z+TzmxLrOXc05JwAAAK+JG+0CAAAAjgYhBgAAeBIhBgAAeBIhBgAAeBIhBgAAeBIhBgAAeBIhBgAAeBIhBsAxMbPNZnbeIG2mmVmrmcWPUFkjwszOM7PyXtd7zOyi0awJGE8IMUAMi/xS7YgEiCoz+62ZZRzP73DOLXLOvThIm33OuQznXPB4fndvhz3rQTO793g/K4CxhRADxL4rnXMZkk6SdIqkr/V+0cJi5d+C7mddKmmZpK+Mcj0AoihW/uECMAjnXIWkv0k6wcxeNLPvmNlrktolzTSzCWb2azM7YGYVZvZfvYd/zOwmM9tiZi1mVmpmJ0Xu9wyhmNmpZrbezJojPT8/idwvMTNnZgmR6ylm9piZ1ZvZTjO7qdf3fNPMHjKz+yPftdnMlg/zWQ9KWqVwmOn+3GQz+5GZ7YvU9r9mltrr9avN7O1I7bvMbGXk/id7PXeZmf3jcP/uAUQHIQYYJ8ysWNJlkjZGbv29pJslZUraK+k+SQFJsxXuxbhE0qcj7/2IpG9KukFSlqSrJNX18zV3SrrTOZclaZakh45QzgOSyiVNkfRhSd81swt7vX6VpAclZUt6TNLPh/msUyVdKmlnr9vflzRX4WAzW1KRpG9E2p8q6X5J/x75znMk7Ym8r1rSFQo/9ycl3dEd4ACMLkIMEPseMbNGSa9KeknSdyP373XObXbOBSTlKvxL/1bnXJtzrlrSHZKujbT9tKQfOOfWubCdzrm9/XyXX9JsM8t3zrU659Ye3iASps6S9GXnXKdz7m1Jv1I4VHV71Tn3VGQOze8kLRnGs7ZI2q9w+PjPyHeapJskfcE5V++ca4n8PXQ/342SfuOcW+2cCznnKpxzWyXJOfekc25X5LlfkvSMpLOHWA+AKCLEALHvGudctnNuunPuX5xzHZH7+3u1mS4pUdIBM2uMhJ67JRVGXi+WtGsI33Wjwr0dW81snZld0U+bKZK6g0S3vQr3jHQ72Ovndkkp3UNRg7jGOZcp6TxJ8yXlR+4XSEqTtKHX8z0duS8N8HxmdqmZrY0MfTUq3JuV319bACNrKP8oAIhNrtfP+yX5JOVHemYOt1/h4aGBP9C5HZKui0wU/pCkP5tZ3mHNKiXlmllmryAzTVLFcB9ggDpeMrN7Jf1I0jWSaiV1SFoUmRt0uH6fz8ySJf1F4WG0R51zfjN7RJIdr1oBHD16YgDIOXdA4WGSH5tZlpnFmdksMzs30uRXkv7NzE6OrGaabWbTD/8cM7vezAqccyFJjZHbhyyrds7tl/S6pO+ZWYqZLVa4B+f/jvNj/VTSxWa2NFLPLxWez1IYqbXIzD4QaftrSZ80swsjz15kZvMlJUlKllQjKWBmlyo8VwjAGECIAdDtBoV/aZdKapD0Z0mTJck59ydJ35H0B0ktkh5ReB7N4VZK2mxmrQpP8r3WOdfZT7vrJJUo3CvzsKT/dM6tPp4P45yrUXiy7tcjt76s8ETftWbWLOlZSfMibd9UZNKupCaF5
w5Nj/QUfV7hCcoNkj6u8ERjAGOAOecGbwUAADDGRK0nxsx+Y2bVZvbeEV43M/vvyB4Rm1iyCAAAhiOaw0n3Kty1fCSXSpoT+XOzpF9EsRYAHmfvn7/U359po10fgJEXtdVJzrmXzaxkgCZXS7rfhcez1ppZtplNjkwwBIBDOOf2SeIsJAA9RnNib5EO3aeiXIfuEwEAAHBEo7lPTH/7LPQ7y9jMblZ4yEkpaRknF5fMlCQ1dvjV3OHXCUUTolYkAACIng0bNtQ65woGb9nXaIaYcoV3yew2VeHlln045+6RdI8kLV++3K1fv16S9IOnt+qXr5Rp/Xcui3KpAAAgGsysvyNMhmQ0h5Mek3RDZJXS6ZKamA8DAACGKmo9MWb2gMLnl+SbWbnCB7ElSpJz7n8lPaXwGSQ7FT4b5ZPRqgUAAMSeaK5Oum6Q152kz0Tr+wEAQGzj2AEAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJhBgAAOBJUQ0xZrbSzLaZ2U4zu62f16eZ2QtmttHMNpnZZdGsBwAAxI6ohRgzi5d0l6RLJS2UdJ2ZLTys2dckPeScWybpWkn/E616AABAbIlmT8ypknY658qcc12SHpR09WFtnKSsyM8TJFVGsR4AABBDohliiiTt73VdHrnX2zclXW9m5ZKekvS5/j7IzG42s/Vmtr6mpiYatQIAAI+JZoixfu65w66vk3Svc26qpMsk/c7M+tTknLvHObfcObe8oKAgCqUCAACviWaIKZdU3Ot6qvoOF90o6SFJcs6tkZQiKT+KNQEAgBgRzRCzTtIcM5thZkkKT9x97LA2+yRdKElmtkDhEMN4EQAAGFTUQoxzLiDps5JWSdqi8CqkzWZ2u5ldFWn2r5JuMrN3JD0g6RPOucOHnAAAAPpIiOaHO+eeUnjCbu973+j1c6mkM6NZAwAAiE3s2AsAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJE
AMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADyJEAMAADwpqiHGzFaa2TYz22lmtx2hzUfNrNTMNpvZH6JZDwAAiB0J0fpgM4uXdJekiyWVS1pnZo8550p7tZkj6SuSznTONZhZYbTqAQAAsSWaPTGnStrpnCtzznVJelDS1Ye1uUnSXc65BklyzlVHsR4AABBDohliiiTt73VdHrnX21xJc83sNTNba2Yr+/sgM7vZzNab2fqampoolQsAALwkmiHG+rnnDrtOkDRH0nmSrpP0KzPL7vMm5+5xzi13zi0vKCg47oUCAADviWaIKZdU3Ot6qqTKfto86pzzO+d2S9qmcKgBAAAYUDRDzDpJc8xshpklSbpW0mOHtXlE0vmSZGb5Cg8vlUWxJgAAECOiFmKccwFJn5W0StIWSQ855zab2e1mdlWk2SpJdWZWKukFSf/unKuLVk0AACB2RG2JtSQ5556S9NRh977R62cn6YuRPwAAAEPGjr0AAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTCDEAAMCTEoba0MyKJE3v/R7n3MvRKAoAAGAwQwoxZvZ9SR+TVCopGLntJBFiAADAqBhqT8w1kuY553zRLAYAAGCohjonpkxSYjQLAQAAGI6h9sS0S3rbzJ6T1NMb45z7fFSqAgAAGMRQQ8xjkT8AAABjwpBCjHPuPjNLkjQ3cmubc84fvbIAAAAGNtTVSedJuk/SHkkmqdjM/oEl1gAAYLQMdTjpx5Iucc5tkyQzmyvpAUknR6swAACAgQx1dVJid4CRJOfcdrFaCQAAjKKh9sSsN7NfS/pd5PrvJG2ITkkAAACDG2qI+WdJn5H0eYXnxLws6X+iVRQAAMBghro6ySfpJ5E/AAAAo27AEGNmDznnPmpm7yp8VtIhnHOLo1YZAADAAAbribkl8r9XRLsQAACA4RhwdZJz7kDkx1pJ+51zeyUlS1oiqTLKtQEAABzRUJdYvywpxcyKJD0n6ZOS7o1WUQAAAIMZaogx51y7pA9J+plz7oOSFkavLAAAgIENOcSY2QqF94d5MnJvqMuzAQAAjruhhphbJX1F0sPOuc1mNlPSC9ErCwAAYGBD3SfmJUkv9bouU3jjOwAAgFEx2D4xP3XO3Wpmj6v/fWKuilplAAAAAxisJ6b7rKQfRbsQAACA4RgwxDjnug95XC+pwzkXkiQzi1d4vxgAAIBRMdSJvc9JSut1nSrp2eNfDgAAwNAMNcSkOOdauy8iP6cN0B4AACCqhhpi2szspO4LMztZUkd0SgIAABjcUDesu1XSn8ys+7ykyZI+Fp2SAAAABjfUfWLWmdl8SfMkmaStzjl/VCsDAAAYwJCGk8wsTdKXJd3inHtXUomZXRHVygAAAAYw1Dkxv5XUJWlF5Lpc0n9FpSIAAIAhGGqImeWc+4EkvyQ55zoUHlYCAAAYFUMNMV1mlqrI0QNmNkuSL2pVAQAADGKoq5P+U9LTkorN7P8knSnpE9EqCgAAYDCDhhgzM0lbJX1I0ukKDyPd4pyrjXJtAAAARzRoiHHOOTN7xDl3sqQnR6AmAACAQQ11TsxaM
zslqpUAAAAMw1DnxJwv6Z/MbI+kNoWHlJxzbnG0CgMAABjIUEPMpVGtAgAAYJgGDDFmliLpnyTNlvSupF875wIjURgAAMBABpsTc5+k5QoHmEsl/TjqFQEAAAzBYMNJC51zJ0qSmf1a0pvRLwkAAGBwg/XE9JxUzTASAAAYSwbriVliZs2Rn01SauS6e3VSVlSrAwAAOIIBQ4xzLn6kCgEAABiOoW52BwAAMKaMuxCzfk+92ruY3gMAgNeNqxBT2+rTR+5eo8ferhztUgAAwDEaVyFmX327nJM6/cHRLgUAAByjcRViyhs6RrsEAABwnIyrEFNBiAEAIGaMrxDT2D7aJQAAgONkfIUYemIAAIgZ4yrEMCcGAIDYMW5CjHNOFY2EGAAAYsW4CTGN7X61d7G0GgCAWDFuQgxDSQAAxJZxE2JYmQQAQGwZNyGGnhgAAGLLuAkxTOoFACC2jJsQU97QocLM5D73v/vUFr2wtXoUKgIAAMdi3ISYioYOFeWkHnKv0x/UL18p0wvbCDEAAHjN+AkxjR0qyj40xJTVtMm5USoIAAAck3ERYlo6/Wrq8PfpidlZ0zpKFQEAgGM1LkJM96TeqTlph9zfWU2IAQDAq8ZHiIksr5562HDSLkIMAACeFdUQY2YrzWybme00s9sGaPdhM3NmtjwadXT3xPQZTiLEAADgWVELMWYWL+kuSZdKWijpOjNb2E+7TEmfl/RGtGopb+hQUkKc8tKTeu4FgiHtrm2L1lcCAIAoi2ZPzKmSdjrnypxzXZIelHR1P+2+LekHkjqjVUhFQ4emZqcqzqzn3v6GDnUFQ9H6SgAAEGXRDDFFkvb3ui6P3OthZsskFTvnnhjog8zsZjNbb2bra2pqhl1IeWPfPWIYSgIAwNuiGWKsn3s9u7KYWZykOyT962Af5Jy7xzm33Dm3vKCgYNiFVDT03SOmO8QkxY+Luc0AAMScaP4GL5dU3Ot6qqTKXteZkk6Q9KKZ7ZF0uqTHjvfk3k5/ULWtvn5DTGFmstKT44/n1wEAgBESzRCzTtIcM5thZkmSrpX0WPeLzrkm51y+c67EOVciaa2kq5xz649nET17xOT23ehudmHG8fwqAAAwgqIWYpxzAUmflbRK0hZJDznnNpvZ7WZ2VbS+93Dde8QUZb+/0Z1TeI8YQgwAAN6VEM0Pd849Jempw+594whtz4tGDeUNffeIqWr2qdUXIMQAAOBhMT+rtaKxXfFxpomZyT33uif1ziogxAAA4FWxH2IaOjR5QooSeq1C2hU5+JGeGAAAvCv2Q0xj3+XV++rblZmcoMJevTMAAMBbYj7ElDf03eguGHKaVZghs/62sgEAAF4Q0yHGHwypqrmzz+nVEkNJAAB4XUyHmINNnQo5aWpOWp/XRjPEbCpv7Nm/BgAAHJ2YDjHdQWFydkqf12aP0sqk5k6/rrtnrX7+/I5R+X4AAGJFTIeYmhafJGliVj8hZpR6Yv66oVxtXUH5ApygDQDAsRgXIaYg49BVSEkJcSrO7TvEFG2hkNP9a/aO+PcCABCLYjrEVLf4lBhvyk5LPOT+zPx0xceN3MqkLQea1eYL6LVdtSqrbRux7wUAIJZF9diB0VbT4lNBRnKfpdSzRnAoqbyhXVf87FV95dL5emN3vfLSk0Y0QAEAEKtiuiemptWngn42tBvJSb1/Wl+uYMhpV02rnttSpWtPLVZSwrH9te+sbtW196xRXavvOFUJAID3xHSIqW7uPCTEZKcl6t8umauPLJ86It8fDDn9af1+SdKjb1dKkv7utOnH9JnOOX3zsc1aW1av3QxNAQDGsZgOMbWtPhVkvr8yycz02Qvm9LtvTDS8urNWlU2dkqT2rqAuWThJU/rZeO9wWw406+GN5f2+9uyWar26s/a41gkAgBfFbIgJBEOqa+vqdzhppPxx3T5lpbw/7eiGFYP3woRCTl/449v61uOlPfd2Vrfqe3/bIl8gqO88WarEeObUAAAQsyGmr
q1LzmnUQkxdq0+rS6t0zbIiSeF9aVbMyhv0fU9vPqitB1vkXPjaOaf/+Ou7uvulMt2xeof21LXrE2eURLFyAAC8IWZDTPceMaN1UvXDGyvkDzp9/LRpOnl6jm69aM6gB06GQk53PnvoTr7Pb63Wm3vqJUm/eXW3LphfqHPmFkStbgAAvCJml1j3bHQ3CiHGOac/rtuvpcXZmj8pS3/55zOG9L6n3jugbVUtmpSVog5/UMGQ0/ef3trzesg5ffXyBark3CUAAGK3J6a6JTyh9vDdekfCW/sataO6VdeeUjzk93T3wswuzNDFCydKkv7yVrm2V7XqtBm5kqQbVpRo1iid+QQAwFgTsyFmNHtiHlq3X2lJ8bpiyZQhv+fJdw9oR3WrbrlwjuLjTP5gSHes3q4lxdn60sp5On1mrm65cE4UqwYAwFtiOsRkpSQoJTF+RL+3zRfQ45sqdcXiycpIHtpoXTDkdOdzOzSnMEOXnThZUnhJ9oGmTn3l0vk6eXquHrx5hSYcdnwCAADjWcyGmOqW/nfrjba/vXdQ7V1BfWT50IeSnthUqZ3VrbrlojmHHElwwfxCnT5z8BVNAACMRzEbYmpafCrstdHdSHl4Y7mm5aZp+fScIbUPhZx+/vxOzZ2YoctOCPfCxJnJTPrSynlD/t7tVS3yB0NHVTMAAF4UuyHmCOcmRdPBpk69vqtO1ywrGnQ5dbdnSqu0o7pVnzl/tuIivTCfOKNEv7phueZPyhrSZ7ywrVqX3PGyVm0+eNS1AwDgNTG5xNo5p+pm34jvEfPo2xVyTvpgZIO7wTjn9D8v7tT0vDRdHpkLI0nT8tI0LW9oRyO0dwX0tYffC//sCw6/aAAAPCome2LauoLq8AdHvCfm4Y0VWlqcrRn56UNq/+rOWm0qb9I/nTtLCfFH95/ijtXbVTGEfWNc9xbAAADEiJgKMY+/U6mS257UtoMtko5uefUX//i2vvX45iG3X7enXsv/a7Ve3VGrrQdb9KGThtYLI0l3vbBTE7OSh/We3jZXNus3r+0ZdAdfXyCoD//vGn3vb1uO6nsAABiLYirE3P3yLknSxn0NkjTsib3BkNOqzQe1s7p1yO958M39qm3t0t0v71JCnOmKxUPbG+bt/Y1aW1avm86eqeSEo1sG/qNntiknLUm3rZw/cLtV27Rhb4N217Qd1fcAADAWxVSIae4ISJJ8gfAqneH2xOyoblFb19DnlXQFQlpdGp5M+9rOWp03r1C56UlDem9ZTZty0hL18dOmDavG3lo6A/rGlQsH3D/mlR01+uUru4/6OwAAGKtiamJvU4dfklTf1iVp+CFm477GYbV/bVetmjvDwSnkNOxhoU+dOUNpSUf/n+DcuQW6cvFkVTZ19vt6fVuX/vWhdzS7MEMBll8DAGJMbPXEdIZDTHWLT4nxpuzU4e1w+/YwQ8xTmw70/JyZkqAL5hcO+b0ZyQm6YUXJsL6v27xJmbp44UR954MnHHEpt3NOX/7LJjW0d+nOa5eO+M7FAABEW0z1xHQvwKlp6VR+RnLPvitD9fb+oYcYfzCkZ0qrlJmSoJbOgC4/cfKQg8IF8ws1Mz/9qI8RKMxM0S9vWD5gmwfe3K/VpVX66mULtGjKhD6v/37tXjV3+vUv580+qhoAABhtMdUT063mKI4caOn0a3t1y5Dbv76rTk0d/p49YT588tQhv/f2q0/QJ86cMaz6hmN3bZu+/USpzpqdrxvP6vs9L2+v0dcffU8Pv1URtRoAAIi2mAwx1S3D3+ju3fIm9beVyoa9DXr07b6/7J/adEAZyQn6yqUL9MwXztHyktyjLfe4CgRD+uJDbysx3vSjjyzp0xt1oKlDt/7x7X6fFQAAL4mp4aRuLZ2B4U/qjQwlzSo4dKO6H67aqv31Hbp66fuTdv3BkFaVHtSFCwqVmhSvuRMzj73o4+Tul8u0cV+j7rx2qSZNOHSJeSDk9Nk/bJTPH9TCyVmctQQA8LSY7ImRpIKM4
a9MmlmQrgm9JgN3dAX11t6+82TWltWpsd2vy3odFTAWbK5s0h2rt+vyxZN11ZK++9W8uK1aG/Y26P/9f4tVkn/osQYdXcFh7Y8DAMBoi5kQEwodOj5SkDX0je6cC0/qXVqcfcj99Xvr1dVPb8VT7x5QelK8zh1kp9yRdv/avcpNT9J/Xd3/qqWQCx8ueeVhAacrENI//OZNffCu10aqVAAAjlnMhJgWX+CQ6+H0xFQ0dqi21adl03IOuf/azro+bQPBkFZtrtIFCyaOuWXLzknf//Bi5fSz4V5OWpKWTcvWf1y2oM9r33p8s97cU692PwdIAgC8I2bmxDRHNrrrNpw5Md1Lq5cVZ+vht8p77r++q7ZP27Vl9apv69LlJ046ykqPv+SEcBb9+GnTdP68/veq+c0nTlF8nCkp4dDc+vu1e/V/b+xTTlpiz8Z9AAB4QcyEmKbDQsxwVifVt3UpJTFO8ya9P0G3qd2vdyua+rRdtfmgUhPjde7coW9sF235Gcl69DNnauGUrCO2SU3q22t0oKlT33xss86fV6D5k7N0z8tl0SwTAIDjKmaGk7p36+023NVJJxZNUGL8+38da8rq5Jw0Pe/9CbDOOT27pUpnz8nvNxSMpiXF2YfUPxStvoCm5aXpzuuWKf4IO/8CADBWxU6I6Xh/KCQzJWHY81UOn9T7+q5apSbGH3J/c2WzDjR16uKFE4+t2DEgNTFBmSkJ+uUNy5WVcnQ7BwMAMJpiZjipd0/McDe6k9TPpN5anTIj95DejdWlVYozDeuMpLHqa5cv0BcunqOpOWmDNwYAYAyKoZ6Y90PMcIeSpEN7YqqaO7Wrpk1nzso7pM2zW6p08vQc5Q1zD5qxKCc9iQADAPC0GA0xQ98jRgr33Ezutbvt9qrwpm9nzs7vuVfR2KHNlc26aIH3h5IAAIgFsRNiei0PHu5w0rJp2X02h8tOS9TCye+v9nluS5UkxcR8mKFoavfrsXcq5ThkCQAwRsXMnJimoxxO+tdL5mlRP0uTV8zMO+TwxNWlVZpZkK6ZBRnHVqgHdHQF9cl739Rb+xp18vQcFWWnjnZJAAD0ETs9Mb1DzDDmrFx/+vQ+k3ol6YxeQ0mtvoDWltXp4nEwlBQMOX3+wY16a194A8BgsP+eGOecnt9a1Wd/HgAARkrshJjeq5Oyjn3ibe9JvU0dfvmDLuaHkpxz+s/H3tPq0iqdWpI7YLsfrtqmT927Xo+9XTGCFQIA8L6YCTFHO5zUn8kTUjQjP/2Qe3npSf322MSSkJN+v3af/hJkkkAAACAASURBVOncWfrYKcVHbPez53fqf17cJUnyH6GnBgCAaIuZOTHNHQGdPjNXxTlpmpl/9PNWPnTSVCXEWZ+JvhfML1R8XOzvanvN0in60gfm6eGN/few3P3SLv1k9XZ9YNFErdpcNcLVAQDwvtgJMZ1+nTBlgr52xcJj+pzrT5/e7/2LYnwo6cIFhWrvCuq2S+cfMqG5t3tf263v/W2rrlwyRd+6apFWbV49wlUCAPC+mAgx/mBI7V1BTUg9/tvnx1n4lOiz5+QP3tjDlk3LGXC47A9v7NM3Hy/VBxZN1E8+ukTtvuAIVgcAQF8xEWK6VyZlRSHEfOKMGbpg/kSlJcXEX9VR+cuGcn31kXd1/rwC/fd1yyJHMRBiAACjKyZ+M3dvdJeVevwfZ+GULC3sZx+Z8eLxTZX68TPbdMasPP3i+pOVnDC2Tu8GAIxfMbE6qbsnJhrDSePdD1dt0/LpufrlDcuHfTI4AADRFBMhpnt5dVYKIeZ4W1qcrd988pSjGk7bsLdB71U0RaEqAABiJMR0b3QXjTkx49WKWXn61JkzdN+nTlVG8tACzPNbq7Svrl2S9ND6/fro3Wv0g1XbolkmAGAci405MR2ROTH0xBw3U7JT9Y0rh7Zc3TmnO5/boZ8+u0OfOKNEhVnJ+sHT4fASDIWiWSYAYByLjRDTyZyY0RJyTrc/UarfvrZHkvTwx
go1dfh19dIp2lPbNrrFAQBiWkwMJzV1+JUYb0pJjInH8ZR7Xi7Tb1/bo0+eWaKctEQ1dfj1qTNn6I6PLo0sxQYAIDpioyemw6+slMQ+RwUg+qpbfLr1ojm65cI5ck6alpumT55Zwn8LAEDUxUaI6QwwlDTC0pLjtbQ4W9csnaJPnDlDkvTNqxaNclUAgPEkJkJMU4dfmYSYEZUYH6dHPnPmaJcBABjHYmLSQng4KSbyGAAAGKLYCDGdfoaTPGBfXbve2tcw2mUAAGJEbISYjgAb3Y1xL2yr1uX//YpuffDt0S4FABAjPD8G49z7q5Mw9jgn/eqVMn33qS0KOSmDYT8AwHHi+d8ogZCT5KJygjWO3bo99Xp9V50+sGiiEuPjtGEvw0kAgOMjJoaTJHbrHYvizOQPOn3ugtn6xd+drLQkTsEGABw/MdN9wXDS2PO5C2erKxDShQsmjnYpAIAYFDshhp6YMefsOQWjXQIAIIYxnAQAADwpZkIMm915UzDkRrsEAIBHxU6IoSfGU1p9AX3ugY067bvPKRAMSZI6uoJ6ZUeNnCPYAAAGFzPdF0zs9Y6tB5v1L79/S2W1bZLCy+TLalv02T+8pe1VrXr2i+dodmHmKFcJABjrYqInJjUxXkkJMfEoMa+urUvX3PWaWnwBXbIwvGrpD2/s05U/e1U7q1slSb5AaDRLBAB4REz85mejO28wmboCIS0rztGTnz9Ly6blSJJuf6JUp87I1e1XnzDKFQIAvCQmfvszlOQNHz2lWHMnZeofVkxXQnycJqQmKiHO9K+XzNM/njNTq7dUDfszWzr9SoyPU0oiG+kBwHgTEyGG5dXecPL0HJ08Pafn+tpTivWBRROVl5E87M8KBEP67Wt79OPV23T9adP1tSsW9tuuvKFdzR0BLZySddR1AwDGppgIMaxM8qa4OBt2gNlb16bGdr++8eh7eqe8SVJ4ns3hOv1B3f1Sme56cacmpCZq3VcvOi41AwDGjtgIMewRE/OaOvy6/fFS/eWtcklSXnqSfnbdMv1g1daeNqGQ0+ObKvVeRZOeKa3S3rp2ZaclqtMfHK2yAQBRFBO//RlOik3OOZUeaNaBxk599ZF3VdPikyRdtKBQP/zwEuWkJ+mHq7ZJCi/b/trD72l95JTsmQXp+v2Np+m5rVX684byUXsGAED0xESIYTgp9lQ0dujGe9dp68EWSdK8iZn61Q2n6MSpE/q0fXVnrR5/p1KZKQn64LIiLZk6QdedNk3JCfF6buvwJwsDALwhNkIMq5Niyl/fqtAf1+1Xqy8gSbr5nJn6t0vm9bsXUEK8qabep48un6rbLl2g3PSkkS4XADBKYiPEsE9MTPn1q7t1+sxc/fDDS1ScmzZg2x9+eLHizHr2nBlM95EGZnbMdQIARldM/PZnTkxsmFWQoeLcVN145gzdsKJEcXGDB42Tp+cO+fPfq2jSd57cos2VTXrjPy5SalK8Ov1BvVvRpOXTcwg2AOAxMRFiGE6KDbMLM/TKly447p/rD4b07396R39+q1zdZ0s2d/r1xKZK/WT1dh1o6tQTnztLJxT1nW8DABi7YiPE0BODAXT6Q3rk7QrddPZMTUhN1A9XbdPH7l6jPXXtKswM71PT3sUybADwmqiGGDNbKelOSfGSfuWc+3+Hvf5FSZ+WFJBUI+lTzrm9w/0ehpNwJJeeMFn+YEg3nT1T0/PS9dD6/ZKkkJN+/vFlyk5N0vW/fmOUqwQAHI2ohRgzi5d0l6SLJZVLWmdmjznnSns12yhpuXOu3cz+WdIPJH1suN/FcBKO5NQZuTp1xvvzZq5YPFn5GUk6a3aBkhLi9NrO2mF/5p7aNmWlJrISCgBGWTRPsT5V0k7nXJlzrkvSg5Ku7t3AOfeCc649crlW0tSj+aIMduzFEKUlJeiC+RP7Xa49mHfLm3TT/et13o9e1LefKB38DQCAqIrmb/8iSft7XZdLOm2A9jdK+lt/L5jZzZJulqRp06b13
L/0hMlKT05Q/BBWsQBHa+O+Bv3s+Z16fmu1slISlJGcoJbOwGiXBQDjXjRDTH/JwvXb0Ox6Scslndvf6865eyTdI0nLly/v+YwTp07odwdX4FhUNnZoUlaK3trXoDuf26FXdtQqOy1R/3bJXN1wRomuvXvtaJcIAFB0Q0y5pOJe11MlVR7eyMwukvRVSec653xRrAcY0BtldfrOU1u0KXI6thQ+aPIrl87X9adPV3oyw5YAMJZE81/ldZLmmNkMSRWSrpX08d4NzGyZpLslrXTOVUexFmBAt/1lk8pq25QU//5cma9dvkB/d9p0pSbFD/jerkBIZlJifN95Nk3tfj24bp82VzbrRx9ZcsS5OO1dAaUmxrPhHgAMQ9RCjHMuYGaflbRK4SXWv3HObTaz2yWtd849JumHkjIk/Snyj/c+59xV0aoJOFxKYjigNHcG9LXLF+j606frYFOnJk1I6XntSBrauvS7tXt13+t7NG9Spv5w0+naX9+u37y2W6WVzZo7MVN/3lCuDn94D5ovXDxXNS0+3bdmj17eXqM//dMKVTZ26Hdr9urF7TW689plumrJlGg/MgDEDOs+S8Yrli9f7tavXz/aZSBGOOe0pqxOy4pzBu1x6XbZna/oYHOn2rsC6vSHlJYUr7SkBJ1SkqNVmw8qFPm/VFJ8nK5eOkX5mcn6xYu7NC03Tfvq25UUH6euYEjpSfFq6woqNz1J9W1d+trlC/Tps2dG8WkBYOwxsw3OueVH814G+TGumZnOmJU/rPdkpSZoR7Vf1ywt0k3nzNTPn9+px96p1Gs7a/WP587SRQsKVVrZrEtPnKz8jGS9sLVav9AupSXF63sfOlGnzcjVyjtf0YlTJ+jvTy/Rill5Ounbq6P0hAAQuwgxwDD9z9+drJBzys8IH1nwz+fN0llz8nX5iZN7Jv/2Ppjy3LkFevXL56soO7VnzsvW21f2HHDZ3Okf4ScAgNhAiAGG6fCdehdMztKCyVlHbB8XZ5qak9bnXn9CIae1ZXV6fFOlrlw8RYuLs/Xkpkr99a0KnTU7X5+7cM6xPwAAxAhCDDBGPLO5Svev2at99eFNrJ/fWq3mjkDPxOA4M0IMAPQSzWMHAAxBQpwpzqQ399RrSnaKfvqxpZpVkK42X1DXLJuiv/7LGTqlJGe0ywSAMYeeGGCUpSUl6A83na6JWSmakZ8uSbpwQaES4uJ6VkxZvxtgh9W1+vTEpgNq7vDTUwNgXCHEAGPA6TPzDrnOHOBk9q5ASC9sq9bzW6pV1dKpV3bUKhhZ133j2TOUlpSg6pZO7axq1Rmzh7fyCgC8hBADeERNq09fe+RdPbHpgBrbwyuairJTdfM5M9XY7tcDb+7TH9ft1+rSKq0tq1PISc9+8VzNLsyQFN49OCs1YUi7AjvntOVAiyakJWpCaqJe2laj1aUHlZmSqG9fc0JUnxMAhooQA3hAXJy0s7pV5Q3tumThJJ0/v0DT89K1dGq24uJMv3y5TJL0rcdLNSM/XRfML9SzW6q1ubJJT2yq1JObDmhHdav+cNNpR9wXJxhyWr+nXqs2V2nV5oOqaOyQpJ7N+SQpPSmeEANgzCDEAB5w26ULtKe2TRctnKiMfg6ivHzxZHUFQzp3boEWTcnSs1uq9eyWat3y4Nsyk+ZPCi8Br23tkhTuaSlv6FBOepLeKKvTM5ur9OyWKtW1dSkpIU5nz85XyDn5AiF9cFmRLlk4UX9776D+tH7/iD43AAyEEAN4wNLibC0tzj7i61OyU/WZ82f3XC+blq1rlk7RSdNztHLRJDV3BnTRT17SlgPNeq+iSX9774D213f0tM9MTtD58wv1gUWTdO68gn6D0urSquP7UABwjAgxQAzKz0jWT69d1nPd4muVJP3ixV1KjA8ftVDX2qWrlxbpA4smasWsPCUnDO3sqG7OOQVCrt/TuwFgJBBigHFgRl66br1ojkry0nXBgkJlDbD6aSAhJz23pUrPb63Wi9tq1Nzp1
xv/caHSkgb+pyQQDOmd8ka9ubtBlyyaKJ8/pFd31mjNrjpdsmiSrjt12lHVA2B8I8QA40BcnOnWi+Ye02eYSR3+oG68b73SkuI1KStFLZ0BtXYG+oSYhrYuvbKzVo9srFBdW5fKqlvV4gtIkr7/9Nb367LwTsSEGABHgxADYEg+dNJUJcbHacWsPJ06I1d/3lCurz78npyk7VUtenl7jX63dq8S4kxltW1y4a1rlJ4Ur6uWTtHZcwr0zOaDCjnpnLkFOmt2vj59/7pRfSYA3kaIATAkRzro8vL/fqVn1VN3u1svnKtz5uZr4ZQsJcXH9exNc9mJk/v97EAwpE0VTVqzq07T89J0xeIpPfcDIaeUxOHN1wEwPhBiAByV2QUZmjIhRUuKs3Xu3AKdM7dAU7JTh/05b+6u19LbV6s1Mtw0MStZFQ0dWlNWp3W765WWnKB1X73oeJcPIAYQYgAcldNm5un1r1x4TJ+xtDhbHV1BrZiVpxUz8/XYOxVatblK3/vbVs0qSFdRTqq2V7VqbVmd3iir1xu761TX2qU///MKZaYk6kBTh3bXtOm0mXmKjxt8J2IAscVc98C1RyxfvtytX79+tMsAEAX769v1XkWTTi7JUWFmin767Hb99NkdksITi/PSk1Xb6tMF8wu1vapF5Q3hvW7+79On6UzOiQI8ycw2OOeWH8176YkBMGYU56apODet5/qapUUKOWlx0QSdUpKrdyuadP2v39A7+xt1SkmuLlowUfe+vkcdXcFRrBrAaCHEABizSvLT9cWL318afubsPL319YuVk5YoM9O75U269/U9Pa8751Tf1qXc9KQhHXQJwNsIMQA8w8yUm57U5/4Tmyr15w3l2rCvQTUtPv3oI0v04ZOnjkKFAEYS+4UD8Kz05PDS60ferlTpgWadOiNXklTf5hvNsgCMEHpiAHjWzIIMPfG5s1SYlazCzBS1+QJ6ctOB0S4LwAghxADwtBOKJox2CQBGCSEGQMwJhqStB5v1zv5Gba5s1tVLp2hZcY5217Xp3fImzZuU2e/uwwC8hRADIOZ8/+mthxw0ef+avcpMTug5hPLM2Xn6v0+fPlrlAThOCDEAYkZaUrw+cUaJJGlJ8QQtmZqt/35uh7ZVteqkadlaMjVbv319j/xBb23yCaB/hBgAMcPM9M2rFh1y76fXLjvk+q8by9XhD+nN3fV6t6JJmyuaVJKfrs9fOKffzwyFnDr8QaUn888lMNbw/0oA44rJ9M7+Rn307jWSpPi48N4zn79wjnyBoHZUtaq0slmbK5u0ubJZWw40q8Mf1MtfOl9Tc9IG+XQAI4kQA2Bc+dwFs3XGrDwtKsrSCVMm6I5nd+iBN/fp0jtf0Y6qFgVC4aGm9KR4LZicpZOm5+iVHbVaXVqlhPg4bTnQrMrGDn3zykUqyU9Xqy+gPbVtml2YIZ8/pK0Hm7WjulWnz8zV7MJMdfqD2lndqsLMZBVmpYzy0wOxhRADYFw5Y3a+zuh1WOSiKVnKS09SYWayzptXoEVTsrRoygRNz01TXJzpxW3VemVHrb71eOkhn3Pethc1LTdN++rbj/hdM/PTtaeuTSEnnTojVw/94wo551TT4pNMavMFtb2qpSc83XLhnJ7jEgLBkBLi+9+PtCsQUkVjh4qyU5WUwJ6lGL84xRoABtDpD+pPG8o1KStFCyZnKjMlUR+442XFx5mWTsvWgkmZemLTAc2flKl5k7I0f3KmfvPqbpXVtOmEoizNm5SlZzYf1NaDLTq1JFfbq1vU2O7v97s+sGiimjr82rivUYGQ06OfOVPOSTuqW7SzujX8p6ZVe+vaFQw5feGiubrlov7n8gBecSynWBNiACDKfvD0Vv3q1d1aXDRBcyZmKs7Cc3FOKJqgeRMztbe+XZ9/YKMS48P3Gtv92l3bdshnJMSZSvLTNbsgQ7MLM3TPK2W6/rTp+saVC0fpqYDj41hCDMNJABBlX1o5X19aOf+Iry8pzta5cwuUl
ZIgM1N7V0A/WrVdOWmJmjMxHFqm56Ursdfw0n2v75GTU3VLp8pq2rSntk0nTc/R7IIMHWjuVFlNqyZPSNHswsyReERgVNATAwAeNPMrTyrUzz/fKYlx6vSHeq7/5bxZ2l3bpt21bZqQmqgHbz69Z94NMBbQEwMA48yXVs5XZWOHZuana2ZBhl7cVqPdta2aWZChmQXpuv/1vdpW1aJ7Xi7TtLw0+YMhbT3YIuckM6nNF1CHP6j8jOSez3TO9QScrkBIB5s6FXROe+vatK++XeUNHbpi8WQtnpotSWrvCiglIV5xceH3dPqDijPrmWzsnJNz6nldCu+70/saOBb0xABADPIFgqpq8mlKdooS4uN057M7dMez23Xy9Bztq28Pr5CSdOH8QrV3BbWtqkX1bV3KS09SalK8yhs6jvjZJ0/P0d66dtW2hj/j1JJc7atv18HmTknSykWTtK++XVsPNivkpIsXTlRVc6e2HmhRVzCkb1yxUIFQSPvrO9TQ3qUvr5yv4tzwHjyd/qBSEuOj/LeDsYSJvQCAAT1bWqWvP/qeinPSVJKfprrWLj23tVrpSfGaNylT2WlJen5rtU4oytLM/AyV5KWpod2vxVMnqCQ/XdNz0/T1R9/Thr2Nml2Yrum56Xp8U6WSE+I0pzBTxblpeml7jWpbfZpVkK5puWkqq23T3rp2FeemqiQvXZWNHdpV09ZvfScUZam8oUON7X4VZafqzNl5Km/oUGVjh25YUaJPnTVjhP/GMFIIMQCAYRtoL5qj1XtI6nDBkNMbu+uUlZKo4tw0xceZrvr5qwqGnGbkp2tqTqp+v3afJKkwM1lFOanaXNms7NRE/cdlC1TR2KGKxg6dUpKjDy6b2vN9hw9ZwVsIMQCAmNDpD0pSz5DSFT97Re9VNPdpd8asPFU2dmhPXbvmTszQ07eco5pWnyobOxQfZz3zdnrzB0OqafGpudOvOYWZiif4jAlM7AUAxITD58Pcee0y7a9v19ScVE3JTtVvX9uj3762W53+oE4omqA9de3aXtWqeV//2yGnk3/8tGkKBEOqavapusWn6uZO1bV19bz+048t1TXLikbsuRAd9MQAADxrU3mj7l+zVwWZyZoyIUXbqlp6hqQmZiVrYlZKz7lVhZnJSkuK13ef2qpvX71If7+iZHSLhyR6YgAA49Tiqdn60UcOHTr60sr5Sk9K6He4qLbVp+8+tVXbqlr00vYaVTV1qqbVpwvmFyo1MV5VzZ2qavFpydQJmp6Xrq5ASDWtPmWlJCgzJVHOOTW2+xUfb8pKSRypx8QREGIAADFloHCRGBeeyPz7tft6emwk6YertvVpm5+RpNrW94egpuakqrrFp65ASBNSE7Xx6xczoXiUEWIAAOPGhLRE/fKG5er0BzVpQoomZaXojd31qmru1MSs8PXq0oPa39ChiVkpmpiVrHf2N6qtK6ii7FQVZiWrtLJZr+yoVcg5xcnU6Q8qEHLKSA7/Sm3zBdTeFVRBZvIg1RyZc06+QEj1bV2qa+1SXZtP9W1d4eu2LtW1+rS3rl1XLy1Smy+g2laf9tS1afHUbMWZqbbVp4PNnTpnTr4KM1NU0+pTXWuXzptXoAWTsySFV4vFmY64miwQDCnonJIT4hUIhtTQ7pc/GNKU7NRD6hzNHaCZEwMAwDD87Lkd+vHq7ZqZn666ti41dYRPJc9MSVAw5NTeFV5h9ZnzZ2l6XrpqWnwqb2hXc2dASfFxqmnxqbKpQ0nxcTqlJFc1LT5Vt3Rqy4EWzS7MUE2Lr2fjwP4kxJkC/Z050UtqYrw6Iiu9DjdvYqbq2nw9vUz/sGK6Gjv8amj3q7G9S43tfjW0d6mlMyApfJSFLxBSd1zITE5QQVayqpo61dYVVFZKguZPylJtm09N7X59aeU8feyUaUP++2RODAAAI+T0WXk6bWeuctKSVJiVLH8wpI37GjVnYqYKM5PV3hXQA2/u110v7Orz3qk5qSrITFZZZNO/g82dK
shI1q6aVs2flKX8jCTNn5SpQMipJC9dE7OSlZuepLyMJOWmh3/OSgn/6t5yoEXJiXEqyExWZnKCyhs65JyUn5mktKQEbdjboLpWn/Izk5Wfnqz71+zRW/salJ+RrJNLcvTHdfsVDDk9vLFCOelJyk5NVE5akmbmpys7LUltvoAONndqWm6a8jKSFWfhYbhZBenKz0jWaTPy9MCb+5SfkSwzacGkLK0urVJpZd8l8dFCTwwAAMfZ+j31MjMVZiYrPyNZyQlxsl5DN8GQUzDkes6ZGi3BkDuu++Usvf0ZXb1kim67dIHq27vU0NalaXlpPfOUugIhxcfZId9JTwwAAGPI8pLcAV8//Bf5aDneNfgDId23Zq/uW7P3kPvFualqaPOr1RfQqTNy9dA/rjgu30eIAQAAx8U3rlyoXTVtyklLUm56olaXVqkr6JSblqic9CSt2VWnN3fX6yt/3aSGtvDcm2NBiAEAAMfF4RN6D7/+xYu7tPXprVpdWq2ctPAcnGPBnBgAADBiDl+WfSxzYkZ3RhEAABhXjue+MoQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSYQYAADgSVENMWa20sy2mdlOM7utn9eTzeyPkdffMLOSaNYDAABiR9RCjJnFS7pL0qWSFkq6zswWHtbsRkkNzrnZku6Q9P1o1QMAAGJLNHtiTpW00zlX5pzrkvSgpKsPa3O1pPsiP/9Z0oVmZlGsCQAAxIhohpgiSft7XZdH7vXbxjkXkNQkKS+KNQEAgBiREMXP7q9HxR1FG5nZzZJujlz6zOy9Y6wNx1e+pNrRLgI9+O8xtvDfY+zhv8nYMu9o3xjNEFMuqbjX9VRJ/397dxYiRxWFcfz/uQSVaFyCIm5RVFAiRgkSEVxQggSMLyoRgokbEjdwfRE0KiIovriARiJiQHF50FEUcYlGJSNGxgQNCG6IC7gHxAWNnw+3AsOYzFTGqe7q7u8HBdU1NcXJHKr7pO7te77ZxjlfSdoJmAH8NPZCtlcAKwAkrbM9t5GIY1KSk3ZJPtol+Wif5KRdJK2b7O82OZz0HnCEpEMlTQMWAUNjzhkCllT75wCv2/7Pk5iIiIiIsRp7EmP7b0lXAi8DOwKP2P5I0m3AOttDwEpglaRPKE9gFjUVT0RERPSXJoeTsP0i8OKYYzeP2v8DOHc7L7tiCkKLqZWctEvy0S7JR/skJ+0y6XwoozcRERHRi9J2ICIiInpSa4uYtCxolxr5uFbSRkkbJL0m6ZBuxDlIJsrJqPPOkWRJ+TZGg+rkQ9J51X3ykaTHOx3joKnxvnWwpNWSRqr3rgXdiHMQSHpE0nfbWiJFxb1VrjZIOr7WhW23bqNMBP4UOAyYBqwHjh5zzuXAg9X+IuDJbsfdr1vNfJwG7FbtL0s+up+T6rzdgTXAMDC323H361bzHjkCGAH2ql7v2+24+3mrmZMVwLJq/2jgi27H3a8bcDJwPPDhNn6+AHiJsn7cPODdOtdt65OYtCxolwnzYXu17d+ql8OUdYGiOXXuEYDbgbuAPzoZ3ACqk49LgQds/wxg+7sOxzho6uTEwB7V/gz+u5ZZTBHba9jKOnCjnA085mIY2FPS/hNdt61FTFoWtEudfIx2MaWijuZMmBNJxwEH2X6hk4ENqDr3yJHAkZLekTQs6cyORTeY6uRkObBY0leUb9Je1ZnQYiu293MGaPgr1v/DlLUsiClR+28taTEwFzil0Yhi3JxI2oHSGX5ppwIacHXukZ0oQ0qnUp5UviVpt
u1fGo5tUNXJyfnAo7bvkXQiZd2y2bb/aT68GGNSn+ltfRKzPS0LGK9lQUyJOvlA0hnATcBC2392KLZBNVFOdgdmA29I+oIyxjyUyb2Nqfue9Zztv2x/DnxMKWqiGXVycjHwFIDttcAulL5K0Xm1PmfGamsRk5YF7TJhPqqhi4coBUzG+ps3bk5sb7I90/Ys27Mo85QW2p50j5IYV533rGcpE+CRNJMyvPRZmGJOwQAAAlhJREFUR6McLHVy8iVwOoCkoyhFzPcdjTK2GAIuqL6lNA/YZPvbiX6plcNJTsuCVqmZj7uB6cDT1fzqL20v7FrQfa5mTqJDaubjZWC+pI3AZuAG2z92L+r+VjMn1wEPS7qGMnSxNP8ZboakJyhDqTOrOUi3ADsD2H6QMidpAfAJ8BtwYa3rJl8RERHRi9o6nBQRERExrhQxERER0ZNSxERERERPShETERERPSlFTERERPSkFDER0ShJmyV9IOlDSc9L2nOKr79U0v3V/nJJ10/l9SOivVLERETTfrc9x/ZsyppOV3Q7oIjoDyliIqKT1jKqqZukGyS9J2mDpFtHHb+gOrZe0qrq2FmS3pU0IulVSft1If6IaJFWrtgbEf1H0o6UJd5XVq/nU3oHnUBp/jYk6WTgR0oPrpNs/yBp7+oSbwPzbFvSJcCNlBVXI2JApYiJiKbtKukDYBbwPvBKdXx+tY1Ur6dTippjgWds/wBge0tj1wOBJyXtD0wDPu9I9BHRWhlOioim/W57DnAIpfjYMidGwJ3VfJk5tg+3vbI6vrV+KPcB99s+BriM0qwvIgZYipiI6Ajbm4Crgesl7UxpzHeRpOkAkg6QtC/wGnCepH2q41uGk2YAX1f7S4iIgZfhpIjoGNsjktYDi2yvknQUsLbqfP4rsLjqNHwH8KakzZThpqXAckqX9K+BYeDQbvwbIqI90sU6IiIielKGkyIiIqInpYiJiIiInpQiJiIiInpSipiIiIjoSSliIiIioieliImIiIielCImIiIielKKmIiIiOhJ/wIez4wPQ9cn1QAAAABJRU5ErkJggg==\n", 512 | "text/plain": [ 513 | "
" 514 | ] 515 | }, 516 | "metadata": { 517 | "needs_background": "light" 518 | }, 519 | "output_type": "display_data" 520 | } 521 | ], 522 | "source": [ 523 | "''' -------------------------------Main---------------------------\n", 524 | "The main calls all funcitons sequentillay, apply automated feature engineering, train XGBoost, and plot the classification metrics.\n", 525 | " \n", 526 | "'''\n", 527 | "# 1) Model performance with concatenating CNN features\n", 528 | "train = train_application()\n", 529 | "\n", 530 | "# Extract new features using feature tools\n", 531 | "# max_depth: depth of a deep feature is the number of primitives required to make the feature.\n", 532 | "max_depth = 2\n", 533 | "\n", 534 | "# nrows: number of rows considered in train data for the model due to the computational power limitation.\n", 535 | "nrows = 30000\n", 536 | "\n", 537 | "train = Automated_features(train, nrows, max_depth)\n", 538 | "\n", 539 | "# Preprocesing including one hot encoding of categorical varibles for XGBoost \n", 540 | "train = preprocessing(train)\n", 541 | "\n", 542 | "# Training XGBoost\n", 543 | "nfolds = 5\n", 544 | "test_size = 0.05\n", 545 | "pred_class, pred, y_test = XGBoost(train, nfolds, test_size)\n", 546 | "# Evalutate ROC_AUC, Precision, Recall, F1-Score, Kohen-Cappa metrics\n", 547 | "calculate_metrics(pred_class, pred, y_test)\n", 548 | "# Plot ROC curve\n", 549 | "plot_ROC(y_test, pred)\n", 550 | "# Plot Precision/R curve\n", 551 | "plot_precision_recall(y_test, pred)" 552 | ] 553 | } 554 | ], 555 | "metadata": { 556 | "accelerator": "GPU", 557 | "colab": { 558 | "collapsed_sections": [], 559 | "name": "5Featuretoolstree.ipynb", 560 | "provenance": [ 561 | { 562 | "file_id": "1R5IYeiRZufzRipTqBk5zcdMgdkmUoEHE", 563 | "timestamp": 1595445733426 564 | }, 565 | { 566 | "file_id": "1ZnpsttZrvD6wbKD6aETZuuMbHnE7FLFi", 567 | "timestamp": 1594932112784 568 | }, 569 | { 570 | "file_id": "1QIbvwXBVKCtoJHH1alQcP13irUjezFYp", 571 | "timestamp": 
1594841666146 572 | }, 573 | { 574 | "file_id": "1W2dSR-Ua7USBX0gTUcVxEtEvVlEbZCsq", 575 | "timestamp": 1594734735941 576 | } 577 | ] 578 | }, 579 | "kernelspec": { 580 | "display_name": "Python 3", 581 | "language": "python", 582 | "name": "python3" 583 | }, 584 | "language_info": { 585 | "codemirror_mode": { 586 | "name": "ipython", 587 | "version": 3 588 | }, 589 | "file_extension": ".py", 590 | "mimetype": "text/x-python", 591 | "name": "python", 592 | "nbconvert_exporter": "python", 593 | "pygments_lexer": "ipython3", 594 | "version": "3.7.7" 595 | } 596 | }, 597 | "nbformat": 4, 598 | "nbformat_minor": 1 599 | } 600 | --------------------------------------------------------------------------------