├── .gitignore ├── LICENSE ├── README.md ├── requirements.txt └── source ├── TransferLearningNLP.ipynb ├── convnet-dataviz.ipynb ├── gpu.py ├── logsumexp.py └── vgg_params.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | .idea 6 | # C extensions 7 | *.so 8 | .idea/** 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ 102 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Feedly 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ml-demos 2 | Python code examples for the feedly Machine Learning blog (https://blog.feedly.com/category/all/Machine-Learning/) 3 | 4 | # Setup 5 | It's recommended you use a [virtualenv](http://docs.python-guide.org/en/latest/dev/virtualenvs/) to run code from the 6 | repo. Python 3.6 is required. As usual, install the requirements in requirements.txt and then you should be good to go. -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | keras 2 | tensorflow 3 | h5py 4 | matplotlib 5 | # warning! this is for mac os. pick the right package for your platform at pytorch.org 6 | http://download.pytorch.org/whl/torch-0.3.1-cp36-cp36m-macosx_10_7_x86_64.whl 7 | torchvision 8 | git+https://github.com/fastai/fastai.git 9 | -------------------------------------------------------------------------------- /source/TransferLearningNLP.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Transfer Learning for NLP: Sentiment Analysis on Amazon Reviews\n", 8 | "In this notebook, we show how transfer learning can be applied to detecting the sentiment of Amazon reviews, distinguishing positive from negative ones.\n", 9 | "\n", 10 | "This notebook uses the work from [Howard and Ruder, Ulmfit](https://arxiv.org/pdf/1801.06146.pdf).\n", 11 | "The idea of the paper (and its implementation, explained in the [fast.ai deep learning course](http://course.fast.ai/lessons/lesson10.html)) is to learn a language model trained on a very large dataset, e.g. a Wikipedia dump. The intuition is that if a model is able to predict the next word at each position, it must have learnt something about the structure of the language we are using.\n", 12 | "\n", 13 | "[Word2vec](https://arxiv.org/pdf/1310.4546.pdf) and the like have led to huge improvements on various NLP tasks. This could be seen as a first step towards transfer learning, where the pre-trained word vectors correspond to a transfer of the embedding layer.\n", 14 | "The ambition of [Ulmfit](https://arxiv.org/pdf/1801.06146.pdf) (and others like [ELMO](https://arxiv.org/pdf/1802.05365.pdf) or the recently introduced [Transformer language model](https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf)) is to progressively bring the NLP field to the state Computer Vision reached thanks to the ImageNet challenge: today it is easy to download a model pre-trained on a massive dataset of images, remove the last layer and replace it with a classifier or a regressor depending on the task. \n", 15 | "\n", 16 | "With Ulmfit, the goal is for everyone to be able to use a pre-trained language model as a backbone, combined with a classifier or a regressor. The game-changing aspect of transfer learning is that we are no longer limited by the size of training data! 
With only a fraction of the data size that was necessary before, we can train a classifier/regressor and get very good results with little labelled data.\n", 17 | "\n", 18 | "Given that labelled text data is difficult to get, whereas unlabelled text data is almost unlimited, transfer learning is likely to radically change the field of NLP and help it reach a maturity level closer to that of computer vision.\n", 19 | "\n", 20 | "The architecture for the language model used in ULMFit is the [AWD-LSTM language model](https://arxiv.org/pdf/1708.02182.pdf) by Merity.\n", 21 | "\n", 22 | "While we are using this language model for this experiment, we keep an eye open to the recently proposed character language model with [Contextual String Embeddings](http://alanakbik.github.io/papers/coling2018.pdf) by Akbik." 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "# Content of this notebook" 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "This notebook illustrates the power of Ulmfit on a dataset of Amazon reviews available on Kaggle at https://www.kaggle.com/bittlingmayer/amazonreviews/home.\n", 37 | "We use code from the excellent fastai course and adapt it to a different dataset. The original code is available at https://github.com/fastai/fastai/tree/master/courses/dl2\n", 38 | "\n", 39 | "The data consist of 4M reviews that are either positive or negative. Training a FastText classifier on this dataset results in an F1 score of 0.916.\n", 40 | "We show that using only a fraction of this dataset we are able to reach similar and even better results.\n", 41 | "\n", 42 | "We encourage you to try it on your own tasks!\n", 43 | "Note that if you are interested in regression instead of classification, you can also do it following this [advice](http://forums.fast.ai/t/regression-using-ulmfit/18063/6)." 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "The notebook is organized as follows:\n", 51 | "\n", 52 | "- Tokenize the reviews and create dictionaries\n", 53 | "- Download a pre-trained model and link the dictionary to the embedding layer of the model\n", 54 | "- Fine-tune the language model on the amazon reviews texts\n", 55 | "\n", 56 | "We then have the backbone of our algorithm: a pre-trained language model fine-tuned on Amazon reviews\n", 57 | "\n", 58 | "- Add a classifier to the language model and train the classifier layer only\n", 59 | "- Gradually unfreeze successive layers to train more of the model on the amazon reviews\n", 60 | "- Run a full classification task for several epochs\n", 61 | "- Use the model for inference!\n", 62 | "\n", 63 | "We end this notebook by looking at the effect of training size on the overall performance, to test the hypothesis that the ULMFit model does not need much labeled data to perform well." 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "# Data" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "Before starting, you should download the data from https://www.kaggle.com/bittlingmayer/amazonreviews, put the extracted files into an ./Amazon folder wherever you like, and use that path in this notebook.\n", 78 | "\n", 79 | "Also, we recommend working in a dedicated environment (e.g. mkvirtualenv fastai). Then clone the fastai github repo https://github.com/fastai/fastai and install its requirements." 
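A quick sanity check on the raw files can save debugging time before running the cells below. This is a minimal sketch, assuming the Kaggle archive was extracted into ./Amazon (adjust the path to your own setup): each line holds a fastText-style label prefix followed by the review text.

```python
# Minimal sanity check -- assumes the Kaggle files were extracted into ./Amazon
import os

path = './Amazon'  # adjust to your own location
with open(os.path.join(path, 'train.ft.txt'), 'r') as f:
    first = f.readline()

print(first[:10])    # '__label__1' (negative) or '__label__2' (positive)
print(first[10:90])  # start of the review text, as sliced later in the notebook
```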
80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 2, 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "from fastai.text import *\n", 89 | "import html\n", 90 | "import os\n", 91 | "import pandas as pd\n", 92 | "import pickle\n", 93 | "import re\n", 94 | "from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, \\\n", 95 | "confusion_matrix\n", 96 | "from sklearn.model_selection import train_test_split\n", 97 | "from time import time" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": 2, 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [ 106 | "path = '/your/path/to/folder/Amazon'\n", 107 | "train = []\n", 108 | "with open(os.path.join(path, 'train.ft.txt'), 'r') as file:\n", 109 | " for line in file:\n", 110 | " train.append(line)\n", 111 | " \n", 112 | "test = []\n", 113 | "with open(os.path.join(path, 'test.ft.txt'), 'r') as file:\n", 114 | " for line in file:\n", 115 | " test.append(line)" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 65, 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "print(f'The train data contains {len(train)} examples')\n", 125 | "print(f'The test data contains {len(test)} examples')" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 3, 131 | "metadata": {}, 132 | "outputs": [], 133 | "source": [ 134 | "BOS = 'xbos' # beginning-of-sentence tag\n", 135 | "FLD = 'xfld' # data field tag\n", 136 | "\n", 137 | "PATH=Path('/your/path/to/folder/Amazon')\n", 138 | "\n", 139 | "CLAS_PATH=PATH/'amazon_class'\n", 140 | "CLAS_PATH.mkdir(exist_ok=True)\n", 141 | "\n", 142 | "LM_PATH=PATH/'amazon_lm'\n", 143 | "LM_PATH.mkdir(exist_ok=True)" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 12, 149 | "metadata": {}, 150 | "outputs": [], 151 | "source": [ 152 | "# Each item is '__label__1/2' and then the review, so we split to get texts and labels\n", 153 | "trn_texts,trn_labels = [text[10:] for text in train], [text[:10] for text in train]\n", 154 | "trn_labels = [0 if label == '__label__1' else 1 for label in trn_labels]\n", 155 | "val_texts,val_labels = [text[10:] for text in test], [text[:10] for text in test]\n", 156 | "val_labels = [0 if label == '__label__1' else 1 for label in val_labels]" 157 | ] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "execution_count": 13, 162 | "metadata": {}, 163 | "outputs": [], 164 | "source": [ 165 | "# Following fast.ai recommendations we put our data in pandas dataframes\n", 166 | "col_names = ['labels','text']\n", 167 | "\n", 168 | "df_trn = pd.DataFrame({'text':trn_texts, 'labels':trn_labels}, columns=col_names)\n", 169 | "df_val = pd.DataFrame({'text':val_texts, 'labels':val_labels}, columns=col_names)" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": 66, 175 | "metadata": {}, 176 | "outputs": [], 177 | "source": [ 178 | "df_trn.head(10)" 179 | ] 180 | }, 181 | { 182 | "cell_type": "code", 183 | "execution_count": 16, 184 | "metadata": {}, 185 | "outputs": [], 186 | "source": [ 187 | "df_trn.to_csv(CLAS_PATH/'train.csv', header=False, index=False)\n", 188 | "df_val.to_csv(CLAS_PATH/'test.csv', header=False, index=False)" 189 | ] 190 | }, 191 | { 192 | "cell_type": "code", 193 | "execution_count": 17, 194 | "metadata": {}, 195 | "outputs": [], 196 | "source": [ 197 | "CLASSES = ['neg', 'pos']\n", 198 | "(CLAS_PATH/'classes.txt').open('w').writelines(f'{o}\\n' for o 
in CLASSES)" 199 | ] 200 | }, 201 | { 202 | "cell_type": "markdown", 203 | "metadata": {}, 204 | "source": [ 205 | "# Language Model" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": 11, 211 | "metadata": {}, 212 | "outputs": [], 213 | "source": [ 214 | "# We're going to fine tune the language model so it's ok to take some of the test set in our train data\n", 215 | "# for the lm fine-tuning\n", 216 | "trn_texts,val_texts = train_test_split(np.concatenate([trn_texts,val_texts]), test_size=0.1)\n", 217 | "\n", 218 | "df_trn = pd.DataFrame({'text':trn_texts, 'labels':[0]*len(trn_texts)}, columns=col_names)\n", 219 | "df_val = pd.DataFrame({'text':val_texts, 'labels':[0]*len(val_texts)}, columns=col_names)\n", 220 | "\n", 221 | "df_trn.to_csv(LM_PATH/'train.csv', header=False, index=False)\n", 222 | "df_val.to_csv(LM_PATH/'test.csv', header=False, index=False)" 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "execution_count": 19, 228 | "metadata": {}, 229 | "outputs": [], 230 | "source": [ 231 | "# Here we use functions from the fast.ai course to get data\n", 232 | "\n", 233 | "chunksize=24000\n", 234 | "re1 = re.compile(r' +')\n", 235 | "\n", 236 | "def fixup(x):\n", 237 | " x = x.replace('#39;', \"'\").replace('amp;', '&').replace('#146;', \"'\").replace(\n", 238 | " 'nbsp;', ' ').replace('#36;', '$').replace('\\\\n', \"\\n\").replace('quot;', \"'\").replace(\n", 239 | " '
', \"\\n\").replace('\\\\\"', '\"').replace('','u_n').replace(' @.@ ','.').replace(\n", 240 | " ' @-@ ','-').replace('\\\\', ' \\\\ ')\n", 241 | " return re1.sub(' ', html.unescape(x))\n", 242 | "\n", 243 | "def get_texts(df, n_lbls=1):\n", 244 | " labels = df.iloc[:,range(n_lbls)].values.astype(np.int64)\n", 245 | " texts = f'\\n{BOS} {FLD} 1 ' + df[n_lbls].astype(str)\n", 246 | " for i in range(n_lbls+1, len(df.columns)): \n", 247 | " texts += f' {FLD} {i-n_lbls} ' + df[i].astype(str)\n", 248 | " texts = list(texts.apply(fixup).values)\n", 249 | "\n", 250 | " tok = Tokenizer().proc_all_mp(partition_by_cores(texts))\n", 251 | " return tok, list(labels)\n", 252 | "\n", 253 | "def get_all(df, n_lbls):\n", 254 | " tok, labels = [], []\n", 255 | " for i, r in enumerate(df):\n", 256 | " print(i)\n", 257 | " tok_, labels_ = get_texts(r, n_lbls)\n", 258 | " tok += tok_;\n", 259 | " labels += labels_\n", 260 | " return tok, labels\n", 261 | "\n", 262 | "df_trn = pd.read_csv(LM_PATH/'train.csv', header=None, chunksize=chunksize)\n", 263 | "df_val = pd.read_csv(LM_PATH/'test.csv', header=None, chunksize=chunksize)" 264 | ] 265 | }, 266 | { 267 | "cell_type": "code", 268 | "execution_count": 21, 269 | "metadata": {}, 270 | "outputs": [], 271 | "source": [ 272 | "# This cell can take quite some time if your dataset is large\n", 273 | "# Run it once and comment it for later use\n", 274 | "tok_trn, trn_labels = get_all(df_trn, 1)\n", 275 | "tok_val, val_labels = get_all(df_val, 1)" 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": 15, 281 | "metadata": {}, 282 | "outputs": [], 283 | "source": [ 284 | "# Run this cell once and comment everything but the load statements for later use\n", 285 | "\n", 286 | "(LM_PATH/'tmp').mkdir(exist_ok=True)\n", 287 | "np.save(LM_PATH/'tmp'/'tok_trn.npy', tok_trn)\n", 288 | "np.save(LM_PATH/'tmp'/'tok_val.npy', tok_val)\n", 289 | "tok_trn = np.load(LM_PATH/'tmp'/'tok_trn.npy')\n", 290 | "tok_val = np.load(LM_PATH/'tmp'/'tok_val.npy')" 291 | ] 292 | }, 293 | { 294 | "cell_type": "code", 295 | "execution_count": 63, 296 | "metadata": {}, 297 | "outputs": [], 298 | "source": [ 299 | "# Check the most common tokens\n", 300 | "freq = Counter(p for o in tok_trn for p in o)\n", 301 | "freq.most_common(25)" 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": 64, 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [ 310 | "# Check the least common tokens\n", 311 | "freq.most_common()[-25:]" 312 | ] 313 | }, 314 | { 315 | "cell_type": "code", 316 | "execution_count": null, 317 | "metadata": {}, 318 | "outputs": [], 319 | "source": [ 320 | "# Build your vocabulary by keeping only the most common tokens that appears frequently enough\n", 321 | "# and constrain the size of your vocabulary. 
We follow the 60k recommendation here.\n", 322 | "max_vocab = 60000\n", 323 | "min_freq = 2\n", 324 | "\n", 325 | "itos = [o for o,c in freq.most_common(max_vocab) if c>min_freq]\n", 326 | "itos.insert(0, '_pad_')\n", 327 | "itos.insert(0, '_unk_')\n", 328 | "\n", 329 | "stoi = collections.defaultdict(lambda:0, {v:k for k,v in enumerate(itos)})\n", 330 | "len(itos)\n", 331 | "\n", 332 | "trn_lm = np.array([[stoi[o] for o in p] for p in tok_trn])\n", 333 | "val_lm = np.array([[stoi[o] for o in p] for p in tok_val])\n", 334 | "\n", 335 | "np.save(LM_PATH/'tmp'/'trn_ids.npy', trn_lm)\n", 336 | "np.save(LM_PATH/'tmp'/'val_ids.npy', val_lm)\n", 337 | "pickle.dump(itos, open(LM_PATH/'tmp'/'itos.pkl', 'wb'))" 338 | ] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "execution_count": 10, 343 | "metadata": {}, 344 | "outputs": [], 345 | "source": [ 346 | "# Load everything back\n", 347 | "trn_lm = np.load(LM_PATH/'tmp'/'trn_ids.npy')\n", 348 | "val_lm = np.load(LM_PATH/'tmp'/'val_ids.npy')\n", 349 | "itos = pickle.load(open(LM_PATH/'tmp'/'itos.pkl', 'rb'))" 350 | ] 351 | }, 352 | { 353 | "cell_type": "code", 354 | "execution_count": 33, 355 | "metadata": {}, 356 | "outputs": [], 357 | "source": [ 358 | "vs=len(itos)\n", 359 | "vs,len(trn_lm)" 360 | ] 361 | }, 362 | { 363 | "cell_type": "markdown", 364 | "metadata": {}, 365 | "source": [ 366 | "# Using a pre-trained Language Model" 367 | ] 368 | }, 369 | { 370 | "cell_type": "code", 371 | "execution_count": null, 372 | "metadata": {}, 373 | "outputs": [], 374 | "source": [ 375 | "# Uncomment this cell to download the pre-trained model.\n", 376 | "# It will be placed into the PATH that you defined earlier.\n", 377 | "# ! wget -nH -r -np -P {PATH} http://files.fast.ai/models/wt103/" 378 | ] 379 | }, 380 | { 381 | "cell_type": "code", 382 | "execution_count": 5, 383 | "metadata": {}, 384 | "outputs": [], 385 | "source": [ 386 | "# Load the weights of the model\n", 387 | "em_sz,nh,nl = 400,1150,3\n", 388 | "\n", 389 | "PRE_PATH = PATH/'models'/'wt103'\n", 390 | "PRE_LM_PATH = PRE_PATH/'fwd_wt103.h5'\n", 391 | "\n", 392 | "wgts = torch.load(PRE_LM_PATH, map_location=lambda storage, loc: storage)" 393 | ] 394 | }, 395 | { 396 | "cell_type": "code", 397 | "execution_count": 8, 398 | "metadata": {}, 399 | "outputs": [ 400 | { 401 | "data": { 402 | "text/plain": [ 403 | "(238462, 400)" 404 | ] 405 | }, 406 | "execution_count": 8, 407 | "metadata": {}, 408 | "output_type": "execute_result" 409 | } 410 | ], 411 | "source": [ 412 | "# Check the word embedding layer and keep a 'mean word' for unknown tokens\n", 413 | "enc_wgts = to_np(wgts['0.encoder.weight'])\n", 414 | "row_m = enc_wgts.mean(0)\n", 415 | "\n", 416 | "enc_wgts.shape" 417 | ] 418 | }, 419 | { 420 | "cell_type": "code", 421 | "execution_count": 12, 422 | "metadata": {}, 423 | "outputs": [], 424 | "source": [ 425 | "# Load the vocabulary on which the pre-trained model was trained\n", 426 | "# Define an embedding matrix with the vocabulary of our dataset\n", 427 | "itos2 = pickle.load((PRE_PATH/'itos_wt103.pkl').open('rb'))\n", 428 | "stoi2 = collections.defaultdict(lambda:-1, {v:k for k,v in enumerate(itos2)})\n", 429 | "\n", 430 | "new_w = np.zeros((vs, em_sz), dtype=np.float32)\n", 431 | "for i,w in enumerate(itos):\n", 432 | " r = stoi2[w]\n", 433 | " new_w[i] = enc_wgts[r] if r>=0 else row_m" 434 | ] 435 | }, 436 | { 437 | "cell_type": "code", 438 | "execution_count": 16, 439 | "metadata": {}, 440 | "outputs": [], 441 | "source": [ 442 | "# Use the new embedding matrix for the pre-trained model\n", 
443 | "wgts['0.encoder.weight'] = T(new_w)\n", 444 | "wgts['0.encoder_with_dropout.embed.weight'] = T(np.copy(new_w))\n", 445 | "wgts['1.decoder.weight'] = T(np.copy(new_w))" 446 | ] 447 | }, 448 | { 449 | "cell_type": "code", 450 | "execution_count": 17, 451 | "metadata": {}, 452 | "outputs": [], 453 | "source": [ 454 | "# Define the learner object to do the fine-tuning\n", 455 | "# Here we will freeze everything except the embedding layer, so that we can have a better \n", 456 | "# embedding for unknown words than just the mean embedding on which we initialise it.\n", 457 | "wd=1e-7\n", 458 | "bptt=70\n", 459 | "bs=52\n", 460 | "opt_fn = partial(optim.Adam, betas=(0.8, 0.99))\n", 461 | "\n", 462 | "trn_dl = LanguageModelLoader(np.concatenate(trn_lm), bs, bptt)\n", 463 | "val_dl = LanguageModelLoader(np.concatenate(val_lm), bs, bptt)\n", 464 | "md = LanguageModelData(PATH, 1, vs, trn_dl, val_dl, bs=bs, bptt=bptt)\n", 465 | "\n", 466 | "drops = np.array([0.25, 0.1, 0.2, 0.02, 0.15])*0.7\n", 467 | "\n", 468 | "learner= md.get_model(opt_fn, em_sz, nh, nl, \n", 469 | " dropouti=drops[0], dropout=drops[1], wdrop=drops[2], dropoute=drops[3], dropouth=drops[4])\n", 470 | "\n", 471 | "learner.metrics = [accuracy]\n", 472 | "learner.freeze_to(-1)\n", 473 | "\n", 474 | "learner.model.load_state_dict(wgts)\n", 475 | "\n", 476 | "lr=1e-3\n", 477 | "lrs = lr" 478 | ] 479 | }, 480 | { 481 | "cell_type": "code", 482 | "execution_count": 22, 483 | "metadata": {}, 484 | "outputs": [], 485 | "source": [ 486 | "# Run one epoch of fine-tuning \n", 487 | "learner.fit(lrs/2, 1, wds=wd, use_clr=(32,2), cycle_len=1)" 488 | ] 489 | }, 490 | { 491 | "cell_type": "code", 492 | "execution_count": 30, 493 | "metadata": {}, 494 | "outputs": [], 495 | "source": [ 496 | "# Save the fine-tuned model and unfreeze everything to later fine-tune the whole model\n", 497 | "learner.save('lm_last_ft')\n", 498 | "learner.load('lm_last_ft')\n", 499 | "learner.unfreeze()" 500 | ] 501 | }, 502 | { 503 | "cell_type": "code", 504 | "execution_count": 23, 505 | "metadata": {}, 506 | "outputs": [], 507 | "source": [ 508 | "learner.lr_find(start_lr=lrs/10, end_lr=lrs*10, linear=True)" 509 | ] 510 | }, 511 | { 512 | "cell_type": "code", 513 | "execution_count": 24, 514 | "metadata": {}, 515 | "outputs": [], 516 | "source": [ 517 | "learner.sched.plot()" 518 | ] 519 | }, 520 | { 521 | "cell_type": "code", 522 | "execution_count": null, 523 | "metadata": {}, 524 | "outputs": [], 525 | "source": [ 526 | "# Run this if you want to highly tune the LM to the Amazon data, with 15 epochs\n", 527 | "# use_clr controls the shape of the cyclical (triangular) learning rate\n", 528 | "learner.fit(lrs, 1, wds=wd, use_clr=(20,10), cycle_len=15)" 529 | ] 530 | }, 531 | { 532 | "cell_type": "code", 533 | "execution_count": 33, 534 | "metadata": {}, 535 | "outputs": [], 536 | "source": [ 537 | "# Save the Backbone for further classification!!\n", 538 | "learner.save('lm1')\n", 539 | "learner.save_encoder('lm1_enc')" 540 | ] 541 | }, 542 | { 543 | "cell_type": "code", 544 | "execution_count": 25, 545 | "metadata": {}, 546 | "outputs": [], 547 | "source": [ 548 | "learner.sched.plot_loss()" 549 | ] 550 | }, 551 | { 552 | "cell_type": "markdown", 553 | "metadata": {}, 554 | "source": [ 555 | "# Going back to classification!" 
556 | ] 557 | }, 558 | { 559 | "cell_type": "markdown", 560 | "metadata": {}, 561 | "source": [ 562 | "Now that we have spent some time fine-tuning the language model on our Amazon data, let's see if we can easily classify these reviews.\n", 563 | "As before, some cells should be run only once; afterwards, load the saved results for later use." 564 | ] 565 | }, 566 | { 567 | "cell_type": "code", 568 | "execution_count": 35, 569 | "metadata": {}, 570 | "outputs": [], 571 | "source": [ 572 | "df_trn = pd.read_csv(CLAS_PATH/'train.csv', header=None, chunksize=chunksize)\n", 573 | "df_val = pd.read_csv(CLAS_PATH/'test.csv', header=None, chunksize=chunksize)" 574 | ] 575 | }, 576 | { 577 | "cell_type": "code", 578 | "execution_count": 26, 579 | "metadata": {}, 580 | "outputs": [], 581 | "source": [ 582 | "tok_trn, trn_labels = get_all(df_trn, 1)\n", 583 | "tok_val, val_labels = get_all(df_val, 1)" 584 | ] 585 | }, 586 | { 587 | "cell_type": "code", 588 | "execution_count": 36, 589 | "metadata": {}, 590 | "outputs": [], 591 | "source": [ 592 | "(CLAS_PATH/'tmp').mkdir(exist_ok=True)\n", 593 | "\n", 594 | "np.save(CLAS_PATH/'tmp'/'tok_trn.npy', tok_trn)\n", 595 | "np.save(CLAS_PATH/'tmp'/'tok_val.npy', tok_val)\n", 596 | "\n", 597 | "np.save(CLAS_PATH/'tmp'/'trn_labels.npy', trn_labels)\n", 598 | "np.save(CLAS_PATH/'tmp'/'val_labels.npy', val_labels)" 599 | ] 600 | }, 601 | { 602 | "cell_type": "code", 603 | "execution_count": 4, 604 | "metadata": {}, 605 | "outputs": [ 606 | { 607 | "data": { 608 | "text/plain": [ 609 | "60002" 610 | ] 611 | }, 612 | "execution_count": 4, 613 | "metadata": {}, 614 | "output_type": "execute_result" 615 | } 616 | ], 617 | "source": [ 618 | "tok_trn = np.load(CLAS_PATH/'tmp'/'tok_trn.npy')\n", 619 | "tok_val = np.load(CLAS_PATH/'tmp'/'tok_val.npy')\n", 620 | "itos = pickle.load((LM_PATH/'tmp'/'itos.pkl').open('rb'))\n", 621 | "stoi = collections.defaultdict(lambda:0, {v:k for k,v in enumerate(itos)})\n", 622 | "len(itos)" 623 | ] 624 | }, 625 | { 626 | "cell_type": "code", 627 | "execution_count": 38, 628 | "metadata": {}, 629 | "outputs": [], 630 | "source": [ 631 | "trn_clas = np.array([[stoi[o] for o in p] for p in tok_trn])\n", 632 | "val_clas = np.array([[stoi[o] for o in p] for p in tok_val])\n", 633 | "\n", 634 | "np.save(CLAS_PATH/'tmp'/'trn_ids.npy', trn_clas)\n", 635 | "np.save(CLAS_PATH/'tmp'/'val_ids.npy', val_clas)" 636 | ] 637 | }, 638 | { 639 | "cell_type": "markdown", 640 | "metadata": {}, 641 | "source": [ 642 | "# Classifier\n", 643 | "In this part, we adopt an unusual train/test hierarchy. While it's common to train on a big dataset and then test on a small one, here we want to test the hypothesis that the model can learn from little training data. Hence we take less data for training than for testing." 
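The next cells draw this small training set with random.sample. As an alternative sketch (illustrative only, assuming the trn_clas and trn_labels arrays loaded in the next cell), sklearn's train_test_split can draw the same subset while keeping the two classes balanced, which matters when the sample is tiny:

```python
# Sketch (assumption: trn_clas and trn_labels are loaded as in the next cell);
# stratify keeps the positive/negative ratio intact even in very small subsamples.
from sklearn.model_selection import train_test_split

small_texts, _, small_labels, _ = train_test_split(
    trn_clas, trn_labels,
    train_size=100,       # e.g. a 100-example training set
    stratify=trn_labels,  # preserve class balance
)
```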
644 | ] 645 | }, 646 | { 647 | "cell_type": "code", 648 | "execution_count": 5, 649 | "metadata": {}, 650 | "outputs": [], 651 | "source": [ 652 | "# We select here a random sample of 'trn_size' reviews from our dataset\n", 653 | "# The paper claims that it's possible to achieve very good results with few labeled examples\n", 654 | "# So let's try with 100 examples for training, and 5000 examples for validation.\n", 655 | "# We encourage you to try different values to see the effect of data size on performance.\n", 656 | "trn_size = 100\n", 657 | "val_size = 5000\n", 658 | "trn_clas = np.load(CLAS_PATH/'tmp'/'trn_ids.npy')\n", 659 | "val_clas = np.load(CLAS_PATH/'tmp'/'val_ids.npy')\n", 660 | "\n", 661 | "trn_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'trn_labels.npy'))\n", 662 | "val_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'val_labels.npy'))\n", 663 | "\n", 664 | "train = random.sample(list(zip(trn_clas, trn_labels)), trn_size)\n", 665 | "trn_clas = np.array([item[0] for item in train])\n", 666 | "trn_labels = np.array([item[1] for item in train])\n", 667 | "del train\n", 668 | "\n", 669 | "validation = random.sample(list(zip(val_clas, val_labels)), val_size)\n", 670 | "val_clas = np.array([item[0] for item in validation])\n", 671 | "val_labels = np.array([item[1] for item in validation])\n", 672 | "del validation\n", 673 | "\n", 674 | "\n", 675 | "bptt,em_sz,nh,nl = 70,400,1150,3\n", 676 | "vs = len(itos)\n", 677 | "opt_fn = partial(optim.Adam, betas=(0.8, 0.99))\n", 678 | "bs = 48\n", 679 | "\n", 680 | "min_lbl = trn_labels.min()\n", 681 | "trn_labels -= min_lbl\n", 682 | "val_labels -= min_lbl\n", 683 | "c=int(trn_labels.max())+1" 684 | ] 685 | }, 686 | { 687 | "cell_type": "code", 688 | "execution_count": 34, 689 | "metadata": {}, 690 | "outputs": [], 691 | "source": [ 692 | "# Check that the validation dataset is well balanced so accuracy is a good metric\n", 693 | "# We'll also check the usual metrics for binary classification (precision, recall, f1 score)\n", 694 | "len(trn_labels[trn_labels == 1]) / len(trn_labels)" 695 | ] 696 | }, 697 | { 698 | "cell_type": "code", 699 | "execution_count": 48, 700 | "metadata": {}, 701 | "outputs": [], 702 | "source": [ 703 | "trn_ds = TextDataset(trn_clas, trn_labels)\n", 704 | "val_ds = TextDataset(val_clas, val_labels)\n", 705 | "trn_samp = SortishSampler(trn_clas, key=lambda x: len(trn_clas[x]), bs=bs//2)\n", 706 | "val_samp = SortSampler(val_clas, key=lambda x: len(val_clas[x]))\n", 707 | "trn_dl = DataLoader(trn_ds, bs//2, transpose=True, num_workers=1, pad_idx=1, sampler=trn_samp)\n", 708 | "val_dl = DataLoader(val_ds, bs, transpose=True, num_workers=1, pad_idx=1, sampler=val_samp)" 709 | ] 710 | }, 711 | { 712 | "cell_type": "code", 713 | "execution_count": 50, 714 | "metadata": {}, 715 | "outputs": [], 716 | "source": [ 717 | "# We define the model, here it is a classifier on top of an RNN language model\n", 718 | "# We load the language model encoder that we fine-tuned before\n", 719 | "# We freeze everything but the last layer, so that we can train the classification layer only.\n", 720 | "# Load the saved weights from before, and freeze everything until the last layer\n", 721 | "\n", 722 | "md = ModelData(PATH, trn_dl, val_dl)\n", 723 | "dps = np.array([0.4, 0.5, 0.05, 0.3, 0.1])\n", 724 | "\n", 725 | "m = get_rnn_classifier(bptt, 20*70, c, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1,\n", 726 | " layers=[em_sz*3, 50, c], drops=[dps[4], 0.1],\n", 727 | " dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])\n", 728 | "\n", 
729 | "opt_fn = partial(optim.Adam, betas=(0.7, 0.99))\n", 730 | "\n", 731 | "learn = RNN_Learner(md, TextModel(to_gpu(m)), opt_fn=opt_fn)\n", 732 | "learn.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)\n", 733 | "learn.clip=25.\n", 734 | "learn.metrics = [accuracy]\n", 735 | "\n", 736 | "lr=3e-3\n", 737 | "lrm = 2.6\n", 738 | "lrs = np.array([lr/(lrm**4), lr/(lrm**3), lr/(lrm**2), lr/lrm, lr])\n", 739 | "\n", 740 | "lrs=np.array([1e-4,1e-4,1e-4,1e-3,1e-2])\n", 741 | "\n", 742 | "wd = 1e-7\n", 743 | "wd = 0\n", 744 | "learn.load_encoder('lm1_enc')\n", 745 | "\n", 746 | "learn.freeze_to(-1)" 747 | ] 748 | }, 749 | { 750 | "cell_type": "code", 751 | "execution_count": 37, 752 | "metadata": {}, 753 | "outputs": [], 754 | "source": [ 755 | "learn.lr_find(lrs/1000)" 756 | ] 757 | }, 758 | { 759 | "cell_type": "code", 760 | "execution_count": 38, 761 | "metadata": {}, 762 | "outputs": [], 763 | "source": [ 764 | "learn.sched.plot()" 765 | ] 766 | }, 767 | { 768 | "cell_type": "code", 769 | "execution_count": 39, 770 | "metadata": {}, 771 | "outputs": [], 772 | "source": [ 773 | "# Run one epoch on the classification layer\n", 774 | "learn.fit(lrs, 1, wds=wd, cycle_len=1, use_clr=(8,3))" 775 | ] 776 | }, 777 | { 778 | "cell_type": "code", 779 | "execution_count": 54, 780 | "metadata": {}, 781 | "outputs": [], 782 | "source": [ 783 | "# Save the trained model\n", 784 | "learn.save('clas_0')\n", 785 | "learn.load('clas_0')" 786 | ] 787 | }, 788 | { 789 | "cell_type": "code", 790 | "execution_count": 40, 791 | "metadata": {}, 792 | "outputs": [], 793 | "source": [ 794 | "# Gradually unfreeze another layer to train a bit more parameters than just the classifier layer\n", 795 | "learn.freeze_to(-2)\n", 796 | "learn.fit(lrs, 1, wds=wd, cycle_len=1, use_clr=(8,3))" 797 | ] 798 | }, 799 | { 800 | "cell_type": "code", 801 | "execution_count": 56, 802 | "metadata": {}, 803 | "outputs": [], 804 | "source": [ 805 | "# Save the trained model\n", 806 | "learn.save('clas_1')\n", 807 | "learn.load('clas_1')" 808 | ] 809 | }, 810 | { 811 | "cell_type": "code", 812 | "execution_count": 41, 813 | "metadata": {}, 814 | "outputs": [], 815 | "source": [ 816 | "# Unfreeze everything and train for a few epochs on the whole set of parameters of the model\n", 817 | "learn.unfreeze()\n", 818 | "learn.fit(lrs, 1, wds=wd, cycle_len=14, use_clr=(32,10))" 819 | ] 820 | }, 821 | { 822 | "cell_type": "code", 823 | "execution_count": 42, 824 | "metadata": {}, 825 | "outputs": [], 826 | "source": [ 827 | "learn.sched.plot_loss()" 828 | ] 829 | }, 830 | { 831 | "cell_type": "code", 832 | "execution_count": 59, 833 | "metadata": {}, 834 | "outputs": [], 835 | "source": [ 836 | "# Save the model\n", 837 | "learn.save('clas_2')" 838 | ] 839 | }, 840 | { 841 | "cell_type": "markdown", 842 | "metadata": {}, 843 | "source": [ 844 | "# Inference\n", 845 | "Nonw, let's play with the model we've just learned!" 
846 | ] 847 | }, 848 | { 849 | "cell_type": "code", 850 | "execution_count": 60, 851 | "metadata": {}, 852 | "outputs": [], 853 | "source": [ 854 | "m = get_rnn_classifier(bptt, 20*70, c, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1,\n", 855 | " layers=[em_sz*3, 50, c], drops=[dps[4], 0.1],\n", 856 | " dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])\n", 857 | "opt_fn = partial(optim.Adam, betas=(0.7, 0.99))\n", 858 | "learn = RNN_Learner(md, TextModel(to_gpu(m)), opt_fn=opt_fn)\n", 859 | "learn.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)\n", 860 | "learn.clip=25.\n", 861 | "learn.metrics = [accuracy]\n", 862 | "\n", 863 | "lr=3e-3\n", 864 | "lrm = 2.6\n", 865 | "lrs = np.array([lr/(lrm**4), lr/(lrm**3), lr/(lrm**2), lr/lrm, lr])\n", 866 | "wd = 1e-7\n", 867 | "wd = 0\n", 868 | "learn.load_encoder('lm1_enc')\n", 869 | "learn.load('clas_2')" 870 | ] 871 | }, 872 | { 873 | "cell_type": "code", 874 | "execution_count": 6, 875 | "metadata": {}, 876 | "outputs": [], 877 | "source": [ 878 | "def get_sentiment(input_str: str):\n", 879 | "\n", 880 | " # predictions are done on arrays of input.\n", 881 | " # We only have a single input, so turn it into a 1x1 array\n", 882 | " texts = [input_str]\n", 883 | "\n", 884 | " # tokenize: here a simple whitespace split (the fastai spacy tokenizer is commented out below)\n", 885 | " tok = [t.split() for t in texts]\n", 886 | " # tok = Tokenizer().proc_all_mp(partition_by_cores(texts))\n", 887 | "\n", 888 | " # turn into integers for each word\n", 889 | " encoded = [stoi[p] for p in tok[0]]\n", 890 | "\n", 891 | " idx = np.array(encoded)[None]\n", 892 | " idx = np.transpose(idx)\n", 893 | " tensorIdx = VV(idx)\n", 894 | " m.eval()\n", 895 | " m.reset()\n", 896 | " p = m.forward(tensorIdx)\n", 897 | " return np.argmax(p[0][0].data.cpu().numpy())\n", 898 | "\n", 899 | "def prediction(texts):\n", 900 | " \"\"\"Do the prediction on a list of texts\n", 901 | " \"\"\"\n", 902 | " y = []\n", 903 | " \n", 904 | " for i, text in enumerate(texts):\n", 905 | " if i % 1000 == 0:\n", 906 | " print(i)\n", 907 | " encoded = text\n", 908 | " idx = np.array(encoded)[None]\n", 909 | " idx = np.transpose(idx)\n", 910 | " tensorIdx = VV(idx)\n", 911 | " m.eval()\n", 912 | " m.reset()\n", 913 | " p = m.forward(tensorIdx)\n", 914 | " y.append(np.argmax(p[0][0].data.cpu().numpy()))\n", 915 | " return y" 916 | ] 917 | }, 918 | { 919 | "cell_type": "code", 920 | "execution_count": 43, 921 | "metadata": {}, 922 | "outputs": [], 923 | "source": [ 924 | "sentence = \"I like Feedly\"\n", 925 | "start = time()\n", 926 | "print(get_sentiment(sentence))\n", 927 | "print(time() - start)" 928 | ] 929 | }, 930 | { 931 | "cell_type": "code", 932 | "execution_count": 44, 933 | "metadata": {}, 934 | "outputs": [], 935 | "source": [ 936 | "y = prediction(list(val_clas))" 937 | ] 938 | }, 939 | { 940 | "cell_type": "code", 941 | "execution_count": 45, 942 | "metadata": {}, 943 | "outputs": [], 944 | "source": [ 945 | "# Show relevant metrics for binary classification\n", 946 | "# We encourage you to try training the classifier with different data sizes and see the effect on performance\n", 947 | "print(f'Accuracy --> {accuracy_score(y, val_labels)}')\n", 948 | "print(f'Precision --> {precision_score(y, val_labels)}')\n", 949 | "print(f'F1 score --> {f1_score(y, val_labels)}')\n", 950 | "print(f'Recall score --> {recall_score(y, val_labels)}')\n", 951 | "print(confusion_matrix(y, val_labels))\n", 952 | "print(classification_report(y, val_labels))" 953 | ] 954 | }, 955 | { 956 | "cell_type": "markdown", 957 | "metadata": 
{}, 958 | "source": [ 959 | "# What training size do we need?\n", 960 | "The language model has already learnt a lot about the syntax. It is very knowledgeable about the context in which words appear in sentences. However, the language model does not contain any notion of [meaning](https://en.wikipedia.org/wiki/Meaning_%28linguistics%29). This problem is well summarised in [Emily Bender's tweet](https://twitter.com/emilymbender/status/1024042044035985408) during a very interesting Twitter thread that occurred in July around meaning in NLP. A cool summary of this thread can be found in the [Hugging Face](https://medium.com/huggingface/learning-meaning-in-natural-language-processing-the-semantics-mega-thread-9c0332dfe28e) blogpost. Hence the meaning in language is very likely to be learned through supervision, with the help of ground-truth examples.\n", 961 | "\n", 962 | "However, when we perform an NLP task, such as sentiment analysis in our example, both syntax and meaning are important!\n", 963 | "The idea is that you can save a lot of time by being taught with a lot of blind syntax first, and then learning meaning. Think of when you start learning a completely new field: it is far easier to learn it in your mother tongue than in a language you master less well. \n", 964 | "\n", 965 | "The big practical gain here is that once you \"know\" a language, you need fewer supervised examples to learn a new thing! In our example, it means we need fewer labeled reviews to learn a relevant classifier.\n", 966 | "\n", 967 | "Let's verify this hypothesis by training classifiers with several training sizes and seeing how size affects performance!" 968 | ] 969 | }, 970 | { 971 | "cell_type": "code", 972 | "execution_count": 7, 973 | "metadata": {}, 974 | "outputs": [], 975 | "source": [ 976 | "trn_clas = np.load(CLAS_PATH/'tmp'/'trn_ids.npy')\n", 977 | "val_clas = np.load(CLAS_PATH/'tmp'/'val_ids.npy')\n", 978 | "\n", 979 | "trn_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'trn_labels.npy'))\n", 980 | "val_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'val_labels.npy'))" 981 | ] 982 | }, 983 | { 984 | "cell_type": "code", 985 | "execution_count": 8, 986 | "metadata": {}, 987 | "outputs": [], 988 | "source": [ 989 | "def experiment(trn_size, val_size):\n", 990 | "\n", 991 | " train = random.sample(list(zip(trn_clas, trn_labels)), trn_size)\n", 992 | " aux_trn_clas = np.array([item[0] for item in train])\n", 993 | " aux_trn_labels = np.array([item[1] for item in train])\n", 994 | " del train\n", 995 | "\n", 996 | " validation = random.sample(list(zip(val_clas, val_labels)), val_size)\n", 997 | " aux_val_clas = np.array([item[0] for item in validation])\n", 998 | " aux_val_labels = np.array([item[1] for item in validation])\n", 999 | " del validation\n", 1000 | "\n", 1001 | "\n", 1002 | " bptt,em_sz,nh,nl = 70,400,1150,3\n", 1003 | " vs = len(itos)\n", 1004 | " opt_fn = partial(optim.Adam, betas=(0.8, 0.99))\n", 1005 | " bs = 48\n", 1006 | "\n", 1007 | " min_lbl = aux_trn_labels.min()\n", 1008 | " aux_trn_labels -= min_lbl\n", 1009 | " aux_val_labels -= min_lbl\n", 1010 | " c=int(aux_trn_labels.max())+1\n", 1011 | "\n", 1012 | " # Load data in relevant structures\n", 1013 | " trn_ds = TextDataset(aux_trn_clas, aux_trn_labels)\n", 1014 | " val_ds = TextDataset(aux_val_clas, aux_val_labels)\n", 1015 | " trn_samp = SortishSampler(aux_trn_clas, key=lambda x: len(aux_trn_clas[x]), bs=bs//2)\n", 1016 | " val_samp = SortSampler(aux_val_clas, key=lambda x: len(aux_val_clas[x]))\n", 1017 | " trn_dl = 
DataLoader(trn_ds, bs//2, transpose=True, num_workers=1, pad_idx=1, sampler=trn_samp)\n", 1018 | " val_dl = DataLoader(val_ds, bs, transpose=True, num_workers=1, pad_idx=1, sampler=val_samp)\n", 1019 | "\n", 1020 | " # Define the model and load the backbone language model\n", 1021 | " md = ModelData(PATH, trn_dl, val_dl)\n", 1022 | " dps = np.array([0.4, 0.5, 0.05, 0.3, 0.1])\n", 1023 | "\n", 1024 | " m = get_rnn_classifier(bptt, 20*70, c, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1,\n", 1025 | " layers=[em_sz*3, 50, c], drops=[dps[4], 0.1],\n", 1026 | " dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])\n", 1027 | "\n", 1028 | " opt_fn = partial(optim.Adam, betas=(0.7, 0.99))\n", 1029 | "\n", 1030 | " learn = RNN_Learner(md, TextModel(to_gpu(m)), opt_fn=opt_fn)\n", 1031 | " learn.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)\n", 1032 | " learn.clip=25.\n", 1033 | " learn.metrics = [accuracy]\n", 1034 | "\n", 1035 | " lr=3e-3\n", 1036 | " lrm = 2.6\n", 1037 | " lrs = np.array([lr/(lrm**4), lr/(lrm**3), lr/(lrm**2), lr/lrm, lr])\n", 1038 | "\n", 1039 | " lrs=np.array([1e-4,1e-4,1e-4,1e-3,1e-2])\n", 1040 | "\n", 1041 | " wd = 1e-7\n", 1042 | " wd = 0\n", 1043 | " learn.load_encoder('lm1_enc')\n", 1044 | "\n", 1045 | " learn.freeze_to(-1)\n", 1046 | "\n", 1047 | " # Find the learning rate\n", 1048 | " learn.lr_find(lrs/1000)\n", 1049 | "\n", 1050 | " # Run one epoch on the classification layer\n", 1051 | " learn.fit(lrs, 1, wds=wd, cycle_len=1, use_clr=(8,3))\n", 1052 | "\n", 1053 | " # Save the trained model\n", 1054 | " learn.save(f'{trn_size}clas_0')\n", 1055 | " learn.load(f'{trn_size}clas_0')\n", 1056 | "\n", 1057 | " # Gradually unfreeze another layer to train a few more parameters than just the classifier layer\n", 1058 | " learn.freeze_to(-2)\n", 1059 | " learn.fit(lrs, 1, wds=wd, cycle_len=1, use_clr=(8,3))\n", 1060 | "\n", 1061 | " # Save the trained model\n", 1062 | " learn.save(f'{trn_size}clas_1')\n", 1063 | " learn.load(f'{trn_size}clas_1')\n", 1064 | "\n", 1065 | " # Unfreeze everything and train for a few epochs on the whole set of parameters of the model\n", 1066 | " learn.unfreeze()\n", 1067 | " learn.fit(lrs, 1, wds=wd, cycle_len=14, use_clr=(32,10))\n", 1068 | "\n", 1069 | " # Save the model\n", 1070 | " learn.sched.plot_loss()\n", 1071 | " learn.save(f'{trn_size}clas_2')" 1072 | ] 1073 | }, 1074 | { 1075 | "cell_type": "code", 1076 | "execution_count": null, 1077 | "metadata": {}, 1078 | "outputs": [ 1079 | { 1080 | "name": "stdout", 1081 | "output_type": "stream", 1082 | "text": [ 1083 | "##################################################\n", 1084 | "Experiment with training size 50\n" 1085 | ] 1086 | }, 1087 | { 1088 | "data": { 1089 | "application/vnd.jupyter.widget-view+json": { 1090 | "model_id": "527d42dc659a450da6729240a5031473", 1091 | "version_major": 2, 1092 | "version_minor": 0 1093 | }, 1094 | "text/plain": [ 1095 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1096 | ] 1097 | }, 1098 | "metadata": {}, 1099 | "output_type": "display_data" 1100 | }, 1101 | { 1102 | "name": "stdout", 1103 | "output_type": "stream", 1104 | "text": [ 1105 | "epoch trn_loss val_loss accuracy \n", 1106 | " 0 0.739306 0.713452 0.515713 \n" 1107 | ] 1108 | }, 1109 | { 1110 | "data": { 1111 | "application/vnd.jupyter.widget-view+json": { 1112 | "model_id": "ee8c2bcd3caa40509641c4735fa0af4f", 1113 | "version_major": 2, 1114 | "version_minor": 0 1115 | }, 1116 | "text/plain": [ 1117 | "HBox(children=(IntProgress(value=0, 
description='Epoch', max=1), HTML(value='')))" 1118 | ] 1119 | }, 1120 | "metadata": {}, 1121 | "output_type": "display_data" 1122 | }, 1123 | { 1124 | "name": "stdout", 1125 | "output_type": "stream", 1126 | "text": [ 1127 | "epoch trn_loss val_loss accuracy \n", 1128 | " 0 0.780253 0.682528 0.60368 \n" 1129 | ] 1130 | }, 1131 | { 1132 | "data": { 1133 | "application/vnd.jupyter.widget-view+json": { 1134 | "model_id": "4d79ca2874ce4160a029781cd75042f8", 1135 | "version_major": 2, 1136 | "version_minor": 0 1137 | }, 1138 | "text/plain": [ 1139 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1140 | ] 1141 | }, 1142 | "metadata": {}, 1143 | "output_type": "display_data" 1144 | }, 1145 | { 1146 | "name": "stdout", 1147 | "output_type": "stream", 1148 | "text": [ 1149 | "epoch trn_loss val_loss accuracy \n", 1150 | " 0 0.480616 0.665205 0.64434 \n" 1151 | ] 1152 | }, 1153 | { 1154 | "data": { 1155 | "application/vnd.jupyter.widget-view+json": { 1156 | "model_id": "ff07a3dc2f0341b5953d7589bc45d242", 1157 | "version_major": 2, 1158 | "version_minor": 0 1159 | }, 1160 | "text/plain": [ 1161 | "HBox(children=(IntProgress(value=0, description='Epoch', max=14), HTML(value='')))" 1162 | ] 1163 | }, 1164 | "metadata": {}, 1165 | "output_type": "display_data" 1166 | }, 1167 | { 1168 | "name": "stdout", 1169 | "output_type": "stream", 1170 | "text": [ 1171 | " 1 0.60659 0.642443 0.60112 \n", 1172 | " 2 0.61519 0.619721 0.69608 \n", 1173 | " 3 0.642923 0.626678 0.61732 \n", 1174 | " 4 0.652647 0.660426 0.51752 \n", 1175 | " 5 0.602682 0.620081 0.5915 \n", 1176 | " 6 0.594284 0.584023 0.66818 \n", 1177 | " 7 0.58685 0.559354 0.73106 \n", 1178 | " 8 0.55382 0.540782 0.77018 \n", 1179 | " 9 0.52772 0.527295 0.79862 \n", 1180 | " 10 0.518118 0.487917 0.83798 \n", 1181 | " 11 0.518521 0.461052 0.84442 \n", 1182 | " 12 0.53044 0.453327 0.84558 \n", 1183 | " 13 0.510322 0.468408 0.83854 \n", 1184 | "Time cost: 3158.540988445282\n", 1185 | "##################################################\n", 1186 | "Experiment with training size 100\n" 1187 | ] 1188 | }, 1189 | { 1190 | "data": { 1191 | "application/vnd.jupyter.widget-view+json": { 1192 | "model_id": "ad60ac5683bd408992d4c89c436c2daf", 1193 | "version_major": 2, 1194 | "version_minor": 0 1195 | }, 1196 | "text/plain": [ 1197 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1198 | ] 1199 | }, 1200 | "metadata": {}, 1201 | "output_type": "display_data" 1202 | }, 1203 | { 1204 | "name": "stdout", 1205 | "output_type": "stream", 1206 | "text": [ 1207 | "epoch trn_loss val_loss accuracy \n", 1208 | " 0 0.598895 78.284828 0.482651 \n" 1209 | ] 1210 | }, 1211 | { 1212 | "data": { 1213 | "application/vnd.jupyter.widget-view+json": { 1214 | "model_id": "5124bc474f48464487b3f3e1b58ae6bc", 1215 | "version_major": 2, 1216 | "version_minor": 0 1217 | }, 1218 | "text/plain": [ 1219 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1220 | ] 1221 | }, 1222 | "metadata": {}, 1223 | "output_type": "display_data" 1224 | }, 1225 | { 1226 | "name": "stdout", 1227 | "output_type": "stream", 1228 | "text": [ 1229 | "epoch trn_loss val_loss accuracy \n", 1230 | " 0 0.62033 0.664568 0.71318 \n" 1231 | ] 1232 | }, 1233 | { 1234 | "data": { 1235 | "application/vnd.jupyter.widget-view+json": { 1236 | "model_id": "e5fe7d0a84f64499b389d7c14a44854f", 1237 | "version_major": 2, 1238 | "version_minor": 0 1239 | }, 1240 | "text/plain": [ 1241 | "HBox(children=(IntProgress(value=0, 
description='Epoch', max=1), HTML(value='')))" 1242 | ] 1243 | }, 1244 | "metadata": {}, 1245 | "output_type": "display_data" 1246 | }, 1247 | { 1248 | "name": "stdout", 1249 | "output_type": "stream", 1250 | "text": [ 1251 | "epoch trn_loss val_loss accuracy \n", 1252 | " 0 0.602062 0.617134 0.74574 \n" 1253 | ] 1254 | }, 1255 | { 1256 | "data": { 1257 | "application/vnd.jupyter.widget-view+json": { 1258 | "model_id": "aaeca1c3aa2b4a9cbeabe672487990fb", 1259 | "version_major": 2, 1260 | "version_minor": 0 1261 | }, 1262 | "text/plain": [ 1263 | "HBox(children=(IntProgress(value=0, description='Epoch', max=14), HTML(value='')))" 1264 | ] 1265 | }, 1266 | "metadata": {}, 1267 | "output_type": "display_data" 1268 | }, 1269 | { 1270 | "name": "stdout", 1271 | "output_type": "stream", 1272 | "text": [ 1273 | "epoch trn_loss val_loss accuracy \n", 1274 | " 0 0.509279 0.616894 0.58494 \n", 1275 | " 1 0.528293 0.574365 0.69924 \n", 1276 | " 2 0.496826 0.544474 0.75798 \n", 1277 | " 3 0.478803 0.559163 0.6684 \n", 1278 | " 4 0.442439 0.568413 0.64396 \n", 1279 | " 5 0.45688 0.435576 0.82176 \n", 1280 | " 6 0.438374 0.401803 0.87232 \n", 1281 | " 7 0.435346 0.382793 0.86982 \n", 1282 | " 8 0.430963 0.38687 0.86138 \n", 1283 | " 9 0.421749 0.363613 0.86442 \n", 1284 | " 10 0.404818 0.347554 0.87324 \n", 1285 | " 11 0.402366 0.34878 0.8688 \n", 1286 | " 12 0.420744 0.341431 0.86758 \n", 1287 | " 13 0.405834 0.34154 0.86362 \n", 1288 | "Time cost: 3164.5589134693146\n", 1289 | "##################################################\n", 1290 | "Experiment with training size 500\n" 1291 | ] 1292 | }, 1293 | { 1294 | "data": { 1295 | "application/vnd.jupyter.widget-view+json": { 1296 | "model_id": "fb902a8b103341478714ea65b737d627", 1297 | "version_major": 2, 1298 | "version_minor": 0 1299 | }, 1300 | "text/plain": [ 1301 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1302 | ] 1303 | }, 1304 | "metadata": {}, 1305 | "output_type": "display_data" 1306 | }, 1307 | { 1308 | "name": "stdout", 1309 | "output_type": "stream", 1310 | "text": [ 1311 | " \r" 1312 | ] 1313 | }, 1314 | { 1315 | "data": { 1316 | "application/vnd.jupyter.widget-view+json": { 1317 | "model_id": "8503deab63514cf4ad216d70c1c6d27e", 1318 | "version_major": 2, 1319 | "version_minor": 0 1320 | }, 1321 | "text/plain": [ 1322 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1323 | ] 1324 | }, 1325 | "metadata": {}, 1326 | "output_type": "display_data" 1327 | }, 1328 | { 1329 | "name": "stdout", 1330 | "output_type": "stream", 1331 | "text": [ 1332 | "epoch trn_loss val_loss accuracy \n", 1333 | " 0 0.531424 0.558967 0.85856 \n" 1334 | ] 1335 | }, 1336 | { 1337 | "data": { 1338 | "application/vnd.jupyter.widget-view+json": { 1339 | "model_id": "09bb2a30cd8c41f89c4f419b504d5a87", 1340 | "version_major": 2, 1341 | "version_minor": 0 1342 | }, 1343 | "text/plain": [ 1344 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1345 | ] 1346 | }, 1347 | "metadata": {}, 1348 | "output_type": "display_data" 1349 | }, 1350 | { 1351 | "name": "stdout", 1352 | "output_type": "stream", 1353 | "text": [ 1354 | "epoch trn_loss val_loss accuracy \n", 1355 | " 0 0.427045 0.402448 0.88602 \n" 1356 | ] 1357 | }, 1358 | { 1359 | "data": { 1360 | "application/vnd.jupyter.widget-view+json": { 1361 | "model_id": "4a15c2e251094cbf8b86b7eae495b92d", 1362 | "version_major": 2, 1363 | "version_minor": 0 1364 | }, 1365 | "text/plain": [ 1366 | 
"HBox(children=(IntProgress(value=0, description='Epoch', max=14), HTML(value='')))" 1367 | ] 1368 | }, 1369 | "metadata": {}, 1370 | "output_type": "display_data" 1371 | }, 1372 | { 1373 | "name": "stdout", 1374 | "output_type": "stream", 1375 | "text": [ 1376 | "epoch trn_loss val_loss accuracy \n", 1377 | " 0 0.43276 0.325113 0.88386 \n", 1378 | " 1 0.439859 0.350954 0.85564 \n", 1379 | " 2 0.420882 0.301699 0.88072 \n", 1380 | " 3 0.408916 0.243965 0.91232 \n", 1381 | " 4 0.385137 0.265443 0.8924 \n", 1382 | " 5 0.374238 0.249731 0.89888 \n", 1383 | " 6 0.397431 0.265853 0.90392 \n", 1384 | " 7 0.388508 0.256725 0.90612 \n", 1385 | " 8 0.405042 0.269658 0.90676 \n", 1386 | " 9 0.3749 0.278558 0.89718 \n", 1387 | " 10 0.378312 0.280107 0.89688 \n", 1388 | " 11 0.368829 0.269968 0.90122 \n", 1389 | " 12 0.412016 0.274945 0.90104 \n", 1390 | " 13 0.399776 0.281551 0.89786 \n", 1391 | "Time cost: 3095.5910897254944\n", 1392 | "##################################################\n", 1393 | "Experiment with training size 1000\n" 1394 | ] 1395 | }, 1396 | { 1397 | "data": { 1398 | "application/vnd.jupyter.widget-view+json": { 1399 | "model_id": "612a9871f9a84d9cbaf1d58e8471a242", 1400 | "version_major": 2, 1401 | "version_minor": 0 1402 | }, 1403 | "text/plain": [ 1404 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1405 | ] 1406 | }, 1407 | "metadata": {}, 1408 | "output_type": "display_data" 1409 | }, 1410 | { 1411 | "name": "stdout", 1412 | "output_type": "stream", 1413 | "text": [ 1414 | " \r" 1415 | ] 1416 | }, 1417 | { 1418 | "data": { 1419 | "application/vnd.jupyter.widget-view+json": { 1420 | "model_id": "c0caf2cfa4414c0080a551270ba43207", 1421 | "version_major": 2, 1422 | "version_minor": 0 1423 | }, 1424 | "text/plain": [ 1425 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1426 | ] 1427 | }, 1428 | "metadata": {}, 1429 | "output_type": "display_data" 1430 | }, 1431 | { 1432 | "name": "stdout", 1433 | "output_type": "stream", 1434 | "text": [ 1435 | "epoch trn_loss val_loss accuracy \n", 1436 | " 0 0.538816 0.369876 0.90136 \n" 1437 | ] 1438 | }, 1439 | { 1440 | "data": { 1441 | "application/vnd.jupyter.widget-view+json": { 1442 | "model_id": "b6f8e0bf835441088904e22ce529794b", 1443 | "version_major": 2, 1444 | "version_minor": 0 1445 | }, 1446 | "text/plain": [ 1447 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1448 | ] 1449 | }, 1450 | "metadata": {}, 1451 | "output_type": "display_data" 1452 | }, 1453 | { 1454 | "name": "stdout", 1455 | "output_type": "stream", 1456 | "text": [ 1457 | "epoch trn_loss val_loss accuracy \n", 1458 | " 0 0.453464 0.315374 0.88258 \n" 1459 | ] 1460 | }, 1461 | { 1462 | "data": { 1463 | "application/vnd.jupyter.widget-view+json": { 1464 | "model_id": "9a52a6708906433c9b1bfde614fcf859", 1465 | "version_major": 2, 1466 | "version_minor": 0 1467 | }, 1468 | "text/plain": [ 1469 | "HBox(children=(IntProgress(value=0, description='Epoch', max=14), HTML(value='')))" 1470 | ] 1471 | }, 1472 | "metadata": {}, 1473 | "output_type": "display_data" 1474 | }, 1475 | { 1476 | "name": "stdout", 1477 | "output_type": "stream", 1478 | "text": [ 1479 | "epoch trn_loss val_loss accuracy \n", 1480 | " 0 0.404357 0.259631 0.90256 \n", 1481 | " 1 0.419865 0.254745 0.89808 \n", 1482 | " 2 0.445964 0.268253 0.89904 \n", 1483 | " 3 0.427022 0.229095 0.91462 \n", 1484 | " 4 0.414167 0.228874 0.91148 \n", 1485 | " 5 0.407483 0.219707 0.91912 \n", 1486 | " 6 
0.381847 0.216046 0.9203 \n", 1487 | " 7 0.365503 0.219289 0.91962 \n", 1488 | " 8 0.358103 0.213313 0.92152 \n", 1489 | " 9 0.328652 0.219443 0.91694 \n", 1490 | " 10 0.360773 0.225698 0.9129 \n", 1491 | " 11 0.325618 0.216891 0.91786 \n", 1492 | " 12 0.358954 0.213793 0.91994 \n", 1493 | " 13 0.324676 0.217357 0.91804 \n", 1494 | "Time cost: 3222.9498105049133\n", 1495 | "##################################################\n", 1496 | "Experiment with training size 5000\n" 1497 | ] 1498 | }, 1499 | { 1500 | "data": { 1501 | "application/vnd.jupyter.widget-view+json": { 1502 | "model_id": "d427fdd87c0f4503a4071c1af8ab976f", 1503 | "version_major": 2, 1504 | "version_minor": 0 1505 | }, 1506 | "text/plain": [ 1507 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1508 | ] 1509 | }, 1510 | "metadata": {}, 1511 | "output_type": "display_data" 1512 | }, 1513 | { 1514 | "name": "stdout", 1515 | "output_type": "stream", 1516 | "text": [ 1517 | " 80%|████████ | 168/209 [00:33<00:08, 5.06it/s, loss=2.11] " 1518 | ] 1519 | }, 1520 | { 1521 | "data": { 1522 | "application/vnd.jupyter.widget-view+json": { 1523 | "model_id": "3eba1d18c6ce49fab0ce55092d8289aa", 1524 | "version_major": 2, 1525 | "version_minor": 0 1526 | }, 1527 | "text/plain": [ 1528 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1529 | ] 1530 | }, 1531 | "metadata": {}, 1532 | "output_type": "display_data" 1533 | }, 1534 | { 1535 | "name": "stdout", 1536 | "output_type": "stream", 1537 | "text": [ 1538 | "epoch trn_loss val_loss accuracy \n", 1539 | " 0 0.476658 0.251208 0.91892 \n" 1540 | ] 1541 | }, 1542 | { 1543 | "data": { 1544 | "application/vnd.jupyter.widget-view+json": { 1545 | "model_id": "ab5437ff9b5243cfb8167a58dbc9392c", 1546 | "version_major": 2, 1547 | "version_minor": 0 1548 | }, 1549 | "text/plain": [ 1550 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1551 | ] 1552 | }, 1553 | "metadata": {}, 1554 | "output_type": "display_data" 1555 | }, 1556 | { 1557 | "name": "stdout", 1558 | "output_type": "stream", 1559 | "text": [ 1560 | "epoch trn_loss val_loss accuracy \n", 1561 | " 0 0.433952 0.231621 0.92414 \n" 1562 | ] 1563 | }, 1564 | { 1565 | "data": { 1566 | "application/vnd.jupyter.widget-view+json": { 1567 | "model_id": "b5b7a98c33b14bab9419ff6a0017aca9", 1568 | "version_major": 2, 1569 | "version_minor": 0 1570 | }, 1571 | "text/plain": [ 1572 | "HBox(children=(IntProgress(value=0, description='Epoch', max=14), HTML(value='')))" 1573 | ] 1574 | }, 1575 | "metadata": {}, 1576 | "output_type": "display_data" 1577 | }, 1578 | { 1579 | "name": "stdout", 1580 | "output_type": "stream", 1581 | "text": [ 1582 | "epoch trn_loss val_loss accuracy \n", 1583 | " 0 0.441624 0.26157 0.91548 \n", 1584 | " 1 0.39728 0.216438 0.92384 \n", 1585 | " 2 0.409002 0.224356 0.92368 \n", 1586 | " 3 0.422964 0.215129 0.92208 \n", 1587 | " 5 0.323477 0.190459 0.92822 \n", 1588 | " 6 0.359594 0.204132 0.9299 \n", 1589 | " 7 0.364609 0.197063 0.92962 \n", 1590 | " 8 0.335434 0.195078 0.93054 \n", 1591 | " 9 0.344869 0.193901 0.93174 \n", 1592 | " 10 0.355132 0.204457 0.92736 \n", 1593 | " 11 0.361977 0.196434 0.92986 \n", 1594 | " 12 0.335396 0.200645 0.92896 \n", 1595 | " 13 0.327323 0.20609 0.92624 \n", 1596 | "Time cost: 4408.779232263565\n", 1597 | "##################################################\n", 1598 | "Experiment with training size 10000\n" 1599 | ] 1600 | }, 1601 | { 1602 | "data": { 1603 | 
"application/vnd.jupyter.widget-view+json": { 1604 | "model_id": "8f45368cce77460e99e57503a0aaa86a", 1605 | "version_major": 2, 1606 | "version_minor": 0 1607 | }, 1608 | "text/plain": [ 1609 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1610 | ] 1611 | }, 1612 | "metadata": {}, 1613 | "output_type": "display_data" 1614 | }, 1615 | { 1616 | "name": "stdout", 1617 | "output_type": "stream", 1618 | "text": [ 1619 | " 77%|███████▋ | 323/417 [00:54<00:15, 5.95it/s, loss=1.4] " 1620 | ] 1621 | }, 1622 | { 1623 | "data": { 1624 | "application/vnd.jupyter.widget-view+json": { 1625 | "model_id": "2b682accf3d4426db72b169099aebc1b", 1626 | "version_major": 2, 1627 | "version_minor": 0 1628 | }, 1629 | "text/plain": [ 1630 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1631 | ] 1632 | }, 1633 | "metadata": {}, 1634 | "output_type": "display_data" 1635 | }, 1636 | { 1637 | "name": "stdout", 1638 | "output_type": "stream", 1639 | "text": [ 1640 | "epoch trn_loss val_loss accuracy \n", 1641 | " 0 0.442663 0.237719 0.91894 \n" 1642 | ] 1643 | }, 1644 | { 1645 | "data": { 1646 | "application/vnd.jupyter.widget-view+json": { 1647 | "model_id": "874d6b51c4f741c09dac9aa917dd1b44", 1648 | "version_major": 2, 1649 | "version_minor": 0 1650 | }, 1651 | "text/plain": [ 1652 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1653 | ] 1654 | }, 1655 | "metadata": {}, 1656 | "output_type": "display_data" 1657 | }, 1658 | { 1659 | "name": "stdout", 1660 | "output_type": "stream", 1661 | "text": [ 1662 | "epoch trn_loss val_loss accuracy \n", 1663 | " 0 0.431919 0.23883 0.92334 \n" 1664 | ] 1665 | }, 1666 | { 1667 | "data": { 1668 | "application/vnd.jupyter.widget-view+json": { 1669 | "model_id": "d92179a697634fa8849aca16e5354f0a", 1670 | "version_major": 2, 1671 | "version_minor": 0 1672 | }, 1673 | "text/plain": [ 1674 | "HBox(children=(IntProgress(value=0, description='Epoch', max=14), HTML(value='')))" 1675 | ] 1676 | }, 1677 | "metadata": {}, 1678 | "output_type": "display_data" 1679 | }, 1680 | { 1681 | "name": "stdout", 1682 | "output_type": "stream", 1683 | "text": [ 1684 | "epoch trn_loss val_loss accuracy \n", 1685 | " 0 0.423774 0.199739 0.92554 \n", 1686 | " 1 0.400266 0.206542 0.92344 \n", 1687 | " 2 0.327765 0.191927 0.93002 \n", 1688 | " 3 0.355688 0.193465 0.92908 \n", 1689 | " 4 0.336286 0.182849 0.93128 \n", 1690 | " 5 0.324608 0.18046 0.93278 \n", 1691 | " 6 0.314902 0.183413 0.93328 \n", 1692 | " 7 0.328284 0.178485 0.93288 \n", 1693 | " 8 0.337061 0.180216 0.93436 \n", 1694 | " 9 0.308937 0.179975 0.9341 \n", 1695 | " 10 0.290357 0.178364 0.93366 \n", 1696 | " 11 0.301147 0.175089 0.93584 \n", 1697 | " 12 0.267383 0.176672 0.934 \n", 1698 | " 13 0.305133 0.17432 0.93538 \n", 1699 | "Time cost: 5908.472403526306\n", 1700 | "##################################################\n", 1701 | "Experiment with training size 20000\n" 1702 | ] 1703 | }, 1704 | { 1705 | "data": { 1706 | "application/vnd.jupyter.widget-view+json": { 1707 | "model_id": "9f3ca9f298094380949988fdef48a975", 1708 | "version_major": 2, 1709 | "version_minor": 0 1710 | }, 1711 | "text/plain": [ 1712 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1713 | ] 1714 | }, 1715 | "metadata": {}, 1716 | "output_type": "display_data" 1717 | }, 1718 | { 1719 | "name": "stdout", 1720 | "output_type": "stream", 1721 | "text": [ 1722 | " 75%|███████▍ | 623/834 [01:46<00:36, 5.84it/s, loss=1.45] " 
1723 | ] 1724 | }, 1725 | { 1726 | "data": { 1727 | "application/vnd.jupyter.widget-view+json": { 1728 | "model_id": "98d4e901bff745158fb949b6967242b1", 1729 | "version_major": 2, 1730 | "version_minor": 0 1731 | }, 1732 | "text/plain": [ 1733 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1734 | ] 1735 | }, 1736 | "metadata": {}, 1737 | "output_type": "display_data" 1738 | }, 1739 | { 1740 | "name": "stdout", 1741 | "output_type": "stream", 1742 | "text": [ 1743 | "epoch trn_loss val_loss accuracy \n", 1744 | " 0 0.425248 0.229867 0.91804 \n" 1745 | ] 1746 | }, 1747 | { 1748 | "data": { 1749 | "application/vnd.jupyter.widget-view+json": { 1750 | "model_id": "10b105d862fe4db9b180c4122105e5d6", 1751 | "version_major": 2, 1752 | "version_minor": 0 1753 | }, 1754 | "text/plain": [ 1755 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1756 | ] 1757 | }, 1758 | "metadata": {}, 1759 | "output_type": "display_data" 1760 | }, 1761 | { 1762 | "name": "stdout", 1763 | "output_type": "stream", 1764 | "text": [ 1765 | "epoch trn_loss val_loss accuracy \n", 1766 | " 0 0.410012 0.210839 0.92766 \n" 1767 | ] 1768 | }, 1769 | { 1770 | "data": { 1771 | "application/vnd.jupyter.widget-view+json": { 1772 | "model_id": "c13f18e98fb244a4b86697eec5088914", 1773 | "version_major": 2, 1774 | "version_minor": 0 1775 | }, 1776 | "text/plain": [ 1777 | "HBox(children=(IntProgress(value=0, description='Epoch', max=14), HTML(value='')))" 1778 | ] 1779 | }, 1780 | "metadata": {}, 1781 | "output_type": "display_data" 1782 | }, 1783 | { 1784 | "name": "stdout", 1785 | "output_type": "stream", 1786 | "text": [ 1787 | "epoch trn_loss val_loss accuracy \n", 1788 | " 0 0.418405 0.202191 0.92848 \n", 1789 | " 1 0.385172 0.21752 0.92934 \n", 1790 | " 2 0.341867 0.1879 0.93032 \n", 1791 | " 3 0.343511 0.176737 0.93358 \n", 1792 | " 4 0.299173 0.169992 0.9357 \n", 1793 | " 58%|█████▊ | 480/834 [03:46<02:46, 2.12it/s, loss=0.315]" 1794 | ] 1795 | }, 1796 | { 1797 | "name": "stderr", 1798 | "output_type": "stream", 1799 | "text": [ 1800 | "IOPub message rate exceeded.\n", 1801 | "The notebook server will temporarily stop sending output\n", 1802 | "to the client in order to avoid crashing it.\n", 1803 | "To change this limit, set the config variable\n", 1804 | "`--NotebookApp.iopub_msg_rate_limit`.\n", 1805 | "\n", 1806 | "Current values:\n", 1807 | "NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n", 1808 | "NotebookApp.rate_limit_window=3.0 (secs)\n", 1809 | "\n" 1810 | ] 1811 | }, 1812 | { 1813 | "name": "stdout", 1814 | "output_type": "stream", 1815 | "text": [ 1816 | " 9 0.313465 0.162371 0.93966 \n", 1817 | " 10 0.2692 0.162227 0.93946 \n", 1818 | " 11 0.272758 0.159716 0.94032 \n", 1819 | " 80%|███████▉ | 666/834 [04:43<01:11, 2.35it/s, loss=0.261]" 1820 | ] 1821 | }, 1822 | { 1823 | "name": "stderr", 1824 | "output_type": "stream", 1825 | "text": [ 1826 | "IOPub message rate exceeded.\n", 1827 | "The notebook server will temporarily stop sending output\n", 1828 | "to the client in order to avoid crashing it.\n", 1829 | "To change this limit, set the config variable\n", 1830 | "`--NotebookApp.iopub_msg_rate_limit`.\n", 1831 | "\n", 1832 | "Current values:\n", 1833 | "NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n", 1834 | "NotebookApp.rate_limit_window=3.0 (secs)\n", 1835 | "\n" 1836 | ] 1837 | }, 1838 | { 1839 | "name": "stdout", 1840 | "output_type": "stream", 1841 | "text": [ 1842 | "epoch trn_loss val_loss accuracy \n", 1843 | " 0 
0.441473 0.254497 0.9168 \n" 1844 | ] 1845 | }, 1846 | { 1847 | "data": { 1848 | "application/vnd.jupyter.widget-view+json": { 1849 | "model_id": "1e715be6b7434946857e005128042985", 1850 | "version_major": 2, 1851 | "version_minor": 0 1852 | }, 1853 | "text/plain": [ 1854 | "HBox(children=(IntProgress(value=0, description='Epoch', max=1), HTML(value='')))" 1855 | ] 1856 | }, 1857 | "metadata": {}, 1858 | "output_type": "display_data" 1859 | }, 1860 | { 1861 | "name": "stdout", 1862 | "output_type": "stream", 1863 | "text": [ 1864 | " 98%|█████████▊| 2044/2084 [07:08<00:08, 4.77it/s, loss=0.414]" 1865 | ] 1866 | }, 1867 | { 1868 | "name": "stderr", 1869 | "output_type": "stream", 1870 | "text": [ 1871 | "IOPub message rate exceeded.\n", 1872 | "The notebook server will temporarily stop sending output\n", 1873 | "to the client in order to avoid crashing it.\n", 1874 | "To change this limit, set the config variable\n", 1875 | "`--NotebookApp.iopub_msg_rate_limit`.\n", 1876 | "\n", 1877 | "Current values:\n", 1878 | "NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n", 1879 | "NotebookApp.rate_limit_window=3.0 (secs)\n", 1880 | "\n" 1881 | ] 1882 | }, 1883 | { 1884 | "name": "stdout", 1885 | "output_type": "stream", 1886 | "text": [ 1887 | " 1 0.309567 0.170769 0.9367 \n", 1888 | " 80%|███████▉ | 1664/2084 [11:45<02:57, 2.36it/s, loss=0.249]" 1889 | ] 1890 | }, 1891 | { 1892 | "name": "stderr", 1893 | "output_type": "stream", 1894 | "text": [ 1895 | "IOPub message rate exceeded.\n", 1896 | "The notebook server will temporarily stop sending output\n", 1897 | "to the client in order to avoid crashing it.\n", 1898 | "To change this limit, set the config variable\n", 1899 | "`--NotebookApp.iopub_msg_rate_limit`.\n", 1900 | "\n", 1901 | "Current values:\n", 1902 | "NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n", 1903 | "NotebookApp.rate_limit_window=3.0 (secs)\n", 1904 | "\n" 1905 | ] 1906 | }, 1907 | { 1908 | "name": "stdout", 1909 | "output_type": "stream", 1910 | "text": [ 1911 | " 4 0.257701 0.153826 0.9416 \n", 1912 | " 80%|███████▉ | 1665/2084 [13:25<03:22, 2.07it/s, loss=0.239]" 1913 | ] 1914 | }, 1915 | { 1916 | "name": "stderr", 1917 | "output_type": "stream", 1918 | "text": [ 1919 | "IOPub message rate exceeded.\n", 1920 | "The notebook server will temporarily stop sending output\n", 1921 | "to the client in order to avoid crashing it.\n", 1922 | "To change this limit, set the config variable\n", 1923 | "`--NotebookApp.iopub_msg_rate_limit`.\n", 1924 | "\n", 1925 | "Current values:\n", 1926 | "NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n", 1927 | "NotebookApp.rate_limit_window=3.0 (secs)\n", 1928 | "\n" 1929 | ] 1930 | }, 1931 | { 1932 | "name": "stdout", 1933 | "output_type": "stream", 1934 | "text": [ 1935 | " 6 0.24764 0.148807 0.94436 \n", 1936 | " 7 0.239934 0.146907 0.9456 \n", 1937 | " 8 0.224837 0.156241 0.94496 \n", 1938 | " 9%|▉ | 189/2084 [01:18<13:09, 2.40it/s, loss=0.212]" 1939 | ] 1940 | }, 1941 | { 1942 | "name": "stderr", 1943 | "output_type": "stream", 1944 | "text": [ 1945 | "IOPub message rate exceeded.\n", 1946 | "The notebook server will temporarily stop sending output\n", 1947 | "to the client in order to avoid crashing it.\n", 1948 | "To change this limit, set the config variable\n", 1949 | "`--NotebookApp.iopub_msg_rate_limit`.\n", 1950 | "\n", 1951 | "Current values:\n", 1952 | "NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n", 1953 | "NotebookApp.rate_limit_window=3.0 (secs)\n", 1954 | "\n" 1955 | ] 1956 | }, 1957 | { 1958 | "name": 
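The truncated logs above show the loop repeatedly tripping Jupyter's IOPub rate limit, which is why the notebook itself suggests running it outside Jupyter. Below is a minimal standalone-script sketch of the cell that follows; it assumes the notebook's `experiment()` has been moved into an importable module, and the module name `ulmfit_experiments` is hypothetical.

```python
# run_experiments.py -- hypothetical standalone version of the notebook cell below,
# useful because long-running cell output in Jupyter hits the IOPub message rate limit.
from time import time

from ulmfit_experiments import experiment  # assumed home of the notebook's experiment()

if __name__ == '__main__':
    val_size = 100000
    for trn_size in [50, 100, 500, 1000, 5000, 10000, 20000, 50000]:
        print('#' * 50)
        print(f'Experiment with training size {trn_size}')
        start = time()
        experiment(trn_size, val_size)
        print(f'Time cost: {time() - start}')
```

The Conclusions cell further down plots the best accuracy for each training size, but its plotting source and rendered images do not survive in this capture. A minimal sketch of such a plot, using only the best accuracies recoverable from the logs above (the 50- and 100-example runs precede this excerpt):

```python
# Sketch: best validation accuracy vs. training-set size, log-scaled x axis.
# The values are read off the condensed experiment logs above.
import matplotlib.pyplot as plt

trn_sizes = [500, 1000, 5000, 10000, 20000, 50000]
best_acc = [0.91232, 0.92152, 0.93174, 0.93584, 0.94032, 0.94616]

plt.plot(trn_sizes, best_acc, marker='o')
plt.xscale('log')
plt.xlabel('training size')
plt.ylabel('best accuracy')
plt.title('Sentiment accuracy vs. training size (ULMFiT, Amazon reviews)')
plt.show()
```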
"stdout", 1959 | "output_type": "stream", 1960 | "text": [ 1961 | " 10 0.212315 0.145792 0.94616 \n", 1962 | " 11 0.221374 0.14564 0.9458 \n", 1963 | " 8%|▊ | 166/2084 [01:09<13:18, 2.40it/s, loss=0.186]" 1964 | ] 1965 | } 1966 | ], 1967 | "source": [ 1968 | "from time import time\n", 1969 | "val_size = 100000\n", 1970 | "for trn_size in [50, 100, 500, 1000, 5000, 10000, 20000, 50000]:\n", 1971 | " print('#'*50)\n", 1972 | " print(f'Experiment with training size {trn_size}')\n", 1973 | " start = time()\n", 1974 | " experiment(trn_size, val_size)\n", 1975 | " t = time() - start\n", 1976 | " print(f'Time cost: {t}')" 1977 | ] 1978 | }, 1979 | { 1980 | "cell_type": "markdown", 1981 | "metadata": {}, 1982 | "source": [ 1983 | "Some notebook issues here, you might want to run this cell from a python script..." 1984 | ] 1985 | }, 1986 | { 1987 | "cell_type": "markdown", 1988 | "metadata": {}, 1989 | "source": [ 1990 | "# Conclusions\n", 1991 | "Lety's see the evollution of the accuracy when we increas the size of the train data.\n", 1992 | "For each training size, we report the best accuracy among the different epochs." 1993 | ] 1994 | }, 1995 | { 1996 | "cell_type": "code", 1997 | "execution_count": 32, 1998 | "metadata": {}, 1999 | "outputs": [ 2000 | { 2001 | "data": { 2002 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZMAAAEWCAYAAACjYXoKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3XmcHVWd9/HPN0l3OuksJOkQyEYCASSAokRQwTEPiyIqKDMqKCpu6LiPMoojw/Cgjugw6iyOiiuLgojLwyjKqMOioECQNQEkgUhCQLKQPen08nv+OOcm1Z1e783tm+7+vl+vfnXVqeWeqlv3/OqcU4siAjMzs0qMqHUGzMxs8HMwMTOzijmYmJlZxRxMzMysYg4mZmZWMQcTMzOr2LAOJpJC0rwyl32ppEf2dJ768LmHSrpX0iZJHxqAz5sm6db8ef9a7c8bTCTdLOldA/A5NTnWyiXpHyR9c4A+a6GklQPxWX0habakzZJG7sl595RqHkujqrHSPU3ScmAa0FZI/m5EfGAA8xDAwRGxFCAifgscOlCfX/Bx4KaIOGqAPu9cYA0wIXxTUk3U8FgrS0T8c7XW3fl3uIfXfQ7wrog4vtx1RMQTwLg9Pe+eUs1jaVAEk+w1EfHrWmdiL3AAcE21P0SSAOXPW1JOIJE0KiJa93jmrGKl7zci2mudl8FE0siIaOt9zmEoIvb6P2A5cFIX6aOB9cARhbSpwDZg3zz+bmApsA64HphemDeAeXn4ZtJZSWnaOcDv8vCted4twGbgjcBCYGVh/sPyOtYDi4HTCtO+C3wF+DmwCbgDOKiH7T0tr2N9XudhOf1/SbWz7Tkfh3Sx7M3A54A7gY3A/wMmF6a/CLg9r/s+YGGnZT8L3Jb34VVAC7Ajf95JeZ9/GViV/74MjM7LLwRWAp8AngauLKR9HHgGeAp4LXAq8Kf8vfxDIQ/HAL/P+XsK+E+gvtN39l7g0TzPV0iFIoXv+6G8n5cAL8jp04EfAauBx4EPdbPv5+b1jsjj3wCeKUy/EvhIYX99Ou+vTcD/AE392NfdLtspTwvpeKwtB84D7gc2AD8AGgrTTwfuzd//MuCUbr7fecBE4Ft5Xz8JfAYYmec/iHTMrSXVTr8H7FP4nE/kZTYBjwAn5vSLgKvy8Jz8nb0NeCKv51OFdYwBLgeezd/bx4vb2mk/dPs7BD7GruPr7Z3KiEvzZ/8F+Bowpot1H0b6XbXlda8v/Ha/CtyQP/ck4FXAPXn/rgAuKqyntL2jevue+zNvnv5W4M/5+/hHuikX87ynko7/Tfk7Oq/zsZT33+bCXzNwc3/2W4fPrHYg2BN/vey0bwOfLYy/H/hlHj4hH7wvyDvnP4BbOxVMvQaTzvN28aXUkQLWPwD1+XM3AYcWDsi1pIJyFOlHeU0323NIPmhPzuv9eF53fVf57GL5m/PBcwTQSCpASz/sGTkfp5L6y07O41MLyz4BHJ7zWZfz/pnC+i8G/gDsSwrctwOfLuyTVuDzeX+PKaRdmNf3blKB/n1gfP6sbcDcvI6jSYXwKNKP7SFy4V34Hn4G7APMzusqFZavz9v+QlKtah6pZjUCuDvnoR44EHgMeEU3+/AJ4Og8/Eie97DCtOcX9tey/J2NyeOX9GNfd7lsF/lZyO7B5E5SgJyc99F787RjSAHm5Py5M4Dn9PD9/gT4OulY2Tev9z15/nl5PaPzd30r8OU87VBSQTq9UDAelIcvYvdg8o28nc8jFVql/XkJcAswCZhJCpBdBpMefoetpOOyLu/vrcCkPP1LpJPIyaTj7b+Bz3Wz7nMo/OYLv90NwHF5fzbkzzwyjz+XVNi+ttP2FgNEd8dIf+adTyrwjycdw5eSTvS6KxefAl6ahyex66RqYVf7F5hAOo7e09/9tnMd1QoAe/KP9OPZTDrDK/29O087CVhWmPc24K15+FvAFwrTxuUvYE7nA5PKgslLSWfiIwrTryafseQD8puFaacCD3ezrf8IXFsYH0EqIBd2lc8ult95ABYOwh3ASNKZ5JWd5r8ReFth2Yu7+DEVg8ky4NTC+CuA5YV9soOOZ8kLScGidLY7Pu/LYwvz3E3+MXaxPR8BftLpezi+MH4tcH5hWz7cxTqOBZ7olPZJ4DvdfOaVwEeB/UjB5Auk2lDnWsvNwAWF5d7HrhOZvuzrLpftIj87j7XC7+HswvgXgK/l4a8DX+rh2Li4MD6NVLCPKaSdReq
T62r51wL35OF5pJrASUBdp/kuYvdgMrMw/U7gzDzcIagD76L/wWQbuUDOac+QTkhEOjE7qDDtxcDj3az7HLoOJld0l588z5dL+5yuA0R3x0h/5r0QuLowbSzpt9ZdMHkCeA+pr7PbYymnjSCdoH01j/drv5X+BlOfyWuj6z6Tm4Cxko4lnSEcRTrbgnTm9sfSjBGxWdJa0tna8j2Yt+nAiujY/vzn/DklTxeGt9J9x9v0vCwAEdEuaUWndfVmRad81AFNpLP010t6TWF6HWkfdrVsr/nLw9ML46sjYnunZdbGrnbmbfn/XwrTt5H3h6RDgC8CC0g/mFGkYFPU3b6cRQp2nR0ATJe0vpA2EvhtF/NCOlM+jdR8civpR/4WUjPIbzt9z93lpS/7uq/HRFc6L1v6DmaRmmS6U/x+D8h5eip1oQCpYFkB6Uo+4N9IJ0vj87RnASJiqaSPkALH4ZJuBD4aEav6mN/Stk7vlKfejr+urI2OfXOl9U8lHUN3F7ZPpO++PzrkKZc1l5Bq//WkmtsPe1i+P99zn/ZTRGzNZVl3/hq4ALhE0v2kE67fdzPvZ0nfb+nq0LL226C/NDgXUteSzqjOAn4WEZvy5FWkHwwAkhqBKaQz/c62kHZgyX79yMYqYJak4v6c3c3n9GVdxTyLVED0Z12zOuWjhdTct4J0trxP4a8xIi4pzB/9yV9ef7EA6W353nwVeJh0xc4EUtOhel5kpxWkdv6u0h/vtN3jI+LUbtZzC6kAXZiHf0dq5nhZHu9rXnrb19XQ3T4oKX4/K0g1k6ZCHidExOF5+j/n+Y/M38XZFL6LiPh+pCufDsjzfb6M/D5Fat4qmdXdjGVYQzpRObywfRMjorvCvLtjt3P690lNQLMiYiKpP6Gvx2i5OuwnSWNIZVmXIuKuiDid1HT5U1IZuRtJZ5LKzb+JiJac3N/9BgyBYJJ9n9SZ9OY8XHI18HZJR0kaTfpx3BERy7tYx73AGZLG5ntP3tlp+l9Ibe1duYN0FvFxSXWSFgKvobyrrq4FXiXpREl1pI7FZlLfRF+dLWm+pLGktuTrctC9CniNpFdIGimpIV+nP7Pn1XVwNXCBpKmSmkjV76v6sXxvxpM6NjdLeg7wt/1Y9pvAeZKOVjJP0gGkZpVNkj4haUze9iMkvbCrlUTEo6Qf09nALRGxkfT9/zV9DyZ7Yl+X41ukY/5ESSMkzcj7cTcR8RSpk/dfJU3I8x8k6WV5lvGk5uUNkmYAf19aNt/vdEL+XW0n7a9yrgy7FvikpEn5M3q73L+n32EHuQb5DeBLkvbN+Z4h6RU9rHumpPpeVj0eWBcR2yUdA7ypL/mp0HWk4+klOX8X0U0Ak1Qv6c2SJuYAsZEuvhtJzyf1I782IlaX0svYb8DgCib/nW/wKf2VmrKIiDtINYvpwC8K6b8m9UH8iBTZDwLO7Gb9XyK1Qf6FdHXJ9zpNvwi4XNJ6SW8oToiIHaTg8UpSVP8vUr/Nw/3dyIh4hFSI/Ude12tIl0Xv6MdqriS19T5N6jD8UF73CtKVPv9A6rheQSog+nMcfAZYROoofYDUjPiZfizfm/NIP85NpAP6B31dMCJ+SKqyfz8v/1PSlWxtwKtJTaCPk/brN0lXMnXnFlLzyYrCuCg0m/aSlz2xr/stIu4E3k46njeQ8n1AD4u8ldRUs4TUhHUdsH+e9n9JF69sIF2J+OPCcqNJTT1rSMfZvqR+qP66mNSc+Djw6/z5zT3MfxHd/A678QnSBSx/kLQxf0Z391n8L+kqyqclrelhne8DLpa0iXQy1eVZ/54UEYuBD5JOUJ8iBfln6H5fvQVYnrf5vaQT7c5OJ3XO/65QrpbKz/7sNyBfUmlDh6SbSZ2fA3IHstmeJOlvSZ3zL+t15mFM0jjSxSAHR8Tjtc4PDK6aiZkNMZL2l3RcbmI7lNSs+5PelhuOJL0mN8M3ki4NfoA9eyFRRRxMzKyW6kmXM28iNTP9P1Izse3udHbdLHwwqQa31zQtuZnLzMwq5pqJmZlVbDDdtNijpqammDNnTq2zYWY2qNx9991rImJqpesZMsFkzpw5LFq0qNbZMDMbVCT9ufe5eudmLjMzq5iDiZmZVczBxMzMKuZgYmZmFXMwMTOzijmYmJlZxRxMzMysYkPmPhMzs6EoImhubWfjthY2bm9hw7bWncPpfyuTxtbzpmNn1zSfDiZmZlW2vaUtF/6tHYLAhm0thcDQcdqmQvqOtp7fO/b82fs4mJiZ7e2aW9t2K+x31RR2DwSdA8SO1p6DQf3IEUwYU8eEMaOY0FDHxDF1zJo0hglj0vCEhl3TJoypY0LDqPy/jvENo2io6+9r7fc8BxMzG/J2tLb3WNhv3JaDwvbdm5A2bmuhuZdgUDdSOwv98bmwnzFpzG5BYGKnQFCatjcEg0o5mJjZXq+lrb3HQFAa39BFINi4vYXtLT0Hg1EjcjAoFPbTJ47ptjaQAsOuaaNHjUDq8pXsw4aDiZlVXUtbO5t6CQQ99SVsa2nrcf0jR4gJDaMKAaGO/SY2dBMIigEi1RYa6hwMKuVgYma9ai0Fgz4Egq76Erbu6DkYjBAdCvuJY+rYd/y43Qv/ToGgND62fqSDQY05mJgNA23twaZ+BILOfQlbegkGEh0L/oY65jY17jzz76qZqBgYGh0MBj0HE7NBoL092NTcWugo7lsgKDUtbWpu7XH9Eowf3bEpaPbksbvVFnZrNsrDjfWjGDHCwWA4czAxGwDt7cHmHbnA76GjuLsAsbm5lYieP2N8Q8fO4lmTx3Z7SWmHy03H1DHOwcAqVNVgIukU4N+AkcA3I+KSTtMPAL4NTAXWAWdHxMrC9AnAEuCnEfGBaubVrCft7cGWHa27CvguLyXtvglpU1+CQa4ZjM8F/ox9xnDY/uN77EQuNSGNGz2KkQ4GVkNVCyaSRgJfAU4GVgJ3Sbo+IpYUZrsUuCIiLpd0AvA54C2F6Z8Gbq1WHm34iAi27GjrWNB3e9NZYTwPb9reQnsvwaCxfmSHK4Sm79PAcxrG93o10YQxoxg3ehSjRvpReTZ4VbNmcgywNCIeA5B0DXA6qaZRMh/4aB6+CfhpaYKko4FpwC+BBVXMpw0h67fu4PE1W1i+dguPr9nK8p3DW9i0ved+g7H1IzsU9tMmNHDItPG9BoLSXcgOBjacVTOYzABWFMZXAsd2muc+4AxSU9jrgPGSpgDPAv8KnA2c1N0HSDoXOBdg9uzaPpfGBs7G7S0sX5MCxPI1W3cGi+Vrt7B+a8vO+SSYPnEMc5saOf2o6cycNJZ9xuweCEpNS3UOBmZlq3UH/HnAf0o6h9Sc9STQBrwPuCEiVvZ0uWBEXAZcBrBgwYJeGiFsMNnS3LozQKTAsXXn8NotOzrMu//EBuZMaeSVR+zP3KaxzJnSyNymRmZNHjskHlNhNhhUM5g8CcwqjM/MaTtFxCpSzQRJ44C/joj1kl4MvFTS+4BxQL2kzRFxfhXzawNs2462XcEi/1++ZiuPr93C6k3NHebdd/xo5jQ1ctJh05jT1JiCRlMjB0xuZE
y9A4ZZrVUzmNwFHCxpLimInAm8qTiDpCZgXUS0A58kXdlFRLy5MM85wAIHksFpe0sbT6zbmpukdvVfLF+zlac3bu8wb9O40cxtGsvCQ6Yyp6mROVMamZNrGo2ja12JNrOeVO0XGhGtkj4A3Ei6NPjbEbFY0sXAooi4HlgIfE5SkJq53l+t/Fj17Ght54l1HTu7U41jK6s2bOtwSezkxnrmTBnLS+ZNYe6UxlzLaOSAKWMZ31BXu40ws4ooerv4fZBYsGBBLFq0qNbZGLJa2tpZ+ey2XR3fhaDx5LPbOlw2O3FMXQoSU8buDBZzpqS/iWMdMMz2JpLujoiKr5h124Ht1NYePPnstp39F8UO8JXPbqO1EDHGjx7FnKZGjpo1idcdNSM1SzU1MndKI5Ma62u4FWZWCw4mw9DG7S08sHLDbv0YT6zbSkvbroAxtn4kc6Y0cvj0ibzqufvvvEpqTlMjUxrr/WA+M9vJwWQYaG5t449/Xs/ty9bwu6VruH/lBtpyLaOhbgRzpjRy8L7jOXn+fh0urZ06frQDhpn1iYPJENTeHix5amMOHmu58/G1bG9pZ+QI8byZE3nfwoM4du4UDtq3kWnjG/yAPzOrmIPJEPHE2q38bukablu2htuXruHZfCf4wfuO48wXzua4eU0ce+BkJviKKTOrAgeTQWrt5mZuX7aW23IAWbFuGwD7TWjghOdM47h5UzhuXhPTJjTUOKdmNhw4mAwSW3e0cufj61LwWLqWJU9tBNI7LF584BTedfyBHDeviYOmNrqfw8wGnIPJXqq1rZ37Vm7gtqWp0/yeJ56lpS2oHzmCow+YxN+/4lBectAUjpwx0U+rNbOaczDZS0QES5/ZnPo9lq7hD4+tY3NzKxIcPn0C7zh+LsfPa2LBAZP9LCoz2+s4mNTQ6k3N3PKn1bnpag3P5IcbHjBlLKcdNZ3j5zXx4gOn+CZAM9vrOZjUyO+XreXdVyxic3MrUxrrecm8Jo6fN4WXHNTErMlja509M7N+cTCpgf9Z/DQfuPoeZk8ey5ffeBTz95/gez3MbFBzMBlgP1y0gk/86H6OnLkP3z3nhW7CMrMhwcFkAH3j1sf47A0Pcfy8Jr7+lqP9jg4zGzJcmg2AiOALNz7CV29exqlH7seX3ngUo0f5iiwzGzocTKqsrT244KcPcPWdK3jTsbP59OlHMNL9I2Y2xDiYVFFzaxsfueZefvHg07z//xzEeS8/1Henm9mQ5GBSJZubW3nPlYu4belaLnjVYbzrpQfWOktmZlXjYFIF67bs4O3fuZMHV23k0tc/j785emats2RmVlUOJnvYqvXbeMu37mDFs9v42tlHc/L8abXOkplZ1TmY7EHLVm/mLd+8g03bW7niHcfwogOn1DpLZmYDwsFkD3lg5Qbe9p07GSG4+twXccSMibXOkpnZgHEw2QNuX7aGd1++iH3G1nPVu45lblNjrbNkZjagHEwq9MsHn+ZDV9/DAVPGcuU7j2W/iX6zoZkNPw4mFbj2rhWc/+P7ee7Mffju21/IPmP9nC0zG54cTMr09VuW8blfPMxLD27ia2f7OVtmNry5BOyniOCSXz7M1295jFc/d3+++IajqB/l1+aa2fDmYNIPbe3Bp37yANfctYI3Hzubi/2cLTMzwMGkz7a3pOds/XLx03zohHn83cmH+DlbZmaZg0kfbG5u5dwrFnH7srVc+Or5vOP4ubXOkpnZXqWqjf2STpH0iKSlks7vYvoBkn4j6X5JN0uamdOPkvR7SYvztDdWM589WbdlB2/6xh+44/F1fPENz3MgMTPrQtWCiaSRwFeAVwLzgbMkze8026XAFRHxXOBi4HM5fSvw1og4HDgF+LKkfaqV1+6sWr+Nv/na7Tzy9CYue8vRnPECP7DRzKwr1ayZHAMsjYjHImIHcA1weqd55gP/m4dvKk2PiD9FxKN5eBXwDDC1inndzTObtvM3X72d1RubufKdx3LiYX5go5lZd6oZTGYAKwrjK3Na0X3AGXn4dcB4SR2ejijpGKAeWFalfHbpNw89w6oN2/nuO17IMXMnD+RHm5kNOrW+QeI84GWS7gFeBjwJtJUmStofuBJ4e0S0d15Y0rmSFklatHr16j2asQef3MD4hlG8YPakPbpeM7OhqJrB5ElgVmF8Zk7bKSJWRcQZEfF84FM5bT2ApAnAz4FPRcQfuvqAiLgsIhZExIKpU/dsK9iDqzZy+PQJvvzXzKwPqhlM7gIOljRXUj1wJnB9cQZJTZJKefgk8O2cXg/8hNQ5f10V89il1rZ2Hn5qI0dM92Pkzcz6omrBJCJagQ8ANwIPAddGxGJJF0s6Lc+2EHhE0p+AacBnc/obgL8CzpF0b/47qlp57WzZ6i00t7b7nSRmZn1U1ZsWI+IG4IZOaRcWhq8Ddqt5RMRVwFXVzFtPHnxyAwCHT59QqyyYmQ0qte6A3ystXrWRhroRHDh1XK2zYmY2KDiYdOHBVRuYv/8EP8TRzKyPHEw6aW8Plqza6P4SM7N+cDDp5Il1W9nc3Or+EjOzfnAw6eTBVaXOd9dMzMz6ysGkkwef3EjdSHHItPG1zoqZ2aDhYNLJ4lUbOGTaeL+K18ysH1xiFkQEi1f5znczs/5yMCl4asN21m3ZwREz3PluZtYfDiYFDz21EYD5rpmYmfWLg0nB+q0tAEwdN7rGOTEzG1wcTAqaW9MrU0bXebeYmfWHS82C5tb0Xq7RvpLLzKxfXGoW7KyZjBpZ45yYmQ0uDiYFzS0pmPgeEzOz/nGpWdDc2kbdSPlpwWZm/eRgUrC9pZ0GN3GZmfWbg0lBc2ubr+QyMyuDS86C5tZ2d76bmZXBwaQgBRPvEjOz/nLJWdDc0uYruczMyuCSs6C5tZ3RdW7mMjPrr16DiaQPSpo0EJmptebWNjdzmZmVoS8l5zTgLknXSjpF0pC9CcN9JmZm5em15IyIC4CDgW8B5wCPSvpnSQdVOW8DbnuLr+YyMytHn07DIyKAp/NfKzAJuE7SF6qYtwHX3NpGg+8zMTPrt1G9zSDpw8BbgTXAN4G/j4gWSSOAR4GPVzeLA6fZNRMzs7L0GkyAycAZEfHnYmJEtEt6dXWyVRvpai7XTMzM+qsvJecvgHWlEUkTJB0LEBEPVStjteCruczMytOXkvOrwObC+OacNuT4cSpmZuXpSzBR7oAHUvMWfWseG1Qigh2+NNjMrCx9KTkfk/QhSXX578PAY31Zeb4v5RFJSyWd38X0AyT9RtL9km6WNLMw7W2SHs1/b+v7JpXH7383MytfX0rO9wIvAZ4EVgLHAuf2tpCkkcBXgFcC84GzJM3vNNulwBUR8VzgYuBzednJwD/lzzoG+Kdq34Vfesuim7nMzPqv1+aqiHgGOLOMdR8DLI2IxwAkXQOcDiwpzDMf+Ggevgn4aR5+BfCriFiXl/0VcApwdRn56JPm1jYA32diZlaGvtxn0gC8EzgcaCilR8Q7ell0BrCiMF6q1RTdB5wB/BvwOmC8pCndLDuji7ydS64lzZ49u7dN6dHOZi7XTMzM+q0vp+FXA
vuRagu3ADOBTXvo888DXibpHuBlpKa0tr4uHBGXRcSCiFgwderUijJSqpm4A97MrP/6UnLOi4h/BLZExOXAq9i9htGVJ4FZhfGZOW2niFgVEWdExPOBT+W09X1Zdk/bvrPPxMHEzKy/+lJytuT/6yUdAUwE9u3DcncBB0uaK6me1O9yfXEGSU35sSwAnwS+nYdvBF4uaVLueH95TquaXVdzuZnLzKy/+hJMLssF+gWkYLAE+HxvC0VEK/ABUhB4CLg2IhZLuljSaXm2hcAjkv5EetT9Z/Oy64BPkwLSXcDFpc74anEzl5lZ+XrsgM+1ho0R8SxwK3Bgf1YeETcAN3RKu7AwfB1wXTfLfptdNZWq29UB72BiZtZfPZac+W73IfNU4J40t5RqJm7mMjPrr76chv9a0nmSZkmaXPqres4GWKlm4vtMzMz6ry/P2Hpj/v/+QlrQzyavvd3OO+DdAW9m1m99uQN+7kBkpNbcAW9mVr6+3AH/1q7SI+KKPZ+d2nEHvJlZ+frSzPXCwnADcCLwR2CIBhM3c5mZ9Vdfmrk+WByXtA9wTdVyVCPNLW1IUDdStc6KmdmgU06bzhZgyPWjNOcXY0kOJmZm/dWXPpP/Jl29BSn4zAeurWamamF7S5ubuMzMytSXPpNLC8OtwJ8jYmWV8lMzzX5lr5lZ2foSTJ4AnoqI7QCSxkiaExHLq5qzAdbc2k6D7zExMytLX07Ffwi0F8bbctqQ0tza5pqJmVmZ+lJ6joqIHaWRPFxfvSzVRnNLO6P9KBUzs7L0pfRcXXhkPJJOB9ZUL0u1kfpM3MxlZlaOvvSZvBf4nqT/zOMrgS7vih/M3MxlZla+vty0uAx4kaRxeXxz1XNVA82t7Ywb3ZfYamZmnfV6Ki7pnyXtExGbI2JzfpXuZwYicwPJ95mYmZWvL+06r4yI9aWR/NbFU6uXpdpobnUHvJlZufpSeo6UNLo0ImkMMLqH+Qel5pZ2GlwzMTMrS186Cb4H/EbSdwAB5wCXVzNTtdDc2uaaiZlZmfrSAf95SfcBJ5Ge0XUjcEC1MzbQ/DgVM7Py9bX0/AspkLweOAF4qGo5qhHfZ2JmVr5uayaSDgHOyn9rgB8Aioj/M0B5GzCtbe20tYdrJmZmZeqpmeth4LfAqyNiKYCkvxuQXA2wnW9ZdJ+JmVlZeio9zwCeAm6S9A1JJ5I64Iec7S1tgF/Za2ZWrm6DSUT8NCLOBJ4D3AR8BNhX0lclvXygMjgQdr3/3TUTM7Ny9Fp6RsSWiPh+RLwGmAncA3yi6jkbQG7mMjOrTL9Kz4h4NiIui4gTq5WhWmhuTc1cvmnRzKw8PhUn3f0OrpmYmZXLpSfFPhPXTMzMylHVYCLpFEmPSFoq6fwups+WdJOkeyTdL+nUnF4n6XJJD0h6SNInq5nPUjOXO+DNzMpTtdJT0kjgK8ArgfnAWZLmd5rtAuDaiHg+cCbwXzn99cDoiDgSOBp4j6Q51crrzmYu10zMzMpSzVPxY4ClEfFYfm/8NcDpneYJYEIengisKqQ3ShoFjAF2ABurlVFfzWVmVplqlp4zgBWF8ZU5regi4GxJK4EbgA/m9OuALaSbJp8ALo2IdZ0/QNK5khZJWrR69eqyM7rrpkUHEzOzctS69DwL+G5EzCS9cOtKSSNItZo2YDowF/iYpAM7L5wvU14QEQumTp1adibcAW9mVplqBpMngVmF8Zk5reidwLUAEfF7oAFoAt4E/DIiWiLiGeA2YEG1MrrzPhMFGmPUAAAL0klEQVQ3c5mZlaWapeddwMGS5kqqJ3WwX99pnieAEwEkHUYKJqtz+gk5vRF4EenBk1XhmomZWWWqFkwiohX4AOllWg+RrtpaLOliSafl2T4GvDu/fOtq4JyICNJVYOMkLSYFpe9ExP3Vymvpaq5695mYmZWlL6/tLVtE3EDqWC+mXVgYXgIc18Vym0mXBw+I5tY26kaKkSOG5EORzcyqzqfi+C2LZmaVcjAh1Ux8WbCZWflcggLbW9odTMzMKuASlNzMVedmLjOzcjmYAM0tbuYyM6uES1BcMzEzq5SDCe6ANzOrlEtQSpcGe1eYmZXLJSjQHjBCvmHRzKxcDiZAROBYYmZWPgcTIAIcS8zMyudgAgThZi4zswo4mADt7biZy8ysAg4mpBfOy9HEzKxsDibkDvhaZ8LMbBBzMCF1wLvPxMysfA4mQLsvDTYzq4iDCaU+k1rnwsxs8HIwoXTToqOJmVm5HEzwTYtmZpVyMCE1c7kD3sysfA4muAPezKxSDib40mAzs0o5mJBrJrXOhJnZIOZgQqqZOJqYmZXPwYR0abCbuczMyudgQr5psdaZMDMbxBxMcAe8mVmlHEzwpcFmZpVyMMHvMzEzq1RVg4mkUyQ9ImmppPO7mD5b0k2S7pF0v6RTC9OeK+n3khZLekBSQ7XyGa6ZmJlVZFS1VixpJPAV4GRgJXCXpOsjYklhtguAayPiq5LmAzcAcySNAq4C3hIR90maArRUK6+pz6RaazczG/qqWTM5BlgaEY9FxA7gGuD0TvMEMCEPTwRW5eGXA/dHxH0AEbE2ItqqldF006KjiZlZuaoZTGYAKwrjK3Na0UXA2ZJWkmolH8zphwAh6UZJf5T08Srm0+8zMTOrUK074M8CvhsRM4FTgSsljSA1vx0PvDn/f52kEzsvLOlcSYskLVq9enXZmfClwWZmlalmMHkSmFUYn5nTit4JXAsQEb8HGoAmUi3m1ohYExFbSbWWF3T+gIi4LCIWRMSCqVOnlp3R9oiylzUzs+oGk7uAgyXNlVQPnAlc32meJ4ATASQdRgomq4EbgSMljc2d8S8DllAtrpmYmVWkaldzRUSrpA+QAsNI4NsRsVjSxcCiiLge+BjwDUl/R+q6OCciAnhW0hdJASmAGyLi59XKq29aNDOrTNWCCUBE3EBqoiqmXVgYXgIc182yV5EuD6669KbFgfgkM7OhqdYd8HuFVDNxNDEzK5eDCelqLocSM7PyOZjgZ3OZmVXKwQQ/m8vMrFIOJvjZXGZmlXIwwc/mMjOrlIMJvjTYzKxSDiakZi53mpiZlW/YB5PIz+VyKDEzK5+DSX7Go5/NZWZWvmEfTEpPDHYsMTMr37APJqWHz7sD3sysfMM+mOyqmTiamJmVa9gHk1KfiWOJmVn5HExKwcTXc5mZlc3BJPeauM/EzKx8wz6YtLuZy8ysYsM+mOy6adHRxMysXA4m+b9rJmZm5XMwaU//fWmwmVn5HEzcAW9mVrFhH0x2dsDXNhtmZoPasA8mpQ74Ea6amJmVbdgHk7pRIzj1yP2YPXlsrbNiZjZojap1BmptQkMd//Xmo2udDTOzQW3Y10zMzKxyDiZmZlYxBxMzM6uYg4mZmVXMwcTMzCrmYGJmZhVzMDEzs4o5mJiZWcVUepzIYCdpNfDnMhdvAtbswewMBt7m4cHbPDxUss0HRMTUSjMwZIJJJSQtiogFtc7HQPI2Dw/e5uFhb9hmN3OZmVnFHEzMzKxiDibJZbXOQA14m4cHb/PwUPNtdp+JmZlVzDUT
MzOrmIOJmZlVbNgHE0mnSHpE0lJJ59c6P/0l6duSnpH0YCFtsqRfSXo0/5+U0yXp3/O23i/pBYVl3pbnf1TS2wrpR0t6IC/z75Jq+n5jSbMk3SRpiaTFkj6c04fyNjdIulPSfXmb/29OnyvpjpzPH0iqz+mj8/jSPH1OYV2fzOmPSHpFIX2v/B1IGinpHkk/y+NDepslLc/H3r2SFuW0wXFsR8Sw/QNGAsuAA4F64D5gfq3z1c9t+CvgBcCDhbQvAOfn4fOBz+fhU4FfAAJeBNyR0ycDj+X/k/LwpDztzjyv8rKvrPH27g+8IA+PB/4EzB/i2yxgXB6uA+7I+bsWODOnfw342zz8PuBrefhM4Ad5eH4+xkcDc/OxP3Jv/h0AHwW+D/wsjw/pbQaWA02d0gbFsT3caybHAEsj4rGI2AFcA5xe4zz1S0TcCqzrlHw6cHkevhx4bSH9ikj+AOwjaX/gFcCvImJdRDwL/Ao4JU+bEBF/iHQkXlFYV01ExFMR8cc8vAl4CJjB0N7miIjNebQu/wVwAnBdTu+8zaV9cR1wYj4DPR24JiKaI+JxYCnpN7BX/g4kzQReBXwzj4shvs3dGBTH9nAPJjOAFYXxlTltsJsWEU/l4aeBaXm4u+3tKX1lF+l7hdyU8XzSmfqQ3ubc3HMv8AypcFgGrI+I1jxLMZ87ty1P3wBMof/7ota+DHwcaM/jUxj62xzA/0i6W9K5OW1QHNuj9tSKbO8UESFpyF3/LWkc8CPgIxGxsdj0OxS3OSLagKMk7QP8BHhOjbNUVZJeDTwTEXdLWljr/Ayg4yPiSUn7Ar+S9HBx4t58bA/3msmTwKzC+MycNtj9JVdpyf+fyendbW9P6TO7SK8pSXWkQPK9iPhxTh7S21wSEeuBm4AXk5o1SieExXzu3LY8fSKwlv7vi1o6DjhN0nJSE9QJwL8xtLeZiHgy/3+GdNJwDIPl2K51h1Mt/0g1s8dIHXOlTrjDa52vMrZjDh074P+Fjh12X8jDr6Jjh92dOX0y8Dips25SHp6cp3XusDu1xtsqUlvvlzulD+Vtngrsk4fHAL8FXg38kI6d0e/Lw++nY2f0tXn4cDp2Rj9G6ojeq38HwEJ2dcAP2W0GGoHxheHbgVMGy7Fd8wOl1n+kKyL+RGqD/lSt81NG/q8GngJaSG2g7yS1Ff8GeBT4deFAEvCVvK0PAAsK63kHqXNyKfD2QvoC4MG8zH+Sn5pQw+09ntSufD9wb/47dYhv83OBe/I2PwhcmNMPzIXD0lzIjs7pDXl8aZ5+YGFdn8rb9QiFK3n25t8BHYPJkN3mvG335b/FpTwNlmPbj1MxM7OKDfc+EzMz2wMcTMzMrGIOJmZmVjEHEzMzq5iDiZmZVczBxIYFSVPyk1jvlfS0pCcL4/V9XMd3JB3ayzzvl/TmPZPrgV+/Wbl8abANO5IuAjZHxKWd0kX6TbR3uaCZdcs1ExvWJM1TejfK90g3iu0v6TJJi5TeHXJhYd7fSTpK0ihJ6yVdovSOkd/nZykh6TOSPlKY/xKld5E8IuklOb1R0o/y516XP+uoLvL2L3me+yV9vrh+pfe63Fv4a5c0Q9I0ST/O67xT0osGYj+a+UGPZumhiW+NiNLLiM6PiHX5GU83SbouIpZ0WmYicEtEnC/pi6Q7ji/pYt2KiGMknQZcSHo8xgeBpyPiryU9D/jjbgtJ00h3aB8eEZEf8LhTRKwAjsrzfhg4NtIDAn9AetzGH/JTlX8GHFHWXjHrBwcTM1hWCiTZWZLeSfp9TCe9YKlzMNkWEb/Iw3cDL+1m3T8uzDMnDx8PfB4gIu6TtLiL5daRHr3+DUk/JwWF3Uj6K+BteZ0AJwGHFp6iPEnSmIjY1k3+zPYIBxMz2FIakHQw8GHgmIhYL+kq0nOfOttRGG6j+99Scx/m2U1EtEhaAJwMvB74W+DlxXkkzQAuA14dEVtLyTnvxfyZVZ37TMw6mgBsAjYW3lq3p90GvAFA0pGkmk8HksaT3or3M+DvSC8BK06vJz3Y8GMRsbQw6dekJ+iW5tutL8asGhxMzDr6I6lJ62HSo+5vq8Jn/AcwQ9IS4J/y523oNM9E4OeS7gNuIb0LveilpADz2UIn/L6kQHJc7rRfAry7Cvk3240vDTYbYLljf1REbM/Nav8DHBy7XkdrNui4z8Rs4I0DfpODioD3OJDYYOeaiZmZVcx9JmZmVjEHEzMzq5iDiZmZVczBxMzMKuZgYmZmFfv/SYShTXYXecAAAAAASUVORK5CYII=\n", 2003 | "text/plain": [ 2004 | "
" 2005 | ] 2006 | }, 2007 | "metadata": {}, 2008 | "output_type": "display_data" 2009 | }, 2010 | { 2011 | "data": { 2012 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkEAAAEWCAYAAABhZ0N/AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3Xl8XXWd//HXJ0mTtEm6t4Eu0AJttYKCdIqOMnQEZVGpivqDEaGKoDI6LjCI4zIMiqKDM86MDg4KCshixY1RlHFhGR0EilgUaNoChbY06d4maZv18/vj+73Jye29yc16m5z38/HII2c/3/u93/M9n/s933OOuTsiIiIiaVNS7ASIiIiIFIOCIBEREUklBUEiIiKSSgqCREREJJUUBImIiEgqKQgSERGRVCpaEGRmbmbHDHDdk82sbqjTVMB+F5nZH82s0cz+bgT2V2tmD8b9fWW49zeamNn9Zva+EdhPUcraQJnZP5jZt0ZoX8vMbNNI7KsQZnaEmTWZWelQLjtURltZGk3MbIOZnTYE25kXz01NZnbJUKTtUGFm3zGz/QM5Zs3s52Z24XCkq9j6DIJi4dofC0Xm72sjkbhEGnoETO7+v+6+aCTTEF0B3OfuNe7+7yOwv0uA7cBEd79sBPYnWYpY1gbE3b/g7sMSHA7mh0sB215hZr8dzDbc/QV3r3b3jqFcdqgMd1lKBHbZf+1m9pvh2u9Iiyfzzw/zbia7+w2JfZ5qZmvMbJ+Z3WdmR/aSvsPN7G4zezEeM/Oy5leY2U1mttfM6s3s41nz8+5rMOu6+wrgzIFkhruf6e43D2TdQ12hLUFvjhVG5u9Dw5qqQ9eRwJPDvRMLSuL+nvIBPNHSzMqGPmUyFBLfr/TDSLbajEaJwK7rD/hLYD/whSInb9Qys+nAD4HPAFOBVcD3elmlE/gFcE6e+VcBCwj1+18DV5jZGQXuazDrSi7u3usfsAE4Lcf0CmA3cGxi2gzCATczjl8MrAd2AncDsxLLOnBMHL4feF9i3grgt3H4wbhsM9AE/D9gGbApsfxL4zZ2E4KUsxPzvgN8HfgZ0Ag8DBzdy+c9O25jd9zmS+P03wAdwIGYjoU51r0f+CLwCLAX+AkwNTH/VcD/xW2vBpZlrXsN8LuYh98F2oDWuL/TYp5/FXgx/n0VqIjrLwM2AZ8A6oFbE9OuALYCW4C3AGcBa+P38g+JNCwFHorp2wJ8DSjP+s4+AKyLy3wdsMT8i4GnYz4/BbwyTp8F/ADYBjwH/F2evJ8ft1sSx78JbE3MvxX4aCK/PhfzqxH4H2B6P/I677pZaVpGz7K2AbgceALYQ6hkKhPzlwN/jN//M8AZeb7fY4BJwI0xrzcDnwdK4/JHE8rcDkJr4G2EX6eZ/XwirtMI1AGnxulXAd+Nw/Pid3Yh8ELczqcS2xgP3Azsit/bFcnPmpUPeY9D4DK6y9d7suqI6+K+G4BvAONzbPulhOOqI257d+LYvR64J+73NOCNwOMxfzcCVyW2k/m8ZX19z/1ZNs6/AHg+fh+fIU+9GJc9i1D+G+N3dHl2WYr515T4awHu70++FVB3TyQc55/O+k5y1iEF1tmXEo7/xphfRxOOs73AShL1RVZaSoBPxzzcCtwCTCqknGZt5xJ61ov/XeBx+SbCcbk7pvflebbfo1wk9vl/ifEqwjH8kj7yvyxua17W9BeBNyTGPwfcWci+BrNurvosK12VhPPOjphPjwK1iePjfXF4NT3LrhPrV3qpd7P21Vv5nxTLx7ZYXj5N9zlhBfBbwvGxi3A+OTOx3bx1at7vqYADaQP5D/abgGsS438L/CIOv45QmF9JOPD+A3gw64DqMwjKXjZHZTKOcND+A1Ae99sILIrzvxO/1KWEQnlbptDk+DwLCZXt6+N2r4jbLs+Vzhzr3x8z/thYAH9A9wlpdkzHWYQK4fVxfEZi3ReAl8V0jotp/3xi+1cDvwdmEgLO/wM+l8iTduBLMb/HJ6Z9Nm7v4liwbgdq4r72A/PjNk4kFOIyQmXwNDHoSHwPPwUmA0fEbWVO8u+In/0vACOc5I+Mn/WxmIZy4CjgWeD0PHn4AnBiHK6Ly740Me+ERH49E7+z8XH82n7kdc51c6RnGQcHQY8QArupMY8+EOctJVTAr4/7nU135ZXr+/0R8F+EsjIzbvf9cflj4nYq4nf9IPDVOG8RIQCYlai4j47DV3FwEPTN+DlfQahsMvl5LfAAMAWYQziB5KwgezkO2wnlclzM733AlDj/Xwkn0qmE8vbfwBfzbHsFiWM+cezuAV4T87My7vO4OP5yQpDwlqzPmwxs8pWR/iy7mFBRv5ZQhq8jnIjz1YtbgJPj8BS6fwwsy5W/hGDl6cR3X3C+9VF3/4Dw4y/5Q6W3OqSQOvsnMb0vi2Xp14RjehIh8LswT1reS6hLjwKqCS0WtxZSTnNs6zsk6sUCjssTCIHXSUApIdjaQCL4S2ynR7mI0/4NuD5ruT8D5/SR/wcFQbE8ODG4iNPeDvypr30NZt189VnWsu+PZW1CzKcTCV0xIM+5jxB4rYllotd6t5d8yi7/t8RyVhO/j7XARYl6oo1wLisFPkgIDC3Oz1un5t1/AQfSBuKvs8TfxXHeacAziWV/B1wQh28EvpyYVx0TPy9xQA1FEHQyoeWjJDH/DuIvRMIB863EvLOANXk+62eAlYnxEsKJfVlvBSGx/P0kTqaEyrM1flmfIB70ifn3EiuNuO7VvR3shEr6rMT46cCGRJ600vPXzzJCkJNpXaiJeXlSYpnHiCeRHJ/no8CPsr6H1ybGVwJXJj7LR3Js4yTghaxpnwS+nWeftwIfBw4jBEFfJrQ+ZbcS3U/PX7iX0h2AF5LXOdfNkZ6uspY4Hs5PjH8Z+EYc/i/gX3spG1cnxmsJFf34xLTzCH3Ocq3/FuDxOHwMoVI/DRiXtdxVHBwEzUnMfwQ4Nw73CEaB99H/IGg/PU8YWwmBtBF+UBydmPdq4Lk8215B7iDolnzpict8NZPn5A5s8pWR/iz7WeCOxLwJhGMtXxD0AuGEMrG3shSnlRB+WFwfx/uVb73ky2WxrE7Nmt5bHVJInf2axPzHgE8kxr9CDNRzpOfXwKWJ8UVx25kfXHnLaY5tfYfcQVC+4/J6YqCXmF8HnJJj2z3KRSJfrs1a7nfAij6+g1xB0Nw4LVlPvz7rO8i5r8Gs21sZTMx7L3laychx7iP8KNhKvCpCH/Vunn1ml/9SwrG1OLHM++luJVoBrM86Fp1wvuhXnZr5K7TfyFvc/Vc5pt8HTDCzkwi/yI4nRGIQIvI/ZBZ09yYz20GIFjcUuN9CzAI2untnYtrzcT8Z9YnhfYSDO9+2ns+MuHunmW3M2lZfNmalYxwwndAq8g4ze3Ni/jhCH
uZat8/0xeFZifFt7n4ga50d3t35c3/835CYv5+YH2a2EPgXYAmhcJURKrqkfHk5l1DBZjsSmGVmuxPTSoH/zbEshJaJswmXWR4kHHzvJlwu+d+s7zlfWgrJ60LLRC7Z62a+g7mESzf5JL/fI2OatphZZlpJZhkzqyX8sjuZELyWEJp/cff1ZvZRQsDzMjO7F/i4u79YYHozn3VWVpr6Kn+57HD39hzbn0EoQ48lPp8Rvvv+6JGmWNdcS2htLSe0WHy/l/X78z0XlE/uvi/WZfmcQ2jCv9bMniD8UHgoz7LXEL7fzN2mg843M3st8E+EH287s2b3VocUUmdn1x3Z44flSVau/ZYRTlwZgzkmc62f+VxHAhea2YcT88vpWXf2ponQWpE0EWg0s5OBn8dpz7v7ywrYVmb9A4nhxr72Nch1C3EroQ6708wmEy6Nfcrd27IXNLO5hB/BF7r72ji5kHo3W3b5nx7XyS4rOc/n8ViEUFam0kudms+gOmfGk+tKQrR1HvBTd89k+IuETAHAzKqAaYSWlWzNhAM/I9+BlMuLwNysjqZH5NlPIdtKptkIhaI/25qblY42QhPzRkKUPDnxV+Xu1yaW9/6kL24/eeLra/2+XE9o2lzg7hMJlxit91W6bCT0D8g1/bmsz13j7mfl2c4DhBP/sjj8W8LlkFPieKFp6Suvh0O+PMhIfj8bCb9apifSODFRiX4hLn9c/C7OJ/FduPvt7v5aQnlwwmXQ/tpCuAyWMTffggOwnXBSfFni803y0Fk3l3xlN3v67YRLRXPdfRKhv0yhZXSgeuSTmY0n1GU5ufuj7r6c0Bz/Y0IdeRAzO5dQb749caLpb75lb7OW0B/mcndflWOR3uqQ/tTZ/ZVrv+30DKIK1d96biOh20ayPpjg7ncUuP6ThEt0QFe+HA086eGOv0xH9L4CINx9F6E8vSIx+RV033DT274GvG4hH9Ld29z9n9x9MaFD/ZsIfeF6iOX/x4RWv58nZvWr3u2l/LdxcFkppAz2VafmNBR3qNxO6OT0rjiccQfwHjM73swqCJX6w+6+Icc2/gi8zcwmxFtwL8qa30C4lpzLw4So/wozG2dmy4A3A3cO4LOsBN4YbzMcR2hSbiE0ERbqfDNbbGYTCNff74rB4neBN5vZ6WZWamaVFp6zMqf3zfVwB/BpM5sR7wT4bNzuUKkhdHBsMrOXEK63FupbwOVmdmK8++mYeHvmI4RfTJ8ws/Hxsx9rZn+RayPuvo5wEjgfeMDd9xK+/3MoPAgairweiBsJZf5UMysxs9kxHw/i7lsInW+/YmYT4/JHm9kpcZEawi+7PWY2G/j7zLoWnlf1unhcHSDkVyf9txL4pJlNifvo667P3o7DHmKL3TeBfzWzmTHds83s9F62PcfMyvvYdA2w090PmNlS4G8KSc8g3UUoT38Z03cVeQIvMys3s3eZ2aRYse8lx3djZicQ+ty8xd23ZaYXkm8WbrtelmObpYR67zfu/o08n6W3OqQ/dXZ/3QF8zMzmm1l13Pb3sloRC1VwOYy+CXzAzE6KdVOVmb3RzGoKXP9HwLFmdo6ZVRLy7Al3X5NvhbhcRRytiOMZtxC+gymxfriYcImvkH0NZt1emdlfm9lxsRztJQQjueqVmwhdSr6cNb3gereX8p9pWLnGzGriOeTjFHCeK6BOzanQIOi/redzJzKXvHD3hwktObPobhYkXj77DKFz3hZCRHpunu3/K+E6YAPhbpXbsuZfBdxsZrvN7J3JGe7eSgh6ziREkf9J6JdU0Befta06wsn3P+K23kx4PEBrPzZzK6FQ1hM6cv5d3PZGwp1D/0DoULyRcGLrTyD6ecJtj08AfyI0XQ/l8zIuJ5xUGgkVR8G3V7r79wlNm7fH9X9M6I/QQfhFcTyhJ/92QsA0qZfNPUC4zLIxMW4kmur7SMtQ5HW/ufsjwHsI5XkPId1H9rLKBYRm+acIl7ruAg6P8/6J0EF1D6Fz6w8T61UQLgltJ5SzmYR+Vv11NeGy43PAr+L+W3pZ/iryHId5fILQGfb3ZrY37iPfc3J+Q/jFWm9m23vZ5qXA1WbWSKjkc7ayDCV3fxL4MCHA2EIITreSP6/eDWyIn/kDhB+I2ZYTOrr+NlGvZurPvPlm4TJEI+H4z/YaQgvqOXbws4IyrQF565B+1tn9dROhbnyQUN4OEPJ0IG4EFsdy+OO+Fo4tYhcT7nbdRcjbFYXuLJ6kzyHUb7sI/Rz7ypf9dF++WkN3VwSAfyR0HXieUEf8s7v/osB9DWbdvhxGqAP2EjoqP0D4zrKdC7w1q3yd3M96t7fy/2FCTPEs4UrA7YTyU4je6tScMj2qZQiY2f2ETqkj8sRekaFkZh8kdEbt9ZdT2sWWjN2Ey8bPjfC+zydcKhtI0Ct9iC0PdYQg7e/d/ZtFTtKQMbMbCXfxbnX3YXno6WikB+qJpJSZHU64rPAQ4QFslxF+LUsWC509f01okbyO0IqyYaTT4e5Deflbsrj784QW/DHH3S/i4K4mqaen1oqkVznhtv5GwuWonxAuJ8vBltP9gMEFhBYzNaOLjHK6HCYiIiKppJYgERERSSX1CUqJ6dOn+7x584qdDBGRUeOxxx7b7u4zip0OGT4KglJi3rx5rFqV69lpIiKSi5k93/dSMprpcpiIiIikkoIgERERSSUFQSIiIpJKCoJEREQklRQEiYiISCopCBIREZFUUhAkIiIiqaQgSERERFJJQZCIiIikkoIgERERSSUFQSIiIpJKCoJEREQklRQEiYiISCopCBIREZFUUhAkIiIiqaQgSERERFJJQZCIiIikkoKgIjGzM8yszszWm9mVOeYfaWa/NrMnzOx+M5uTNX+imW0ys6+NXKpFRETGDgVBRWBmpcDXgTOBxcB5ZrY4a7HrgFvc/eXA1cAXs+Z/DnhwuNMqIiIyVikIKo6lwHp3f9bdW4E7geVZyywGfhOH70vON7MTgVrgf0YgrSIiImOSgqDimA1sTIxvitOSVgNvi8NvBWrMbJqZlQBfAS7vaydmdomZrTKzVdu2bRuCZIuIiIwdCoIOXZcDp5jZ48ApwGagA7gUuMfdN/W1AXe/wd2XuPuSGTNmDG9qRURERpmyYicgpTYDcxPjc+K0Lu7+IrElyMyqgXPcfbeZvRo42cwuBaqBcjNrcveDOleLiIhIfgqCiuNRYIGZzScEP+cCf5NcwMymAzvdvRP4JHATgLu/K7HMCmCJAiAREZH+0+WwInD3duBDwL3A08BKd3/SzK42s7PjYsuAOjNbS+gEfU1REisiIjJGmbsXOw0yApYsWeKrVq0qdjJEREYNM3vM3ZcUOx0yfNQSJCIiIqmkPkEiIpJ67s7+tg6aWzrY19pOc0tHsZMkI0BBkIiIjCruTkt7J/taO2huaac5Bi0heOkebkoENJnl9rV20NTSzr7Wdva1ZIY7aG5tR71D0kdBkIiIDKu2js4QcLS2s6+lneZM8NLSMyjpDlYODmiy12nvLCxiKTGoKi9jQkUpVRVlYbi8lNqaSiZML6OqPDO9lAkV
Zd3D5WWc+aVhzhgpOgVBIiLSpaPTQ4tJS0fX/64gJSsQ6WpdyWpR6RG8tHTQ2tFZ8P4nxACkuiL8r6ooZUpVOXOmhOClqiJMC8skp3UHL9UVMegpL6NyXAlmNow5JqOZgiARkVHK3bsCj1yBSCaQ6WpdaUlcIsoT0BxoKzxgqSgr6RFwVFWEAKS2pjIrWOkOaJKtMT2Cl4oyJowrpaREAYuMHAVBIiIjINOPJdNKEvqnxKAkeYmoNXfw0r1ODHZa2tnX1lFwP5ZxpZYjACllatWEni0q5d3BS1UMbiZUlMZlugOZCeNKKSvVDcYyuikIEhHJoTUTsLQmWkp6CV7ydsBNBC8d/enHkghIqmKQcvikyqzgJNGnJRO4ZK2TCV7KyxSwiGRTECQio157R2eiM22uFpXM3ULJgKa7A24moEl20m3rKPxWoczlnK4WlfIyplWVM3fqhETH26wWlV76t1SUqR+LyEhQECQiReHubG1sYVtjy0G3OSdbWbqCl8Qlou5+LyGgaWkvvB9L5biSrkAk01Iyafw4Zk2q7NE/JRnQZN9ZlOwHM179WERGLQVBIjKs3J1tjS2sbWhibUMj67Y2dg03Hmjvdd3y0pKe/VNiIDK9uuKgDrl93SWU2U6pAhYRiRQEiciQcHe2N7WyrqGRtQ2NrN3aFIeb2LO/rWu5yRPGsXBmDWe/YhYLa2s4bFIl1Vl3CVWXlzG+vFT9WERkWCkIEpF+29EUWnZCq04IdNY1NLJrX3ewM7GyjIW1NZx13OEsrK1mYW0NC2qrmVFdof4uInJIUBAkInntbG4Nl7Aaui9hrd/axI7m1q5lamKwc8axh7FgZk1XsDOzRsGOiBzaFASJCLv3tXb32cm07GxtZHtTd7BTXVHGgtpqTntpLQtiy87C2hpqJyrYEZHRSUGQSIrs2d/Wo1Un00l5W2NL1zJV5aUcU1vDXy+a2dWqs7C2hsMnVSrYEZExRUGQyBi090Ab6xqaerTqrG1opGFvd7AzflwpC2qr+asFM3r02Zk1abxu+RaRVFAQJDKKNbW0s66hkXWxZSdzR9aWPQe6lqkcV8IxM6t5zdHTWVBb0xXwzJ6sYEdE0k1BkMgo0NzSzvqtmUtYmb47TWzevb9rmYqyEo6eUc1J86fGYCcEPHOmTNCzcUREclAQJHII2d/a0RXsrN3a3cKzaVd3sFNeWsJRM6o48cgpnLd0blfAc8RUBTsiIv2hIEikCA60hWAn0zE503dn4659XW8FH1dqHD2jmuPnTuadS+aysLaaBbU1HDl1gt7eLSIyBBQEiQyjA20dPLut+aCHCr6wcx+ZF4qXlRhHzajiuDmTOOeVc7qCnXnTFOyIiAwnBUEiQ6ClPQQ7axOdlNdtbeL5Hc1dwU5piTF/ehWLZ01k+fGzu/rszJtexTgFOyIiI05BkEg/tLZ38tz25p5PUd7ayPM79tERo53SEuPIaRNYVFvDm19+eFefnfnTq/QuLBGRQ4iCIJEc2jo62bC9+aCHCm7Y3kx7DHZKDI6cVsWCmdWcdezhXQ8VPGpGFRVlpUX+BCIi0hcFQZJq7R2dbNixr0erzrqGRp7b3kxbRwh2zOCIqRNYMLOGNyyu7Xqo4NEzqqkcp2BHRGS0UhAkqdDR6Ty/o7n7Tqz4UMFntzXT2tHZtdzcqeNZOLOG172ktuuhgkfPqGZ8uYIdEZGxRkGQjCkdnc7Gnft6PFRwbUMTz2xrorW9O9iZM2U8C2trOGXRDBbODC07x8ysZkK5DgkRkbRQjS+jUmens2nX/oMeKrh+axMtiWBn9uTxLKit5uQF01kwM7TsHDOzmqoKFX0RkbTTmUAOaZ2dzubd+7s6JmduQV+/tYn9bR1dyx0+qZIFtTW8+qhpXX12jplZTU3luCKmXkREDmUKgorEzM4A/g0oBb7l7tdmzT8SuAmYAewEznf3TWZ2PHA9MBHoAK5x9++NaOKHgbvz4p4DPW49Xxcvae1r7Q52aidWsLC2hvOWHtH1UMEFtdVMVLAjIiL9pCCoCMysFPg68HpgE/Comd3t7k8lFrsOuMXdbzaz1wFfBN4N7AMucPd1ZjYLeMzM7nX33SP8MQbE3dmy5wDrtmZeFRECnvVbm2hqae9abkZNBQtrq+PrIsJDBRfMrGHSBAU7IiIyNBQEFcdSYL27PwtgZncCy4FkELQY+Hgcvg/4MYC7r80s4O4vmtlWQmvRIR0E3frQBn74+GbWNzTRmAh2pleXs2BmDee8cnaPN59PnlBevMSKiEgqKAgqjtnAxsT4JuCkrGVWA28jXDJ7K1BjZtPcfUdmATNbCpQDzwxvcgfO3fnSL+r4xgPPcNzsSbzlhNldl7EW1tYwtUrBjoiIFIeCoEPX5cDXzGwF8CCwmdAHCAAzOxy4FbjQ3TtzbcDMLgEuATjiiCOGO70H6eh0PvOTP3P7wy/wrpOO4Orlx1JaYiOeDhERkVwUBBXHZmBuYnxOnNbF3V8ktARhZtXAOZl+P2Y2EfgZ8Cl3/32+nbj7DcANAEuWLPGh/AB9aevo5LKVq7l79Yt8cNnRXHH6IswUAImIyKFDQVBxPAosMLP5hODnXOBvkguY2XRgZ2zl+SThTjHMrBz4EaHT9F0jmuoCHWjr4NLb/sBv1mzlijMWcemyY4qdJBERkYPoldZF4O7twIeAe4GngZXu/qSZXW1mZ8fFlgF1ZrYWqAWuidPfCfwVsMLM/hj/jh/ZT5Bf44E2LrjpEe6r28rn33KsAiARETlkmfuIXiWRIlmyZImvWrVqWPexs7mVC296hKe37OUr73wFy4+fPaz7ExEZTmb2mLsvKXY6ZPjocpgMifo9Bzj/xofZuHMf//XuEzn1pbXFTpKIiEivFATJoD2/o5l3fethdu9r4+b3LuVVR00rdpJERET6pCBIBqWuvpHzb3yY9o5Obr/4JF4+Z3KxkyQiIlIQBUEyYI+/sIsV336UynElrHz/q1lQW1PsJImIiBRMQZAMyP+t3877blnF9OoKbnvfScydOqHYSRIREekXBUHSb//zZD0fuuNx5k+r4taLljJzYmWxkyQiItJvCoKkX370+CYu//4THDt7Eje/5y/0olMRERm19LBEKdgtD23gY99bzUnzp3Lb+05SACQiIqOaWoKkT+7Of97/DP98bx2vX1zLf5x3ApXjSoudLBERkUFRECR9unv1i/zzvXW89YTZfPntL2dcqRoQRURk9FMQJH166JkdTK0q5yvveAUlJXoTvIiIjA36SS99WlPfyKLaGgVAIiIypigIkl51djrrGhpZdJgehCgiImOLgiDp1ebd+2lu7VAQJCIiY46CIOnVmvpGABbqlRgiIjLGKAiSXq1tCEGQWoJERGSsURAkvVpT38icKeOprtCNhCIiMrYoCJJerY13homIiIw1CoIkr9b2Tp7Z1qRLYSIiMiYpCJK8nt3eRHunKwgSEZExSUGQ5FVXr07RIiIydikIkrzq6hspKzGOml5d7KSIiIgMOQVBg2BmHzazKcVOx3Cpq2/k6BnVlJepmIiIyNijs9vg1AK
PmtlKMzvDzMbUy7XqGhpZqEthIiIyRikIGgR3/zSwALgRWAGsM7MvmNnRRU3YEGhqaWfTrv28REGQiIiMUQqCBsndHaiPf+3AFOAuM/tyURM2SHV6XYaIiIxxegzwIJjZR4ALgO3At4C/d/c2MysB1gFXFDN9g5F5XYZagkREZKxSEDQ4U4G3ufvzyYnu3mlmbypSmoZEXX0jVeWlzJ48vthJERERGRa6HDY4Pwd2ZkbMbKKZnQTg7k8XLVVDoK6+kQW1NZSUjKm+3iIiIl0UBA3O9UBTYrwpThvV3J26hkZdChMRkTFNQdDgWOwYDYTLYIyBS4zbmlrY2dyqJ0WLiMiYpiBocJ41s78zs3Hx7yPAs4WsGJ8rVGdm683syhzzjzSzX5vZE2Z2v5nNScy70MzWxb8Lh/DzALC2PjRu6e3xIiIylikIGpwPAH8JbAY2AScBl/S1kpmVAl8HzgQWA+eZ2eKsxa4DbnH3lwNXA1+M604F/jHuaynwj0P91Oo19XsBvTNMRETGtlF/6aaY3H0rcO4AVl0KrHf3ZwHM7E5gOfBUYpnFwMfj8H3Aj+Pw6cAv3X1nXPeXwBmSgiW+AAATy0lEQVTAHQNIR0519Y1Mr65gWnXFUG1SRETkkKMgaBDMrBK4CHgZUJmZ7u7v7WPV2cDGxHimFSlpNfA24N+AtwI1ZjYtz7qz86TvEmLL1BFHHNFHkrqtbWhk0WF6aaqIiIxtuhw2OLcChxFaZx4A5gCNQ7Tty4FTzOxx4BTCJbeO/mzA3W9w9yXuvmTGjBkFrdPZ6axtaGJR7cR+J1hERGQ0URA0OMe4+2eAZne/GXgjB7fo5LIZmJsYnxOndXH3F939be5+AvCpOG13IesOxsZd+9jf1qGWIBERGfMUBA1OW/y/28yOBSYBMwtY71FggZnNN7NyQr+iu5MLmNn0+PoNgE8CN8Xhe4E3mNmU2CH6DXHakFgT3xm26DC1BImIyNimIGhwboiByKcJQcxTwJf6Wsnd24EPEYKXp4GV7v6kmV1tZmfHxZYBdWa2FqgFronr7gQ+RwikHgWuznSSHgrdL05VS5CIiIxt6hg9QLGVZq+77wIeBI7qz/rufg9wT9a0zyaG7wLuyrPuTXS3DA2puoZGjpg6gQnlKhoiIjK2qSVogOLToUftW+Lzqatv1POBREQkFRQEDc6vzOxyM5trZlMzf8VO1EC1tHfw3PZmvTNMRERSQdc8Buf/xf9/m5jm9PPS2KHima3NdHQ6C/W6DBERSQEFQYPg7vOLnYahVNcQXpehliAREUkDBUGDYGYX5Jru7reMdFqGwpr6RspLS5g3varYSRERERl2CoIG5y8Sw5XAqcAfgFEZBK2tb+SoGVWMK1VXMRERGfsUBA2Cu384OW5mk4E7i5ScQaurb2Tp/FHbr1tERKRf9JN/aDUDo7Kf0N4Dbby45wAL1R9IRERSQi1Bg2Bm/024GwxCQLkYWFm8FA3c2vikaHWKFhGRtFAQNDjXJYbbgefdfVOxEjMYemeYiIikjYKgwXkB2OLuBwDMbLyZzXP3DcVNVv+tbWikpqKMWZMqi50UERGREaE+QYPzfaAzMd4Rp406a+obWXhYDWZW7KSIiIiMCAVBg1Pm7q2ZkThcXsT0DIi7651hIiKSOgqCBmebmZ2dGTGz5cD2IqZnQLY2trBnfxuL9LoMERFJEfUJGpwPALeZ2dfi+CYg51OkD2XdnaIVBImISHooCBoEd38GeJWZVcfxpiInaUDq6sM7w9QSJCIiaaLLYYNgZl8ws8nu3uTuTWY2xcw+X+x09VddfRMzayqYUjXqujOJiIgMmIKgwTnT3XdnRtx9F3BWEdMzIHUNe3UpTEREUkdB0OCUmllFZsTMxgMVvSx/yOnodNY1NOlSmIiIpI76BA3ObcCvzezbgAErgJuLmqJ+en5HMy3tnWoJEhGR1FEQNAju/iUzWw2cRniH2L3AkcVNVf/Udb0zTK/LEBGRdNHlsMFrIARA7wBeBzxd3OT0T11DI2ZwzMzqYidFRERkRKklaADMbCFwXvzbDnwPMHf/66ImbADq6huZN62K8eWlxU6KiIjIiFIQNDBrgP8F3uTu6wHM7GPFTdLA1NU3qlO0iIikki6HDczbgC3AfWb2TTM7ldAxelQ50NbBhh3NLFSnaBERSSEFQQPg7j9293OBlwD3AR8FZprZ9Wb2huKmrnDrtzbR6fASBUEiIpJCCoIGwd2b3f12d38zMAd4HPhEkZNVsDq9M0xERFJMQdAQcfdd7n6Du59a7LQUqq6hkfKyEo6cOqHYSRERERlxCoJSbE19IwtmVlNWqmIgIiLpo7Nfiq3VnWEiIpJiCoKKxMzOMLM6M1tvZlfmmH+Emd1nZo+b2RNmdlacPs7MbjazP5nZ02b2yYHsf8++Nur3HlB/IBERSS0FQUVgZqXA14EzgcXAeWa2OGuxTwMr3f0E4FzgP+P0dwAV7n4ccCLwfjOb1980rKnfC6hTtIiIpJeCoOJYCqx392fdvRW4E1ietYwDmRd6TQJeTEyvMrMyYDzQCuztbwLWNujOMBERSTcFQcUxG9iYGN8UpyVdBZxvZpuAe4APx+l3Ac2EhzW+AFzn7jtz7cTMLjGzVWa2atu2bT3mralvZGJlGYdNrBzsZxERERmVFAQdus4DvuPuc4CzgFvNrITQitQBzALmA5eZ2VG5NhBv2V/i7ktmzJjRY15dfSMvOWwiZqPuQdciIiJDQkFQcWwG5ibG58RpSRcBKwHc/SGgEpgO/A3wC3dvc/etwO+AJf3ZubtT19DIwsP05ngREUkvBUHF8SiwwMzmm1k5oePz3VnLvACcCmBmLyUEQdvi9NfF6VXAqwgvdC3Ylj0HaDzQzqLDJva9sIiIyBilIKgI3L0d+BBwL/A04S6wJ83sajM7Oy52GXCxma0G7gBWuLsT7iqrNrMnCcHUt939if7svy52itY7w0REJM3Kip2AtHL3ewgdnpPTPpsYfgp4TY71mgi3yQ9Y5p1hC2cqCBIRkfRSS1AK1dU3cvikSiZNGFfspIiIiBSNgqAUqqtvZKFelyEiIimnIChl2js6Wb+tSf2BREQk9RQEpcyGHc20tnfqSdEiIpJ6CoJSpq6+CUCXw0REJPUUBKVMXf1eSkuMY2bqQYkiIpJuCoJSpq6hkXnTJlA5rrTYSRERESkqBUEp88y2ZrUCiYiIoCAodfa1tDOxUs8HEhERURCUMi3tnVSM09cuIiKis2HKtLR3UlGm/kAiIiIKglKmpb2DijJ97SIiIjobpkhHp9PW4WoJEhERQUFQqrS2dwKoT5CIiAgKglKlKwjS5TAREREFQWnS0t4BQLmCIBEREQVBadLS1RKkPkEiIiIKglIk0xKky2EiIiIKglLlQJv6BImIiGTobJgiXZfD9PJUERERBUFposthIiIi3XQ2TJEW3SIvIiLSRWfDFGnV3WEiIiJdFASlSIueGC0iItJFZ8MUaWmLD0ss1dcuIiKis2GKqCVIRESkm86GKaInRouIiHRTEJQiukVeRESkm8
6GKdKiJ0aLiIh00dkwRVraOykvK8HMip0UERGRolMQVCRmdoaZ1ZnZejO7Msf8I8zsPjN73MyeMLOzEvNebmYPmdmTZvYnM6ssZJ+t7Z1qBRIREYnKip2ANDKzUuDrwOuBTcCjZna3uz+VWOzTwEp3v97MFgP3APPMrAz4LvBud19tZtOAtkL229LeoU7RIiIikZoFimMpsN7dn3X3VuBOYHnWMg5MjMOTgBfj8BuAJ9x9NYC773D3jkJ22qKWIBERkS46IxbHbGBjYnxTnJZ0FXC+mW0itAJ9OE5fCLiZ3WtmfzCzKwrdqYIgERGRbjojHrrOA77j7nOAs4BbzayEcAnztcC74v+3mtmpuTZgZpeY2SozW7Vt2zZa2jooVxAkIiICKAgqls3A3MT4nDgt6SJgJYC7PwRUAtMJrUYPuvt2d99HaCV6Za6duPsN7r7E3ZfMmDEjtASNU58gERERUBBULI8CC8xsvpmVA+cCd2ct8wJwKoCZvZQQBG0D7gWOM7MJsZP0KcBTFCB0jNZXLiIiAro7rCjcvd3MPkQIaEqBm9z9STO7Gljl7ncDlwHfNLOPETpJr3B3B3aZ2b8QAikH7nH3nxWy35b2Tqor9JWLiIiAgqCicfd7CJeyktM+mxh+CnhNnnW/S7hNvl9a2zupqNLlMBEREdDlsFQJfYL0lYuIiICCoFRRnyAREZFuOiOmSEubnhMkIiKSoTNiioSHJapPkIiICCgIShVdDhMREemmM2KK6LUZIiIi3XRGTAn38KcnRouIiAQKglLCcQC1BImIiEQ6I6ZEZ4iBFASJiIhEOiOmRHjjBro7TEREJFIQlBKZlqBytQSJiIgACoJSo7slSF+5iIgIKAhKDc/0CdK7w0RERAAFQanRqT5BIiIiPSgISonYEKTLYSIiIpHOiCmhu8NERER6UhCUEp3qEyQiItKDzogpobvDREREetIZMSW6nxity2EiIiKgICg1XA9LFBER6UFnxJTo1AtURUREetAZMSVcL1AVERHpQWfElHB3SkuMslJ95SIiIgBlxU6AjIyKslLOOu7wYidDRETkkKFmgZSYPGEc/37eCcVOhoiIyCFDQZCIiIikkoIgERERSSUFQSIiIpJKCoJEREQklRQEiYiISCopCBIREZFUUhAkIiIiqaQgSERERFLJPPNSKRnTzKwRqCt2Og4R04HtxU7EIUD50E150U150W2Ru9cUOxEyfPTajPSoc/clxU7EocDMVikvlA9JyotuyotuZraq2GmQ4aXLYSIiIpJKCoJEREQklRQEpccNxU7AIUR5ESgfuikvuikvuikvxjh1jBYREZFUUkuQiIiIpJKCIBEREUklBUFjnJmdYWZ1ZrbezK4sdnqGg5nNNbP7zOwpM3vSzD4Sp081s1+a2br4f0qcbmb27zFPnjCzVya2dWFcfp2ZXViszzQYZlZqZo+b2U/j+Hwzezh+3u+ZWXmcXhHH18f58xLb+GScXmdmpxfnkwyemU02s7vMbI2ZPW1mr05xufhYPD7+bGZ3mFllWsqGmd1kZlvN7M+JaUNWDszsRDP7U1zn383MRvYTyoC5u/7G6B9QCjwDHAWUA6uBxcVO1zB8zsOBV8bhGmAtsBj4MnBlnH4l8KU4fBbwc8CAVwEPx+lTgWfj/ylxeEqxP98A8uPjwO3AT+P4SuDcOPwN4INx+FLgG3H4XOB7cXhxLCsVwPxYhkqL/bkGmBc3A++Lw+XA5DSWC2A28BwwPlEmVqSlbAB/BbwS+HNi2pCVA+CRuKzFdc8s9mfWX2F/agka25YC6939WXdvBe4Elhc5TUPO3be4+x/icCPwNKHSX044CRL/vyUOLwdu8eD3wGQzOxw4Hfilu+90913AL4EzRvCjDJqZzQHeCHwrjhvwOuCuuEh2PmTy5y7g1Lj8cuBOd29x9+eA9YSyNKqY2STCye9GAHdvdffdpLBcRGXAeDMrAyYAW0hJ2XD3B4GdWZOHpBzEeRPd/ffu7sAtiW3JIU5B0Ng2G9iYGN8Up41Zsdn+BOBhoNbdt8RZ9UBtHM6XL2Mhv74KXAF0xvFpwG53b4/jyc/U9Xnj/D1x+bGQDxBaKrYB346XB79lZlWksFy4+2bgOuAFQvCzB3iM9JYNGLpyMDsOZ0+XUUBBkIwZZlYN/AD4qLvvTc6Lv9DG9PMgzOxNwFZ3f6zYaTlElBEugVzv7icAzYTLHl3SUC4AYn+X5YTAcBZQxehszRoWaSkHcjAFQWPbZmBuYnxOnDbmmNk4QgB0m7v/ME5uiE3VxP9b4/R8+TLa8+s1wNlmtoFw6fN1wL8RmvMz7wlMfqauzxvnTwJ2MPrzIWMTsMndH47jdxGCorSVC4DTgOfcfZu7twE/JJSXtJYNGLpysDkOZ0+XUUBB0Nj2KLAg3gFSTujgeHeR0zTkYl+FG4Gn3f1fErPuBjJ3cFwI/CQx/YJ4F8irgD2xWfxe4A1mNiX+cn5DnDYquPsn3X2Ou88jfNe/cfd3AfcBb4+LZedDJn/eHpf3OP3ceIfQfGABoePnqOLu9cBGM1sUJ50KPEXKykX0AvAqM5sQj5dMXqSybERDUg7ivL1m9qqYtxcktiWHumL3zNbf8P4R7nRYS7iL41PFTs8wfcbXEpqynwD+GP/OIvRh+DWwDvgVMDUub8DXY578CViS2NZ7CZ091wPvKfZnG0SeLKP77rCjCCeq9cD3gYo4vTKOr4/zj0qs/6mYP3WM4jtdgOOBVbFs/JhwV08qywXwT8Aa4M/ArYQ7vFJRNoA7CH2h2ggthBcNZTkAlsR8fQb4GvFtDPo79P/02gwRERFJJV0OExERkVRSECQiIiKppCBIREREUklBkIiIiKSSgiARERFJJQVBIilnZtPM7I/xr97MNifGywvcxrcTz+PJt8zfmtm7hibVI799ERl7dIu8iHQxs6uAJne/Lmu6EeqLzpwrioiMQmoJEpGczOwYM3vKzG4DngQON7MbzGyVmT1pZp9NLPtbMzvezMrMbLeZXWtmq83sITObGZf5vJl9NLH8tWb2iJnVmdlfxulVZvaDuN+74r6Oz5G2f47LPGFmX0pu38zmJlqy/mhmnWY228xqzeyHcZuPxKcBi0iKlfW9iIik2EuAC9x9FYCZXenuO+P7pO4zs7vc/amsdSYBD7j7lWb2L4Sn7F6bY9vm7kvN7Gzgs4QXen4YqHf3c8zsFcAfDlrJrJbwRPCXubub2eTkfHffSHhSNGb2EeAkd99sZt8DvuzuvzezecBPgWMHlCsiMiYoCBKR3jyTCYCi88zsIkLdMQtYTHgHVdJ+d/95HH4MODnPtn+YWGZeHH4t8CUAd19tZk/mWG8n0Al808x+RghmDmJmf0V4J9Rr46TTgEXhyh4AU8xsvLvvz5M+ERnjFASJSG+aMwNmtgD4CLDU3Xeb2XcJ75jK1poY7iB/PdNSwDIHcfc2M1sCvB54B/BBwsssu5jZbOAG4E3uvi8zOaY9mT4RSTH1CRKRQk0EGglvzD4cOH0Y9vE74J0AZnYcoaWpBzOrASa6+0+Bj
wEnZM0vJ7z88zJ3X5+Y9SvgbxPLHdTXSETSRUGQiBTqD4RLX2uAWwgBy1D7D2C2mT0F/GPc356sZSYBPzOz1cADwMez5p9MCIyuSXSOnkkIgF4TO1M/BVw8DOkXkVFEt8iLyCEjdrguc/cD8fLb/wAL3L29yEkTkTFIfYJE5FBSDfw6BkMGvF8BkIgMF7UEiYiISCqpT5CIiIikkoIgERERSSUFQSIiIpJKCoJEREQklRQEiYiISCr9fzcTkNYSs0gAAAAAAElFTkSuQmCC\n", 2013 | "text/plain": [ 2014 | "
" 2015 | ] 2016 | }, 2017 | "metadata": {}, 2018 | "output_type": "display_data" 2019 | }, 2020 | { 2021 | "data": { 2022 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAf0AAAEWCAYAAABsT07JAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3Xd8XnX5//HX1TRddDfpntBdZIaW3VLKXoqDIaNMUVGGCKjolx+ioKKigihThoiIiihIZXSwoaWsjnTRPZJ0h84k1++Pzwkcwp3mTprk5M79fj4eeeQ+53zOOdeZ19kfc3dERESk+WuRdAAiIiLSOJT0RUREsoSSvoiISJZQ0hcREckSSvoiIiJZQklfREQkSySW9M3MzWxwHfs9wswK6zumNMY7zMzeMbPNZvbtRhhfDzObFo3vlw09vkxiZlPM7OJGGE8i61pdmdn3zezeRhrXODNb3hjjSoeZ9TezUjPLqc+y9aUprUtm9l8zO38X3f9kZjenOayB0f68Zf1F2DBqM10p+j3MzOZH683n6zu22HgadD2pcSGZ2WKgB1Aea/0nd7+8oYJKEYMDQ9x9AYC7vwQMa6zxx1wLTHb3/RppfJcCJUBH1wcVEpHgulYn7v7Thhp21e2wnoc9EbjY3Q+v6zDcfSnQvr7L1pemtC65+wmVv+tj3meJm4A73P03DTmShl5P0j0yO8Xdn2+oIDLIAOCxhh6JmRlg0fhm1yXhm1lLdy+r9+Bkt1UuX3evSDqWTGJmOe5eXnNJkQYxAJhVlx6b1P7Y3Xf5BywGJqRo3xrYAOwda5cPbAW6R82XAAuAdcBTQO9YWQcGR7+nEI40K7tNBF6Ofk+Lyn4ElAJnAOOA5bHyI6JhbCAslFNj3f4E3Ak8DWwG3gD22sX0nhoNY0M0zBFR+xcJVzu2RXEMTdHvFOAW4E1gE/AvoGus+8HAq9Gw3wXGVen3J8Ar0Tx8BNgJ7IjGNyGa57cDK6O/24HWUf/jgOXAdcBq4OFYu2uBImAV8HngRGBetFy+H4thNPBaFN8q4A6gVZVldhkwPypzJyF5EVvec6L5PBs4IGrfG/g7UAx8CHy7mnk/KBpui6j5HqAo1v1h4MrY/PpxNL82A/8D8moxr6vtt0pM4/j0urYYuAZ4D9gI/BVoE+t+GvBOtPwXAsdXs3wHA52A+6J5vQK4GciJyu9FWOfWEq72/BnoHBvPdVE/m4FC4Oio/Y3AI9HvgdEyOx9YGg3nB7FhtAUeBNZHy+3a+LRWmQ/VbofAd/hk/bqgyj7itmjca4A/AG1TDHsEYbsqj4a9Ibbt3gU8E413AnASMDOav8uAG2PDqZzeljUt59qUjbqfByyJlscPqWa/GJU9kbD+b46W0TVV16Vo/pXG/rYDU2oz3+ph+7m4hnmf1n4zxbzsTdjfryPs/y+p4zpnwK8J69Ym4H2ifBMN55fRMtkIvFw5j4C/EfaBGwnr7agq+eDmWPPJhO11A2F/sU81sSwEKgjbbmm0jHY1nTcCTxD245uI5bfE15M0VqTFVL9y3w/8JNb8TeDZ6Pd4wk7mgCi43wHTYmXTSvpVy6aYKbnRDP8+0Coa72ZgWGwhryUktJaEnedj1UzPUMLO5ZhouNdGw26VKs4U/U+JFt7ewB6ERFe5A+4TxXEi4VmKY6Lm/Fi/S4FRUZy5fHYFvQl4HehOOMB6FfhxbJ6UAT+L5nfbWLsfRcO7hJB4HwU6ROPaCgyKhnEgIVm2JGzIc4h2ErHl8B+gM9A/GlZlUvtyNO0HETbWwYQj4xbAjCiGVsCewCLguGrm4VLgwOh3YVR2RKzb/rH5tTBaZm2j5ltrMa9T9psinnF8Num/Sdjgu0bz6LKo22jCjuaYaLx9gOG7WL7/BP5IWFe6R8P9WlR+cDSc1tGyngbcHnUbRkh4vWM73b1iO5uqSf+eaDr3Jew0KufnrcBUoAvQl3Agk3IHvIvtsIywXuZG83sL0CXq/mvCTrErYX37N3BLNcOeSGybj227G4HDovnZJhrn56LmfQg7u89Xk3yqXc61LDuSsMM9nLAO30Y4IK9uv7gKOCL63YVPDn7HpZq/QEfCevS12s633dx+Lq5h3qe736w6L6cBv4+W136E/cT42q5zwHGEfUdnwj5lBNAr6nZnNA19gBzgUD45Abowmm+VJ0nvVJmum6Pf+xMOKMZEwzifsH23riaexfFlXsN03hitI58nrKupDnYTWU/STfqlhCOhyr9Lom4TgIWxsq8A50W/7wN+HuvWPpoJA6vuQNi9pH8E4aiuRaz7X4jOAKKFfG+s24nA3Gqm9YfA47HmFoRENi5VnCn6n0IseRB2FjuiFeo64OEq5ScB58f6vSnFhhdP+guBE6tsFItj82QHnz7rHEdI6pVnjx2ieTkmVmYG0U4zxfRcCfyzynI4PNb8OHB9bFquSDGMMcDSKu2+BzxQzTgfBq4GehJ2Wj8nXF2oehYzBbgh1t83+OSAM515nbLfFPF8vK7FtodzYs0/B/4Q/f4j8OtdrBs3xZp7EBJw21i7swjPjKTq//PAzOj3YMLOagKQW6XcjXw26feNdX8TODP6/amDL8KZX22T/lainX3Urohw4GiEA+i9Yt0OAT6sZtgTSZ14HqounqjM7ZXznNSJvLp1pDZlfwT8JdatHWFbqy7pLwW+RngWp9p1KWrXgnAgfVfUXKv5tpvbT01JP9395sfzEuhHuGrQIdb9FsJzYLVa5wgncPOi9Sm+f28RrXf7pjE/OkexdYpNV2XSv4vopClWvhAYW82wFlcu8zSm80ZiJ7lNaT1J9+n9z7t759jfPVH7yUA7MxtjZgMJRzv/jLr1Jlx6AcDdSwlHjn3SHGe6egPL/NP3R5dUGc/q2O8tVP8AT9WYKwhnVLWJeVmVOHKBPMJZ75fNbEPlH+HMoVc1/dYYX/S7d6y52N23VelnrX9yH3Rr9H9NrPtWovlhZkPN7D9mttrMNgE/jWKPq25e9iMclFQ1AOhdZbq/T0h6qUwlrPRHEo6kpwBjo7+Xqizn6mJJZ16nu06kUtt5UCm+fAcQ1o1VsRj/SDjjr3xz4zEzWxEti0eIloWHB+muJOxYiqJy8fUg3Xh7V4mppvUvlbX+6XuVlcPPJyTHGbHpezZqXxufiina10w2s2Iz20hIaFXX0bjaLOe05pO7byHsy6rzRUKSXGJmU83skF2U/QnhYLzybaDdnW+12X5qUpdtpDewzt03x9rF98dpr3Pu/iLhFuOdhPX8bjPrSFjebUixrZlZjpndamYLo+1mcdQp1ToyAPhOlf1EPz69T63rdO5y2iKJrCe79cpelEweJ5yhnAX8JzYTVhJmKgBmtgfQjXDmXNVH0QRU6lmLMFYC/cwsPi39qxlPOsOKx
2yElaA2w+pXJY6dhNscywhnn/GDpz3c/dZYea9NfNHwV9ai/5rcBcwlPKHdkZCcLc1+lxHuQ6dq/2GV6e7g7idWM5yphKs346LfLxMu746NmtONpaZ53RCqmweV4stnGeFMPy8WY0d3HxV1/2lU/nPRsjiH2LJw90c9PG09ICr3szrEu4pwibVSv+oK1kEJ4YByVGz6Orl7dYmjunW3avtHCZc0+7l7J8J9zHTX0br61Hwys7aEfVlK7v6Wu59GOIB7krCP/AwzO5Ow3/ySu++MWtd2vlVVl+1nd/cbcSuBrmbWIdYuvj+u1Trn7r919wMJV02HAt8lzKNtpN7WziY8VzOB8MzMwKh9qnVkGeH2dHw/0c7d/7KrmCI1TSfUMF+TWk/q4z39RwkPG3w1+l3pL8AFZrafmbUm7MTecPfFKYbxDnC6mbWL3t2/qEr3NYR7wam8QTgKvdbMcs1sHHAKdXvK/nHgJDM72sxyCQ8obSfcO0/XOWY20szaEe51PhEdHD0CnGJmx0VHo22i95z77npwn/IX4AYzyzezPMJlx0dq0X9NOhAeOik1s+HA12vR773ANWZ2oAWDzWwA4XLyZjO7zszaRtO+t5kdlGog7j6fsDKfA0x1902E5f9F0k/69TGv6+I+wjp/tJm1MLM+0Xz8DHdfRXhY7Jdm1jEqv5eZjY2KdCDcVttoZn0IOzvg4+9FjI+2q22E+VWXNwEeB75nZl2icdT0Gu6utsNPic4o7wF+bWaVVy/6mNlxuxh2XzNrVcOgOxDOsLaZ2WjCTr6hPUFYnw6N4ruRag40zKyVmX3VzDpFO+hNpFg2ZrY/4Tmnz7t7cWX7dOabhXfix6Uafx23n3TnfY3cfRlhf3lLtN3tQ9ifV+6n0l7nzOyg6MpOLuHEcBtQEc2j+4FfmVnvaBs/JNoeOhD22WsJJ5K7eoX1HuCyaBxmZnuY2UlVEnldp3OXGmM9qU66Sf/fFj5IUPlXeQkfd3+DsEB6A/+NtX+ecI/874Sju72AM6sZ/q8J98jWEJ7s/HOV7jcCD0aXMb4S7+DuOwhJ/gTC0c/vCc8VzE1z2uLDKiRsLL+LhnUK4XXFHbUYzMOE+0arCZegvh0NexnhCPT7hAc+lhF25LU58LoZmE54+OV94O2oXX25hrAT3UxYof6abo/u/jfCJahHo/6fJLy5UE54QnY/wpP7JYQDhE67GNxUwmXjZbFmI0xvOrHUx7yuNXd/E7iAsD5vJMQ9YBe9nEd4MGw24WnmJ/jkFsT/IzwEu5HwBPU/Yv21JjwQVUJYz7oTnpOorZsIT99/CDwfjX/7LsrfSDXbYTWuIzwI+3p0qfV5qn//+EXCWzOrzaxkF8P8BnCTmW0mHPSmPDuqT+4+C/gW4URiFeFgrIjq59W5wOJomi8jnBBVdRrh4a2XY/vVyv1ntfPNzPoRtq/3dxFybbefdOd9us4inGGvJNzu/T//5JXv2qxzHQn7ofV88ubEL6Ju1xDmwVuEp+d/Rti+H4rKriBsV69XF6S7Tyc83HxHNI4FhOcb6mM609Fg68muWPQAgNQDM5tCeIiqUb6IJlKfzOzrhIf8xtZYOIuZWXvCQ3FD3P3DRh73OYRLunU5yGtytM41Pn17XyRLmVkvC58WbWFmwwi3s/5ZU3/ZyMxOiW4/7kF4Ze99PnlIrNG4+yOZnPC1ziVPSV8ke7UivDGwmXCJ91+E22PyWafxyUexhhDOTnWZtPa0ziVMl/dFRESyhM70RUREskSTrwoxk+Xl5fnAgQOTDkNEJKPMmDGjxN1r+yEnSYOSfgMaOHAg06dPTzoMEZGMYmZLai4ldaHL+yIiIllCSV9ERCRLKOmLiIhkCSV9ERGRLKGkLyIikiWU9EVERLKEkr6IiEiWUNIXEZF6UVHhvL98I797YT7vLtuQdDiSgj7OIyIidbb+ox1Mm1/M1MJips0vpqR0BwBtW+Wwb7/OCUcnVSnpi4hI2ioqnPdWbGRKYRFTCot5d/kG3KFzu1yOHJLPuGH5HDk0n7z2rZMOVVJQ0hcRkV1aW7qdafOLmVJYzEvzS1j30Q7MYJ++nfn2+CGMG5bPPn07k9PCkg5VaqCkLyIin1Je4byzbANT5xUztbCI91ZsxB267dGKsUPD2fzhg/PoprP5jKOkLyIiFG/ezrR5xUyZV8xL84vZsGUnLQz269eZqyYMZezQfD7XpxMtdDaf0ZT0RUSyUFl5Be8s28CUwmKmzCvigxWbAMhr35qjh/dg7LB8jhicR5c9WiUcqdQnJX0RkSxRtGkbU+aFJ+1fml/Mpm1ltDA4oH8Xrjl2KOOGdWdkr446m2/GlPRFRJqpneUVvL1k/ceJfvaqcDbfvUNrjhvVk3HDunP44Dw6tctNOFJpLEr6IiLNyOqN25g6L7xO9/L8EjZvLyOnhXHggC5ce/wwxg3tzoheHTDT2Xw2yrqkb2bHA78BcoB73f3WKt0HAPcD+cA64Bx3Xx7r3hGYDTzp7pc3WuAiIinsKKtgxpL1TJlXxNTCYuau3gxAz45tOGmfXowbls+hg/Po2EZn85JlSd/McoA7gWOA5cBbZvaUu8+OFbsNeMjdHzSz8cAtwLmx7j8GpjVWzCIiVa3csDU8gFdYxKsL11K6vYzcHKNgQFe+d8Jwxg7LZ1gPnc3LZ2VV0gdGAwvcfRGAmT0GnEY4c680Erg6+j0ZeLKyg5kdCPQAngUKGiNgEZGd5RVMX7z+46/gFa4JZ/N9Orfl1P16M3ZoPocNzqN962zbpUttZdsa0gdYFmteDoypUuZd4HTCLYAvAB3MrBuwHvglcA4woboRmNmlwKUA/fv3r7fARSS7rNm0jamFxUwuLPr43nxujjF6UFe+dOAIxg3LZ3D39jqbl1rJtqSfjmuAO8xsIuEy/gqgHPgG8Iy7L9/VRubudwN3AxQUFHiDRysizULle/OTC4uYPPeTJ+17dmzDyfv2Ytyw7jqbl92WbWvPCqBfrLlv1O5j7r6ScKaPmbUHvujuG8zsEOAIM/sG0B5oZWal7n5944QuIs1NSWn4Ct7kwmKmzStm49adHz9pf93xwzlquO7NS/3KtqT/FjDEzAYRkv2ZwNnxAmaWB6xz9wrge4Qn+XH3r8bKTAQKlPBFpDYqa6ibPLeIKbFv2ue1b82xI3uE9+aH5NGprZ60l4aRVUnf3cvM7HJgEuGVvfvdfZaZ3QRMd/engHHALWbmhMv730wsYBHJeJX1zU+JzubXRjXU7d+vM1dPGMpRw/UVPGk85q7bzg2loKDAp0+fnnQYItKI3J1ZKzcxpbCIyYXFzFy6ngqHrrEa6o4Ykk9XfdO+WmY2w931hlQDyKozfRGRhrBp205enl8SLtvPK6Z483YA9unbicvHD+Eo1TcvTYSSvohILbk7c1dvZuq8YibPLWLGkvWUVTgd27TkyKH5HDWsO0cOzSe/g+qbl6ZFSV9EJA3L12/hlQUlvLJgLa8uLKGkdAcAI3t15Gtj92TcsO7s368zLXNaJBypSPWU9EVEUlj/0Q5eXbiWVxaW8MqCEpas
3QJAfofWHD44j0MH53HkkHx6dmqTcKQi6VPSFxEBtu4o583F63h1QQkvLyhh9qpNuEP71i05eM+uTDx0IIcNzmOIvoInGUxJX0SyUll5Be+t2Mgr80OSn7l0AzvKK8jNMQ7o34WrJwzl0MF57Nu3ky7ZS7OhpC8iWcHdWVBUysvRffk3Fq1l8/YyAEb17sjEw8KZ/EEDu9CulXaN0jxpzRaRZmvlhq28sqAk3JtfUEJR9CrdgG7tOHnf3hw+OI9D9uqmd+Ylayjpi0izsXHLTl5bFM7kX1lQwqKSjwDotkcrDh2cx+GDu3HoXnn069ou4UhFkqGkLyIZa3tZOdMXr48u2ZfwwYqNVDi0a5XDmEFdOXtMfw4bnMewHh30mVsRlPRFJMOUbi9j8twiJs1azZTCYkq3l9GyhbF//858a/wQDh+Sx759O9OqpR6+E6lKSV9Emry1pdt5fs4aJs1aw8sLSthRVkG3PVpx8j69OGZkD8bs2U31zIukQVuJiDRJy9dvYdKsNUyatZrpi9dR4dCnc1vOPXgAx43qyYEDuuhb9iK1pKQvIk2CuzO/qJRJH6xm0uzVfLBiEwDDenTg8qMGc+yonozq3VEfxhHZDUr6IpKYigrn3eUbeHbWav43aw0fRk/bH9C/M987YTjHjerJwLw9Eo5SpPlQ0heRRrWzvII3Fq1j0qzV/G/2atZs2k7LFsYhe3XjwsMHcezIHvToqO/ZizQEJX0RaXBbd5QzdV4x/5u1mhfmFrFx607a5LZg3NDuHLd3D8YP60GndrlJhynS7Cnpi0iD2LhlJy/MDQ/iTZ1XzLadFXRqm8vRI7pz3KieHDkkn7atcpIOUySrKOmLSL1Zs2kb/5u1mkmz1vD6orWUVTg9OrbmKwX9OG5UT0YP6kquKq8RSYySvojslg9LPmLSrNVMmrWamUs3ALBn3h5cfMSeHDeqB/v27ayv4Yk0EUr6IlIr7s6slZs+TvTz1pQCsHefjlxz7FCOG9WTwapzXqRJUtIXkRqVVzjTF6/7+GM5KzZspYXBQQO78qOTR3LsqB707aJKbESaOiV9EUlpe1k5ry5Yy7MfrOb5OWtY+9EOWuW04IgheVxx9BCOHtGdbu1bJx2miNSCkr6IfCxVZTbtW7fkqOHdOW5UD8YN665v3ItkMG29IlmupHQ7z88Ol+1fWbCWHeWhMptT9u3FsaN6cuhe3WjdUq/WiTQHSvoiWShVZTZ9u7Tl3ENUmY1Ic6akL5IFKiuzefaD8MT9rJWxymzGD+G4UT0Y2UuV2Yg0d0r6Is1URYXzzvIN4Rv3qsxGRFDSF2lWKiuzeXbWKp6bveZTldlcFFVm012V2YhkLSV9kQyXqjKbtrk5jB2ar8psRORTsirpm9nxwG+AHOBed7+1SvcBwP1APrAOOMfdl5vZfsBdQEegHPiJu/+1UYMXidm4ZSfPzwkP4k2b/0llNhNG9OC4UT04QpXZiEgKWZP0zSwHuBM4BlgOvGVmT7n77Fix24CH3P1BMxsP3AKcC2wBznP3+WbWG5hhZpPcfUMjT4ZksVSV2fTs2EaV2YhI2rIm6QOjgQXuvgjAzB4DTgPiSX8kcHX0ezLwJIC7z6ss4O4rzayIcDVASV8aVHmF8+93V/Lga4s/VZnNJUfuyXGjerJPn06qzEZE0pZNSb8PsCzWvBwYU6XMu8DphFsAXwA6mFk3d19bWcDMRgOtgIUNG65ks4oK59lZq/n1c/OYX1TKkO7tVZmNiOy2bEr66bgGuMPMJgLTgBWEe/gAmFkv4GHgfHevSDUAM7sUuBSgf//+DR2vNDPuzgtzivjlc/OYs2oTg7u3586zD+CEvXvqjF5Edls2Jf0VQL9Yc9+o3cfcfSXhTB8zaw98sfK+vZl1BJ4GfuDur1c3Ene/G7gboKCgwOtzAqT5cnemzS/hV8/N491lGxjQrR2/PmNfTt23j76MJyL1JpuS/lvAEDMbREj2ZwJnxwuYWR6wLjqL/x7hSX7MrBXwT8JDfk80atTS7L22cC2/eq6Qtxavp0/ntvzsi5/j9AP66qE8Eal3WZP03b3MzC4HJhFe2bvf3WeZ2U3AdHd/ChgH3GJmTri8/82o968ARwLdokv/ABPd/Z3GnAZpXmYsWc+vnivklQVr6dGxNT8+bRRfOaifKrcRkQZj7roC3VAKCgp8+vTpSYchTcz7yzfyq+cKmVxYTF77Vlw2di/OOXgAbXKV7EUAzGyGuxckHUdzlDVn+iJJm7t6E79+bh6TZq2hU9tcrjt+OOcfOoB2rbQZikjj0N5GpIEtKCrl9ufn8fT7q2jfqiVXThjChYcPomMbfRpXRBqXkr5IA1m6dgu3vzCPJ2euoE1uDt8YtxeXHLEnndu1Sjo0EclSSvoi9WzFhq3c8eJ8/jZ9OTktjIsOH8RlY/eiW/vWSYcmIllOSV+knhRt2sadkxfwlzfDhx+/OqY/3zhqMD1Ula2INBFK+iK7aW3pdv4wdSEPvbaEsgrnKwV9uXz8EPp0bpt0aCIin6KkL1JHG7bs4J6XFvHAK4vZtrOcz+/fhyuOHsKAbnskHZqISEpK+iK1tHnbTu5/eTH3vrSIzdvLOHmfXlw5YSiDu7dPOjQRkV1S0hdJ05YdZTz46hL+OG0hG7bs5NiRPbjqmKGM6NUx6dBERNKipC+ShuXrt3DGH19nxYatjBuWz9XHDGWfvp2TDktEpFaU9EVqUFK6nfPue5PN23by2KUHc/Ce3ZIOSUSkTpT0RXZh87adTHzgTVZu3MojF42hYGDXpEMSEakz1d0pUo1tO8u55KHpzF21mbvOOVAJX0Qyns70RVIoK6/gW3+ZyRsfruP2M/bjqGHdkw5JRGS36UxfpAp35/p/vM9zs9dw4ymjOG2/PkmHJCJSL5T0RWLcnZ8+M4cnZiznyglDOP/QgUmHJCJSb5T0RWLumrqQe176kImHDuSKo4ckHY6ISL1S0heJPPrGUn7+bCGn7debH508EjNLOiQRkXqlpC8CPPP+Kn7w5PscNSyf2768Ly1aKOGLSPOjpC9Z76X5xVzx2EwO7N+F33/1QHJztFmISPOkvZtktZlL1/O1h2ewV3577pt4EG1b5SQdkohIg1HSl6w1f81mLvjTW+S1b81DF46mU9vcpEMSEWlQGZn0zexbZtYl6Tgkcy1fv4Vz73uT3JwWPHLRGLp3bJN0SCIiDS4jkz7QA3jLzB43s+NNj1lLLZSUbufc+95ky44yHr5oNP27tUs6JBGRRpGRSd/dbwCGAPcBE4H5ZvZTM9sr0cCkydu8bSfn3/8mqzZu5YELDmJ4z45JhyQi0mgyMukDuLsDq6O/MqAL8ISZ/TzRwKTJ2raznIsfnE7h6lCBzoEDVIGOiGSXjKxwx8yuAM4DSoB7ge+6+04zawHMB65NMj5pesrKK7j80Zm8uVgV6IhI9srIpA90BU539yXxlu5eYWYnJxSTNFEVFc51f3+f5+es4cenqQIdEclemXp5/7/AusoGM+toZmMA3H1OYlFJk1NZgc7f317OVROGcu4hA5MOSUQ
kMZma9O8CSmPNpVE7kU/5/ZSF3PtyqEDn20cPTjocEZFEZWrSt+hBPiBc1idzb1VIA3n0jaX8YlIhn1cFOiIiQOYm/UVm9m0zy43+rgAWpdNj9F5/oZktMLPrU3QfYGYvmNl7ZjbFzPrGup1vZvOjv/PrcXqknj39XqhAZ/zw7vxCFeiIiACZm/QvAw4FVgDLgTHApTX1ZGY5wJ3ACcBI4CwzG1ml2G3AQ+6+D3ATcEvUb1fg/6JxjQb+T18FbJpeml/MlX+dScGALtx59gGqQEdEJJKRl8TdvQg4sw69jgYWuPsiADN7DDgNmB0rMxK4Ovo9GXgy+n0c8Jy7r4v6fQ44HvhLHeKQBhKvQOfe81WBjohIXEYmfTNrA1wEjAI+/mi6u19YQ699gGWx5sqrBHHvAqcDvwG+AHQws27V9PuZd7/M7FKiqw79+/dPY2qkvsyLKtDJ79Cahy5SBToiIlVl6nXPh4GehLPvqUBfYHM9DfsaYKyZzQTGEm4hlKfbs7vf7e4F7l6Qn59fTyFJTZat28K5971Bq8oKdDqoAh0RkaoyNekPdvcfAh+5+4PASXz2jD2VFUC/WHOHY4X8AAAWMUlEQVTfqN3H3H2lu5/u7vsDP4jabUinX0lGSel2zrv/TbbuKOehi0bTr6sq0BERSSVTk/7O6P8GM9sb6ASk813Vt4AhZjbIzFoRngt4Kl7AzPKiz/kCfA+4P/o9CTjWzLpED/AdG7WTBG1SBToiImnL1KR/d5R4byAk7dnAz2rqyd3LgMsJyXoO8Li7zzKzm8zs1KjYOKDQzOYRqvD9SdTvOuDHhAOHt4CbKh/qk2TEK9D5gyrQERGpkcW+cZMRorPwL7n740nHUpOCggKfPn160mE0S2XlFVz2yNu8MHcNt5+xn76nL9KMmNkMdy9IOo7mKOPO9KOv76kWvSwWr0DnplNVgY6ISLoyLulHnjeza8ysn5l1rfxLOihpeO7OT6IKdK4+RhXoiIjURka+pw+cEf3/ZqydA3smEIs0ot9PWch9UQU63xqvCnRERGojI5O+uw9KOgZpfH9+Ywm/mFTIF/bvowp0RETqICOTvpmdl6q9uz/U2LFI43j6vVXc8OQHjB/enZ9/aR9VoCMiUgcZmfSBg2K/2wBHA28DSvrN0LR5qkBHRKQ+ZGTSd/dvxZvNrDPwWELhSAN6O6pAZ3D3DqpAR0RkNzWXU6aPAN3nb2bmrdnMBQ+8RfeOrXnwwoNUgY6IyG7KyDN9M/s34Wl9CAcuI4Em/7EeSV9lBTqtW6oCHRGR+pKRSR+4Lfa7DFji7suTCkbqV/Hm7Zx73xts3VHO3y47VBXoiIjUk0xN+kuBVe6+DcDM2prZQHdfnGxYsrsqK9BZs2k7j1w8hmE9OyQdkohIs5Gp9/T/BlTEmsujdpLBKivQmV+0mbvOOYADB3RJOiQRkWYlU5N+S3ffUdkQ/W6VYDyym8rKK7j80bd5a/E6fvmV/Rg3LJ2akkVEpDYyNekXx6rCxcxOA0oSjEd2Q0WFc+3f3+P5OUXcdNrenLpv76RDEhFpljL1nv5lwJ/N7I6oeTmQ8it90rS5Ozc/PYd/vL0iVKBz8ICkQxIRabYyMum7+0LgYDNrHzWXJhyS1NHvpyzk/lc+5ILDVIGOiEhDy8jL+2b2UzPr7O6l7l5qZl3M7Oak45LaeeT1TyrQ+eFJqkBHRKShZWTSB05w9w2VDe6+HjgxwXiklv7z3kp++K8POFoV6IiINJpMTfo5Zta6ssHM2gKtd1FempCp84q56q/vcNCArtz5VVWgIyLSWDLynj7wZ+AFM3sAMGAi8GCiEUla3l66nsuiCnTuOb+ANrmqQEdEpLFkZNJ395+Z2bvABMI3+CcBeuy7iausQKdHx9Y8dOFoVaAjItLIMvm66hpCwv8yMB6Yk2w4sivxCnQevmgM+R10N0ZEpLFl1Jm+mQ0Fzor+SoC/AubuRyUamOxSZQU623ZW8PjXDlEFOiIiCcmopA/MBV4CTnb3BQBmdlWyIcmuqAIdEZGmI9Mu758OrAImm9k9ZnY04UE+aYK27Szn4j+FCnT+cO6BqkBHRCRhGZX03f1Jdz8TGA5MBq4EupvZXWZ2bLLRSdzOygp0lqzjV1/Zj7FD85MOSUQk62VU0q/k7h+5+6PufgrQF5gJXJdwWBKpqHCue+KTCnROUQU6IiJNQkYm/Th3X+/ud7v70UnHIrEKdGau4DuqQEdEpEnJ+KQvTcudkxdw/ysfcuFhg7hcFeiIiDQpSvpSbx5+fQm3/W8ep+/fhxtOGqEKdEREmpisS/pmdryZFZrZAjO7PkX3/mY22cxmmtl7ZnZi1D7XzB40s/fNbI6Zfa/xo2+6/v3uSn70rw+YMKI7P1MFOiIiTVJWJX0zywHuBE4ARgJnmdnIKsVuAB539/2BM4HfR+2/DLR2988BBwJfM7OBjRF3Uzd1XjFXPx4q0LnjbFWgIyLSVGXb3nk0sMDdF7n7DuAx4LQqZRzoGP3uBKyMtd/DzFoCbYEdwKaGD7lpm7Hkkwp07p2oCnRERJqybEv6fYBlseblUbu4G4FzzGw58Azwraj9E8BHhI8DLQVuc/d1VUdgZpea2XQzm15cXFzP4Tcthas3c+GfPqlAp2MbVaAjItKUZVvST8dZwJ/cvS9wIvCwmbUgXCUoB3oDg4DvmNmeVXuOXh8scPeC/Pzm+0Gaygp02uSqAh0RkUyRbUl/BdAv1tw3ahd3EfA4gLu/BrQB8oCzgWfdfae7FwGvAAUNHnETVFmBzvayCh66cIwq0BERyRDZlvTfAoaY2SAza0V4UO+pKmWWAkcDmNkIQtIvjtqPj9rvARxMqAAoq2zcupPzogp07p94kCrQERHJIFmV9N29DLgcmATMITylP8vMbjKzU6Ni3wEuMbN3gb8AE93dCU/9tzezWYSDhwfc/b3Gn4rkbNtZziUPTmeBKtAREclIFvKZNISCggKfPn160mHUi53lFVz28AxeLCzit2fur+/pi0iDMbMZ7p6Vt08bWlad6UvdVFQ41z7xHi/MLeLHqkBHRCRjKenLLrk7P356Nv+cuYJrjh3KOapAR0QkYynpyy7d8eICHnhlMRceNohvHqUKdEREMpmSvlTr4deX8Mvn5nH6AapAR0SkOVDSl5Seileg80VVoCMi0hwo6ctnTCks4uq/vsNBA1WBjohIc6K9uXzKjCXr+fojbzO0RwfuPV8V6IiINCdK+vKx0u1lfO3hGfTo2JoHVYGOiEizo6QvH7trygJKSrfzmzP3VwU6IiLNkJK+ALB8/RbueelDvrB/H/bt1znpcEREpAEo6QsAv5hUiAHfPW5Y0qGIiEgDUdIXZi5dz7/eWcmlR+5J785tkw5HREQaiJJ+lnN3bn56DvkdWnPZ2L2SDkdERBqQkn6We+b91cxYsp7vHDOUPVq3TDocERFpQEr6WWx7WTm3PjuH4T078OWCfkmHIyIiDUxJP4s9+Opilq3byg0njSRHn9kVEWn2lPSz1NrS7fzuhQWMH96dw4fkJR2OiIg0AiX9LP
WbF+azZWc53z9xeNKhiIhII1HSz0ILijbz5zeWcvbo/gzu3iHpcEREpJEo6Wehnz4zl3a5OVw5YUjSoYiISCNS0s8yL88v4cW5RVw+fjDd2uv7+iIi2URJP4uUVzg3Pz2bfl3bcv6hA5MOR0REGpmSfhZ5YsYy5q7ezPXHj6BNbk7S4YiISCNT0s8SpdvLuO1/8zhwQBdO/FzPpMMREZEEKOlniT9OXUjx5u3ccNIIzPQhHhGRbKSknwVWbtjK3dMWceq+vdm/f5ekwxERkYQo6WeB2yYV4sC1xw9LOhQREUmQkn4z997yDfxj5gouPnwQfbu0SzocERFJkJJ+M+bu3PyfOeS1b8XXx+2VdDgiIpIwJf1mbNKs1by5eB1XHTOUDm1ykw5HREQSlnVJ38yON7NCM1tgZten6N7fzCab2Uwze8/MTox128fMXjOzWWb2vpm1adzo07ejrIJb/juXoT3ac0ZBv6TDERGRJqBl0gE0JjPLAe4EjgGWA2+Z2VPuPjtW7AbgcXe/y8xGAs8AA82sJfAIcK67v2tm3YCdjTwJaXvotcUsWbuFBy8cTcucrDu2ExGRFLItG4wGFrj7InffATwGnFaljAMdo9+dgJXR72OB99z9XQB3X+vu5Y0Qc62t/2gHv31hPmOH5jN2aH7S4YiISBORbUm/D7As1rw8ahd3I3COmS0nnOV/K2o/FHAzm2Rmb5vZtQ0dbF395oX5lG4v4wcnjUg6FBERaUKyLemn4yzgT+7eFzgReNjMWhBuhRwOfDX6/wUzO7pqz2Z2qZlNN7PpxcXFjRk3AAuLS3nk9SWcNbo/Q3t0aPTxi4hI05VtSX8FEH+qrW/ULu4i4HEAd38NaAPkEa4KTHP3EnffQrgKcEDVEbj73e5e4O4F+fmNf2n9lmfm0iY3h6uOGdro4xYRkaYt25L+W8AQMxtkZq2AM4GnqpRZChwNYGYjCEm/GJgEfM7M2kUP9Y0FZtOEvLqwhOfnrOEbR+1FXvvWSYcjIiJNTFY9ve/uZWZ2OSGB5wD3u/ssM7sJmO7uTwHfAe4xs6sID/VNdHcH1pvZrwgHDg484+5PJzMln1VeET7E06dzWy48bFDS4YiISBOUVUkfwN2fIVyaj7f7Uez3bOCwavp9hPDaXpPzj7eXM3vVJn571v60yc1JOhwREWmCsu3yfrO0ZUcZv5hUyP79O3PKPr2SDkdERJooJf1m4I9TF1G0eTs3nDQCM0s6HBERaaKU9DPc6o3b+OO0hZy0Ty8OHNA16XBERKQJU9LPcL+YVEhFBVx//PCkQxERkSZOST+DfbBiI/+YuZwLDh9Iv67tkg5HRESaOCX9DOXu3Pz0bLq0a8U3jxqcdDgiIpIBlPQz1HOz1/D6onVcdcxQOrbJTTocERHJAEr6GWhHWQW3/Hcug7u356yD+tXcg4iICEr6GemR15fwYclH/ODEEbTM0SIUEZH0KGNkmA1bdvCbF+ZzxJA8xg1r/Ap9REQkcynpZ5jfvbiAzdt28gN9iEdERGpJST+DfFjyEQ+9tpgzDurH8J4dkw5HREQyjJJ+Brn1v3NoldOCq44ZmnQoIiKSgZT0M8Tri9YyadYavj5uL7p3aJN0OCIikoGU9DNARUX4EE/vTm24+Ig9kw5HREQylJJ+BvjnzBV8sGIT1x4/nDa5OUmHIyIiGUpJv4nbuqOcX0wqZN++nTh1395JhyMiIhlMSb+Ju+elRazetI0bTh5JixZ6RU9EROpOSb8JW7NpG3dNWcgJe/fkoIFdkw5HREQynJJ+E/bL/xVSVlHB9ScMTzoUERFpBpT0m6hZKzfytxnLmXjoQAZ02yPpcEREpBlQ0m+C3J2fPD2Hzm1zuXz8kKTDERGRZkJJvwl6cW4Rry5cy5UThtKpbW7S4YiISDOhpN8EtW6Zw9HDu3P2mP5JhyIiIs1Iy6QDkM86fEgehw/JSzoMERFpZnSmLyIikiWU9EVERLKEkr6IiEiWUNIXERHJEkr6IiIiWUJJX0REJEso6YuIiGQJJX0REZEsYe6edAzNlpkVA0uSjqMO8oCSpIOoJ81lWprLdICmpalqStMywN3zkw6iOVLSl88ws+nuXpB0HPWhuUxLc5kO0LQ0Vc1pWqR6urwvIiKSJZT0RUREsoSSvqRyd9IB1KPmMi3NZTpA09JUNadpkWronr6IiEiW0Jm+iIhIllDSFxERyRJK+lnKzPqZ2WQzm21ms8zsihRlzMx+a2YLzOw9MzsgiVh3Jc3pGGdmG83snejvR0nEWhMza2Nmb5rZu9G0/L8UZVqb2V+jZfKGmQ1s/Ehrlua0TDSz4thyuTiJWNNhZjlmNtPM/pOiW0Ysk0o1TEvGLBOpm5ZJByCJKQO+4+5vm1kHYIaZPefus2NlTgCGRH9jgLui/01JOtMB8JK7n5xAfLWxHRjv7qVmlgu8bGb/dffXY2UuAta7+2AzOxP4GXBGEsHWIJ1pAfiru1+eQHy1dQUwB+iYolumLJNKu5oWyJxlInWgM/0s5e6r3P3t6Pdmwk6gT5VipwEPefA60NnMejVyqLuU5nRkhGg+l0aNudFf1SdtTwMejH4/ARxtZtZIIaYtzWnJCGbWFzgJuLeaIhmxTCCtaZFmTklfiC5H7g+8UaVTH2BZrHk5TTih7mI6AA6JLjX/18xGNWpgtRBden0HKAKec/dql4m7lwEbgW6NG2V60pgWgC9Gt46eMLN+jRxium4HrgUqqumeMcuEmqcFMmOZSB0p6Wc5M2sP/B240t03JR1PXdUwHW8TvuW9L/A74MnGji9d7l7u7vsBfYHRZrZ30jHVVRrT8m9goLvvAzzHJ2fLTYaZnQwUufuMpGPZXWlOS5NfJrJ7lPSzWHSv9e/An939HymKrADiR/p9o3ZNSk3T4e6bKi81u/szQK6Z5TVymLXi7huAycDxVTp9vEzMrCXQCVjbuNHVTnXT4u5r3X171HgvcGBjx5aGw4BTzWwx8Bgw3sweqVImU5ZJjdOSIctEdoOSfpaK7jneB8xx919VU+wp4LzoKf6DgY3uvqrRgkxDOtNhZj0r77Ga2WjCet/kdspmlm9mnaPfbYFjgLlVij0FnB/9/hLwojfBL2ylMy1Vng85lfA8RpPi7t9z977uPhA4kzC/z6lSLCOWSTrTkgnLRHaPnt7PXocB5wLvR/ddAb4P9Adw9z8AzwAnAguALcAFCcRZk3Sm40vA182sDNgKnNkUd8pAL+BBM8shHJg87u7/MbObgOnu/hThAOdhM1sArCPsvJuidKbl22Z2KuENjHXAxMSiraUMXSYpNZdlIunRZ3hFRESyhC7vi4iIZAklfRERkSyhpC8iIpIllPRFRESyhJK+iIhIllDSF6kHZtYtVjPZajNbEWtuleYwHjCzYTWU+aaZfbV+om644ZtZgZn9Mfp9sZndXsfh9DSzZ3Y3HhEJ9MqeSD0zsxuBUne/rUp7I2xzu/ruebNgZv8EbnD3WVH1rHu7+5V1HNbDwB3VfLtfRGpBZ/oiDcjMBpvZbDP7MzAL6GVmd5vZdAv1zP8oV
vZlM9vPzFqa2QYzuzWqJOg1M+selbnZzK6Mlb/VQr31hWZ2aNR+DzP7ezTeJ6Jx7Zcitl9EZd4zs5/Fh29m/WJXKt4xswoz62NmPczsH9Ew34y+1Fh1uJ2AYe4+K0W3QWY2ORrncxZqfcPMhlioi/59M/uJmW2I9fYk0GBXN0SyiZK+SMMbDvza3Ue6+wrgencvAPYFjjGzkSn66QRMjSoJeg24sJphm7uPBr4LVB5AfAtY7e4jgR8Tah78dE9mPQhfWxwVVa5yS7y7uy9z9/2iCnMeAB6LYv8t8PMo/q+QuorW0cD71cT7e+DeaJx/I9T6BqEipNvc/XNA1U89TweOqGZ4IlILSvoiDW+hu0+PNZ9lZm8Tav8bAaRK+lvd/b/R7xnAwGqG/Y8UZQ4nVKiCu79LuMJQ1TpC9ar3mNkXgI9SDdzMjiR8V/7iqNUE4A/RJ4+fBLpE39aP6wUUVxPvmMrYgIf4JJmPIVSaBPBolX6KgN7VDE9EakHf3hdpeB8nVDMbAlwBjHb3DVEtZ21S9LMj9ruc6rfV7WmU+Qx332lmBYSKcL4MfB04Nl7GzPoAdwMnu/uWytZR7PH4qtpK6mmqqzbRMEVkN+lMX6RxdQQ2A5uiGs2Oa4BxvEK49I6ZfY4UVxLMrAPQ0d3/A1xFlVsA0RsHfwO+4+4LYp2eB74ZK/eZZwUINbMNria21ytjA84BpkW/3wS+EP2uWmHNUOCDaoYnIrWgpC/SuN4GZhOqmX2IkKDr2++APmY2G/i/aHwbq5TpBDxtZu8CU4Grq3Q/gnAg8JPYw3zdCQn/sOhBvNnAJSnGPwvIN7M9UnT7JnCpmb0HnEE44AD4NnBd1H5QlXiPAp5OZ8JFZNf0yp5IM2NmLYGW7r4tup3wP2CIu5c1YgzfBYrd/U9plt8D2OLubmbnAF9w9y9Grzm+BJzk7lUPXESklnRPX6T5aQ+8ECV/A77WmAk/cgdwei3KHwTcbmYtgPXABVH77oS3BZTwReqBzvRFRESyhO7pi4iIZAklfRERkSyhpC8iIpIllPRFRESyhJK+iIhIlvj/s5w4YORNCLAAAAAASUVORK5CYII=\n", 2023 | "text/plain": [ 2024 | "
" 2025 | ] 2026 | }, 2027 | "metadata": {}, 2028 | "output_type": "display_data" 2029 | } 2030 | ], 2031 | "source": [ 2032 | "import matplotlib.pyplot as plt\n", 2033 | "\n", 2034 | "best_acc = [0.84558, 0.87324, 0.91232, 0.9203, 0.93174, 0.93584, 0.94032, 0.94616]\n", 2035 | "sizes = [50, 100, 500, 1000, 5000, 10000, 20000, 50000]\n", 2036 | "plt.plot(sizes, best_acc)\n", 2037 | "plt.title('Evolution of performance when increasing the training size')\n", 2038 | "plt.xlabel('Training size')\n", 2039 | "plt.ylabel('Accuracy')\n", 2040 | "plt.show()\n", 2041 | "\n", 2042 | "plt.plot(sizes, best_acc)\n", 2043 | "plt.title('Evolution of performance when increasing the training size, Zoom on the [0-10000] size zone')\n", 2044 | "plt.xlabel('Training size')\n", 2045 | "plt.ylabel('Accuracy')\n", 2046 | "plt.xlim([0, 10000])\n", 2047 | "plt.show()\n", 2048 | "\n", 2049 | "plt.plot(np.log(sizes)/np.log(10), best_acc)\n", 2050 | "plt.title('Evolution of performance when increasing the training size, with log scale for size')\n", 2051 | "plt.xlabel('Training size (log)')\n", 2052 | "plt.ylabel('Accuracy')\n", 2053 | "plt.show()" 2054 | ] 2055 | }, 2056 | { 2057 | "cell_type": "markdown", 2058 | "metadata": {}, 2059 | "source": [ 2060 | "- The first observation is, even with 50 samples only, we get a pretty great accuracy of 0.85!\n", 2061 | "- Then we see that the learning progress is very consequent when going from a size of 50 to 1000 samples\n", 2062 | "- The ULMFit beats the reported score from FastText (~0.92) when using 1000 samples only! Note that the reported score from FastText is from a training using the whole training data (3.6M samples)\n", 2063 | "- The accuracy continues to rise when we increase the training size, but with a lower speed. Here the trade-off comes, where you have to decide whether the extra 0.1% in accuracy is worth paying for more labeled data!\n", 2064 | "- From the log-scale graph we might expect even greater results when raining the training size. We have 4.6M training reviews so we could get orders of magnitude more so we could expect reaching 0.95 accuracy or more with the full dataset." 2065 | ] 2066 | } 2067 | ], 2068 | "metadata": { 2069 | "kernelspec": { 2070 | "display_name": "Python 3", 2071 | "language": "python", 2072 | "name": "python3" 2073 | }, 2074 | "language_info": { 2075 | "codemirror_mode": { 2076 | "name": "ipython", 2077 | "version": 3 2078 | }, 2079 | "file_extension": ".py", 2080 | "mimetype": "text/x-python", 2081 | "name": "python", 2082 | "nbconvert_exporter": "python", 2083 | "pygments_lexer": "ipython3", 2084 | "version": "3.6.5" 2085 | } 2086 | }, 2087 | "nbformat": 4, 2088 | "nbformat_minor": 2 2089 | } 2090 | -------------------------------------------------------------------------------- /source/convnet-dataviz.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Lesson 1 Experiments\n", 8 | "This section just reproduces lesson 1 logic using my own code and with 30 tennis and 30 basketball player images. I chose all male players for simplicity. 
" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": null, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "# Put these at the top of every notebook, to get automatic reloading and inline plotting\n", 18 | "%reload_ext autoreload\n", 19 | "%autoreload 2\n", 20 | "%matplotlib inline" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "# This file contains all the main external libs we'll use\n", 30 | "from fastai.imports import *\n", 31 | "from fastai.transforms import *\n", 32 | "from fastai.conv_learner import *\n", 33 | "from fastai.model import *\n", 34 | "from fastai.dataset import *\n", 35 | "from fastai.sgdr import *\n", 36 | "from fastai.plots import *\n", 37 | "from typing import List, Union\n", 38 | "from pathlib import Path\n" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "## Download the Sample Data\n", 46 | "Only execute the cell below once! If the commands below don't work, try the direct link [here](https://1drv.ms/u/s!AkhwiUY5vHPCs03Q26908HIwKFkG)." 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "!wget 'https://onedrive.live.com/download?cid=C273BC3946897048&resid=C273BC3946897048%216605&authkey=AIVFQLj7IoJYiz4' -O foo.zip\n", 56 | "!unzip -d data foo.zip \n", 57 | "!rm foo.zip" 58 | ] 59 | }, 60 | { 61 | "cell_type": "markdown", 62 | "metadata": {}, 63 | "source": [ 64 | "## Load the Sample Data" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": null, 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [ 73 | "sz=224\n", 74 | "path = Path('data/tennisbball')\n", 75 | "path.absolute(), list(path.glob('*'))" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "sample = plt.imread(next(iter((path / 'valid' / 'tennis').iterdir())))\n", 85 | "plt.imshow(sample)\n", 86 | "plt.figure()\n", 87 | "sample = plt.imread(next(iter((path / 'valid' / 'bball').iterdir())))\n", 88 | "plt.imshow(sample)" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "sample.shape, sample[:4,:4]" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [ 106 | "torch.cuda.is_available(),torch.backends.cudnn.enabled" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "metadata": {}, 112 | "source": [ 113 | "## Construct the Model\n", 114 | "Define the model architecture" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "#tfms_from_model -- model based image transforms (preprocessing stats)\n", 124 | "arch=resnet50\n", 125 | "data = ImageClassifierData.from_paths(path, test_name='test', test_with_labels=True, tfms=tfms_from_model(arch, sz))\n", 126 | "\n", 127 | "#precompute=True to save conv layer activations! pass False if you want to run the data viz below\n", 128 | "learner = ConvLearner.pretrained(f=arch, data=data, precompute=False)" 129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "metadata": {}, 134 | "source": [ 135 | "## Train a Model\n", 136 | "This section trains a model using transfer learning." 
137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": null, 142 | "metadata": {}, 143 | "outputs": [], 144 | "source": [ 145 | "\n", 146 | "learner.fit(0.01, 15)" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [ 155 | "#uncomment line below to save the model\n", 156 | "\n", 157 | "#learner.save('tennis_v_bball.lrnr')" 158 | ] 159 | }, 160 | { 161 | "cell_type": "markdown", 162 | "metadata": {}, 163 | "source": [ 164 | "## Load/Visualize an Existing Model\n", 165 | "Or if you've already trained a model, skip the above section and start from here." 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": null, 171 | "metadata": {}, 172 | "outputs": [], 173 | "source": [ 174 | "learner.load('tennis_v_bball.lrnr')" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": null, 180 | "metadata": {}, 181 | "outputs": [], 182 | "source": [ 183 | "probs = np.exp(learner.predict())\n", 184 | "probs" 185 | ] 186 | }, 187 | { 188 | "cell_type": "code", 189 | "execution_count": null, 190 | "metadata": {}, 191 | "outputs": [], 192 | "source": [ 193 | "#TODO: improve\n", 194 | "def display_images(images:List[Union[Path, np.ndarray]], columns:int, titles:List[str]=None, figsize=None) -> None:\n", 195 | " if not titles:\n", 196 | " titles = [f'Image {i+1}' for i in range(len(images))]\n", 197 | " rows = len(images) // columns + int(len(images) % columns > 0)\n", 198 | " if figsize is None:\n", 199 | " figsize = (60,60)\n", 200 | " plt.figure(figsize=figsize)\n", 201 | " for i, (image, title) in enumerate(zip(images, titles)):\n", 202 | " if isinstance(image, Path):\n", 203 | " image = np.array(PIL.Image.open(image))\n", 204 | " plt.subplot(rows, columns, i+1)\n", 205 | " plt.imshow(image)\n", 206 | " plt.title(title, fontsize=10*columns)\n", 207 | " plt.axis('off')" 208 | ] 209 | }, 210 | { 211 | "cell_type": "code", 212 | "execution_count": null, 213 | "metadata": {}, 214 | "outputs": [], 215 | "source": [ 216 | "#val images\n", 217 | "predictions = probs.argmax(axis=1)\n", 218 | "images, titles = [], []\n", 219 | "for prob, pclass, fname in zip(probs, predictions, data.val_ds.fnames):\n", 220 | " images.append(path / fname)\n", 221 | " titles.append(f'{fname} -- {prob[pclass]:.{3}f} ({data.classes[pclass]})')\n", 222 | " \n", 223 | "display_images(images, 4, titles)" 224 | ] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "execution_count": null, 229 | "metadata": {}, 230 | "outputs": [], 231 | "source": [ 232 | "test_probs = np.exp(learner.predict(is_test=True))\n", 233 | "test_predictions = test_probs.argmax(axis=1)\n", 234 | "\n", 235 | "#test images\n", 236 | "images, titles = [],[]\n", 237 | "for prob, pclass, fname in zip(test_probs, test_predictions, data.test_ds.fnames):\n", 238 | " images.append(path / fname)\n", 239 | " titles.append(f'{fname} -- {prob[pclass]:.{3}f} ({data.classes[pclass]})')\n", 240 | " \n", 241 | "display_images(images, 4, titles)" 242 | ] 243 | }, 244 | { 245 | "cell_type": "markdown", 246 | "metadata": {}, 247 | "source": [ 248 | "## Dataviz -- Activations" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": null, 254 | "metadata": {}, 255 | "outputs": [], 256 | "source": [ 257 | "#check out the model structure\n", 258 | "model = learner.model\n", 259 | "model" 260 | ] 261 | }, 262 | { 263 | "cell_type": "code", 264 | "execution_count": null, 265 | "metadata": {}, 266 | "outputs": [], 267 | 
"source": [ 268 | "#\n", 269 | "# utilize torch hooks to capture the activations for any conv layer. for simplicity we use a \n", 270 | "# batch size of 1.\n", 271 | "#\n", 272 | "class ActivationHook:\n", 273 | " def __init__(self):\n", 274 | " self.output = []\n", 275 | " \n", 276 | " def __call__(self, module, input, output):\n", 277 | " self.output = output.data\n", 278 | " \n", 279 | "def find_layers(module, ltype): \n", 280 | " rv = []\n", 281 | " if isinstance(module, ltype):\n", 282 | " rv.append(module)\n", 283 | " else:\n", 284 | " for c in module.children():\n", 285 | " rv.extend(find_layers(c, ltype))\n", 286 | " \n", 287 | " return rv\n", 288 | "\n", 289 | "def capture_activations(model, x):\n", 290 | " layers = find_layers(model, nn.Conv2d)\n", 291 | " hooks = [ActivationHook() for _ in layers]\n", 292 | " handles = [conv.register_forward_hook(hook) for conv, hook in zip(layers, hooks)]\n", 293 | " model(x)\n", 294 | " for h in handles:\n", 295 | " h.remove()\n", 296 | " \n", 297 | " return [h.output for h in hooks]\n", 298 | "\n", 299 | "bs = data.bs\n", 300 | "data.bs = 1\n", 301 | "dl = data.get_dl(data.test_ds, False) \n", 302 | "i = iter(dl)\n", 303 | "ball_x = next(i)[0]\n", 304 | "noball_x = next(i)[0]\n", 305 | "data.bs = bs" 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": null, 311 | "metadata": {}, 312 | "outputs": [], 313 | "source": [ 314 | "ball_activations = capture_activations(model, Variable(ball_x))\n", 315 | "noball_activations = capture_activations(model, Variable(noball_x))\n", 316 | "for i, layer_output in enumerate(ball_activations):\n", 317 | " print(f'Layer {i}: {layer_output.squeeze().shape}')" 318 | ] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "execution_count": null, 323 | "metadata": { 324 | "scrolled": false 325 | }, 326 | "outputs": [], 327 | "source": [ 328 | "#layer 5, filter 18, 36 seems to like circular type things\n", 329 | "layer_idx = 0\n", 330 | "images = []\n", 331 | "titles = []\n", 332 | "num_filters = ball_activations[layer_idx].shape[1]\n", 333 | "asize = ball_activations[layer_idx].shape[2]\n", 334 | "\n", 335 | "def filter_activations_to_image(activations, lidx, fidx):\n", 336 | " a = activations[lidx].squeeze() #choose conv layer & discard batch dimension\n", 337 | " a = a[fidx] #choose conv filter\n", 338 | " a = (a - a.mean())/(3*a.std()) + 0.5 #center and scale down\n", 339 | " a = a.clamp(0, 1).numpy() # and finally clamp \n", 340 | " return a\n", 341 | "\n", 342 | "buff_size = 10\n", 343 | "for filter_idx in range(num_filters):\n", 344 | " a0 = filter_activations_to_image(ball_activations, layer_idx, filter_idx)\n", 345 | " a1 = filter_activations_to_image(noball_activations, layer_idx, filter_idx)\n", 346 | " z = np.hstack([a0, np.ones((asize, 10)), a1])\n", 347 | " plt.imshow(z, cmap='gray')\n", 348 | " plt.axis('off')\n", 349 | " plt.title(f'Filter {filter_idx}')\n", 350 | " plt.show()\n" 351 | ] 352 | }, 353 | { 354 | "cell_type": "markdown", 355 | "metadata": {}, 356 | "source": [ 357 | "## DataViz -- Filters\n", 358 | "\n", 359 | "We can also look at filters. This is easiest at the first layer where each filter is 3 dimensional." 
360 | ] 361 | }, 362 | { 363 | "cell_type": "code", 364 | "execution_count": null, 365 | "metadata": {}, 366 | "outputs": [], 367 | "source": [ 368 | "import matplotlib.colors as mc\n", 369 | "import math\n", 370 | "conv = find_layers(learner.model, nn.Conv2d)[0]\n", 371 | "weight = conv.weight.data.numpy()\n", 372 | "\n", 373 | "num_filters, depth, w, h = weight.shape\n", 374 | "\n", 375 | "rows = int(num_filters**0.5)\n", 376 | "cols = int(math.ceil(num_filters/rows))\n", 377 | "border = 1\n", 378 | "img = np.zeros((depth, rows*h + (1+rows)*border, cols*w + (1+cols)*border))\n", 379 | "for f in range(num_filters):\n", 380 | " r = f // rows\n", 381 | " c = f % cols\n", 382 | " x = border + r * (w+border)\n", 383 | " y = border + c * (w+border)\n", 384 | " norm = mc.Normalize()\n", 385 | " img[:, x:x+w, y:y+h] = norm(weight[f, :, :, :])\n", 386 | "\n", 387 | "plt.figure(figsize=(12,12))\n", 388 | "plt.imshow(img.transpose(1,2,0))\n", 389 | "_ = plt.axis('off')" 390 | ] 391 | }, 392 | { 393 | "cell_type": "markdown", 394 | "metadata": {}, 395 | "source": [ 396 | "We can also visualize subsequent layers, though it's not so pretty. We can map each dimension of each filter back into grayscale." 397 | ] 398 | }, 399 | { 400 | "cell_type": "code", 401 | "execution_count": null, 402 | "metadata": {}, 403 | "outputs": [], 404 | "source": [ 405 | "# for i, conv in enumerate(find_layers(learner.model, nn.Conv2d)):\n", 406 | "# print(conv, conv.weight.shape)\n", 407 | "weight = find_layers(learner.model, nn.Conv2d)[2].weight.data.numpy()\n", 408 | "num_filters, depth, w, h = weight.shape\n", 409 | "rows = num_filters\n", 410 | "cols = depth\n", 411 | "border = 1\n", 412 | "img = np.zeros((rows*h + (1+rows)*border, cols*w + (1+cols)*border))\n", 413 | "for f in range(num_filters):\n", 414 | " norm = mc.Normalize()\n", 415 | " normed = norm(weight[f, :, :, :]) #normalize over all the weights in a filter\n", 416 | " for d in range(depth):\n", 417 | " r = f\n", 418 | " c = d\n", 419 | " x = border + r * (w+border)\n", 420 | " y = border + c * (w+border)\n", 421 | " img[x:x+w, y:y+h] = normed[d]\n", 422 | "\n", 423 | "plt.figure(figsize=(18,18))\n", 424 | "plt.imshow(img, cmap='gray')\n", 425 | "_ = plt.axis('off')\n" 426 | ] 427 | }, 428 | { 429 | "cell_type": "markdown", 430 | "metadata": {}, 431 | "source": [ 432 | "## Occlusion\n", 433 | "We can also mask out portions of the image by sliding a gray block over the image repeatedly and recording how the predictions change." 
434 | ] 435 | }, 436 | { 437 | "cell_type": "code", 438 | "execution_count": null, 439 | "metadata": {}, 440 | "outputs": [], 441 | "source": [ 442 | "block_size = 50\n", 443 | "image_path = path / data.test_ds.fnames[0]\n", 444 | "image = open_image(image_path)\n", 445 | "image[50:250, 50:250] = np.full((200,200,3), 0.75)\n", 446 | "scaled_image = Scale(sz=224).do_transform(image, False)\n", 447 | "# image[0:block_size, 0:block_size] = np.full((block_size,block_size,3), 0.75)\n", 448 | "plt.imshow(image)\n", 449 | "_ = plt.axis('off')" 450 | ] 451 | }, 452 | { 453 | "cell_type": "code", 454 | "execution_count": null, 455 | "metadata": {}, 456 | "outputs": [], 457 | "source": [ 458 | "block_size = 50\n", 459 | "image_path = path / data.test_ds.fnames[0]\n", 460 | "orig_image = open_image(image_path)\n", 461 | "# image[0:200, 0:200] = np.full((200,200,3), 0.75)\n", 462 | "scaled_image = Scale(sz=224).do_transform(orig_image, False)\n", 463 | "# image[0:block_size, 0:block_size] = np.full((block_size,block_size,3), 0.75)\n", 464 | "# plt.imshow(image)\n", 465 | "plt.axis('off')\n", 466 | "\n", 467 | "#the prediction for the smaller image should be essentially unchanged\n", 468 | "print(learner.model(VV(tfms_from_model(arch, sz)[1](scaled_image)).unsqueeze(0)).exp())\n", 469 | "w,h,_ = scaled_image.shape\n", 470 | "learner.model.eval()\n", 471 | "t0 = time.time()\n", 472 | "prob_map = np.zeros((2, w, h))\n", 473 | "\n", 474 | "z = 0\n", 475 | "\n", 476 | "#TODO: add stride for efficiency.\n", 477 | "for x in tqdm(range(1 - block_size, w)):\n", 478 | " for y in range(1 - block_size, h):\n", 479 | " image = np.array(scaled_image)\n", 480 | " x0, x1 = max(0, x), min(w, x + block_size)\n", 481 | " y0, y1 = max(0, y), min(h, y + block_size)\n", 482 | " image[x0:x1,y0:y1] = np.full((x1-x0, y1-y0, 3), 0.75)\n", 483 | " image = tfms_from_model(arch, sz)[1](image)\n", 484 | " predictions = learner.model(VV(image).unsqueeze(0)) \n", 485 | " prob_map[0,x0:x1,y0:y1] += predictions.exp().data[0][0]\n", 486 | " prob_map[1,x0:x1,y0:y1] += 1\n" 487 | ] 488 | }, 489 | { 490 | "cell_type": "code", 491 | "execution_count": null, 492 | "metadata": {}, 493 | "outputs": [], 494 | "source": [ 495 | "np.save('probs-heatmap.npy', prob_map)" 496 | ] 497 | }, 498 | { 499 | "cell_type": "code", 500 | "execution_count": null, 501 | "metadata": {}, 502 | "outputs": [], 503 | "source": [ 504 | "heatmap = prob_map[0]/prob_map[1]\n", 505 | "plt.subplot(1,2,1)\n", 506 | "plt.imshow(1 - heatmap, cmap='jet')\n", 507 | "plt.axis('off')\n", 508 | "plt.subplot(1,2,2)\n", 509 | "plt.imshow(orig_image)\n", 510 | "_ = plt.axis('off')" 511 | ] 512 | }, 513 | { 514 | "cell_type": "code", 515 | "execution_count": null, 516 | "metadata": {}, 517 | "outputs": [], 518 | "source": [ 519 | "block_size = 50\n", 520 | "image_path = path / 'valid/bball/29.jpg'\n", 521 | "orig_image = open_image(image_path)\n", 522 | "# image[0:200, 0:200] = np.full((200,200,3), 0.75)\n", 523 | "scaled_image = Scale(sz=224).do_transform(orig_image, False)\n", 524 | "# orig_image[0:block_size, 0:block_size] = np.full((block_size,block_size,3), 0.75)\n", 525 | "# plt.imshow(orig_image)\n", 526 | "# plt.axis('off')\n", 527 | "\n", 528 | "#the prediction for the smaller image should be essentially unchanged\n", 529 | "print(learner.model(VV(tfms_from_model(arch, sz)[1](scaled_image)).unsqueeze(0)).exp())\n", 530 | "w,h,_ = scaled_image.shape\n", 531 | "learner.model.eval()\n", 532 | "t0 = time.time()\n", 533 | "prob_map = np.zeros((2, w, h))\n", 534 | "\n", 535 
| "z = 0\n", 536 | "\n", 537 | "#TODO: add stride for efficiency.\n", 538 | "for x in tqdm(range(1 - block_size, w)):\n", 539 | " for y in range(1 - block_size, h):b\n", 540 | " image = np.array(scaled_image)\n", 541 | " x0, x1 = max(0, x), min(w, x + block_size)\n", 542 | " y0, y1 = max(0, y), min(h, y + block_size)\n", 543 | " image[x0:x1,y0:y1] = np.full((x1-x0, y1-y0, 3), 0.75)\n", 544 | " image = tfms_from_model(arch, sz)[1](image)\n", 545 | " predictions = learner.model(VV(image).unsqueeze(0)) \n", 546 | " prob_map[0,x0:x1,y0:y1] += predictions.exp().data[0][0]\n", 547 | " prob_map[1,x0:x1,y0:y1] += 1" 548 | ] 549 | }, 550 | { 551 | "cell_type": "code", 552 | "execution_count": null, 553 | "metadata": {}, 554 | "outputs": [], 555 | "source": [ 556 | "np.save('probs-giannis-heatmap.npy', prob_map)" 557 | ] 558 | }, 559 | { 560 | "cell_type": "code", 561 | "execution_count": null, 562 | "metadata": {}, 563 | "outputs": [], 564 | "source": [ 565 | "heatmap = prob_map[0]/prob_map[1]\n", 566 | "plt.subplot(1,2,1)\n", 567 | "plt.imshow(1 - heatmap, cmap='jet')\n", 568 | "plt.axis('off')\n", 569 | "plt.subplot(1,2,2)\n", 570 | "plt.imshow(orig_image)\n", 571 | "_ = plt.axis('off')" 572 | ] 573 | }, 574 | { 575 | "cell_type": "code", 576 | "execution_count": null, 577 | "metadata": {}, 578 | "outputs": [], 579 | "source": [ 580 | "block_size = 50\n", 581 | "image_path = path / 'valid/tennis/23.jpg'\n", 582 | "orig_image = open_image(image_path)\n", 583 | "# image[0:200, 0:200] = np.full((200,200,3), 0.75)\n", 584 | "scaled_image = Scale(sz=224).do_transform(orig_image, False)\n", 585 | "# orig_image[0:block_size, 0:block_size] = np.full((block_size,block_size,3), 0.75)\n", 586 | "plt.imshow(scaled_image)\n", 587 | "# plt.axis('off')\n", 588 | "\n", 589 | "#the prediction for the smaller image should be essentially unchanged\n", 590 | "print(learner.model(VV(tfms_from_model(arch, sz)[1](scaled_image)).unsqueeze(0)).exp())\n", 591 | "w,h,_ = scaled_image.shape\n", 592 | "learner.model.eval()\n", 593 | "t0 = time.time()\n", 594 | "prob_map = np.zeros((2, w, h))\n", 595 | "\n", 596 | "z = 0\n", 597 | "\n", 598 | "#TODO: add stride for efficiency.\n", 599 | "for x in tqdm(range(1 - block_size, w)):\n", 600 | " for y in range(1 - block_size, h):\n", 601 | " image = np.array(scaled_image)\n", 602 | " x0, x1 = max(0, x), min(w, x + block_size)\n", 603 | " y0, y1 = max(0, y), min(h, y + block_size)\n", 604 | " image[x0:x1,y0:y1] = np.full((x1-x0, y1-y0, 3), 0.75)\n", 605 | " image = tfms_from_model(arch, sz)[1](image)\n", 606 | " predictions = learner.model(VV(image).unsqueeze(0)) \n", 607 | " prob_map[0,x0:x1,y0:y1] += predictions.exp().data[0][0]\n", 608 | " prob_map[1,x0:x1,y0:y1] += 1" 609 | ] 610 | }, 611 | { 612 | "cell_type": "code", 613 | "execution_count": null, 614 | "metadata": {}, 615 | "outputs": [], 616 | "source": [ 617 | "np.save('probs-tennis-heatmap.npy', prob_map)" 618 | ] 619 | }, 620 | { 621 | "cell_type": "code", 622 | "execution_count": null, 623 | "metadata": {}, 624 | "outputs": [], 625 | "source": [ 626 | "heatmap = prob_map[0]/prob_map[1]\n", 627 | "plt.subplot(1,2,1)\n", 628 | "plt.imshow(heatmap, cmap='jet')\n", 629 | "plt.axis('off')\n", 630 | "plt.subplot(1,2,2)\n", 631 | "plt.imshow(orig_image)\n", 632 | "_ = plt.axis('off')" 633 | ] 634 | } 635 | ], 636 | "metadata": { 637 | "kernelspec": { 638 | "display_name": "Python 3", 639 | "language": "python", 640 | "name": "python3" 641 | }, 642 | "language_info": { 643 | "codemirror_mode": { 644 | "name": "ipython", 645 | 
"version": 3 646 | }, 647 | "file_extension": ".py", 648 | "mimetype": "text/x-python", 649 | "name": "python", 650 | "nbconvert_exporter": "python", 651 | "pygments_lexer": "ipython3", 652 | "version": "3.6.4" 653 | } 654 | }, 655 | "nbformat": 4, 656 | "nbformat_minor": 2 657 | } 658 | -------------------------------------------------------------------------------- /source/gpu.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torchvision import datasets 5 | from torch.utils.data import DataLoader 6 | import numpy as np 7 | import time 8 | 9 | class Model(nn.Module): 10 | def __init__(self): 11 | super(Model, self).__init__() 12 | self.hidden = nn.Linear(784, 50) 13 | self.final = nn.Linear(50, 10) 14 | 15 | def forward(self, features): 16 | x = self.hidden(features.float().view(len(features), -1)) 17 | x = self.final(x) 18 | return F.log_softmax(x, dim=1) 19 | 20 | def fun_with_gpus(): 21 | t1 = torch.cuda.FloatTensor(20,20) 22 | t2 = torch.cuda.FloatTensor(20,20) 23 | t3 = t1.matmul(t2) 24 | print(f"What is t3? Well it's a {type(t3)}") 25 | 26 | def this_wont_work_dummy(features, labels): 27 | dl = DataLoader(list(zip(features, labels)), batch_size=5) 28 | model = Model() 29 | model.hidden.cuda() 30 | 31 | batch = next(iter(dl)) 32 | batch = [torch.autograd.Variable(b) for b in batch] 33 | return model.forward(*batch[:-1]) 34 | 35 | 36 | def view_number(data:torch.FloatTensor, title:str): 37 | import matplotlib.pyplot as plt 38 | plt.imshow(data.numpy()) 39 | plt.title(title) 40 | plt.show() 41 | 42 | def data_shipping_experiment(n:int): 43 | #let's run all on the CPU 44 | array1 = np.random.randn(200,200) 45 | array2 = np.random.randn(200,200) 46 | t0 = time.time() 47 | for i in range(n): 48 | array3 = array1.matmul(array2) 49 | array1 = array3 50 | t1 = time.time() 51 | 52 | print(f'CPU only operations took {t1-t0}') 53 | 54 | 55 | #let's run all on the GPU 56 | tensor1 = torch.cuda.FloatTensor(200, 200) 57 | tensor2 = torch.cuda.FloatTensor(200, 200) 58 | 59 | t0 = time.time() 60 | for i in range(n): 61 | tensor3 = tensor1.matmul(tensor2) 62 | del tensor1 63 | tensor1 = tensor3 64 | t1 = time.time() 65 | 66 | print(f'GPU only operations took {t1-t0}') 67 | 68 | #let's ship data like a mofo 69 | tensor1 = torch.FloatTensor(200, 200) 70 | tensor2 = torch.FloatTensor(200, 200) 71 | 72 | t0 = time.time() 73 | for i in range(n): 74 | ctensor1 = tensor1.cuda() 75 | ctensor2 = tensor2.cuda() 76 | ctensor3 = ctensor1.matmul(ctensor2) 77 | tensor1 = ctensor3.cpu() 78 | 79 | del ctensor1 80 | del ctensor2 81 | del ctensor3 82 | 83 | t1 = time.time() 84 | 85 | print(f'data shipping took {t1-t0}') 86 | 87 | if __name__ == '__main__': 88 | if not torch.cuda.is_available(): 89 | raise ValueError('a GPU is required for these examples') 90 | 91 | _data = datasets.MNIST('/tmp/data', train=True, download=True) 92 | 93 | # if you want to look at some images... 94 | # view_number(_data.train_data[10], str(_data.train_labels[10])) 95 | 96 | data_shipping_experiment(100000) 97 | this_wont_work_dummy(_data.train_data, _data.train_labels) -------------------------------------------------------------------------------- /source/logsumexp.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module demos the LogSumExp trick. 
See https://blog.feedly.com/?p=10329 3 | """ 4 | import math 5 | from typing import List 6 | import logging 7 | import time 8 | 9 | 10 | def log_sum_exp_naive(X:List[float]) -> float: 11 | """ 12 | a naive calculation of LogSumExp expressions 13 | :param X: a list of numbers 14 | :return: the LogSumExp calculation 15 | """ 16 | logging.debug('START lse_naive(%s)', X) 17 | try: 18 | summation = 0 19 | for x_i in X: 20 | v = math.e**x_i 21 | logging.debug('e^%f = %.5f', x_i, v) 22 | summation += v 23 | return math.log(summation) 24 | except Exception as e: 25 | logging.debug('lse_naive FAILURE') 26 | raise e 27 | 28 | 29 | def log_sum_exp(X:List[float]) -> float: 30 | """ 31 | a better calculation of LogSumExp expressions 32 | :param X: a list of numbers 33 | :return: the LogSumExp calculation 34 | """ 35 | logging.debug('START lse(%s)', X) 36 | c = max(X) 37 | summation = 0 38 | for x_i in X: 39 | v = math.e ** (x_i - c) 40 | logging.debug('e^(%f - c) = %.5f', x_i, v) 41 | summation += v #accumulate the shifted exponentials 42 | 43 | logging.debug('c=%.5f; summation=%.5f', c, summation) 44 | 45 | return math.log(summation) + c 46 | 47 | 48 | def log_softmax(j:int, X:List[float], naive:bool=False) -> float: 49 | """ 50 | a log softmax calculation 51 | :param j: an index into X that selects the numerator value. 52 | :param X: a list of numbers 53 | :param naive: use the naive LogSumExp method 54 | :return: the log softmax calculation 55 | """ 56 | lse = log_sum_exp_naive if naive else log_sum_exp 57 | return X[j] - lse(X) 58 | 59 | 60 | if __name__ == '__main__': 61 | logging.basicConfig(level='INFO') # change to debug to print intermediate calculations 62 | 63 | def _run_example(j:int, X:List[float]) -> None: 64 | print('*' * 30) 65 | print(f'* X={X}') 66 | print(f'* j={j}\n') 67 | time.sleep(0.001) # so the logs get printed out nicely 68 | y1 = log_sum_exp(X) 69 | try: 70 | y2 = log_sum_exp_naive(X) 71 | except (OverflowError, ValueError): 72 | y2 = 'bombed!' 73 | if isinstance(y2, float) and abs(y1 - y2) > 1e-6: 74 | raise ValueError(f'calculation error {y1} != {y2}') 75 | 76 | print(f'logsumexp({X}): {y1}') 77 | print(f'logsumexp({X}): {y2} (naive)') 78 | 79 | ls = log_softmax(j, X) 80 | print(f'log(softmax({j}, {X})) = {ls} --> softmax = {math.e**ls}') 81 | if isinstance(y2, float): 82 | ls = log_softmax(j, X, True) 83 | print(f'log(softmax({j}, {X}, naive)) = {ls}') 84 | 85 | print('*' * 30,'\n') 86 | 87 | # the examples from the blog post plus a small numerically stable example 88 | _examples = [[1000]*3, [-1000]*3, [1,1,1]] 89 | 90 | for _example in _examples: 91 | _run_example(0, _example) 92 | 93 | # one huge X value 94 | _run_example(0, [1000, 1, 2, 3]) 95 | 96 | # one huge negative X value 97 | _run_example(0, [-1000, 1, 2, 3]) 98 | 99 | # run this in debug mode to see what happens to the contributions of the values < 1 in the logsumexp calculation and 100 | # also what happens to the softmax probability distribution. 101 | _run_example(0, [1000, 1e-5, 1e-10]) 102 | _run_example(1, [1000, 1e-5, 1e-10]) 103 | _run_example(2, [1000, 1e-5, 1e-10]) -------------------------------------------------------------------------------- /source/vgg_params.py: -------------------------------------------------------------------------------- 1 | import keras.applications.vgg16 as v 2 | 3 | if __name__ == '__main__': 4 | model = v.VGG16() 5 | print(f'{model.count_params()} parameters??? Yowsers!') 6 | --------------------------------------------------------------------------------
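As a closing note on vgg_params.py: here is a back-of-the-envelope check of where that parameter count comes from. This is a minimal standalone sketch, not a file in the repo, and it assumes the standard VGG16 layer shapes (thirteen 3x3 conv layers plus three fully connected layers on 224x224 inputs):

#each conv layer holds (3*3*c_in + 1)*c_out parameters (weights plus biases);
#each fully connected layer holds (n_in + 1)*n_out
conv_shapes = [(3, 64), (64, 64), (64, 128), (128, 128), (128, 256), (256, 256), (256, 256),
               (256, 512), (512, 512), (512, 512), (512, 512), (512, 512), (512, 512)]
fc_shapes = [(512 * 7 * 7, 4096), (4096, 4096), (4096, 1000)]
total = sum((3 * 3 * c_in + 1) * c_out for c_in, c_out in conv_shapes) \
        + sum((n_in + 1) * n_out for n_in, n_out in fc_shapes)
print(f'{total:,}')  #138,357,544 -- the same figure count_params() reports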