├── data
│   ├── .blank
│   └── sloth_config.py
├── se_resnext50.ipynb
├── utils.py
├── README.md
├── resize.ipynb
├── oversample.ipynb
├── only_known_train.ipynb
├── classification_and_metric_learning.ipynb
└── siamese_network_prototype.ipynb
/data/.blank:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/data/sloth_config.py:
--------------------------------------------------------------------------------
1 | LABELS = (
2 | {"attributes": {"type": "rect",
3 | "class": "fluke",
4 | },
5 | "item": "sloth.items.RectItem",
6 | "inserter": "sloth.items.RectItemInserter",
7 | "text": "fluke"
8 | },
9 | {"attributes": {"type": "point",
10 | "class": "left"},
11 | "item": "sloth.items.PointItem",
12 | "inserter": "sloth.items.PointItemInserter",
13 | "text": "tip_on_left"
14 | },
15 | {"attributes": {"type": "point",
16 | "class": "notch"},
17 | "item": "sloth.items.PointItem",
18 | "inserter": "sloth.items.PointItemInserter",
19 | "text": "notch"
20 | },
21 | {"attributes": {"type": "point",
22 | "class": "right"},
23 | "item": "sloth.items.PointItem",
24 | "inserter": "sloth.items.PointItemInserter",
25 | "text": "tip_on_right"
26 | }
27 | )
28 |
--------------------------------------------------------------------------------
/se_resnext50.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "I didn't manage to train this model - might be because I ended up using too a small batch size, maybe I gave up too quickly, or maybe for some other reason.\n",
8 | "\n",
9 | "Leaving this here as a reference - it modifies the base architecture so that it can be run with any input size and uses forward hook to grab cnn activations."
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "class CustomModel(nn.Module):\n",
19 | " def __init__(self):\n",
20 | " super().__init__()\n",
21 | " self.cnn = pretrainedmodels.se_resnext50_32x4d()\n",
22 | " self.cnn.avg_pool = Lambda(lambda x: x.view(BS, 2048, -1).mean(-1)) # this allows the model to be run on any size of input\n",
23 | " self.cnn_out = []\n",
24 | " nn.Module.register_forward_hook(self.cnn.layer4, self.grab_output)\n",
25 | " \n",
26 | " self.head = create_head(4096, 5004, [2048])\n",
27 | " self.ada_concat = AdaptiveConcatPool2d(1)\n",
28 | " \n",
29 | " def grab_output(self, mod, inp, out):\n",
30 | " self.cnn_out.append(out)\n",
31 | " \n",
32 | " def forward(self, ims_a, ims_b):\n",
33 | " self.cnn(ims_a)\n",
34 | " cnn_out_a = self.cnn_out[0]\n",
35 | " out_a = self.head(cnn_out_a)\n",
36 | " self.cnn_out = []\n",
37 | " \n",
38 | " self.cnn(ims_b)\n",
39 | " cnn_out_b = self.cnn_out[0]\n",
40 | " out_b = self.head(cnn_out_b)\n",
41 | " self.cnn_out = []\n",
42 | " \n",
43 | " return out_a, out_b, self.ada_concat(cnn_out_a).squeeze(), self.ada_concat(cnn_out_b).squeeze()"
44 | ]
45 | }
46 | ],
47 | "metadata": {
48 | "kernelspec": {
49 | "display_name": "Python 3",
50 | "language": "python",
51 | "name": "python3"
52 | },
53 | "language_info": {
54 | "codemirror_mode": {
55 | "name": "ipython",
56 | "version": 3
57 | },
58 | "file_extension": ".py",
59 | "mimetype": "text/x-python",
60 | "name": "python",
61 | "nbconvert_exporter": "python",
62 | "pygments_lexer": "ipython3",
63 | "version": "3.7.0"
64 | }
65 | },
66 | "nbformat": 4,
67 | "nbformat_minor": 2
68 | }
69 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import pandas as pd
4 | import random
5 | import string
6 |
7 | # https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/average_precision.py
8 | def apk(actual, predicted, k=10):
9 | if len(predicted)>k:
10 | predicted = predicted[:k]
11 |
12 | score = 0.0
13 | num_hits = 0.0
14 |
15 | for i,p in enumerate(predicted):
16 | if p in actual and p not in predicted[:i]:
17 | num_hits += 1.0
18 | score += num_hits / (i+1.0)
19 |
20 | if not actual:
21 | return 0.0
22 |
23 | return score / min(len(actual), k)
24 |
25 | def mapk(actual, predicted, k=10):
26 | return np.mean([apk(a,p,k) for a,p in zip(actual, predicted)])
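27 | # Quick sanity check of the metric, worked out by hand:
28 | #   mapk([[0], [1]], [[0, 2], [2, 1]], k=2) == (1.0 + 0.5) / 2 == 0.75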
27 |
28 | def map5kfast(preds, targs, k=10):
29 |     predicted_idxs = preds.sort(descending=True)[1]
30 |     top_k = predicted_idxs[:, :k] # was hardcoded to the top 5, which broke for any k > 5
31 |     scores = torch.zeros(len(preds), k).float()
32 |     for kk in range(k):
33 |         scores[:, kk] = (top_k[:, kk] == targs).float() / (kk + 1.0)
34 |     return scores.max(dim=1)[0].mean()
35 |
36 | def map5(preds,targs):
37 | if type(preds) is list:
38 |         return torch.cat([map5kfast(p, targs, 5).view(1) for p in preds]).mean() # was map5fast, which is undefined
39 | return map5kfast(preds,targs, 5)
40 |
41 | def top_5_preds(preds): return np.argsort(preds.numpy())[:, ::-1][:, :5]
42 |
43 | def top_5_pred_labels(preds, classes):
44 | top_5 = top_5_preds(preds)
45 | labels = []
46 | for i in range(top_5.shape[0]):
47 | labels.append(' '.join([classes[idx] for idx in top_5[i]]))
48 | return labels
49 |
50 | def create_submission(preds, data, name, classes=None):
51 | if not classes: classes = data.classes
52 | sub = pd.DataFrame({'Image': [path.name for path in data.test_ds.x.items]})
53 | sub['Id'] = top_5_pred_labels(preds, classes)
54 | sub.to_csv(f'subs/{name}.csv.gz', index=False, compression='gzip')
55 |
56 |
57 | def intersection(preds, targs):
58 | # preds and targs are of shape (bs, 4), pascal_voc format
59 | max_xy = torch.min(preds[:, 2:], targs[:, 2:])
60 | min_xy = torch.max(preds[:, :2], targs[:, :2])
61 | inter = torch.clamp((max_xy - min_xy), min=0)
62 | return inter[:, 0] * inter[:, 1]
63 |
64 | def area(boxes):
65 | return ((boxes[:, 2]-boxes[:, 0]) * (boxes[:, 3]-boxes[:, 1]))
66 |
67 | def union(preds, targs):
68 | return area(preds) + area(targs) - intersection(preds, targs)
69 |
70 | def IoU(preds, targs):
71 | return intersection(preds, targs) / union(preds, targs)
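72 | # Worked example: boxes [0,0,2,2] and [1,1,3,3] overlap on a 1x1 square,
73 | # so IoU = 1 / (4 + 4 - 1) ~= 0.1429:
74 | #   IoU(torch.tensor([[0.,0.,2.,2.]]), torch.tensor([[1.,1.,3.,3.]]))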
72 |
73 | def name(n=10, print_it=True):
74 | name = "".join(random.choice(string.ascii_lowercase) for _ in range(n))
75 | if print_it: print(name)
76 | return name
77 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Humpback Whale Identification Competition Starter Pack
2 |
3 | The code in this repo is all you need to make a first submission to the [Humpback Whale Identification Competition](https://www.kaggle.com/c/humpback-whale-identification). It uses the [fastai library](https://github.com/fastai/fastai), release 1.0.36.post1, for everything up to point 7 in the *Navigating through the repository* list below (this is important - you are likely to encounter an error if you use any other version of the library). From point 8 onward I switch to 1.0.39.
4 |
5 | For additional information please refer to discussion threads on Kaggle forums: [classification](https://www.kaggle.com/c/humpback-whale-identification/discussion/74647), [feature learning](https://www.kaggle.com/c/humpback-whale-identification/discussion/75984), [detection](...).
6 |
7 | **Some people [reported issues](https://github.com/radekosmulski/whale/issues/1) with running the first_submission notebook. If you encounter the issue, you should be okay to skip to the subsequent notebooks. The one that scores 0.760 on the LB is `only_known_train.ipynb`.**
8 |
9 | ## Making first submission
10 | 1. Install the [fastai library](https://github.com/fastai/fastai), specifically version 1.0.36.post1. The easiest way to do it is to follow the developer install as outlined in the README of the fastai repository. Once you perform the installation, navigate to the fastai directory and execute `git checkout 1.0.36.post1`. You can verify that this worked by executing the following inside jupyter notebook or a Python REPL:
11 | ```
12 | import fastai
13 | fastai.__version__
14 | ```
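15 | This should print `'1.0.36.post1'`.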
15 | 2. Clone this repository and `cd` into its `data` directory. Download the competition data by running `kaggle competitions download -c humpback-whale-identification`. You might need to agree to the competition rules on the competition website if you get a 403 error.
16 | 3. Create the train directory and extract the files by running `mkdir train && unzip train.zip -d train`
17 | 4. Do the same for test: `mkdir test && unzip test.zip -d test`
18 | 5. Open `first_submission.ipynb` in jupyter notebook and run all cells.
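19 | 
20 | For convenience, here are steps 2-4 condensed into a single shell session (assuming the repository was cloned into `whale`):
21 | ```
22 | cd whale/data
23 | kaggle competitions download -c humpback-whale-identification
24 | mkdir train && unzip train.zip -d train
25 | mkdir test && unzip test.zip -d test
26 | ```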
19 |
20 | ## Navigating through the repository
21 |
22 | Here is the order in which I worked on the notebooks:
23 | 1. [first_submission](https://github.com/radekosmulski/whale/blob/master/first_submission.ipynb) - getting all the basics in place
24 | 2. [new_whale_detector](https://github.com/radekosmulski/whale/blob/master/new_whale_detector.ipynb) - a binary classifier: known_whale / new_whale
25 | 3. [oversample](https://github.com/radekosmulski/whale/blob/master/oversample.ipynb) - addressing class imbalance
26 | 4. [only_known_research](https://github.com/radekosmulski/whale/blob/master/only_known_research.ipynb) - how to modify the architecture and what hyperparams to use
27 | 5. [only_known_train](https://github.com/radekosmulski/whale/blob/master/only_known_train.ipynb) - training on full dataset
28 | 6. [resize](https://github.com/radekosmulski/whale/blob/master/resize.ipynb) - resize the images before training to free up CPU
29 | 7. [siamese network](https://github.com/radekosmulski/whale/blob/master/siamese_network_prototype.ipynb) - a fully working prototype of a siamese network
30 | 8. **!!! Important !!!** - to make use of some of the new functionality available in fast.ai at this point I switch to 1.0.39.
31 | 9. [fluke detection](https://github.com/radekosmulski/whale/blob/master/fluke_detection.ipynb) - train a model to draw bounding boxes surrounding flukes
32 | 10. **!!! Important !!!** - here I switch to fastai master to incorporate a bug fix, will annotate with version once a new release comes out
33 | 11. [fluke detection redux](https://github.com/radekosmulski/whale/blob/master/fluke_detection_redux.ipynb) - better results, less code, works with current fastai master
34 | 12. [extract bboxes](https://github.com/radekosmulski/whale/blob/master/extract_bboxes.ipynb) - predicted bounding box extraction in images of specified size
35 | 13. [classification and metric learning](https://github.com/radekosmulski/whale/blob/master/classification_and_metric_learning.ipynb) - training the model to predict whale ids; places in the top 7% of the competition
36 |
--------------------------------------------------------------------------------
/resize.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "%matplotlib inline\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "from fastai.vision import *\n",
12 | "from fastai.core import parallel\n",
13 | "import re\n",
14 | "from pathlib import Path\n",
15 | "\n",
16 | "from utils import *"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "With simpler model, my CPU cannot keep up with feeding the GPU due to how long resizing takes.\n",
24 | "\n",
25 | "Let's resize the images to speed things up."
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 2,
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "SZ = 224\n",
35 | "NUM_WORKERS = 12"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 3,
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "PATH = Path('data')"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 4,
50 | "metadata": {},
51 | "outputs": [],
52 | "source": [
53 | "(PATH/f'train-{SZ}').mkdir(exist_ok=True)\n",
54 | "(PATH/f'test-{SZ}').mkdir(exist_ok=True)"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": 5,
60 | "metadata": {},
61 | "outputs": [],
62 | "source": [
63 | "def resize_img(path, _):\n",
64 | " PIL.Image.open(path).resize((SZ,SZ), resample=PIL.Image.BICUBIC).save((PATH/f'{path.parent.name}-{SZ}'/path.name))"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": 6,
70 | "metadata": {},
71 | "outputs": [
72 | {
73 | "data": {
74 | "text/html": [
75 | "\n",
76 | "
\n",
77 | " \n",
89 | "
\n",
90 | " 100.00% [33321/33321 00:26<00:00]\n",
91 | "
\n",
92 | " "
93 | ],
94 | "text/plain": [
95 | ""
96 | ]
97 | },
98 | "metadata": {},
99 | "output_type": "display_data"
100 | },
101 | {
102 | "name": "stdout",
103 | "output_type": "stream",
104 | "text": [
105 | "CPU times: user 10.3 s, sys: 1.61 s, total: 11.9 s\n",
106 | "Wall time: 28 s\n"
107 | ]
108 | }
109 | ],
110 | "source": [
111 | "%%time\n",
112 | "\n",
113 | "files = list((PATH/'train').iterdir()) + list((PATH/'test').iterdir())\n",
114 | "parallel(resize_img, files, NUM_WORKERS)"
115 | ]
116 | },
117 | {
118 | "cell_type": "code",
119 | "execution_count": 7,
120 | "metadata": {},
121 | "outputs": [
122 | {
123 | "data": {
124 | "text/plain": [
125 | "(25361, 7960)"
126 | ]
127 | },
128 | "execution_count": 7,
129 | "metadata": {},
130 | "output_type": "execute_result"
131 | }
132 | ],
133 | "source": [
134 | "len(list((PATH/f'train-{SZ}').iterdir())), len(list((PATH/f'test-{SZ}').iterdir()))"
135 | ]
136 | },
137 | {
138 | "cell_type": "code",
139 | "execution_count": 8,
140 | "metadata": {},
141 | "outputs": [
142 | {
143 | "data": {
144 | "text/plain": [
145 | "(25361, 7960)"
146 | ]
147 | },
148 | "execution_count": 8,
149 | "metadata": {},
150 | "output_type": "execute_result"
151 | }
152 | ],
153 | "source": [
154 | "len(list((PATH/f'train').iterdir())), len(list((PATH/f'test').iterdir()))"
155 | ]
156 | }
157 | ],
158 | "metadata": {
159 | "kernelspec": {
160 | "display_name": "Python 3",
161 | "language": "python",
162 | "name": "python3"
163 | },
164 | "language_info": {
165 | "codemirror_mode": {
166 | "name": "ipython",
167 | "version": 3
168 | },
169 | "file_extension": ".py",
170 | "mimetype": "text/x-python",
171 | "name": "python",
172 | "nbconvert_exporter": "python",
173 | "pygments_lexer": "ipython3",
174 | "version": "3.7.0"
175 | }
176 | },
177 | "nbformat": 4,
178 | "nbformat_minor": 2
179 | }
180 |
--------------------------------------------------------------------------------
/oversample.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "%matplotlib inline\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "from fastai.vision import *\n",
12 | "from fastai.metrics import accuracy\n",
13 | "from fastai.basic_data import *\n",
14 | "from skimage.util import montage\n",
15 | "import pandas as pd\n",
16 | "from torch import optim\n",
17 | "import re\n",
18 | "\n",
19 | "from utils import *"
20 | ]
21 | },
22 | {
23 | "cell_type": "markdown",
24 | "metadata": {},
25 | "source": [
26 | "## Prepare data"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": 2,
32 | "metadata": {},
33 | "outputs": [
34 | {
35 | "data": {
36 | "text/html": [
37 | "\n",
38 | "\n",
51 | "
\n",
52 | " \n",
53 | " \n",
54 | " | \n",
55 | " Image | \n",
56 | " Id | \n",
57 | "
\n",
58 | " \n",
59 | " \n",
60 | " \n",
61 | " | 0 | \n",
62 | " 0000e88ab.jpg | \n",
63 | " w_f48451c | \n",
64 | "
\n",
65 | " \n",
66 | " | 1 | \n",
67 | " 0001f9222.jpg | \n",
68 | " w_c3d896a | \n",
69 | "
\n",
70 | " \n",
71 | " | 2 | \n",
72 | " 00029d126.jpg | \n",
73 | " w_20df2c5 | \n",
74 | "
\n",
75 | " \n",
76 | " | 3 | \n",
77 | " 00050a15a.jpg | \n",
78 | " new_whale | \n",
79 | "
\n",
80 | " \n",
81 | " | 4 | \n",
82 | " 0005c1ef8.jpg | \n",
83 | " new_whale | \n",
84 | "
\n",
85 | " \n",
86 | "
\n",
87 | "
"
88 | ],
89 | "text/plain": [
90 | " Image Id\n",
91 | "0 0000e88ab.jpg w_f48451c\n",
92 | "1 0001f9222.jpg w_c3d896a\n",
93 | "2 00029d126.jpg w_20df2c5\n",
94 | "3 00050a15a.jpg new_whale\n",
95 | "4 0005c1ef8.jpg new_whale"
96 | ]
97 | },
98 | "execution_count": 2,
99 | "metadata": {},
100 | "output_type": "execute_result"
101 | }
102 | ],
103 | "source": [
104 | "df = pd.read_csv('data/train.csv')\n",
105 | "df.head()"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": 3,
111 | "metadata": {},
112 | "outputs": [
113 | {
114 | "name": "stderr",
115 | "output_type": "stream",
116 | "text": [
117 | "/home/radek/anaconda3/envs/fastai_n/lib/python3.7/site-packages/ipykernel_launcher.py:4: UserWarning: Boolean Series key will be reindexed to match DataFrame index.\n",
118 | " after removing the cwd from sys.path.\n"
119 | ]
120 | }
121 | ],
122 | "source": [
123 | "im_count = df[df.Id != 'new_whale'].Id.value_counts()\n",
124 | "im_count.name = 'sighting_count'\n",
125 | "df = df.join(im_count, on='Id')\n",
126 | "val_fns = set(df.sample(frac=1)[(df.Id != 'new_whale') & (df.sighting_count > 1)].groupby('Id').first().Image)"
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": 4,
132 | "metadata": {},
133 | "outputs": [],
134 | "source": [
135 | "# pd.to_pickle(val_fns, 'data/val_fns')\n",
136 | "val_fns = pd.read_pickle('data/val_fns')"
137 | ]
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": 5,
142 | "metadata": {},
143 | "outputs": [],
144 | "source": [
145 | "fn2label = {row[1].Image: row[1].Id for row in df.iterrows()}"
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": 6,
151 | "metadata": {},
152 | "outputs": [],
153 | "source": [
154 | "SZ = 224\n",
155 | "BS = 64\n",
156 | "NUM_WORKERS = 12\n",
157 | "SEED=0"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": 7,
163 | "metadata": {},
164 | "outputs": [],
165 | "source": [
166 | "path2fn = lambda path: re.search('\\w*\\.jpg$', path).group(0)"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": 8,
172 | "metadata": {},
173 | "outputs": [],
174 | "source": [
175 | "df = df[df.Id != 'new_whale']"
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": 9,
181 | "metadata": {},
182 | "outputs": [
183 | {
184 | "data": {
185 | "text/plain": [
186 | "(15697, 3)"
187 | ]
188 | },
189 | "execution_count": 9,
190 | "metadata": {},
191 | "output_type": "execute_result"
192 | }
193 | ],
194 | "source": [
195 | "df.shape"
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": 10,
201 | "metadata": {},
202 | "outputs": [
203 | {
204 | "data": {
205 | "text/plain": [
206 | "73.0"
207 | ]
208 | },
209 | "execution_count": 10,
210 | "metadata": {},
211 | "output_type": "execute_result"
212 | }
213 | ],
214 | "source": [
215 | "df.sighting_count.max()"
216 | ]
217 | },
218 | {
219 | "cell_type": "code",
220 | "execution_count": 11,
221 | "metadata": {},
222 | "outputs": [],
223 | "source": [
224 | "df_val = df[df.Image.isin(val_fns)]\n",
225 | "df_train = df[~df.Image.isin(val_fns)]\n",
226 | "df_train_with_val = df"
227 | ]
228 | },
229 | {
230 | "cell_type": "code",
231 | "execution_count": 12,
232 | "metadata": {},
233 | "outputs": [
234 | {
235 | "data": {
236 | "text/plain": [
237 | "((2931, 3), (12766, 3), (15697, 3))"
238 | ]
239 | },
240 | "execution_count": 12,
241 | "metadata": {},
242 | "output_type": "execute_result"
243 | }
244 | ],
245 | "source": [
246 | "df_val.shape, df_train.shape, df_train_with_val.shape"
247 | ]
248 | },
249 | {
250 | "cell_type": "code",
251 | "execution_count": 13,
252 | "metadata": {},
253 | "outputs": [
254 | {
255 | "name": "stdout",
256 | "output_type": "stream",
257 | "text": [
258 | "CPU times: user 10.5 s, sys: 0 ns, total: 10.5 s\n",
259 | "Wall time: 10.5 s\n"
260 | ]
261 | }
262 | ],
263 | "source": [
264 | "%%time\n",
265 | "\n",
266 | "res = None\n",
267 | "sample_to = 15\n",
268 | "\n",
269 | "for grp in df_train.groupby('Id'):\n",
270 | " n = grp[1].shape[0]\n",
271 | " additional_rows = grp[1].sample(0 if sample_to < n else sample_to - n, replace=True)\n",
272 | " rows = pd.concat((grp[1], additional_rows))\n",
273 | " \n",
274 | " if res is None: res = rows\n",
275 | " else: res = pd.concat((res, rows))"
276 | ]
277 | },
278 | {
279 | "cell_type": "code",
280 | "execution_count": 14,
281 | "metadata": {},
282 | "outputs": [
283 | {
284 | "name": "stdout",
285 | "output_type": "stream",
286 | "text": [
287 | "CPU times: user 10.6 s, sys: 24 ms, total: 10.7 s\n",
288 | "Wall time: 10.7 s\n"
289 | ]
290 | }
291 | ],
292 | "source": [
293 | "%%time\n",
294 | "\n",
295 | "res_with_val = None\n",
296 | "sample_to = 15\n",
297 | "\n",
298 | "for grp in df_train_with_val.groupby('Id'):\n",
299 | " n = grp[1].shape[0]\n",
300 | " additional_rows = grp[1].sample(0 if sample_to < n else sample_to - n, replace=True)\n",
301 | " rows = pd.concat((grp[1], additional_rows))\n",
302 | " \n",
303 | " if res_with_val is None: res_with_val = rows\n",
304 | " else: res_with_val = pd.concat((res_with_val, rows))"
305 | ]
306 | },
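307 | {
308 | "cell_type": "markdown",
309 | "metadata": {},
310 | "source": [
311 | "A minimal, equivalent sketch of the two loops above as one helper (same `sample_to` logic; collecting the per-class frames in a list and concatenating once at the end is also a little friendlier to pandas):"
312 | ]
313 | },
314 | {
315 | "cell_type": "code",
316 | "execution_count": null,
317 | "metadata": {},
318 | "outputs": [],
319 | "source": [
320 | "def oversample(frame, sample_to=15):\n",
321 | "    parts = []\n",
322 | "    for _, grp in frame.groupby('Id'):\n",
323 | "        extra = grp.sample(max(sample_to - len(grp), 0), replace=True)\n",
324 | "        parts.append(pd.concat((grp, extra)))\n",
325 | "    return pd.concat(parts)\n",
326 | "\n",
327 | "# res = oversample(df_train); res_with_val = oversample(df_train_with_val)"
328 | ]
329 | },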
307 | {
308 | "cell_type": "code",
309 | "execution_count": 15,
310 | "metadata": {},
311 | "outputs": [
312 | {
313 | "data": {
314 | "text/plain": [
315 | "((76174, 3), (76287, 3))"
316 | ]
317 | },
318 | "execution_count": 15,
319 | "metadata": {},
320 | "output_type": "execute_result"
321 | }
322 | ],
323 | "source": [
324 | "res.shape, res_with_val.shape"
325 | ]
326 | },
327 | {
328 | "cell_type": "markdown",
329 | "metadata": {},
330 | "source": [
331 | "Our training set increased 6-fold, but that is still an amount of data that is okay. I don't think it makes sense to worry about breaking up the data into smaller epochs."
332 | ]
333 | },
334 | {
335 | "cell_type": "code",
336 | "execution_count": 16,
337 | "metadata": {},
338 | "outputs": [],
339 | "source": [
340 | "pd.concat((res, df_val))[['Image', 'Id']].to_csv('data/oversampled_train.csv', index=False)\n",
341 | "res_with_val[['Image', 'Id']].to_csv('data/oversampled_train_and_val.csv', index=False)"
342 | ]
343 | },
344 | {
345 | "cell_type": "markdown",
346 | "metadata": {},
347 | "source": [
348 | "The naming here is not very fortunate, but the idea is that `oversampled_train` has single entries for images in `val_fns` and `oversampled_train_and_val` is both `val` and `train` combined. Meaning, `oversampled_train_and_val` is one we might want to use when retraining on the entire train set."
349 | ]
350 | },
351 | {
352 | "cell_type": "code",
353 | "execution_count": 17,
354 | "metadata": {},
355 | "outputs": [],
356 | "source": [
357 | "df = pd.read_csv('data/oversampled_train.csv')"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": 18,
363 | "metadata": {},
364 | "outputs": [],
365 | "source": [
366 | "data = (\n",
367 | " ImageItemList\n",
368 | " .from_df(df[df.Id != 'new_whale'], 'data/train', cols=['Image'])\n",
369 | " .split_by_valid_func(lambda path: path2fn(path) in val_fns)\n",
370 | " .label_from_func(lambda path: fn2label[path2fn(path)])\n",
371 | " .add_test(ImageItemList.from_folder('data/test'))\n",
372 | " .transform(get_transforms(do_flip=False, max_zoom=1, max_warp=0, max_rotate=2), size=SZ, resize_method=ResizeMethod.SQUISH)\n",
373 | " .databunch(bs=BS, num_workers=NUM_WORKERS, path='data')\n",
374 | " .normalize(imagenet_stats)\n",
375 | ")"
376 | ]
377 | },
378 | {
379 | "cell_type": "code",
380 | "execution_count": 19,
381 | "metadata": {},
382 | "outputs": [
383 | {
384 | "data": {
385 | "text/plain": [
386 | "ImageDataBunch;\n",
387 | "\n",
388 | "Train: LabelList\n",
389 | "y: CategoryList (76174 items)\n",
390 | "[Category w_0003639, Category w_0003639, Category w_0003639, Category w_0003639, Category w_0003639]...\n",
391 | "Path: data/train\n",
392 | "x: ImageItemList (76174 items)\n",
393 | "[Image (3, 700, 1050), Image (3, 700, 1050), Image (3, 700, 1050), Image (3, 700, 1050), Image (3, 700, 1050)]...\n",
394 | "Path: data/train;\n",
395 | "\n",
396 | "Valid: LabelList\n",
397 | "y: CategoryList (2931 items)\n",
398 | "[Category w_f48451c, Category w_a6f9d33, Category w_d3b46e7, Category w_8dddbee, Category w_3881f28]...\n",
399 | "Path: data/train\n",
400 | "x: ImageItemList (2931 items)\n",
401 | "[Image (3, 700, 1050), Image (3, 667, 1000), Image (3, 347, 1050), Image (3, 450, 900), Image (3, 409, 1050)]...\n",
402 | "Path: data/train;\n",
403 | "\n",
404 | "Test: LabelList\n",
405 | "y: EmptyLabelList (7960 items)\n",
406 | "[EmptyLabel , EmptyLabel , EmptyLabel , EmptyLabel , EmptyLabel ]...\n",
407 | "Path: .\n",
408 | "x: ImageItemList (7960 items)\n",
409 | "[Image (3, 525, 1050), Image (3, 695, 2919), Image (3, 450, 1050), Image (3, 450, 1050), Image (3, 360, 1008)]...\n",
410 | "Path: data/train"
411 | ]
412 | },
413 | "execution_count": 19,
414 | "metadata": {},
415 | "output_type": "execute_result"
416 | }
417 | ],
418 | "source": [
419 | "data"
420 | ]
421 | }
422 | ],
423 | "metadata": {
424 | "kernelspec": {
425 | "display_name": "Python 3",
426 | "language": "python",
427 | "name": "python3"
428 | },
429 | "language_info": {
430 | "codemirror_mode": {
431 | "name": "ipython",
432 | "version": 3
433 | },
434 | "file_extension": ".py",
435 | "mimetype": "text/x-python",
436 | "name": "python",
437 | "nbconvert_exporter": "python",
438 | "pygments_lexer": "ipython3",
439 | "version": "3.7.0"
440 | }
441 | },
442 | "nbformat": 4,
443 | "nbformat_minor": 2
444 | }
445 |
--------------------------------------------------------------------------------
/only_known_train.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "%matplotlib inline\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "from fastai.vision import *\n",
12 | "from fastai.metrics import accuracy\n",
13 | "from fastai.basic_data import *\n",
14 | "from skimage.util import montage\n",
15 | "import pandas as pd\n",
16 | "from torch import optim\n",
17 | "import re\n",
18 | "\n",
19 | "from utils import *"
20 | ]
21 | },
22 | {
23 | "cell_type": "markdown",
24 | "metadata": {},
25 | "source": [
26 | "I take a curriculum approach to training here. I first expose the model to as many different images of whales as quickly as possible (no oversampling) and train on images resized to 224x224.\n",
27 | "\n",
28 | "I would like the conv layers to start picking up on features useful for identifying whales. For that, I want to show the model as rich of a dataset as possible.\n",
29 | "\n",
30 | "I then train on images resized to 448x448.\n",
31 | "\n",
32 | "Finally, I train on oversampled data. Here, the model will see some images more often than others but I am hoping that this will help alleviate the class imbalance in the training data."
33 | ]
34 | },
35 | {
36 | "cell_type": "code",
37 | "execution_count": 2,
38 | "metadata": {},
39 | "outputs": [],
40 | "source": [
41 | "import fastai\n",
42 | "from fastprogress import force_console_behavior\n",
43 | "import fastprogress\n",
44 | "fastprogress.fastprogress.NO_BAR = True\n",
45 | "master_bar, progress_bar = force_console_behavior()\n",
46 | "fastai.basic_train.master_bar, fastai.basic_train.progress_bar = master_bar, progress_bar"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 3,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "df = pd.read_csv('data/train.csv')\n",
56 | "val_fns = {'69823499d.jpg'}"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": 4,
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "fn2label = {row[1].Image: row[1].Id for row in df.iterrows()}\n",
66 | "path2fn = lambda path: re.search('\\w*\\.jpg$', path).group(0)"
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": 5,
72 | "metadata": {},
73 | "outputs": [],
74 | "source": [
75 | "name = f'res50-full-train'"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": 6,
81 | "metadata": {},
82 | "outputs": [],
83 | "source": [
84 | "SZ = 224\n",
85 | "BS = 64\n",
86 | "NUM_WORKERS = 12\n",
87 | "SEED=0"
88 | ]
89 | },
90 | {
91 | "cell_type": "code",
92 | "execution_count": 7,
93 | "metadata": {},
94 | "outputs": [],
95 | "source": [
96 | "data = (\n",
97 | " ImageItemList\n",
98 | " .from_df(df[df.Id != 'new_whale'], 'data/train', cols=['Image'])\n",
99 | " .split_by_valid_func(lambda path: path2fn(path) in val_fns)\n",
100 | " .label_from_func(lambda path: fn2label[path2fn(path)])\n",
101 | " .add_test(ImageItemList.from_folder('data/test'))\n",
102 | " .transform(get_transforms(do_flip=False), size=SZ, resize_method=ResizeMethod.SQUISH)\n",
103 | " .databunch(bs=BS, num_workers=NUM_WORKERS, path='data')\n",
104 | " .normalize(imagenet_stats)\n",
105 | ")"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": 8,
111 | "metadata": {},
112 | "outputs": [
113 | {
114 | "name": "stdout",
115 | "output_type": "stream",
116 | "text": [
117 | "epoch train_loss valid_loss\n",
118 | "1 7.457255 0.708407 \n",
119 | "2 6.666923 0.078581 \n",
120 | "3 6.077693 0.093856 \n",
121 | "4 5.138601 0.014386 \n",
122 | "5 4.253701 2.648608 \n",
123 | "6 3.402944 1.789264 \n",
124 | "7 2.651533 0.005592 \n",
125 | "8 1.863626 0.000208 \n",
126 | "9 1.186273 0.012435 \n",
127 | "10 0.684969 0.000092 \n",
128 | "11 0.384233 0.000017 \n",
129 | "12 0.225224 0.000001 \n",
130 | "13 0.141565 0.000004 \n",
131 | "14 0.111797 0.000007 \n",
132 | "epoch train_loss valid_loss\n",
133 | "1 0.096187 0.000008 \n",
134 | "2 0.121451 0.000062 \n",
135 | "3 0.160986 0.000025 \n",
136 | "4 0.242702 0.000004 \n",
137 | "5 0.267682 0.000053 \n",
138 | "6 0.314039 0.000002 \n",
139 | "7 0.327478 0.000000 \n",
140 | "8 0.302776 0.001108 \n",
141 | "9 0.269261 0.000341 \n",
142 | "10 0.239256 0.000433 \n",
143 | "11 0.201479 0.002558 \n",
144 | "12 0.171274 0.000002 \n",
145 | "13 0.149561 0.000000 \n",
146 | "14 0.135921 0.000027 \n",
147 | "15 0.113643 0.000001 \n",
148 | "16 0.094750 0.000456 \n",
149 | "17 0.074803 0.000000 \n",
150 | "18 0.055229 0.000000 \n",
151 | "19 0.045535 0.000002 \n",
152 | "20 0.037293 0.000000 \n",
153 | "21 0.037641 0.000000 \n",
154 | "22 0.031186 0.000000 \n",
155 | "23 0.031796 0.000000 \n",
156 | "24 0.032020 0.000000 \n",
157 | "CPU times: user 37min 36s, sys: 14min 15s, total: 51min 51s\n",
158 | "Wall time: 55min 21s\n"
159 | ]
160 | }
161 | ],
162 | "source": [
163 | "%%time\n",
164 | "\n",
165 | "learn = create_cnn(data, models.resnet50, lin_ftrs=[2048])\n",
166 | "learn.clip_grad();\n",
167 | "\n",
168 | "learn.fit_one_cycle(14, 1e-2)\n",
169 | "learn.save(f'{name}-stage-1')\n",
170 | "\n",
171 | "learn.unfreeze()\n",
172 | "\n",
173 | "max_lr = 1e-3\n",
174 | "lrs = [max_lr/100, max_lr/10, max_lr]\n",
175 | "\n",
176 | "learn.fit_one_cycle(24, lrs)\n",
177 | "learn.save(f'{name}-stage-2')"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": 6,
183 | "metadata": {},
184 | "outputs": [],
185 | "source": [
186 | "SZ = 224 * 2\n",
187 | "BS = 64 // 4\n",
188 | "NUM_WORKERS = 12\n",
189 | "SEED=0"
190 | ]
191 | },
192 | {
193 | "cell_type": "code",
194 | "execution_count": 7,
195 | "metadata": {},
196 | "outputs": [],
197 | "source": [
198 | "data = (\n",
199 | " ImageItemList\n",
200 | " .from_df(df[df.Id != 'new_whale'], 'data/train', cols=['Image'])\n",
201 | " .split_by_valid_func(lambda path: path2fn(path) in val_fns)\n",
202 | " .label_from_func(lambda path: fn2label[path2fn(path)])\n",
203 | " .add_test(ImageItemList.from_folder('data/test'))\n",
204 | " .transform(get_transforms(do_flip=False), size=SZ, resize_method=ResizeMethod.SQUISH)\n",
205 | " .databunch(bs=BS, num_workers=NUM_WORKERS, path='data')\n",
206 | " .normalize(imagenet_stats)\n",
207 | ")"
208 | ]
209 | },
210 | {
211 | "cell_type": "code",
212 | "execution_count": 8,
213 | "metadata": {},
214 | "outputs": [
215 | {
216 | "name": "stdout",
217 | "output_type": "stream",
218 | "text": [
219 | "epoch train_loss valid_loss\n",
220 | "1 1.100031 0.000000 \n",
221 | "3 1.335055 0.000000 \n",
222 | "4 1.674122 0.000000 \n",
223 | "5 1.785136 0.000000 \n",
224 | "6 1.717228 0.000000 \n",
225 | "7 1.412960 0.000000 \n",
226 | "8 1.303269 0.000000 \n",
227 | "9 1.008257 0.000000 \n",
228 | "10 0.796222 0.000000 \n",
229 | "11 0.634087 0.000000 \n",
230 | "12 0.487326 0.000000 \n",
231 | "epoch train_loss valid_loss\n",
232 | "1 0.482283 0.000000 \n",
233 | "2 0.492100 0.000000 \n",
234 | "3 0.563699 0.000000 \n",
235 | "4 0.571843 0.000000 \n",
236 | "5 0.650438 0.000000 \n",
237 | "6 0.695321 0.000000 \n",
238 | "7 0.700596 0.000000 \n",
239 | "8 0.615317 0.000000 \n",
240 | "9 0.678798 0.000000 \n",
241 | "10 0.616675 0.000000 \n",
242 | "11 0.715437 0.000000 \n",
243 | "12 0.628833 0.000000 \n",
244 | "13 0.616170 0.000000 \n",
245 | "14 0.530670 0.000000 \n",
246 | "15 0.458034 0.000000 \n",
247 | "16 0.467264 0.000000 \n",
248 | "17 0.390240 0.000000 \n",
249 | "18 0.413110 0.000000 \n",
250 | "19 0.381089 0.000000 \n",
251 | "20 0.356445 0.000000 \n",
252 | "21 0.345979 0.000000 \n",
253 | "22 0.378644 0.000000 \n",
254 | "CPU times: user 2h 2min 35s, sys: 53min 23s, total: 2h 55min 59s\n",
255 | "Wall time: 2h 57min 8s\n"
256 | ]
257 | }
258 | ],
259 | "source": [
260 | "%%time\n",
261 | "learn = create_cnn(data, models.resnet50, lin_ftrs=[2048])\n",
262 | "learn.clip_grad();\n",
263 | "learn.load(f'{name}-stage-2')\n",
264 | "learn.freeze_to(-1)\n",
265 | "\n",
266 | "learn.fit_one_cycle(12, 1e-2 / 4)\n",
267 | "learn.save(f'{name}-stage-3')\n",
268 | "\n",
269 | "learn.unfreeze()\n",
270 | "\n",
271 | "max_lr = 1e-3 / 4\n",
272 | "lrs = [max_lr/100, max_lr/10, max_lr]\n",
273 | "\n",
274 | "learn.fit_one_cycle(22, lrs)\n",
275 | "learn.save(f'{name}-stage-4')"
276 | ]
277 | },
278 | {
279 | "cell_type": "code",
280 | "execution_count": 9,
281 | "metadata": {},
282 | "outputs": [],
283 | "source": [
284 | "# with oversampling\n",
285 | "df = pd.read_csv('data/oversampled_train_and_val.csv')"
286 | ]
287 | },
288 | {
289 | "cell_type": "code",
290 | "execution_count": 10,
291 | "metadata": {},
292 | "outputs": [],
293 | "source": [
294 | "data = (\n",
295 | " ImageItemList\n",
296 | " .from_df(df, 'data/train', cols=['Image'])\n",
297 | " .split_by_valid_func(lambda path: path2fn(path) in val_fns)\n",
298 | " .label_from_func(lambda path: fn2label[path2fn(path)])\n",
299 | " .add_test(ImageItemList.from_folder('data/test'))\n",
300 | " .transform(get_transforms(do_flip=False), size=SZ, resize_method=ResizeMethod.SQUISH)\n",
301 | " .databunch(bs=BS, num_workers=NUM_WORKERS, path='data')\n",
302 | " .normalize(imagenet_stats)\n",
303 | ")"
304 | ]
305 | },
306 | {
307 | "cell_type": "code",
308 | "execution_count": 11,
309 | "metadata": {},
310 | "outputs": [
311 | {
312 | "name": "stdout",
313 | "output_type": "stream",
314 | "text": [
315 | "epoch train_loss valid_loss\n",
316 | "1 1.626801 0.000010 \n",
317 | "2 0.566748 0.000010 \n",
318 | "epoch train_loss valid_loss\n",
319 | "1 0.604931 0.000121 \n",
320 | "2 0.531284 0.000026 \n",
321 | "3 0.442735 0.000039 \n",
322 | "CPU times: user 1h 25min 46s, sys: 38min 1s, total: 2h 3min 48s\n",
323 | "Wall time: 2h 3min 59s\n"
324 | ]
325 | }
326 | ],
327 | "source": [
328 | "%%time\n",
329 | "learn = create_cnn(data, models.resnet50, lin_ftrs=[2048])\n",
330 | "learn.clip_grad();\n",
331 | "learn.load(f'{name}-stage-4')\n",
332 | "learn.freeze_to(-1)\n",
333 | "\n",
334 | "learn.fit_one_cycle(2, 1e-2 / 4)\n",
335 | "learn.save(f'{name}-stage-5')\n",
336 | "\n",
337 | "learn.unfreeze()\n",
338 | "\n",
339 | "max_lr = 1e-3 / 4\n",
340 | "lrs = [max_lr/100, max_lr/10, max_lr]\n",
341 | "\n",
342 | "learn.fit_one_cycle(3, lrs)\n",
343 | "learn.save(f'{name}-stage-6')"
344 | ]
345 | },
346 | {
347 | "cell_type": "markdown",
348 | "metadata": {},
349 | "source": [
350 | "## Predict"
351 | ]
352 | },
353 | {
354 | "cell_type": "code",
355 | "execution_count": 12,
356 | "metadata": {},
357 | "outputs": [],
358 | "source": [
359 | "preds, _ = learn.get_preds(DatasetType.Test)"
360 | ]
361 | },
362 | {
363 | "cell_type": "code",
364 | "execution_count": 13,
365 | "metadata": {},
366 | "outputs": [],
367 | "source": [
368 | "preds = torch.cat((preds, torch.ones_like(preds[:, :1])), 1)"
369 | ]
370 | },
371 | {
372 | "cell_type": "code",
373 | "execution_count": 14,
374 | "metadata": {},
375 | "outputs": [],
376 | "source": [
377 | "preds[:, 5004] = 0.06"
378 | ]
379 | },
380 | {
381 | "cell_type": "code",
382 | "execution_count": 15,
383 | "metadata": {},
384 | "outputs": [],
385 | "source": [
386 | "classes = learn.data.classes + ['new_whale']"
387 | ]
388 | },
389 | {
390 | "cell_type": "code",
391 | "execution_count": 16,
392 | "metadata": {},
393 | "outputs": [],
394 | "source": [
395 | "create_submission(preds, learn.data, name, classes)"
396 | ]
397 | },
398 | {
399 | "cell_type": "code",
400 | "execution_count": 17,
401 | "metadata": {},
402 | "outputs": [
403 | {
404 | "data": {
405 | "text/html": [
406 | "\n",
407 | "\n",
420 | "
\n",
421 | " \n",
422 | " \n",
423 | " | \n",
424 | " Image | \n",
425 | " Id | \n",
426 | "
\n",
427 | " \n",
428 | " \n",
429 | " \n",
430 | " | 0 | \n",
431 | " 47380533f.jpg | \n",
432 | " w_6c995fd new_whale w_7206ab2 w_54ea24d w_620dffe | \n",
433 | "
\n",
434 | " \n",
435 | " | 1 | \n",
436 | " 1d9de38ba.jpg | \n",
437 | " w_641df87 new_whale w_e99ed06 w_3e6cee1 w_0b7ce1e | \n",
438 | "
\n",
439 | " \n",
440 | " | 2 | \n",
441 | " b3d4ee916.jpg | \n",
442 | " new_whale w_23ce00e w_bc7de9f w_71a1a08 w_708c3d2 | \n",
443 | "
\n",
444 | " \n",
445 | " | 3 | \n",
446 | " 460fd63ae.jpg | \n",
447 | " new_whale w_0bb71d3 w_9eab46a w_60cf87c w_42388df | \n",
448 | "
\n",
449 | " \n",
450 | " | 4 | \n",
451 | " 79738ffc1.jpg | \n",
452 | " new_whale w_1419d90 w_01976db w_dbf651b w_415dea0 | \n",
453 | "
\n",
454 | " \n",
455 | "
\n",
456 | "
"
457 | ],
458 | "text/plain": [
459 | " Image Id\n",
460 | "0 47380533f.jpg w_6c995fd new_whale w_7206ab2 w_54ea24d w_620dffe\n",
461 | "1 1d9de38ba.jpg w_641df87 new_whale w_e99ed06 w_3e6cee1 w_0b7ce1e\n",
462 | "2 b3d4ee916.jpg new_whale w_23ce00e w_bc7de9f w_71a1a08 w_708c3d2\n",
463 | "3 460fd63ae.jpg new_whale w_0bb71d3 w_9eab46a w_60cf87c w_42388df\n",
464 | "4 79738ffc1.jpg new_whale w_1419d90 w_01976db w_dbf651b w_415dea0"
465 | ]
466 | },
467 | "execution_count": 17,
468 | "metadata": {},
469 | "output_type": "execute_result"
470 | }
471 | ],
472 | "source": [
473 | "pd.read_csv(f'subs/{name}.csv.gz').head()"
474 | ]
475 | },
476 | {
477 | "cell_type": "code",
478 | "execution_count": 18,
479 | "metadata": {},
480 | "outputs": [
481 | {
482 | "data": {
483 | "text/plain": [
484 | "0.48693467336683416"
485 | ]
486 | },
487 | "execution_count": 18,
488 | "metadata": {},
489 | "output_type": "execute_result"
490 | }
491 | ],
492 | "source": [
493 | "pd.read_csv(f'subs/{name}.csv.gz').Id.str.split().apply(lambda x: x[0] == 'new_whale').mean()"
494 | ]
495 | },
496 | {
497 | "cell_type": "code",
498 | "execution_count": 19,
499 | "metadata": {},
500 | "outputs": [
501 | {
502 | "name": "stdout",
503 | "output_type": "stream",
504 | "text": [
505 | "100%|████████████████████████████████████████| 183k/183k [00:04<00:00, 37.6kB/s]\n",
506 | "Successfully submitted to Humpback Whale Identification"
507 | ]
508 | }
509 | ],
510 | "source": [
511 | "!kaggle competitions submit -c humpback-whale-identification -f subs/{name}.csv.gz -m \"{name}\""
512 | ]
513 | }
514 | ],
515 | "metadata": {
516 | "kernelspec": {
517 | "display_name": "Python 3",
518 | "language": "python",
519 | "name": "python3"
520 | },
521 | "language_info": {
522 | "codemirror_mode": {
523 | "name": "ipython",
524 | "version": 3
525 | },
526 | "file_extension": ".py",
527 | "mimetype": "text/x-python",
528 | "name": "python",
529 | "nbconvert_exporter": "python",
530 | "pygments_lexer": "ipython3",
531 | "version": "3.7.0"
532 | }
533 | },
534 | "nbformat": 4,
535 | "nbformat_minor": 2
536 | }
537 |
--------------------------------------------------------------------------------
/classification_and_metric_learning.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "%matplotlib inline\n",
10 | "import matplotlib.pyplot as plt\n",
11 | "from fastai.vision import *\n",
12 | "from fastai.metrics import accuracy\n",
13 | "from fastai.basic_data import *\n",
14 | "from skimage.util import montage\n",
15 | "import pandas as pd\n",
16 | "from torch import optim\n",
17 | "import re\n",
18 | "import pretrainedmodels\n",
19 | "from torch.nn.functional import cross_entropy\n",
20 | "\n",
21 | "import albumentations\n",
22 | "from utils import *"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 2,
28 | "metadata": {},
29 | "outputs": [],
30 | "source": [
31 | "# fixes ulimit issue: https://github.com/pytorch/pytorch/issues/973\n",
32 | "\n",
33 | "import resource\n",
34 | "rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n",
35 | "resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))"
36 | ]
37 | },
38 | {
39 | "cell_type": "code",
40 | "execution_count": 3,
41 | "metadata": {},
42 | "outputs": [
43 | {
44 | "data": {
45 | "text/plain": [
46 | "'1.0.46.dev0'"
47 | ]
48 | },
49 | "execution_count": 3,
50 | "metadata": {},
51 | "output_type": "execute_result"
52 | }
53 | ],
54 | "source": [
55 | "import fastai\n",
56 | "fastai.__version__"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": 4,
62 | "metadata": {},
63 | "outputs": [],
64 | "source": [
65 | "import fastai\n",
66 | "from fastprogress import force_console_behavior\n",
67 | "import fastprogress\n",
68 | "fastprogress.fastprogress.NO_BAR = True\n",
69 | "master_bar, progress_bar = force_console_behavior()\n",
70 | "fastai.basic_train.master_bar, fastai.basic_train.progress_bar = master_bar, progress_bar"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": 5,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "from zen_dataset import *\n",
80 | "from zen_dataset.utils import *"
81 | ]
82 | },
83 | {
84 | "cell_type": "markdown",
85 | "metadata": {},
86 | "source": [
87 | "## Setup"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "metadata": {},
93 | "source": [
94 | "Below follows a lot of code to set things up. I give an overview of how it all works together before I start to train the model."
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": 6,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "aug = albumentations.Compose([\n",
104 | " albumentations.RandomBrightnessContrast(p=0.75),\n",
105 | " albumentations.ShiftScaleRotate(shift_limit=0, scale_limit=0.1, rotate_limit=10, interpolation=2, p=0.75)\n",
106 | "])"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 7,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "composed_augs = albumentations.Compose([\n",
116 | " albumentations.RandomBrightnessContrast(p=0.75),\n",
117 | " albumentations.ShiftScaleRotate(shift_limit=0, scale_limit=0.1, rotate_limit=10, interpolation=2, p=0.75)\n",
118 | "])"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": 8,
124 | "metadata": {},
125 | "outputs": [],
126 | "source": [
127 | "def augment(ary):\n",
128 | " return composed_augs(image=ary)['image']"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": 51,
134 | "metadata": {},
135 | "outputs": [],
136 | "source": [
137 | "class Reader():\n",
138 | " def __init__(self, path, augment_fn=None):\n",
139 | " self.path = path\n",
140 | " self.augment_fn = augment_fn\n",
141 | " def __call__(self, fns):\n",
142 | " paths = [f'{self.path}/{filename}' for filename in fns]\n",
143 | " images = [open_image(image_path) for image_path in paths]\n",
144 | " tensors = [image2tensor(image, augment_fn = self.augment_fn) for image in images]\n",
145 | " return [imagenet_normalize(tensor) for tensor in tensors]"
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": 10,
151 | "metadata": {},
152 | "outputs": [],
153 | "source": [
154 | "class Labeler():\n",
155 | " def __init__(self):\n",
156 | " df = pd.read_csv('data/train.csv')\n",
157 | " self.fn2label = {}\n",
158 | " for row in df[df.Id != 'new_whale'].itertuples():\n",
159 | " self.fn2label[row.Image] = row.Id\n",
160 | " self.classes = sorted(list(set(list(self.fn2label.values()))))\n",
161 | " def __call__(self, fns):\n",
162 | " labels = [self.fn2label[fn] for fn in fns]\n",
163 | " return [self.classes.index(label) for label in labels] + [1 if labels[0] != labels[1] else 0]"
164 | ]
165 | },
166 | {
167 | "cell_type": "code",
168 | "execution_count": 11,
169 | "metadata": {},
170 | "outputs": [],
171 | "source": [
172 | "class CustomModel(nn.Module):\n",
173 | " def __init__(self):\n",
174 | " super().__init__()\n",
175 | " self.cnn = nn.Sequential(*list(models.resnet50(True).children())[:-2])\n",
176 | " self.head = create_head(4096, 5004, [2048])\n",
177 | " self.ada_concat = AdaptiveConcatPool2d(1)\n",
178 | "\n",
179 | " def forward(self, ims_a, ims_b):\n",
180 | " cnn_out_a = self.cnn(ims_a)\n",
181 | " out_a = self.head(cnn_out_a)\n",
182 | " \n",
183 | " cnn_out_b = self.cnn(ims_b)\n",
184 | " out_b = self.head(cnn_out_b)\n",
185 | "\n",
186 | " return out_a, out_b, self.ada_concat(cnn_out_a).squeeze(), self.ada_concat(cnn_out_b).squeeze()"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": 12,
192 | "metadata": {},
193 | "outputs": [],
194 | "source": [
195 | "# https://hackernoon.com/facial-similarity-with-siamese-networks-in-pytorch-9642aa9db2f7\n",
196 | "\n",
197 | "class ContrastiveLoss(torch.nn.Module):\n",
198 | " \"\"\"\n",
199 | " Contrastive loss function.\n",
200 | " Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf\n",
201 | " \"\"\"\n",
202 | "\n",
203 | " def __init__(self, margin=2.0):\n",
204 | " super(ContrastiveLoss, self).__init__()\n",
205 | " self.margin = margin\n",
206 | "\n",
207 | " def forward(self, output1, output2, label):\n",
208 | " euclidean_distance = F.pairwise_distance(output1, output2)\n",
209 | " loss_contrastive = torch.mean((1-label) * torch.pow(euclidean_distance, 2) +\n",
210 | " (label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))\n",
211 | " return loss_contrastive"
212 | ]
213 | },
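214 | {
215 | "cell_type": "markdown",
216 | "metadata": {},
217 | "source": [
218 | "For reference, the loss above computes (my notation: $y_i = 1$ for a pair of different whales, $d_i$ the euclidean distance between the two descriptors, $m$ the margin):\n",
219 | "\n",
220 | "$$L = \frac{1}{N}\sum_i \left[ (1 - y_i)\, d_i^2 + y_i \max(m - d_i,\ 0)^2 \right]$$"
221 | ]
222 | },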
214 | {
215 | "cell_type": "code",
216 | "execution_count": 13,
217 | "metadata": {},
218 | "outputs": [],
219 | "source": [
220 | "MARGIN = 60\n",
221 | "\n",
222 | "def cross_entropy_loss(preds, labels_a, labels_b, diff_class_ind):\n",
223 | " return F.cross_entropy(preds[0], labels_a) + F.cross_entropy(preds[1], labels_b)\n",
224 | "\n",
225 | "def contr_loss(preds, labels_a, labels_b, diff_class_ind):\n",
226 | " c_loss = ContrastiveLoss(MARGIN)\n",
227 | " return c_loss(preds[2], preds[3], diff_class_ind.float())\n",
228 | "\n",
229 | "def loss_fn(preds, labels_a, labels_b, diff_class_ind):\n",
230 | " return 10 * cross_entropy_loss(preds, labels_a, labels_b, diff_class_ind) + contr_loss(preds, labels_a, labels_b, diff_class_ind) / 25"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": 14,
236 | "metadata": {},
237 | "outputs": [],
238 | "source": [
239 | "def accuracy_mod(preds, labels_a, labels_b, diff_class_ind):\n",
240 | " return 0.5 * accuracy(preds[0], labels_a) + 0.5 * accuracy(preds[1], labels_b)\n",
241 | "\n",
242 | "def map5_mod(preds, labels_a, labels_b, diff_class_ind):\n",
243 | " return 0.5 * map5(preds[0], labels_a) + 0.5 * map5(preds[1], labels_b)"
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": 15,
249 | "metadata": {},
250 | "outputs": [],
251 | "source": [
252 | "# When I refer to 'whale', I mean a particular image (the file name).\n",
253 | "\n",
254 | "df = pd.read_csv('data/train.csv')\n",
255 | "df = df[df.Id != 'new_whale']\n",
256 | "images_without_meaningful_bbox_predictions = \\\n",
257 | " ['85a95e7a8.jpg', 'b370e1339.jpg', 'b4cb30afd.jpg', 'd4cb9d6e4.jpg', '6a72d84ca.jpg']\n",
258 | "df = df[~df.Image.isin(images_without_meaningful_bbox_predictions)]\n",
259 | "\n",
260 | "labeler = Labeler()\n",
261 | "\n",
262 | "def create_basic_dataloader(sz, batch_size, num_workers=12):\n",
263 | " reader = Reader(f'data/train-extracted-{sz}')\n",
264 | " basic_ds = Dataset([*zip(df.Image.tolist(), df.Image.tolist())], reader, labeler)\n",
265 | " return DataLoader(basic_ds, batch_size=batch_size, num_workers=num_workers)\n",
266 | "\n",
267 | "def create_similarity_dict(model, dataloader):\n",
268 | " # Calculating descriptors for each image\n",
269 | " descs = []\n",
270 | " model.eval()\n",
271 | " with torch.no_grad():\n",
272 | " for batch in dataloader:\n",
273 | " ims = batch[0][0].cuda()\n",
274 | " cnn_out = learn.model.cnn(ims)\n",
275 | " descs.append(learn.model.ada_concat(cnn_out).squeeze().detach().cpu())\n",
276 | "\n",
277 | " descs = torch.cat(descs).cuda()\n",
278 | "\n",
279 | " # Calculating similarity dict for each image\n",
280 | " dists = {}\n",
281 | " for i, (whale, _) in enumerate(dataloader.items):\n",
282 | " dists[whale] = torch.pairwise_distance(descs[i], descs).cpu().numpy()\n",
283 | " \n",
284 | " return dists\n",
285 | "\n",
286 | "def create_data(sz, dist_dict, batch_size, k=20, num_workers=12, train_on_both_train_and_val=False):\n",
287 | " reader_aug = Reader(f'data/train-extracted-{sz}', augment_fn=augment)\n",
288 | " reader = Reader(f'data/train-extracted-{sz}')\n",
289 | " \n",
290 | " val_fns = list(pd.read_pickle('data/val_fns'))\n",
291 | " val_fns_set = set(val_fns)\n",
292 | "\n",
293 | " trn_df = df[~df.Image.isin(val_fns)]\n",
294 | " val_df = df[df.Image.isin(val_fns)]\n",
295 | " \n",
296 | " ds_on_which_dists_were_calculated = Dataset([*zip(df.Image.tolist(), df.Image.tolist())], reader, labeler)\n",
297 | " \n",
298 | " uniq_whales = df.Id.unique().tolist() if train_on_both_train_and_val else trn_df.Id.unique().tolist()\n",
299 | "\n",
300 | " def sample_other_whale():\n",
301 | " candidate_whales = dist_dict[this_whale].argsort() \n",
302 | " this_whale_class = labeler.fn2label[this_whale]\n",
303 | " candidate_fns = []\n",
304 | " for i in range(200):\n",
305 | " candidate_whale = ds_on_which_dists_were_calculated.items[candidate_whales[i]][0]\n",
306 | " if (candidate_whale not in val_fns_set) and (labeler.fn2label[candidate_whale] != this_whale_class): \n",
307 | " candidate_fns.append(candidate_whale)\n",
308 | " if len(candidate_fns) == k: break \n",
309 | " np.random.shuffle(candidate_fns) # randomly pick one from K toughest matches\n",
310 | " return candidate_fns[0]\n",
311 | "\n",
312 | " def sample_this_whale():\n",
313 | " return this_whale_df.sample(n=1).iloc[0].Image\n",
314 | "\n",
315 | " train_items = []\n",
316 | " for whale in uniq_whales:\n",
317 | " this_whale_df = trn_df[trn_df.Id == whale]\n",
318 | " other_whale_df = trn_df[trn_df.Id != whale]\n",
319 | "\n",
320 | " this_whale = sample_this_whale()\n",
321 | "\n",
322 | " # sampling same whale if possible\n",
323 | " if this_whale_df.shape[0] == 1: # only a single picture of this whale in dataset\n",
324 | " other_whale = sample_other_whale()\n",
325 | " train_items.append([this_whale, other_whale])\n",
326 | " else:\n",
327 | " same_whale = this_whale_df[this_whale_df.Image != this_whale].sample(n=1).iloc[0].Image\n",
328 | " train_items.append([this_whale, same_whale])\n",
329 | "\n",
330 | " # sampling different whales\n",
331 | " this_whale = sample_this_whale()\n",
332 | " train_items.append([this_whale, sample_other_whale()])\n",
333 | " \n",
334 | " if train_on_both_train_and_val:\n",
335 | " valid_items = list(zip(val_df.Image.values[:batch_size].tolist(), val_df.Image.values[BS:2*batch_size].tolist()))\n",
336 | " else:\n",
337 | " valid_items = list(zip(val_df.Image.values[:1465].tolist(), val_df.Image.values[1465:2930].tolist()))\n",
338 | "\n",
339 | " train_ds = Dataset(train_items, reader_aug, labeler)\n",
340 | " valid_ds = Dataset(valid_items, reader, labeler)\n",
341 | "\n",
342 | " train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers)\n",
343 | " valid_dl = DataLoader(valid_ds, batch_size=batch_size, num_workers=num_workers)\n",
344 | "\n",
345 | " data = DataBunch(train_dl, valid_dl)\n",
346 | " data.train_ds.loss_func = lambda: None\n",
347 | " \n",
348 | " return data"
349 | ]
350 | },
351 | {
352 | "cell_type": "code",
353 | "execution_count": 16,
354 | "metadata": {},
355 | "outputs": [],
356 | "source": [
357 | "def create_fake_data(): # needed for loading the model\n",
358 | " fake_ds = Dataset([],_,_)\n",
359 | " fake_dl = DataLoader(fake_ds)\n",
360 | "\n",
361 | " data = DataBunch(fake_dl, fake_dl)\n",
362 | " data.train_ds.loss_func = lambda: None \n",
363 | " \n",
364 | " return data"
365 | ]
366 | },
367 | {
368 | "cell_type": "code",
369 | "execution_count": 17,
370 | "metadata": {},
371 | "outputs": [
372 | {
373 | "name": "stdout",
374 | "output_type": "stream",
375 | "text": [
376 | "CPU times: user 1.99 s, sys: 592 ms, total: 2.58 s\n",
377 | "Wall time: 2.6 s\n"
378 | ]
379 | }
380 | ],
381 | "source": [
382 | "%%time\n",
383 | "\n",
384 | "learn = Learner(create_fake_data(), CustomModel(), loss_func=loss_fn, metrics=[accuracy_mod, map5_mod, cross_entropy_loss, contr_loss])\n",
385 | "learn = learn.clip_grad()\n",
386 | "learn.split((learn.model.cnn[6], learn.model.head))"
387 | ]
388 | },
389 | {
390 | "cell_type": "markdown",
391 | "metadata": {},
392 | "source": [
393 | "I chose to implement a model based on resnet50 that would both classify each of the presented images as well as calculate dissimilarity between image pairs.\n",
394 | "\n",
395 | "Each training example consists of two images, most of them consisting of images of different whales and where possible of images of the same whale. I sample the images in a way as to maintain some class balance and to not favor whales with significantly more images.\n",
396 | "\n",
397 | "The model is presented with images A and B. It first sends the images through the convolution part of resnet50 (pretrained on imagenet). This way we obtain the 2048 feature maps of some dimensionality (the actual dimensionality of feature maps will depend on the size of the input). Once we have those, we run a classifier head on them to predict labels (whale ids) for each of the images.\n",
398 | "\n",
399 | "For each image pair the model outputs label prediction for image A, label prediction for image B, a 4096-length feature vector for image A and a 4096-length feature vector for image B.\n",
400 | "\n",
401 | "These outputs are then used for calculating the loss. I use a custom loss that combines cross entropy with contrastive loss.\n",
402 | "\n",
403 | "Below I generate initial data for the model to train on. Whale pairs are samples based on euclidean distance between the CNN features (after application of adaptive concatenation which doubles their lenght from 2048 to 4096). Controlling the `k` parameter is a proxy for how hard we want the sampled dataset to be. I also add some measure of randomness at multiple points to hopefully keep the datasets diverse while still balanced and challenging.\n",
404 | "\n",
405 | "In the later portions of the training I resample the dataset after each epoch.\n",
406 | "\n",
407 | "Classification output of a model trained in this fashion achieves around 0.86 on [the private LB](https://www.kaggle.com/c/humpback-whale-identification/leaderboard). Using similarity calculations solely, the performance improves to 0.9.\n",
408 | "\n",
409 | "The training procedure looked as followed:\n",
410 | "* train the classification model on extracted bounding boxes without dataset construction as in earlier notebooks\n",
411 | "* load the weights into the custom model (modifying the state dict and loading of weights which is not shown here)\n",
412 | "* train on 224x224 images as below\n",
413 | "* train on 448x448 images\n",
414 | "* train on 448x448 images only with contrastive loss\n",
415 | "\n",
416 | "For the later parts of the training I generated new datasets every epoch with K as low as 3."
417 | ]
418 | },
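419 | {
420 | "cell_type": "markdown",
421 | "metadata": {},
422 | "source": [
423 | "The similarity-only inference mentioned above is not part of this notebook; here is a minimal sketch of the idea, assuming `descs` holds the 4096-d descriptors of the train images (as computed in `create_similarity_dict`), `test_descs` those of the test images, and `train_labels` the matching whale ids:"
424 | ]
425 | },
426 | {
427 | "cell_type": "code",
428 | "execution_count": null,
429 | "metadata": {},
430 | "outputs": [],
431 | "source": [
432 | "# hedged sketch, not the exact code used: rank train images by descriptor distance\n",
433 | "# and read off the first topn distinct whale ids\n",
434 | "def knn_predict(test_descs, descs, train_labels, topn=5):\n",
435 | "    preds = []\n",
436 | "    for td in test_descs:\n",
437 | "        order = torch.pairwise_distance(td.unsqueeze(0), descs).argsort()\n",
438 | "        labels = []\n",
439 | "        for idx in order:\n",
440 | "            label = train_labels[idx]\n",
441 | "            if label not in labels: labels.append(label)\n",
442 | "            if len(labels) == topn: break\n",
443 | "        preds.append(labels)\n",
444 | "    return preds"
445 | ]
446 | },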
419 | {
420 | "cell_type": "code",
421 | "execution_count": 21,
422 | "metadata": {},
423 | "outputs": [
424 | {
425 | "name": "stdout",
426 | "output_type": "stream",
427 | "text": [
428 | "CPU times: user 1min 23s, sys: 27.7 s, total: 1min 51s\n",
429 | "Wall time: 1min 51s\n"
430 | ]
431 | }
432 | ],
433 | "source": [
434 | "%%time\n",
435 | "\n",
436 | "SZ = 224\n",
437 | "NUM_WORKERS = 12\n",
438 | "BS = 32\n",
439 | "\n",
440 | "basic_dataloader = create_basic_dataloader(SZ, BS, NUM_WORKERS)\n",
441 | "dists = create_similarity_dict(learn.model, basic_dataloader)\n",
442 | "data = create_data(SZ, dists, BS)"
443 | ]
444 | },
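445 | {
446 | "cell_type": "markdown",
447 | "metadata": {},
448 | "source": [
449 | "`create_similarity_dict` and `create_data` live in `utils.py`; the sketch below shows roughly what building the similarity dictionary could look like. It assumes the dataloader yields `(images, paths)` batches - an assumption, since the real helper is not shown here."
450 | ]
451 | },
452 | {
453 | "cell_type": "code",
454 | "execution_count": null,
455 | "metadata": {},
456 | "outputs": [],
457 | "source": [
458 | "def similarity_dict_sketch(model, dataloader, k=10):\n",
459 | "    # collect a 4096-d descriptor for every train image using the frozen CNN\n",
460 | "    descs, fns = [], []\n",
461 | "    model.eval()\n",
462 | "    with torch.no_grad():\n",
463 | "        for ims, paths in dataloader:\n",
464 | "            out = model.ada_concat(model.cnn(ims.cuda())).squeeze()\n",
465 | "            descs.append(out.cpu()); fns += list(paths)\n",
466 | "    descs = torch.cat(descs)\n",
467 | "    # for every image, keep the k nearest other images - hard candidates for pairing\n",
468 | "    nearest = {}\n",
469 | "    for i, fn in enumerate(fns):\n",
470 | "        d = torch.pairwise_distance(descs[i], descs)\n",
471 | "        d[i] = float('inf')  # never pair an image with itself\n",
472 | "        nearest[fn] = torch.topk(d, k, largest=False)[1]\n",
473 | "    return nearest"
474 | ]
475 | },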
445 | {
446 | "cell_type": "markdown",
447 | "metadata": {},
448 | "source": [
449 | "Here I am training from scratch. I first train the classifier head with the rest of the model frozen."
450 | ]
451 | },
452 | {
453 | "cell_type": "code",
454 | "execution_count": 23,
455 | "metadata": {},
456 | "outputs": [
457 | {
458 | "name": "stdout",
459 | "output_type": "stream",
460 | "text": [
461 | "epoch train_loss valid_loss accuracy_mod map5_mod cross_entropy_loss contr_loss\n",
462 | "1 148.910690 147.522888 0.055290 0.076900 14.646444 26.461220 \n",
463 | "2 123.472168 145.344574 0.082594 0.108106 14.397060 34.348557 \n",
464 | "3 85.668175 135.626740 0.125256 0.165592 13.510564 13.028355 \n",
465 | "4 53.227573 127.417381 0.187713 0.246024 12.650944 22.698233 \n",
466 | "5 37.310898 117.722824 0.256655 0.318282 11.670763 25.379938 \n",
467 | "6 30.048853 112.574509 0.309215 0.374215 11.157745 24.926008 \n",
468 | "7 22.527187 108.538574 0.356655 0.419767 10.791218 15.660150 \n",
469 | "8 17.551012 99.792419 0.403754 0.465836 9.885334 23.477522 \n",
470 | "9 14.864764 91.965767 0.449829 0.508680 9.101912 23.665983 \n",
471 | "10 12.320655 83.247070 0.477816 0.538737 8.249646 18.765621 \n",
472 | "11 11.685419 78.960571 0.494198 0.554721 7.826274 17.445515 \n",
473 | "12 11.117297 78.526299 0.499659 0.558817 7.778657 18.493265 \n",
474 | "Total time: 19:22\n",
475 | "xbcfpkbgev\n",
476 | "CPU times: user 13min 24s, sys: 5min 40s, total: 19min 5s\n",
477 | "Wall time: 19min 23s\n"
478 | ]
479 | }
480 | ],
481 | "source": [
482 | "%%time\n",
483 | "\n",
484 | "learn = Learner(data, CustomModel(), loss_func=loss_fn, metrics=[accuracy_mod, map5_mod, cross_entropy_loss, contr_loss])\n",
485 | "learn = learn.clip_grad()\n",
486 | "learn.split((learn.model.cnn[6], learn.model.head))\n",
487 | "learn.freeze()\n",
488 | "\n",
489 | "learn.fit_one_cycle(12, 1e-2)\n",
490 | "learn.save(name())"
491 | ]
492 | },
493 | {
494 | "cell_type": "markdown",
495 | "metadata": {},
496 | "source": [
497 | "I then proceed to training the entire model. I use the one cycle policy and use discriminative fine tuning."
498 | ]
499 | },
500 | {
501 | "cell_type": "code",
502 | "execution_count": 24,
503 | "metadata": {},
504 | "outputs": [
505 | {
506 | "name": "stdout",
507 | "output_type": "stream",
508 | "text": [
509 | "epoch train_loss valid_loss accuracy_mod map5_mod cross_entropy_loss contr_loss\n",
510 | "1 26.731331 74.631279 0.519795 0.576769 7.457275 1.462620 \n",
511 | "2 21.375389 75.528976 0.516724 0.571519 7.549324 0.893103 \n",
512 | "3 20.266861 76.560631 0.497611 0.556661 7.649171 1.723411 \n",
513 | "4 18.548521 77.408104 0.483959 0.544317 7.732693 2.029275 \n",
514 | "5 17.174431 78.717873 0.479181 0.537947 7.857864 3.480946 \n",
515 | "6 16.853291 72.678078 0.521160 0.577253 7.253875 3.482910 \n",
516 | "7 15.177959 75.314728 0.501024 0.559454 7.515693 3.944957 \n",
517 | "8 13.921166 70.167458 0.518430 0.578083 7.001643 3.775323 \n",
518 | "9 13.039845 67.782875 0.534471 0.590421 6.761198 4.272540 \n",
519 | "10 10.834149 66.399384 0.544369 0.597173 6.614733 6.301155 \n",
520 | "11 10.516848 63.929035 0.555631 0.609278 6.361138 7.941088 \n",
521 | "12 8.837101 61.644421 0.564164 0.618168 6.137127 6.828721 \n",
522 | "13 8.199161 61.322220 0.576792 0.627218 6.088045 11.044115 \n",
523 | "14 7.196623 60.468719 0.575768 0.630205 5.998930 11.985427 \n",
524 | "15 6.209642 60.272873 0.570648 0.627389 5.976564 12.680258 \n",
525 | "16 5.264899 59.344940 0.577133 0.632827 5.891311 10.795804 \n",
526 | "17 4.952946 59.042267 0.583276 0.636030 5.851012 13.303552 \n",
527 | "18 4.683704 58.793663 0.582253 0.635950 5.826302 13.265815 \n",
528 | "19 4.361069 58.649273 0.581911 0.635421 5.803203 15.430987 \n",
529 | "20 4.349578 59.054955 0.583276 0.635921 5.844341 15.288428 \n",
530 | "Total time: 42:51\n",
531 | "gabgottwbr\n"
532 | ]
533 | }
534 | ],
535 | "source": [
536 | "learn.unfreeze()\n",
537 | "dists = create_similarity_dict(learn.model, basic_dataloader)\n",
538 | "learn.data = create_data(SZ, dists, BS)\n",
539 | "\n",
540 | "max_lr = 1e-3\n",
541 | "lrs = [max_lr/100, max_lr/10, max_lr]\n",
542 | "\n",
543 | "learn.fit_one_cycle(20, lrs)\n",
544 | "learn.save(name())"
545 | ]
546 | },
547 | {
548 | "cell_type": "markdown",
549 | "metadata": {},
550 | "source": [
551 | "I used the results on the validation set to understand what effect changes had on performance, to pick hyperparameters, etc. For this competition, the validation set removed a lot of valuable information from the train set.\n",
552 | "\n",
553 | "As such, to complete the training, I switch to training on the entire train set (without retaining any images for the validation set).\n",
554 | "\n",
555 | "(I could do that through the insights I gained earlier and also because I knew my model would not overfit)"
556 | ]
557 | },
558 | {
559 | "cell_type": "code",
560 | "execution_count": 26,
561 | "metadata": {},
562 | "outputs": [],
563 | "source": [
564 | "learn.metrics = [] # metrics calculated on the validation set will no longer be useful"
565 | ]
566 | },
567 | {
568 | "cell_type": "code",
569 | "execution_count": 52,
570 | "metadata": {},
571 | "outputs": [
572 | {
573 | "name": "stdout",
574 | "output_type": "stream",
575 | "text": [
576 | "epoch train_loss valid_loss\n",
577 | "1 12.779222 62.867638 \n",
578 | "Total time: 02:02\n",
579 | "epoch train_loss valid_loss\n",
580 | "1 12.693471 59.829559 \n",
581 | "Total time: 02:03\n",
582 | "epoch train_loss valid_loss\n",
583 | "1 12.260114 61.417843 \n",
584 | "Total time: 02:02\n",
585 | "epoch train_loss valid_loss\n",
586 | "1 12.241240 63.250805 \n",
587 | "Total time: 02:03\n",
588 | "epoch train_loss valid_loss\n",
589 | "1 12.103703 65.692261 \n",
590 | "Total time: 02:03\n",
591 | "Finished training with lr: 0.0005\n",
592 | "fmnmnrqjyx\n"
593 | ]
594 | }
595 | ],
596 | "source": [
597 | "max_lr = 5e-4\n",
598 | "lrs = [max_lr/100, max_lr/10, max_lr]\n",
599 | "\n",
600 | "for _ in range(5):\n",
601 | " dists = create_similarity_dict(learn.model, basic_dataloader)\n",
602 | " learn.data = create_data(SZ, dists, BS, k=10, train_on_both_train_and_val=True)\n",
603 | " learn.fit(1, lrs)\n",
604 | "print(f'Finished training with lr: {max_lr}')\n",
605 | "\n",
606 | "learn.save(name())"
607 | ]
608 | },
609 | {
610 | "cell_type": "markdown",
611 | "metadata": {},
612 | "source": [
613 | "The train loss is much higher, but the sampling of whales here is harder."
614 | ]
615 | },
616 | {
617 | "cell_type": "code",
618 | "execution_count": 53,
619 | "metadata": {},
620 | "outputs": [
621 | {
622 | "name": "stdout",
623 | "output_type": "stream",
624 | "text": [
625 | "epoch train_loss valid_loss\n",
626 | "1 11.932597 61.330284 \n",
627 | "Total time: 02:03\n",
628 | "epoch train_loss valid_loss\n",
629 | "1 11.082793 59.027733 \n",
630 | "Total time: 02:03\n",
631 | "epoch train_loss valid_loss\n",
632 | "1 10.894583 57.672829 \n",
633 | "Total time: 02:03\n",
634 | "epoch train_loss valid_loss\n",
635 | "1 11.322955 58.176491 \n",
636 | "Total time: 02:03\n",
637 | "epoch train_loss valid_loss\n",
638 | "1 10.782765 57.838867 \n",
639 | "Total time: 02:03\n",
640 | "Finished training with lr: 0.0001\n",
641 | "azhovmyckz\n"
642 | ]
643 | }
644 | ],
645 | "source": [
646 | "max_lr = 1e-4\n",
647 | "lrs = [max_lr/100, max_lr/10, max_lr]\n",
648 | "\n",
649 | "for _ in range(5):\n",
650 | " dists = create_similarity_dict(learn.model, basic_dataloader)\n",
651 | " learn.data = create_data(SZ, dists, BS, k=7, train_on_both_train_and_val=True)\n",
652 | " learn.fit(1, lrs)\n",
653 | "print(f'Finished training with lr: {max_lr}')\n",
654 | "\n",
655 | "learn.save(name())"
656 | ]
657 | },
658 | {
659 | "cell_type": "code",
660 | "execution_count": 54,
661 | "metadata": {},
662 | "outputs": [
663 | {
664 | "name": "stdout",
665 | "output_type": "stream",
666 | "text": [
667 | "epoch train_loss valid_loss\n",
668 | "1 10.938106 57.279255 \n",
669 | "Total time: 02:03\n",
670 | "epoch train_loss valid_loss\n",
671 | "1 10.535534 56.972042 \n",
672 | "Total time: 02:03\n",
673 | "epoch train_loss valid_loss\n",
674 | "1 10.983456 56.210239 \n",
675 | "Total time: 02:03\n",
676 | "epoch train_loss valid_loss\n",
677 | "1 10.421021 57.603001 \n",
678 | "Total time: 02:03\n",
679 | "epoch train_loss valid_loss\n",
680 | "1 11.137517 57.158092 \n",
681 | "Total time: 02:03\n",
682 | "Finished training with lr: 5e-05\n",
683 | "nfbjfylcqh\n"
684 | ]
685 | }
686 | ],
687 | "source": [
688 | "max_lr = 5e-5\n",
689 | "lrs = [max_lr/100, max_lr/10, max_lr]\n",
690 | "\n",
691 | "for _ in range(5):\n",
692 | " dists = create_similarity_dict(learn.model, basic_dataloader)\n",
693 | " learn.data = create_data(SZ, dists, BS, k=3, train_on_both_train_and_val=True)\n",
694 | " learn.fit(1, lrs)\n",
695 | "print(f'Finished training with lr: {max_lr}')\n",
696 | "\n",
697 | "learn.save(name())"
698 | ]
699 | },
700 | {
701 | "cell_type": "markdown",
702 | "metadata": {},
703 | "source": [
704 | "The above is not exactly how I trained, but it hopefully captures the gist of it in a readable way.\n",
705 | "\n",
706 | "At this point I would switch to training on 448x448 crops. I would train on 448x448 crops with the custom loss function (combining cross entropy and contrastive divergence). For the last segment of the training, I used contrastive loss only.\n",
707 | "\n",
708 | "I will not carry out the training on larger images here, let us rather proceed to generating a submission."
709 | ]
710 | },
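711 | {
712 | "cell_type": "markdown",
713 | "metadata": {},
714 | "source": [
715 | "For reference, the 448x448 stage mentioned above would look roughly like the cell below (not executed here). The batch size and learning rates are assumptions, not the exact values I used."
716 | ]
717 | },
718 | {
719 | "cell_type": "code",
720 | "execution_count": null,
721 | "metadata": {},
722 | "outputs": [],
723 | "source": [
724 | "SZ = 448\n",
725 | "BS = 16  # larger images call for a smaller batch to fit in GPU memory\n",
726 | "\n",
727 | "basic_dataloader = create_basic_dataloader(SZ, BS, NUM_WORKERS)\n",
728 | "for _ in range(5):\n",
729 | "    dists = create_similarity_dict(learn.model, basic_dataloader)\n",
730 | "    learn.data = create_data(SZ, dists, BS, k=3, train_on_both_train_and_val=True)\n",
731 | "    learn.fit(1, [5e-6, 5e-5, 5e-4])"
732 | ]
733 | },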
711 | {
712 | "cell_type": "markdown",
713 | "metadata": {},
714 | "source": [
715 | "## Generating predictions"
716 | ]
717 | },
718 | {
719 | "cell_type": "code",
720 | "execution_count": 55,
721 | "metadata": {},
722 | "outputs": [],
723 | "source": [
724 | "class Reader():\n",
725 | " def __call__(self, paths):\n",
726 | " images = [open_image(image_path) for image_path in paths]\n",
727 | " tensors = [image2tensor(image) for image in images]\n",
728 | " return [imagenet_normalize(tensor) for tensor in tensors]\n",
729 | "\n",
730 | "train_items = df.Image.apply(lambda fn: f'data/train-extracted-{SZ}/{fn}').tolist()\n",
731 | "test_items = list(map(lambda p: str(p), paths_to_files_in(f'data/test-extracted-{SZ}')))\n",
732 | "\n",
733 | "train_and_test_items = train_items + test_items\n",
734 | "\n",
735 | "train_and_test_ds = Dataset([*zip(train_and_test_items)], Reader(), lambda _: 0)\n",
736 | "train_and_test_dl = DataLoader(train_and_test_ds, batch_size=BS, num_workers=NUM_WORKERS)"
737 | ]
738 | },
739 | {
740 | "cell_type": "markdown",
741 | "metadata": {},
742 | "source": [
743 | "I run the cnn part of the model on all the images in the train and test sets to obtain features (I call them descs for descriptors)."
744 | ]
745 | },
746 | {
747 | "cell_type": "code",
748 | "execution_count": 56,
749 | "metadata": {},
750 | "outputs": [
751 | {
752 | "name": "stdout",
753 | "output_type": "stream",
754 | "text": [
755 | "CPU times: user 27.8 s, sys: 14 s, total: 41.8 s\n",
756 | "Wall time: 42.4 s\n"
757 | ]
758 | }
759 | ],
760 | "source": [
761 | "%%time\n",
762 | "\n",
763 | "descs = []\n",
764 | "learn.model.eval()\n",
765 | "with torch.no_grad():\n",
766 | " for batch in train_and_test_dl:\n",
767 | " ims = batch[0][0].cuda()\n",
768 | " cnn_out = learn.model.cnn(ims)\n",
769 | " descs.append(learn.model.ada_concat(cnn_out).squeeze().detach().cpu())\n",
770 | "\n",
771 | "descs = torch.cat(descs).cuda()"
772 | ]
773 | },
774 | {
775 | "cell_type": "markdown",
776 | "metadata": {},
777 | "source": [
778 | "I calculate distances between each image in the test set and all images in the train set."
779 | ]
780 | },
781 | {
782 | "cell_type": "code",
783 | "execution_count": 57,
784 | "metadata": {},
785 | "outputs": [],
786 | "source": [
787 | "def path2whale(path):\n",
788 | " return re.search('(\\w*.\\w*$)', path).group(1)"
789 | ]
790 | },
791 | {
792 | "cell_type": "code",
793 | "execution_count": 58,
794 | "metadata": {},
795 | "outputs": [
796 | {
797 | "name": "stdout",
798 | "output_type": "stream",
799 | "text": [
800 | "CPU times: user 17.7 s, sys: 9.37 s, total: 27.1 s\n",
801 | "Wall time: 27.1 s\n"
802 | ]
803 | }
804 | ],
805 | "source": [
806 | "%%time\n",
807 | "\n",
808 | "dists = {}\n",
809 | "for i, path in enumerate(train_and_test_dl.items[15694:]):\n",
810 | " whale = path2whale(path[0])\n",
811 | " dists[whale] = torch.pairwise_distance(descs[i + 15694], descs[:15694]).cpu().numpy()"
812 | ]
813 | },
814 | {
815 | "cell_type": "markdown",
816 | "metadata": {},
817 | "source": [
818 | "And I generate whale id predictions."
819 | ]
820 | },
821 | {
822 | "cell_type": "code",
823 | "execution_count": 59,
824 | "metadata": {},
825 | "outputs": [],
826 | "source": [
827 | "test_fns = [p.name for p in paths_to_files_in(f'data/test-extracted-{SZ}')]"
828 | ]
829 | },
830 | {
831 | "cell_type": "code",
832 | "execution_count": 68,
833 | "metadata": {},
834 | "outputs": [
835 | {
836 | "name": "stdout",
837 | "output_type": "stream",
838 | "text": [
839 | "CPU times: user 17.1 s, sys: 4 ms, total: 17.1 s\n",
840 | "Wall time: 17.1 s\n"
841 | ]
842 | }
843 | ],
844 | "source": [
845 | "%%time\n",
846 | "\n",
847 | "new_whale_threshold = 47\n",
848 | "\n",
849 | "all_preds = []\n",
850 | "for fn in test_fns:\n",
851 | " most_similar = list(dists[fn].argsort())\n",
852 | " preds = []\n",
853 | " \n",
854 | " while len(preds) < 5:\n",
855 | " similar = most_similar.pop(0)\n",
856 | " class_of_similar = labeler.fn2label[path2whale(train_and_test_items[similar])]\n",
857 | " if dists[fn][similar] > new_whale_threshold:\n",
858 | " if 'new_whale' not in preds: preds.append('new_whale')\n",
859 | " if len(preds) < 5:\n",
860 | " if class_of_similar not in preds: preds.append(class_of_similar)\n",
861 | " all_preds.append(preds)"
862 | ]
863 | },
864 | {
865 | "cell_type": "markdown",
866 | "metadata": {},
867 | "source": [
868 | "I tried looking for the best threshold for predicting `new_whale` in a couple of ways. Based on thinking about this and the results I was seeing I came to the conclusion that predicting `new_whale` as first prediction a little more often than 27% of the time (which was the ratio of new whales in the public portion of the test set) should work quite well.\n",
869 | "\n",
870 | "One can alter this by modifying the `new_whale_threshold` above."
871 | ]
872 | },
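873 | {
874 | "cell_type": "markdown",
875 | "metadata": {},
876 | "source": [
877 | "A hypothetical helper for picking that threshold, assuming `dists` as built above (test file name -> distances to all train images): take the quantile of closest-match distances that yields the desired `new_whale` rate."
878 | ]
879 | },
880 | {
881 | "cell_type": "code",
882 | "execution_count": null,
883 | "metadata": {},
884 | "outputs": [],
885 | "source": [
886 | "def threshold_for_new_whale_rate(dists, target_rate=0.3):\n",
887 | "    # distance from every test image to its closest train image\n",
888 | "    closest = np.array([d.min() for d in dists.values()])\n",
889 | "    # above the (1 - target_rate) quantile the best match is far -> predict new_whale first\n",
890 | "    return np.quantile(closest, 1 - target_rate)"
891 | ]
892 | },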
873 | {
874 | "cell_type": "code",
875 | "execution_count": 69,
876 | "metadata": {},
877 | "outputs": [
878 | {
879 | "data": {
880 | "text/plain": [
881 | "0.3466515893956527"
882 | ]
883 | },
884 | "execution_count": 69,
885 | "metadata": {},
886 | "output_type": "execute_result"
887 | }
888 | ],
889 | "source": [
890 | "np.mean([preds[0] == 'new_whale' for preds in all_preds])"
891 | ]
892 | },
893 | {
894 | "cell_type": "markdown",
895 | "metadata": {},
896 | "source": [
897 | "Let's generate the submission."
898 | ]
899 | },
900 | {
901 | "cell_type": "code",
902 | "execution_count": 70,
903 | "metadata": {},
904 | "outputs": [],
905 | "source": [
906 | "sub_name = 'res50_similarity'\n",
907 | "\n",
908 | "sub = pd.DataFrame({'Image': test_fns, 'Id': all_preds})\n",
909 | "sub.Id = sub.Id.str.join(' ')\n",
910 | "sub.to_csv(f'subs/{sub_name}.csv.gz', index=False, compression='gzip')"
911 | ]
912 | },
913 | {
914 | "cell_type": "markdown",
915 | "metadata": {},
916 | "source": [
917 | "There was an image missing from the test set (one where I was unable to extract a bounding box) so here I am adding a prediction for it."
918 | ]
919 | },
920 | {
921 | "cell_type": "code",
922 | "execution_count": 71,
923 | "metadata": {},
924 | "outputs": [],
925 | "source": [
926 | "sub = pd.read_csv(f'subs/{sub_name}.csv.gz')\n",
927 | "sub.append({'Image': '6a72d84ca.jpg', 'Id': 'new_whale'}, ignore_index=True).to_csv(f'subs/{sub_name}.csv.gz', index=False, compression='gzip')"
928 | ]
929 | },
930 | {
931 | "cell_type": "code",
932 | "execution_count": 72,
933 | "metadata": {},
934 | "outputs": [
935 | {
936 | "name": "stdout",
937 | "output_type": "stream",
938 | "text": [
939 | "100%|████████████████████████████████████████| 186k/186k [00:01<00:00, 73.7kB/s]\n",
940 | "Successfully submitted to Humpback Whale Identification"
941 | ]
942 | }
943 | ],
944 | "source": [
945 | "!kaggle competitions submit -c humpback-whale-identification -f subs/{sub_name}.csv.gz -m \"{sub_name}\""
946 | ]
947 | },
948 | {
949 | "cell_type": "markdown",
950 | "metadata": {},
951 | "source": [
952 | "The model as trained above achieves 0.84812 on private LB. With a bit more training on 448x448 images the score increased to 0.90813. As I have not spent a lot of time training the model, there is some chance the score would improve further with more training."
953 | ]
954 | }
955 | ],
956 | "metadata": {
957 | "kernelspec": {
958 | "display_name": "Python 3",
959 | "language": "python",
960 | "name": "python3"
961 | },
962 | "language_info": {
963 | "codemirror_mode": {
964 | "name": "ipython",
965 | "version": 3
966 | },
967 | "file_extension": ".py",
968 | "mimetype": "text/x-python",
969 | "name": "python",
970 | "nbconvert_exporter": "python",
971 | "pygments_lexer": "ipython3",
972 | "version": "3.7.3"
973 | }
974 | },
975 | "nbformat": 4,
976 | "nbformat_minor": 2
977 | }
978 |
--------------------------------------------------------------------------------
/siamese_network_prototype.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "# http://www.cs.utoronto.ca/~gkoch/files/msc-thesis.pdf\n",
10 | "# https://www.cs.cmu.edu/~rsalakhu/papers/oneshot1.pdf"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "%matplotlib inline\n",
20 | "import matplotlib.pyplot as plt\n",
21 | "from fastai.vision import *\n",
22 | "from fastai.metrics import accuracy_thresh\n",
23 | "from fastai.basic_data import *\n",
24 | "from torch.utils.data import DataLoader, Dataset\n",
25 | "from torch import nn\n",
26 | "from fastai.callbacks.hooks import num_features_model, model_sizes\n",
27 | "from fastai.layers import BCEWithLogitsFlat\n",
28 | "from fastai.basic_train import Learner\n",
29 | "from skimage.util import montage\n",
30 | "import pandas as pd\n",
31 | "from torch import optim\n",
32 | "import re\n",
33 | "\n",
34 | "from utils import *"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 3,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "# import fastai\n",
44 | "# from fastprogress import force_console_behavior\n",
45 | "# import fastprogress\n",
46 | "# fastprogress.fastprogress.NO_BAR = True\n",
47 | "# master_bar, progress_bar = force_console_behavior()\n",
48 | "# fastai.basic_train.master_bar, fastai.basic_train.progress_bar = master_bar, progress_bar"
49 | ]
50 | },
51 | {
52 | "cell_type": "markdown",
53 | "metadata": {},
54 | "source": [
55 | "Posing the problem as a classification task is probably not ideal. We are asking our NN to learn to recognize a whale out of 5004 possible candidates based on what it has learned about the whales. That is a tall order.\n",
56 | "\n",
57 | "Instead, here we will try to pose the problem as a verification task. When presented with two images of whale flukes, we will ask the network - are the images of the same whale or of different whales? In particular, we will try to teach our network to learn features that can be useful in determining the similarity between whale images (hence the name of this approach - feature learning).\n",
58 | "\n",
59 | "This seems like a much easier task, at least in theory. Either way, no need to start with a relatively big CNN like resnet50. Let's see what mileage we can get out of resnet18."
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": 3,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "# new architecture calls for a new validation set, this time our validation set will consist of all whales that have exactly two images\n",
69 | "df = pd.read_csv('data/train.csv')\n",
70 | "im_count = df[df.Id != 'new_whale'].Id.value_counts()\n",
71 | "im_count.name = 'sighting_count'\n",
72 | "df = df.join(im_count, on='Id')\n",
73 | "val_fns = set(df[df.sighting_count == 2].Image)"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": 4,
79 | "metadata": {},
80 | "outputs": [
81 | {
82 | "data": {
83 | "text/plain": [
84 | "2570"
85 | ]
86 | },
87 | "execution_count": 4,
88 | "metadata": {},
89 | "output_type": "execute_result"
90 | }
91 | ],
92 | "source": [
93 | "len(val_fns)"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": 5,
99 | "metadata": {},
100 | "outputs": [],
101 | "source": [
102 | "fn2label = {row[1].Image: row[1].Id for row in df.iterrows()}\n",
103 | "path2fn = lambda path: re.search('\\w*\\.jpg$', path).group(0)\n",
104 | "\n",
105 | "name = f'res18-siamese'"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": 6,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "SZ = 224\n",
115 | "BS = 64\n",
116 | "NUM_WORKERS = 12\n",
117 | "SEED=0"
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": 7,
123 | "metadata": {},
124 | "outputs": [],
125 | "source": [
126 | "# data_block api creates categories based on classes it sees in the train set and\n",
127 | "# our val set contains whales whose ids do not appear in the train set\n",
128 | "classes = df.Id.unique()"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": 8,
134 | "metadata": {},
135 | "outputs": [],
136 | "source": [
137 | "data = (\n",
138 | " ImageItemList\n",
139 | " .from_df(df[df.Id != 'new_whale'], f'data/train-{SZ}', cols=['Image'])\n",
140 | " .split_by_valid_func(lambda path: path2fn(path) in val_fns)\n",
141 | " .label_from_func(lambda path: fn2label[path2fn(path)], classes=classes)\n",
142 | " .add_test(ImageItemList.from_folder(f'data/test-{SZ}'))\n",
143 | " .transform(get_transforms(do_flip=False), size=SZ, resize_method=ResizeMethod.SQUISH)\n",
144 | "# .databunch(bs=BS, num_workers=NUM_WORKERS, path='data')\n",
145 | "# .normalize(imagenet_stats)\n",
146 | ")"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "I am still using the ImageItemList even though I will create my own datasets. Why? Because I want to reuse the functionality that is already there (creating datasets from files, augmentations, resizing, etc).\n",
154 | "\n",
155 | "I realize the code is neither clean nor elegant but for the time being I am happy with this approach."
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": 9,
161 | "metadata": {},
162 | "outputs": [],
163 | "source": [
164 | "def is_even(num): return num % 2 == 0\n",
165 | "\n",
166 | "class TwoImDataset(Dataset):\n",
167 | " def __init__(self, ds):\n",
168 | " self.ds = ds\n",
169 | " self.whale_ids = ds.y.items\n",
170 | " def __len__(self):\n",
171 | " return 2 * len(self.ds)\n",
172 | " def __getitem__(self, idx):\n",
173 | " if is_even(idx):\n",
174 | " return self.sample_same(idx // 2)\n",
175 | " else: return self.sample_different((idx-1) // 2)\n",
176 | " def sample_same(self, idx):\n",
177 | " whale_id = self.whale_ids[idx] \n",
178 | " candidates = list(np.where(self.whale_ids == whale_id)[0])\n",
179 | " candidates.remove(idx) # dropping our current whale - we don't want to compare against an identical image!\n",
180 | " \n",
181 | " if len(candidates) == 0: # oops, there is only a single whale with this id in the dataset\n",
182 | " return self.sample_different(idx)\n",
183 | " \n",
184 | " np.random.shuffle(candidates)\n",
185 | " return self.construct_example(self.ds[idx][0], self.ds[candidates[0]][0], 1)\n",
186 | " def sample_different(self, idx):\n",
187 | " whale_id = self.whale_ids[idx]\n",
188 | " candidates = list(np.where(self.whale_ids != whale_id)[0])\n",
189 | " np.random.shuffle(candidates)\n",
190 | " return self.construct_example(self.ds[idx][0], self.ds[candidates[0]][0], 0)\n",
191 | " \n",
192 | " def construct_example(self, im_A, im_B, class_idx):\n",
193 | " return [im_A, im_B], class_idx"
194 | ]
195 | },
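196 | {
197 | "cell_type": "markdown",
198 | "metadata": {},
199 | "source": [
200 | "A quick sanity check of the sampling scheme above (even indices yield same-whale pairs, odd indices different-whale pairs) - a sketch, reusing `data.train` from earlier:"
201 | ]
202 | },
203 | {
204 | "cell_type": "code",
205 | "execution_count": null,
206 | "metadata": {},
207 | "outputs": [],
208 | "source": [
209 | "ds = TwoImDataset(data.train)\n",
210 | "(im_a, im_b), is_same = ds[0]  # even index -> same-whale pair (label 1, unless the whale has a single image)\n",
211 | "(im_c, im_d), is_diff = ds[1]  # odd index -> different-whale pair (label 0)\n",
212 | "len(ds), is_same, is_diff  # twice the length of the wrapped dataset, 1, 0"
213 | ]
214 | },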
196 | {
197 | "cell_type": "code",
198 | "execution_count": 10,
199 | "metadata": {},
200 | "outputs": [],
201 | "source": [
202 | "train_dl = DataLoader(\n",
203 | " TwoImDataset(data.train),\n",
204 | " batch_size=BS,\n",
205 | " shuffle=True,\n",
206 | " num_workers=NUM_WORKERS\n",
207 | ")\n",
208 | "valid_dl = DataLoader(\n",
209 | " TwoImDataset(data.valid),\n",
210 | " batch_size=BS,\n",
211 | " shuffle=False,\n",
212 | " num_workers=NUM_WORKERS\n",
213 | ")\n",
214 | "\n",
215 | "data_bunch = ImageDataBunch(train_dl, valid_dl)"
216 | ]
217 | },
218 | {
219 | "cell_type": "code",
220 | "execution_count": 11,
221 | "metadata": {},
222 | "outputs": [],
223 | "source": [
224 | "def normalize_batch(batch):\n",
225 | " stat_tensors = [torch.tensor(l).cuda() for l in imagenet_stats]\n",
226 | " return [normalize(batch[0][0], *stat_tensors), normalize(batch[0][1], *stat_tensors)], batch[1]"
227 | ]
228 | },
229 | {
230 | "cell_type": "code",
231 | "execution_count": 12,
232 | "metadata": {},
233 | "outputs": [],
234 | "source": [
235 | "data_bunch.add_tfm(normalize_batch)"
236 | ]
237 | },
238 | {
239 | "cell_type": "code",
240 | "execution_count": 13,
241 | "metadata": {},
242 | "outputs": [],
243 | "source": [
244 | "from functional import seq\n",
245 | "\n",
246 | "class SiameseNetwork(nn.Module):\n",
247 | " def __init__(self, arch=models.resnet18):\n",
248 | " super().__init__() \n",
249 | " self.cnn = create_body(arch)\n",
250 | " self.head = nn.Linear(num_features_model(self.cnn), 1)\n",
251 | " \n",
252 | " def forward(self, im_A, im_B):\n",
253 | " # dl - distance layer\n",
254 | " x1, x2 = seq(im_A, im_B).map(self.cnn).map(self.process_features)\n",
255 | " dl = self.calculate_distance(x1, x2)\n",
256 | " out = self.head(dl)\n",
257 | " return out\n",
258 | " \n",
259 | " def process_features(self, x): return x.reshape(*x.shape[:2], -1).max(-1)[0]\n",
260 | " def calculate_distance(self, x1, x2): return (x1 - x2).abs_()\n",
261 | " "
262 | ]
263 | },
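264 | {
265 | "cell_type": "markdown",
266 | "metadata": {},
267 | "source": [
268 | "A minimal shape check for the network above (a sketch - a batch of 4 random 224x224 images):"
269 | ]
270 | },
271 | {
272 | "cell_type": "code",
273 | "execution_count": null,
274 | "metadata": {},
275 | "outputs": [],
276 | "source": [
277 | "net = SiameseNetwork().cuda()\n",
278 | "a, b = torch.randn(4, 3, 224, 224).cuda(), torch.randn(4, 3, 224, 224).cuda()\n",
279 | "net(a, b).shape  # torch.Size([4, 1]) - one similarity logit per image pair"
280 | ]
281 | },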
264 | {
265 | "cell_type": "markdown",
266 | "metadata": {},
267 | "source": [
268 | "Below I include two slightly different siamese networks. I leave the code commented out and choose to use the one above."
269 | ]
270 | },
271 | {
272 | "cell_type": "code",
273 | "execution_count": 16,
274 | "metadata": {},
275 | "outputs": [],
276 | "source": [
277 | "# from functional import seq\n",
278 | "\n",
279 | "# def cnn_activations_count(model):\n",
280 | "# _, ch, h, w = model_sizes(create_body(models.resnet18), (SZ, SZ))[-1]\n",
281 | "# return ch * h * w\n",
282 | "\n",
283 | "# class SiameseNetwork(nn.Module):\n",
284 | "# def __init__(self, lin_ftrs=2048, arch=models.resnet18):\n",
285 | "# super().__init__() \n",
286 | "# self.cnn = create_body(arch)\n",
287 | "# self.fc1 = nn.Linear(cnn_activations_count(self.cnn), lin_ftrs)\n",
288 | "# self.fc2 = nn.Linear(lin_ftrs, 1)\n",
289 | " \n",
290 | "# def forward(self, im_A, im_B):\n",
291 | "# x1, x2 = seq(im_A, im_B).map(self.cnn).map(self.process_features).map(self.fc1)\n",
292 | "# dl = self.calculate_distance(x1.sigmoid(), x2.sigmoid())\n",
293 | "# out = self.fc2(dl)\n",
294 | "# return out\n",
295 | " \n",
296 | "# def calculate_distance(self, x1, x2): return (x1 - x2).abs_()\n",
297 | "# def process_features(self, x): return x.reshape(x.shape[0], -1)"
298 | ]
299 | },
300 | {
301 | "cell_type": "code",
302 | "execution_count": 17,
303 | "metadata": {},
304 | "outputs": [],
305 | "source": [
306 | "# from functional import seq\n",
307 | "\n",
308 | "# def cnn_activations_count(model):\n",
309 | "# _, ch, h, w = model_sizes(create_body(models.resnet18), (SZ, SZ))[-1]\n",
310 | "# return ch * h * w\n",
311 | "\n",
312 | "# class SiameseNetwork(nn.Module):\n",
313 | "# def __init__(self, lin_ftrs=2048, pool_to=3, arch=models.resnet18, pooling_layer=nn.AdaptiveMaxPool2d):\n",
314 | "# super().__init__() \n",
315 | "# self.cnn = create_body(arch)\n",
316 | "# self.pool = pooling_layer(pool_to)\n",
317 | "# self.fc1 = nn.Linear(num_features_model(self.cnn) * pool_to**2, lin_ftrs)\n",
318 | "# self.fc2 = nn.Linear(lin_ftrs, 1)\n",
319 | " \n",
320 | "# def forward(self, im_A, im_B):\n",
321 | "# x1, x2 = seq(im_A, im_B).map(self.cnn).map(self.pool).map(self.process_features).map(self.fc1)\n",
322 | "# dl = self.calculate_distance(x1.sigmoid(), x2.sigmoid())\n",
323 | "# out = self.fc2(dl)\n",
324 | "# return out\n",
325 | " \n",
326 | "# def calculate_distance(self, x1, x2): return (x1 - x2).abs_()\n",
327 | "# def process_features(self, x): return x.reshape(x.shape[0], -1)"
328 | ]
329 | },
330 | {
331 | "cell_type": "code",
332 | "execution_count": 14,
333 | "metadata": {},
334 | "outputs": [],
335 | "source": [
336 | "learn = Learner(data_bunch, SiameseNetwork(), loss_func=BCEWithLogitsFlat(), metrics=[lambda preds, targs: accuracy_thresh(preds.squeeze(), targs, sigmoid=False)])"
337 | ]
338 | },
339 | {
340 | "cell_type": "code",
341 | "execution_count": 19,
342 | "metadata": {},
343 | "outputs": [],
344 | "source": [
345 | "learn.split([learn.model.cnn[:6], learn.model.cnn[6:], learn.model.head])"
346 | ]
347 | },
348 | {
349 | "cell_type": "code",
350 | "execution_count": 20,
351 | "metadata": {},
352 | "outputs": [],
353 | "source": [
354 | "learn.freeze_to(-1)"
355 | ]
356 | },
357 | {
358 | "cell_type": "code",
359 | "execution_count": 21,
360 | "metadata": {},
361 | "outputs": [
362 | {
363 | "name": "stdout",
364 | "output_type": "stream",
365 | "text": [
366 | "LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.\n"
367 | ]
368 | }
369 | ],
370 | "source": [
371 | "learn.lr_find()"
372 | ]
373 | },
374 | {
375 | "cell_type": "code",
376 | "execution_count": 22,
377 | "metadata": {},
378 | "outputs": [
379 | {
380 | "data": {
381 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYUAAAEKCAYAAAD9xUlFAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzt3Xl8XXWd//HXJ2uzNUuTtGm6pBstpUChAUQRWhBFmJ8siorLgMzIjOuoD2ec+fkbdfTnuDsuqPxQAR1HGBFQYUSQtazStHSlLV3Tplv2Pc12P78/7m1IQ5KmTU7OTe77+XjcB/fe8733vM+l937yPd9zvsfcHREREYCksAOIiEj8UFEQEZE+KgoiItJHRUFERPqoKIiISB8VBRER6aOiICIifVQURESkj4qCiIj0SQk7wMkqLCz0srKysGOIiEwoa9eurXX3ohO1m3BFoaysjIqKirBjiIhMKGZWOZJ22n0kIiJ9VBRERKSPioKIiPRRURARkT4qCiIi0kdFQURE+qgoiIhIHxUFEZE49NzOWtbvbxz39aooiIjEof/9wCY++LO/sKe2bVzXq6IgIhJnunsjVDV00NLZw0d+tZb2rp5xW7eKgohInDnQ0EFvxLl6+Uy2H2nhX+7fhLuPy7oDKwpmdoeZVZvZ5mHarDSz9Wa2xcyeDiqLiMhEUlnfDsD7L5jLZ95yGr9ff5BfvjCiqYtGLcgJ8e4CbgV+OdhCM8sDfgxc4e77zKw4wCwiIhNGZV10HKFsWiblc/NZv7+Rrzz0CstKp7JibkGg6w6sp+Duq4H6YZq8D7jf3ffF2lcHlUVEZCKprGsnIzWZopx0kpKM775nOaX5GTyzozbwdYc5dfZpQKqZPQXkAN9396F6FbcAtwDMmTNn3AKKiIShsq6dOQWZmBkAuRmpPPiJi5g6JTXwdYc50JwCrACuAt4G/KuZnTZYQ3e/3d3L3b28qOiE14gQEZnQKuvamDMt87jnxqMgQLhFoQr4k7u3uXstsBo4O8Q8IiKhi0ScffXtlA0oCuMlzKLwe+DNZpZiZpnABcDWEPOIiISuuqWTzp4Ic6ZlhbL+wMYUzOxuYCVQaGZVwBeBVAB3v83dt5rZn4CNQAT4mbsPefiqiEgiOHbk0dyCcHoKgRUFd79hBG2+BXwrqAwiIhNNZV30HIWykHoKOqNZRCSOVNa3kZJkzMybEsr6VRREROJIZV07pfkZpCSH8/OsoiAiEkeOnaMQFhUFEZE4UlnXFtp4AqgoiIjEjcb2LpqP9jA3pHMUQEVBRCRu7I0deaTdRyIi8to5Ctp9JCIi+9RTEBGRYyrr25k+NZ2MtOTQMqgoiIjEicq6NuYWhLfrCFQURETiRmVde6hHHoGKgohIXGjv6qG6pVNFQUQkEbV29vDKwea+x/vqY4PMIR55BCoKIiKh+N6fX+XKHzzD5x/YREdXb9/sqGFNmX1MmNdoFhFJWM/vqiMvM5X/+ss+XtpTz7lz8oHwpsw+Rj0FEZFx1ny0m22Hm7nxwjJ+efP5NLR3898V+8nNSCU3c3yuxTwU9RRERMbZusoGIg7nlRVw0aJC/vSpN/OF328mLzMt7GgqCiIi461ibwPJScY5c/IAKMxO58fvXxFyqqjAdh+Z2R1mVm1mg1532cxWmlmTma2P3b4QVBYRkXjy0t56zpg5laz0+Pu7PMgxhbuAK07Q5hl3Xx67fTnALCIicaGzp5cN+xs5r6wg7CiDCqwouPtqoD6o9xcRmYg2H2imsyfCeWX5YUcZVNhHH11oZhvM7GEzO2OoRmZ2i5lVmFlFTU3NeOYTERlTa/ZG/1YuT7SewgisA+a6+9nAD4HfDdXQ3W9393J3Ly8qKhq3gCIiY61ibz3zC7MozE4PO8qgQisK7t7s7q2x+38EUs2sMKw8IiJBi0ScNXsb4nY8AUIsCmY2w8wsdv/8WJa6sPKIiARtZ00rTR3dlMfpeAIEeJ6Cmd0NrAQKzawK+CKQCuDutwHvAj5iZj1AB/Bed/eg8oiIhO2lPdHxhPPnxW9PIbCi4O43nGD5rcCtQa1fRCTeVOytpygnPdTLbZ5I2EcfiYgkjDV7Gzi/rIDYnvO4pKIgIjIODjR2cKCxI67HE0BzH4mIBKKrJ8Ltq3ex5WAz+xva+66XEM9HHoGKgohIIO5bV8W3H32V+UVZzCnI5JzZ+ZxeMpUzZk4NO9qwVBRERALwm4r9LCrO5tFPXxzXYwgDaUxBRGSM7axu4eV9jby7fPaEKgigoiAiMubuXVtFcpJxzTmlYUc5aSoKIiJjqKc3wv3rDrBqcTFFOfE5v9FwVBRERMbQ6h011LR0cn35rLCjnBIVBRGRMXRvRRXTstK4dElx2FFOiYqCiMgYqW/r4rGtR7jmnFJSkyfmz+vETC0iEod+9/IBunt9wu46AhUFEZExc+/aKs4szWXJjPg+QW04KgoiImOgsq6NrYeaue7ciXcYan8qCiIiY2BtZQMAFy6YFnKS0VFREBEZA2srG8hJT2FRcU7YUUZFRUFEZAysrWxg+Zw8kpMm1rQWAwVWFMzsDjOrNrPNJ2h3npn1mtm7gsoiIhKklqPdbD/Swoq58X2thJEIsqdwF3DFcA3MLBn4BvBIgDlERAK1fn8j7qgoDMfdVwP1J2j2CeA+oDqoHCIiQVtb2YAZLJ+dF3aUUQttTMHMSoFrgdvCyiAiMhbWVjaweHoOOVNSw44yamEONH8P+Jy7956ooZndYmYVZlZRU1MzDtFEREYmEnHW72ucFLuOINwrr5UD98QuQFEIXGlmPe7+u4EN3f124HaA8vJyH9eUIiLD2FHdSktnj4rCaLn7vGP3zewu4KHBCoKISDw7dtKaisIJmNndwEqg0MyqgC8CqQDurnEEEZkU1lY2UJidxpyCzLCjjInAioK733ASbW8KKoeISJDW7WvgnDn5E+5azEPRGc0iIqeorrWTPbVtk2bXEagoiIicsnX7GoHJM54AKgoiIqdsbWUDqcnGmaW5YUcZMyoKIiKnaF1lA2fMzGVKanLYUcaMioKIyCmob+ti/f5Gzp9XEHaUMaWiICJyCu6t2E9Xb4R3njtxr8c8GBUFEZGTFIk4v35pH+eV5bN4xsS+qM5AKgoiIifp2Z21VNa184E3zA07yphTURAROUm/erGSgqw0rlg2I+woY05FQUTkJBxq6uDxbdVcXz6L9JTJc9TRMSoKIiIn4Z6X9hNx5/3nT75dR6CiICIyYt29Ee5Zs4+LFxUxZ9rkmABvIBUFEZERenxrNUeaO3n/BXPCjhIYFQURkRH6TcV+SnKncOmS4rCjBEZFQURkhI7NiJqSPHl/OifvlomIjLH6ti4KstLCjhEoFQURkRHo6Y3Q1NFNfqaKgohIwmto7wZgWraKwikxszvMrNrMNg+x/Goz22hm682swswuCiqLiMhoNbR3AainMAp3AVcMs/xx4Gx3Xw7cDPwswCwiIqNS3xYtChpTOEXuvhqoH2Z5q7t77GEW4EO1FRE
Jm4rCODCza81sG/A/RHsLIiJxSUVhHLj7A+6+BLgG+MpQ7czslti4Q0VNTc34BRQRiWmIFYW8zNSQkwQrLo4+iu1qWmBmhUMsv93dy929vKioaJzTiYhAfXsXOekpk3Jm1P5CKwpmttDMLHb/XCANqAsrj4jIcOrbusif5LuOAFJG0sjMFgBV7t5pZiuBs4BfunvjMK+5G1gJFJpZFfBFIBXA3W8D3gn8tZl1Ax3Ae/oNPIuIxBUVhePdB5Sb2ULg58AfgF8DVw71Ane/Ybg3dPdvAN8Y4fpFRELV0N5Fcc6UsGMEbqS7jyLu3gNcC3zP3T8NlAQXS0QkvtS3dk36E9dg5EWh28xuAG4EHoo9N7mH4EVE+qlv76Iga/L/7I20KHwIuBD4qrvvMbN5wK+CiyUiEj86uno52h3RmMIx7v4K8EkAM8sHctz960EGExGJF3VtnQBMS4CiMKKegpk9ZWZTzawA2ADcaWbfDTaaiEh8aGiLzpCqMYXX5Lp7M3AdcKe7rwDeElwsEZH4Ud+eGFNcwMiLQoqZlQDv5rWBZhGRhNCQIPMewciLwpeBR4Bd7r7GzOYDO4KLJSISP+oSqCiMdKD5XuDefo93Ez0jWURk0mto6yI5yZg6RYekAmBms8zsgdiV1I6Y2X1mNivocCIi8aC+vYv8zFSSkizsKIEb6e6jO4lObTETKAUejD0nIjLpJcrZzDDyolDk7ne6e0/sdhegOaxFJCHUtyfGZHgw8qJQa2YfMLPk2O0DaJprEUkQDW1dCXHiGoy8KNxM9HDUw8Ah4F1Ep74QEZn0EmXabBhhUXD3fe7+Dncvcvdid7+G6IlsIiKTWiTiNLR3UaAxhRP6zJilEBGJU81Hu4l4YpyjAKMrCpP/2CwRSXj1CXTiGoyuKOjSmSIy6R0rChpTAMysxcyaB7m1ED1nYbjX3hE72W3zEMvfb2YbY7fnzezsUWyHiEggjhWFRDn6aNhpLtw9ZxTvfRdwK/DLIZbvAS5x9wYzeztwO3DBKNYnIjLmGtoTq6cwormPToW7rzazsmGWP9/v4YuAps0QkbjTNxmejj4aV38DPBx2CBGRgRraupiSmkRGWnLYUcZFYD2FkTKzVUSLwkXDtLkFuAVgzpw545RMRATq27qZlpUedoxxE2pPwczOAn4GXO3uQ06b4e63u3u5u5cXFWnKJREZP/VtneRnTf4ps48JrSiY2RzgfuCD7v5qWDlERIZT396dMDOkQoC7j8zsbmAlUGhmVcAXgVQAd78N+AIwDfixmQH0uHt5UHlERE5FQ1sX86Zlhh1j3AR59NENJ1j+t8DfBrV+EZGx0JBAk+FB/Bx9JCISdzp7emnp7EmYw1FBRUFEZEiN7d0AFGSrKIiIJLz6BDtxDVQURESGlGiT4YGKgojIkBJtMjxQURARGVKiTYYHKgoiIkOqa40WhbwMndEsIpLwGtq7yMtMJSU5cX4qE2dLRURO0p7aNqbnTAk7xrhSURARGUR7Vw9/2VPPRYsKw44yrlQUREQG8dzOOrp6Ily2pDjsKONKRUFEZBBPbDtCdnoK5WUFYUcZVyoKIiIDuDtPbKvmzYsKSUtJrJ/JxNpaEZER2HKwmSPNnVyaYLuOQEVBROR1ntxWDcDKxSoKIiIJ7/Ft1Zw9O4+inMS5NvMxKgoiIv3UtnayoaqRSxOwlwAqCiIix3lqew3ucNnpKgpjyszuMLNqM9s8xPIlZvaCmXWa2WeDyiEicjKe3FZNcU46Z8ycGnaUUATZU7gLuGKY5fXAJ4FvB5hBRGTEunsjrH61hkuXFGNmYccJRWBFwd1XE/3hH2p5tbuvAbqDyiAicjLW7K2npbOHVQl4KOoxE2JMwcxuMbMKM6uoqakJO46ITFLP7KglJcm4aGFizXfU34QoCu5+u7uXu3t5UVFR2HFEZJLasL+RJSU5ZKWnhB0lNBOiKIiIBC0ScTZVNXHWrLywo4RKRUFEBNhT10ZLZw/LE7woBNZHMrO7gZVAoZlVAV8EUgHc/TYzmwFUAFOBiJl9Cljq7s1BZRIRGcrGqkYAzpqdG3KScAVWFNz9hhMsPwzMCmr9IiInY8P+JjJSk1lYlB12lFBp95GICNGewrLSqQl1PebBJPbWi4gQPWlty8HmhB9kBhUFERG2H26hsyfCWbMSezwBVBRERNhY1QTA8tnqKagoiEjC21jVSF5mKnMKMsOOEjoVBRFJeBuqmjizNDdhJ8HrT0VBRBJaR1cvrx5p4WwNMgMqCiKS4F451ERvxDXIHKOiICIJbcP+6CDz2RpkBlQURCTBbaxqZMbUKUyfOiXsKHFBRUFEEtqGqibtOupHRUFEElZTRzd7atu066gfFQURSVibYietqafwGhUFEUlYGw9Ep8s+s1RF4RgVBRFJWJuqmpg7LZO8zLSwo8QNFQURSVgbdfnN11FREJGEVNvayYHGDs7SrqPjqCiISELadCA6yHymBpmPE1hRMLM7zKzazDYPsdzM7AdmttPMNprZuUFlEREZaOP+JsxgmXoKxwmyp3AXcMUwy98OLIrdbgF+EmAWEZHjbDrQyIKibLLTA7tU/YQUWFFw99VA/TBNrgZ+6VEvAnlmVhJUHhGR/jZWNWk8YRBhjimUAvv7Pa6KPfc6ZnaLmVWYWUVNTc24hBORyetw01GqWzo1njCIMIvCYFez8MEauvvt7l7u7uVFRUUBxxKRyW5jVfSkNR2O+nphFoUqYHa/x7OAgyFlEZEEsulAE8lJxtKSqWFHiTthFoU/AH8dOwrpDUCTux8KMY+IJIgNVU0sKs4mIy057ChxJ7BhdzO7G1gJFJpZFfBFIBXA3W8D/ghcCewE2oEPBZVlvFW3HKW+rYvF03N0zVeROOPubKpq5K1LZ4QdJS4FVhTc/YYTLHfgY0GtfzzVtnbyqxcr2bC/kS0Hm6lu6QTgDfML+PyVSzWYJRJHqho6aGjv1vdyCAl3gK678+T2an705C52HGnhQ2+ax4cvnj/sscq7a1r51Yv7yM1I5ZpzZjJ3WhYAnT29/OL5vfzw8Z20dfWwqDiHixYVcsbMXCIR5ydP7+J/3fosVy+fyU1vLKPlaA+Hmjo41HSU7PQU3nH2TIrH4WpPkYjT2NFNXWsndW1dlE3LYkaurjIlienYmcyaLntwFv2DfeIoLy/3ioqKk35db8R5ePMhfvTkLrYeaqY0L4PFM3J4Yls107LS+PilC3nfBXNIT3ltH+OWg038+Kld/HHTIVKTkuiORHCHFXPzWbW4iHvXVlFZ186qxUV8/qqlLCzOPm6dzUe7ue2pXfz82T109kRelynJ4OLTinjXilm8cUEheRmpJCWdeHdTQ1sXeZmpw+6acnee3VnLD5/YScXeeiL9/jcnJxlvO2M6N71xHueV5WsXlySUrz28lTuf3cumf3vrcd/3yc7M1rp7+QnbJUpRuOelffzz/ZuYX5TFRy5ZwDXnlJKanMT6/Y184+FtvLC7juQkIyM1mSmpyaSnJHGgsYOc9BQ+eOFcbr5oHl09EX6//iAPvFzFq0daWViczf+56n
RWLi4edt2HmjpYV9lI8dT0vmvB7m9o5761VTzw8gEONR0Foj/WBVlpTMtK47yyAq47t5Tls/MwMyIR5/Ft1dy+ehdr9jYwpyCTK5bN4IplM1g+K4+kJMPd6e51nt9Vyw8e38G6fY2U5E7hmnNKmZ6TTkF2OnkZqTy3q5Z7XtpPU0c3p5dM5YyZU0lJMlKSjZSkJDp7euno6qWju5feiFM2LYvFM3JYMmMqi6ZnMyX15L5IDW1dPLOzlhd21ZGabJTkZjAzbwqz8jNZPjuP5BEUQpFT0Rtxvvvn7SwoyuavzppJWkoS7/vpi7R29vCHj18UdrxxpaIwQEdXL09ur+ZtZ8x43Y/Qsb+qX9xdx9HuCB3dvRzt6mVBcTYfeMNccjNSX9f+YNNRinPSSU0e3QFcvRHnxd11bD/cQl1bJ3WtXRxuPsoLu+ro7IkwvzCLtyydzuNbj7Crpo3SvAyuO7eUjVVNPL+rlu5eJzs9BXfnaE+E3liXoDQvg4+uWsC7Vswa9K+hjq5efr/+AHev2U9tSyfdvRF6Ik5Pb4T01GQy05LJiP3476lt6+vpmMGs/AzmF2azoCibOQUZFGSnk5+ZSn5mGr0R79tFdrCxgzV7G9hQ1Yg7TJ0S3UXXfLSnL8ei4mw+9ZbTePuyGSPqJYmcjP9es4/P3bcJgBlTp/ChN5Vx65M7ecfZM/nqtWeGnG58qShMcM1Hu3l40yHuX3eAv+ypZ2nJVP7ukvlcdWYJKbFC1NTRzRPbjrCuspG0lKRYLyeJWfmZXHlmCWkpY3PEcW/E2VvXxvbDLWw/3MLu2jZ217Syu6aNju7eIV+XnpLE0plTWXlaMRefVshZs6K9gtbOHg41drD5YBM/enIXO6tbWTIjh09ffhqXnz5dxUHGRFtnD6u+/RSz8jP4xKWL+Okzu3l+Vx0A33jnmbznvDkhJxxfKgqTSGtnD1lpyXG37z8Scerbu2hs76KhvZu61i6SDGbmZVCSO4WCrLQTZu6NOA9tPMj3HtvBnto2lszI4aOrFnLVmSXH9ejaOnto6+qhKDs97j4HiU/fe+xVvvfYDu77yBtZMTcfgM0HmvjzK0f42zfPI2dK6gneYXJRUZAJpac3wh82HOTHT0V7DvMLs/irs2dSWdfG5gNN7K5twx1y0lNYUJzNwuJsyufmc9VZJQn35ZYTO9J8lJXfeopLlxTzo/drVn5QUZAJKhJxHtlymB8+sZNXDjVTkjuFM2bmsqx0KnkZqeyqaWNndSs7qlupbe1kSmoSV55ZwrvLZ3PBvAL1IgSAz/12I/e/XMVjn7mk7xDyRDfSopBw5ylIfEtKMt5+ZglXLJtBa2fPkL0Ad2f9/kZ+U1HFgxsOcv+6AyyZkcPHVi3kygG7niSxbD3UzG/W7ufmN81TQTgF6inIhNfR1cv/bDrEbU9Hdz0tKMriY6sW8o6zZ/YNysvk19bZw9Ov1vDjp3ayr66d1f+0irzMtLBjxQ3tPpKE0xtx/rT5MD98YgfbDrcwKz+DD795Pu8un62JzyaxF3bV8fNnd7N6Ry1dPRHyM1P50jvO4Orlg16eJWGpKEjCikScx7Ye4band7FuXyP5manc+MYyrjtnFnOmZYYdT8ZQdfNRLvvO02SkJXPlmSW87YwZnFeWrx7iIFQURIA1e+u57aldPL6tGoAlM3J46xkzWLW4iJLcDPIyU487Q7s34rR39ZCekjxm53kMxd052h0hNdn0I3aKPnXPy/xx02Ee+fTFzCvU+MFwNNAsApxXVsB5NxWwv76dR7Yc5tEtR7j1iR384PEdfW3SU5LISEumvauXrtiZ2/mZqXx05UI+eOHcEU/rcezM+AdePkBHV2/sOeiJOG2dPbT2u7V39tDe3Ys7ZKYlc8G8At60sJCLFhUOOeW6u1PV0MGGqkbW72tk/f5G9tS2saw0lzcvGvq1zUe7eeyVI/xx0yGqGjrITk8he0oK2ekpnDUrl2vOKaU4Z+JNkPji7jp+t/4gn7h0oQrCGFJPQRJObWsnFXsbaGjvoqG9i6b2bjq6e8lISyYzNYXMtGSe2VnL6ldrmDF1Cp+8bBHXl88ackqT7t4ID208yO2r97D1UDP5makUZqdz7Lc5yYyc2I9w9pRUstKSyUxLISs9mYy0ZA43HeXZnbXsrmkDokViQVE284uymFeYRUNbF1sPtbD1cDMtsSlC0lKSWDZzKmWFWazf39j32rzMVEpyMyjKSac4Jz0679SOWrp6I8zMncLSmbm0d0ULU1NHN5V17SQnGasWF3F9+WwuXVI86qlbxkN3b4SrfvAMbZ29PPaZSzRmNALafSQySi/uruObf9rGun2NpKckcdr0nNjEgDl09kSorGtjb107O6tbqW/rYlFxNh++eD5XL595SrNvHmjs4LmdtbxysJndtW3sqm7lQGMHWWnJLCmZyukl0UkJz5qVy5IZU4/bvXWwsYNnd9Ty8v5GalqOUtPSSU1LJ8nJxluXzuCqs0r6Jk7sb1dNK/dWVHH/uiqqWzqZXZDBx1Yu5LpzZwW++2w0frp6N1/941Z++tflXL50ethxJgQVBZEx4O48/WoNz+6oZfuRFrYeaqG2NXoRpaKcdMqmZTJ3WhZXnjmDlacVj/m8TUe7e0lLTgp8Pqie3ghPbKvmR0/uZENVE6V5GfzdJfNZPjuPgqw0CrLSyEiNj6lWDjcd5bLvPMUF86fx8xvL4yLTRBAXRcHMrgC+DyQDP3P3rw9YPhe4AygC6oEPuHvVcO+poiBhq2/rIj0liaxhLsw0Ubk7q3fU8v3HXmXdvsbjlmWnp7BqSTHvOHsmF59WGMq1CKoa2vnIr9ax/UgLj336Eh1NdhJCLwpmlgy8ClwOVAFrgBvc/ZV+be4FHnL3X5jZpcCH3P2Dw72vioJI8NydLQebOdR0lIa2Lurbu9hT08ajrxymob2bqVNSeMvS6ZxfVsA5c/JZWJwd+FnkT79awz/c8zK9vc5337Ncu41OUjwcfXQ+sNPdd8cC3QNcDbzSr81S4NOx+08Cvwswj4iMkJmxrDSXZaXHX7Ly//Yu47mdtTy44RBPbDvC/esOANFexIq5+bxl6XQuP336mF7uNRJxfvjETr73+Kssnp7DTz6wQkcbBSjIolAK7O/3uAq4YECbDcA7ie5iuhbIMbNp7l4XYC4ROUWpyUmsXFzMysXFuDuVde2s29fAun0NPLezjn/93Wb+9XebOXt2Hlcum8G1547ucNfu3gif+c0GHtxwkOvOKeWr156pI40CFmRRGKwvOXBf1WeBW83sJmA1cADoGfgiM7sFuAVgzpzEujCGSLwyM8oKsygrzOK6c2fh7uyqaeWRLUd4dMthvvbwNr75yHZWLS7m3eWzWHWSh7se7e7l479ex2Nbq/ncFUv4+0vma1B5HAQ5pnAh8CV3f1vs8b8AuPvXhmifDWxz91nDva/GFEQmhp3Vrfx2bRX3rauipqWTktwp3PjGMm44bw65mcNfA6Ots4db/rOC53fV8ZWrl/GBN8wdp9STVzwMNKcQHWi+jGgPYA3wPnff0q9NI
VDv7hEz+yrQ6+5fGO59VRREJpae3ghPbq/hzuf28PyuOjLTkrl+xSwuPq2IeYVZzC7IJDU5ia7YuR87qlv56TO72VjVxLevP4trzxn270QZodAHmt29x8w+DjxC9JDUO9x9i5l9Gahw9z8AK4GvmZkT3X30saDyiEg4UpKTuHzpdC5fOp0tB5v4+bN7+PVL+/jFC5UAJCcZxTnp1LR00hOJ/pE6JTWJH73vXK5YNiPM6AlJJ6+JyLhr6uhmZ3Ure2vb2FPbxoHGDkpyp7BoejaLinNYUJStAeUxFnpPQURkKLkZqayYm8+KuflhR5EB4ndyExERGXcqCiIi0kdFQURE+qgoiIhIHxUFERHpo6IgIiJ9VBRERKSPioKIiPSZcGc0m1kTsGOQRblA0zDPDVx+7PFgbQqB2lOMOFiOkSw/Uf6Bjwe7r/zxkR9OfRtOlH+4NsPlHfh4Mubvfz8e8g+Xs/+FCVNtAAAH0klEQVTj8foNmuvuRSd8tbtPqBtw+0if7//cwOXHHg/WhujcTGOab7T5h9uegdui/OHmH802nCj/yWxDouUfi39DY5l/uJzDfO6BfwdOdJuIu48ePInnHxxm+YMjaHMqTvQep5p/4OPB7iv/5M8/XJvh8g58PBnzj3T9wxnL/AOfi5ffoGFNuN1H48HMKnwEE0fFK+UP30TfBuUPV5j5J2JPYTzcHnaAUVL+8E30bVD+cIWWXz0FERHpo56CiIj0mfRFwczuMLNqM9t8Cq9dYWabzGynmf3A+l013Mw+YWbbzWyLmX1zbFMfl2HM85vZl8zsgJmtj92uHPvkfRkC+fxjyz9rZh67rGsgAvr8v2JmG2Of/aNmNnPsk/dlCCL/t8xsW2wbHjCzvLFPflyOILbh+th3N2JmY77vfjSZh3i/G81sR+x2Y7/nh/2OnJLRHLo0EW7AxcC5wOZTeO1LwIWAAQ8Db489vwp4DEiPPS6eYPm/BHx2on7+sWWziV7qtRIonEj5gan92nwSuG2C5X8rkBK7/w3gGxPt3xBwOrAYeAooj5fMsTxlA54rAHbH/psfu58/3PaN5jbpewruvhqo7/+cmS0wsz+Z2Voze8bMlgx8nZmVEP3yvuDRT/+XwDWxxR8Bvu7unbF1VE+w/OMmwPz/AfwTEOigWBD53b25X9MsAtyGgPI/6u49saYvArOCyh/gNmx19+3xlnkIbwP+7O717t4A/Bm4Iqjv+KQvCkO4HfiEu68APgv8eJA2pUBVv8dVsecATgPebGZ/MbOnzey8QNO+3mjzA3w81v2/w8zG+5qIo8pvZu8ADrj7hqCDDmHUn7+ZfdXM9gPvB74QYNbBjMW/n2NuJvoX6ngby20YLyPJPJhSYH+/x8e2I5DtS7hrNJtZNvBG4N5+u9/SB2s6yHPH/qJLIdqNewNwHvAbM5sfq9aBGqP8PwG+Env8FeA7RL/cgRttfjPLBD5PdBfGuBujzx93/zzweTP7F+DjwBfHOOqgxip/7L0+D/QA/zWWGU9kLLdhvAyX2cw+BPxD7LmFwB/NrAvY4+7XMvR2BLJ9CVcUiPaOGt19ef8nzSwZWBt7+AeiP5z9u8WzgIOx+1XA/bEi8JKZRYjOVVITZPCYUed39yP9XvdT4KEgAw8w2vwLgHnAhtiXaxawzszOd/fDAWeHsfn309+vgf9hnIoCY5Q/Ntj5V8Bl4/HH0ABj/f9gPAyaGcDd7wTuBDCzp4Cb3H1vvyZVwMp+j2cRHXuoIojtG+sBlni8AWX0G/ABngeuj9034OwhXreGaG/g2CDOlbHn/x74cuz+aUS7djaB8pf0a/Np4J6J9PkPaLOXAAeaA/r8F/Vr8wngtxMs/xXAK0BRkLnH498QAQ00n2pmhh5o3kN070R+7H7BSLbvlHKP1//UsG7A3cAhoJtoZf0bon9p/gnYEPvH/YUhXlsObAZ2Abfy2sl+acCvYsvWAZdOsPz/CWwCNhL9i6pkIuUf0GYvwR59FMTnf1/s+Y1E56kpnWD5dxL9Q2h97BbY0VMBbsO1sffqBI4Aj8RDZgYpCrHnb4597juBD53Md+RkbzqjWURE+iTq0UciIjIIFQUREemjoiAiIn1UFEREpI+KgoiI9FFRkEnBzFrHeX0/M7OlY/RevRadMXWzmT14ollHzSzPzD46FusWGUiHpMqkYGat7p49hu+X4q9N+hao/tnN7BfAq+7+1WHalwEPufuy8cgniUU9BZm0zKzIzO4zszWx25tiz59vZs+b2cux/y6OPX+Tmd1rZg8Cj5rZSjN7ysx+a9HrB/zXsfnqY8+Xx+63xia422BmL5rZ9NjzC2KP15jZl0fYm3mB1yb+yzazx81snUXnzL861ubrwIJY7+Jbsbb/GFvPRjP7tzH8GCXBqCjIZPZ94D/c/TzgncDPYs9vAy5293OIzlD67/1ecyFwo7tfGnt8DvApYCkwH3jTIOvJAl5097OB1cCH+63/+7H1n3BOmtjcPZcRPcsc4ChwrbufS/QaHt+JFaV/Bna5+3J3/0czeyuwCDgfWA6sMLOLT7Q+kcEk4oR4kjjeAiztNyvlVDPLAXKBX5jZIqKzSqb2e82f3b3/PPgvuXsVgJmtJzqfzbMD1tPFa5MKrgUuj92/kNfmt/818O0hcmb0e++1ROfLh+h8Nv8e+4GPEO1BTB/k9W+N3V6OPc4mWiRWD7E+kSGpKMhklgRc6O4d/Z80sx8CT7r7tbH980/1W9w24D06+93vZfDvTLe/Njg3VJvhdLj7cjPLJVpcPgb8gOi1FoqAFe7ebWZ7gSmDvN6Ar7n7/zvJ9Yq8jnYfyWT2KNFrFQBgZsemLc4FDsTu3xTg+l8kutsK4L0nauzuTUQvz/lZM0slmrM6VhBWAXNjTVuAnH4vfQS4OTZnP2ZWambFY7QNkmBUFGSyyDSzqn63zxD9gS2PDb6+QnTKc4BvAl8zs+eA5AAzfQr4jJm9BJQATSd6gbu/THQWzfcSvXhNuZlVEO01bIu1qQOeix3C+i13f5To7qkXzGwT8FuOLxoiI6ZDUkUCErtKXIe7u5m9F7jB3a8+0etEwqQxBZHgrABujR0x1Mg4XfJUZDTUUxARkT4aUxARkT4qCiIi0kdFQURE+qgoiIhIHxUFERHpo6IgIiJ9/j+RAs6Sj5jeEQAAAABJRU5ErkJggg==\n",
382 | "text/plain": [
383 | ""
384 | ]
385 | },
386 | "metadata": {},
387 | "output_type": "display_data"
388 | }
389 | ],
390 | "source": [
391 | "learn.recorder.plot()"
392 | ]
393 | },
394 | {
395 | "cell_type": "code",
396 | "execution_count": 23,
397 | "metadata": {},
398 | "outputs": [
399 | {
400 | "data": {
401 | "text/html": [
402 | "Total time: 05:11 \n",
403 | " \n",
404 | " | epoch | \n",
405 | " train_loss | \n",
406 | " valid_loss | \n",
407 | " | \n",
408 | "
\n",
409 | " \n",
410 | " | 1 | \n",
411 | " 0.525171 | \n",
412 | " 0.736362 | \n",
413 | " 0.532685 | \n",
414 | "
\n",
415 | " \n",
416 | " | 2 | \n",
417 | " 0.409036 | \n",
418 | " 0.425513 | \n",
419 | " 0.759533 | \n",
420 | "
\n",
421 | " \n",
422 | " | 3 | \n",
423 | " 0.369458 | \n",
424 | " 0.314608 | \n",
425 | " 0.868093 | \n",
426 | "
\n",
427 | " \n",
428 | " | 4 | \n",
429 | " 0.328593 | \n",
430 | " 0.296097 | \n",
431 | " 0.857588 | \n",
432 | "
\n",
433 | "
\n"
434 | ],
435 | "text/plain": [
436 | ""
437 | ]
438 | },
439 | "metadata": {},
440 | "output_type": "display_data"
441 | }
442 | ],
443 | "source": [
444 | "learn.fit_one_cycle(4, 1e-2)"
445 | ]
446 | },
447 | {
448 | "cell_type": "code",
449 | "execution_count": 24,
450 | "metadata": {},
451 | "outputs": [],
452 | "source": [
453 | "learn.save(f'{name}-stage-1')"
454 | ]
455 | },
456 | {
457 | "cell_type": "code",
458 | "execution_count": 25,
459 | "metadata": {},
460 | "outputs": [],
461 | "source": [
462 | "learn.unfreeze()"
463 | ]
464 | },
465 | {
466 | "cell_type": "code",
467 | "execution_count": 26,
468 | "metadata": {},
469 | "outputs": [],
470 | "source": [
471 | "max_lr = 5e-4\n",
472 | "lrs = [max_lr/100, max_lr/10, max_lr]"
473 | ]
474 | },
475 | {
476 | "cell_type": "code",
477 | "execution_count": 27,
478 | "metadata": {},
479 | "outputs": [
480 | {
481 | "data": {
482 | "text/html": [
483 | "Total time: 15:46 \n",
484 | " \n",
485 | " | epoch | \n",
486 | " train_loss | \n",
487 | " valid_loss | \n",
488 | " | \n",
489 | "
\n",
490 | " \n",
491 | " | 1 | \n",
492 | " 0.299970 | \n",
493 | " 0.285136 | \n",
494 | " 0.863424 | \n",
495 | "
\n",
496 | " \n",
497 | " | 2 | \n",
498 | " 0.286753 | \n",
499 | " 0.260144 | \n",
500 | " 0.887743 | \n",
501 | "
\n",
502 | " \n",
503 | " | 3 | \n",
504 | " 0.277695 | \n",
505 | " 0.269493 | \n",
506 | " 0.872763 | \n",
507 | "
\n",
508 | " \n",
509 | " | 4 | \n",
510 | " 0.259490 | \n",
511 | " 0.234493 | \n",
512 | " 0.895720 | \n",
513 | "
\n",
514 | " \n",
515 | " | 5 | \n",
516 | " 0.229194 | \n",
517 | " 0.224973 | \n",
518 | " 0.912257 | \n",
519 | "
\n",
520 | " \n",
521 | " | 6 | \n",
522 | " 0.217003 | \n",
523 | " 0.232760 | \n",
524 | " 0.897082 | \n",
525 | "
\n",
526 | " \n",
527 | " | 7 | \n",
528 | " 0.202161 | \n",
529 | " 0.215272 | \n",
530 | " 0.907977 | \n",
531 | "
\n",
532 | " \n",
533 | " | 8 | \n",
534 | " 0.203944 | \n",
535 | " 0.228468 | \n",
536 | " 0.894163 | \n",
537 | "
\n",
538 | " \n",
539 | " | 9 | \n",
540 | " 0.201418 | \n",
541 | " 0.222140 | \n",
542 | " 0.896498 | \n",
543 | "
\n",
544 | " \n",
545 | " | 10 | \n",
546 | " 0.198599 | \n",
547 | " 0.217933 | \n",
548 | " 0.899416 | \n",
549 | "
\n",
550 | "
\n"
551 | ],
552 | "text/plain": [
553 | ""
554 | ]
555 | },
556 | "metadata": {},
557 | "output_type": "display_data"
558 | }
559 | ],
560 | "source": [
561 | "learn.fit_one_cycle(10, lrs)"
562 | ]
563 | },
564 | {
565 | "cell_type": "code",
566 | "execution_count": 28,
567 | "metadata": {},
568 | "outputs": [],
569 | "source": [
570 | "learn.save(f'{name}-stage-2')"
571 | ]
572 | },
573 | {
574 | "cell_type": "code",
575 | "execution_count": 29,
576 | "metadata": {},
577 | "outputs": [
578 | {
579 | "data": {
580 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYsAAAEKCAYAAADjDHn2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzsnXd4VcXWh99J75BAqAFClRoghCDSiwiiYEEpchUb115QP7EjNsSGXlGuegE7ioqigCAKUpQSeu8BAkgJNaQn8/2xT83ZJ/2ksd7nOc/de/bM3nNyZa8za9b6LaW1RhAEQRDyw6u8JyAIgiBUfMRYCIIgCAUixkIQBEEoEDEWgiAIQoGIsRAEQRAKRIyFIAiCUCBiLARBEIQCEWMhCIIgFIgYC0EQBKFAfMp7AqVFzZo1dXR0dHlPQxAEoVKxbt26U1rryIL6VRljER0dTUJCQnlPQxAEoVKhlDpYmH7ihhIEQRAKRIyFIAiCUCBiLARBEIQCqTJ7FoIgVB2ysrJISkoiPT29vKdSZQgICCAqKgpfX99ijRdjIQhChSMpKYnQ0FCio6NRSpX3dCo9WmuSk5NJSkqicePGxbqHuKEEQahwpKenU6NGDTEUpYRSiho1apRopSbGQhCECokYitKlpH/PS95YXMzI5u1Fu9h4+Gx5T0UQBKHCcskbi7SsHN77Yy+bk8RYCIJgkJycTIcOHejQoQN16tShfv36tvPMzMxC3eP2229n165dHp5p2XHJb3BbF2Zal+s0BEGoQNSoUYONGzcCMGHCBEJCQnj88ced+mit0Vrj5WX+m3vGjBken2dZcsmvLKx+PC3WQhCEAti7dy9t27blnnvuITY2lmPHjjF27Fji4uJo06YNEydOtPXt3r07GzduJDs7m+rVqzN+/Hjat29P165dOXHiRDl+i+IhK4vynoAgCPny4s/b2H70fKnes3W9MF64tk2xxm7fvp0ZM2Ywbdo0ACZNmkRERATZ2dn06dOHYcOG0bp1a6cx586do1evXkyaNIlx48Yxffp0xo8fX+LvUZZc8isLK7KuEAShMDRt2pTOnTvbzr/++mtiY2OJjY1lx44dbN++3WVMYGAggwYNAqBTp04kJiaW1XRLDVlZWJYW4oUShIpJcVcAniI4ONh2vGfPHt59913WrFlD9erVGT16tGkug5+fn+3Y29ub7OzsMplraXLJryyUxRE1eeHOUrnfr1uPkXQmtVTuJQhCxeb8+fOEhoYSFhbGsWPHWLhwYXlPyWNc8isL66ZFelZuqdzuni/WA9C2fhi/PNijVO4pCELFJDY2ltatW9O2bVuaNGlCt27dyntKHuOSNxalmSSak2v3ZW09cp6cXI23l2yhC0JlZsKECbbjZs2a2UJqwYim/Pzzz03HrVixwnZ89qw9j2vEiBGMGDGi9CfqYcQN5XDs+LIvDlk5zquTFXtPleh+giAIFQUxFg5LizEz1pToXt+sPex0LrkbgiBUFTxqLJRSA5VSu5RSe5VSLkHFSql7lFJblFIblVIrlFKtLe1XKqXWWa6tU0r19eQ8rSzfU/yVwF97T/HC3G1ObRfSK1/EgyAIghkeMxZKKW9gKjAIaA2MtBoDB77SWrfTWncAJgNvW9pPAddqrdsBtwHmTsHSmGcJxq7an0z0+Hms2HOKUZ+sdrn+4NcbSEg8XYInCIIgVAw8ubKIB/ZqrfdrrTOBWcBQxw5aa8e0zGAsuXFa6w1a66OW9m1AgFLK3xOTLMkG94iPVgEw+n+uhsLKsGl/iztKEIRKjyeNRX3A0YmfZGlzQil1v1JqH8bK4iGT+9wIbNBaZ3hikqoEa4sbOrp8HQBqhjjbteSLmew7mcKZi4VTqxQEQahoeNJYmL2FXX5ia62naq2bAk8CzzrdQKk2wOvAv00foNRYpVSCUirh5MmTxZtkCVYWgX7epu2f3tHZ6Tzu5cX0e+tPBkxZxs+bjspKQxAqOL1793ZJsJsyZQr33Xef2zEhISEAHD16lGHDhrm9b0JCQr7PnjJlCqmp9sTeq6++2in0trzwpLFIAho4nEcBR930BcNNdZ31RCkVBcwBbtVa7zMboLX+SGsdp7WOi4yMLIUpF431h8z/D2xSM4T7+zR1aT95IYMHv97AD+uPeHpqgiCUgJEjRzJr1iyntlmzZjFy5MgCx9arV4/vvvuu2M/Oayzmz59P9erVi32/0sKTxmIt0Fwp1Vgp5QeMAOY6dlBKNXc4HQzssbRXB+YBT2mtV3pwji4ri4k/byc9K6dQY3ccc1XCTJw0mEA/bx7q15yxPZuYjjt0WuRABKEiM2zYMH755RcyMgzvd2JiIkePHqVDhw7069eP2NhY2rVrx08//eQyNjExkbZt2wKQlpbGiBEjiImJYfjw4aSlpdn63XvvvTZp8xdeeAGA9957j6NHj9KnTx/69OkDQHR0NKdOGZGab7/9Nm3btqVt27ZMmTLF9rxWrVpx991306ZNGwYMGOD0nNLCYxncWutspdQDwELAG5iutd6mlJoIJGit5wIPKKX6A1nAGYzIJ4AHgGbAc0qp5yxtA7TWHheBn77yAHWq+TO2p+vKoCC6NathO/b38eaamLp8tGy/S793f9/Do1e2KNE8BeGSYcF4+GdL6d6zTjsYNMnt5Ro1ahAfH8+vv/7K0KFDmTVrFsOHDycwMJA5c+YQFhbGqVOnuPzyyxkyZIjb+tYffvghQUFBbN68mc2bNxMbG2u79sorrxAREUFOTg79+vVj8+bNPPTQQ7z99tssWbKEmjVrOt1r3bp1zJgxg9WrV6O1pkuXLvTq1Yvw8HD27NnD119/zccff8zNN9/M999/z+jRo0vnb2XBo3kWWuv5WusWWuumWutXLG3PWwwFWuuHtdZttNYdtNZ9tNbbLO0va62DLe3Wj0cMhdkGd0YhdaKa1QpxOp95e7zTuZ/PJZ/zKAiVFkdXlNUFpbXm6aefJiYmhv79+3PkyBGOHz/u9h7Lli2zvbRjYmKIiYmxXfv222+JjY2lY8eObNu2zVTa3JEVK1Zw/fXXExwcTEhICDfccAPLly8HoHHjxnTo0AHwnAS6aEOZ/CDIKqTsR7aDvMdH/+qEr7ezcWhZJ6xEcxMEgXxXAJ7kuuuuY9y4caxfv560tDRiY2OZOXMmJ0+eZN26dfj6+hIdHW0qSe6I2arjwIEDvPnmm6xdu5bw8HDGjBlT4H3yC4zx97dHYHp7e3vEDXXJ//Q1Wzzm1Xhyh6NSbUSwn2mfcW7cTSJjLggVm5CQEHr37s0dd9xh29g+d+4ctWrVwtfXlyVLlnDw4MF879GzZ0++/PJLALZu3crmzZsBQ9o8ODiYatWqcfz4cRYsWGAbExoayoULF0zv9eOPP5KamsrFixeZM2cOPXqUnbK1GAsTq5+Wad/gfmL2Jv7z+x4XA7I56Sz/nLf/EqgeZG4sujatYdq+ogTSIoIglA0jR45k06ZNNpXYW265hYSEBOLi4vjyyy9p2bJlvuPvvfd
eUlJSiImJYfLkycTHG67q9u3b07FjR9q0acMdd9zhJG0+duxYBg0aZNvgthIbG8uYMWOIj4+nS5cu3HXXXXTs2LGUv7F7VFWJ+Y+Li9MFxS+bkZurafL0fKe2EZ0bMOlGw7cYPX6erX3p472Jrhns1N6yTiiTboyhQwP3oW0bDp2hVlgA3Sb9YWtrH1WNnx7oXuT5CsKlwI4dO2jVqlV5T6PKYfZ3VUqt01rHFTRWVhYmfqhj58x9h2ahslqTr6EA6NgwnPrVA53a4htHFH6SgiAI5cwlbyzM+HO3eTb4ruMX+HmTc17hXT0aF+sZHy8/wK9bjxVrrCAIQllzyRsLd/HRZkxZvIcHv94AQKu6YVzZujY3xTUoYJSdxEmDeaBPM9v5vV+uL/xEBeESo6q4yCsKJf17XvLGIj/c/XHjXv6N82lZBPiaa0Plx2MD7NFR8m9BEMwJCAggOTlZDEYpobUmOTmZgICAYt/jks+zcMfPm47SrVlN02unUgz12KZ5kvIKg+NKxl+S9gTBlKioKJKSkiiuQKjgSkBAAFFRUcUeL8bCgYFt6vDrtn8Ao3DRU4PyD4vbeuRciZ7nTrVWEC51fH19ady4ePuBgmeQn7YOvHJ9W6fz1xbszLf/jbHm9SwKy9nUrBKNFwRBKCvEWDjg41W0P8eTA/NfeQiCIFQVxFg44OOtuKu769L3wb7NTHqDj3fJ/3yF2cDbcew8P25wroGRlplD9Ph53P1Z0RMRBUEQiooYCwd8vJUtQ9uR266IdmmbNjrWpa04pFlqZ6Rn5TBpwU5SM7NJy8zh8dmb2HrkHKv2JzPo3eU88s1Gp3G/7TCULn/bfpy/9op0iCAInkU2uB3w9fLC28s17yLA15s7uzfmfysO2NoGtq1bKs9MSc8myM+H//yxh2l/7mP1gWTaR1Xnu3VJfLcuyalvZnauTfbccUUy6pPVJE4aXCrzEQRBMENWFg54mRgKAG+lePrqVsy4vbPp9aLy8wPdubVrIwBSMrIBmLrEqBy74dBZZv6VaDruXJp9Q7ywNTcEQRBKAzEWebiuQ3388uxFeHspvL0UvVtE8nC/5qx5pl+JntEuqho9mxs1w6/9zwp2/nOe/q1qFzjOsdzr/32/2elaRnbhSsEKgiAUB48aC6XUQKXULqXUXqXUeJPr9yiltiilNiqlViilWjtce8oybpdS6ipPztORQD9vdr080KnN6ppSSvHolS2oFVr8LEgrIQGGB/BiZg4Dpyxn8Q731bas5GcQUtKzSzwnQRAEd3jMWCilvIGpwCCgNTDS0RhY+Epr3U5r3QGYDLxtGdsaGAG0AQYCH1juVybk1Ysy28coKSH+Rd8uSst073pydFEJgiCUNp5cWcQDe7XW+7XWmcAsYKhjB621o+Z3MGDdtR0KzNJaZ2itDwB7Lfcrc5646jKP3Lc4xiLdZGUxoLXhvlq6S2QRBEHwHJ40FvWBww7nSZY2J5RS9yul9mGsLB4qytiy4Ao3le5KitUNVRQcK/j1b1WLID9vxlmECSf+kn+xd0EQhJLgSWNh5rtxyUDTWk/VWjcFngSeLcpYpdRYpVSCUirBU4Jj/j6e8X45riwKu8rYePis7TgzR9O8dijVA83LuZYVogoqCJcGnjQWSYBjsYco4KibvmC4qa4rylit9Uda6zitdVxkZGQJp2uOv69n/kQBvt58eEssbeqF2cJnrTx/Td6tHYO3f9ttO07PyiHAx4vAYsiklyaNn5pP0zxlaQVBqHp40lisBZorpRorpfwwNqznOnZQSjV3OB0M7LEczwVGKKX8lVKNgebAGg/O1S2elBEf1K4uTSNdZc7XJp52Or/boRpfbq7xSz4jK4cAX2+PGbPCkJVjbLjn5MrqQhCqOh5702its4EHgIXADuBbrfU2pdREpdQQS7cHlFLblFIbgXHAbZax24Bvge3Ar8D9WutySSTwlBvKSligqwuqee1QFo/rCcBLQ9vwzGD7SuNiZjanL2ayKekcK/eecirAtPfEhSI9+/DpVKLHz2NLUvGk1tMc8j4kz0MQqjYelfvQWs8H5udpe97h+OF8xr4CvOK52dmZNjqW5rVDTa/5ebhAUV5JD4Cr2tSmWa1QDrx2tUsY7+erDhLXKAKAbMsv+mcHt+LleTvo//ayIsl+LNl1AoBZaw/RLqpdkeee7rDhvi7xDFe4KRYlCELlR7ShMNd5+vupvizddZJqgb4efXb3ZpG2hLwnrrqMHs1r0qZeNcC8PvjkX3cxqktDp7aM7MJJf6Rn5ZCZk0tYgPGdlu02ggKK60ayVgwE2HX8ghgLQajCiNyHG+pWC2RkfMOCO5aQN4bF2I5v6hRFTFR1035NIu1quF+tPuR07WxqZt7uptzwwV/ETFhkO1+8w1hZZBfTWLy1aJftuLw32gVB8CxiLMqZ8GA/WtYxXGBB+YTQXt/BNc3k3REdABjVpVGBz7mYkc32Y/YcyBV77LLmZq6wwhDgUBY2Sza5BaFKI8aiAvDJbXG8O6JDvvkWw+MbuLSFWhL7GtcMZniccT3X5KV9IT2LNi8stJ3n5GoOnU4t6bSZt/mY7fi5H7dyKiWjxPcUBKFiIsaiAhAVHsRQk5WDI2bihd4OZWAbW9xUZpIgJy84v8RT0rNpGBFU6PldSM8qVPLdqI9XFfqegiBULsRYVCLyypj7OAgcZlk2uR/6egMbDp1x6nfg1EWn8y9WHyQzx25UHPdD8rLvZArtJizi0zw1NvIaIIDdx1Py/wKCIFRaxFhUIl4c2sbp3FEN9w9LGOziHSe4/oO/+GPncVv9izs/da7T/cbCXZy5aFepzcwnmuq1+TsA+NnB5QQwY+UBs+6CIFRRxFhUIupXD3Q6d1xZXBtTz+naHTMTeCkfccHHZm8CYHC7ui5yI47ENzZyOtrVr+bUfuRsWuEmLQhClUCMRSVj6eO9bceOCYNjroh26fvl6kNEj5+X7/3Cg305m+p+T+LV+TsBWH/oDNHj5xE9fh5pmTnENQoHYOX4vux79WoA4qMjivJVBEGoRIixqGRE1wwmuoaxOR3sED3lrn54QXyxysjZ+Gbt4Xz7bXaQBDmdmslzP20DoEawH95einb1qxHsL7kWglBVEWNRCYkM9QdwqRVeEsb/sKXQfZfvtsvBW7WpAn29nbSiBEGoWoixqIRMHRXLS9e1pUERwl+/GXs5U0fFurRf39EI2fX2UmRk53Axn/0LK+kmRiHAz9upOJMgCFULMRblSU42pJ0puF8eaoUF8K/LXbO2h3aoZ9LbIMDXm8ExrhpYEy0RVqPiGzLo3eVOyXsAwX6urqVUE2Oxan8ym5LO8di3mwqcvyAIlQ8xFuXJT/fDp9dC2tmC+xaCl65ry4Rr7XLmjpvh1lXItNHOq4vQAF/Cgwxhwf0nnfMxHMc5YpUHcTRO1vDb79cnmWaRC4JQuRFjUZ7E3AQndsKXN0FGyRPawgJ8GdPNXijJugEe4u9DRLBRftVMYTckwMdt+GxmjmsOhtWo3Nm9scs1gHd/32PaLghC5UWMRXnSrD8Mmw5HEu
CbWyArvVRvH+Lvw7In+vDrIz2c2h/t38Lp/PDpNOZsOGI7dwyjdUzYa1MvzGmcY+ElRzX1d3/fk2/uhiAIlQ8xFuVN6yEw9APYvxS+uwNysgocUlgCfL1oWCOIqHBnV9Id3aPzHZeYbBcZdKyV8d7IjjSvZS8D6yhLvuBhZ4P0wZK9xZmyIAgVFI8aC6XUQKXULqXUXqXUeJPr45RS25VSm5VSvyulGjlcm2wpubpDKfWeMqsEVFXoMBKufhN2zYMf74PcwhUzKgh3f7LQAF/WPduf7ROvMr2e7eB6SknPxtfbuE+dsACn1YRj/e+WdcJ4qJ+9pPoHS/eVaO6CIFQsPGYslFLewFRgENAaGKmUap2n2wYgTmsdA3wHTLaMvQLoBsQAbYHOQC9PzbVCEH839HsBtnwL8x+DQqi8loQaIf4E+ZlLon/290EAUjOzScvK4bEBl5E4aTDB/j5OVfUC8hQ8smZ1uyM3V/PduiSyTPZBBEGo2HhyZREP7NVa79daZwKzgKGOHbTWS7TWVp/HKiDKegkIAPwAf8AXOO7BuVYMeoyD7uMgYTr89nyxDcbQDvVcdKSKgvVlvuPYBcDI0rbiWEApwMfZWPRsEel0Hj1+HnuOX7Cd/7bjOI/P3sR7sgEuCJUOT9bgrg84akgkAV3y6X8nsABAa/23UmoJcAxQwPta6x15ByilxgJjARo29HwJ1DKh3/OQcQH+eg8CwqDnE0W+xbsjOhapv5+3l1PUU3zjCM6mZnLjh38B7mt8W91T+fHq/B3MuD0eMFxaAElnRIRQECobnlxZmL1JTH8qK6VGA3HAG5bzZkArjJVGfaCvUqqny820/khrHae1jouMjMx7uXKiFAyaDDEj4I+XYdU0jz/yx/u7OZ2nZ+Uy4iN7IaP64earlMJsI6U6ZHX7WIxLcWt+C4JQfnjSWCQBjrVAo4CjeTsppfoDzwBDtNbWijrXA6u01ila6xSMFcflHpxrxcLLC4ZOhZbXwK9PwoYvPPq41nlCYp+es4Wd/9jdR71bFN4Q77co0FpZfeC07XjRdsOTeORMyUu6CoJQtnjSWKwFmiulGiul/IARwFzHDkqpjsB/MQzFCYdLh4BeSikfpZQvxua2ixuqSuPtY+RgNO0Lcx+EbXPKbSqOK4gWtY3Q2XrVXMu8Qv7qt9aa3esPlU7GuiAIZYfHjIXWOht4AFiI8aL/Vmu9TSk1USk1xNLtDSAEmK2U2qiUshqT74B9wBZgE7BJa/2zp+ZaYfHxh+FfQIMu8P3dsHuRxx5lVg8DoJlDXgXA9DGd+XevJqx4sq/be704xLmiX5KsJASh0uPJDW601vOB+Xnannc47u9mXA7wb0/OrdLgFwyjvjE0pL79F4z+HqK7l9njFz3ivFUUFR7EU4Na5TvmYqZz9nb315dw4LWr3fQWBKEyIBnclYGAajB6DoRHw1fDIWldqT/ihtj61AlzdS0Vp6jSqv2nXdpGfrzK6TxHNrkFoVIhxqKyEFwD/vUjBNeEL26A49tK9fYxUdVZ9XQ/Zt/T1db2y4PFW8GYCQyu2n/aST+q6dPzuZBeetImgiB4FuWu9nJlIy4uTickJJT3NDzPmUSYPhByc+COX6FG0/KekSk5uZpnf9zK12uMsq1Xtq7Nb9uP06ZeGNuOGol9oQE+DO1Qj5eva1eeUxWESxql1DqtdVxB/WRlUdkIj4ZbfwKdA58NhXNJ5T0jU7y9FBOG2NVdfrOEzTZwEDW8kJ5tqwEuCELFRoxFZSTyMvjXHEg/bxiMlBMFjykH/H1cq+y5S/ATBKFiI8aislK3PdzyLZw/Cp9fX6zyrGXBiif7OJ2bbaILglDxEWNRmWl4OYz4Ek7thi+GGZpSFYy8tTR8TPSkRIVWECo+YiwqO037wrAZcHQDfD2y1KvtlQbD4xrgRS69vDZxxfrH+az5Mqfrn/6VWD4TEwSh0IixqAq0ugau+xASl8PsMaVaba/EnDvCqPRZLPN/hE/9XqfJ6WX0PDyNK7y22rq8PK/4Si7/nEvn8GnJEBcETyPGoqrQfjgMfgt2L4A59xihteVFTjbsnG8kEE5pS/u9U9mfW5f7Mh/i1TZzoUZzpoVOZ0Azu4vqzMXMIj9Ga83lr/1Oj8lLSnP2giCY4FG5D6GM6XwXZKTA4hcMmZBr34WyrEZ75iBs+NxQyb1wDELqQPdxLA8dyK0/GBFbI1QwXPchYdMH8HrILBZhyISdTs0k3KHIUl601vy5+yS9WkTahA2ldKsglB2ysqhqdH8EejwO6z+FRc96vDwrOVmw/Sf4/AZ4tz0sfwvqxMCIr+DRbdDvObwj7Bnd2bkaGnSGKx4ifOcsenttBMDLYgDeWLiTtYmuciHfJhxmzIy1/G/FAVvbrLXuczTOpWZx5KxRZCk9K4f0LPOVVnpWDsM+/IstSeeK/t0F4RJCjEVVpO+zEP9v+Pt9+HOyZ56RvA9+ewHebg3f3gond0Hv8fDIFiOkt+VgQ2YdCPa3L2BrhFhWD32ehshWTPL9mDBSyMzOZeuRc0xdso+bpv3t9KifNx3lye+3AM77G75e9v98z6U579O0n7iIbpP+YOPhs7R87lfaTVhITq7mj53HcVQt2Jx0joSDZ5jwc+nKpwhCVUPcUFURpWDgJMhMgaWvgn8IdL2/5PfNzoAdP8O6mcZmuvKGywZB7G3QrB94uSbhAQT729sf7d/COPDxh+s/pOZ/+/KC7+d8vaYtM02ioo6dS2Pqkr228yHt69mOrSsHgMe+3cgnt3V2GX/d1JUAZOVopv25jzcW7qJ9g+r8ZKkOuO9kCiAy6oJQEGIsqipeXnDte4bBWPg0+IVAp9uKd6+Tu2Ddp7Dpa0g7DdUbQd/noMMtEFa3wOGOK4sAXweDUq8jM71v5C41mw3bfwHa2i7l5GpSM7Pp+tofTvey5mTMTjjsVBt893HjpZ+WmcOCrcdM5/HGwl0AbDpsL7701A/GiuX4+QzTMYIgGIixqMp4+8ANn0DmRfj5YWOF0fbGwo3NSoNtPxp7H4f+Bi9fw7XU6TZo3NswRoXE0Vjkxaf3/7Ft8RqeyPqQX5jEWUIB2Hb0HL9u/celf5pl7+GJ7zY7tR86nUpOrqbV878Wel5VRURTEMoCMRZVHR8/uPlz+HIY/DAWfIPhsoHu+/+z1TAQm76BjHMQ0RSunAjtR0FI4WtxOxLka+6eArg2thG3LLiXX7ye5SXfGTyY9RAAQ95fadq/fnX32lJnUwsXftukZjAA//689OuCCEJVxaMb3EqpgUqpXUqpvUqp8SbXxymltiulNiulfldKNXK41lAptUgptcPSJ9qTc63S+AXByFlQp52xGX3AOYOajBRY/xl83BemdTNcTi0GwG2/wIProNvDxTYUAD7e7v8z8/f1ZqduyDtZN3Ct9yqu9lrlti8YbqaVe0+ZXruQnm3a7o5FFiVcgGLUeBKES4pCGQulVFOllL/luLdS6iGlVPUCxngDU4FBQGtgpFKqdZ5uG4A4rXUMRt1tx9Cdz4A3tNatgHigYkqrVhYCwmD0DxDRB
L4aAYfXGhIhPz8Cb7WEuQ8a7qqrXoPHdsKNn0DjHqWWpxEe5Ms9vVxrb4RYXFTTcq5lU24TXvKdQQ3ch7FuPHyWWz5ZbTtvWz/Mdtz7zaWFmsv+Uxc5ecF5jyJXwwdL97oZIQhCoYofKaU2AnFANLAQmAtcprV2W1hZKdUVmKC1vspy/hSA1vo1N/07Au9rrbtZjMpHWutCl2q7ZIoflZQL/xjFk84eMmpi+ARCm+uh0xhoEF+2SXwWosfPA6CZSmKe3zMsye3APVmPAAXP5a2b2vPY7E2lNpfESYOZt/kYGs01MfUKHiAIlZzSLn6Uq7XOBq4HpmitHwUKCoOpDxx2OE+ytLnjTmCB5bgFcFYp9YNSaoNS6g3LSsUJpdRYpVSCUirh5MmThfwqlzihdYziSa2uhavfNFYR138IDbuUi6FwZK+O4q3sYQz0XstQL/M9i7wMaFPbtP22ro1M2wvD/V/hDkaMAAAgAElEQVSt54GvNhR7vCBURQprLLKUUiOB24BfLG2+BYwxe/OYLmOUUqMxVi5vWJp8gB7A40BnoAkwxuVmWn+ktY7TWsdFRhbfp37JEd4Ibv4U4u+GwHy9iWXCXQ41u9fWHUVSSDte9P2UWjjX6HjtBufyqyvH9yU0wJfIUH+n9rduas+EIW1Mn7VyfN9Cz8sappuelcO51CyycnJZd9A1u1wQLgUKayxuB7oCr2itDyilGgNfFDAmCWjgcB4FHM3bSSnVH3gGGKK1znAYu0Frvd+yovkRiC3kXIVKhpfD7nK/1nU5O2AK/mTxmu8nOP6+CHEIwR0e18AWGXVla+fVxQ2x9VFKseDhHjSNDLa1d44Op371QJrXCnHq7+dj/2fwl8Pm+TSL9tTw//5N+4mLeGbOFm788G+nZEBBuFQoVOis1no78BCAUiocCNVaTypg2FqgucWwHAFGAKMcO1j2Kf4LDNRan8gzNlwpFam1Pgn0BWRDoorTv1Vt7uvdjHWHzjA5ezgv+H7Om4228vheY0URGmD/zzXXYa/txSFt+Gq1oRO1feJVNqHBVnXDWPRoLy5mZuPv44WPJTckyGJ0agT78fdT/TiXlkXnVxYDMMph8/zoOaM2yCaLbtS3CUa989SMokVdCUJVoLDRUEuVUmFKqQhgEzBDKfV2fmMsK4IHMDbEdwDfaq23KaUmKqWGWLq9AYQAs5VSG5VScy1jczBcUL8rpbZguLQ+Lsb3EyoBfpbQ2u7NauDlpYhrFM4vAUM4HBbLsJNTqUsy4KyJmOtw7OvtxRd3dqF/q9oE5snp8PZShAX44u/jjbdlBWPN4E6+mImfj5eLG8tKZIi5Cu4mER0ULkEKm5RXTWt9Xil1FzBDa/2CUmpzQYO01vOB+Xnannc47p/P2N+AmELOT6jEjO3VhKycXEbENwRAKcXa5wbA6ebwYTde9/2Ip4JeJMjPbgjSspx/3XdvXpPuzWsWew4Trm3NhJ+3O7V9teYQD/Zr7tL38dmbGNYpqtjPEoTKSGH3LHyUUnWBm7FvcAtCqRAW4MtTV7dy1o0CiGgMAybS03sLS/smEt84wpY8N3+LqxRISTCTJDmVkuk2AVAQLjUKaywmYriT9mmt1yqlmgB7PDctQbAQdyc06Y3v4udQZw+y6ql+AHxwS/HjHW6IdY3gDnGjXzV7XVKxnyMIVYlCGQut9WytdYzW+l7L+X6tdSEV6QShBCgFQ9435M9/vJ9aIX4kThrM1e0KVrt1x5vD2gM4ubUuZpoXR5q32VzB1pHsnNwC+whCZaewG9xRSqk5SqkTSqnjSqnvlVLitBXKhuoN4KpX4eAKWFvyOAcvL8X+V69m24tX2dryyn/kxTESy5GfNh6h2TMLOJh8scTzEoSKTGHdUDMwJD7qYWRh/2xpE4SyoeNoaD7AqM6XXPLa215eyhZiC3B9R7tr6n+3xXFvb2cdqx8txZLy8p3FTbXj2IUSz0kQKjKFNRaRWusZWutsy2cmICnTQtmhlFHMyccPfrwXcs3dRsWldpg9fLZfq9rc09PZWDSpGczicT3p36oW4UF28YLle4wNcKmNIVR1CmssTimlRiulvC2f0WAJfheEsiKsLgx6Aw6vhr+nluqtVR5dLMdSsNbrzWqF0iQyhLSsHJbtPmkTQATnvA9BqIoU1ljcgRE2+w9wDBiGIQEiCGVLzM3Q8hr442Wj3Gspcn+fpkwbbURZ+Xh7mda4CPD1Jj0rl1unr3Fq1+ayZ4JQZShsNNQhrfUQrXWk1rqW1vo64AYPz00QXFEKrnkH/IJhzj2QU3rSG09c1ZKBbe1RVvtfGwxA3WoBtra8GeJWcnI1W4+cI3r8PCYt2FlqcxKEikJJKuWNK7VZCEJRCKkFg9+Co+th5RSPPipx0mD+tuR2AAT6Gv9kQvPkZWRk5zLzr0QApv1Z8g14QaholMRYSCFKofxoe4NRtGnpJKNueBkRaMnNuJBHTDA1I5vle6SmilB1KYmxECetUL5c/ZZRj+PHeyAnq0we6SJJYmHCz9s5ft7I1WjiIIsuCFWFfI2FUuqCUuq8yecCRs6FIJQfwTXgminwzxZY9maZPNLdnoUj+09Kgp5Q9cjXWGitQ7XWYSafUK11YRVrBcFztLoGYobD8jfh6EaPPy7Qz9VY+Pt4EeBbkkW6IFR85L9wofIz6HUIjjSio7Lzl+0oKVZXkyMt64SSniX6UELVRoyFUPkJDDeyu0/uMDa8PYhjpneTyGAe6NOMzBzZvhOqPh41FkqpgUqpXUqpvUqp8SbXxymltiulNiulfldKNcpzPUwpdUQp9b4n5ylUAVoMMPSjVk6BJM9V4O3R3K5y8+KQNjx+1WXsOHbepV9mtqw0hKqFx4yFUsobmAoMAloDI5VSrfN02wDEaa1jgO+AyXmuvwT86ak5ClWMq16F0HqGOyorzeOPC/Iztu3ioyNsbSMt1f5EhVaoanhyZREP7LXUvsgEZgFDHTtorZdorVMtp6sAm+y5UqoTUBtY5ME5ClWJgGow9H1I3mPIgXgIq4yUtR7Gte2NrG8fL8XWI0Z97ie/N686/MP6JL5cfdBjcxMET+FJY1EfOOxwnmRpc8edwAIApZQX8BbwhMdmJ1RNmvYxquv9PRUO/u3RR1mNRYil1kWO1gyOMQzH+kNnTceM+3YTz8zZyh87j3t0boJQ2njSWJhleJvuBFpUbOOANyxN9wHztdaHzfo7jBurlEpQSiWcPCnZs4KFKydC9YaGlHmm59xBVjdUsOV/tYaxPZoA0NQkMc9RxnxBKdcQFwRP40ljkQQ0cDiPAo7m7aSU6g88AwzRWlvjErsCDyilEoE3gVuVUi5hLlrrj7TWcVrruMhIKa8hWPAPges+gDMHYPGEUr9992Y1AXv1vBwHfXIvi1TtPpPEPKt2FEBEsF+pz0sQPIknE+vWAs2VUo2BI8AIYJRjB6VUR+C/wECt9Qlru9b6Foc+YzA2wV2iqQTBLdHdocu9sPpDQ9K8Sa9Su/VbN7dn74kUm/RHppsa3Nk5ufh423+PvbVot+04ItiPv/cl07VpjVKb
lyB4Eo+tLLTW2cADwEJgB/Ct1nqbUmqiUmqIpdsbQAgwWym1USk111PzES5B+j0PEU3hpwcgo/TKntYKDeCKpjVt592a1TTtdzHDuZpfioP44GsLdjLy41WsO3i61OYlCJ7Eo3kWWuv5WusWWuumWutXLG3Pa63nWo77a61ra607WD5DTO4xU2v9gCfnKVRR/ILgug/hfBIsetZjj6kZ4m/avsUSGQXuy65+seqQR+YkCKWNZHALVZuGXaDrA7BuJuxd7LHHfPvvrvzxmLOra/T/VtuOs93UXZ2z4YjH5iQIpYkYC6Hq0+cZiGxpuKO2/egROfP4xhE0iQwBIDzI1+X66v3ibhIqN2IshKqPbwDc8BEob5h9G7zTBn6fCGc8kxw38/Z4l7ZlDoWRrm0v6v5C5UOMhXBpULc9PLIZRn4D9TrCinfg3fbwxY2w45dSreVtDakF2H38AulZOcxcmQjAsif68NZN7UvtWW7JzYEDy2Hr98axIJQQqUkhXDp4ecNlA43P2cOw/jPY8Dl8c4uhKRX7L4i9FapFFXyvfKgWaHdDDXhnGVe1qW0Lr60W5Iufj/NvtE2Hz9K+QXW390tOySDIz8e0loYTOdlwcIXhatv5C1y0rGaiZ8ANH0NY3eJ9IUFAVhbCpUr1BtD3GXhkKwz/Emq3hj8nw5R28NVw2L2w2L/Ia4T4ExNVzXa+cJtd2sPfx/WfXEG1uzu9vJhh0/5yaoseP4+7Pl1r7L/s/R3mPgRvtYDPhsLmb4w8k5tmwpD/wJF1MK077LFv8K87eJr0LFlxCIVHVhbCpY23j1Ftr9U1cCbRWG2s/xx2/wrVGhgrjY7/KvKv8g4NqrM56ZxLu5mx+Od8utv7LNll5KpuO2rIoO/65wJ1gr3o7bWRK/esgTc3QdoZ8AuBFgOh9VBo1t8IG7bSoAv629tQX94I3R4hMeZRbvzwb0Zf3pCXr2tXpO8lXLqIsRAEK+HRRiJf76dg5zxYNwOWvGIUVLpsEMTdDk36glfBC/IAk1rdcY3CUcpVMq16oHvpj9tnrAXAjyzYvZAtn79PHa8EZvqlcl4HQvMhhoFo2hd8A03vcSaoMZcffpKvo+YQu3IK5/9aQH3uY/c/Eab9BcEMMRaCkBdvX2hznfFJ3gfrP4UNXxj7ANUbQafbjNVGSC23tzBT0XxneAeXNj8fL7LcyIWQlU5/r3Vc7b2a/l7r4Ks0BngF8VtuJ+bldGFFbjt233BdgV/nVEoGGfhxe/Johvo244nMD5jv/xRf6CcxZNgEoWDEWAhCftRoaqjY9nkGdvxsJPf9PhGWvAotB0PcHRDd02W14SgaaKVBhN01dF/vpmxKOsu+Exc5k5pp75SVZiQPbv8Jdv3KJ34XOKuD+TUnnvm58azMbUdWEf/ZTrdEYp1Pz+Iz3Yml6lXe932P+4+/APOPwoCXwMc8C71SkbwPfn8Rjm+DRldAkz7QuBcEi/5WaSDGQhAKg48/tBtmfE7tMYzGxi+Nl3pEE+g0BjrcAsGGTlRGnrKqbeqFOZ3/38CWAFz59p9kpKUYEUzbfzI21rMuQmAEtL2eW1fV5a/cNmS7+aeamZ2Ln48XGw6dISo8iMhQ15f+12sMSRGr4sghXZthmRN40mcWd675LxxeBcNmGIaxMpJ62ghOWPsx+ARAw66w7Sdj/wkFdWOgSW/DeDTsauTdCEVGudOsqWzExcXphATP1V4WBBey0mHHXEiYDof+Bm8/aHUtxN3BjKR6vPjLDmKiqrE56RzfjL2cLk0cfuFmpMCehaycO524rAT8dToE1TTGtx4K0d15c/F+3l+yN98pbHz+Sl78eTtzNhyhbf0wfnmwh0ufYR/+RcLBM6bjd43W+P9yP+RmwzVTIOamEv1JypTsDFjzMSybbAhFxt4KvZ+G0NpGGPHRDbB/KexfAofXQG6WxZhcbhiOJr2hTkyh9qCqMkqpdVrruIL6ycpCEIqLbwDE3Gx8TuwwVhubvoat3zOmZgu6drmRZlfezUWvMKoF+RovtN0LYdscw9WUnU4br3Dm0oubbnsAGl5hRGdZKMhQgKFka9WX2nrkvGmfK5rWcGss0ptchf89K+D7u+CHu+DAnzBosnM0VUVDa2MVtvgFI4KtWX+48iUj/NmKtw806Gx8ej1hGOeDfxmGY/9SYywYK7gmvezGI7xR2X+fSoIYC0EoDWq1gkGvQ78XYPuPqIQZtNz0Gmx9m2qth0JmipEPkZMBIXWMX8GthxI77Qy5eNGrZjy1HAxFYXMgDp9OK7BPWp57XdehHj9uNOqQZeTkGDknY+bB0ldh+duQtNbI0ajVqvDfv6xISoCFzxius1qtYfQP0KxfweP8Q6DFAOMDcOEf2P+n3Xhsm2O0RzSxG47GPSHQfbLkpYYYC0EoTfyCoMMo4/PPViP8dvO3Rh5E3B2Gi6lBF5vrI5d5AJy5mMXpi5nUrx5IaIAvX612li5/7YZ2PPXDFpfHjfx4ldN5SkY2z87ZwiP9WxBd0yjtmtdYNK4Zwg0d6/PDhiP833ebaR9VnUevbGGEDUd3hx/Gwkd94OrJRtSXSbhvmXPmoLF5vfV7CK4F175rzM2rgKx2d4TWgfbDjY/WcHKX3WW1+RtI+B8oL0Mapkkfo7Z7VOeqEQhQTMRYCIKnqNMWBr9luHVQ+frGk1MyGPWJIWm+5PHeTi/4W7o0pF39au6GOtH2hYUA/LjxKImTBgOQmulsLJIvZtDrskh+2HCEpbtOsnTXScNYgJGvcc9K+OFumPug8ev7mncgwHmDvsxIPwfL34JV04yXd88noNvD4B9aes9QCmq1ND6X32NkxScl2FcdK96B5W+CbxA06masOpr2MVY2FcGQlhFiLATB0+Tz63dkfEO+XnOIWWsP29r6vLkUP4dyrFe3q0vbQhoLM/7am0z96oEcOWu4rC5m5Djd34XQ2vCvObDibSNE+Oh6I1qqnmueiMfIyTL2gJa+BqnJ0H4k9H0OqtX3/LO9faFRV+PT52nDYCWuNIzHviWw6BmjX3AtS5RVb8N4hFVtNWGPGgul1EDgXcAb+ERrPSnP9XHAXUA2cBK4Q2t9UCnVAfgQCANygFe01t94cq6CUB7c1aMxX685xNxNR53arcKDoy9vSJfGRqb15BtjiAzzp05YAM1rhdDsmQUF3n/nP+dtciLbXryKP3efZFDbOnywdJ9TP2u98IPJF6kW6Ev1ID/jV3yjbvDdnfC/K2HAyxA/1rO/prU2ggB+ew5O7YboHsZzy9JQ5SWgGrS82vgAnEuyuKyWGgZky7dGe80WhtuqdhvjU6uN4e6qIqsPj4XOKqW8gd3AlUASsBYYqbXe7tCnD7Baa52qlLoX6K21Hq6UagForfUepVQ9YB3QSmt91t3zJHRWqIwkp2TQ6WX3Ffy2T7yKID/z33TR4+fle+/ESYOd+ljdUmAo3Q6dutKp/7ODW/HyvB0ufbmYDD/dZ+hltbwGhr4PgeH5PrtYHNtklL89sAxqNDMinC4bVLFftrm5cGK7YTQOLIfjW+G8Q/XDwAi78bAakFo
twS+4/Oach4oQOhsP7NVa77dMaBYwFLAZC631Eof+q4DRlvbdDn2OKqVOAJGAW2MhCJWRsEDXqnqOuDMUjoyMb8h9vZvSY/ISp3ZHQxEV7qwbVd2kmp/VUACcuJBOrVBL8lpwDRg5C1Z9AL+9ANN6wLDp0CCeH9YncUXTmtSpVoJEt/NH4feXjLDjwHAY9Iahw+Wd/9+mQuDlZexN1WkLVzxotKWeNgzI8e2G8Ti+zRCnzLpoGaQgorHFgLQ19j5qt4HwxhU658OTxqI+cNjhPAnokk//OwGXdbVSKh7wA/a5jBCESo6vtxf1qgVw9Jx75Vl31AkL4J/z6bx2Q8HKsYPa1nE6NxM6dCT+ld/ZMXGgvYaGUtD1fmhwOXx3O0wfSFbvZ3lsQXM0Xs4rkcKSkQIr34W//gM6x3jZ9nis8oerBkUYUWXR3e1tublwNtEwHFYjcmK7UXgLi3fHN8gIV85rRIIqhuCjJ42F2drR1OellBoNxAG98rTXBT4HbtNau6itKaXGAmMBGjZsWNL5CkK58MZN7bnFEglVFJY+0ZvsXPs/qRljOnP7zLWmffNqFQb4FBxyumTXCa5ul0eaPaoT3LOcnJ8exHfJi8z0jWFc1r1Fm3hujiGV8sfLkHIc2twA/V8wVH+rKl5eRg5HRBMjS99KZiqc3GExINsMI7LjF4tUiYXQus5urNptjP0RH/dqxZ7Ak8YiCWjgcB4FHM3bSSnVH3gG6KW1znBoDwPmAc9qrVflHQegtf4I+AiMPYvSm7ogVHzyrg76tHSvghsR7OzSCfAr2N3x0bL9LsYiMzuXFhNWADczyrsmL/h8xnz/p+BAlJHEVhD7/oBFzxkvxah4o/BUg84Fj6uq+AVB/U7Gx4rWhhE9vs3+ObHN2MvJsYhOevkYBqN2G2MFUq+jEZHlQTxpLNYCzZVSjYEjwAhglGMHpVRH4L/AQK31CYd2P2AO8JnWerYH5ygI5c4VTc1VUX95sLtpe34s/78+/Lr1H16Zb99/eLhfc8b2dBYJzDd01sLGw65bhD9ttG7eKr7K6cf63OZM9X2X2p8OgV5PQq//Mw8VPrHDMBJ7fzNk3m+aCa2vq9ib1+WFUkYUVWgd5+z0nCxI3utsRA6tgi2zDcNbWY2F1jpbKfUAsBAjdHa61nqbUmoikKC1ngu8AYQAsy1FYQ5prYcANwM9gRpKqTGWW47RWm/01HwFobxwLIjk46VsrqX61c2LGeVHg4ggDp9JdWobGd/Qpe63WRGmvFxWO5S1iaeJbRiOt5fRP+/yfaduyLWZr7C9y2L4cxIkroAbP7bnHKScMHI11n8KfqFGhFOXf1/SmdDFxtvX2NOo1cpQP7aSdhbSTnv88R7Ns9Bazwfm52l73uG4v5txXwBfeHJuglARWfJ4b1tUk4938X5139q1EZ/9fdB2ntdQFMS7Izrw7uI97Dp+gZum/c0j/ZvzSH8jw7uaSfRWKgHo6z5ANe4J8x4z6n1f+x6c3GlkP2enQ+e7jZWH1JYofQKrl0lQQMWN0xKESxDHAkmBBUQsuaNZrVASJw2mQYSxMnFndLo1M17cy//P7r6YMrwDQzvUJy7ankdhVbO9bfoa/v35OtN7xb70G+dbDoOxS40N2W9ugT9eMooP3bfa0JkSQ1GpEbkPQaig+BRiXyE/vr/nCv7en0xYgHm+wqe3x5Odq502yq9qY4TYhjqM8bG4oP7cfdLlHk8ObMnrv+7kTGoWs9YcYmzPFnDXYlj9X2PTtrFrfQ2hciIrC0GoAHRoYHcjWOU9SkqtsACGdnCvpeTj7eUSUWV1WTlKpOfmo/Jw5Kx9f+RcWpZx4BsI3R8RQ1HFkJWFIFQAvrunK9aUiS/u6kJ2TtlGgn9yaxxfrj5o28h2XFks2n4cM1mgTo3CGdyuHl+sMuTU61Yr+oa8UHkQYyEIFQBHl5OvtxfF3K4oNv1b16Z/69q282A/5wnkFToEeOHa1rSrXw0/by8yc3JNDYpQdRA3lCAILkSEOGcHPzxrI0EOBmTRoz2JiaqOUopNLxjV51IyClfdT6iciLEQBMGFm+Ma8KglXNaKtYhSsJ83LWrbiw8F+Hrh7aVIycgq0zkKZYsYC0EQXPD19uLh/s1d2ttHVePXR5xlPZRSeCk45FAP/O1Fuxjx0d8kp2TYN76FSo0YC0EQ3PLydW2dzjs1inDKBbGSlaP52WFf470/9rJq/2k6vbyYzq+4r9chVB5kg1sQBLf0bB7pdO5VQFJ5elYOX6w66NSWme0iGC1UQsRYCILgloY1nFcRjpLoZlw1ZRkHk1Nd2rXWhdKjEiou4oYSBCFfoh0Mxq1dG5n2aRpplAk1MxQA//ljb+lPTChTxFgIgpAv3/y7K9fE1GXnSwNpEhli2ie+gKzzD5aWjrFIz8rhvi/XceDUxYI7C6WKGAtBEPKldlgA74+KzbcUa/uo/FVP07NKJ2lv/aEzzN/yD33eXMrnfye67bfn+AWy85YHFEqEGAtBEErM8M4NCuxzMbPkSXv+DuVgn/tpm8v1mSsP0PipeVz5zjJe/3VniZ8n2BFjIQhCiXG3eb1lwgDbcWpmdomfk1aAwZnw83asC5iPlx8o8fNKm8ocGSbGQhCEUiHA1/l1Uq9agJMgYUEv+sJwsRQMTnmx49h5Wjy7gIXb/invqRQLjxoLpdRApdQupdRepdR4k+vjlFLblVKblVK/K6UaOVy7TSm1x/K5zZPzFASh5Izt0cTpPCfPHsXFUtCOclydhAeZ1+koDy6kZxE9fh5zNiS57WOtB/Lvz9exbPdJNie51jivyHgsz0Ip5Q1MBa4EkoC1Sqm5WuvtDt02AHFa61Sl1L3AZGC4UioCeAGIwyj7u84y9oyn5isIQsl4qF9z0rNz6duyFr/vOM51HY1aGp/eEc9t09eQllXyVYGjwTmTmkVGdo5tH2Px9uMlvn9xWbb7FACPfrOJ6ztGmfaZtMC+h3Lr9DUAJE4a7PnJlRKeTMqLB/ZqrfcDKKVmAUMBm7HQWi9x6L8KGG05vgr4TWt92jL2N2Ag8LUH5ysIQgnw8fbi6atbAXB5E3sJVata7YX0ohuLzOxcLqRnUSPEn2Pn0nh9gfOmdUp6Nv4hxv3v+iyhuFMvMZk5+a+aTl7IKPEzzqdnua16WBZ40g1VHzjscJ5kaXPHncCCYo4VBKGCEmD55T87wb2Lxh2Pzd5Ep5cXo7Xm4VkbuZBhGByrZlVmAeGxP208UuRnFobcXE30+Hm8tWgXAOFBdkn3C+nOwok9Jy8pkT5WelYO0ePnETNhEd+sPVTs+5QUTxoLs/AI00BrpdRoDJfTG0UZq5Qaq5RKUEolnDzpWh9YEITyp2VdQ848xL/ojox5mw1xwpMpGU77Fdbgq53HLgDQ6rlfTcf/svlYkZ9ZGKxKutbMdGs5WoB2Exaxan+y7fzQafOsdihcdNTpi5m243cX73G5/tPGI2Wyae5JY5EEOAZfRwEu5baUUv2BZ4AhWuuMoozVWn+ktY7TWsdFRkbmvSwIQgXA19sLfx8vqhdjQ9oqRRX/yu
9kZdt/L7apVw2A22eu5WxqJmlZ5m6g3zy0j3EmNdPpPO9Lf8RHqwC4/8v1+d7H0RCY8d8/93HFpD9s50fPpbv0+Xj5fr5Ze9ilvbTxpLFYCzRXSjVWSvkBI4C5jh2UUh2B/2IYihMOlxYCA5RS4UqpcGCApU0QhEpIgK836W5e6Pnh7SBzu+v4Bduxv8Mv+Q4Tf3MZd1f3xkV+VlE4k2p3NX295hAZJiuE9Kwc5m1xXtkse6KP0/naxNP5PqegXJE9xy+w9ch5U1dMaeMxY6G1zgYewHjJ7wC+1VpvU0pNVEoNsXR7AwgBZiulNiql5lrGngZewjA4a4GJ1s1uQRAqHwG+XqRnFT0hLceNym2NYD/TdoCEZ/vz7DWt3V5/Zd52osfPY91B51fK0bNpHEwunObUWYeVxVM/bOHfn69z6ZOS4bqh37BGELc5iDE++PWGfJ/jU4Am/JXvLAPg950n8u1XGnhUolxrPR+Yn6fteYfj/vmMnQ5M99zsBEEoKwJ8vUnPLr0a3bXCAkzbF4/rRc0QfwDu692Uj5btJydX21YoP244Yvu1/vQPW1n4qL3qn9XdU5hw1rOpBVf/c7eS2nDYnl/RvJa5MKOVIH/3elxljWRwC4LgcQ4mp/LTxqNMXbK30L/e3fHLg93dXmtSM9h2XD3Il+xcbdvPOH4+nUe+2Wi7Xj88sNhzcLdH0sVBfTdvOdmJQ9sAOAkytq1fLd/n7D9p/1sN62Tkb1gFElJJbMkAABE0SURBVFfuPVWEGZccMRaCIJQZbyzcRa83lhZ7/C8Pds/3Bevl4LYJtLyUrTIji/Jsdl80cRMBDH1/RYHzcLdq+PKuLky6oR0ASWfsNcm7NI5gdBfD/fT5nfF0ahROkJ83R8+mmd7HjIaWcrbWxMRbPlltu/bdPV0LfZ/iIsZCEASP8+EtsU7neX91F5bIUH+31x4f0MLp3FrVzxpxVDeP6ypvRJOVTUnnmL/lWL5yHGYb2r882B0fby+bG8y6j/Fg32bMGnu5zZD5+3jz/b1XkJqZw+oD+W/FdmtmT2603vdChvPf7vlrWhMXnX89kdJAjIUgCB7HGupqxSxfIC/Wze1H+je3tTm6cK6JqevUv3ueeuG7j6cAMO5bw/WU9wV/+qL9pZuVJ7nvvi/XM+T9leS62WA3MxbWFU/NPAatX6va+ZaUTU5xn929cq+RrzHj9s5UCzRCjy9m5JBRivs/hUWMhSAIHic82DnHYvrKA/m+JAGWWYT3HA2E9YUJ8M7wDkwbbV+x1K3mvHK4s3s0ANuOngfg/q+ccx46NbIXbPpgyT7TOaw6kGzanpHHDdXMYaO6SWSw22tm7D2Rku91gD6X1SLYstmdkpHNf363Vx48WcDfsbTwaDSUIAgC4CRVbmX38RS6hpi7lSb+vJ3pK42opUBfb366vxvJF51fir7eXgxsa19d1M7jZqruIMFx2CGLun2D6qRmZLNw23Eys3Px8/HincW7i/R9MrJzCQvwYWzPJnRoEE7HhnbDk1e/yV3m+tRRsdz/1Xq3Lrm8iX7W+6RkZDNng13GZHA75xWWpxBjIQhCuZCfCq3VUICRo9G+gfuyrZueH2Ba58IxF6PHZLtm6ae3d7Yl8rV4dgH//Vcnt/dOdSOrnp6VQ4CvNw/0bW56vTDERYcDcMKNyOA3Cc5Z2cEWY7Hn+AWOWDbGH7uyRYERVaWFuKEEQSgXUgtZDCm/2t8A1YJ8qVfdNQzW3T6BY2lWwCmhbnhcA4Z2qGc7/2OXebJbRnYu/r4le31ajdmzP24F4FByKl+vsQsFrrVsfls3tq0ri/lbjtmO7+3dtERzKApiLARBKBce+GoDT3632fRa/1a1bceO0h5FJTTA2XlyWe1QAny98PM2v+frw2IY1LaO7fyr1eYqr451NMyYOsrYS5n/UA+3fXwc5pCRncNdn63lqR+2sO6gUbZn7iZDDu+Nm2IAqG8xiN2bR3J5kwha1w1zuoenEWMhCEK5kdfVYmW3gw6UNg9IKhS/PtLT6fzpwa1QShHo5/qiHxnfEICODcOd2s1qh8/f8k++G9ODY+qSOGkwreuFFWqe51KzbNFbN374l9O1CMveizX09r3f97B4xwknpduyQIyFIAjlyvpDrgUwsx1CWfOWZy0K9fO4p8IsK43broh26WtN4qsdFsCB1662tZdGOVh3jLHM41SKe/VZd99+4+GyLcsqxkIQhDJh5fi+zLy9My9c6yzyZxYNdOx8us1VFFPf/eZ2YXj1+na245go416P9m/OZ3fEO/XbcNhutBz3O8xWFoBbV1ZRsK5wrn5vuVO7Y35HceqAeAIxFoIglAn1qwfS+7Ja3N7NWT7cUXZjz/ELRI+fh9aGdlPipME0rBFUoueO6tKQ2fd05c7ujW2CgkopWtYJdeq34ZD5L/Vebyzljplrndoigv24ubN5re2i4C7s9YOl9jyKgvI0ygoxFoIglDmO0tsJifZf9FbJbYB2pRgS2jk6gufyyJbn9fkH5hN19cfOE8zfcoycXM3WI+c4fTHTVi62JDSIMDeEby7abbnuXuywrI2IGAtBEMqcni3s0hwz/0okN1fzn9+dJUAey6P1VNpUC/Slc7R9M/t/Y+Kcrv9w3xVO5/d9uZ6mT8/nmv8YQoOzSqE6XbDJRrsjeV1dz1/Tmus71gfg9m7RJX5+UagYzjBBEC4p4htH8IdDwZ41iad56zd7FvWVrWvTqEaw2dBSQynF7HuuQGtNZk6uSyhsQfvqUSWQOLeSN/S1f6vaLN5hV8d99Epng3mHpQLg2ze3z1dvyhPIykIQhDLn9m7RfDP2ctv5B0vt2kz/urwRH98aZzbMIyilTHMm6lQzL7BkZdKNMaU6j+X/14e3h7d3ahvU1nxPo6wNBXjYWCilBiqldiml9iqlxptc76mUWq+UylZKDctzbbJSaptSaodS6j1VHn8dQRA8gr+PN12a2OW3raKB4JpIV17Urx7IeyM7ur3eIR8JkqJwTUxd+rasRYOIIBddKe8CyqqWJR4zFkopb2AqMAhoDYxUSuUtjHsIGAN8lWfsFUA3IAZoC3QGenlqroIglA9392js0mYmOlhe1HOzunjl+ral9oz3R8UyfUxn27l143rK8A6l9ozSwJMmPB7Yq7XeD6CUmgUMBbZbO2itEy3X8orDa/6/vbsPsqqu4zj+/swKLEaxC67KLKQgmIIa0iqWSroaKDr5nJQzUtqTTk5mo2mWZZOp0VhT2jhGqDSGT/lApKEjGDMOIKsCkqSuDxVKAj4hPqHrtz9+35Xj7r2c6+Dde3fv9zWzc8/53d85+/t9d/Z+z/mdc38H6oH+gIB+wAuEEPqUCZ9sBJ75QFmxJ9hVwrACc04BTBm3c8Hyj8K8sw5ic8d73c4yKq2cw1DNQPZ2gTVelsvMFgMLgbX+M9/MVnetJ+mbktokta1fv77r2yGEKjeowJDT1m4X7WnNDQP5/he635XVMLB8H+T1/eqqLlFAeZNFocG2kr63L2k0sCcwnJRgWiVN6lrPzK4xsxYza2lqaur6dgihy
nUdkz9+QjPHjC/pmLLHjGvuPr9TT07gVy3K2eM1wIjM+nDg+RK3PQ5YYmabzGwTcDdwQM42IYTeJnP4OHHkEK740vjcKcl72uZ3P3iMW0XXnHtUOa9ZLAPGSBoJPAdMA75S4rb/Ab4h6VLSGcrngd+UpZUhhIqZOGooZ7WOZsq4ndmtqTqmteiqdY8dObllBF8/eCQD+9dVXTLrKbJtmf83b+fSVNKHfB0wy8wukfQzoM3M5kraD7gdaATeAv5nZuP8TqrfA5NIxx5/N7Nztva7WlparK2trWx9CSGEvkjSQ2aW+8WWsiaLnhTJIoQQPrxSk0XtXaUJIYTwoUWyCCGEkCuSRQghhFyRLEIIIeSKZBFCCCFXJIsQQgi5IlmEEELI1We+ZyFpPfDvbdjFDsCGj6g5fVnEqTQRp9JEnEpTzjjtYma5k+v1mWSxrSS1lfLFlFoXcSpNxKk0EafSVEOcYhgqhBBCrkgWIYQQckWy2OKaSjegl4g4lSbiVJqIU2kqHqe4ZhFCCCFXnFmEEELIVfPJQtIRkh6X1C7p/Eq3p6dJmiVpnaRVmbIhku6V9KS/Nnq5JP3WY7VS0oTMNtO9/pOSpleiL+UkaYSkhZJWS/qnpO96ecQqQ1K9pAclrfA4XezlIyUt9T7fJKm/lw/w9XZ/f9fMvi7w8sclTalMj8pLUp2kRyTN8/XqjZOZ1ewP6aFMTwGjgP7ACmBspdvVwzGYBEwAVmXKfgmc78vnA5f78lTSI25FesztUi8fAjztr42+3Fjpvn3EcRoGTPDljwNPAGMjVt3iJGCQL/cDlnr/bwamefnVwBm+fCZwtS9PA27y5bH+/zgAGOn/p3WV7l8Z4nUO8Gdgnq9XbZxq/cxif6DdzJ42s83AjcAxFW5TjzKzRcBLXYqPAa735euBYzPlsy1ZAjRIGgZMAe41s5fM7GXgXuCI8re+55jZWjN72JdfA1YDzUSsPsD7u8lX+/mPAa3ArV7eNU6d8bsVOEySvPxGM3vbzJ4B2kn/r32GpOHAUcBMXxdVHKdaTxbNwH8z62u8rNbtZGZrIX1IAjt6ebF41VQcfQhgX9JRc8SqCx9aWQ6sIyXDp4BXzOxdr5Lt8/vx8PdfBYZSA3EiPXL6POA9Xx9KFcep1pOFCpTF7WHFFYtXzcRR0iDgL8DZZrZxa1ULlNVErMysw8zGA8NJR7l7FqrmrzUZJ0lHA+vM7KFscYGqVROnWk8Wa4ARmfXhwPMVaks1ecGHTPDXdV5eLF41EUdJ/UiJ4gYzu82LI1ZFmNkrwP2kaxYNkrbzt7J9fj8e/v5g0rBoX4/TgcAXJT1LGv5uJZ1pVG2caj1ZLAPG+B0I/UkXjuZWuE3VYC7QeZfOdODOTPmpfqfPAcCrPvQyH5gsqdHvBprsZX2Gjw//EVhtZldk3opYZUhqktTgywOBw0nXdxYCJ3q1rnHqjN+JwAJLV27nAtP8LqCRwBjgwZ7pRfmZ2QVmNtzMdiV97iwws1Oo5jhV+m6ASv+Q7lp5gjSuemGl21OB/s8B1gLvkI5STieNhd4HPOmvQ7yugKs8Vo8CLZn9nEa6uNYOfK3S/SpDnA4ind6vBJb7z9SIVbc47QM84nFaBVzk5aNIH2LtwC3AAC+v9/V2f39UZl8XevweB46sdN/KGLND2HI3VNXGKb7BHUIIIVetD0OFEEIoQSSLEEIIuSJZhBBCyBXJIoQQQq5IFiGEEHJFsgi9iqQOSct9VtOHJX0up36DpDNL2O/9kuJZ0BmSrpN0Yn7NUAsiWYTe5k0zG29mnwYuAC7Nqd9AmrGzKmW+rRtCVYtkEXqzTwAvQ5qzSdJ9frbxqKTO2YMvA3bzs5EZXvc8r7NC0mWZ/Z3kz2J4QtLBXrdO0gxJy/y5FN/y8mGSFvl+V3XWz5L0rKTLfZ8PShrt5ddJukLSQuBypWdi3OH7XyJpn0yfrvW2rpR0gpdPlrTY+3qLz1eFpMskPeZ1f+VlJ3n7VkhalNMnSbrS9/E3tkyKGAJxVBN6m4E+o2k96RkTrV7+FnCcmW2UtAOwRNJc0jMm9rI0sR2SjiRN+zzRzN6QNCSz7+3MbH9JU4GfkKaqOJ00Vcd+kgYAD0i6BzgemG9ml0iqA7Yv0t6Nvs9TSXP/HO3luwOHm1mHpN8Bj5jZsZJagdnAeODH/rv39rY3et9+5Nu+LukHwDmSrgSOA/YwM+uccgO4CJhiZs9lyor1aV/gU8DewE7AY8Cskv4qoc+LZBF6mzczH/yfBWZL2os0vcYvJE0iTfncTPrA6+pw4FozewPAzLLP8uicHPAhYFdfngzskxm7H0yaf2cZMEtpcsE7zGx5kfbOybz+OlN+i5l1+PJBwAnengWShkoa7G2d1rmBmb2sNFvpWNIHPKSHdi0GNpIS5kw/K5jnmz0AXCfp5kz/ivVpEjDH2/W8pAVF+hRqUCSL0GuZ2WI/0m4izdPUBHzGzN5Rms2zvsBmovgUzm/7awdb/jcEnGVm3Sb788R0FPAnSTPMbHahZhZZfr1LmwptV6itIj086csF2rM/cBgpwXwHaDWzb0ua6O1cLml8sT75GVXM/xMKimsWodeStAfp0bgvko6O13miOBTYxau9RnoMaqd7gNMkbe/7yA5DFTIfOMPPIJC0u6SPSdrFf98fSLPRTiiy/cmZ18VF6iwCTvH9HwJssPSsjHtIH/qd/W0ElgAHZq5/bO9tGgQMNrO7gLNJw1hI2s3MlprZRcAG0nTWBfvk7Zjm1zSGAYfmxCbUkDizCL1N5zULSEfI033c/wbgr5LaSDPC/gvAzF6U9ICkVcDdZnauH123SdoM3AX8cCu/byZpSOphpXGf9aRrHocA50p6B9gEnFpk+wGSlpIOzLqdDbifAtdKWgm8wZapqH8OXOVt7wAuNrPbJH0VmOPXGyBdw3gNuFNSvcfle/7eDEljvOw+0vOaVxbp0+2ka0CPkmZi/sdW4hJqTMw6G0KZ+FBYi5ltqHRbQthWMQwVQgghV5xZhBBCyBVnFiGEEHJFsgghhJArkkUIIYRckSxCCCHkimQRQgghVySLEEIIuf4PdUEKgTJEw50AAAAASUVORK5CYII=\n",
581 | "text/plain": [
582 | ""
583 | ]
584 | },
585 | "metadata": {},
586 | "output_type": "display_data"
587 | }
588 | ],
589 | "source": [
590 | "learn.recorder.plot_losses()"
591 | ]
592 | },
593 | {
594 | "cell_type": "markdown",
595 | "metadata": {},
596 | "source": [
597 | "The model is not doing that well - out of presented pairs it gets roughly 10% of examples wrong. I also did a cursory error analysis (not shown here for the sake of brevity) and the model is not doing that great at all.\n",
598 | "\n",
599 | "How can this be? Maybe the nearly absolute positional invariance through the use of global max pooling is not working that well. Maybe there is a bug somewhere? Maybe the model has not been trained for long enough or lacks capacity?\n",
600 | "\n",
601 | "If I do continue to work on this I will definitely take a closer look at each of the angles I list above. For the time being, let's try to predict on the validation set and finish off with making a submission.\n",
602 | "\n",
603 | "The predicting part is where the code gets really messy. That is good enough for now though."
604 | ]
605 | },
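{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of the pooling hypothesis. This is a hedged, standalone sketch (it assumes only that torch is importable and is not part of the original pipeline): global max pooling over the spatial grid is permutation invariant, so the head never sees where on the image a feature fired."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"\n",
"pool = nn.AdaptiveMaxPool2d(1)\n",
"acts = torch.randn(1, 2048, 7, 7) # stand-in for cnn activations\n",
"# shuffle the 49 spatial positions - the channel-wise max does not change\n",
"shuffled = acts.flatten(2)[:, :, torch.randperm(49)].view_as(acts)\n",
"torch.allclose(pool(acts), pool(shuffled)) # True - positional information is lost"
]
},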
606 | {
607 | "cell_type": "code",
608 | "execution_count": 15,
609 | "metadata": {},
610 | "outputs": [],
611 | "source": [
612 | "learn.load(f'{name}-stage-2');"
613 | ]
614 | },
615 | {
616 | "cell_type": "code",
617 | "execution_count": 16,
618 | "metadata": {},
619 | "outputs": [],
620 | "source": [
621 | "new_whale_fns = set(df[df.Id == 'new_whale'].sample(frac=1).Image.iloc[:1000])"
622 | ]
623 | },
624 | {
625 | "cell_type": "code",
626 | "execution_count": 17,
627 | "metadata": {},
628 | "outputs": [],
629 | "source": [
630 | "data = (\n",
631 | " ImageItemList\n",
632 | " .from_df(df, f'data/train-{SZ}', cols=['Image'])\n",
633 | " .split_by_valid_func(lambda path: path2fn(path) in val_fns.union(new_whale_fns))\n",
634 | " .label_from_func(lambda path: fn2label[path2fn(path)], classes=classes)\n",
635 | " .add_test(ImageItemList.from_folder(f'data/test-{SZ}'))\n",
636 | " .transform(get_transforms(do_flip=False), size=SZ, resize_method=ResizeMethod.SQUISH)\n",
637 | " .databunch(bs=BS, num_workers=NUM_WORKERS, path='data')\n",
638 | " .normalize(imagenet_stats)\n",
639 | ")"
640 | ]
641 | },
642 | {
643 | "cell_type": "code",
644 | "execution_count": 18,
645 | "metadata": {},
646 | "outputs": [
647 | {
648 | "data": {
649 | "text/plain": [
650 | "3570"
651 | ]
652 | },
653 | "execution_count": 18,
654 | "metadata": {},
655 | "output_type": "execute_result"
656 | }
657 | ],
658 | "source": [
659 | "len(data.valid_ds)"
660 | ]
661 | },
662 | {
663 | "cell_type": "code",
664 | "execution_count": 19,
665 | "metadata": {},
666 | "outputs": [
667 | {
668 | "name": "stdout",
669 | "output_type": "stream",
670 | "text": [
671 | "CPU times: user 1.93 s, sys: 1.13 s, total: 3.06 s\n",
672 | "Wall time: 3.57 s\n"
673 | ]
674 | }
675 | ],
676 | "source": [
677 | "%%time\n",
678 | "targs = []\n",
679 | "feats = []\n",
680 | "learn.model.eval()\n",
681 | "for ims, ts in data.valid_dl:\n",
682 | " feats.append(learn.model.process_features(learn.model.cnn(ims)).detach().cpu())\n",
683 | " targs.append(ts)"
684 | ]
685 | },
686 | {
687 | "cell_type": "code",
688 | "execution_count": 20,
689 | "metadata": {},
690 | "outputs": [],
691 | "source": [
692 | "feats = torch.cat(feats)"
693 | ]
694 | },
695 | {
696 | "cell_type": "code",
697 | "execution_count": 21,
698 | "metadata": {},
699 | "outputs": [
700 | {
701 | "data": {
702 | "text/plain": [
703 | "torch.Size([3570, 512])"
704 | ]
705 | },
706 | "execution_count": 21,
707 | "metadata": {},
708 | "output_type": "execute_result"
709 | }
710 | ],
711 | "source": [
712 | "feats.shape"
713 | ]
714 | },
715 | {
716 | "cell_type": "code",
717 | "execution_count": 22,
718 | "metadata": {},
719 | "outputs": [
720 | {
721 | "name": "stdout",
722 | "output_type": "stream",
723 | "text": [
724 | "CPU times: user 21.5 s, sys: 0 ns, total: 21.5 s\n",
725 | "Wall time: 21.5 s\n"
726 | ]
727 | }
728 | ],
729 | "source": [
730 | "%%time\n",
731 | "sims = []\n",
732 | "for feat in feats:\n",
733 | " dists = learn.model.calculate_distance(feats, feat.unsqueeze(0).repeat(3570, 1))\n",
734 | " predicted_similarity = learn.model.head(dists.cuda()).sigmoid_()\n",
735 | " sims.append(predicted_similarity.squeeze().detach().cpu())"
736 | ]
737 | },
738 | {
739 | "cell_type": "code",
740 | "execution_count": 23,
741 | "metadata": {},
742 | "outputs": [
743 | {
744 | "data": {
745 | "text/plain": [
746 | "3570"
747 | ]
748 | },
749 | "execution_count": 23,
750 | "metadata": {},
751 | "output_type": "execute_result"
752 | }
753 | ],
754 | "source": [
755 | "len(sims)"
756 | ]
757 | },
758 | {
759 | "cell_type": "code",
760 | "execution_count": 24,
761 | "metadata": {},
762 | "outputs": [],
763 | "source": [
764 | "new_whale_idx = np.where(classes == 'new_whale')[0][0]"
765 | ]
766 | },
767 | {
768 | "cell_type": "code",
769 | "execution_count": 44,
770 | "metadata": {},
771 | "outputs": [
772 | {
773 | "name": "stdout",
774 | "output_type": "stream",
775 | "text": [
776 | "CPU times: user 1.2 s, sys: 0 ns, total: 1.2 s\n",
777 | "Wall time: 1.19 s\n"
778 | ]
779 | }
780 | ],
781 | "source": [
782 | "%%time\n",
783 | "top_5s = []\n",
784 | "for i, sim in enumerate(sims):\n",
785 | " idxs = sim.argsort(descending=True)\n",
786 | " probs = sim[idxs]\n",
787 | " top_5 = []\n",
788 | " for j, p in zip(idxs, probs):\n",
789 | " if len(top_5) == 5: break\n",
790 | " if j == i: continue\n",
791 | " predicted_class = data.valid_ds.y.items[j]\n",
792 | " if j == predicted_class: continue\n",
793 | " if predicted_class not in top_5: top_5.append(predicted_class)\n",
794 | " top_5s.append(top_5)"
795 | ]
796 | },
797 | {
798 | "cell_type": "code",
799 | "execution_count": 45,
800 | "metadata": {},
801 | "outputs": [
802 | {
803 | "data": {
804 | "text/plain": [
805 | "0.25805322128851543"
806 | ]
807 | },
808 | "execution_count": 45,
809 | "metadata": {},
810 | "output_type": "execute_result"
811 | }
812 | ],
813 | "source": [
814 | "# without predicting new_whale\n",
815 | "mapk(data.valid_ds.y.items.reshape(-1,1), np.stack(top_5s), 5)"
816 | ]
817 | },
818 | {
819 | "cell_type": "code",
820 | "execution_count": 46,
821 | "metadata": {},
822 | "outputs": [
823 | {
824 | "name": "stdout",
825 | "output_type": "stream",
826 | "text": [
827 | "0.98 0.28244631185807656\n",
828 | "0.9822222222222222 0.2875023342670402\n",
829 | "0.9844444444444445 0.2908216619981326\n",
830 | "0.9866666666666667 0.29868347338935575\n",
831 | "0.9888888888888889 0.3054715219421102\n",
832 | "0.991111111111111 0.3146778711484594\n",
833 | "0.9933333333333333 0.32679271708683477\n",
834 | "0.9955555555555555 0.3353548085901027\n",
835 | "0.9977777777777778 0.34555555555555556\n",
836 | "1.0 0.3471428571428572\n",
837 | "CPU times: user 12.9 s, sys: 0 ns, total: 12.9 s\n",
838 | "Wall time: 12.9 s\n"
839 | ]
840 | }
841 | ],
842 | "source": [
843 | "%%time\n",
844 | "\n",
845 | "for thresh in np.linspace(0.98, 1, 10):\n",
846 | " top_5s = []\n",
847 | " for i, sim in enumerate(sims):\n",
848 | " idxs = sim.argsort(descending=True)\n",
849 | " probs = sim[idxs]\n",
850 | " top_5 = []\n",
851 | " for j, p in zip(idxs, probs):\n",
852 | " if new_whale_idx not in top_5 and p < thresh and len(top_5) < 5: top_5.append(new_whale_idx)\n",
853 | " if len(top_5) == 5: break\n",
854 | " if j == new_whale_idx or j == i: continue\n",
855 | " predicted_class = data.valid_ds.y.items[j]\n",
856 | " if predicted_class not in top_5: top_5.append(predicted_class)\n",
857 | " top_5s.append(top_5)\n",
858 | " print(thresh, mapk(data.valid_ds.y.items.reshape(-1,1), np.stack(top_5s), 5))"
859 | ]
860 | },
861 | {
862 | "cell_type": "markdown",
863 | "metadata": {},
864 | "source": [
865 | "There are many reasons why the best threshold here might not carry over to what would make sense on the test set. It is some indication though of how our model is doing and a useful data point."
866 | ]
867 | },
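{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the selection rule above explicit, here is a hedged, self-contained toy version of the loop (numpy only; the labels and similarities are made up for illustration). Candidates are walked in order of decreasing similarity, and new_whale is slotted in as soon as the similarity falls below the threshold."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"def top5_with_new_whale(sims, labels, new_whale, thresh):\n",
"    order = np.argsort(sims)[::-1] # most similar candidates first\n",
"    top5 = []\n",
"    for idx in order:\n",
"        # once similarity drops below the threshold, new_whale becomes a candidate\n",
"        if new_whale not in top5 and sims[idx] < thresh and len(top5) < 5: top5.append(new_whale)\n",
"        if len(top5) == 5: break\n",
"        label = labels[idx]\n",
"        if label != new_whale and label not in top5: top5.append(label)\n",
"    return top5\n",
"\n",
"top5_with_new_whale(np.array([0.999, 0.99, 0.97, 0.96, 0.95, 0.9]),\n",
"                    ['a', 'b', 'c', 'd', 'e', 'f'], 'new_whale', 0.98)\n",
"# -> ['a', 'b', 'new_whale', 'c', 'd']"
]
},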
868 | {
869 | "cell_type": "markdown",
870 | "metadata": {},
871 | "source": [
872 | "## Predict"
873 | ]
874 | },
875 | {
876 | "cell_type": "code",
877 | "execution_count": 163,
878 | "metadata": {},
879 | "outputs": [
880 | {
881 | "data": {
882 | "text/plain": [
883 | "7960"
884 | ]
885 | },
886 | "execution_count": 163,
887 | "metadata": {},
888 | "output_type": "execute_result"
889 | }
890 | ],
891 | "source": [
892 | "len(data.test_ds)"
893 | ]
894 | },
895 | {
896 | "cell_type": "code",
897 | "execution_count": 189,
898 | "metadata": {},
899 | "outputs": [],
900 | "source": [
901 | "data = (\n",
902 | " ImageItemList\n",
903 | " .from_df(df, f'data/train-{SZ}', cols=['Image'])\n",
904 | " .split_by_valid_func(lambda path: path2fn(path) in {'69823499d.jpg'}) # in newer version of the fastai library there is .no_split that could be used here\n",
905 | " .label_from_func(lambda path: fn2label[path2fn(path)], classes=classes)\n",
906 | " .add_test(ImageItemList.from_folder(f'data/test-{SZ}'))\n",
907 | " .transform(None, size=SZ, resize_method=ResizeMethod.SQUISH)\n",
908 | " .databunch(bs=BS, num_workers=NUM_WORKERS, path='data')\n",
909 | " .normalize(imagenet_stats)\n",
910 | ")"
911 | ]
912 | },
913 | {
914 | "cell_type": "code",
915 | "execution_count": 190,
916 | "metadata": {},
917 | "outputs": [
918 | {
919 | "name": "stdout",
920 | "output_type": "stream",
921 | "text": [
922 | "CPU times: user 2.9 s, sys: 1.79 s, total: 4.69 s\n",
923 | "Wall time: 5.03 s\n"
924 | ]
925 | }
926 | ],
927 | "source": [
928 | "%%time\n",
929 | "test_feats = []\n",
930 | "learn.model.eval()\n",
931 | "for ims, _ in data.test_dl:\n",
932 | " test_feats.append(learn.model.process_features(learn.model.cnn(ims)).detach().cpu())"
933 | ]
934 | },
935 | {
936 | "cell_type": "code",
937 | "execution_count": 195,
938 | "metadata": {},
939 | "outputs": [
940 | {
941 | "name": "stdout",
942 | "output_type": "stream",
943 | "text": [
944 | "CPU times: user 9.02 s, sys: 5.04 s, total: 14.1 s\n",
945 | "Wall time: 14.4 s\n"
946 | ]
947 | }
948 | ],
949 | "source": [
950 | "%%time\n",
951 | "train_feats = []\n",
952 | "train_class_idxs = []\n",
953 | "learn.model.eval()\n",
954 | "for ims, t in data.train_dl:\n",
955 | " train_feats.append(learn.model.process_features(learn.model.cnn(ims)).detach().cpu())\n",
956 | " train_class_idxs.append(t)"
957 | ]
958 | },
959 | {
960 | "cell_type": "code",
961 | "execution_count": 196,
962 | "metadata": {},
963 | "outputs": [],
964 | "source": [
965 | "train_class_idxs = torch.cat(train_class_idxs)\n",
966 | "train_feats = torch.cat(train_feats)"
967 | ]
968 | },
969 | {
970 | "cell_type": "code",
971 | "execution_count": 206,
972 | "metadata": {},
973 | "outputs": [],
974 | "source": [
975 | "test_feats = torch.cat(test_feats)"
976 | ]
977 | },
978 | {
979 | "cell_type": "code",
980 | "execution_count": 209,
981 | "metadata": {},
982 | "outputs": [
983 | {
984 | "name": "stdout",
985 | "output_type": "stream",
986 | "text": [
987 | "CPU times: user 5min 7s, sys: 2min 58s, total: 8min 6s\n",
988 | "Wall time: 8min 6s\n"
989 | ]
990 | }
991 | ],
992 | "source": [
993 | "%%time\n",
994 | "sims = []\n",
995 | "for feat in test_feats:\n",
996 | " dists = learn.model.calculate_distance(train_feats, feat.unsqueeze(0).repeat(25344, 1))\n",
997 | " predicted_similarity = learn.model.head(dists.cuda()).sigmoid_()\n",
998 | " sims.append(predicted_similarity.squeeze().detach().cpu())"
999 | ]
1000 | },
1001 | {
1002 | "cell_type": "code",
1003 | "execution_count": 211,
1004 | "metadata": {},
1005 | "outputs": [
1006 | {
1007 | "name": "stdout",
1008 | "output_type": "stream",
1009 | "text": [
1010 | "CPU times: user 19.6 s, sys: 128 ms, total: 19.7 s\n",
1011 | "Wall time: 19.7 s\n"
1012 | ]
1013 | }
1014 | ],
1015 | "source": [
1016 | "%%time\n",
1017 | "thresh = 1\n",
1018 | "\n",
1019 | "top_5s = []\n",
1020 | "for sim in sims:\n",
1021 | " idxs = sim.argsort(descending=True)\n",
1022 | " probs = sim[idxs]\n",
1023 | " top_5 = []\n",
1024 | " for i, p in zip(idxs, probs):\n",
1025 | " if new_whale_idx not in top_5 and p < thresh and len(top_5) < 5: top_5.append(new_whale_idx)\n",
1026 | " if len(top_5) == 5: break\n",
1027 | " if i == new_whale_idx: continue\n",
1028 | " predicted_class = train_class_idxs[i]\n",
1029 | " if predicted_class not in top_5: top_5.append(predicted_class)\n",
1030 | " top_5s.append(top_5)"
1031 | ]
1032 | },
1033 | {
1034 | "cell_type": "code",
1035 | "execution_count": 221,
1036 | "metadata": {},
1037 | "outputs": [],
1038 | "source": [
1039 | "top_5_classes = []\n",
1040 | "for top_5 in top_5s:\n",
1041 | " top_5_classes.append(' '.join([classes[t] for t in top_5]))"
1042 | ]
1043 | },
1044 | {
1045 | "cell_type": "code",
1046 | "execution_count": 222,
1047 | "metadata": {},
1048 | "outputs": [
1049 | {
1050 | "data": {
1051 | "text/plain": [
1052 | "['new_whale w_9bedea6 w_448e190 w_ab629bb w_67e9aa8',\n",
1053 | " 'new_whale w_edce644 w_dd79a10 w_99af1a9 w_ae393cd',\n",
1054 | " 'new_whale w_4516ff1 w_d1207d9 w_02c7e9d w_8003858',\n",
1055 | " 'new_whale w_0369a5c w_f66ec54 w_ae8982d w_d0475b2',\n",
1056 | " 'new_whale w_8cd5c91 w_0cc0430 w_06460d7 w_e8b82f6']"
1057 | ]
1058 | },
1059 | "execution_count": 222,
1060 | "metadata": {},
1061 | "output_type": "execute_result"
1062 | }
1063 | ],
1064 | "source": [
1065 | "top_5_classes[:5]"
1066 | ]
1067 | },
1068 | {
1069 | "cell_type": "code",
1070 | "execution_count": 223,
1071 | "metadata": {},
1072 | "outputs": [],
1073 | "source": [
1074 | "sub = pd.DataFrame({'Image': [path.name for path in data.test_ds.x.items]})\n",
1075 | "sub['Id'] = top_5_classes\n",
1076 | "sub.to_csv(f'subs/{name}.csv.gz', index=False, compression='gzip')"
1077 | ]
1078 | },
1079 | {
1080 | "cell_type": "code",
1081 | "execution_count": 224,
1082 | "metadata": {},
1083 | "outputs": [
1084 | {
1085 | "data": {
1086 | "text/html": [
1087 | "\n",
1088 | "\n",
1101 | "
\n",
1102 | " \n",
1103 | " \n",
1104 | " | \n",
1105 | " Image | \n",
1106 | " Id | \n",
1107 | "
\n",
1108 | " \n",
1109 | " \n",
1110 | " \n",
1111 | " | 0 | \n",
1112 | " 47380533f.jpg | \n",
1113 | " new_whale w_9bedea6 w_448e190 w_ab629bb w_67e9aa8 | \n",
1114 | "
\n",
1115 | " \n",
1116 | " | 1 | \n",
1117 | " 1d9de38ba.jpg | \n",
1118 | " new_whale w_edce644 w_dd79a10 w_99af1a9 w_ae393cd | \n",
1119 | "
\n",
1120 | " \n",
1121 | " | 2 | \n",
1122 | " b3d4ee916.jpg | \n",
1123 | " new_whale w_4516ff1 w_d1207d9 w_02c7e9d w_8003858 | \n",
1124 | "
\n",
1125 | " \n",
1126 | " | 3 | \n",
1127 | " 460fd63ae.jpg | \n",
1128 | " new_whale w_0369a5c w_f66ec54 w_ae8982d w_d0475b2 | \n",
1129 | "
\n",
1130 | " \n",
1131 | " | 4 | \n",
1132 | " 79738ffc1.jpg | \n",
1133 | " new_whale w_8cd5c91 w_0cc0430 w_06460d7 w_e8b82f6 | \n",
1134 | "
\n",
1135 | " \n",
1136 | "
\n",
1137 | "
"
1138 | ],
1139 | "text/plain": [
1140 | " Image Id\n",
1141 | "0 47380533f.jpg new_whale w_9bedea6 w_448e190 w_ab629bb w_67e9aa8\n",
1142 | "1 1d9de38ba.jpg new_whale w_edce644 w_dd79a10 w_99af1a9 w_ae393cd\n",
1143 | "2 b3d4ee916.jpg new_whale w_4516ff1 w_d1207d9 w_02c7e9d w_8003858\n",
1144 | "3 460fd63ae.jpg new_whale w_0369a5c w_f66ec54 w_ae8982d w_d0475b2\n",
1145 | "4 79738ffc1.jpg new_whale w_8cd5c91 w_0cc0430 w_06460d7 w_e8b82f6"
1146 | ]
1147 | },
1148 | "execution_count": 224,
1149 | "metadata": {},
1150 | "output_type": "execute_result"
1151 | }
1152 | ],
1153 | "source": [
1154 | "pd.read_csv(f'subs/{name}.csv.gz').head()"
1155 | ]
1156 | },
1157 | {
1158 | "cell_type": "code",
1159 | "execution_count": 225,
1160 | "metadata": {},
1161 | "outputs": [
1162 | {
1163 | "data": {
1164 | "text/plain": [
1165 | "1.0"
1166 | ]
1167 | },
1168 | "execution_count": 225,
1169 | "metadata": {},
1170 | "output_type": "execute_result"
1171 | }
1172 | ],
1173 | "source": [
1174 | "pd.read_csv(f'subs/{name}.csv.gz').Id.str.split().apply(lambda x: x[0] == 'new_whale').mean()"
1175 | ]
1176 | },
1177 | {
1178 | "cell_type": "code",
1179 | "execution_count": 226,
1180 | "metadata": {},
1181 | "outputs": [
1182 | {
1183 | "name": "stdout",
1184 | "output_type": "stream",
1185 | "text": [
1186 | "100%|████████████████████████████████████████| 164k/164k [00:03<00:00, 46.1kB/s]\n",
1187 | "Successfully submitted to Humpback Whale Identification"
1188 | ]
1189 | }
1190 | ],
1191 | "source": [
1192 | "!kaggle competitions submit -c humpback-whale-identification -f subs/{name}.csv.gz -m \"{name}\""
1193 | ]
1194 | }
1195 | ],
1196 | "metadata": {
1197 | "kernelspec": {
1198 | "display_name": "Python 3",
1199 | "language": "python",
1200 | "name": "python3"
1201 | },
1202 | "language_info": {
1203 | "codemirror_mode": {
1204 | "name": "ipython",
1205 | "version": 3
1206 | },
1207 | "file_extension": ".py",
1208 | "mimetype": "text/x-python",
1209 | "name": "python",
1210 | "nbconvert_exporter": "python",
1211 | "pygments_lexer": "ipython3",
1212 | "version": "3.7.0"
1213 | }
1214 | },
1215 | "nbformat": 4,
1216 | "nbformat_minor": 2
1217 | }
1218 |
--------------------------------------------------------------------------------